path | concatenated_notebook
---|---|
Convert Color to GrayScale/CtoBGresults.ipynb | ###Markdown
Convert Colored Images to Grayscale Images and Analyze the Time Taken
###Code
import cv2
import shutil
import os
import time
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Generate 100, 200, 300, 400 and 500 copies of the sample image
###Code
for i in range(5):
os.mkdir(f"{i+1}KImages")
for j in range(100*(i+1)):
shutil.copy("./FolderWithImages/check.jpg",dst=f"{i+1}KImages/copy{j}.jpg")
###Output
_____no_output_____
###Markdown
Convert Images to Gray Scale and Note the time taken
###Code
data = {}
for i in range(5):
start = time.time()
images = os.listdir(f'{i+1}KImages')
for image in images:
check = cv2.imread(f'./{i+1}KImages/{image}')
gray = cv2.cvtColor(check,cv2.COLOR_BGR2GRAY)
os.remove(f'./{i+1}KImages/{image}')
cv2.imwrite(f'./{i+1}KImages/{image}',gray)
end = time.time()
data[f'{i+1}KImages'] = end - start
print(data)
###Output
{'1KImages': 1.9955856800079346, '2KImages': 3.9975745677948, '3KImages': 5.86980938911438, '4KImages': 8.362041473388672, '5KImages': 9.881592512130737}
###Markdown
Plot the results
###Code
plt.plot(list(data.keys()),list(data.values()))
plt.title("Time Taken to Convert Images to GrayScale")
plt.xlabel("Number of Files(K)")
plt.ylabel("Time Taken")
plt.show()
###Output
_____no_output_____ |
notebooks/preprocessing/amino_acid_embedding.ipynb | ###Markdown
Drop non numeric columns
###Code
data = data.drop(["3-letter", "AminoAcid"], axis =1 )
data = data.drop(["Pc", "Residue mass"], axis =1 )
#data = data.drop(["3-letter", "AminoAcid", "Chou-Fasman code for helix propensity","Chou-Fasman code for sheet propensity"], axis=1)
data.abs().max(axis=0)
std = data.std(ddof=0, axis=0)
std
data_std = data.div(std, axis=1)
data_std
data_std["Hyd"] = data_std["Hyd"] * (-1)
transposed = data_std.transpose()
transposed["0"] = [0, 0, 0, 0, 0, 0, 0, 7/1.724522]
data_std = transposed.transpose()
#data = pd.merge(left=data, right=generated, how="inner", left_index=True, right_index=True)
data_std.sort_index()
data = data_std
delta_0_center = (data.max(axis=0)+data.min(axis=0))/2 - np.abs(np.random.normal(scale=0.0001, size=data.shape[1]))
data = (data.subtract(delta_0_center, axis=1))#.div(data.std(ddof=0, axis=0), axis=1))
# data = (data.subtract(data.mean(axis=0), axis=1).div(data.std(ddof=0, axis=0), axis=1))
data = data.div(data.abs().max(axis=0)+np.abs(np.random.normal(scale=0.04, size=data.shape[1])), axis=1)
data = data + np.random.normal(scale=0.0001, size=data.shape)
data.max(), data.min()
data
# transposed = data.transpose()
# transposed[0] = -1 + np.abs(np.random.normal(scale=0.001, size=data.shape[1]))
# final = transposed.transpose()
final=data.sort_index()
final
final = final.fillna(value=0.00000000000000001)
def calculate_variation(data, col):
one_column = data[[col]]
one_column = one_column.append(pd.DataFrame([-0.999, 0.999], index = ["N_0", "N_1"], columns=[col]))
one_column[col+"_min"] =one_column.sort_values(col).rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True)
one_column[col +"_max"] =one_column.sort_values(col, ascending=False)[col].rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True)
one_column = one_column.drop(["N_0", "N_1"], axis=0)
return list(zip(one_column[col+"_min"].values.tolist(), one_column[col+"_max"].values.tolist()))
#return one_column
col = "Lip"
one_column = final[[col]]
one_column = one_column.append(pd.DataFrame([-0.999, 0.999], index = ["N_0", "N_1"], columns=[col]))
one_column[col+"_min"] =one_column.sort_values(col).rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True)
one_column[col +"_max"] =one_column.sort_values(col, ascending=False)[col].rolling(window=2).apply(lambda x: (x[0] - x[1])/2, raw=True)
one_column = one_column.drop(["N_0", "N_1"], axis=0)
one_column
final.shape
variation = []
for c in final.columns:
variation.append(calculate_variation(final, c))
np.array(variation).shape
np.array(variation).shape
final.sum(axis=1)
final_values = final.values
final_values
np.save("../data/protein/hand_crafted_embeddings_8.npy", final_values)
np.save("../data/protein/embeddings_variation_8.npy", variation)
variation[0][0]
variation[0][0][0] + variation[0][0][1]
abs(variation[0][0][0] - variation[0][0][1])/1.95
variation[0][0][0] + variation[0][0][1]
hist, bin_edges = np.histogram(final.values)
hist, bin_edges
np.max(final.values)
np.min(final.values)
import tensorflow as tf
tf.enable_eager_execution()
embeddings = tf.constant([[ 0. , 0. , 0. ],
[-0.78874256, 0.43069857, -0.47393251],
[-0.68765258, 0.19845299, -0.64110991],
[-0.19540625, -0.46831037, -0.43458175],
[ 0.39009043, -0.46831037, -0.14043892],
[-0.4217928 , 0.31515591, 0.5829058 ],
[-1. , 0.43069857, -0.93603945],
[ 0.088746 , -0.32018371, 0.23223556],
[ 0.49063027, 0.43069857, 0.32813161],
[ 0.97943816, -0.1715736 , 0.46384033],
[ 0.74644982, 0.43069857, 0.26766763],
[ 0.45239487, 0.1340585 , 0.05631338],
[-0.14891861, -0.26652163, -0.23952655],
[-0.58491215, 0.43069857, 0.03943025],
[ 0.3517175 , -0.26652163, 0.0522144 ],
[ 0.68015679, -1. , 0.360837 ],
[-0.70594505, -0.15329949, -0.60368124],
[ 0.93336313, 0.00826686, -0.29583345],
[-0.2963587 , 0.43069857, 0.16728669],
[ 0.12175498, 0.01397148, 1. ],
[-0.40501324, -0.13937636, 0.21428112]])
embeddings
acid_embeddings = tf.get_variable("acid_embeddings", shape=[21, 3],
                                   initializer=tf.constant_initializer(final.values[:, :3]), trainable=False)
real_x = tf.nn.embedding_lookup(acid_embeddings, [0,1,2,3,4,5,6])
real_x
emb_distances = tf.matmul(
tf.nn.l2_normalize(acid_embeddings, axis=1),
tf.nn.l2_normalize(real_x, axis=1),
transpose_b=True)
emb_distances
tf.argmax(emb_distances, axis=0)
tf.nn.l2_normalize(acid_embeddings, axis=1) #[-0.78874254, 0.43069857, -0.4739325 ]
a = [[1,2,3],[50,60,70],[800, 900,1000], [-1, -2, -3]]
a = tf.expand_dims(a, axis=0)
a = tf.expand_dims(a, axis=3)
tf.squeeze(a)
x = tf.image.resize_nearest_neighbor(a, [4, 3 * 2])
tf.squeeze(x)
###Output
_____no_output_____ |
ARIMA/daily-exchange-rates.ipynb | ###Markdown
Autoregressive Integrated Moving Average (ARIMA)
The ARIMA model is a generalisation of an ARMA model that can be applied to non-stationary time series.
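For reference (standard textbook form, added here and not part of the original notebook), an ARIMA($p$, $d$, $q$) model with lag operator $L$ can be written as
$$\left(1 - \sum_{i=1}^{p}\phi_i L^{i}\right)(1-L)^{d} X_t = \left(1 + \sum_{j=1}^{q}\theta_j L^{j}\right)\varepsilon_t,$$
where $d$ is the number of differencing steps applied to make the series stationary.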
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
matplotlib.rcParams['figure.figsize'] = (16, 9)
pd.options.display.max_columns = 999
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
df = pd.read_csv('../_datasets/daily-exchange-rates.csv', parse_dates=[0], index_col='DateTime')
print(df.shape)
df.head()
###Output
(7588, 8)
###Markdown
Define Parameters
Make predictions for a four-week period using a training period of one year.
###Code
dataset_name = 'Daily Exchange Rates'
dataset_abbr = 'DER'
model_name = 'ARIMA'
context_length = 52*5 # 52 weeks
prediction_length = 4*5 # four weeks
###Output
_____no_output_____
###Markdown
Define Error Metric
The mean absolute scaled error (MASE) will be used to evaluate the forecasts.
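For reference, the standard definition (consistent with the `calc_MASE` function defined below) is
$$\mathrm{MASE} = \frac{\tfrac{1}{m}\sum_{t=1}^{m}\lvert y_t - \hat{y}_t\rvert}{\tfrac{1}{n-1}\sum_{i=2}^{n}\lvert y_i - y_{i-1}\rvert},$$
where the numerator is the mean absolute error over the $m$ test points and the denominator is the mean absolute one-step (naive) difference over the $n$ training points.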
###Code
def calc_MASE(training_series, testing_series, prediction_series):
a = training_series.iloc[1:].values
b = training_series.iloc[:-1].values
d = np.sum(np.abs(a-b)) / len(a)
errors = np.abs(testing_series - prediction_series)
return np.mean(errors) / d
###Output
_____no_output_____
###Markdown
Example ARIMA Model
Exploration of how ARIMA models work using a single example time series.
###Code
ts_ex = 'ts8'
df_ex = df.loc[:, ts_ex]
# Plot data from two months
df_ex.iloc[:4*5*2].plot();
###Output
_____no_output_____
###Markdown
Time Series Decomposition
Decompose the example time series into trend, seasonal, and residual components.
###Code
fig = seasonal_decompose(df_ex, model='additive').plot()
###Output
_____no_output_____
###Markdown
Plot ACF and PACF
The Autocorrelation Function (ACF) is the correlation of a signal with a delayed copy of itself as a function of delay. The Partial Autocorrelation Function (PACF) is the partial correlation of a signal with a delayed copy of itself, controlling for the values of the time series at all shorter delays, as a function of delay.
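As a reminder (standard definition, added here for reference), the lag-$k$ autocorrelation of a stationary series is
$$\rho_k = \frac{\operatorname{Cov}(y_t,\, y_{t-k})}{\operatorname{Var}(y_t)},$$
and the PACF at lag $k$ is the correlation that remains after regressing out the effects of lags $1,\dots,k-1$.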
###Code
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex, lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex, lags=50, ax=ax[1])
###Output
_____no_output_____
###Markdown
There is no seasonality.
Build Model
Grid search will be implemented to identify optimal parameters for the ARIMA(p, d, q) model, using the following possible values:
###Code
from itertools import product
ps = range(0, 5) # Up to 4 AR terms
ds = range(0, 2) # Either no or first differencing
qs = range(0, 5) # Up to 4 MA terms
params = product(ps, ds, qs)
params_list = list(params)
print("Number of parameter combinations for grid search: {}".format(len(params_list)))
def optimiseARIMA(time_series, params_list=params_list, test_length=prediction_length, train_length=context_length):
ts = time_series.iloc[-(test_length+train_length):]
ts_train = ts.iloc[:-test_length]
ts_test = ts.iloc[-test_length:]
# Select the best model using a holdout validation period
val_length = test_length
ts_train_val = ts.iloc[:-(test_length+val_length)]
ts_val = ts.iloc[-(test_length+val_length):-test_length]
results = []
for params in params_list:
p = params[0]
d = params[1]
q = params[2]
# try/except loop in case model fails to converge for given parameters
try:
arima = sm.tsa.ARIMA(ts_train_val, order=(p, d, q)).fit()
except:
continue
# Make predictions for validation holdout set and update best model if necessary
val_pred = arima.predict(start=ts_val.index[0], end=ts_val.index[-1], dynamic=True)
MASE = calc_MASE(ts_train, ts_val, val_pred)
results.append([params, MASE])
df_results = pd.DataFrame(results)
df_results.columns = ['parameters', 'MASE']
df_results = df_results.sort_values(by='MASE', ascending=True).reset_index(drop=True)
# Retrain model with best parameters using all training data and generate test forecast
# Use loop to fall back to next best model in case training fails using full dataset
trained = False
model_rank = 1
while not trained:
train_params = df_results.iloc[model_rank-1, 0]
try:
arima = sm.tsa.ARIMA(ts_train, order=train_params).fit()
trained = True
except:
model_rank += 1
summary = arima.summary()
# Start index must be greater than q. Fill missing initial entries with first actual prediction
fcst = arima.predict(start=ts_train.index[train_params[2]+1], end=ts_test.index[-1])
first_pred = fcst[0]
fcst = np.concatenate([np.array([first_pred for i in range(train_params[2]+1)]), fcst])
fcst = pd.DataFrame(data=fcst, index=ts.index, columns=['pred%s' % ts.name[2:]])
return fcst, train_params, summary
import warnings
warnings.filterwarnings('ignore')
%%time
fcst, train_params, summary = optimiseARIMA(df_ex)
df_ex = pd.concat([df_ex, fcst], axis=1)
print("Best model: ARIMA{}".format(train_params))
print(summary)
# Example forecast
fcst0 = df_ex.copy()
fcst0['pred%s' % ts_ex[2:]][fcst0['pred%s' % ts_ex[2:]] < 0] = 0
fcst0.iloc[-4*prediction_length:, 0].plot(label='Actual', c='k', alpha=0.5)
fcst0.iloc[-4*prediction_length:, 1].plot(label='ARIMA%s' % str(train_params), c='b', alpha=0.5)
plt.axvline(x=fcst0.index[-prediction_length], linestyle=':', linewidth=2, color='r', label='Start of test data')
plt.legend()
plt.title(ts_ex);
###Output
_____no_output_____
###Markdown
Evaluating ARIMA
To evaluate ARIMA, forecasts will be generated for each time series using the grid search methodology shown above (with subsequent zeroing of the negative values). MASE will be calculated for each individual time series, and the mean of all these scores will be used as the overall accuracy metric for ARIMA on this dataset.
###Code
parameters = []
results = df.iloc[-(prediction_length+context_length):].copy()
tic = time()
for i, col in enumerate(df.columns):
if i % 10 == 0:
toc = time()
print("Running predictions for {}. Cumulative time: {:.1f} minutes.".format(col, (toc-tic)/60))
# Prepare DataFrame for selected column
dft = df.loc[:, col]
# Find best model
fcst, train_params, summary = optimiseARIMA(dft)
# Add predictions to results DataFrame
results['pred%s' % col[2:]] = fcst.values
# Store model parameteres for reference
parameters.append(train_params)
toc = time()
print("Finished! Total run time: {:.1f} minutes.".format((toc-tic)/60))
results0 = results.copy()
results0[results0 < 0] = 0
results0.head()
MASEs = []
for i, col in enumerate(df.columns):
MASEs.append(calc_MASE(results0[col].iloc[-(context_length + prediction_length):-prediction_length],
results0[col].iloc[-prediction_length:],
results0['pred%s' % str(i+1)].iloc[-prediction_length:]))
fig, ax = plt.subplots()
ax.hist(MASEs, bins=20)
ax.set_title('Distributions of MASEs for {} dataset'.format(dataset_name))
ax.set_xlabel('MASE')
ax.set_ylabel('Count');
MASE = np.mean(MASEs)
print("Overall MASE: {:.4f}".format(MASE))
###Output
Overall MASE: 6.1503
###Markdown
Show some example forecasts.
###Code
fig, ax = plt.subplots(4, 2, sharex=True)
ax = ax.ravel()
for col in range(1, 9):
ax[col-1].plot(results0.index[-prediction_length:], results0['ts%s' % col].iloc[-prediction_length:],
label='Actual', c='k', linestyle='--', linewidth=1)
ax[col-1].plot(results0.index[-prediction_length:], results0['pred%s' % col].iloc[-prediction_length:],
label='ARIMA%s' % str(parameters[col-1]), c='b')
ax[col-1].legend()
fig.suptitle('{} Predictions'.format(dataset_name));
###Output
_____no_output_____
###Markdown
Clearly, some of the time series adopt an ARIMA(0,1,0) model which just flatlines at zero for the test period. The higher order models appear to do a better job of generating forecasts, although when such a large number of data points are zero, always forecasting zero is presumably a strategy that performs quite well. Store the predictions and accuracy score for the ARIMA models.
###Code
import pickle
with open('{}-MASE.pkl'.format(dataset_abbr), 'wb') as f:
pickle.dump(MASE, f)
with open('../_results/{}/{}-results.pkl'.format(model_name, dataset_abbr), 'wb') as f:
pickle.dump(results.iloc[-prediction_length:], f)
###Output
_____no_output_____ |
Natural Language Processing in TensorFlow/week3 Sequence models/NLP_in_TensorFlow_Week3_Note.ipynb | ###Markdown
Sequence Model
The previous lesson finished by looking at the effect of tokenizing words into sub-words, where the classifier failed to get any meaningful results. The main reason is that **the context of words was hard to follow with sub-words instead of the full sequence**. For example, consider the Fibonacci sequence, $$x_{n} = x_{n-1} + x_{n-2}$$ The idea behind the Fibonacci sequence is similar to a recurrent neural network: you can treat x as the input and y as the output, and there is also an element fed into each function from the previous function. **The output of the previous function is an input to the next stage's function**, so **$x_{0}$ is fed all the way through the network**.
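As a rough illustration of that recurrence (a minimal NumPy sketch added here, not part of the original lesson; the weights are random placeholders), the hidden state plays the role of the value carried over from the previous step:

```python
import numpy as np

# Minimal simple-RNN step: h_t depends on the current input x_t and on
# h_{t-1}, the state carried over from the previous step.
def rnn_step(x_t, h_prev, Wx, Wh, b):
    return np.tanh(x_t @ Wx + h_prev @ Wh + b)

rng = np.random.default_rng(0)
Wx, Wh, b = rng.normal(size=(8, 16)), rng.normal(size=(16, 16)), np.zeros(16)

h = np.zeros(16)                        # initial state, like x_0 in the analogy
for x_t in rng.normal(size=(10, 8)):    # 10 time steps of 8 features each
    h = rnn_step(x_t, h, Wx, Wh, b)     # the output of one step feeds the next
```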
###Code
Image('rnn.PNG', width = 600)
###Output
_____no_output_____
###Markdown
LSTM
Motivating example: "I lived in Ireland, so at school they made me learn how to speak (Gaelic)." A context keyword gives the details about the language: "Irish" describes the people, "Gaelic" describes the language. The word that carries that context appears earlier in the sentence, so if we're only looking at the recent part of the sequence we may lose it. Instead of only the context passed along in a plain RNN, an LSTM has an additional pipeline of context called the **cell state**, which can pass through the network to impact later steps. The cell state can also be **bidirectional**, so later context can impact earlier ones.
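For reference, the standard LSTM update equations (textbook notation, added here for clarity) make the extra cell-state pipeline explicit:
$$
\begin{aligned}
f_t &= \sigma(W_f[h_{t-1}, x_t] + b_f), \quad i_t = \sigma(W_i[h_{t-1}, x_t] + b_i), \quad o_t = \sigma(W_o[h_{t-1}, x_t] + b_o), \\
\tilde{c}_t &= \tanh(W_c[h_{t-1}, x_t] + b_c), \qquad c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c}_t, \qquad h_t = o_t \odot \tanh(c_t),
\end{aligned}
$$
where $c_t$ is the cell state that flows through the network alongside the hidden state $h_t$.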
###Code
Image('LSTM.PNG', width = 600)
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, 64),
# Bidirectional RNN, output will be 128, even if we specify 64, but bidirectional rnn double it
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), # 64 is the number of outputs that I desire from that layer,
tf.keras.layers.Dense(64, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
model.summary()
model2 = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, 64),
# stack two LSTM
# do have put return_sequences = True on the first one
# return_sequences = True : ensure the output of LSTM match the desired inputs of next LSTM
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences = True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(64, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
###Output
_____no_output_____
###Markdown
Return Sequences
`return_sequences` controls whether to return the full sequence of outputs or only the last output. The default is `return_sequences = False`, which means the layer **only outputs the last hidden state** $a^{\langle T \rangle}$. The last hidden state output captures an abstract representation of the input sequence.
* If `return_sequences = True`, the output dimension will be (samples, time steps, LSTM units).
* If `return_sequences = False`, the output dimension will be (samples, LSTM units), i.e. only one time step: the last hidden state output.

Return States
In a GRU the hidden state and the cell state are equal, $c^{\langle T \rangle} = a^{\langle T \rangle}$, while for an LSTM $c^{\langle T \rangle} \neq a^{\langle T \rangle}$. Setting `return_state = True` makes the RNN output its last cell state in addition to the hidden state.
* **LSTM(units, return_state=True)**: the output has three components, $(a^{\langle T \rangle}, a^{\langle T \rangle}, c^{\langle T \rangle})$, where $T$ stands for the last step. **Note**: the value in the first component $a^{\langle T \rangle}$ and in the second component $a^{\langle T \rangle}$ is the same.
* **LSTM(units, return_sequences=True, return_state=True)**: the output will be $(a^{\langle 1 \dots T \rangle}, a^{\langle T \rangle}, c^{\langle T \rangle})$. **Note**: the last entry of the first component $a^{\langle 1 \dots T \rangle}$ is the same as the second component $a^{\langle T \rangle}$.
* **GRU(units, return_sequences=True, return_state=True)**: the output will be $(a^{\langle 1 \dots T \rangle}, c^{\langle T \rangle})$, since in a GRU $c^{\langle T \rangle} = a^{\langle T \rangle}$.

[Link for Quotation](https://www.dlology.com/blog/how-to-use-return_state-or-return_sequences-in-keras/targetText=By%20default%2C%20the%20return_sequences%20is,representation%20of%20the%20input%20sequence.)
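A small sketch of how these flags change the returned shapes (written against the TensorFlow 2.x Keras API, whereas the cells below use TF1 eager mode; the sizes in the comments are just the assumed example shapes):

```python
import tensorflow as tf

x = tf.random.normal((2, 10, 8))   # (samples, time steps, features)

last = tf.keras.layers.LSTM(16)(x)                          # (2, 16)
seq = tf.keras.layers.LSTM(16, return_sequences=True)(x)    # (2, 10, 16)
seq, h, c = tf.keras.layers.LSTM(16, return_sequences=True,
                                 return_state=True)(x)      # (2, 10, 16), (2, 16), (2, 16)
seq_g, h_g = tf.keras.layers.GRU(16, return_sequences=True,
                                 return_state=True)(x)      # (2, 10, 16), (2, 16)
```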
###Code
Image('rnn2.PNG', width = 600)
###Output
_____no_output_____
###Markdown
Using GRU
**Training time is quicker than with an LSTM.**
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, 64),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)), # 32 is the number of outputs that I desire from that layer,
tf.keras.layers.Dense(64, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
###Output
_____no_output_____
###Markdown
Inspect Results Using LSTM
Using the IMDB dataset, compare the accuracy between Flatten plus GlobalAveragePooling and LSTM:
* **Flatten + GlobalAveragePooling**: trains quickly; accuracy quickly gets to about 85% and flattens there.
* **LSTM**: trains more slowly; accuracy reaches 85% really quickly and continues climbing toward about 97.5% within 50 epochs. The validation accuracy drops slowly but stays close to the non-LSTM version's value, which indicates a little overfitting: the accuracy of the predictions increases while the confidence in them decreases.
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),
#tf.keras.layers.Flatten(),
#tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Dense(24, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
Image('LSTM_result.PNG', width = 800)
# Use convolution Neural Network
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),
    tf.keras.layers.Conv1D(128, 5, activation = 'relu'), # output shape is (max_length - 4, 128)
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(24, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
###Output
_____no_output_____
###Markdown
Inspect Results Using Convolution
The accuracy is better than before, with close to 100% on training and 80% on validation. As before, the validation loss increases, indicating **overfitting**.
###Code
Image('conv_result.PNG', width = 800)
###Output
_____no_output_____ |
examples/Classification example.ipynb | ###Markdown
Victor Moraes - 2016027600
Sixth practical assignment in pattern recognition
LS-SVM - Least-squares support vector machines
Introduction
In this assignment, the LS-SVM classifier will be applied to solve a synthetic classification problem. In this exercise the student will apply the LS-SVM classifier to solve a real classification problem. The student must follow these steps: 1. Load the dataset; 2. Split the data into training and test sets; 3. Train the SVM (paying attention to the definition of the chosen kernel's parameters and of the SVM regularization parameter C). The student must train an SVM to solve the problem of classifying glass types from the Glass database based on their chemical characteristics. It has 214 instances of 10 attributes, with numeric inputs and a categorical output variable. This database is built into R for those who are using R, but it can also be found in the UCI Machine Learning Repository. The report must show the mean accuracy and standard deviation for 10 experiments, varying the training and test sets, and also present which parameters were defined and how they were defined, for example the parameter C.
1 - Test set
We will first import the required libraries and then create the sample set.
###Code
import numpy as np
import pandas as pd
import sys
#sys.path.insert(0, './LSSVMlib/')
from LSSVMlib.LSSVMClassification import LSSVMClassification
lssvc = LSSVMClassification(gamma=1, kernel='rbf', sigma=3.5) # Class instantiation
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, ScalarFormatter
from sklearn.model_selection import train_test_split
from matplotlib.colors import Normalize
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
glass = pd.read_csv('glass.csv').dropna()
glass.head()
labels = pd.read_csv('classifications.csv').dropna()
labels.head()
###Output
_____no_output_____
###Markdown
2. Splitting the training and test sets
At this point the initial training and validation samples are separated.
###Code
glass.describe()
features = ['RI','Na','Mg','Al','Si','K','Ca','Ba','Fe']
label = ['glass_type']
X = glass.iloc[:,1:-1].to_numpy()
print(X.shape)
y = glass.iloc[:,-1:].to_numpy().ravel()
print(y.shape)
test_size=0.2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
enable_scalar = True
sc_train = StandardScaler().fit(X_train)
sc_test = StandardScaler().fit(X_test)
sc_all = StandardScaler().fit(X)
if(enable_scalar == True):
X_train_std = sc_train.transform(X_train)
X_test_std = sc_test.transform(X_test)
X_std = sc_all.transform(X)
else:
X_train_std = X_train
X_test_std = sc_test.transform(X_test)
X_std = sc_all.transform(X)
# Get information about input and outputs
print(f"X_train.shape: {X_train.shape}")
print(f"X_test.shape: {X_test.shape}")
print(f"y_train.shape: {y_train.shape}")
print(f"y_test.shape: {y_test.shape}")
print(f"np.unique(y_train): {np.unique(y_train)}")
print(f"np.unique(y_test): {np.unique(y_test)}")
###Output
X_train.shape: (171, 9)
X_test.shape: (43, 9)
y_train.shape: (171,)
y_test.shape: (43,)
np.unique(y_train): [1 2 3 5 6 7]
np.unique(y_test): [1 2 3 5 6 7]
###Markdown
3. Initial training of the model, with parameters to be determined
The SVC (support vector classification) implementation will be used. In this implementation the points are not required to be linearly separable; the tolerable deviation is dictated by the parameter C. To determine this, a grid search was performed to validate which kernel and which parameters give the best performance. The grid search uses 5-fold cross-validation for each classifier tested. We found that the RBF (radial basis function) kernel gave the best accuracy. Another parameter of the RBF kernel is gamma, represented in the equation below by sigma. It is a kernel parameter that determines the geometry and curvature of the classification hyperplane. The radial kernel used here is: $K(x_i,x_j)= \exp\left(-\frac{\|x_i-x_j\|^2}{2\sigma^2}\right)$. For the choice of C and gamma, a sweep will be made below in such a way as to avoid overfitting and maximize the mean accuracy. The LS-SVM optimization problem, as Suykens shows in equation 3.5, amounts to solving a linear system (shown as figures in the original notebook, together with the prediction equation), where $\Omega = y^{T} y\, K(X,X)$. In the following items we investigate which topology gives the best accuracy. RBF had the best score, so a grid search varying gamma and sigma is performed on it to find the best model.
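For reference, the linear system referred to above, in the standard LS-SVM classification formulation from Suykens (reconstructed here, since the original figures are not included), is
$$
\begin{bmatrix} 0 & y^{T} \\ y & \Omega + \gamma^{-1} I \end{bmatrix}
\begin{bmatrix} b \\ \alpha \end{bmatrix}
=
\begin{bmatrix} 0 \\ \mathbf{1} \end{bmatrix},
\qquad \Omega_{kl} = y_k\, y_l\, K(x_k, x_l),
$$
and the prediction for a new point $x$ is
$$
\hat{y}(x) = \operatorname{sign}\!\left(\sum_{k} \alpha_k\, y_k\, K(x, x_k) + b\right).
$$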
###Code
# Use the classifier with different kernels
print('Gaussian kernel:')
lssvc = LSSVMClassification(gamma=1, kernel='rbf', sigma=3.5) # Class instantiation
lssvc.fit(X_train_std, y_train) # Fitting the model
y_pred = lssvc.predict(X_test_std) # Making predictions with the trained model
acc = accuracy_score(y_test, y_pred) # Calculate Accuracy
print('acc_test = ', acc, '\n')
print('Polynomial kernel:')
lssvc = LSSVMClassification(gamma=1, kernel='poly', d=2)
lssvc.fit(X_train_std, y_train)
y_pred = lssvc.predict(X_test_std)
acc = accuracy_score(y_test, y_pred)
print('acc_test = ', acc, '\n')
print('Linear kernel:')
lssvc = LSSVMClassification(gamma=1, kernel='linear')
lssvc.fit(X_train_std, y_train)
y_pred = lssvc.predict(X_test_std)
acc = accuracy_score(y_test, y_pred)
print('acc_test = ', acc, '\n')
kernels = ['poly', 'rbf']
#kernels = ['linear', 'poly', 'rbf', 'sigmoid']
#C_range = np.logspace(-3,6,2)
C_gamma = np.logspace(-6,3,50)
#coef_0_range = np.linspace(-100,100,10)
#coef_0_range = np.linspace(-10,10,5)
param_grid = {
'kernel':kernels,
'gamma':C_gamma,
}
grid = GridSearchCV(LSSVMClassification(), param_grid=param_grid, n_jobs=3, verbose=True)
grid.fit(X_std, y)
grid.best_params_
best_estimator = grid.best_estimator_
best_estimator.fit(X_train_std,y_train)
final_score = best_estimator.score(X_test_std, y_test)
print("The best parameters are {} with a score of {:2.2f} %"
.format(grid.best_params_, 100*final_score))
sigma_range = np.logspace(-1, 3, 30)
gamma_range = np.logspace(-1, 1, 30)
param_grid = dict(sigma=sigma_range, gamma=gamma_range)
grid = GridSearchCV(LSSVMClassification(), param_grid=param_grid, n_jobs=3, verbose=True)
grid.fit(X_std, y)
grid.best_params_
best_estimator = grid.best_estimator_
best_estimator.fit(X_train_std,y_train)
final_score = best_estimator.score(X_test_std, y_test)
print("The best parameters are {} with a score of {:2.2f} %"
.format(grid.best_params_, 100*final_score))
###Output
Fitting 5 folds for each of 900 candidates, totalling 4500 fits
The best parameters are {'gamma': 2.807216203941177, 'sigma': 4.520353656360243} with a score of 60.47 %
###Markdown
Here the accuracy plot of the grid search is presented, identifying local optima with higher performance.
###Code
scores_plot = np.reshape(grid.cv_results_['mean_test_score'],(gamma_range.shape[0],sigma_range.shape[0]))
gamma_plot = grid.cv_results_['param_gamma']
sigma_plot = grid.cv_results_['param_sigma']
#plt.axhline(grid.best_params_['sigma'], color='gray')
#plt.axvline(grid.best_params_['gamma'], color='gray')
X_mesh,Y_mesh=np.meshgrid(gamma_range,sigma_range)
Z = scores_plot
im = plt.pcolor(X_mesh,Y_mesh,Z, cmap=plt.cm.hot)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('gamma')
plt.ylabel('sigma')
plt.colorbar(im, orientation='vertical')
plt.title('Validation accuracy')
plt.show()
###Output
_____no_output_____
###Markdown
Next, validation is performed with the whole test set, obtaining an accuracy of 60.47%.
3.d Model choice
As can be seen below, the library selects the first best estimator according to the score metric, which corresponds to the RBF kernel with the following hyperparameters: {'gamma': 2.807216203941177, 'sigma': 4.520353656360243}
Cross-validation
The estimator will be validated using 10-fold cross-validation.
###Code
scores = cross_val_score(best_estimator, X_std, y, cv=10)
print()
print(scores)
mean_score = np.mean(scores) * 100
mean_std = np.std(scores) * 100
print('Score medio:{:.2f}%, desvio medio:{:.2f}%'.format(mean_score, mean_std))
print(10*'\n')
###Output
/home/vektor/.virtualenvs/science/lib/python3.8/site-packages/sklearn/model_selection/_split.py:666: UserWarning: The least populated class in y has only 9 members, which is less than n_splits=10.
warnings.warn(("The least populated class in y has only %d"
[0.63636364 0.54545455 0.77272727 0.68181818 0.47619048 0.61904762
0.76190476 0.71428571 0.66666667 0.76190476]
Score medio:66.36%, desvio medio:9.27%
###Markdown
Therefore the final accuracy was 66.36%, with a mean standard deviation of 9.27%. The low accuracy is probably due to the small number of samples per class.
Prediction demonstration
Here a demonstration is made: the model correctly predicts the glass sample of type 2.
###Code
sample = glass.iloc[70].to_numpy()[1:-1].reshape(1, -1)
glass.iloc[70]
best_estimator.predict(sc_all.transform(sample))
###Output
_____no_output_____ |
List/python-List.ipynb | ###Markdown
Python - Lists An Introduction to lists :* Let us consider an example : Write a program to accept marks of a student in 5 subject find the average of it.
###Code
def main():
n1 = eval(input("Enter the marks of 1 subject : "))
n2 = eval(input("Enter the marks of 1 subject : "))
n3 = eval(input("Enter the marks of 1 subject : "))
n4 = eval(input("Enter the marks of 1 subject : "))
n5 = eval(input("Enter the marks of 1 subject : "))
avg = (n1+n2+n3+n4+n5) / 5
print("Average is : ",avg)
main()
###Output
Enter the marks of 1 subject : 90
Enter the marks of 1 subject : 89
Enter the marks of 1 subject : 76
Enter the marks of 1 subject : 68
Enter the marks of 1 subject : 94
Average is : 83.4
###Markdown
Suppose the number of values to average must increase from five to 25. If we use above program as a guide, twenty additional variables must be introduced, and the overall length of the program necessarily will grow. Averaging 1,000 numbers using this approach is impractical. But there is alternative approach for averaging numbers.
###Code
def main():
sum = 0.0;
for i in range(5):
num = eval(input("Enter the marks of " + str(i + 1)+" subject : "))
sum += num
print("Average is : ",sum / 5)
main()
###Output
Enter the marks of 1 subject : 90
Enter the marks of 2 subject : 89
Enter the marks of 3 subject : 76
Enter the marks of 4 subject : 68
Enter the marks of 5 subject : 94
Average is : 83.4
###Markdown
* In fact, the coding change to average 1,000 numbers is no more difficult.* However, unlike the original average program, this new version does not display the numbers entered. This is a significant difference; it may be necessary to retain all the values entered for various reasons:* All the values can be redisplayed after entry so the user can visually verify their correctness.* The values may need to be displayed in some creative way; for example, they may be placed in a graphical user interface component, like a visual grid (spreadsheet).* The values entered may need to be processed in a different way after they are all entered; for example, we may wish to display just the values entered above a certain value (like greater than zero), but the limit is not determined until after all the numbers are entered.In all the above situation we need to retain the values of all the variables for future use.To solve this python provides us a non-primitive datatype List. Using Lists* A list is collection of object.* A single list can hold intergers,floating point numbers,Strings and even functions.
###Code
list1 = [2,5,3,6,10]
print(list1)
list2 = ['I',"am",'a','python','developer']
print(list2)
###Output
[2, 5, 3, 6, 10]
['I', 'am', 'a', 'python', 'developer']
###Markdown
* We can access each element in the list by its index.Index starts from 0 from the left.
###Code
print(list1[0])
print(list1[1])
print(list1[4])
print()
print(list1[-1]) # prints the last element of the list
print(list1[-2]) # prints the second last elements of the list
###Output
2
5
10
10
6
###Markdown
Python-list is mutable.That is its value can be changed.
###Code
print(list1)
print("The value at position 0 is :" , list1[0])
print()
list1[0] = 44 # change the value at the position 0 to 44
print(list1)
print("The value at position 0 is :" , list1[0])
###Output
[2, 5, 3, 6, 10]
The value at position 0 is : 2
[44, 5, 3, 6, 10]
The value at position 0 is : 44
###Markdown
Let's write a program to reverse the elemnts in the list. def main() : lst = [3,1,4,6,23,45,73,33] for i in range(len(lst) - 1,-1,-1): print(lst[i])main() * len() function returns the number of elements in the list .* The range function accept start parameter as len(lst) - 1 which returns the number of elements of the list (here 7 ),stop parameter as -1 and step parameter as -1.So the value of i ranges from 7,6,5,4,3,2,1,0. Program to add each element by particular value.
###Code
lst = [3,43,12,56,34,23,1,3]
val = eval(input("Enter the value to be added "))
print("Before adding the value")
print(lst)
print()
for i in range(0,len(lst)):
lst[i] = lst[i] + val
print("after adding the value ",val)
print(lst)
###Output
Enter the value to be added 5
Before adding the value
[3, 43, 12, 56, 34, 23, 1, 3]
after adding the value 5
[8, 48, 17, 61, 39, 28, 6, 8]
###Markdown
Program to add n elements to a list of same pattern.
###Code
a = [0] * 3
print(a)
a = ['abc'] * 5
print(a)
a = [10,20,30] * 3
print(a)
###Output
[0, 0, 0]
['abc', 'abc', 'abc', 'abc', 'abc']
[10, 20, 30, 10, 20, 30, 10, 20, 30]
###Markdown
Now we can write a the program to calculate the average.
###Code
def main():
marks = [] # initialized a empty list
sum = 0.0
n = int(input("Enter the total number of subject "))
for i in range(0,n):
value = int(input("Enter the mark of " + str(i+1)+ " subject "))
## adding the element to the marks list .
## NB: we need to make the element in list inorder to add to
## the exisiting list .Otherqwise error occurs.
marks = marks + [value]
sum = sum + value
print("\nMark of the student is ",marks)
print("Average : ",sum / n)
main()
###Output
Enter the total number of subject 5
Enter the mark of 1 subject 89
Enter the mark of 2 subject 90
Enter the mark of 3 subject 67
Enter the mark of 4 subject 78
Enter the mark of 5 subject 65
Mark of the student is [89, 90, 67, 78, 65]
Average : 77.8
|
mini_projects/Tic Tac Toe Game my work.ipynb | ###Markdown
This game has a small flaw: don't choose already-filled places on the board, otherwise it will stop. You can work on fixing this!
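One possible way to address that flaw (a minimal sketch with a hypothetical helper function, not part of the original game) is to keep prompting until the chosen square is actually empty:

```python
# Hypothetical helper: re-prompt until the player picks a free square (1-9).
# Assumes the same board representation used below: a list where free
# squares hold ' ' and positions are indexed 1 through 9.
def ask_for_empty_position(board):
    while True:
        choice = input('Enter your move position (1-9): ')
        if choice.isdigit() and 1 <= int(choice) <= 9 and board[int(choice)] == ' ':
            return int(choice)
        print('That square is taken or invalid, please try again.')
```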
###Code
# Step One (1)
def player_indentification():
    # I am making this while loop in case the player enters a wrong input
while True:
        player_id = input('You want to be player 1 or 2? ')
if player_id in ['1','2'] :
print( f'you are player {int(player_id)}')
print('you chooce X id')
return int(player_id)
else:
print('please choose carefuley!')
continue
# Step Two (2)
def player_markeer():
    marker = input('You want to be "X" or "O": ')
return marker
# Step Three (3)
def player_position():
pos = input('enter your play: ')
return pos
# Step Four (4)
def player_move():
move = input('enter your move: ')
# Step Five (5)
def player_turn():
print('player 2 ')
player2move = input('enter your move: ')
if player2move in range(1,10):
pos
# Step Six (6)
def marker_choose():
mark = input('You wnat to go for "X" or "O" :')
return mark
# Step Seven (7)
from IPython.display import clear_output
def board_display(board):
clear_output()
print(board[7],'|',board[8],'|',board[9])
print('----------')
print(board[4],'|',board[5],'|',board[6])
print('----------')
print(board[1],'|',board[2],'|',board[3])
# Step Eight (8)
def choose_position():
while True:
pos = int(input('Enter your move position (form 1:9): '))
return pos
# Step Nine (9)
def position_check(board,i):
if board[i] == 'x' or board[i] =='o' :
print()
print('You choose full position try again')
return False
else:
pass
# Step Ten (10)
def winning_check(board):
for i in range(1,len(board)-3) :
# check for horizontal line winning condintion
if board[i] == 'x':
if board[i] == board[i+1] and board[i] == board[i+2]:
return True
else:
pass
# check for horizontal line winning condintion
if i in range(1,4):
if board[i] == board[i+3] and board[i] == board[i+6]:
return True
else:
pass
elif board[i] == 'o':
# check for horizontal line winning condintion
if board[i] == board[i+1] and board[i] == board[i+2]:
return True
else:
pass
# check for horizontal line winning condintion
if i in range(1,4):
if board[i] == board[i+3] and board[i] == board[i+6]:
return True
else:
pass
else:
pass
# Step Elevne(11)
def continue_playing():
ask = input('Want to continue playing "Y" or"N"')
return ask
from IPython.display import clear_output
def play_game():
# writing the complete Game Here!!
clear_output()
    print('Welcome to the Tic Tac Toe Game! My first game ever!!!!!!!!!!')
# in case user didn't choose x or o characters
while True:
markers = ['x','o']
player1 = player_markeer()
if player1 not in markers:
print('You must choose "X" or "O" ')
continue
else:
break
markers.remove(player1)
player2 = markers[0]
board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
board_display(board)
play = True
while play:
p1 = True
# first player Turn
while p1:
print('Player 1 ',player1,' choose position')
pos = choose_position()
check = position_check(board,pos)
if check == False:
continue
else:
pass
board[pos] = player1
board_display(board)
# winning check for player one
win = winning_check(board)
if win == True:
print('congratulations!! you won')
decision = continue_playing()
if decision == 'y':
%rerun
elif decision == 'n':
play = False
break
else:
p1 = False
continue
# second player Turn
while not p1:
print('Player 2 ',player2,' choose position')
pos = choose_position()
check = position_check(board,pos)
if check == False:
continue
else:
pass
board[pos] = player2
board_display(board)
# winning check for player 2
win = winning_check(board)
if win == True:
print('congratulations!! you won')
decision = continue_playing()
if decision == 'y':
%rerun
break
elif decision == 'n':
play = False
break
else:
p1 = True
continue
play_game()
###Output
_____no_output_____ |
notebooks/n04_inertia.ipynb | ###Markdown
Introduction
The next step is to provide some information about the mass and inertia of the bodies involved. Each of the three rigid bodies has both a mass which resists linear accelerations and an inertia which resists rotational accelerations. In this notebook we will specify the mass of the three bodies, the inertia tensor/dyadic, and also create three `RigidBody` objects that hold all of the necessary information for each rigid body.
Setup
First, we will import the results from the previous notebook. Even if you didn't get everything correctly working, the following import statement will bring in the correct solution so you can move forward. We will do this in all of the subsequent notebooks.
###Code
from __future__ import print_function, division
from solution.kinematics import *
###Output
_____no_output_____
###Markdown
We will also need the function for easily generating inertial quantities and the `RigidBody` class so we can create some rigid bodies.
###Code
from sympy.physics.mechanics import inertia, RigidBody
###Output
_____no_output_____
###Markdown
We will need to specify some constants for the mass and inertia values.
###Code
from sympy import symbols
###Output
_____no_output_____
###Markdown
Once again, initialize SymPy printing so that we get nicely rendered symbols.
###Code
from sympy.physics.vector import init_vprinting
init_vprinting(use_latex='mathjax', pretty_print=False)
###Output
_____no_output_____
###Markdown
Mass The masses of each rigid body can be represented by constant values, so we create a symbol for each body.
###Code
lower_leg_mass, upper_leg_mass, torso_mass = symbols('m_L, m_U, m_T')
lower_leg_mass
upper_leg_mass
torso_mass
###Output
_____no_output_____
###Markdown
Inertia Since we are studying a 2D planar problem, we are only concerned about the rotational inertia about the $\hat{i}_z$ axis. We will assume that the rigid bodies are symmetric about the $XZ$ and $YZ$ planes, so we only need a single variable for each rigid body to specify the rotation inertia.
###Code
lower_leg_inertia, upper_leg_inertia, torso_inertia = symbols('I_Lz, I_Uz, I_Tz')
###Output
_____no_output_____
###Markdown
The `inertia()` function is a convenience function for creating inertia dyadics (i.e. basis dependent tensors). You specify a reference frame to define the inertia with respect to and, at a minimum for symmetric bodies, provide the diagonal entries of the inertia tensor. In our case the rotational inertia about the $x$ and $y$ axes is not needed, so those entries are set to zero and the $z$ inertia entry is set to the defined variable.
###Code
lower_leg_inertia_dyadic = inertia(lower_leg_frame, 0, 0, lower_leg_inertia)
lower_leg_inertia_dyadic
###Output
_____no_output_____
###Markdown
In general, we store the inertia as dyadics, i.e. basis dependent tensors. If you want to see what the inertia is expressed in a particular frame, use the `to_matrix()` method.
###Code
lower_leg_inertia_dyadic.to_matrix(lower_leg_frame)
###Output
_____no_output_____
###Markdown
We will also eventually need to know what point the inertia is defined with respect to. In our case, we will simply define all inertia's about the mass center. We can store the total information needed by PyDy in a tuple of an inertia `Dyadic` and a `Point`.
###Code
lower_leg_central_inertia = (lower_leg_inertia_dyadic, lower_leg_mass_center)
###Output
_____no_output_____
###Markdown
The upper leg and torso inertias are found in the same fashion.
###Code
upper_leg_inertia_dyadic = inertia(upper_leg_frame, 0, 0, upper_leg_inertia)
upper_leg_inertia_dyadic.to_matrix(upper_leg_frame)
upper_leg_central_inertia = (upper_leg_inertia_dyadic, upper_leg_mass_center)
###Output
_____no_output_____
###Markdown
Exercise Create a tuple of an inertia `Dyadic` and `Point` for the torso.
###Code
torso_inertia_dyadic =
torso_central_inertia =
%load exercise_solutions/n04_inertia_inertia-dyadic.py
###Output
_____no_output_____
###Markdown
Rigid Bodies To completely define a rigid body, the mass center point, the reference frame, the mass, and the inertia defined about a point must be specified.
###Code
lower_leg = RigidBody('Lower Leg', lower_leg_mass_center, lower_leg_frame,
lower_leg_mass, lower_leg_central_inertia)
###Output
_____no_output_____
###Markdown
Exercise Create RigidBody objects for the upper leg and torso
###Code
upper_leg =
torso =
%load exercise_solutions/n04_inertia_define-rigid-body.py
###Output
_____no_output_____ |
benchmarking/Final_Camel_Batch_Mango.ipynb | ###Markdown
Example of optimizing the six-hump camel function
Goal is to test the objective values found by Mango
- Search space: uniform
- Number of iterations to try: 100
- Domain size: 1000
- Initial random points: 5
- Batch size: 5

Benchmarking test with different iterations for batch executions
###Code
from mango.tuner import Tuner
from scipy.stats import uniform
import math
def get_param_dict():
param_dict = {
'a': uniform(-2, 4),
'b': uniform(-2, 4)
}
return param_dict
def get_objective(x,y):
x2 = math.pow(x,2)
x4 = math.pow(x,4)
y2 = math.pow(y,2)
return ((4.0 - 2.1 * x2 + (x4 / 3.0)) * x2 + x*y + (-4.0 + 4.0 * y2) * y2)
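# Note added for clarity: get_objective above is the standard six-hump camel
# benchmark function,
#   f(x, y) = (4 - 2.1*x**2 + x**4/3)*x**2 + x*y + (-4 + 4*y**2)*y**2,
# and objfunc below returns its negative so that Tuner.maximize() effectively
# minimizes the camel function over the uniform search space defined above.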
def objfunc(args_list):
results = []
for hyper_par in args_list:
a = hyper_par['a']
b = hyper_par['b']
result = -1.0*get_objective(a,b)
results.append(result)
return results
def get_conf():
conf = dict()
conf['batch_size'] = 5
conf['initial_random'] = 5
conf['num_iteration'] = 100
conf['domain_size'] = 1000
return conf
def get_optimal_x():
param_dict = get_param_dict()
conf = get_conf()
tuner = Tuner(param_dict, objfunc,conf)
results = tuner.maximize()
return results
Store_Optimal_X = []
Store_Results = []
num_of_tries = 20
for i in range(num_of_tries):
results = get_optimal_x()
Store_Results.append(results)
print(i,":",results['best_objective'])
#results['best_objective']
#len(Store_Results[0]['objective_values'])
#Store_Results[0]['objective_values'][:15]
#len(Store_Results[0]['params_tried'])
###Output
_____no_output_____
###Markdown
Extract the true optimal values for each iteration from the returned results
###Code
import numpy as np
total_experiments = 20
initial_random = 5
plotting_itr =[10, 20,30,40,50,60,70,80,90,100]
plotting_list = []
for exp in range(total_experiments): #for all exp
local_list = []
for itr in plotting_itr: # for all points to plot
# find the value of optimal parameters in itr+ initial_random
max_value = np.array(Store_Results[exp]['objective_values'][:itr*5+initial_random]).max()
local_list.append(max_value)
plotting_list.append(local_list)
plotting_array = np.array(plotting_list)
plotting_array.shape
#plotting_array
Y = []
#count range between -1 and 1 and show it
for i in range(len(plotting_itr)):
y_value = plotting_array[:,i].mean()
Y.append(y_value)
Y
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,10))
plt.plot(plotting_itr,Y,label = 'Mango(Batch=5)',linewidth=4.0) #x, y
plt.xlabel('Number of Iterations',fontsize=25)
plt.ylabel('Mean optimal achieved',fontsize=25)
#plt.title('Variation of Optimal Value of X with iterations',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
#plt.yticks(np.arange(10, 110, step=10))
#plt.xticks(np.arange(10, 110, step=10))
plt.grid(True)
plt.legend(fontsize=20)
plt.show()
###Output
_____no_output_____ |
farmgate_prices/Untitled.ipynb | ###Markdown
This is where you set color sceme to color blindness friendly
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Read the data into a pandas DataFrame.
gender_degree_data = pd.read_csv("http://www.randalolson.com/wp-content/uploads/percent-bachelors-degrees-women-usa.csv")
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# You typically want your plot to be ~1.33x wider than tall. This plot is a rare
# exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.ylim(0, 90)
plt.xlim(1968, 2014)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(10, 91, 10):
plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3)
# # Remove the tick marks; they are unnecessary with the tick lines we just plotted.
# plt.tick_params(axis="both", which="both", bottom="off", top="off",
# labelbottom="on", left="off", right="off", labelleft="on")
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology',
'Foreign Languages', 'English', 'Communications\nand Journalism',
'Art and Performance', 'Biology', 'Agriculture',
'Social Sciences and History', 'Business', 'Math and Statistics',
'Architecture', 'Physical Sciences', 'Computer Science',
'Engineering']
for rank, column in enumerate(majors):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
plt.plot(gender_degree_data.Year.values,
gender_degree_data[column.replace("\n", " ")].values,
lw=2.5, color=tableau20[rank])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5
if column == "Foreign Languages":
y_pos += 0.5
elif column == "English":
y_pos -= 0.5
elif column == "Communications\nand Journalism":
y_pos += 0.75
elif column == "Art and Performance":
y_pos -= 0.25
elif column == "Agriculture":
y_pos += 1.25
elif column == "Social Sciences and History":
y_pos += 0.25
elif column == "Business":
y_pos -= 0.75
elif column == "Math and Statistics":
y_pos += 0.75
elif column == "Architecture":
y_pos -= 0.75
elif column == "Computer Science":
y_pos += 0.75
elif column == "Engineering":
y_pos -= 0.25
# # Again, make sure that all labels are large enough to be easily read
# # by the viewer.
plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank])
# # matplotlib's title() call centers the title on the plot, but not the graph,
# # so I used the text() call to customize where the title goes.
# # Make the title big enough so it spans the entire plot, but don't make it
# # so big that it requires two lines to show.
# # Note that if the title is descriptive enough, it is unnecessary to include
# # axis labels; they are self-evident, in this plot's case.
# plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A."
# ", by major (1970-2012)", fontsize=17, ha="center")
# # Always include your data source(s) and copyright notice! And for your
# # data sources, tell your viewers exactly where the data came from,
# # preferably with a direct link to the data. Just telling your viewers
# # that you used data from the "U.S. Census Bureau" is completely useless:
# # the U.S. Census Bureau provides all kinds of data, so how are your
# # viewers supposed to know which data set you used?
# plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp"
# "\nAuthor: Randy Olson (randalolson.com / @randal_olson)"
# "\nNote: Some majors are missing because the historical data "
# "is not available for them", fontsize=10)
# # Finally, save the figure as a PNG.
# # You can also save it as a PDF, JPEG, etc.
# # Just change the file extension in this call.
bbox_inches="tight" removes all the extra whitespace on the edges of your plot.
plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight")
gender_degree_data.Year
# You typically want your plot to be ~1.33x wider than tall. This plot is a rare
# exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.ylim(0, 90)
plt.xlim(1968, 2014)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(10, 91, 10):
plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
plt.figure(figsize=(12, 14))
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology',
'Foreign Languages', 'English', 'Communications\nand Journalism',
'Art and Performance', 'Biology', 'Agriculture',
'Social Sciences and History', 'Business', 'Math and Statistics',
'Architecture', 'Physical Sciences', 'Computer Science',
'Engineering']
for rank, column in enumerate(majors):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
plt.plot(gender_degree_data.Year.values,
             gender_degree_data[column.replace("\n", " ")].values,
lw=2.5, color=tableau20[rank])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5
if column == "Foreign Languages":
y_pos += 0.5
elif column == "English":
y_pos -= 0.5
elif column == "Communications\nand Journalism":
y_pos += 0.75
elif column == "Art and Performance":
y_pos -= 0.25
elif column == "Agriculture":
y_pos += 1.25
elif column == "Social Sciences and History":
y_pos += 0.25
elif column == "Business":
y_pos -= 0.75
elif column == "Math and Statistics":
y_pos += 0.75
elif column == "Architecture":
y_pos -= 0.75
elif column == "Computer Science":
y_pos += 0.75
elif column == "Engineering":
y_pos -= 0.25
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank])
# matplotlib's title() call centers the title on the plot, but not the graph,
# so I used the text() call to customize where the title goes.
# Make the title big enough so it spans the entire plot, but don't make it
# so big that it requires two lines to show.
# Note that if the title is descriptive enough, it is unnecessary to include
# axis labels; they are self-evident, in this plot's case.
plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A."
", by major (1970-2012)", fontsize=17, ha="center")
# Always include your data source(s) and copyright notice! And for your
# data sources, tell your viewers exactly where the data came from,
# preferably with a direct link to the data. Just telling your viewers
# that you used data from the "U.S. Census Bureau" is completely useless:
# the U.S. Census Bureau provides all kinds of data, so how are your
# viewers supposed to know which data set you used?
plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp"
"\nAuthor: Randy Olson (randalolson.com / @randal_olson)"
"\nNote: Some majors are missing because the historical data "
"is not available for them", fontsize=10)
# Finally, save the figure as a PNG.
# You can also save it as a PDF, JPEG, etc.
# Just change the file extension in this call.
# bbox_inches="tight" removes all the extra whitespace on the edges of your plot.
plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight")
gender_degree_data.Year.values
###Output
_____no_output_____ |
SparkETLDemoPython.ipynb | ###Markdown
Spark ETL Demo Python
This demo, written in Python for Watson Data Studio, illustrates the use of a Spark cluster to perform ETL. It imports data from flat files into Spark DataFrames, manipulates the data, aggregates it and then writes the result out to a relational database. The advantage of using Spark for this is scalability (by using a larger cluster one can achieve close to linear scalability) and simplified error recovery (a failed attempt at running this ETL job can be repeated at any stage and the final result will be the same).
Step 1 Read in the source data
We read two CSV files. One has statistics about Social Security payments for the state of Texas by zipcode and the other maps US zipcodes to US counties so we can aggregate the Social Security data by county rather than zipcode.
Grab the input data files from GitHub and stick them in GPFS using wget
###Code
# Install wget if you don't already have it.
!pip install wget
import wget
link_to_ssdata = 'https://raw.githubusercontent.com/djccarew/sparketldemo/master/data/oasdi-tx-clean.csv'
link_to_zipdata = 'https://raw.githubusercontent.com/djccarew/sparketldemo/master/data/zip_codes_states.csv'
social_security_data_file = wget.download(link_to_ssdata)
print(social_security_data_file)
zipcode_data_file = wget.download(link_to_zipdata)
print(zipcode_data_file)
###Output
_____no_output_____
###Markdown
Read the Social Security data file into a DataFrame using a schema. Note that the schema can be inferred, but the inferred schema typically converts various numeric types to string, so it's better to specify the schema so you know what you end up with
###Code
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
ssdata_schema = StructType([
StructField("Zip", StringType(), False),
StructField("NumTotal", IntegerType(), False),
StructField("NumRetired", IntegerType(), False),
StructField("NumDisabled", IntegerType(), False),
StructField("NumWidowerOrParent", IntegerType(), False),
StructField("NumSpouses", IntegerType(), False),
StructField("NumChildren", IntegerType(), False),
StructField("BenTotal", IntegerType(), False),
StructField("BenRetired", IntegerType(), False),
StructField("BenWidowerOrParent", IntegerType(), False),
StructField("NumSeniors", IntegerType(), False)])
df_ssdata_raw = spark.read\
.format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
.option('header', 'true')\
.load(social_security_data_file, schema=ssdata_schema)
df_ssdata_raw.printSchema()
###Output
_____no_output_____
###Markdown
Repeat for zipcode data file
###Code
zipdata_schema = StructType([
StructField("Zip", StringType(), False),
StructField("Latitude", DoubleType(), False),
StructField("Longitude", DoubleType(), False),
StructField("City", StringType(), False),
StructField("State", StringType(), False),
StructField("County", StringType(), False)])
df_zipdata_raw = spark.read\
.format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
.option('header', 'true')\
.load(zipcode_data_file, schema=zipdata_schema)
df_zipdata_raw.printSchema()
###Output
_____no_output_____
###Markdown
Step 2 Transform raw source data Only need County name and zip code columns for this demo so we don't use the other columns in the zipcode data
###Code
df_counties = df_zipdata_raw.select('Zip','County')
df_counties.printSchema()
###Output
_____no_output_____
###Markdown
Join Social Security data with zipcode data to add a County column to Social Security data
###Code
df_ssdata_counties = df_ssdata_raw.join(df_counties, "Zip")
df_ssdata_counties.printSchema()
###Output
_____no_output_____
###Markdown
Don't need the zipcode column anymore since we'll be aggregating by County instead
###Code
df_ssdata_counties = df_ssdata_counties.drop("Zip")
df_ssdata_counties.printSchema()
###Output
_____no_output_____
###Markdown
Create a temp view so we can do the "by county" aggregation via SQL rather than using the Spark SQL DataFrame API. (Doing it via SQL is usually easier)
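For comparison, here is a minimal sketch of the same aggregation done with the DataFrame API instead of SQL (only a couple of the summed columns are shown; names match the schema defined above):

```python
from pyspark.sql import functions as F

df_by_county_api = (df_ssdata_counties
                    .groupBy("County")
                    .agg(F.sum("NumTotal").alias("NumTotal"),
                         F.sum("BenTotal").alias("BenTotal"))
                    .orderBy("County"))
```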
###Code
df_ssdata_counties.createOrReplaceTempView("aggregated_by_county")
###Output
_____no_output_____
###Markdown
Spark SQL query to aggregate Social Security data by county and sort by county name
###Code
df_ssdata_data_by_county = spark.sql("select County, sum(NumTotal) as NumTotal, sum(NumRetired) as NumRetired, sum(NumDisabled) as NumDisabled, sum(NumWidowerOrParent) as NumWidowerOrParent, sum(NumSpouses) as NumSpouses, sum(NumChildren) as NumChildren, sum(BenTotal) as BenTotal, sum(BenRetired) as BenRetired, sum(BenWidowerOrParent) as BenWidowerOrParent, sum(NumSeniors) as NumSeniors from aggregated_by_county group by County order by County")
df_ssdata_data_by_county.take(5)
###Output
_____no_output_____
###Markdown
Step 3 Write modified data to target database We use the jdbc method of the DataFrameWriter to write the modified data to the target db. Appropriate credentials for the target db need to be set up first. Modify the code below with the appropriate values for your database
###Code
jdbc_url = 'your-jdbc-url'
dest_table = 'your-table-name'
jdbc_properties = {
'driver': 'com.ibm.db2.jcc.DB2Driver',
'user': 'your-db-user',
'password': 'your-db-password'
}
df_ssdata_data_by_county.write.jdbc(jdbc_url, table=dest_table, mode='overwrite', properties=jdbc_properties)
###Output
_____no_output_____ |
2021Q1_DSF/6.- Spark ML/notebooks/04_regression.ipynb | ###Markdown
Spark ML Regression Problem
In this notebook we will tackle the supervised machine learning problem of regression. We will work with the Boston Housing dataset, which contains information about different characteristics of houses in the city of Boston. We will use the house price as the target variable. We will access the data through the Python ML library scikit-learn. We will fit different models and compare the results obtained with them.
Create SparkSession
Note: in Datio it is not necessary to create the Spark session, since it is created automatically when starting a notebook with the PySpark Python3 - Spark 2.1.0 kernel.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Load the data into a Spark DataFrame We load the data from scikit-learn and consolidate it into a Spark DataFrame.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Let's look at the schema.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
We can view the description of the variables from the object loaded from scikit-learn.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Preliminary steps
Vector Assembler
To fit a model in Spark we need to indicate which variables will be used as independent variables. Through the _featuresCol_ parameter of the different algorithms, we point each one to the column containing the VectorAssembler output with the independent variables. We build the VectorAssembler with all the variables except the target.
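Below is a minimal sketch of what this step could look like (the names `boston_df` for the Spark DataFrame and `MEDV` for the target column are assumptions here, `MEDV` matching the label used later in `calculate_metrics`); the exercise cell below is still left for your own answer:

```python
from pyspark.ml.feature import VectorAssembler

# boston_df is an assumed name for the Spark DataFrame built from scikit-learn's data
feature_cols = [c for c in boston_df.columns if c != "MEDV"]
assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
boston_assembled = assembler.transform(boston_df)
```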
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Train/test split
We perform the train/test (or train/validation) split in order to measure the model's performance after fitting.
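For example, a possible split (a sketch reusing the assumed `boston_assembled` DataFrame from above):

```python
# 80/20 random split; the seed keeps the split reproducible
boston_train, boston_test = boston_assembled.randomSplit([0.8, 0.2], seed=42)
```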
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Linear Regression
A mathematical model used to approximate the dependency relationship between a dependent variable $Y$, the independent variables $X_i$ and a random term $\varepsilon$. It is expressed through the following equation:
$$Y = \beta_0+\beta_1 X_1+\beta_2 X_2+\dots+\beta_p X_p+\varepsilon$$
where
- $Y$ is the dependent, explained or target variable,
- $X_i$ are the explanatory, independent or regressor variables,
- $\beta_i$ are the parameters that measure the influence of the explanatory variables on the target,
for all $0 \leq i \leq p$.
We fit a regression model and extract the actual values and the predictions.
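A minimal sketch with Spark ML's `LinearRegression` (reusing the assumed `boston_train`/`boston_test` names from the sketch above; `boston_test_linear_regression` matches the name used in the evaluation cells further down):

```python
from pyspark.ml.regression import LinearRegression

lr = LinearRegression(featuresCol="features", labelCol="MEDV")
lr_model = lr.fit(boston_train)
boston_test_linear_regression = lr_model.transform(boston_test)
boston_test_linear_regression.select("MEDV", "prediction").show(5)
```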
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Decision Tree
A predictive model built by performing a recursive binary partition of the data, identifying the variables and their cut-off points that best determine the value of the target variable. During training the important parameters are the impurity measure and the stopping criterion (usually the depth). We fit a decision tree on our dataset and extract the actual and predicted values.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Random Forest
A predictive model based on decision trees. It builds several trees by taking samples of the dataset (bagging) and samples of the set of variables. It makes the prediction for each new record by passing it through each of the trees and averaging the results obtained. We fit a random forest and obtain the actual and predicted values.
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Model evaluation
To compare the performance of the fitted models and be able to select the best one, we have several metrics available. Some of them are:
- Mean squared error (MSE)
- Root mean squared error (RMSE)
- R squared (R²)
- Mean absolute error (MAE)
As an example we obtain the RMSE and MAE of the fitted models.
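A minimal sketch of computing one of these metrics with `RegressionEvaluator` (assuming the transformed test DataFrame from the linear regression sketch above):

```python
from pyspark.ml.evaluation import RegressionEvaluator

evaluator = RegressionEvaluator(predictionCol="prediction", labelCol="MEDV", metricName="rmse")
rmse = evaluator.evaluate(boston_test_linear_regression)
print(rmse)
```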
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Print metrics for the different models
###Code
# Answer here
###Output
_____no_output_____
###Markdown
Looking at the values obtained for the model evaluation metrics, we would decide to keep the random forest. We can create a function that takes as input parameters the transformed dataframe, the prediction column and the target, and returns a dictionary with all the available metrics and their respective values.
###Code
def calculate_metrics(dataset, predictionCol='prediction', labelCol='MEDV'):
metrics = RegressionEvaluator(predictionCol=predictionCol, labelCol=labelCol)
rmse = metrics.evaluate(dataset, {metrics.metricName: "rmse"})
mae = metrics.evaluate(dataset, {metrics.metricName: "mae"})
mse = metrics.evaluate(dataset, {metrics.metricName: "mse"})
r2 = metrics.evaluate(dataset, {metrics.metricName: "r2"})
return {'rmse': rmse, 'mae':mae, 'mse': mse, 'r2':r2}
calculate_metrics(boston_test_linear_regression)
calculate_metrics(boston_test_decision_tree_regression)
calculate_metrics(boston_test_random_forest)
###Output
_____no_output_____ |
Vhanilla_RNN/.ipynb_checkpoints/RNN-checkpoint.ipynb | ###Markdown
VANILLA RNN ON 8*8 MNIST DATASET TO PREDICT TEN CLASSES It's a dynamic sequence and batch vanilla RNN, created with TensorFlow's scan and map higher-order ops. This is a base RNN which can be used to create GRU, LSTM, Neural Stack Machine, Neural Turing Machine, RNN-EM and so on! (A small standalone illustration of `tf.scan` appears right after the RNN cell below.) Importing Libraries
###Code
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
import pylab as pl
from IPython import display
import sys
%matplotlib inline
###Output
/usr/local/lib/python3.5/dist-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
###Markdown
Vanilla RNN class and functions
###Code
class RNN_cell(object):
"""
RNN cell object which takes 3 arguments for initialization.
input_size = Input Vector size
hidden_layer_size = Hidden layer size
target_size = Output vector size
"""
def __init__(self, input_size, hidden_layer_size, target_size):
# Initialization of given values
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.target_size = target_size
# Weights and Bias for input and hidden tensor
self.Wx = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Wh = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bi = tf.Variable(tf.zeros([self.hidden_layer_size]))
# Weights for output layers
self.Wo = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size, self.target_size],mean=0,stddev=.01))
self.bo = tf.Variable(tf.truncated_normal([self.target_size],mean=0,stddev=.01))
# Placeholder for input vector with shape[batch, seq, embeddings]
self._inputs = tf.placeholder(tf.float32,
shape=[None, None, self.input_size],
name='inputs')
# Processing inputs to work with scan function
self.processed_input = process_batch_input_for_RNN(self._inputs)
'''
Initial hidden state's shape is [1,self.hidden_layer_size]
In First time stamp, we are doing dot product with weights to
get the shape of [batch_size, self.hidden_layer_size].
For this dot product tensorflow use broadcasting. But during
Back propagation a low level error occurs.
So to solve the problem it was needed to initialize initial
hiddden state of size [batch_size, self.hidden_layer_size].
So here is a little hack !!!! Getting the same shaped
initial hidden state of zeros.
'''
self.initial_hidden = self._inputs[:, 0, :]
self.initial_hidden = tf.matmul(
self.initial_hidden, tf.zeros([input_size, hidden_layer_size]))
    # Function for vanilla RNN.
def vanilla_rnn(self, previous_hidden_state, x):
"""
This function takes previous hidden state and input and
outputs current hidden state.
"""
current_hidden_state = tf.tanh(
tf.matmul(previous_hidden_state, self.Wh) +
tf.matmul(x, self.Wx) + self.bi)
return current_hidden_state
# Function for getting all hidden state.
def get_states(self):
"""
Iterates through time/ sequence to get all hidden state
"""
# Getting all hidden state throuh time
all_hidden_states = tf.scan(self.vanilla_rnn,
self.processed_input,
initializer=self.initial_hidden,
name='states')
return all_hidden_states
# Function to get output from a hidden layer
def get_output(self, hidden_state):
"""
This function takes hidden state and returns output
"""
output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo)
return output
# Function for getting all output layers
def get_outputs(self):
"""
Iterating through hidden states to get outputs for all timestamp
"""
all_hidden_states = self.get_states()
all_outputs = tf.map_fn(self.get_output, all_hidden_states)
return all_outputs
# Function to convert batch input data to use scan ops of tensorflow.
def process_batch_input_for_RNN(batch_input):
"""
Process tensor of size [5,3,2] to [3,5,2]
"""
batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])
X = tf.transpose(batch_input_)
return X
###Output
_____no_output_____
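Since the class above relies on `tf.scan` to carry the hidden state through time, here is a minimal standalone illustration of `tf.scan` (a running sum over a sequence), separate from the model code:

```python
import tensorflow as tf

elems = tf.constant([1, 2, 3, 4])
# without an initializer, the first element is used as the starting accumulator
running_sum = tf.scan(lambda acc, x: acc + x, elems)
with tf.Session() as sess:
    print(sess.run(running_sum))  # [ 1  3  6 10]
```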
###Markdown
Placeholder and initializers
###Code
hidden_layer_size = 110
input_size = 8
target_size = 10
y = tf.placeholder(tf.float32, shape=[None, target_size],name='inputs')
###Output
_____no_output_____
###Markdown
Models
###Code
#Initializing rnn object
rnn=RNN_cell( input_size, hidden_layer_size, target_size)
#Getting all outputs from rnn
outputs = rnn.get_outputs()
#Getting final output by indexing the last timestep
last_output = outputs[-1]
#As rnn model output the final layer through Relu activation softmax is used for final output.
output=tf.nn.softmax(last_output)
#Computing the Cross Entropy loss
cross_entropy = -tf.reduce_sum(y * tf.log(output))
# Training with Adam Optimizer
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
#Calculation of correct prediction and accuracy
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(output,1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32)))*100
###Output
_____no_output_____
###Markdown
Dataset Preparation
###Code
sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
#Using Sklearn MNIST dataset.
digits = load_digits()
X=digits.images
Y_=digits.target
# One hot encoding
Y = sess.run(tf.one_hot(indices=Y_, depth=target_size))
#Getting Train and test Dataset
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.22, random_state=42)
#Cutting for simple iteration
X_train=X_train[:1400]
y_train=y_train[:1400]
#Iterations to do training
for epoch in range(120):
start=0
end=100
for i in range(14):
X=X_train[start:end]
Y=y_train[start:end]
start=end
end=start+100
sess.run(train_step,feed_dict={rnn._inputs:X, y:Y})
Loss=str(sess.run(cross_entropy,feed_dict={rnn._inputs:X, y:Y}))
Train_accuracy=str(sess.run(accuracy,feed_dict={rnn._inputs:X_train, y:y_train}))
Test_accuracy=str(sess.run(accuracy,feed_dict={rnn._inputs:X_test, y:y_test}))
pl.plot([epoch],Loss,'b.',)
pl.plot([epoch],Train_accuracy,'r*',)
pl.plot([epoch],Test_accuracy,'g+')
display.clear_output(wait=True)
display.display(pl.gcf())
sys.stdout.flush()
print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s"%(epoch,Loss,Train_accuracy,Test_accuracy)),
sys.stdout.flush()
###Output
_____no_output_____ |
Experiments/Data_Pipeline_with_TensorFlow.ipynb | ###Markdown
###Code
!nvidia-smi
###Output
NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running.
###Markdown
Learning how to build data pipelines with `tf.data`
The `tf.data` API helps us build complex input pipelines from simple, reusable pieces. For example, the pipeline
- for an image model might aggregate data from files in a distributed file system, apply random perturbations to each image, and merge randomly selected images into a batch for training;
- for a text model might involve extracting symbols from raw text data, converting them to embedding identifiers with a lookup table, and batching together sequences of different lengths.
The `tf.data` API makes it possible to handle large amounts of data, read from different data formats, and perform complex transformations. The `tf.data` API introduces a `tf.data.Dataset` abstraction that represents a sequence of elements, in which each element consists of one or more components. For example, in an image pipeline, an element might be a single training example, with a **pair of tensor components representing the image and its label.**
**The two distinct ways to create a dataset**:
- A data **source** constructs a `Dataset` from data stored in memory or in one or more files.
- A data **transformation** constructs a dataset from one or more `tf.data.Dataset` objects.
Basic Mechanics
- To create an input pipeline, we must start with a data source.
- (Other files) For example, to construct a `Dataset` from data in memory (folders etc.) we can use `tf.data.Dataset.from_tensors()` or `tf.data.Dataset.from_tensor_slices()`.
- (TFRecord file) If the input data is stored in the TFRecord format, we can use `tf.data.TFRecordDataset()`.
> The `Dataset` object is a Python iterable (we can loop through it).
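As a quick illustration of the source-plus-transformation idea described above, here is a tiny pipeline (a sketch, not part of the original data used below):

```python
import tensorflow as tf

# source: an in-memory list; transformations: map, then batch
ds = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])
ds = ds.map(lambda x: x * 2).batch(2)
for batch in ds:
    print(batch.numpy())  # [2 4], then [6 8], then [10 12]
```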
###Code
# Importing the things we need
import tensorflow as tf
import pathlib
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Creating a dummy data and using tf.data.Dataset.from_tensor_slices()
dum_list = [8 , 3, 0 , 8 , 2 , 1]
dataset = tf.data.Dataset.from_tensor_slices(dum_list)
dataset
# Iterating a looking what's inside the dataset we created
for elem in dataset:
print(elem.numpy())
# Trying out a synthetic data
(train_data , train_labels) , (test_data , test_labels) = tf.keras.datasets.mnist.load_data()
# Printing out the shapes of our mnist dataset
train_data.shape , train_labels.shape , test_data.shape , test_labels.shape
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
###Markdown
Loading our data using `tf.data` and creating a TensorSliceDataset object for our train data
###Code
# Turning our train data into TensorSliceDataset object
train_dataset_slices = tf.data.Dataset.from_tensor_slices((train_data , train_labels))
train_dataset_slices
###Output
_____no_output_____
###Markdown
Cool! Now we have packed our train images and labels into one whole Dataset. To view the labels: https://stackoverflow.com/questions/64132847/how-to-iterate-over-tensorslicedataset-object-in-tensorflow
###Code
train_dataset_slices.element_spec
###Output
_____no_output_____
###Markdown
Let's try the same, but this time with `tf.data.Dataset.from_tensors()`
###Code
# Using tf.data.Dataset_from_tensors()
train_data_tensors = tf.data.Dataset.from_tensors((train_data , train_labels))
train_data_tensors
# Looking into our dataset
train_data_tensors.element_spec
train_data_tensors.list_files
###Output
_____no_output_____
###Markdown
Using `tf.data.Dataset.from_generator()` now; this will help us create a Dataset object from a data generator object. Useful links:
- [Converting ImageDatasetGenerator into dataset object](https://stackoverflow.com/questions/54606302/tf-data-dataset-from-tf-keras-preprocessing-image-imagedatagenerator-flow-from-d)
- [How to use during fit function](https://stackoverflow.com/questions/52636127/how-to-use-keras-generator-with-tf-data-api)
###Code
# Loading in the cats and dogs dataset
# data's url
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
# Extracting from the path
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip' , origin = _URL , extract = True)
PATH = os.path.join(os.path.dirname(path_to_zip) , 'cats_and_dogs_filtered')
# What's inside PATH?
os.listdir(PATH)
# Now setting up our train and validation directory (for images)
train_dir = os.path.join(PATH , 'train')
valid_dir = os.path.join(PATH , 'validation')
# What's inside our train_dir
os.listdir(train_dir)
# Looking intos cats folder
os.listdir(f'{train_dir}/cats')[:10]
# Using ImageDataGenerator
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1/255.)
# Getting the images from our directory
train_gen = train_datagen.flow_from_directory(train_dir)
# For Validation
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1/255.)
valid_gen = valid_datagen.flow_from_directory(valid_dir)
images, labels = next(train_gen)
# Checking their shapes
images.shape , labels.shape , images.dtype , labels.dtype
train_gen.labels
# Gotta inspect our train_gen and collect some info that may help us in converting to Dataset object
print(f'Target size of images: {train_gen.target_size}')
print(f'Number of classes: {train_gen.num_classes}')
print(f'Getting the class indices: {train_gen.class_indices}')
###Output
Target size of images: (256, 256)
Number of classes: 2
Getting the class indices: {'cats': 0, 'dogs': 1}
###Markdown
Alright! Now for the main step: converting our generator to a Dataset.
###Code
train_dataset_gen = tf.data.Dataset.from_generator(
lambda: train_datagen.flow_from_directory(train_dir) ,
output_types = (tf.float32 , tf.float32),
output_shapes = ([None, 256, 256 ,3] , [None , 2])
)
valid_dataset_gen = tf.data.Dataset.from_generator(
lambda: valid_datagen.flow_from_directory(valid_dir),
output_types = (tf.float32 , tf.float32),
output_shapes = ([None , 256 , 256 , 3] , [None , 2])
)
train_dataset_gen , valid_dataset_gen
it = iter(train_dataset_gen)
# take a single batch from the generator-backed dataset and inspect its shapes
for images, labels in train_dataset_gen.take(1):
    print(images.shape, labels.shape)
unbatch_data = train_dataset_gen.apply(tf.data.experimental.unbatch())
images , labels = next(iter(train_dataset_gen))
class GeneratorLen(object):
def __init__(self, gen, length):
self.gen = gen
self.length = length
def __len__(self):
return self.length
def __iter__(self):
return self.gen
g = train_dataset_gen
h = GeneratorLen(g, 1)
print(len(h))
list(h)
dum_train = train_dataset_gen.shuffle(buffer_size= 1000).prefetch(buffer_size = tf.data.AUTOTUNE)
dum_train
len(dum_train)
unbatch_data = unbatch_data.batch(32)  # assumed intent: re-batch the unbatched elements
unbatch_data.padded_batch(32)
train_dataset_gen.take(1)
len(train_dataset_gen)
model.compile(loss = tf.keras.losses.SparseCategoricalCrossentropy() ,
optimizer = tf.keras.optimizers.Adam(),
metrics = ['accuracy'])
model.fit(train_dataset_gen ,
epochs = 5)
train_dataset_gen.element_spec
###Output
_____no_output_____
###Markdown
Extracting images and labels from our dataset object. Useful link: https://stackoverflow.com/questions/56226621/how-to-extract-data-labels-back-from-tensorflow-dataset
###Code
# Extracting images and labels from our dataset object
for images , labels in train_dataset_gen.take(1):
sample_images = images
sample_labels = labels
len(sample_images) , len(sample_labels)
# Checking the image
sample_images[:1]
# Checking our labels
sample_labels[:10]
# Applying the same on the whole dataset
#for images , labels in train_dataset_gen.take(-1):
# train_images = images
# train_labels = labels
# train_images , train_labels = tuple(zip(*train_dataset_gen))
# The for loop is taking *infinitely* long time
def preprocess_func(image , label):
image = tf.image.resize(image , [224 , 224])
return tf.cast(image , tf.float32) , label
# Map preprocess function to train and valid
train_dataset_gen = train_dataset_gen.map(map_func=preprocess_func , num_parallel_calls=tf.data.AUTOTUNE)
#train_dataset_gen = train_dataset_gen.shuffle(buffer_size = 1000).batch(batch_size = 32).prefetch(buffer_size = tf.data.AUTOTUNE)
valid_dataset_gen = valid_dataset_gen.map(map_func=preprocess_func , num_parallel_calls=tf.data.AUTOTUNE)
#valid_dataset_gen = valid_dataset_gen.batch(batch_size = 32).prefetch(buffer_size = tf.data.AUTOTUNE)
train_dataset_gen , valid_dataset_gen
model.fit(train_dataset_gen ,
epochs = 5)
train_dataset_gen.class_names
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# Create base model
input_shape = (256, 256, 3)
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False # freeze base model layers
# Create Functional model
inputs = layers.Input(shape=input_shape, name="input_layer")
# Note: EfficientNetBX models have rescaling built-in but if your model didn't you could have a layer like below
# x = preprocessing.Rescaling(1./255)(x)
x = base_model(inputs, training=False) # set base_model to inference mode only
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
#x = layers.Dense(2)(x) # want one output neuron per class
# Separate activation of output layer so we can output float32 activations
outputs = layers.Dense(2, activation="softmax")(x)
model = tf.keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy when labels are *not* one-hot
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model.summary()
# Compile the model
model.compile(loss = tf.keras.losses.CategoricalCrossentropy() ,
optimizer = tf.keras.optimizers.Adam() ,
metrics = ['accuracy'])
2000 / 32
model.fit(train_dataset_gen , epochs = 3,
steps_per_epoch = 62.5 ,
validation_data = valid_dataset_gen ,
validation_steps = 10)
###Output
_____no_output_____ |
rosalind_workbook/dynamic_programming.ipynb | ###Markdown
Dynamic Programming The algorithmic notion of building up a solution to a problem by solving it on progressively larger cases. Rosalind link: [Dynamic Programming](http://rosalind.info/problems/topics/dynamic-programming/) Import modules
###Code
import os
import sys
from itertools import permutations
import numpy as np
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import generic_rna
print('DONE!')
###Output
_____no_output_____
###Markdown
Rabbits and Recurrence RelationsRosalind link: [Rabbits and Recurrence Relations](http://rosalind.info/problems/fib/)
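As an illustration of the dynamic-programming idea above, here is a minimal bottom-up sketch of the recurrence commonly used for this problem, $F(n) = F(n-1) + k \cdot F(n-2)$ (n months, litters of k pairs); reading the actual Rosalind input is left for the TODO below:

```python
def rabbit_pairs(n, k):
    a, b = 1, 1  # F(1), F(2)
    for _ in range(n - 2):
        a, b = b, b + k * a
    return b

print(rabbit_pairs(5, 3))  # 19, matching the Rosalind sample case
```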
###Code
# TODO
###Output
_____no_output_____
###Markdown
Mortal Fibonacci RabbitsRosalind link: [Mortal Fibonacci Rabbits](http://rosalind.info/problems/fibd/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Longest Increasing SubsequenceRosalind link: [Longest Increasing Subsequence](http://rosalind.info/problems/lgis/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Perfect Matchings and RNA Secondary StructuresRosalind link: [Perfect Matchings and RNA Secondary Structures](http://rosalind.info/problems/pmch/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Catalan Numbers and RNA Secondary StructuresRosalind link: [Catalan Numbers and RNA Secondary Structures](http://rosalind.info/problems/cat/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Finding a Shared Spliced MotifRosalind link: [Finding a Shared Spliced Motif](http://rosalind.info/problems/lcsq/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Maximum Matchings and RNA Secondary StructuresRosalind link: [Maximum Matchings and RNA Secondary Structures](http://rosalind.info/problems/mmch/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Edit DistanceRosalind link: [Edit Distance](http://rosalind.info/problems/edit/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Motzkin Numbers and RNA Secondary StructuresRosalind link: [Motzkin Numbers and RNA Secondary Structures](http://rosalind.info/problems/motz/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Interleaving Two MotifsRosalind link: [Interleaving Two Motifs](http://rosalind.info/problems/scsp/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Edit Distance AlignmentRosalind link: [Edit Distance Alignment](http://rosalind.info/problems/edta/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Finding Disjoint Motifs in a GeneRosalind link: [Finding Disjoint Motifs in a Gene](http://rosalind.info/problems/itwv/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Wobble Bonding and RNA Secondary StructuresRosalind link: [Wobble Bonding and RNA Secondary Structures](http://rosalind.info/problems/rnas/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Global Alignment with Scoring MatrixRosalind link: [Global Alignment with Scoring Matrix](http://rosalind.info/problems/glob/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Global Alignment with Constant Gap PenaltyRosalind link: [Global Alignment with Constant Gap Penalty](http://rosalind.info/problems/gcon/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Local Alignment with Scoring MatrixRosalind link: [Local Alignment with Scoring Matrix](http://rosalind.info/problems/loca/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Maximizing the Gap Symbols of an Optimal AlignmentRosalind link: [Maximizing the Gap Symbols of an Optimal Alignment](http://rosalind.info/problems/mgap/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Multiple AlignmentRosalind link: [Multiple Alignment](http://rosalind.info/problems/mult/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Global Alignment with Scoring Matrix and Affine Gap PenaltyRosalind link: [Global Alignment with Scoring Matrix and Affine Gap Penalty](http://rosalind.info/problems/gaff/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Overlap AlignmentRosalind link: [Overlap Alignment](http://rosalind.info/problems/oap/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Semiglobal AlignmentRosalind link: [Semiglobal Alignment](http://rosalind.info/problems/smgb/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Local Alignment with Affine Gap PenaltyRosalind link: [Local Alignment with Affine Gap Penalty](http://rosalind.info/problems/laff/)
###Code
# TODO
###Output
_____no_output_____
###Markdown
Isolating Symbols in AlignmentsRosalind link: [Isolating Symbols in Alignments](http://rosalind.info/problems/osym/)
###Code
# TODO
###Output
_____no_output_____ |
notebooks/Milestone3_Task1,2,3_73117335.ipynb | ###Markdown
Task 1
###Code
import pandas as pd
import seaborn as sns
data = pd.read_csv('Bike-Sharing-Dataset/hour.csv')
###Output
_____no_output_____
###Markdown
Show the dataframe headers
###Code
data.head()
###Output
_____no_output_____
###Markdown
Let's plot the distribution of column 'cnt'
###Code
sns.histplot(data=data, x="cnt")
###Output
_____no_output_____
###Markdown
Let's plot the cnt vs time
###Code
sns.relplot(x="dteday", y="cnt", kind="line", ci=None, data=data)
###Output
_____no_output_____
###Markdown
Let's plot a crossplot of cnt and registered
###Code
sns.relplot(x="cnt", y="registered", data=data);
###Output
_____no_output_____
###Markdown
Task 2 1. Loading
###Code
data = pd.read_csv('Bike-Sharing-Dataset/hour.csv')
###Output
_____no_output_____
###Markdown
Print out the column data types and check for missing values
###Code
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 17379 entries, 0 to 17378
Data columns (total 17 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 instant 17379 non-null int64
1 dteday 17379 non-null object
2 season 17379 non-null int64
3 yr 17379 non-null int64
4 mnth 17379 non-null int64
5 hr 17379 non-null int64
6 holiday 17379 non-null int64
7 weekday 17379 non-null int64
8 workingday 17379 non-null int64
9 weathersit 17379 non-null int64
10 temp 17379 non-null float64
11 atemp 17379 non-null float64
12 hum 17379 non-null float64
13 windspeed 17379 non-null float64
14 casual 17379 non-null int64
15 registered 17379 non-null int64
16 cnt 17379 non-null int64
dtypes: float64(4), int64(12), object(1)
memory usage: 2.3+ MB
###Markdown
Show the dataframe headers
###Code
data.head()
###Output
_____no_output_____
###Markdown
2. Clean Data Drop column instant since we don't need it
###Code
data.drop('instant', axis=1, inplace=True)
data.head()
###Output
_____no_output_____
###Markdown
3. Process Data Calculate registered_ratio and casual_ratio
###Code
data['registered_ratio'] = data['registered']/ data['cnt']
data['casual_ratio'] = data['casual']/ data['cnt']
###Output
_____no_output_____
###Markdown
4. Wrangle Data Replace values in column yr
###Code
data.loc[data['yr']==0,'yr'] = 2011
data.loc[data['yr']==1,'yr'] = 2012
data.head()
###Output
_____no_output_____
###Markdown
Task 3 Step 1 Testing chaining in pandas
###Code
# Method chaining begins
import numpy as np
df = pd.read_csv('Bike-Sharing-Dataset/hour.csv').drop('instant', axis=1).assign(registered_ratio=lambda x: x['registered']/ x['cnt']).assign(casual_ratio=lambda x: x['casual']/ x['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012))
df
###Output
_____no_output_____
###Markdown
Moving chaining parts to a function
###Code
def load_and_process(url_or_path_to_csv_file):
    df = pd.read_csv(url_or_path_to_csv_file).drop('instant', axis=1).assign(registered_ratio=lambda x : x['registered']/ x['cnt']).assign(casual_ratio=lambda x : x['casual']/ x['cnt']).assign(yr=lambda x: np.where(x.yr==0, 2011, 2012))
    return df
load_and_process('Bike-Sharing-Dataset/hour.csv')
import project_functions
df = project_functions.load_and_process('Bike-Sharing-Dataset/hour.csv')
df
###Output
_____no_output_____ |
notebooks/tutorials/01 - Code Fundamentals.ipynb | ###Markdown
Code Fundamentals Code is a language for creating and building things. This course teaches the fundamentals of coding, music and signal processing. In lesson 1, we will learn the fundamentals of coding with python. In order to use MusiCode effectively in this class, you are required to know basic programming, which is outlined in this lesson. For a more extended tutorial on python, you can also check out [Learn Python](https://www.learnpython.org/). MusiCode MusiCode is a python library for creating music. It is a digital audio workstation (DAW) controlled with code. You can create musical notes, intervals, chords, progressions, melodies, bass lines, drum beats and full songs! Jupyter Notebooks The file we are working in right now is a Jupyter notebook or IPython notebook (.ipynb) in the Microsoft Azure system. This is the primary file type you will be using. Code is split into individual cells. You can add a new cell by clicking the plus sign on the top bar. To run a cell of code, make sure it is selected, then use the hot key shift-enter or click the 'run' button on the top bar of Jupyter. For more information on Jupyter notebooks check out: [Jupyter Notebook Beginner Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)
A few helpful tips:
1. Start each session by going to the top menu and selecting kernel -> restart & run all. This will restart the kernel and run every cell of code.
2. Save your work consistently by clicking file - save and checkpoint on the top bar. Or use the save icon on the top menu, directly to the left of the plus sign.
3. When you are finished working, save, then select file - close and halt.
Troubleshoot
If you are experiencing issues with your notebook, try restart and run all, or log out and log back into Azure.
###Code
# this is a cell
# use a hashtag symbol to create a comment, which does not run as code
# comments help other coders understand what your code is doing
# using comments to describe what your code is doing is VERY important!
x = 1
###Output
_____no_output_____
###Markdown
Key Terms
Before we start coding, it is important for you to know the following definitions:
1. variable - named container for data
2. data type - defines what operations can be performed on a particular object
3. function - a body of code that returns a single value
4. object/class - bundle of data and code
5. method - a function defined within an object
6. attribute - data stored within an object
Object Oriented Programming
What is an object? View lesson here: [powerpoint about objects](https://teams.microsoft.com/l/file/44E58E79-5BB9-4E38-9A55-4C9E27E58891?tenantId=8deb1d4d-d0a4-4d04-89ae-f7076cbaa9fb&fileType=pptx&objectUrl=https%3A%2F%2Fduvalschoolsorg.sharepoint.com%2Fteams%2F3107_DAElectronicMusic%2FClass%20Materials%2FSlides%2FObject%20Oriented%20Programming.pptx&baseUrl=https%3A%2F%2Fduvalschoolsorg.sharepoint.com%2Fteams%2F3107_DAElectronicMusic&serviceName=teams&threadId=19:[email protected]&groupId=ed3f13a3-741b-42ef-badc-a300efd4a9cb) Let's talk about object oriented programming (OOP). An understanding of object oriented programming is fundamental to coding. Objects are the building blocks of software. Objects contain data and code (attributes & methods). Objects are sometimes referred to as classes. Once an object is defined, you can create many different instances of that object. In MusiCode we will create many different instances of the wave object. (A short class sketch follows below, just before the data type examples.)
- Method: a function defined within an object.
- Attribute: data stored within an object
Below let's look at the two primary objects we will be using:
Wave object
In MusiCode, we are primarily working with wave objects. A wave object is a subclass of the numpy array. The wave object is a 1D (mono) or 2D (stereo) array, designed for signal processing and music making. Every time you create a waveform in MusiCode, it generates a wave object. Then we can use wave methods to modify the waveform. We will see many examples of this concept in the upcoming lessons.
MusiCode object
We use the musicode class to generate waveforms. There are several methods for generating waveforms: create_wave, rest, sequence, chord and arpeggio. All these methods return wave objects. Then we can call wave methods to transform the signal and create many different interesting sounds.
Data Types
Below we will look at some of the main data types that are built into python.
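Here is a minimal sketch of a class in Python, just to make the attribute/method vocabulary above concrete (the `Note` class is a made-up example, not part of MusiCode); the built-in data type examples follow in the next cells:

```python
# A class bundles data (attributes) and code (methods).
class Note:
    def __init__(self, pitch, duration):
        self.pitch = pitch        # attribute
        self.duration = duration  # attribute

    def describe(self):           # method
        return f"{self.pitch} for {self.duration} beats"

n = Note("A4", 2)     # an instance (object) of the Note class
print(n.describe())   # A4 for 2 beats
```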
###Code
# string
type("hello world!")
# integer / whole number
type(10)
# integer / decimal
type(1.753)
# list
my_list = [1, 2, 3, 4, 5]
type(my_list)
my_list[0]
###Output
_____no_output_____
###Markdown
VariablesA variable is a named container for data. Using variables, we can store many different values and refer to them by name in our code. Here are a few examples.
###Code
x = 1
print(x)
name = 'Wesley'
print(name)
# list of strings
names = ['Wesley', 'Josh', 'Jarred']
print(names)
###Output
['Wesley', 'Josh', 'Jarred']
###Markdown
Functions Example 1 - Greeting
###Code
# define function named greet_user
# input: name
# output: greeting
def greet_user(name):
greeting = 'Hello ' + name +'!'
return greeting
###Output
_____no_output_____
###Markdown
There are a few ways we can use a function once it has been defined. Let's see an example of each way: 1
###Code
# define variable
name = 'Wesley'
# call function and insert name variable as input
greet_user(name)
###Output
_____no_output_____
###Markdown
2
###Code
# call function and insert name
greet_user(name='Wesley')
###Output
_____no_output_____
###Markdown
3
###Code
# call function and insert name, no label
greet_user('Wesley')
###Output
_____no_output_____
###Markdown
Example 2 - Straight Line
###Code
# define function named f. this is the equation for a straight line: y=mx+b
# input: x, m, b
# output: y value
def f(x, m, b):
# equation
y = m*x+b
# output
return y
# define variables
x = 1
m = 1/8
b = 4
# call function
y = f(x, m, b)
###Output
_____no_output_____
###Markdown
LoopsLoops allow you to apply an operation to every item in a list. Here is an example: for loops
###Code
# create list of string values
names = ['Wesley', 'Josh', 'Jarred']
# print each name in list
for name in names:
print(name)
# create list of intgers
numbers = [1,3,5,7,9]
# add 100 to each value in list
for number in numbers:
new_value = number+100
print(new_value)
###Output
101
103
105
107
109
###Markdown
while loops
###Code
# iteration
i = 0
# while this condition is true, loop
while i < 5:
# increment
i = i + 1
# display result
print(i)
###Output
1
2
3
4
5
###Markdown
Practice Problems 1)Create a list of string values naming your top 3 favorite genres of music. Then use a for loop to print all the values.
###Code
# problem 1 code goes here
###Output
_____no_output_____
###Markdown
Write code to calculate the square root of 808. Search Google and figure out how to take the square root of a number in python. Stack overflow and python.org are good resources.
###Code
# problem 2 answer goes here
###Output
_____no_output_____
###Markdown
3)Define a function named add_nums that takes in two inputs, x and y, then returns the sum. Then use the function you build to sum the following numbers: 1731, 8332
###Code
# problem 3 code goes here
###Output
_____no_output_____ |
ML-week/python/Pandas.ipynb | ###Markdown
Putting Some Pandas In Your Python
###Code
import platform
print(platform.python_version())
%load_ext watermark
%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas
###Output
3.5.4
Gopala KR
last updated: 2018-08-30
CPython 3.5.4
IPython 6.2.1
watermark 1.6.1
numpy 1.15.0
pandas 0.19.2
###Markdown
Command: pip3 install pandas
###Code
import pandas as pd
import numpy as np
df = pd.DataFrame({
'col-1': ['Item-1', 'Item-2', 'Item-3', 'Item-4'],
'col-2': ['Gold', 'Bronze', 'Gold', 'Silver'],
'col-3': [1, 2, np.nan, 4]
})
print(df)
###Output
col-1 col-2 col-3
0 Item-1 Gold 1.0
1 Item-2 Bronze 2.0
2 Item-3 Gold NaN
3 Item-4 Silver 4.0
###Markdown
Creating a DataFrame from Dictionary
###Code
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'], 'Age':[28,34,29,42]}
df = pd.DataFrame(data)
print(df)
# Creating indexed dataframe
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'], 'Age':[28,34,29,42]}
df = pd.DataFrame(data, index=['I-1', 'I-2', 'I-3', 'I-4'])
print(df)
###Output
Age Name
I-1 28 Tom
I-2 34 Jack
I-3 29 Steve
I-4 42 Ricky
###Markdown
DataFrame Basic Functionality
###Code
import pandas as pd
import numpy as np
# Create Dictionary of Series
dict = {'Name':pd.Series(['Tom', 'Jack', 'Steve', 'Ricky', 'Vin', 'James', 'Smith']),
'Age':pd.Series([25,26,25,35,23,33,31]),
'Rating':pd.Series([4.23,4.1,3.4,5,2.9,4.7,3.1])}
df = pd.DataFrame(dict)
print(df)
# Transpose-> returns transpose of DataFrame
print(df.T)
# Axes-> returns list of row axis labels and column axis labels
print(df.axes)
# dtypes-> return datatype of each column
print(df.dtypes)
# shape-> returns tuple representing dimensionallity
print(df.shape)
# values-> returns actual data as ndarray
print(df.values)
# head-> by default head returns first n rows
print(df.head())
print('*'*50)
print(df.head(2))
# tail-> by default tail returns last n rows
print(df.tail())
print('*'*50)
print(df.tail(2))
###Output
Age Name Rating
2 25 Steve 3.4
3 35 Ricky 5.0
4 23 Vin 2.9
5 33 James 4.7
6 31 Smith 3.1
**************************************************
Age Name Rating
5 33 James 4.7
6 31 Smith 3.1
###Markdown
Statistics
###Code
# sum()-> returns the sum of values for requested axis. by default axis = 0
print(df.sum())
# axis = 1 -> row wise sum
print(df.sum(1))
# mean()
print(df.mean())
# std()
print(df.std())
# describe() -> summarizing the data
print(df.describe())
# include object, number, all
print(df.describe(include=['object']))
print(df.describe(include=['number']))
# Don't pass 'all' as a list
print(df.describe(include='all'))
###Output
Age Name Rating
count 7.000000 7 7.000000
unique NaN 7 NaN
top NaN Steve NaN
freq NaN 1 NaN
mean 28.285714 NaN 3.918571
std 4.644505 NaN 0.804828
min 23.000000 NaN 2.900000
25% 25.000000 NaN 3.250000
50% 26.000000 NaN 4.100000
75% 32.000000 NaN 4.465000
max 35.000000 NaN 5.000000
###Markdown
Working with .csv
###Code
import pandas as pd
df = pd.read_csv('Iris.csv')
df.head()
df.tail()
print(df.shape)
print(df.columns)
print(df.mean())
print(df.std())
df.describe()
###Output
_____no_output_____
###Markdown
Series Data Structure
###Code
# pd.Series(data,index)
# index-> Unique, Hashable, same length as data. By default np.arange(n)
import pandas as pd
s = pd.Series()
print(s)
###Output
Series([], dtype: float64)
###Markdown
Creating Series from ndarray
###Code
import numpy as np
data = np.array(['a', 'b', 'c', 'd'])
s = pd.Series(data)
print(s)
###Output
0 a
1 b
2 c
3 d
dtype: object
###Markdown
Create Series from dict
###Code
data = {'a':0., 'b':1., 'c':2.}
s = pd.Series(data)
print(s)
###Output
a 0.0
b 1.0
c 2.0
dtype: float64
###Markdown
Data accessing using Index
###Code
s = pd.Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
print(s)
print(s['a'])
# Retrieve multiple elements
print(s[['a', 'b', 'e']])
# note: 'f' is not in the index, so this line raises a KeyError
print(s['f'])
###Output
_____no_output_____ |
sonstiges/DSP_Python_Matlab/15.11 DFT Exercise.ipynb | ###Markdown
DFT in Python
###Code
import numpy as np
z = [1, 0, -1, 0]
z = np.fft.fft(z)
print(z)
###Output
[0.+0.j 2.+0.j 0.+0.j 2.+0.j]
|
CamVid2Tiramisu.ipynb | ###Markdown
Imports
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai import *
from fastai.vision import *
path = Path('C:/Users/jpatn/data/camvid_orig/')
image_path = path/'images'
label_path = path/'labels'
valid_path = path/'valid.txt'
###Output
_____no_output_____
###Markdown
Original Data
###Code
im_fp= get_image_files(image_path)
lbl_fp = get_image_files(label_path)
def open_im(fp):
return PIL.Image.open(fp)
im1_fp = im_fp[0]
im1 = open_im(im1_fp); im1
get_lbl_fp = lambda x: label_path/f'{x.stem}_P{x.suffix}'
lbl1_fp = get_lbl_fp(im1_fp)
lbl1_im = open_im(lbl1_fp);lbl1_im
###Output
_____no_output_____
###Markdown
Where the labels come from:
###Code
np.array(lbl1_im)
###Output
_____no_output_____
###Markdown
Label opened as mask:
###Code
lbl1_msk = open_mask(lbl1_fp)
lbl1_msk.show(figsize=(8,8))
###Output
_____no_output_____
###Markdown
Converting Data
###Code
orig_codes = np.loadtxt(path/'codes.txt',dtype=str);orig_codes
new_codes = ['Building', 'Car', 'Cyclist', 'Fence', 'Pedestrian', 'Pole', 'Road', 'Sidewalk',
'Sign', 'Sky', 'Vegetation', 'Void']
maps = [(4,1,31,3),(5,14,25,22,27),(2,13),(9,),(16,7,0,6),(8,23),(17,10,11,28),(19,18,15),(20,24,12),(21,),(29,26),(30,)]
rev_maps = {c:i for i,m in enumerate(maps) for c in m}
def map_codes(c):
for k,v in rev_maps.items():
if c==k: return v
map_codes(31)
def lbl_from_imp(im_path):
return open_im(get_lbl_fp(im_path))
lbl_from_imp(im1_fp)
def lbl_array(im):
return np.array(im)
lbl1_arr = lbl_array(lbl1_im)
lbl1_arr
def convert_arr(arr):
v = np.vectorize(map_codes)
return v(arr)
convert_arr(lbl1_arr)
def arr_to_lbl(arr):
new_arr = np.asarray(arr,dtype=np.uint8)
return PIL.Image.fromarray(new_arr,'L')
###Output
_____no_output_____
###Markdown
New label with classes converted:
###Code
arr_to_lbl(convert_arr(lbl1_arr))
###Output
_____no_output_____
###Markdown
Convert all labels:
###Code
def get_new_labels(im_path):
lbl = lbl_from_imp(im_path)
arr = lbl_array(lbl)
new_arr = convert_arr(arr)
new_lbl = arr_to_lbl(new_arr)
return new_lbl.save(get_lbl_fp(im_path))
for im in im_fp:
get_new_labels(im)
###Output
_____no_output_____
###Markdown
New Data
###Code
new_lbls = path/'labels'
###Output
_____no_output_____
###Markdown
New label:
###Code
new_lbl1_fp = get_lbl_fp(im1_fp)
new_lbl1 = open_im(new_lbl1_fp)
new_lbl1
###Output
_____no_output_____
###Markdown
Labels:
###Code
lbl_array(new_lbl1)
###Output
_____no_output_____
###Markdown
Label opened as mask:
###Code
new_msk = open_mask(new_lbl1_fp)
new_msk
###Output
_____no_output_____ |
models/deprecated/4-2 (1). InceptionV3 Triplet Network.ipynb | ###Markdown
Load Data
###Code
train_df = pd.read_csv('./data/triplet/train.csv')
val_df = pd.read_csv('./data/triplet/validation.csv')
test_df = pd.read_csv('./data/triplet/test.csv')
print('Train:\t\t', train_df.shape)
print('Validation:\t', val_df.shape)
print('Test:\t\t', test_df.shape)
print('\nTrain Landmarks:\t', len(train_df['landmark_id'].unique()))
print('Validation Landmarks:\t', len(val_df['landmark_id'].unique()))
print('Test Landmarks:\t\t', len(test_df['landmark_id'].unique()))
train_df.head()
###Output
_____no_output_____
###Markdown
Helper Functions
###Code
# training set triplet generator
def train_triplet_generator(df, batch_size=74, img_size=(224, 224), seed=42,
prefix='./data/triplet/train/'):
""" training set triplet generator
it will generate 7400 triplet images in total
"""
# get images with only one training image landmark id and the rest landmark ids
np.random.seed(seed)
grouped = df[['landmark_id', 'image_id']].groupby('landmark_id').count().reset_index()
unique_neg_ids = list(grouped[grouped['image_id'] == 1]['landmark_id'].values)
rest_ids = list(grouped[grouped['image_id'] > 1]['landmark_id'].values)
size = 7400 * 2 - len(unique_neg_ids)
zeros = np.zeros((batch_size, 3, 1), dtype=K.floatx())
while True:
# get positive and negative image landmark ids
np.random.shuffle(rest_ids)
candidate_ids = list(np.random.choice(rest_ids, size=size, replace=False))
pos_landmark_ids = candidate_ids[:7400]
neg_landmark_ids = candidate_ids[7400:] + unique_neg_ids
np.random.shuffle(neg_landmark_ids)
# transform landmark id into image id
anc_img_ids = []
pos_img_ids = []
neg_img_ids = []
for i in range(len(pos_landmark_ids)):
tmp_pos_ids = df[df['landmark_id'] == pos_landmark_ids[i]]['image_id'].values
anc_img_ids.append(tmp_pos_ids[0])
pos_img_ids.append(tmp_pos_ids[1])
tmp_neg_ids = df[df['landmark_id'] == neg_landmark_ids[i]]['image_id'].values
neg_img_ids.append(tmp_neg_ids[0])
# iterator to read batch images
for j in range(len(pos_img_ids) // batch_size):
batch_anc_img_ids = anc_img_ids[j * batch_size: (j + 1) * batch_size]
batch_pos_img_ids = pos_img_ids[j * batch_size: (j + 1) * batch_size]
batch_neg_img_ids = neg_img_ids[j * batch_size: (j + 1) * batch_size]
# get images
anc_imgs = []
pos_imgs = []
neg_imgs = []
# iteratively read images
for k in range(batch_size):
anc_path = prefix + str(batch_anc_img_ids[k]) + '.jpg'
pos_path = prefix + str(batch_pos_img_ids[k]) + '.jpg'
neg_path = prefix + str(batch_neg_img_ids[k]) + '.jpg'
tmp_anc_img = load_img(anc_path, target_size=img_size)
tmp_anc_img = img_to_array(tmp_anc_img)
anc_imgs.append(tmp_anc_img)
tmp_pos_img = load_img(pos_path, target_size=img_size)
tmp_pos_img = img_to_array(tmp_pos_img)
pos_imgs.append(tmp_pos_img)
tmp_neg_img = load_img(neg_path, target_size=img_size)
tmp_neg_img = img_to_array(tmp_neg_img)
neg_imgs.append(tmp_neg_img)
# transform list to array
anc_imgs = np.array(anc_imgs, dtype=K.floatx()) / 255.0
pos_imgs = np.array(pos_imgs, dtype=K.floatx()) / 255.0
neg_imgs = np.array(neg_imgs, dtype=K.floatx()) / 255.0
yield [anc_imgs, pos_imgs, neg_imgs], zeros
# validation set triplet generator
def val_triplet_generator(df, batch_size=128, img_size=(224, 224),
                          seed=42, prefix='./data/triplet/validation/'):
""" validation set triplet collector """
# get images with only one image landmark id and the rest landmark ids
grouped = df[['landmark_id', 'image_id']].groupby('landmark_id').count().reset_index()
unique_neg_ids = list(grouped[grouped['image_id'] == 1]['landmark_id'].values)
rest_ids = list(grouped[grouped['image_id'] > 1]['landmark_id'].values)
size = 3072 * 2 - len(unique_neg_ids)
zeros = np.zeros((batch_size, 3, 1), dtype=K.floatx())
while True:
# get positive and negative image landmark ids
np.random.seed(seed)
candidate_ids = list(np.random.choice(rest_ids, size=size, replace=False))
pos_landmark_ids = candidate_ids[:3072]
neg_landmark_ids = candidate_ids[3072:] + unique_neg_ids
np.random.shuffle(neg_landmark_ids)
# transform landmark id into image id
anc_img_ids = []
pos_img_ids = []
neg_img_ids = []
for i in range(len(pos_landmark_ids)):
tmp_pos_ids = df[df['landmark_id'] == pos_landmark_ids[i]]['image_id'].values
anc_img_ids.append(tmp_pos_ids[0])
pos_img_ids.append(tmp_pos_ids[1])
tmp_neg_ids = df[df['landmark_id'] == neg_landmark_ids[i]]['image_id'].values
neg_img_ids.append(tmp_neg_ids[0])
# iterator to read batch images
for j in range(len(pos_img_ids) // batch_size):
batch_anc_img_ids = anc_img_ids[j * batch_size: (j + 1) * batch_size]
batch_pos_img_ids = pos_img_ids[j * batch_size: (j + 1) * batch_size]
batch_neg_img_ids = neg_img_ids[j * batch_size: (j + 1) * batch_size]
# get images
anc_imgs = []
pos_imgs = []
neg_imgs = []
# iteratively read images
for k in range(batch_size):
anc_path = prefix + str(batch_anc_img_ids[k]) + '.jpg'
pos_path = prefix + str(batch_pos_img_ids[k]) + '.jpg'
neg_path = prefix + str(batch_neg_img_ids[k]) + '.jpg'
tmp_anc_img = load_img(anc_path, target_size=img_size)
tmp_anc_img = img_to_array(tmp_anc_img)
anc_imgs.append(tmp_anc_img)
tmp_pos_img = load_img(pos_path, target_size=img_size)
tmp_pos_img = img_to_array(tmp_pos_img)
pos_imgs.append(tmp_pos_img)
tmp_neg_img = load_img(neg_path, target_size=img_size)
tmp_neg_img = img_to_array(tmp_neg_img)
neg_imgs.append(tmp_neg_img)
# transform list to array
anc_imgs = np.array(anc_imgs, dtype=K.floatx()) / 255.0
pos_imgs = np.array(pos_imgs, dtype=K.floatx()) / 255.0
neg_imgs = np.array(neg_imgs, dtype=K.floatx()) / 255.0
yield [anc_imgs, pos_imgs, neg_imgs], zeros
###Output
_____no_output_____
###Markdown
Define Triplet Loss Model
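For reference, the loss implemented in `triplet_loss` below is the standard triplet objective with margin $\alpha = 0.3$:
$$L = \sum_i \max\left(\lVert f(x_i^{a}) - f(x_i^{p}) \rVert_2^2 - \lVert f(x_i^{a}) - f(x_i^{n}) \rVert_2^2 + \alpha,\ 0\right)$$
where $a$, $p$, $n$ denote the anchor, positive and negative images embedded by the shared base network.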
###Code
# Define base network for triplet network
def base_net(input_shape=(224, 224, 3), trainable=False):
""" define triplet network """
# load pre-trained InceptionV3 model
inception = InceptionV3(include_top=False, weights='imagenet', input_shape=input_shape)
inception.trainable = trainable
# define sequential model
model = Sequential(name='base_net')
model.add(inception)
model.add(Flatten(name='flatten'))
model.add(Dropout(rate=0.5, name='dropout'))
model.add(Dense(512, activation=None, name='fc'))
model.add(Lambda(lambda x: K.l2_normalize(x, axis=1), name='l2_norm'))
return model
# Define triplet network
def triplet_net(base_model, input_shape=(224, 224, 3)):
""" function to define triplet networks """
# define input: anchor, positive, negative
anchor = Input(shape=input_shape, name='anchor_input')
positive = Input(shape=input_shape, name='positive_input')
negative = Input(shape=input_shape, name='negative_input')
# extract vector represent using CNN based model
anc_vec = base_model(anchor)
pos_vec = base_model(positive)
neg_vec = base_model(negative)
# stack outputs
stacks = Lambda(lambda x: K.stack(x, axis=1), name='output')([anc_vec, pos_vec, neg_vec])
# define inputs and outputs
inputs=[anchor, positive, negative]
outputs = stacks
# define the triplet model
model = Model(inputs=inputs, outputs=outputs, name='triplet_net')
return model
# Define triplet loss
def triplet_loss(y_true, y_pred):
""" function to compute triplet loss
margin is predefined coded, manually change if needed
"""
# define triplet margin
margin = K.constant(0.3)
zero = K.constant(0.0)
# get the prediction vector
anchor, positive, negative = y_pred[:, 0], y_pred[:, 1], y_pred[:, 2]
# compute distance
pos_distance = K.sum(K.square(anchor - positive), axis=1)
neg_distance = K.sum(K.square(anchor - negative), axis=1)
# compute loss
partial_loss = pos_distance - neg_distance + margin
full_loss = K.sum(K.maximum(partial_loss, zero), axis=0)
return full_loss
###Output
_____no_output_____
###Markdown
Build Triplet Model
###Code
# For reproduciable purpose
seed = 42
K.clear_session()
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(seed)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# Define Parameters
img_size = (224, 224, 3) # target image size
# triplet image generator
train_generator = train_triplet_generator(train_df, batch_size=74, img_size=img_size[:2],
seed=42, prefix='./data/triplet/train/')
val_generator = val_triplet_generator(val_df, batch_size=64, img_size=img_size[:2],
seed=42, prefix='./data/triplet/validation/')
# Define triplet network model
base_model = base_net(input_shape=img_size, trainable=False)
base_model.summary()
triplet_model = triplet_net(base_model=base_model, input_shape=img_size)
triplet_model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
anchor_input (InputLayer) (None, 224, 224, 3) 0
__________________________________________________________________________________________________
positive_input (InputLayer) (None, 224, 224, 3) 0
__________________________________________________________________________________________________
negative_input (InputLayer) (None, 224, 224, 3) 0
__________________________________________________________________________________________________
base_net (Sequential) (None, 512) 48017696 anchor_input[0][0]
positive_input[0][0]
negative_input[0][0]
__________________________________________________________________________________________________
output (Lambda) (None, 3, 512) 0 base_net[1][0]
base_net[2][0]
base_net[3][0]
==================================================================================================
Total params: 48,017,696
Trainable params: 26,214,912
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
###Markdown
Fit Triplet Model
###Code
# define learning scheduler
def lr_schedule(epoch):
""" Learning rate schedule """
lr = 1e-3
if epoch > 80:
lr *= 6e-1
elif epoch > 60:
lr *= 7e-1
elif epoch > 40:
lr *= 8e-1
elif epoch > 20:
lr *= 9e-1
print('Learning rate: ', lr)
return lr
# define optimizer
opt = keras.optimizers.Adam(lr=lr_schedule(0))
# Create call backs
checkpoint = ModelCheckpoint(filepath='./models/inception-triplet(1)-ckpt.h5',
monitor='val_loss', save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer, lr_scheduler]
# compile the model
triplet_model.compile(optimizer=opt, loss=triplet_loss)
# fit the model
history = triplet_model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
validation_data=val_generator, validation_steps=48,
verbose=2, callbacks=callbacks)
triplet_model.save('./models/inception-triplet(1)-model.h5')
base_model.save('./models/inception-base(1)-model.h5')
pickle.dump(history.history, open('./models/inception-triplet(1)-history.p', 'wb'))
_ = gc.collect()
# Visualize the training process
train_loss = history.history['loss']
val_loss = history.history['val_loss']
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(train_loss, label='Training Loss')
ax.plot(val_loss, label='Validation Loss')
ax.set_title('Loss vs. Epochs', fontsize=16)
ax.set_xlabel('Epochs', fontsize=14)
ax.set_ylabel('Loss', fontsize=14)
ax.legend(fontsize=14)
ax.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Extract Features using Triplet Network
###Code
train_df = pd.read_csv('./data/triplet/train.csv')
val_df = pd.read_csv('./data/triplet/validation.csv')
test_df = pd.read_csv('./data/triplet/test.csv')
print('Train:\t\t', train_df.shape)
print('Validation:\t', val_df.shape)
print('Test:\t\t', test_df.shape)
print('\nTrain Landmarks:\t', len(train_df['landmark_id'].unique()))
print('Validation Landmarks:\t', len(val_df['landmark_id'].unique()))
print('Test Landmarks:\t\t', len(test_df['landmark_id'].unique()))
# Load trained model
base_model = load_model('./models/inception-base(1)-model.h5')
base_model.summary()
# Define train_imgs and test_imgs
train_imgs = np.zeros(shape=(len(train_df), 512), dtype=np.float32)
val_imgs = np.zeros(shape=(len(val_df), 512), dtype=np.float32)
test_imgs = np.zeros(shape=(len(test_df), 512), dtype=np.float32)
# Process training images
img_ids = train_df['image_id'].values
steps = 20000
for i in range(0, len(train_df), steps):
tmp_imgs = []
print('\nProcess: {:10d}'.format(i))
start = i
end = min(len(train_df), i + steps)
for idx in range(start, end):
if idx % 250 == 0:
print('=', end='')
img_id = img_ids[idx]
path = './data/triplet/train/' + str(img_id) + '.jpg'
img = load_img(path, target_size=img_size[:2])
img = img_to_array(img)
tmp_imgs.append(img)
tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0
tmp_prediction = base_model.predict(tmp_imgs)
train_imgs[start: end, ] = tmp_prediction
_ = gc.collect()
# Process validation images
img_ids = val_df['image_id'].values
steps = 4000
for i in range(0, len(val_df), steps):
tmp_imgs = []
print('\nProcess: {:10d}'.format(i))
start = i
end = min(len(val_df), i + steps)
for idx in range(start, end):
if idx % 50 == 0:
print('=', end='')
img_id = img_ids[idx]
path = './data/triplet/validation/' + str(img_id) + '.jpg'
img = load_img(path, target_size=img_size[:2])
img = img_to_array(img)
tmp_imgs.append(img)
tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0
tmp_prediction = base_model.predict(tmp_imgs)
val_imgs[start: end, ] = tmp_prediction
_ = gc.collect()
# Process test images
img_ids = test_df['image_id'].values
steps = 4000
for i in range(0, len(test_df), steps):
tmp_imgs = []
print('\nProcess: {:10d}'.format(i))
start = i
end = min(len(test_df), i + steps)
for idx in range(start, end):
if idx % 50 == 0:
print('=', end='')
img_id = img_ids[idx]
path = './data/triplet/test/' + str(img_id) + '.jpg'
img = load_img(path, target_size=img_size[:2])
img = img_to_array(img)
tmp_imgs.append(img)
tmp_imgs = np.array(tmp_imgs, dtype=np.float32) / 255.0
tmp_prediction = base_model.predict(tmp_imgs)
test_imgs[start: end, ] = tmp_prediction
_ = gc.collect()
print('Train:\t\t', train_imgs.shape)
print('Validation:\t', val_imgs.shape)
print('Test:\t\t', test_imgs.shape)
# Save to disk
np.save('./data/triplet/train_triplet_inception(1)_features.npy', train_imgs)
np.save('./data/triplet/validation_triplet_inception(1)_features.npy', val_imgs)
np.save('./data/triplet/test_triplet_inception(1)_features.npy', test_imgs)
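# Illustrative follow-up only (not part of the original pipeline): the saved embeddings can be
# used for retrieval, e.g. finding the nearest training images for each test image.
# scikit-learn availability is an assumption here.
from sklearn.neighbors import NearestNeighbors
nn_index = NearestNeighbors(n_neighbors=5, metric='euclidean').fit(train_imgs)
nn_distances, nn_neighbor_ids = nn_index.kneighbors(test_imgs)
print(nn_neighbor_ids[:3])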
###Output
_____no_output_____ |
site/en/2/guide/eager.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Eager Execution View on TensorFlow.org Run in Google Colab View source on GitHub TensorFlow's eager execution is an imperative programming environment thatevaluates operations immediately, without building graphs: operations returnconcrete values instead of constructing a computational graph to run later. Thismakes it easy to get started with TensorFlow and debug models, and itreduces boilerplate as well. To follow along with this guide, run the codesamples below in an interactive `python` interpreter.Eager execution is a flexible machine learning platform for research andexperimentation, providing:* *An intuitive interface*—Structure your code naturally and use Python data structures. Quickly iterate on small models and small data.* *Easier debugging*—Call ops directly to inspect running models and test changes. Use standard Python debugging tools for immediate error reporting.* *Natural control flow*—Use Python control flow instead of graph control flow, simplifying the specification of dynamic models.Eager execution supports most TensorFlow operations and GPU acceleration.Note: Some models may experience increased overhead with eager executionenabled. Performance improvements are ongoing, but please[file a bug](https://github.com/tensorflow/tensorflow/issues) if you find aproblem and share your benchmarks. Setup and basic usageUpgrade to the latest version of TensorFlow:
###Code
from __future__ import absolute_import, division, print_function
!pip install tf-nightly-2.0-preview
import tensorflow as tf
###Output
_____no_output_____
###Markdown
In Tensorflow 2.0, eager execution is enabled by default.
###Code
tf.executing_eagerly()
###Output
_____no_output_____
###Markdown
Now you can run TensorFlow operations and the results will return immediately:
###Code
x = [[2.]]
m = tf.matmul(x, x)
print("hello, {}".format(m))
###Output
_____no_output_____
###Markdown
Enabling eager execution changes how TensorFlow operations behave—now theyimmediately evaluate and return their values to Python. `tf.Tensor` objectsreference concrete values instead of symbolic handles to nodes in a computationalgraph. Since there isn't a computational graph to build and run later in asession, it's easy to inspect results using `print()` or a debugger. Evaluating,printing, and checking tensor values does not break the flow for computinggradients.Eager execution works nicely with [NumPy](http://www.numpy.org/). NumPyoperations accept `tf.Tensor` arguments. TensorFlow[math operations](https://www.tensorflow.org/api_guides/python/math_ops) convertPython objects and NumPy arrays to `tf.Tensor` objects. The`tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`.
###Code
a = tf.constant([[1, 2],
[3, 4]])
print(a)
# Broadcasting support
b = tf.add(a, 1)
print(b)
# Operator overloading is supported
print(a * b)
# Use NumPy values
import numpy as np
c = np.multiply(a, b)
print(c)
# Obtain numpy value from a tensor:
print(a.numpy())
# => [[1 2]
# [3 4]]
###Output
_____no_output_____
###Markdown
Dynamic control flowA major benefit of eager execution is that all the functionality of the hostlanguage is available while your model is executing. So, for example,it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz):
###Code
def fizzbuzz(max_num):
counter = tf.constant(0)
max_num = tf.convert_to_tensor(max_num)
for num in range(1, max_num.numpy()+1):
num = tf.constant(num)
if int(num % 3) == 0 and int(num % 5) == 0:
print('FizzBuzz')
elif int(num % 3) == 0:
print('Fizz')
elif int(num % 5) == 0:
print('Buzz')
else:
print(num.numpy())
counter += 1
fizzbuzz(15)
###Output
_____no_output_____
###Markdown
This has conditionals that depend on tensor values and it prints these valuesat runtime. Build a modelMany machine learning models are represented by composing layers. Whenusing TensorFlow with eager execution you can either write your own layers oruse a layer provided in the `tf.keras.layers` package.While you can use any Python object to represent a layer,TensorFlow has `tf.keras.layers.Layer` as a convenient base class. Inherit fromit to implement your own layer, and set `self.dynamic=True` in the constructor if the layer must be executed imperatively:
###Code
class MySimpleLayer(tf.keras.layers.Layer):
def __init__(self, output_units):
super(MySimpleLayer, self).__init__()
self.output_units = output_units
self.dynamic = True
def build(self, input_shape):
# The build method gets called the first time your layer is used.
# Creating variables on build() allows you to make their shape depend
# on the input shape and hence removes the need for the user to specify
# full shapes. It is possible to create variables during __init__() if
# you already know their full shapes.
self.kernel = self.add_variable(
"kernel", [input_shape[-1], self.output_units])
def call(self, input):
# Override call() instead of __call__ so we can perform some bookkeeping.
return tf.matmul(input, self.kernel)
###Output
_____no_output_____
###Markdown
Use `tf.keras.layers.Dense` layer instead of `MySimpleLayer` above as it hasa superset of its functionality (it can also add a bias).When composing layers into models you can use `tf.keras.Sequential` to representmodels which are a linear stack of layers. It is easy to use for basic models:
###Code
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, input_shape=(784,)), # must declare input shape
tf.keras.layers.Dense(10)
])
###Output
_____no_output_____
###Markdown
Alternatively, organize models in classes by inheriting from `tf.keras.Model`.This is a container for layers that is a layer itself, allowing `tf.keras.Model`objects to contain other `tf.keras.Model` objects.
###Code
class MNISTModel(tf.keras.Model):
def __init__(self):
super(MNISTModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(units=10)
self.dense2 = tf.keras.layers.Dense(units=10)
def call(self, input):
"""Run the model."""
result = self.dense1(input)
result = self.dense2(result)
result = self.dense2(result) # reuse variables from dense2 layer
return result
model = MNISTModel()
###Output
_____no_output_____
###Markdown
It's not required to set an input shape for the `tf.keras.Model` class sincethe parameters are set the first time input is passed to the layer.`tf.keras.layers` classes create and contain their own model variables thatare tied to the lifetime of their layer objects. To share layer variables, sharetheir objects. Eager training Computing gradients[Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation)is useful for implementing machine learning algorithms such as[backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for trainingneural networks. During eager execution, use `tf.GradientTape` to traceoperations for computing gradients later.`tf.GradientTape` is an opt-in feature to provide maximal performance whennot tracing. Since different operations can occur during each call, allforward-pass operations get recorded to a "tape". To compute the gradient, playthe tape backwards and then discard. A particular `tf.GradientTape` can onlycompute one gradient; subsequent calls throw a runtime error.
###Code
w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:
loss = w * w
grad = tape.gradient(loss, w)
print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32)
###Output
_____no_output_____
###Markdown
Train a modelThe following example creates a multi-layer model that classifies the standardMNIST handwritten digits. It demonstrates the optimizer and layer APIs to buildtrainable graphs in an eager execution environment.
###Code
# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)
# Build the model
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
###Output
_____no_output_____
###Markdown
Even without training, call the model and inspect the output in eager execution:
###Code
for images,labels in dataset.take(1):
print("Logits: ", mnist_model(images[0:1]).numpy())
###Output
_____no_output_____
###Markdown
While Keras models have a built-in training loop (using the `fit` method), sometimes you need more customization. Here's an example of a training loop implemented with eager execution:
###Code
optimizer = tf.keras.optimizers.Adam()
loss_history = []
for (batch, (images, labels)) in enumerate(dataset.take(400)):
if batch % 80 == 0:
print()
print('.', end='')
with tf.GradientTape() as tape:
logits = mnist_model(images, training=True)
loss_value = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
loss_history.append(loss_value.numpy().mean())
grads = tape.gradient(loss_value, mnist_model.variables)
optimizer.apply_gradients(zip(grads, mnist_model.variables))
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
###Output
_____no_output_____
###Markdown
This example uses the[dataset.py module](https://github.com/tensorflow/models/blob/master/official/mnist/dataset.py)from the[TensorFlow MNIST example](https://github.com/tensorflow/models/tree/master/official/mnist);download this file to your local directory. Run the following to download theMNIST data files to your working directory and prepare a `tf.data.Dataset`for training: Variables and optimizers`tf.Variable` objects store mutable `tf.Tensor` values accessed duringtraining to make automatic differentiation easier. The parameters of a model canbe encapsulated in classes as variables.Better encapsulate model parameters by using `tf.Variable` with`tf.GradientTape`. For example, the automatic differentiation example abovecan be rewritten:
###Code
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
self.W = tf.Variable(5., name='weight')
self.B = tf.Variable(10., name='bias')
def call(self, inputs):
return inputs * self.W + self.B
# A toy dataset of points around 3 * x + 2
NUM_EXAMPLES = 2000
training_inputs = tf.random.normal([NUM_EXAMPLES])
noise = tf.random.normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise
# The loss function to be optimized
def loss(model, inputs, targets):
error = model(inputs) - targets
return tf.reduce_mean(tf.square(error))
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets)
return tape.gradient(loss_value, [model.W, model.B])
# Define:
# 1. A model.
# 2. Derivatives of a loss function with respect to model parameters.
# 3. A strategy for updating the variables based on the derivatives.
model = Model()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
# Training loop
for i in range(300):
grads = grad(model, training_inputs, training_outputs)
optimizer.apply_gradients(zip(grads, [model.W, model.B]))
if i % 20 == 0:
print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs)))
print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy()))
###Output
_____no_output_____
###Markdown
Use objects for state during eager executionWith TF 1.x graph execution, program state (such as the variables) is stored in globalcollections and their lifetime is managed by the `tf.Session` object. Incontrast, during eager execution the lifetime of state objects is determined bythe lifetime of their corresponding Python object. Variables are objectsDuring eager execution, variables persist until the last reference to the objectis removed, and is then deleted.
###Code
if tf.test.is_gpu_available():
with tf.device("gpu:0"):
    v = tf.Variable(tf.random.normal([1000, 1000]))
v = None # v no longer takes up GPU memory
###Output
_____no_output_____
###Markdown
Object-based saving`tf.train.Checkpoint` can save and restore `tf.Variable`s to and fromcheckpoints:
###Code
x = tf.Variable(10.)
checkpoint = tf.train.Checkpoint(x=x)
x.assign(2.) # Assign a new value to the variables and save.
checkpoint_path = './ckpt/'
checkpoint.save('./ckpt/')
x.assign(11.) # Change the variable after saving.
# Restore values from the checkpoint
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path))
print(x) # => 2.0
###Output
_____no_output_____
###Markdown
To save and load models, `tf.train.Checkpoint` stores the internal state of objects,without requiring hidden variables. To record the state of a `model`,an `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`:
###Code
import os
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
checkpoint_dir = 'path/to/model_dir'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
root = tf.train.Checkpoint(optimizer=optimizer,
model=model)
root.save(checkpoint_prefix)
root.restore(tf.train.latest_checkpoint(checkpoint_dir))
###Output
_____no_output_____
###Markdown
Object-oriented metrics`tf.keras.metrics` are stored as objects. Update a metric by passing the new data tothe callable, and retrieve the result using the `tf.keras.metrics.result` method,for example:
###Code
m = tf.keras.metrics.Mean("loss")
m(0)
m(5)
m.result() # => 2.5
m([8, 9])
m.result() # => 5.5
###Output
_____no_output_____
###Markdown
Advanced automatic differentiation topics Dynamic models`tf.GradientTape` can also be used in dynamic models. This example for a[backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search)algorithm looks like normal NumPy code, except there are gradients and isdifferentiable, despite the complex control flow:
###Code
def line_search_step(fn, init_x, rate=1.0):
with tf.GradientTape() as tape:
# Variables are automatically recorded, but manually watch a tensor
tape.watch(init_x)
value = fn(init_x)
grad = tape.gradient(value, init_x)
grad_norm = tf.reduce_sum(grad * grad)
init_value = value
while value > init_value - rate * grad_norm:
x = init_x - rate * grad
value = fn(x)
rate /= 2.0
return x, value
###Output
_____no_output_____
###Markdown
Custom gradientsCustom gradients are an easy way to override gradients. Within the forward function, define the gradient with respect to theinputs, outputs, or intermediate results. For example, here's an easy way to clipthe norm of the gradients in the backward pass:
###Code
@tf.custom_gradient
def clip_gradient_by_norm(x, norm):
y = tf.identity(x)
def grad_fn(dresult):
return [tf.clip_by_norm(dresult, norm), None]
return y, grad_fn
###Output
_____no_output_____
###Markdown
Custom gradients are commonly used to provide a numerically stable gradient for asequence of operations:
###Code
def log1pexp(x):
return tf.math.log(1 + tf.exp(x))
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# The gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# However, x = 100 fails because of numerical instability.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
Here, the `log1pexp` function can be analytically simplified with a customgradient. The implementation below reuses the value for `tf.exp(x)` that iscomputed during the forward pass—making it more efficient by eliminatingredundant calculations:
###Code
@tf.custom_gradient
def log1pexp(x):
e = tf.exp(x)
def grad(dy):
return dy * (1 - 1 / (1 + e))
return tf.math.log(1 + e), grad
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# As before, the gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# And the gradient computation also works at x = 100.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
PerformanceComputation is automatically offloaded to GPUs during eager execution. If youwant control over where a computation runs you can enclose it in a`tf.device('/gpu:0')` block (or the CPU equivalent):
###Code
import time
def measure(x, steps):
# TensorFlow initializes a GPU the first time it's used, exclude from timing.
tf.matmul(x, x)
start = time.time()
for i in range(steps):
x = tf.matmul(x, x)
# tf.matmul can return before completing the matrix multiplication
# (e.g., can return after enqueing the operation on a CUDA stream).
# The x.numpy() call below will ensure that all enqueued operations
# have completed (and will also copy the result to host memory,
# so we're including a little more than just the matmul operation
# time).
_ = x.numpy()
end = time.time()
return end - start
shape = (1000, 1000)
steps = 200
print("Time to multiply a {} matrix by itself {} times:".format(shape, steps))
# Run on CPU:
with tf.device("/cpu:0"):
print("CPU: {} secs".format(measure(tf.random.normal(shape), steps)))
# Run on GPU, if available:
if tf.test.is_gpu_available():
with tf.device("/gpu:0"):
print("GPU: {} secs".format(measure(tf.random.normal(shape), steps)))
else:
print("GPU: not found")
###Output
_____no_output_____
###Markdown
A `tf.Tensor` object can be copied to a different device to execute itsoperations:
###Code
if tf.test.is_gpu_available():
  x = tf.random.normal([10, 10])
x_gpu0 = x.gpu()
x_cpu = x.cpu()
_ = tf.matmul(x_cpu, x_cpu) # Runs on CPU
_ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0
###Output
_____no_output_____ |
04c_tfrecord.ipynb | ###Markdown
Library
###Code
import tensorflow as tf
import numpy as np
import os
import glob
import pandas as pd
import PIL
import gc
from PIL import Image
print(f'Numpy version : {np.__version__}')
print(f'Pandas version : {pd.__version__}')
print(f'Tensorflow version : {tf.__version__}')
print(f'Pillow version : {PIL.__version__}')
###Output
Numpy version : 1.18.1
Pandas version : 1.0.3
Tensorflow version : 2.2.0
Pillow version : 5.4.1
###Markdown
Dataset
###Code
!ls /kaggle/input
# df_train = pd.read_parquet('/kaggle/input/csv-with-cleaned-ocr-text/train.parquet', engine='pyarrow').sort_values("filename").reset_index(drop=True)
df_test = pd.read_parquet('/kaggle/input/csv-with-cleaned-ocr-text/test.parquet', engine='pyarrow')
df_test
###Output
_____no_output_____
###Markdown
Create TFRecord
###Code
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _list_float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _list_int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
RESIZE_WIDTH = 512
RESIZE_HEIGHT = 512
TFRECORD_MAX_SIZE = 80 * 1024 * 1024 # 80 MB
# TOTAL_IMAGES = len(df_train.index)
TOTAL_IMAGES = len(df_test.index)
# part 1 : 0:TOTAL_IMAGES // 2 (train)
# part 2 : TOTAL_IMAGES // 2:TOTAL_IMAGES (train)
# part 1 : 0:TOTAL_IMAGES (test) [CURRENT]
START_INDEX = 0
END_INDEX = TOTAL_IMAGES
BATCH_IMAGE = 1024
def create_tfrecord(index, df):
index = str(index).zfill(3)
curr_file = f"test-{index}.tfrecords"
writer = tf.io.TFRecordWriter(curr_file)
for index, row in df.iterrows():
category_str = str(row['category']).zfill(2)
image = f'/kaggle/input/shopee-product-detection-student/test/test/test/{row["filename"]}'
img = open(image, 'rb')
img_read = img.read()
image_decoded = tf.image.decode_jpeg(img_read, channels=3)
resized_img = tf.image.resize_with_pad(image_decoded,target_width=RESIZE_WIDTH,target_height=RESIZE_HEIGHT,method=tf.image.ResizeMethod.BILINEAR)
resized_img = tf.cast(resized_img,tf.uint8)
resized_img = tf.io.encode_jpeg(resized_img)
feature = {
'filename': _bytes_feature(tf.compat.as_bytes(row['filename'])),
'label': _int64_feature(row['category']),
'words': _list_float_feature(row['words']),
'image': _bytes_feature(resized_img),
'height' : _int64_feature(RESIZE_HEIGHT),
'width' : _int64_feature(RESIZE_WIDTH)
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
for i in range(START_INDEX, END_INDEX, BATCH_IMAGE):
print(f'Create TFRecords #{i // BATCH_IMAGE}')
if i + BATCH_IMAGE < END_INDEX:
create_tfrecord(i // BATCH_IMAGE, df_test.loc[i:i+BATCH_IMAGE])
else:
create_tfrecord(i // BATCH_IMAGE, df_test.loc[i:END_INDEX])
gc.collect()
!ls -lah
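# Illustrative read-back sketch: the feature spec below mirrors the writer above, and the file
# name assumes at least test-000.tfrecords was written in the current directory.
def _parse_example(serialized_example):
    feature_spec = {
        'filename': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
        'words': tf.io.VarLenFeature(tf.float32),
        'image': tf.io.FixedLenFeature([], tf.string),
        'height': tf.io.FixedLenFeature([], tf.int64),
        'width': tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(serialized_example, feature_spec)
    image = tf.io.decode_jpeg(parsed['image'], channels=3)
    return image, parsed['label']
parsed_dataset = tf.data.TFRecordDataset(['test-000.tfrecords']).map(_parse_example)
for image, label in parsed_dataset.take(1):
    print(image.shape, label.numpy())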
###Output
total 1.1G
drwxr-xr-x 2 root root 4.0K Jul 1 09:13 .
drwxr-xr-x 6 root root 4.0K Jul 1 09:10 ..
---------- 1 root root 5.2K Jul 1 09:10 __notebook__.ipynb
-rw-r--r-- 1 root root 93M Jul 1 09:11 test-000.tfrecords
-rw-r--r-- 1 root root 94M Jul 1 09:11 test-001.tfrecords
-rw-r--r-- 1 root root 93M Jul 1 09:11 test-002.tfrecords
-rw-r--r-- 1 root root 93M Jul 1 09:11 test-003.tfrecords
-rw-r--r-- 1 root root 94M Jul 1 09:12 test-004.tfrecords
-rw-r--r-- 1 root root 93M Jul 1 09:12 test-005.tfrecords
-rw-r--r-- 1 root root 94M Jul 1 09:12 test-006.tfrecords
-rw-r--r-- 1 root root 93M Jul 1 09:13 test-007.tfrecords
-rw-r--r-- 1 root root 93M Jul 1 09:13 test-008.tfrecords
-rw-r--r-- 1 root root 95M Jul 1 09:13 test-009.tfrecords
-rw-r--r-- 1 root root 94M Jul 1 09:13 test-010.tfrecords
-rw-r--r-- 1 root root 84M Jul 1 09:14 test-011.tfrecords
|
Projekt_ZPO.ipynb | ###Markdown
Loading the data
###Code
!wget "https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=train.tar.xz" -O train.tar.xz
!tar xf train.tar.xz
!rm train.tar.xz
###Output
--2022-02-21 16:32:32-- https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=train.tar.xz
Resolving chmura.put.poznan.pl (chmura.put.poznan.pl)... 150.254.5.31, 2001:808:201::5:31
Connecting to chmura.put.poznan.pl (chmura.put.poznan.pl)|150.254.5.31|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 251078400 (239M) [application/octet-stream]
Saving to: ‘train.tar.xz’
train.tar.xz 100%[===================>] 239.45M 21.2MB/s in 13s
2022-02-21 16:32:45 (18.7 MB/s) - ‘train.tar.xz’ saved [251078400/251078400]
###Markdown
Useful conversion functions
###Code
def convert_rgb_to_ids(labels: np.ndarray) -> np.ndarray:
result = np.zeros(labels.shape[:2], dtype=np.uint8)
result[np.where((labels == (0, 0, 255)).all(axis=2))] = 1
result[np.where((labels == (0, 255, 0)).all(axis=2))] = 2
result[np.where((labels == (255, 0, 0)).all(axis=2))] = 3
return result
def convert_ids_to_rgb(labels: np.ndarray) -> np.ndarray:
result = np.zeros((*labels.shape, 3), dtype=np.uint8)
result[labels == 1] = (0, 0, 255)
result[labels == 2] = (0, 255, 0)
result[labels == 3] = (255, 0, 0)
return result
###Output
_____no_output_____
###Markdown
Custom Dataset object inheriting from torch.utils.data.Dataset
###Code
class LunarDataset(torch.utils.data.Dataset):
def __init__(self, path: Path, file_names: List[str], augment: bool = False):
self._file_names = file_names
self._images_dir = path / 'images'
self._labels_dir = path / 'masks'
self._augment = augment
self.image_size = (270, 480)
self.padded_image_size = (
math.ceil(self.image_size[0] / 32) * 32,
math.ceil(self.image_size[1] / 32) * 32
)
self.transforms = A.Compose([
A.Resize(*self.image_size),
A.PadIfNeeded(*self.padded_image_size),
A.ToFloat(max_value=255),
ToTensorV2()
])
self.augmentations = A.Compose([
A.Resize(*self.image_size),
A.PadIfNeeded(*self.padded_image_size),
A.ToFloat(max_value=255),
ToTensorV2()
])
def __getitem__(self, index: int):
image_path = self._images_dir / self._file_names[index].replace('.png', '.jpg')
labels_path = self._labels_dir / self._file_names[index]
image = cv2.imread(str(image_path))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
labels = cv2.imread(str(labels_path))
labels = cv2.cvtColor(labels, cv2.COLOR_BGR2RGB)
labels = convert_rgb_to_ids(labels)
if self._augment:
transformed = self.augmentations(image=image, mask=labels)
else:
transformed = self.transforms(image=image, mask=labels)
return transformed['image'], transformed['mask'].type(torch.int64)
def __len__(self):
return len(self._file_names)
def _convert_mask(self, mask):
# (h, w, 3) -> (h, w)
result = np.zeros(mask.shape[:2], dtype=np.uint8)
result[np.where((mask == (0, 0, 255)).all(axis=2))] = 1
result[np.where((mask == (0, 255, 0)).all(axis=2))] = 2
result[np.where((mask == (255, 0, 0)).all(axis=2))] = 3
return result
def calculate_weights(self):
classes_sum = np.zeros((4,), dtype=np.uint64)
for file_name in tqdm(self._file_names):
labels_path = self._labels_dir / file_name
labels = cv2.imread(str(labels_path))
labels = cv2.cvtColor(labels, cv2.COLOR_BGR2RGB)
labels = self._convert_mask(labels)
histogram, _ = np.histogram(labels.flatten(), bins=4, range=(0, 4))
classes_sum += histogram.astype(np.uint64)
weights = 1 / classes_sum
weights /= np.sum(weights)
return weights
###Output
_____no_output_____
###Markdown
Loading and splitting the training data
###Code
from sklearn.model_selection import train_test_split
base_path = Path('/content/LunarSeg/train')
train_names = sorted([path.name for path in (base_path / 'masks').iterdir()])
train_names, val_names = train_test_split(train_names, test_size=0.15, random_state=42)
train_dataset = LunarDataset(base_path, train_names, augment=True)
val_dataset = LunarDataset(base_path, val_names)
###Output
_____no_output_____
###Markdown
Trying a loss function with inverted class weights
###Code
train_dataset.calculate_weights()
weights = np.array([0.07004456, 0.02316237, 0.66572621, 0.24106686], dtype=np.float32)
###Output
_____no_output_____
###Markdown
Trying a loss function with Dice loss
###Code
class MultiClassDiceLoss(nn.Module):
def __init__(self, smooth = 1.0):
super().__init__()
self._smooth = smooth
def forward(self, preds, ground_truth):
preds = torch.softmax(preds, dim=1)
num_classes = preds.shape[1]
dice_sum = torch.tensor(0.0, dtype=torch.float32, device=preds.device)
for class_id in range(num_classes):
class_preds = preds[:, class_id].reshape(-1)
class_ground_truth = (ground_truth == class_id).view(-1)
tp = (class_preds * class_ground_truth).sum()
class_dice = 1 - (2 * tp + self._smooth) / (class_preds.sum() + class_ground_truth.sum() + self._smooth)
dice_sum += class_dice
return dice_sum / num_classes
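# Quick illustrative sanity check on random tensors (not real data); the shapes below are
# arbitrary assumptions: (batch=2, classes=4, H=8, W=8) logits and integer class labels.
_dummy_logits = torch.randn(2, 4, 8, 8)
_dummy_labels = torch.randint(0, 4, (2, 8, 8))
print(MultiClassDiceLoss()(_dummy_logits, _dummy_labels))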
###Output
_____no_output_____
###Markdown
Segmenter class
###Code
class Segmenter(pl.LightningModule):
def __init__(self):
super().__init__()
self.network = Unet(encoder_name='resnet50', classes=4)
# self.loss_function = torch.nn.CrossEntropyLoss()
self.loss_function = MultiClassDiceLoss()
# self.loss_function = nn.CrossEntropyLoss(
# weight=torch.from_numpy(weights)
# )
metrics = torchmetrics.MetricCollection([
torchmetrics.Precision(num_classes=4, average='macro', mdmc_average='samplewise'),
torchmetrics.Recall(num_classes=4, average='macro', mdmc_average='samplewise'),
torchmetrics.F1Score(num_classes=4, average='macro', mdmc_average='samplewise'),
torchmetrics.Accuracy(num_classes=4, average='macro', mdmc_average='samplewise')
])
self.train_metrics = metrics.clone('train_')
self.val_metrics = metrics.clone('val_')
def forward(self, x):
return self.network(x)
def training_step(self, batch, batch_idx):
inputs, labels = batch
outputs = self(inputs)
loss = self.loss_function(outputs, labels)
self.log('train_loss', loss)
outputs = torch.softmax(outputs, dim=1)
self.log_dict(self.train_metrics(outputs, labels))
return loss
def validation_step(self, batch, batch_idx):
inputs, labels = batch
outputs = self(inputs)
loss = self.loss_function(outputs, labels)
self.log('val_loss', loss, prog_bar=True)
outputs = torch.softmax(outputs, dim=1)
self.log_dict(self.val_metrics(outputs, labels))
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-3)
segmenter = Segmenter()
model_checkpoint = pl.callbacks.ModelCheckpoint(dirpath='/content/checkpoints')
early_stopping = pl.callbacks.EarlyStopping(monitor='val_loss', patience=10)
logger = pl.loggers.NeptuneLogger(
api_key='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiIzOWI2ZGJmZi1hNTVjLTQ4NmQtODBmOS00MDdkYWMyM2JhOGYifQ==',
project='LunarSeg'
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, num_workers=2)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=8, num_workers=2)
trainer = pl.Trainer(logger=logger, callbacks=[model_checkpoint, early_stopping], gpus=1, max_epochs=100)
trainer.fit(segmenter, train_dataloaders=train_loader, val_dataloaders=val_loader)
logger.run.stop()
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
| Name | Type | Params
-----------------------------------------------------
0 | network | Unet | 32.5 M
1 | loss_function | MultiClassDiceLoss | 0
2 | train_metrics | MetricCollection | 0
3 | val_metrics | MetricCollection | 0
-----------------------------------------------------
32.5 M Trainable params
0 Non-trainable params
32.5 M Total params
130.086 Total estimated model params size (MB)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/callbacks/model_checkpoint.py:631: UserWarning: Checkpoint directory /content/checkpoints exists and is not empty.
rank_zero_warn(f"Checkpoint directory {dirpath} exists and is not empty.")
###Markdown
Loading the data for prediction
###Code
!wget "https://chmura.put.poznan.pl/s/MLk1k6RWWQQuOXs/download?path=%2F&files=test.tar.xz" -O test.tar.xz
!tar xf test.tar.xz
!rm test.tar.xz
device = torch.device('cuda')
segmenter = Segmenter.load_from_checkpoint(model_checkpoint.best_model_path).to(device) # load the best weights from training
segmenter = segmenter.eval()
import cv2
input_transforms = val_dataset.transforms
output_transforms = A.Compose([
A.CenterCrop(*val_dataset.image_size),
A.Resize(720, 1280, interpolation=cv2.INTER_NEAREST)
])
test_base_path = Path('/content/LunarSeg/test')
predictions_path = Path('/content/LunarSeg/test/predictions')
predictions_path.mkdir(exist_ok=True, parents=True)
for test_image_path in (test_base_path / 'images').iterdir():
image = cv2.imread(str(test_image_path))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = input_transforms(image=image)['image'][None, ...]
with torch.no_grad():
prediction = segmenter(image.to(device)).cpu().squeeze().argmax(dim=0).numpy()
prediction = convert_ids_to_rgb(prediction)
prediction = cv2.cvtColor(prediction, cv2.COLOR_RGB2BGR)
prediction = output_transforms(image=prediction)['image']
cv2.imwrite(str(predictions_path / f'{test_image_path.stem}.png'), prediction)
# predictions_path = Path('/content/LunarSeg/test/predictions_reverse_weights')
predictions_path = Path('/content/LunarSeg/test/predictions_dice')
# predictions_path = Path('/content/drive/MyDrive/Automatyka/mgr/predictions')
import requests
import pickle
import zlib
from multiprocessing.dummy import Pool as ThreadPool
sum_result = 0
def calculate_score(prediction_path: Path):
prediction = cv2.imread(str(prediction_path))
prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)
response = requests.post(f'http://zpo.dpieczynski.pl/{prediction_path.stem}', data=zlib.compress(pickle.dumps(prediction)))
if response.status_code == 200:
result = response.json()
global sum_result
sum_result += float(str(result)[6:-1])
return f'{prediction_path.name} {result}'
else:
return f'Error processing prediction {prediction_path.name}: {response.text}'
return None
i = 0
with ThreadPool(processes=16) as pool:
for result in pool.imap_unordered(calculate_score, predictions_path.iterdir()):
i += 1
print(i)
print(sum_result / i)
from google.colab import drive
drive.mount('/content/drive')
!cp -r '/content/LunarSeg/test/predictions_reverse_weights' '/content/drive/MyDrive/Automatyka/mgr/predictions_reverse_weights'
###Output
_____no_output_____ |
jupyterhub/notebooks/pmml/pmml_balancescale/04_PredictModel.ipynb | ###Markdown
Predict with Model Init Model
###Code
%%bash
pio init-model \
--model-server-url http://prediction-pmml.community.pipeline.io/ \
--model-type pmml \
--model-namespace default \
--model-name pmml_balancescale \
--model-version v1 \
--model-path .
###Output
_____no_output_____
###Markdown
Predict with Model (CLI)
###Code
%%bash
pio predict \
--model-test-request-path ./data/test_request.json
###Output
_____no_output_____
###Markdown
Predict Many. This is a mini load test to provide instant feedback on relative performance.
###Code
%%bash
pio predict_many \
--model-test-request-path ./data/test_request.json \
--num-iterations 5
###Output
_____no_output_____
###Markdown
Predict with Model (REST)
###Code
import requests
model_type = 'pmml'
model_namespace = 'default'
model_name = 'pmml_balancescale'
model_version = 'v1'
deploy_url = 'http://prediction-%s.community.pipeline.io/api/v1/model/predict/%s/%s/%s/%s' % (model_type, model_type, model_namespace, model_name, model_version)
with open('./data/test_request.json', 'rb') as fh:
model_input_binary = fh.read()
response = requests.post(url=deploy_url,
data=model_input_binary,
timeout=30)
print("Success! %s" % response.text)
###Output
_____no_output_____ |
src/data_sorting/compare_microarray_RNAseq.ipynb | ###Markdown
Identify how many promoters are potentially bidirectional in each promoter category (i.e. potentially overlapping promoters where the upstream gene is positioned in the opposite direction and is less than 2000 bp away from the TSS).
###Code
overlapping_promoters_bed = f'../../data/output/{file_names}/overlapping_promoters.bed'
overlappingproms_df = pd.read_table(overlapping_promoters_bed, sep='\t',header=None)
cols = ['chr', 'start', 'stop', 'numberoverlapping', 'gene1','gene2']
overlappingproms_df.columns = cols
#select genes present in both czechowski gene set and overlapping
czechowski_overlappingproms = czechowski_df[czechowski_df.AGI.isin(overlappingproms_df.gene1) | czechowski_df.AGI.isin(overlappingproms_df.gene2)]
len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'constitutive'])
len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'variable'])
len(czechowski_overlappingproms[czechowski_overlappingproms.gene_type == 'control'])
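# Illustrative summary (assumes czechowski_df holds the full promoter sets loaded earlier in
# this notebook): the fraction of each category that is potentially bidirectional/overlapping.
czechowski_overlappingproms.gene_type.value_counts() / czechowski_df.gene_type.value_counts()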
###Output
_____no_output_____ |
.ipynb_checkpoints/predicting_survival_rate_lung_cancer_surgery-checkpoint.ipynb | ###Markdown
Practice: predicting the survival rate of lung cancer surgery patients
###Code
import tensorflow as tf
tf.__version__
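# Illustrative sketch only; the dataset path and column layout below are assumptions
# (the classic thoracic surgery dataset has 17 feature columns and a binary survival label).
import numpy as np
# data = np.loadtxt('./dataset/ThoraricSurgery.csv', delimiter=',')
# X, y = data[:, 0:17], data[:, 17]
model = tf.keras.Sequential([
    tf.keras.layers.Dense(30, input_shape=(17,), activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# model.fit(X, y, epochs=100, batch_size=10)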
###Output
_____no_output_____ |
Machine Learning/8. Logistic Regression(Multi-class Classification) 2.ipynb | ###Markdown
Exercise Solution (Iris_Dataset)
###Code
from sklearn.datasets import load_iris # load_iris is one of the datasets available in sklearn for practice
iris = load_iris() # storing the dataset
type(iris)
dir(iris)
iris.data[0:5] # Every individual sample has sepal length/width and petal length/width
iris.feature_names
iris.target # Target is our desired dependent variable
iris.target_names[0:3]
iris.filename
len(iris.data)
###Output
_____no_output_____
###Markdown
Based on the above exploration of the dataset, we can use iris.data as the independent variable and iris.target as the dependent variable. Train/Test Split
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris.data,iris.target, test_size=0.2)
X_test
y_test
len(X_train)
###Output
_____no_output_____
###Markdown
Building Model
###Code
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train) # Training
###Output
_____no_output_____
###Markdown
Results
###Code
y_test # This is the target that the model should predict
model.predict(X_test)
model.score(X_test,y_test) # So, accuracy is 96.66%
model.predict([[7.7, 3.8, 6.7, 2.2]])
###Output
_____no_output_____
###Markdown
Confusion Matrix
###Code
y_predicted = model.predict(X_test) # Predicted values for my all test data
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_predicted) # Truth Values & Predicted Values as parameter
cm
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize = (10,7))
sns.heatmap(cm, annot=True) # Using Heatmap
plt.xlabel('Predicted')
plt.ylabel('Truth')
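# Illustrative complement to the confusion matrix: per-class precision/recall/F1 scores.
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predicted, target_names=iris.target_names))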
###Output
_____no_output_____ |
assembly_eda_studies/change_point_bayes.ipynb | ###Markdown
Bayesian Changepoint Detection in Python This code computes the probability of changepoints in a time series. In this notebook I show how you can use it. First, let's generate some data:
###Code
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline
%load_ext autoreload
%autoreload 2
def generate_normal_time_series(num, minl=50, maxl=1000):
data = np.array([], dtype=np.float64)
partition = np.random.randint(minl, maxl, num)
for p in partition:
mean = np.random.randn()*10
var = np.random.randn()*1
if var < 0:
var = var * -1
tdata = np.random.normal(mean, var, p)
data = np.concatenate((data, tdata))
return data
data = generate_normal_time_series(7, 50, 200)
data.shape
###Output
_____no_output_____
###Markdown
Let's have a look at what the data looks like:
###Code
fig, ax = plt.subplots(figsize=[16, 12])
ax.plot(data)
###Output
_____no_output_____
###Markdown
Offline Changepoint Detection Let's compute the probability of changepoints at each time step. We need two things for that: first, a prior for how probable it is to have two successive changepoints at distance `t`; second, a model of the likelihood of the data in a sequence `[s, t]`, given that there is *no* changepoint in this sequence. For this example we assume a uniform prior over the length of sequences (`const_prior`) and a piecewise Gaussian model (`gaussian_obs_log_likelihood`).
###Code
import cProfile
import bayesian_changepoint_detection.offline_changepoint_detection as offcd
from functools import partial
Q, P, Pcp = offcd.offline_changepoint_detection(data, partial(offcd.const_prior, l=(len(data)+1)), offcd.gaussian_obs_log_likelihood, truncate=-40)
sum_exp=np.exp(Pcp).sum(0)
sum_exp.shape
sum_exp
###Output
_____no_output_____
###Markdown
The `offline_changepoint_detection()` function returns three things: `Q[t]`, the log-likelihood of the data `[t, n]`; `P[t, s]`, the log-likelihood of the data sequence `[t, s]`, given there is no changepoint between `t` and `s`; and `Pcp[i, t]`, the log-likelihood that the `i`-th changepoint is at time step `t`. To actually get the probability of a changepoint at time step `t`, sum these probabilities over `i`. How does that look for our toy data?
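Before plotting, a quick, illustrative way to turn the summed changepoint probabilities into discrete locations is to threshold them; the 0.5 cutoff below is an arbitrary assumption, not part of the method:
###Code
changepoint_prob = np.exp(Pcp).sum(0)
likely_changepoints = np.where(changepoint_prob > 0.5)[0]
likely_changepoints
###Output
_____no_output_____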
###Code
fig, ax = plt.subplots(figsize=[18, 16])
ax = fig.add_subplot(2, 1, 1)
ax.plot(data[:])
ax = fig.add_subplot(2, 1, 2, sharex=ax)
ax.plot(np.exp(Pcp).sum(0))
###Output
_____no_output_____
###Markdown
That works pretty well, but it is somewhat slow. It's possible to speed it up by truncating a sum in the algorithm; however, that sometimes leads to $\infty$ values. Set the `truncate` parameter to e.g. `-10` to test that out. To understand what is happening, have a look at the following papers: [1] Paul Fearnhead, Exact and Efficient Bayesian Inference for Multiple Changepoint Problems, Statistics and Computing 16.2 (2006), pp. 203--213. [2] Xuan Xiang, Kevin Murphy, Modeling Changing Dependency Structure in Multivariate Time Series, ICML (2007), pp. 1055--1062. Online Changepoint Detection Let's assume the data points come in one after another and not as these nice batches. During the process you want to know whether the new point has the same hyperparameters or different ones, so you need online changepoint detection. Happily there is one, although its interface is somewhat suboptimal so far, in that it still expects batches of data and just assumes they drop in over time... I will change that at some point.
###Code
import bayesian_changepoint_detection.online_changepoint_detection as oncd
from functools import partial
R, maxes = oncd.online_changepoint_detection(data, partial(oncd.constant_hazard, 250), oncd.StudentT(0.1, .01, 1, 0))
###Output
_____no_output_____
###Markdown
The online version computes slightly different things. For each time step it returns the probability distribution over the length of the last sequence. E.g. `R[7, 3]` is the probability at time step `7` that the last sequence is already `3` time steps long. It also returns the MAP estimate at each time step for convenience. To plot the distributions we use a grey-scale colormap: black is zero, white is one. We also plot the probability at each time step for a sequence length of 0, i.e. the probability that the current time step is a changepoint. Because it's very hard to correctly evaluate a change after a single sample of a new distribution, we can instead "wait" for `Nw` samples and evaluate the probability of a change having happened `Nw` samples prior.
###Code
import matplotlib.cm as cm
fig, ax = plt.subplots(figsize=[18, 16])
ax = fig.add_subplot(3, 1, 1)
ax.plot(data)
ax = fig.add_subplot(3, 1, 2, sharex=ax)
sparsity = 5 # only plot every fifth data for faster display
ax.pcolor(np.array(range(0, len(R[:,0]), sparsity)),
np.array(range(0, len(R[:,0]), sparsity)),
-np.log(R[0:-1:sparsity, 0:-1:sparsity]),
cmap=cm.Greys, vmin=0, vmax=30)
ax = fig.add_subplot(3, 1, 3, sharex=ax)
Nw=10;
ax.plot(R[Nw,Nw:-1])
###Output
_____no_output_____
###Markdown
Well, not bad, considering how much faster it is (if you can afford to wait for those extra `Nw` samples). To understand the whole algorithm, look at: [1] Ryan P. Adams, David J.C. MacKay, Bayesian Online Changepoint Detection, arXiv 0710.3742 (2007). There you will also find a Matlab version, which this code is based on.
###Code
data = generate_normal_time_series(7, 50, 200)
%timeit Q, P, Pcp = offcd.offline_changepoint_detection(data, partial(offcd.const_prior, l=(len(data)+1)), offcd.gaussian_obs_log_likelihood)
%timeit R, maxes = oncd.online_changepoint_detection(data, partial(oncd.constant_hazard, 250), oncd.StudentT(10, .03, 1, 0))
###Output
1 loops, best of 3: 474 ms per loop
|
RL-Quadcopter/notebooks/RL-Quadcopter.ipynb | ###Markdown
Project: Train a Quadcopter How to FlyDesign an agent that can fly a quadcopter, and then train it using a reinforcement learning algorithm of your choice! Try to apply the techniques you have learnt, but also feel free to come up with innovative ideas and test them. Instructions> **Note**: If you haven't done so already, follow the steps in this repo's README to install ROS, and ensure that the simulator is running and correctly connecting to ROS.When you are ready to start coding, take a look at the `quad_controller_rl/src/` (source) directory to better understand the structure. Here are some of the salient items:- `src/`: Contains all the source code for the project. - `quad_controller_rl/`: This is the root of the Python package you'll be working in. - ... - `tasks/`: Define your tasks (environments) in this sub-directory. - `__init__.py`: When you define a new task, you'll have to import it here. - `base_task.py`: Generic base class for all tasks, with documentation. - `takeoff.py`: This is the first task, already defined for you, and set to run by default. - ... - `agents/`: Develop your reinforcement learning agents here. - `__init__.py`: When you define a new agent, you'll have to import it here, just like tasks. - `base_agent.py`: Generic base class for all agents, with documentation. - `policy_search.py`: A sample agent has been provided here, and is set to run by default. - ... TasksOpen up the base class for tasks, `BaseTask`, defined in `tasks/base_task.py`:```pythonclass BaseTask: """Generic base class for reinforcement learning tasks.""" def __init__(self): """Define state and action spaces, initialize other task parameters.""" pass def set_agent(self, agent): """Set an agent to carry out this task; to be called from update.""" self.agent = agent def reset(self): """Reset task and return initial condition.""" raise NotImplementedError def update(self, timestamp, pose, angular_velocity, linear_acceleration): """Process current data, call agent, return action and done flag.""" raise NotImplementedError ```All tasks must inherit from this class to function properly. You will need to override the `reset()` and `update()` methods when defining a task, otherwise you will get `NotImplementedError`'s. Besides these two, you should define the state (observation) space and the action space for the task in the constructor, `__init__()`, and initialize any other variables you may need to run the task.Now compare this with the first concrete task `Takeoff`, defined in `tasks/takeoff.py`:```pythonclass Takeoff(BaseTask): """Simple task where the goal is to lift off the ground and reach a target height.""" ...```In `__init__()`, notice how the state and action spaces are defined using [OpenAI Gym spaces](https://gym.openai.com/docs/spaces), like [`Box`](https://github.com/openai/gym/blob/master/gym/spaces/box.py). These objects provide a clean and powerful interface for agents to explore. For instance, they can inspect the dimensionality of a space (`shape`), ask for the limits (`high` and `low`), or even sample a bunch of observations using the `sample()` method, before beginning to interact with the environment. We also set a time limit (`max_duration`) for each episode here, and the height (`target_z`) that the quadcopter needs to reach for a successful takeoff.The `reset()` method is meant to give you a chance to reset/initialize any variables you need in order to prepare for the next episode. You do not need to call it yourself; it will be invoked externally. 
And yes, it will be called once before each episode, including the very first one. Here `Takeoff` doesn't have any episode variables to initialize, but it must return a valid _initial condition_ for the task, which is a tuple consisting of a [`Pose`](http://docs.ros.org/api/geometry_msgs/html/msg/Pose.html) and [`Twist`](http://docs.ros.org/api/geometry_msgs/html/msg/Twist.html) object. These are ROS message types used to convey the pose (position, orientation) and velocity (linear, angular) you want the quadcopter to have at the beginning of an episode. You may choose to supply the same initial values every time, or change it a little bit, e.g. `Takeoff` drops the quadcopter off from a small height with a bit of randomness.> **Tip**: Slightly randomized initial conditions can help the agent explore the state space faster.Finally, the `update()` method is perhaps the most important. This is where you define the dynamics of the task and engage the agent. It is called by a ROS process periodically (roughly 30 times a second, by default), with current data from the simulation. A number of arguments are available: `timestamp` (you can use this to check for timeout, or compute velocities), `pose` (position, orientation of the quadcopter), `angular_velocity`, and `linear_acceleration`. You do not have to include all these variables in every task, e.g. `Takeoff` only uses pose information, and even that requires a 7-element state vector.Once you have prepared the state you want to pass on to your agent, you will need to compute the reward, and check whether the episode is complete (e.g. agent crossed the time limit, or reached a certain height). Note that these two things (`reward` and `done`) are based on actions that the agent took in the past. When you are writing your own agents, you have to be mindful of this.Now you can pass in the `state`, `reward` and `done` values to the agent's `step()` method and expect an action vector back that matches the action space that you have defined, in this case a `Box(6,)`. After checking that the action vector is non-empty, and clamping it to the space limits, you have to convert it into a ROS `Wrench` message. The first 3 elements of the action vector are interpreted as force in x, y, z directions, and the remaining 3 elements convey the torque to be applied around those axes, respectively.Return the `Wrench` object (or `None` if you don't want to take any action) and the `done` flag from your `update()` method (note that when `done` is `True`, the `Wrench` object is ignored, so you can return `None` instead). This will be passed back to the simulation as a control command, and will affect the quadcopter's pose, orientation, velocity, etc. You will be able to gauge the effect when the `update()` method is called in the next time step. AgentsReinforcement learning agents are defined in a similar way. Open up the generic agent class, `BaseAgent`, defined in `agents/base_agent.py`, and the sample agent `RandomPolicySearch` defined in `agents/policy_search.py`. They are actually even simpler to define - you only need to implement the `step()` method that is discussed above. It needs to consume `state` (vector), `reward` (scalar value) and `done` (boolean), and produce an `action` (vector). The state and action vectors must match the respective space indicated by the task. And that's it!Well, that's just to get things working correctly! 
The sample agent given `RandomPolicySearch` uses a very simplistic linear policy to directly compute the action vector as a dot product of the state vector and a matrix of weights. Then, it randomly perturbs the parameters by adding some Gaussian noise, to produce a different policy. Based on the average reward obtained in each episode ("score"), it keeps track of the best set of parameters found so far, how the score is changing, and accordingly tweaks a scaling factor to widen or tighten the noise.
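The snippet below is only a rough, illustrative sketch of that idea (a linear policy plus Gaussian parameter noise); it is not the actual `RandomPolicySearch` code, and the dimensions and noise scale are assumptions:
###Code
import numpy as np
# Hypothetical sizes for the Takeoff task: a 7-element state and a 6-element action.
state_size, action_size = 7, 6
weights = np.random.normal(size=(state_size, action_size))  # linear policy parameters
def act(state, weights):
    # The action is simply the dot product of the state vector and the weight matrix.
    return np.dot(state, weights)
def perturb(weights, noise_scale=0.1):
    # Random policy search: propose a slightly different policy by adding Gaussian noise.
    return weights + noise_scale * np.random.normal(size=weights.shape)
print(act(np.ones(state_size), weights))
###Output
_____no_output_____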
###Code
%%html
<div style="width: 100%; text-align: center;">
<h3>Teach a Quadcopter How to Tumble</h3>
<video poster="images/quadcopter_tumble.png" width="640" controls muted>
<source src="images/quadcopter_tumble.mp4" type="video/mp4" />
<p>Video: Quadcopter tumbling, trying to get off the ground</p>
</video>
</div>
###Output
_____no_output_____
###Markdown
Obviously, this agent performs very poorly on the task. It does manage to move the quadcopter, which is good, but instead of a stable takeoff, it often leads to dizzying cartwheels and somersaults! And that's where you come in - your first _task_ is to design a better agent for this takeoff task. Instead of messing with the sample agent, create new file in the `agents/` directory, say `policy_gradients.py`, and define your own agent in it. Remember to inherit from the base agent class, e.g.:```pythonclass DDPG(BaseAgent): ...```You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode_vars()`, etc.).> **Note**: This setup may look similar to the common OpenAI Gym paradigm, but there is one small yet important difference. Instead of the agent calling a method on the environment (to execute an action and obtain the resulting state, reward and done value), here it is the task that is calling a method on the agent (`step()`). If you plan to store experience tuples for learning, you will need to cache the last state ($S_{t-1}$) and last action taken ($A_{t-1}$), then in the next time step when you get the new state ($S_t$) and reward ($R_t$), you can store them along with the `done` flag ($\left\langle S_{t-1}, A_{t-1}, R_t, S_t, \mathrm{done?}\right\rangle$).When an episode ends, the agent receives one last call to the `step()` method with `done` set to `True` - this is your chance to perform any cleanup/reset/batch-learning (note that no reset method is called on an agent externally). The action returned on this last call is ignored, so you may safely return `None`. The next call would be the beginning of a new episode.One last thing - in order to run your agent, you will have to edit `agents/__init__.py` and import your agent class in it, e.g.:```pythonfrom quad_controller_rl.agents.policy_gradients import DDPG```Then, while launching ROS, you will need to specify this class name on the commandline/terminal:```bashroslaunch quad_controller_rl rl_controller.launch agent:=DDPG```Okay, now the first task is cut out for you - follow the instructions below to implement an agent that learns to take off from the ground. For the remaining tasks, you get to define the tasks as well as the agents! Use the `Takeoff` task as a guide, and refer to the `BaseTask` docstrings for the different methods you need to override. Use some debug print statements to understand the flow of control better. And just like creating new agents, new tasks must inherit `BaseTask`, they need be imported into `tasks/__init__.py`, and specified on the commandline when running:```bashroslaunch quad_controller_rl rl_controller.launch task:=Hover agent:=DDPG```> **Tip**: You typically need to launch ROS and then run the simulator manually. But you can automate that process by either copying/symlinking your simulator to `quad_controller_rl/sim/DroneSim` (`DroneSim` must be an executable/link to one), or by specifying it on the command line, as follows:> > ```bash> roslaunch quad_controller_rl rl_controller.launch task:=Hover agent:=DDPG sim:=> ``` Task 1: Takeoff Implement takeoff agentTrain an agent to successfully lift off from the ground and reach a certain threshold height. Develop your agent in a file under `agents/` as described above, implementing at least the `step()` method, and any other supporting methods that might be necessary. 
You may use any reinforcement learning algorithm of your choice (note that the action space consists of continuous variables, so that may somewhat limit your choices).The task has already been defined (in `tasks/takeoff.py`), which you should not edit. The default target height (Z-axis value) to reach is 10 units above the ground. And the reward function is essentially the negative absolute distance from that set point (upto some threshold). An episode ends when the quadcopter reaches the target height (x and y values, orientation, velocity, etc. are ignored), or when the maximum duration is crossed (5 seconds). See `Takeoff.update()` for more details, including episode bonus/penalty.As you develop your agent, it's important to keep an eye on how it's performing. Build in a mechanism to log/save the total rewards obtained in each episode to file. Once you are satisfied with your agent's performance, return to this notebook to plot episode rewards, and answer the questions below. Plot episode rewardsPlot the total rewards obtained in each episode, either from a single run, or averaged over multiple runs.
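For the episode-reward log, one simple option (an assumption, not a mechanism provided by the project) is a tiny helper that appends one row per episode to a `stats_*.csv` file, matching what the plotting cell below expects to read:

```python
# Sketch only: the out/ directory and stats_<timestamp>.csv naming are assumptions
# chosen to line up with the file loaded in the plotting cell below.
import os
from datetime import datetime
import pandas as pd

class EpisodeLogger:
    def __init__(self, out_dir='../out'):
        os.makedirs(out_dir, exist_ok=True)
        stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        self.path = os.path.join(out_dir, 'stats_{}.csv'.format(stamp))
        self.episode = 0

    def log(self, total_reward):
        self.episode += 1
        row = pd.DataFrame([[self.episode, total_reward]],
                           columns=['episode', 'total_reward'])
        # append one row per episode, writing the header only for the first row
        row.to_csv(self.path, mode='a', index=False,
                   header=not os.path.isfile(self.path))
```

Calling something like `logger.log(self.total_reward)` from the agent whenever `done` is `True` is enough to produce a file of the kind loaded below.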
###Code
import os
os.getcwd()
# TODO: Read and plot episode rewards
import pandas as pd
df_stats = pd.read_csv('../out/stats_2018-02-13_23-50-28.csv')
df_stats[['total_reward']].plot(title="Episode Rewards")
###Output
_____no_output_____
###Markdown
**Q**: What algorithm did you use? Briefly discuss why you chose it for this task.**A**: Deep Q algorithm**Q**: Using the episode rewards plot, discuss how the agent learned over time.- Was it an easy task to learn or hard?- Was there a gradual learning curve, or an aha moment?- How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)**A**: hard to learn Task 2: Hover Implement hover agentNow, your agent must take off and hover at the specified set point (say, 10 units above the ground). Same as before, you will need to create an agent and implement the `step()` method (and any other supporting methods) to apply your reinforcement learning algorithm. You may use the same agent as before, if you think your implementation is robust, and try to train it on the new task. But then remember to store your previous model weights/parameters, in case your results were worth keeping. States and rewardsEven if you can use the same agent, you will need to create a new task, which will allow you to change the state representation you pass in, how you verify when the episode has ended (the quadcopter needs to hover for at least a few seconds), etc. In this hover task, you may want to pass in the target height as part of the state (otherwise how would the agent know where you want it to go?). You may also need to revisit how rewards are computed. You can do all this in a new task file, e.g. `tasks/hover.py` (remember to follow the steps outlined above to create a new task):```pythonclass Hover(BaseTask): ...```**Q**: Did you change the state representation or reward function? If so, please explain below what worked best for you, and why you chose that scheme. Include short code snippet(s) if needed.**A**: Implementation notes**Q**: Discuss your implementation below briefly, using the following questions as a guide:- What algorithm(s) did you try? What worked best for you?- What was your final choice of hyperparameters (such as $\alpha$, $\gamma$, $\epsilon$, etc.)?- What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc.**A**: Plot episode rewardsAs before, plot the episode rewards, either from a single run, or averaged over multiple runs. Comment on any changes in learning behavior.
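Coming back to the state/reward question above, a minimal sketch of a hover reward (the target-height argument, the error cap, and the velocity weight are all assumptions, not the project's reward function) might look like:

```python
def hover_reward(position_z, velocity_z, target_z=10.0):
    """Highest (zero) when the quad sits still at the target height."""
    height_error = min(abs(target_z - position_z), 20.0)   # capped distance penalty
    speed_penalty = 0.5 * abs(velocity_z)                  # discourage drifting up/down
    return -height_error - speed_penalty

print(hover_reward(10.0, 0.0))   # 0.0   -> perfect hover
print(hover_reward(7.0, 1.5))    # -3.75 -> too low and still moving
```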
###Code
# TODO: Read and plot episode rewards
###Output
_____no_output_____
###Markdown
Task 3: LandingWhat goes up, must come down! But safely! Implement landing agentThis time, you will need to edit the starting state of the quadcopter to place it at a position above the ground (at least 10 units). And change the reward function to make the agent learn to settle down _gently_. Again, create a new task for this (e.g. `Landing` in `tasks/landing.py`), and implement the changes. Note that you will have to modify the `reset()` method to return a position in the air, perhaps with some upward velocity to mimic a recent takeoff.Once you're satisfied with your task definition, create another agent or repurpose an existing one to learn this task. This might be a good chance to try out a different approach or algorithm. Initial condition, states and rewards**Q**: How did you change the initial condition (starting state), state representation and/or reward function? Please explain below what worked best for you, and why you chose that scheme. Were you able to build in a reward mechanism for landing gently?**A**: Implementation notes**Q**: Discuss your implementation below briefly, using the same questions as before to guide you.**A**: Plot episode rewardsAs before, plot the episode rewards, either from a single run, or averaged over multiple runs. This task is a little different from the previous ones, since you're starting in the air. Was it harder to learn? Why/why not?
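For the gentle-landing reward mentioned above, a minimal sketch (the weights, thresholds, and touchdown bonus are assumptions) could be:

```python
def landing_reward(position_z, velocity_z):
    """Reward getting close to the ground while keeping the descent slow."""
    height_penalty = abs(position_z)              # want z -> 0
    impact_penalty = 2.0 * max(-velocity_z, 0.0)  # punish fast downward motion
    reward = -height_penalty - impact_penalty
    if position_z < 0.5 and abs(velocity_z) < 0.5:
        reward += 10.0                            # bonus for a soft touchdown
    return reward

print(landing_reward(8.0, -1.0))   # -10.0 -> still high up, descending quickly
print(landing_reward(0.2, -0.2))   #   9.4 -> low and slow, bonus applies
```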
###Code
# TODO: Read and plot episode rewards
###Output
_____no_output_____
###Markdown
Task 4: CombinedIn order to design a complete flying system, you will need to incorporate all these basic behaviors into a single agent. Setup end-to-end taskThe end-to-end task we are considering here is simply to takeoff, hover in-place for some duration, and then land. Time to create another task! But think about how you might go about it. Should it be one meta-task that activates appropriate sub-tasks, one at a time? Or would a single combined task with something like waypoints be easier to implement? There is no right or wrong way here - experiment and find out what works best (and then come back to answer the following).**Q**: What setup did you ultimately go with for this combined task? Explain briefly.**A**: Implement combined agentUsing your end-to-end task, implement the combined agent so that it learns to takeoff (at least 10 units above ground), hover (again, at least 10 units above ground), and gently come back to ground level. Combination scheme and implementation notesJust like the task itself, it's up to you whether you want to train three separate (sub-)agents, or a single agent for the complete end-to-end task.**Q**: What did you end up doing? What challenges did you face, and how did you resolve them? Discuss any other implementation notes below.**A**: Plot episode rewardsAs before, plot the episode rewards, either from a single run, or averaged over multiple runs.
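One possible way to organize the combined task (a sketch; the phase durations and target heights are assumptions) is a simple time-based schedule that picks the current sub-behaviour and set-point height from elapsed episode time:

```python
def phase_and_target(t, takeoff_end=5.0, hover_end=10.0):
    """Pick the current sub-behaviour and set-point height from elapsed time t."""
    if t < takeoff_end:
        return 'takeoff', 10.0     # climb towards 10 units
    elif t < hover_end:
        return 'hover', 10.0       # hold the set point
    return 'land', 0.0             # descend back to the ground

for t in (2.0, 7.0, 12.0):
    print(t, phase_and_target(t))
```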
###Code
# TODO: Read and plot episode rewards
###Output
_____no_output_____ |
jupyter notebooks/Explore classes and properties of Schema.ipynb | ###Markdown
How to use the biothings_schema package to explore classes and properties
###Code
# load python package
from biothings_schema import Schema
# load schema
se = Schema("https://raw.githubusercontent.com/data2health/schemas/biothings/biothings/biothings_curie_kevin.jsonld")
###Output
_____no_output_____
###Markdown
Find all classes defined in the Schema
###Code
se.list_all_classes()
###Output
_____no_output_____
###Markdown
Access class using different ID formats 1. Access using URI
###Code
scls = se.get_class("http://schema.biothings.io/Gene")
scls
###Output
_____no_output_____
###Markdown
2. Access using CURIE
###Code
scls = se.get_class("bts:Gene")
scls
###Output
_____no_output_____
###Markdown
3. Access using label
###Code
scls = se.get_class("Gene")
scls
###Output
_____no_output_____
###Markdown
Find the URI of a specific class
###Code
scls = se.get_class("Gene")
scls.uri
###Output
_____no_output_____
###Markdown
Find the label of a specific class
###Code
scls = se.get_class("Gene")
scls.label
###Output
_____no_output_____
###Markdown
Find the CURIE of a specific class
###Code
scls = se.get_class("Gene")
scls.name
###Output
_____no_output_____
###Markdown
Response if class is not defined
###Code
scls = se.get_class("dd")
scls.uri
scls.name
scls.label
###Output
_____no_output_____
###Markdown
Find all parents of a specific class
###Code
# find parents of "Gene" class
scls = se.get_class("Gene")
scls.parent_classes
###Output
_____no_output_____
###Markdown
Find all direct children of a specific class
###Code
# find direct children of "MolecularEntity" class
scls = se.get_class("MolecularEntity")
scls.child_classes
###Output
_____no_output_____
###Markdown
Find all descendants of a specific class
###Code
# find descendants of "MolecularEntity" class
scls = se.get_class("MolecularEntity")
scls.descendant_classes
###Output
_____no_output_____
###Markdown
Find properties specifically defined for a class
###Code
# find properties specifically defined for "Gene" class
scls = se.get_class("Gene")
scls.list_properties(group_by_class=False)
###Output
_____no_output_____
###Markdown
Find all properties related to a class (including the parents' properties)
###Code
# find all properties related to "Gene"
scls = se.get_class("Gene")
scls.list_properties(class_specific=False)
###Output
_____no_output_____
###Markdown
Explore where a class is used
###Code
# find where "GenomicEntity" class is used
scls = se.get_class("GenomicEntity")
scls.used_by()
###Output
_____no_output_____
###Markdown
Explore all information related to the class Including:1. Related properties2. Parent classes3. Direct child classes4. Where the class is used
###Code
# explore all information related to "GenomicEntity" class
scls = se.get_class("GenomicEntity")
scls.describe()
###Output
_____no_output_____
###Markdown
Access property using different ID formats 1. Access using URI
###Code
sp = se.get_property("http://schema.biothings.io/ensembl")
print(sp)
###Output
bts:ensembl
###Markdown
2. Access using CURIE
###Code
sp = se.get_property("bts:ensembl")
print(sp)
###Output
bts:ensembl
###Markdown
3. Access using label
###Code
sp = se.get_property("ensembl")
print(sp)
###Output
bts:ensembl
###Markdown
Find all parents of a specific property
###Code
# find parents of "ensembl" property
sp = se.get_property("ensembl")
sp.parent_properties
###Output
_____no_output_____
###Markdown
Find all children of a specific property
###Code
# find parents of "identifier" property
sp = se.get_property("identifier")
sp.child_properties
###Output
_____no_output_____
###Markdown
Find property description
###Code
# description of "ensembl" property
sp = se.get_property("ensembl")
sp.description
###Output
_____no_output_____
###Markdown
Explore information about a property Includes:1. ID2. Description3. Domain (which class(es) use this property)4. Range (the value type)5. Parent properties6. Child properties
###Code
# explore "ensembl" property
sp = se.get_property("ensembl")
sp.describe()
###Output
_____no_output_____ |
03CodingExercise7Dictionaries.ipynb | ###Markdown
Create a dictionary where all the keys are strings, and all values are integers. For example {'Monday' : 19,'Tuesday' : 20} Just write the dictionary on a single line, don't assign a variable name to the dictionary.
###Code
{'Coursera' : 1,'Udemy' : 2,'EdX' : 3}
###Output
_____no_output_____ |
Section 4/Trading_system.ipynb | ###Markdown
Trading System with Moving Average Signals
###Code
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import matplotlib.pyplot as plt
%matplotlib inline
gld = pdr.get_data_yahoo('GLD', '2013-01-01')
gld.drop('Adj Close', axis=1, inplace=True)
gld['9-day'] = gld['Close'].rolling(9).mean()
gld['21-day'] = gld['Close'].rolling(21).mean()
gld['Change'] = np.log(gld.Close / gld.Close.shift())
gld.tail()
with plt.style.context('ggplot'):
plt.figure(figsize=(8,6))
plt.plot(gld.Close[-120:])
plt.plot(gld['9-day'][-120:])
plt.plot(gld['21-day'][-120:])
plt.legend(loc=2)
gld['position'] = np.where(gld['9-day'] > gld['21-day'], 1 , 0)
gld['position'] = np.where(gld['9-day'] < gld['21-day'], -1, gld['position'])
gld.tail(10)
gld['system'] = gld['position'] * gld['Change']
gld[['Change', 'system']].cumsum().plot()
###Output
_____no_output_____ |
content/python/pandas/.ipynb_checkpoints/Pandas_basic-Copy1-checkpoint.ipynb | ###Markdown
---title: "Pandas"author: "Palaniappan S"date: 2020-08-11description: "-"type: technical_notedraft: false---
###Code
import pandas as pd
df = pd.read_csv('glass.csv')
df
df.head(5)
df.tail(5)
df.info()
df.shape
df.columns
df.isnull()
df.isnull().sum()
###Output
_____no_output_____ |
9_not_in_final/ETL_trinity.ipynb | ###Markdown
make data db
###Code
# load file pickle
plateDB = pickle.load(open(os.path.join(dir_save, 'file_summary_mwt.pickle'),'rb'))
# get paths with trinity.id.dat
pMWT = plateDB.index[~plateDB[('filepath','trinity.id.dat')].isna()].values
del plateDB
# make dataframe
MWTDB = pd.DataFrame({'mwtpath':pMWT})
# take a look at the db to see if any missing trinity pickle
# instantiate
report_capture = np.zeros(len(pMWT),dtype='bool')
for plateid, pPlate in enumerate(MWTDB['mwtpath']):
# get expected apth to trinity data
pfile = os.path.join(pPlate, 'trinity_all_worms.pickle')
# see if file exist
if os.path.exists(pfile):
report_capture[plateid] = True
else:
print(f'{plateid} does not exist', end='\r')
# report result
print(f'{np.sum(report_capture)}/{len(report_capture)} files exist')
# delete the plate that failed to concatenate trinity
MWTDB.drop(index=MWTDB.index[~report_capture].values, inplace=True)
###Output
_____no_output_____
###Markdown
Create MWTDB
###Code
# add paths to trinitu files
MWTDB['trinity_path'] = list(map(lambda x: os.path.join(x,'trinity_all_worms.pickle'), MWTDB['mwtpath']))
# reset index
MWTDB.reset_index(drop=True, inplace=True)
# extract experiment features
df = MWTDB['mwtpath'].str.split(pat='/', expand=True)
MWTDB['expname'] = df.iloc[:,4]
MWTDB['groupname'] = df.iloc[:,5]
MWTDB['platename'] = df.iloc[:,6]
# get number of rows per pickle file
# note some trinity files may not be converted to pickle files. Instead of checking availability,
# randomly choose 1.1M numbers and then use only the first 1M rows that have files
pickle_rows = np.zeros(MWTDB.shape[0], dtype='int')
for i, p in enumerate(MWTDB['trinity_path']):
if i%5==0:
print(f'getting row numbers from {i}th file', end='\r')
df = pd.read_pickle(p)
pickle_rows[i] = df.shape[0]
MWTDB['rows'] = pickle_rows
# define dropbox save folder, mkdir if not exist
pDropbox_home = '/Users/connylin/Dropbox/MWT/db'
pReplace = '/Volumes/COBOLT'
# replace path
MWTDB['mwtpath_dropbox'] = list(map(lambda p: p.replace(pReplace, pDropbox_home), MWTDB['mwtpath']))
MWTDB['trinity_path_dropbox'] = list(map(lambda p: p.replace(pReplace, pDropbox_home), MWTDB['trinity_path']))
# save database
pickle.dump(MWTDB, open(os.path.join(dir_save, 'MWTDB_trinity_N2400mM.pickle'),'wb'))
###Output
_____no_output_____
###Markdown
Data wrangling add labels to individual plate data
###Code
# take a sample to see if need per file processing
for ind in MWTDB.index.values:
# get path
ptrinity = MWTDB['trinity_path'].iloc[ind]
# load to dataframe
df = pickle.load(open(ptrinity,'rb'))
row_n_original = df.shape[0]
# check if the data already been cleaned
if any(df.columns=='mwtid_trdb'):
continue
# clean nan data
df.dropna(axis=0, inplace=True)
row_n_after = df.shape[0]
print(f'plateid {ind} dropped {row_n_original - row_n_after} rows to {row_n_after} rows', end='\r')
# add file path
df.insert(0,'mwtid_trdb', np.tile(MWTDB.index[ind], df.shape[0]))
# add group id (ethanol=1 vs no ethanol=0)
if MWTDB['groupname'][ind]=='N2':
df.insert(1,'etoh', np.tile(0, df.shape[0]))
else:
df.insert(1,'etoh', np.tile(1, df.shape[0]))
# save the file
pickle.dump(df, open(ptrinity,'wb'))
###Output
plateid 869 dropped 7112 rows to 162496 rowssss
###Markdown
concat all trinity datahttps://stackoverflow.com/questions/56012595/how-to-pickle-multiple-pandas-dataframes-and-concatenate-all-of-them-i```df = pd.concat([pd.read_pickle('/PATH/df/{}/{}.F.K.df'.format('train', f)).iloc[:, :100] for f in Files], axis=1)````a = [pd.read_pickle(p) for p in MWTDB['trinity_path'][:10]]`Issues:* Each csv is ~100MB * 800 = 80GB csv. My computer won't be able to open this file. Can I predict which tap number the worm is reacting to by it's behavior before and after the tap?* for wildtype* for ethanol vs non ethanol* for mutants?
###Code
# look at behavior and see if can predict which tap it is
MWTDB = pickle.load(open(os.path.join(dir_save, 'MWTDB_trinity_N2400mM.pickle'),'rb'))
###Output
_____no_output_____
###Markdown
approach 1 random 10 plates
###Code
# choose 10 plates of 0mM and 10 plates of 400mM to look at
np.random.seed(318)
ind_0mM = np.random.choice(MWTDB.index[MWTDB['groupname']=='N2'].values, 10, replace=False)
ind_400mM = np.random.choice(MWTDB.index[MWTDB['groupname']=='N2_400mM'].values, 10, replace=False)
# combine index from 0mM and 400mM
i = np.hstack((ind_0mM, ind_400mM))
# get trininty file paths from random samples
ptrinity = MWTDB['trinity_path'].iloc[i].values
# load data
df = pd.concat([pd.read_pickle(p) for p in ptrinity])
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 11894033 entries, 1 to 1579776
Data columns (total 17 columns):
# Column Dtype
--- ------ -----
0 mwtid_trdb int64
1 etoh int64
2 time float64
3 speed float64
4 bias float64
5 tap float64
6 loc_x float64
7 loc_y float64
8 morphwidth float64
9 midline float64
10 area float64
11 angular float64
12 aspect float64
13 kink float64
14 curve float64
15 crab float64
16 wormid int64
dtypes: float64(14), int64(3)
memory usage: 1.6 GB
###Markdown
approach 2, random 1 million rows from each group* 20 plates gives 11,894,033 rows of data. 800/20 = 40*12M = 480M rows of data* 300s*20 frame per sec = 6000 time points. 1M rows would have 1000/6 = 500/3 = 166 samples per time point. Will start with this and see how it goes.
###Code
# get number of rows per trinity file
df = pd.read_pickle(os.path.join(dir_save, 'fileinfo_trinity_N2400mM.pickle'))
# get sum
row_total = df['row_number'].sum()
print(f'total number of rows: {row_total}')
# randomly choose between those numbers
# get the data
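# Sketch (assumption, not part of the original analysis): draw ~1.1M global row
# indices at random, then map each one back to the trinity file it falls in by
# using cumulative row counts as offsets.
row_counts = df['row_number'].values
row_starts = np.cumsum(row_counts) - row_counts              # first global row index of each file
np.random.seed(318)
sample_idx = np.random.randint(0, row_total, size=1100000)   # sampled with replacement for simplicity
file_of_sample = np.searchsorted(row_starts, sample_idx, side='right') - 1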
MWTDB['trinity_path']
###Output
_____no_output_____ |
KanchiTank_ML_Classification.ipynb | ###Markdown
Mushroom Classification Using Different Classifiers In this project, we will examine the data and create a machine learning algorithm that will detect if the mushroom is edible or poisonous by its specifications like cap shape, cap color, gill color, etc. using different classifiers. The dataset used in this project is "mushrooms.csv" which contains 8124 instances of mushrooms with 23 features like cap-shape, cap-surface, cap-color, bruises, odor, etc. and is made available by UCI Machine Learning. Importing the packages
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import export_graphviz
import graphviz
###Output
_____no_output_____
###Markdown
Checking the files in the directory
###Code
print(os.listdir("C:/Users/Kanchi/PycharmProjects/Mushroom-Classification"))
###Output
['.ipynb_checkpoints', 'images', 'KanchiTank_ML_Classification.ipynb', 'Mushroom-Classification', 'mushrooms.csv']
###Markdown
Reading the csv file of the dataset Pandas read_csv() function imports a CSV file (in our case, ‘mushrooms.csv’) to DataFrame format.
###Code
df = pd.read_csv("mushrooms.csv")
###Output
_____no_output_____
###Markdown
Examining the Data After importing the data, to learn more about the dataset, we'll use .head() .info() and .describe() methods.
###Code
df.head()
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
Shape of the dataset
###Code
print("Dataset shape:", df.shape)
###Output
Dataset shape: (8124, 23)
###Markdown
Visualizing the count of edible and poisonous mushrooms
###Code
df['class'].value_counts()
df["class"].unique()
count = df['class'].value_counts()
plt.figure(figsize=(8,7))
sns.barplot(count.index, count.values, alpha=0.8, palette="prism")
plt.ylabel('Count', fontsize=12)
plt.xlabel('Class', fontsize=12)
plt.title('Number of poisonous/edible mushrooms')
#plt.savefig("mushrooms1.png", format='png', dpi=900)
plt.show()
###Output
_____no_output_____
###Markdown
The dataset is balanced. Data Manipulation The data is categorical so we’ll use LabelEncoder to convert it to ordinal. LabelEncoder converts each value in a column to a number. This approach requires the category column to be of ‘category’ datatype. By default, a non-numerical column is of ‘object’ datatype. From the df.describe() method, we saw that our columns are of ‘object’ datatype. So we will have to change the type to ‘category’ before using this approach.
###Code
df = df.astype('category')
df.dtypes
labelencoder=LabelEncoder()
for column in df.columns:
df[column] = labelencoder.fit_transform(df[column])
df.head()
###Output
_____no_output_____
###Markdown
The column "veil-type" is 0 and not contributing to the data so we remove it.
###Code
df['veil-type']
df=df.drop(["veil-type"],axis=1)
###Output
_____no_output_____
###Markdown
 Quick look at the characteristics of the data The violin plot below represents the distribution of the classification characteristics. It is possible to see that the "gill-color" property of the mushroom breaks into two parts, one below 3 and one above 3, which may contribute to the classification.
###Code
df_div = pd.melt(df, "class", var_name="Characteristics")
fig, ax = plt.subplots(figsize=(16,6))
p = sns.violinplot(ax = ax, x="Characteristics", y="value", hue="class", split = True, data=df_div, inner = 'quartile', palette = 'Set1')
df_no_class = df.drop(["class"],axis = 1)
p.set_xticklabels(rotation = 90, labels = list(df_no_class.columns));
#plt.savefig("violinplot.png", format='png', dpi=900, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Let's look at the correlation between the variables
###Code
plt.figure(figsize=(14,12))
sns.heatmap(df.corr(),linewidths=.1,cmap="Purples", annot=True, annot_kws={"size": 7})
plt.yticks(rotation=0);
#plt.savefig("corr.png", format='png', dpi=900, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Usually, the least correlating variable is the most important one for classification. In this case, "gill-color" has -0.53 so let's look at it closely.
###Code
df[['class', 'gill-color']].groupby(['gill-color'], as_index=False).mean().sort_values(by='class', ascending=False)
###Output
_____no_output_____
###Markdown
Let's look closely at the feature "gill-color".
###Code
new_var = df[['class', 'gill-color']]
new_var = new_var[new_var['gill-color']<=3.5]
sns.factorplot('class', col='gill-color', data=new_var, kind='count', size=4.5, aspect=.8, col_wrap=4);
#plt.savefig("gillcolor1.png", format='png', dpi=900, bbox_inches='tight')
new_var=df[['class', 'gill-color']]
new_var=new_var[new_var['gill-color']>3.5]
sns.factorplot('class', col='gill-color', data=new_var, kind='count', size=4.5, aspect=.8, col_wrap=4);
#plt.savefig("gillcolor2.png", format='png', dpi=900, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Preparing the Data Setting X and y axis and splitting the data into train and test respectively.
###Code
X = df.drop(['class'], axis=1)
y = df["class"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1)
###Output
_____no_output_____
###Markdown
Classification Methods 1. Decision Tree Classification
###Code
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
dot_data = export_graphviz(dt, out_file=None,
feature_names=X.columns,
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
#graph.render(filename='DecisionTree')
graph
###Output
_____no_output_____
###Markdown
Feature importance By all methods examined before the feature that is most important is "gill-color".
###Code
features_list = X.columns.values
feature_importance = dt.feature_importances_
sorted_idx = np.argsort(feature_importance)
plt.figure(figsize=(8,7))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center', color ="red")
plt.yticks(range(len(sorted_idx)), features_list[sorted_idx])
plt.xlabel('Importance')
plt.title('Feature importance')
plt.draw()
#plt.savefig("featureimp.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Predicting and estimating the result
###Code
y_pred_dt = dt.predict(X_test)
print("Decision Tree Classifier report: \n\n", classification_report(y_test, y_pred_dt))
print("Test Accuracy: {}%".format(round(dt.score(X_test, y_test)*100, 2)))
###Output
Test Accuracy: 100.0%
###Markdown
Confusion Matrix for Decision Tree Classifier
###Code
cm = confusion_matrix(y_test, y_pred_dt)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for Decision Tree Classifier')
#plt.savefig("dtcm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
2. Logistic Regression Classification
###Code
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver="lbfgs", max_iter=500)
lr.fit(X_train, y_train)
print("Test Accuracy: {}%".format(round(lr.score(X_test, y_test)*100,2)))
###Output
Test Accuracy: 94.96%
###Markdown
Classification report of Logistic Regression Classifier
###Code
y_pred_lr = lr.predict(X_test)
print("Logistic Regression Classifier report: \n\n", classification_report(y_test, y_pred_lr))
###Output
Logistic Regression Classifier report:
precision recall f1-score support
0 0.96 0.94 0.95 433
1 0.94 0.96 0.95 380
accuracy 0.95 813
macro avg 0.95 0.95 0.95 813
weighted avg 0.95 0.95 0.95 813
###Markdown
Confusion Matrix for Logistic Regression Classifier
###Code
cm = confusion_matrix(y_test, y_pred_lr)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for Logistic Regression Classifier')
#plt.savefig("lrcm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
3. KNN Classification
###Code
from sklearn.neighbors import KNeighborsClassifier
best_Kvalue = 0
best_score = 0
for i in range(1,10):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
if knn.score(X_test, y_test) > best_score:
best_score = knn.score(X_train, y_train)
best_Kvalue = i
print("Best KNN Value: {}".format(best_Kvalue))
print("Test Accuracy: {}%".format(round(best_score*100,2)))
###Output
Best KNN Value: 1
Test Accuracy: 100.0%
###Markdown
Classification report of KNN Classifier
###Code
y_pred_knn = knn.predict(X_test)
print("KNN Classifier report: \n\n", classification_report(y_test, y_pred_knn))
###Output
KNN Classifier report:
precision recall f1-score support
0 1.00 0.99 1.00 433
1 0.99 1.00 1.00 380
accuracy 1.00 813
macro avg 1.00 1.00 1.00 813
weighted avg 1.00 1.00 1.00 813
###Markdown
Confusion Matrix for KNN Classifier
###Code
cm = confusion_matrix(y_test, y_pred_knn)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for KNN Classifier')
#plt.savefig("knncm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
4. SVM Classification
###Code
from sklearn.svm import SVC
svm = SVC(random_state=42, gamma="auto")
svm.fit(X_train, y_train)
print("Test Accuracy: {}%".format(round(svm.score(X_test, y_test)*100, 2)))
###Output
Test Accuracy: 100.0%
###Markdown
Classification report of SVM Classifier
###Code
y_pred_svm = svm.predict(X_test)
print("SVM Classifier report: \n\n", classification_report(y_test, y_pred_svm))
###Output
SVM Classifier report:
precision recall f1-score support
0 1.00 1.00 1.00 433
1 1.00 1.00 1.00 380
accuracy 1.00 813
macro avg 1.00 1.00 1.00 813
weighted avg 1.00 1.00 1.00 813
###Markdown
Confusion Matrix for SVM Classifier
###Code
cm = confusion_matrix(y_test, y_pred_svm)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for SVM Classifier')
#plt.savefig("svmcm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
5. Naive Bayes Classification
###Code
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train, y_train)
print("Test Accuracy: {}%".format(round(nb.score(X_test, y_test)*100, 2)))
###Output
Test Accuracy: 92.62%
###Markdown
Classification report of Naive Bayes Classifier
###Code
y_pred_nb = nb.predict(X_test)
print("Naive Bayes Classifier report: \n\n", classification_report(y_test, y_pred_nb))
###Output
Naive Bayes Classifier report:
precision recall f1-score support
0 0.94 0.92 0.93 433
1 0.91 0.94 0.92 380
accuracy 0.93 813
macro avg 0.93 0.93 0.93 813
weighted avg 0.93 0.93 0.93 813
###Markdown
Confusion Matrix for Naive Bayes Classifier
###Code
cm = confusion_matrix(y_test, y_pred_nb)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for Naive Bayes Classifier')
#plt.savefig("nbcm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
6. Random Forest Classification
###Code
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
print("Test Accuracy: {}%".format(round(rf.score(X_test, y_test)*100, 2)))
###Output
Test Accuracy: 100.0%
###Markdown
Classification report of Random Forest Classifier
###Code
y_pred_rf = rf.predict(X_test)
print("Random Forest Classifier report: \n\n", classification_report(y_test, y_pred_rf))
###Output
Random Forest Classifier report:
precision recall f1-score support
0 1.00 1.00 1.00 433
1 1.00 1.00 1.00 380
accuracy 1.00 813
macro avg 1.00 1.00 1.00 813
weighted avg 1.00 1.00 1.00 813
###Markdown
Confusion Matrix for Random Forest Classifier
###Code
cm = confusion_matrix(y_test, y_pred_rf)
x_axis_labels = ["Edible", "Poisonous"]
y_axis_labels = ["Edible", "Poisonous"]
f, ax = plt.subplots(figsize =(7,7))
sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
plt.xlabel("PREDICTED LABEL")
plt.ylabel("TRUE LABEL")
plt.title('Confusion Matrix for Random Forest Classifier');
#plt.savefig("rfcm.png", format='png', dpi=900, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Predictions Predicting some of the X_test results and matching it with true i.e. y_test values using Decision Tree Classifier.
###Code
preds = dt.predict(X_test)
print(preds[:36])
print(y_test[:36].values)
# 0 - Edible
# 1 - Poisonous
###Output
[0 1 1 0 1 1 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 1]
[0 1 1 0 1 1 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 1]
|
notebooks-1/8-mlp-scratch.ipynb | ###Markdown
 Implementation of a Multilayer Perceptron from Scratch
###Code
import d2l
from mxnet import np, npx, gluon
npx.set_np()
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
###Output
_____no_output_____
###Markdown
 Initialize the model parameters.
###Code
num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))
b1 = np.zeros(num_hiddens)
W2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))
b2 = np.zeros(num_outputs)
params = [W1, b1, W2, b2]
for param in params:
param.attach_grad()
###Output
_____no_output_____
###Markdown
 Define the activation function.
###Code
def relu(X):
return np.maximum(X, 0)
###Output
_____no_output_____
###Markdown
 Define the model.
###Code
def net(X):
X = X.reshape((-1, num_inputs))
H = relu(np.dot(X, W1) + b1)
return np.dot(H, W2) + b2
###Output
_____no_output_____
###Markdown
 Training.
###Code
loss = gluon.loss.SoftmaxCrossEntropyLoss()
num_epochs, lr = 10, 0.5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs,
lambda batch_size: d2l.sgd(params, lr, batch_size))
###Output
_____no_output_____
###Markdown
 Prediction.
###Code
d2l.predict_ch3(net, test_iter)
###Output
_____no_output_____ |
archive/Chris_clstm_best_model_training_and_retrospective_analysis.ipynb | ###Markdown
First, load data
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import Dropout
from keras.layers.embeddings import Embedding
import random
#import ndac
import sklearn
%matplotlib inline
# read original data from /gscratch/pfaendtner/cnyambura/NovoNordisk_Capstone/dataframes
data = pd.read_csv('/gscratch/pfaendtner/cnyambura/NovoNordisk_Capstone/dataframes/DF_prest.csv', index_col=0)
data.head()
#check shape of data
data.shape
###Output
_____no_output_____
###Markdown
Setup nt doc and classify expression
###Code
#remove sequences that are not divisile by three
def nt_seq_doc(nt_sequence):
if 'GACAAGCTTGCGGCCGCA' not in nt_sequence:
return None
true_nt = nt_sequence.split('GACAAGCTTGCGGCCGCA')[1]
if len(true_nt) % 3 != 0:
return None
return ' '.join([true_nt[i:i+3]
for i in range(0, len(true_nt), 3)])
# split quantiles
def assign_class(conc):
if conc <= low_cut:
return 0
elif conc >= high_cut:
return 1
return
data['nt_seq_doc'] = data['nt_seq'].apply(nt_seq_doc)
data = data[pd.notnull(data['nt_seq_doc'])]
# identify high and low classes by conc_cf quantiles
low_cut = data['conc_cf'].quantile(0.25)
high_cut = data['conc_cf'].quantile(0.75)
data['class'] = data['conc_cf'].apply(assign_class)
data = data[pd.notnull(data['class'])]
# check shape
print('data shape: ', data.shape)
###Output
data shape: (22364, 8)
###Markdown
Model Training and Data Pre-Processing
###Code
#only keep proteins that have <5 PrESTs per protein
low_num_uniprots = data.groupby('uniprot_id').count().aa_seq[data.groupby('uniprot_id').count().aa_seq < 5].index.tolist()
#len(low_num_uniprots)
data_filtered = data[data.uniprot_id.isin(low_num_uniprots)]
#data_filtered
#X = data['nt_seq_doc']
#y = data['class'].values
# Get the number of prESTs per each uniprot
uniprot_counts = data_filtered.groupby('uniprot_id').count().prest_id
# Add all uniprots with a single prEST to the training set
training_uniprots = uniprot_counts[uniprot_counts == 1].index.tolist()
len(training_uniprots)
# Randomly pick 70% of other uniprots and add them to training set
random.seed(10)
other_uniprots = uniprot_counts[uniprot_counts > 1].index.tolist()
k = int(len(other_uniprots)*0.70)
training_uniprots += random.sample(other_uniprots, k)
len(training_uniprots)
# Add all remaining uniprots to test set
testing_uniprots = list(set(uniprot_counts.index.tolist()) - set(training_uniprots))
len(testing_uniprots)
print('Total number of proteins:', len(data_filtered.uniprot_id.unique()))
print('Number of training proteins:', len(training_uniprots))
print('Number of testing proteins:', len(testing_uniprots))
# Add all prESTs in training uniprots to training set
nt_train = data_filtered[data_filtered.uniprot_id.isin(training_uniprots)]
nt_train.shape
# Repeat for test set
nt_test = data_filtered[data_filtered.uniprot_id.isin(testing_uniprots)]
nt_test.shape
# define sequence documents
docs_train = list(nt_train['nt_seq_doc'])
# create the tokenizer
t = Tokenizer()
# fit the tokenizer on the documents
t.fit_on_texts(docs_train)
# integer encode documents
X_train = t.texts_to_sequences(docs_train)
y_train = nt_train['class'].values
# repeat to test set
docs_test = list(nt_test['nt_seq_doc'])
# fit the tokenizer on the documents
t.fit_on_texts(docs_test)
# integer encode documents
X_test = t.texts_to_sequences(docs_test)
y_test = nt_test['class'].values
# create test-train split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# fix random seed for reproducibility
#np.random.seed(7)
# tokenize the full dataset (used to size the vocabulary and find the max sequence length)
docs = list(data['nt_seq_doc'])
# fit the tokenizer on the documents
t.fit_on_texts(docs)
X = t.texts_to_sequences(docs)
# load the dataset but only keep the top n words, zero the rest
top_words = len(t.word_index) + 1
# truncate and pad input sequences
seq_lengths = [len(seq) for seq in X]
max_seq_length = max(seq_lengths)
X_train = sequence.pad_sequences(X_train, maxlen=max_seq_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_seq_length)
#X = sequence.pad_sequences(X, maxlen=max_seq_length)
# create the model using parameters from grid search
embedding_vecor_length = 16
drop = 0.5
recurrent_drop = 0.5
model = Sequential()
model.add(Embedding(top_words, embedding_vecor_length, input_length=max_seq_length))
model.add(Conv1D(filters=200, kernel_size=5, padding='same', activation='selu'))
model.add(MaxPooling1D(pool_size=4))
model.add(LSTM(150, dropout=drop, recurrent_dropout=recurrent_drop))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# record training progress
history = model.fit(X_train, y_train, epochs=35, batch_size=64, validation_data=(X_test, y_test))
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
history.history
# plot loss vs. epoch
# https://machinelearningmastery.com/diagnose-overfitting-underfitting-lstm-models/
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
y_pred = model.predict(X_test)
len(y_pred)
nt_test.loc[:,'act_prob'] = y_pred
nt_test_final = nt_test[['prest_id','uniprot_id','conc_cf','aa_seq','nt_seq','class','act_prob']].sort_values('uniprot_id',ascending=0).reset_index(drop=True)
print('Original total number of experiments:',len(nt_test_final))
print('Original total number of proteins:',len(nt_test_final.uniprot_id.unique()))
print('Original Number of passed experiments:',len(nt_test_final[nt_test_final['class']==1]))
print('Original Pass rate: %.2f%%'%(np.true_divide(len(nt_test_final[nt_test_final['class']==1]),len(nt_test))*100))
print('Number of proteins with >1 high expression:',len(nt_test_final[nt_test_final['class']==1].uniprot_id.unique()))
len(nt_test_final[nt_test_final['class']==1])
print('True positive:', len(nt_test_final[(nt_test_final['act_prob'] > 0.5) & (nt_test_final['class']==1)]))
len(nt_test_final[nt_test_final['class']==0])
print('False positive:', len(nt_test_final[(nt_test_final['act_prob'] > 0.5) & (nt_test_final['class']==0)]))
#print 'False Positive:',len(DF_retro_final[(DF_retro_final.ens_score > .5) & (DF_retro_final.expressed==False)])
print('True negative:', len(nt_test_final[(nt_test_final['act_prob'] < 0.5) & (nt_test_final['class']==0)]))
len(nt_test_final[(nt_test_final['act_prob'] < 0.5)])
print('False negative:', len(nt_test_final[(nt_test_final['act_prob'] < 0.5) & (nt_test_final['class']==1)]))
#print 'False Negative:',len(DF_retro_final[(DF_retro_final.ens_score < .5) & (DF_retro_final.expressed)])
#grab only the top expressing proteins
n = 5
np.random.seed(0)
output_df = pd.DataFrame(columns=['prest_id','uniprot_id','class','act_prob','nt_seq'])
remaining_df = nt_test_final.copy()
for i in range(n):
print('Iteration',i)
new_output_df = remaining_df.sort_values(['uniprot_id','act_prob'],ascending=[1,0]).drop_duplicates('uniprot_id')
output_df = pd.concat([output_df,new_output_df])
pred_pos_proteins = set(output_df[output_df['act_prob'] > 0.5].uniprot_id)
true_pos_proteins = set(output_df[output_df['class']==1].uniprot_id)
print('Total number of proposed experiments:',len(output_df))
print('Total number of expressed proteins:',len(true_pos_proteins))
print('Overall pass rate:',np.true_divide(len(true_pos_proteins),len(output_df)))
# Prepare for next iteration
remaining_df = remaining_df.drop(new_output_df.index)
remaining_df = remaining_df[remaining_df.uniprot_id.isin(true_pos_proteins)==False]
print
print('Percent saved experiments:',(1 - np.true_divide(len(output_df),len(nt_test_final)))*100,'%')
# single-iteration version of the selection loop above
remain_df = nt_test_final.copy()
n = 5
np.random.seed(0)
oput_df = pd.DataFrame(columns=['prest_id','uniprot_id','class','act_prob','nt_seq'])
n_output_df = remain_df.sort_values(['uniprot_id','act_prob'],ascending=[1,0]).drop_duplicates('uniprot_id')
oput_df = pd.concat([oput_df,n_output_df])
pred_pos_proteins = set(oput_df[oput_df['act_prob'] > 0.5].uniprot_id)
true_pos_proteins = set(oput_df[oput_df['class']==1].uniprot_id)
print('Total number of proposed experiments:',len(oput_df))
print('Total number of expressed proteins:',len(true_pos_proteins))
print('Overall pass rate:',np.true_divide(len(true_pos_proteins),len(oput_df)))
# Prepare for next iteration
remaining_df = remain_df.drop(n_output_df.index)
remaining_df = remaining_df[remaining_df.uniprot_id.isin(true_pos_proteins)==False]
remaining_df
###Output
_____no_output_____ |
Kinematic_Bicycle_Model.ipynb | ###Markdown
In this notebook, you will implement the kinematic bicycle model. The model accepts velocity and steering rate inputs and steps through the bicycle kinematic equations. Once the model is implemented, you will provide a set of inputs to drive the bicycle in a figure 8 trajectory.The bicycle kinematics are governed by the following set of equations:\begin{align*}\dot{x}_c &= v \cos{(\theta + \beta)} \\\dot{y}_c &= v \sin{(\theta + \beta)} \\\dot{\theta} &= \frac{v \cos{\beta} \tan{\delta}}{L} \\\dot{\delta} &= \omega \\\beta &= \tan^{-1}(\frac{l_r \tan{\delta}}{L})\end{align*}where the inputs are the bicycle speed $v$ and steering angle rate $\omega$. The input can also directly be the steering angle $\delta$ rather than its rate in the simplified case. The Python model will allow us both implementations.In order to create this model, it's a good idea to make use of Python class objects. This allows us to store the state variables as well as make functions for implementing the bicycle kinematics. The bicycle begins with zero initial conditions, has a maximum turning rate of 1.22 rad/s, a wheelbase length of 2m, and a length of 1.2m to its center of mass from the rear axle.From these conditions, we initialize the Python class as follows:
###Code
from notebook_grader import BicycleSolution, grade_bicycle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class Bicycle():
def __init__(self):
self.xc = 0
self.yc = 0
self.theta = 0
self.delta = 0
self.beta = 0
self.L = 2
self.lr = 1.2
self.w_max = 1.22
self.sample_time = 0.01
def reset(self):
self.xc = 0
self.yc = 0
self.theta = 0
self.delta = 0
self.beta = 0
###Output
_____no_output_____
###Markdown
A sample time is required for numerical integration when propagating the kinematics through time. This is set to 10 milliseconds. We also have a reset function which sets all the state variables back to 0. With this sample time, implement the kinematic model using the function $\textit{step}$ defined in the next cell. The function should take speed + angular rate as inputs and update the state variables. Don't forget about the maximum turn rate on the bicycle!
###Code
class Bicycle(Bicycle):
def step(self, v, w):
# ==================================
# Implement kinematic model here
# ==================================
        # limit the steering rate to the bicycle's maximum turn rate (in both directions)
        w = np.clip(w, -self.w_max, self.w_max)
        # forward-Euler integration of the kinematic bicycle equations
        self.xc += v * np.cos(self.theta + self.beta) * self.sample_time
        self.yc += v * np.sin(self.theta + self.beta) * self.sample_time
        self.theta += v * np.cos(self.beta) * np.tan(self.delta) / self.L * self.sample_time
        self.delta += w * self.sample_time
        # the sideslip angle is algebraic (not integrated), per the model equations above
        self.beta = np.arctan2(self.lr * np.tan(self.delta), self.L)
###Output
_____no_output_____
###Markdown
With the model setup, we can now start giving bicycle inputs and producing trajectories. Suppose we want the model to travel a circle of radius 10 m in 20 seconds. Using the relationship between the radius of curvature and the steering angle, the desired steering angle can be computed.\begin{align*} \tan{\delta} &= \frac{L}{r} \\ \delta &= \tan^{-1}(\frac{L}{r}) \\ &= \tan^{-1}(\frac{2}{10}) \\ &= 0.1974\end{align*}If the steering angle is directly set to 0.1974 using a simplied bicycled model, then the bicycle will travel in a circle without requiring any additional steering input. The desired speed can be computed from the circumference of the circle:\begin{align*} v &= \frac{d}{t}\\ &= \frac{2 \pi 10}{20}\\ &= \pi\end{align*}We can now implement this in a loop to step through the model equations. We will also run our bicycle model solution along with your model to show you the expected trajectory. This will help you verify the correctness of your model.
###Code
sample_time = 0.01
time_end = 20
model = Bicycle()
solution_model = BicycleSolution()
# set delta directly
model.delta = np.arctan(2/10)
solution_model.delta = np.arctan(2/10)
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
y_data = np.zeros_like(t_data)
x_solution = np.zeros_like(t_data)
y_solution = np.zeros_like(t_data)
for i in range(t_data.shape[0]):
x_data[i] = model.xc
y_data[i] = model.yc
model.step(np.pi, 0)
x_solution[i] = solution_model.xc
y_solution[i] = solution_model.yc
solution_model.step(np.pi, 0)
model.beta = 0
solution_model.beta=0
plt.axis('equal')
plt.plot(x_data, y_data,label='Learner Model')
plt.plot(x_solution, y_solution,label='Solution Model')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The plot above shows the desired circle of 10m radius. The path is slightly offset which is caused by the sideslip effects due to $\beta$. By forcing $\beta = 0$ through uncommenting the last line in the loop, you can see that the offset disappears and the circle becomes centered at (0,10). However, in practice the steering angle cannot be directly set and must be changed through angular rate inputs $\omega$. The cell below corrects for this and sets angular rate inputs to generate the same circle trajectory. The speed $v$ is still maintained at $\pi$ m/s.
###Code
sample_time = 0.01
time_end = 20
model.reset()
solution_model.reset()
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
y_data = np.zeros_like(t_data)
x_solution = np.zeros_like(t_data)
y_solution = np.zeros_like(t_data)
for i in range(t_data.shape[0]):
x_data[i] = model.xc
y_data[i] = model.yc
if model.delta < np.arctan(2/10):
model.step(np.pi, model.w_max)
else:
model.step(np.pi, 0)
x_solution[i] = solution_model.xc
y_solution[i] = solution_model.yc
if solution_model.delta < np.arctan(2/10):
solution_model.step(np.pi, model.w_max)
else:
solution_model.step(np.pi, 0)
plt.axis('equal')
plt.plot(x_data, y_data,label='Learner Model')
plt.plot(x_solution, y_solution,label='Solution Model')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Here are some other example trajectories: a square path, a spiral path, and a wave path. Uncomment each section to view.
###Code
sample_time = 0.01
time_end = 60
model.reset()
solution_model.reset()
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
y_data = np.zeros_like(t_data)
x_solution = np.zeros_like(t_data)
y_solution = np.zeros_like(t_data)
# maintain velocity at 4 m/s
v_data = np.zeros_like(t_data)
v_data[:] = 4
w_data = np.zeros_like(t_data)
# ==================================
# Square Path: set w at corners only
# ==================================
w_data[670:670+100] = 0.753
w_data[670+100:670+100*2] = -0.753
w_data[2210:2210+100] = 0.753
w_data[2210+100:2210+100*2] = -0.753
w_data[3670:3670+100] = 0.753
w_data[3670+100:3670+100*2] = -0.753
w_data[5220:5220+100] = 0.753
w_data[5220+100:5220+100*2] = -0.753
# ==================================
# Spiral Path: high positive w, then small negative w
# ==================================
# w_data[:] = -1/100
# w_data[0:100] = 1
# ==================================
# Wave Path: square wave w input
# ==================================
#w_data[:] = 0
#w_data[0:100] = 1
#w_data[100:300] = -1
#w_data[300:500] = 1
#w_data[500:5700] = np.tile(w_data[100:500], 13)
#w_data[5700:] = -1
# ==================================
# Step through bicycle model
# ==================================
for i in range(t_data.shape[0]):
x_data[i] = model.xc
y_data[i] = model.yc
model.step(v_data[i], w_data[i])
x_solution[i] = solution_model.xc
y_solution[i] = solution_model.yc
solution_model.step(v_data[i], w_data[i])
plt.axis('equal')
plt.plot(x_data, y_data,label='Learner Model')
plt.plot(x_solution, y_solution,label='Solution Model')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
We would now like the bicycle to travel a figure eight trajectory. Both circles in the figure eight have a radius of 8m and the path should complete in 30 seconds. The path begins at the bottom of the left circle and is shown in the figure below:Determine the speed and steering rate inputs required to produce such trajectory and implement in the cell below. Make sure to also save your inputs into the arrays v_data and w_data, these will be used to grade your solution. The cell below also plots the trajectory generated by your own model.
###Code
sample_time = 0.01
time_end = 30
model.reset()
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
y_data = np.zeros_like(t_data)
v_data = np.zeros_like(t_data)
w_data = np.zeros_like(t_data)
# ==================================
# Learner solution begins here
# ==================================
model.delta = np.arctan(2/8)
w_max = 1.22
w_max_in_step = w_max * sample_time
desired_angle = np.arctan(2/8)
desired_time_steps = int(np.floor(desired_angle / w_max_in_step))
last_disired_w = desired_angle - desired_time_steps * w_max_in_step
print(desired_time_steps)
w_data[0:desired_time_steps-1] = w_max
w_data[desired_time_steps] = last_disired_w
half_quarter = int(1/8 * t_data.shape[0])
w_data[half_quarter - desired_time_steps : half_quarter + desired_time_steps - 1] = -w_max
w_data[half_quarter*5 - desired_time_steps : half_quarter*5 + desired_time_steps] = w_max
v_data = np.ones_like(t_data) * np.pi * 16/15
print(w_data[half_quarter])
for i in range(t_data.shape[0]):
x_data[i] = model.xc
y_data[i] = model.yc
model.step(v_data[i], w_data[i])
# ==================================
# Learner solution ends here
# ==================================
plt.axis('equal')
plt.plot(x_data, y_data)
plt.show()
###Output
20
-1.22
###Markdown
We will now run your speed and angular rate inputs through our bicycle model solution. This is to ensure that your trajectory is correct along with your model. The cell below will display the path generated by our model along with some waypoints on a desired figure 8. Surrounding these waypoints are error tolerance circles with radius 1.5m, your solution will pass the grader if the trajectory generated stays within 80% of these circles.
###Code
grade_bicycle(t_data,v_data,w_data)
###Output
Assessment passed! Your trajectory meets 82.5% of the waypoints.
###Markdown
The cell below will save the time and vehicle inputs as text file named $\textit{figure8.txt}$. To locate the file, change the end of your web directory to $\textit{/notebooks/Course_1_Module_4/figure8.txt}$Once you are there, you can download the file and submit to the Coursera grader to complete this assessment.
###Code
data = np.vstack([t_data, v_data, w_data]).T
np.savetxt('figure8.txt', data, delimiter=', ')
###Output
_____no_output_____
###Markdown
Congratulations! You have now completed the assessment! Feel free to test the bicycle model with different inputs in the cell below, and see what trajectories they form. For example, try moving in an equilateral triangle. You'll find that it's rather difficult to generate desired trajectories by pre-setting the inputs. The next module on vehicle control will show you an easier and more accurate method. See you there!
###Code
sample_time = 0.01
time_end = 30
model.reset()
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
y_data = np.zeros_like(t_data)
v_data = np.zeros_like(t_data)
w_data = np.zeros_like(t_data)
# ==================================
# Test various inputs here
# ==================================
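# Example inputs (rough, untuned guess): an equilateral-triangle-like path built
# the same way as the square path earlier -- constant speed plus a steering-rate
# pulse (out, then back) at three corners.  The pulse height/width below are
# assumptions; tweak them while watching the plot until each turn is ~120 deg.
v_data[:] = 4
for corner in (700, 1700, 2700):
    w_data[corner:corner+100] = 0.95
    w_data[corner+100:corner+200] = -0.95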
for i in range(t_data.shape[0]):
    x_data[i] = model.xc
    y_data[i] = model.yc
    model.step(v_data[i], w_data[i])
plt.axis('equal')
plt.plot(x_data, y_data)
plt.show()
###Output
_____no_output_____ |
conll_2003baseline.ipynb | ###Markdown
Conll 2003 Name Entity Recognitiondataset link: https://huggingface.co/datasets/conll2003
###Code
from google.colab import drive
drive.mount('/content/drive')
! pip install transformers
import numpy as np
import pandas as pd
from transformers import AutoTokenizer
from tqdm import tqdm
import tensorflow as tf
import matplotlib.pyplot as plt
def load_sentences(filepath):
final = []
sentences = []
with open(filepath, 'r') as f:
for line in f.readlines():
if (line == ('-DOCSTART- -X- -X- O\n') or line == '\n'):
if len(sentences) > 0:
final.append(sentences)
sentences = []
else:
l = line.split(' ')
sentences.append((l[0], l[3].strip('\n')))
return final
base_path = './drive/MyDrive/berkeley/capstone/demo/conll2003/'
train_samples = load_sentences(base_path + 'train.txt')
test_samples = load_sentences(base_path + 'test.txt')
valid_samples = load_sentences(base_path + 'valid.txt')
samples = train_samples + test_samples
schema = ['_'] + sorted({tag for sentence in samples
for _, tag in sentence})
# dataset quick view
train_data = []
sentences=[]
with open(base_path + 'train.txt', 'r') as f:
for line in f.readlines():
if (line == ('-DOCSTART- -X- -X- O\n') or line == '\n'):
if len(sentences) > 0:
train_data.append(sentences)
sentences = []
else:
l = line.split(" ")
            sentences.append(tuple(l[i].strip('\n') for i in range(4)))
l[0].strip('\n')
###Output
Streaming output truncated to the last 5000 lines.
-DOCSTART- -X- -X- O
-DOCSTART- -X- -X- O
-DOCSTART- -X- -X- O
Model
###Code
from transformers import AutoConfig, TFAutoModelForTokenClassification
MODEL_NAME = 'bert-base-cased'
config = AutoConfig.from_pretrained(MODEL_NAME, num_labels=len(schema))
model = TFAutoModelForTokenClassification.from_pretrained(MODEL_NAME,
config=config)
model.summary()
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
def tokenize_sample(sample):
seq = [
(subtoken, tag)
for token, tag in sample
for subtoken in tokenizer(token)['input_ids'][1:-1]
]
return [(3, 'O')] + seq + [(4, 'O')]
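# preprocess pads every tokenized sentence to the length of the longest one and maps each
# NER tag to its integer id from `schema`, producing dense (n_sentences, max_len) input/label arrays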
def preprocess(samples):
tag_index = {tag: i for i, tag in enumerate(schema)}
tokenized_samples = list(tqdm(map(tokenize_sample, samples)))
max_len = max(map(len, tokenized_samples))
X = np.zeros((len(samples), max_len), dtype=np.int32)
y = np.zeros((len(samples), max_len), dtype=np.int32)
for i, sentence in enumerate(tokenized_samples):
for j, (subtoken_id, tag) in enumerate(sentence):
X[i, j] = subtoken_id
y[i,j] = tag_index[tag]
return X, y
X_train, y_train = preprocess(train_samples)
X_test, y_test = preprocess(test_samples)
X_valid, y_valid = preprocess(valid_samples)
EPOCHS=5
BATCH_SIZE=8
optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss, metrics='accuracy')
history = model.fit(tf.constant(X_train), tf.constant(y_train),
validation_data=(X_test, y_test),
epochs=EPOCHS,
batch_size=BATCH_SIZE)
plt.figure(figsize=(14,8))
plt.title('Losses')
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Valid Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
###Code
###Output
_____no_output_____ |
periodicHill/sst_iddes_06/plots.ipynb | ###Markdown
Plot Periodic Hill Results for the SST-IDDES model. Run sst_iddes_06 with dynamic forcing everywhere, new code.
###Code
%%capture
import sys
import os
sys.path.insert(1, '../utilities')
import utilities
import plotter
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import copy
figsize=(15,6)
# Setup directories
# Reference data
refdir = os.path.abspath("../marchdf.periodicHill/refdata")
basestyle= {'lw':1.25, 'ls':'-', 'marker':None, 'color':'r',
'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':3,}
# Put all Nalu-Wind directories to plot here
fdirlist=[
# Directory Input file Legend suffix Linestyle
#['../sst/', 'periodicHill_sst.yaml', '', basestyle],
['../sst_iddes_05/', 'periodicHill_sstiddes.yaml', 'OLD CODE', {**basestyle, **dict(color='b')}],
['', 'periodicHill_sstiddes.yaml', 'NEW CODE', {**basestyle, **dict(color='r', ls='-', lw=2)}],
]
# Define the data, styles, and the legend
expdict = {'data':plotter.read_exp_data(os.path.join(refdir, "exp")), 'label':"EXP",
'lw':0, 'marker':plotter.markertype[2], 'color':plotter.cmap[-1],
'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':3, 'xoff':1.0}
lesdict = {'data':plotter.read_les_data(os.path.join(refdir, "les")), 'label':"LES",
'lw':2.5, 'marker':None, 'color':plotter.cmap[2],
'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':1.0}
v2fdict = {'data':plotter.read_cdp_data(os.path.join(refdir, "cdp-v2f")),'label':"CDP-v2f",
'lw':2.5, 'marker':None, 'color':plotter.cmap[2],
'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':0.0 }
tamsdict = {'data':plotter.read_cdp_data(os.path.join(refdir, "cdp-tams")),'label':"CDP-v2f-TAMS",
'lw':2.5, 'marker':None, 'color':plotter.cmap[3],
'mfc':plotter.cmap[-1], 'mec':plotter.cmap[-1], 'ms':0, 'xoff':0.0 }
# Comment out the data series you don't want to plot
data2plot = [expdict,
#lesdict,
#v2fdict,
#tamsdict,
]
###Output
_____no_output_____
###Markdown
Plot velocity profiles
###Code
legend_elements=[]
# Loop through and plot each dataseriese
for ds in data2plot:
grouped = ds['data'].groupby(["x"])
xoffset = ds['xoff']
legend_elements += [
Line2D([0], [0], lw=ds['lw'], marker=ds['marker'], color=ds['color'], mfc=ds['mfc'], mec=ds['mec'], markersize=ds['ms'], label=ds['label'], ),
]
#print(ds['data'].columns.tolist())
for k, (name, group) in enumerate(grouped):
idx = group.y.values >= utilities.hill(group.x.values)
if ("u" in ds['data'].columns.tolist()):
plt.figure("u", figsize=figsize)
p = plt.plot(group[idx].u + xoffset*group[idx].x, group[idx].y,
lw=ds['lw'], color=ds['color'], marker=ds['marker'], mec=ds['mec'], mfc=ds['mfc'], ms=ds['ms'], )
if ("v" in ds['data'].columns.tolist()):
plt.figure("v", figsize=figsize)
p = plt.plot(group[idx].v + xoffset*group[idx].x, group[idx].y,
lw=ds['lw'], color=ds['color'], marker=ds['marker'], mec=ds['mec'], mfc=ds['mfc'], ms=ds['ms'], )
# Nalu data
for i, fdirentry in enumerate(fdirlist):
fdir = fdirentry[0]
yamlfile = fdirentry[1]
suffix = fdirentry[2]
style = fdirentry[3]
yname = os.path.join(os.path.dirname(fdir), yamlfile)
u0, rho0, mu, turb_model = plotter.parse_ic(yname)
model = turb_model.upper().replace("_", "-")
legend_elements += [ Line2D([0], [0], lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) ]
ndf = pd.read_csv(os.path.join(fdir, "profiles.dat"))
ndf.loc[ndf.u > 5, ["u", "v", "w"]] = 0.0
grouped = ndf.groupby(["x"])
for k, (name, group) in enumerate(grouped):
idx = group.y.values >= utilities.hill(group.x.values)
plt.figure("u")
p = plt.plot(group[idx].u + group[idx].x, group[idx].y, lw=style['lw'], ls=style['ls'], color=style['color'],
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
plt.figure("v")
p = plt.plot(group[idx].v + group[idx].x, group[idx].y, lw=style['lw'], ls=style['ls'], color=style['color'],
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
x = np.linspace(0,9,1001)
# Set up the U-velocity plot
plt.figure("u")
ax = plt.gca()
plt.plot(x, utilities.hill(x), lw=1)
plt.fill_between(x, utilities.hill(x),0, color='gray')
plt.xlabel(r"$\langle u_x \rangle + x$", fontsize=22, fontweight="bold")
plt.ylabel(r"$y / h$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.xlim([-0.5, 9.5])
plt.ylim([0, 3.5])
legend = ax.legend(handles=legend_elements, loc="lower left")
plt.grid()
plt.title('Horizontal velocity', fontsize=18)
plt.tight_layout()
# Set up the V-velocity plot
plt.figure("v")
ax = plt.gca()
plt.plot(x, utilities.hill(x), lw=1)
plt.fill_between(x, utilities.hill(x),0, color='gray')
plt.xlabel(r"$\langle u_y \rangle + x$", fontsize=22, fontweight="bold")
plt.ylabel(r"$y / h$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.xlim([-0.5, 9.5])
plt.ylim([0, 3.5])
legend = ax.legend(handles=legend_elements, loc="best")
plt.grid()
plt.title('Vertical velocity', fontsize=18)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Plot surface & time history of Nalu-Runs
###Code
# Nalu data
maxt=-1.0E100
for i, fdirentry in enumerate(fdirlist):
fdir = fdirentry[0]
yamlfile = fdirentry[1]
suffix = fdirentry[2]
style = fdirentry[3]
yname = os.path.join(os.path.dirname(fdir), yamlfile)
u0, rho0, mu, turb_model = plotter.parse_ic(yname)
model = turb_model.upper().replace("_", "-")
legend_elements += [ Line2D([0], [0], lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms']) ]
h = 1.0
tau = h / u0
dynPres = rho0 * 0.5 * u0 * u0
ndf = pd.read_csv(os.path.join(fdir, "profiles.dat"))
ndf.loc[ndf.u > 5, ["u", "v", "w"]] = 0.0
grouped = ndf.groupby(["x"])
cf = pd.read_csv(os.path.join(fdir, "tw.dat"))
cf["cf"] = cf.tauw / dynPres
plt.figure("cf", figsize=figsize)
plt.plot(cf.x, cf.cf, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
inlet = pd.read_csv(os.path.join(fdir, "inlet.dat"))
plt.figure("u_inlet", figsize=figsize)
plt.plot((inlet.t-inlet.t[0]) / tau, inlet.u, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
plt.figure("tke_inlet", figsize=figsize)
plt.plot((inlet.t-inlet.t[0]) / tau, inlet.tke, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
plt.figure("sdr_inlet", figsize=figsize)
plt.plot((inlet.t-inlet.t[0]) / tau, inlet.sdr, lw=style['lw'], ls=style['ls'], color=style['color'], label=f"Nalu-{model} "+suffix,
marker=style['marker'], mfc=style['mfc'], mec=style['mec'], markersize=style['ms'])
# Get the simulation time
if (max((inlet.t-inlet.t[0]) / tau) > maxt): maxt=max((inlet.t-inlet.t[0]) / tau)
# Plot the LES cf
ldir = os.path.join(refdir, "les")
cf = pd.read_csv(os.path.join(ldir, "hill_LES_cf_digitized.dat"), delim_whitespace=True)
plt.figure("cf")
plt.plot(cf.x, cf.cf, lw=2, color=plotter.cmap[2], label="LES")
# Format figures
plt.figure("cf")
ax = plt.gca()
plt.xlabel(r"$x$", fontsize=22, fontweight="bold")
plt.ylabel(r"$c_f$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.hlines(0.0, 0, 9.5, linestyles='dashed', linewidth=0.5)
legend = ax.legend(loc="best")
plt.title('Skin friction', fontsize=18, fontweight="bold")
plt.tight_layout()
plt.figure("u_inlet")
ax = plt.gca()
plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold")
plt.ylabel(r"$\bar{u} (x=0)$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.hlines(1.0, 0, maxt, linestyles='dashed', linewidth=0.5)
legend = ax.legend(loc="best")
plt.title('Inlet velocity', fontsize=18, fontweight="bold")
plt.tight_layout()
plt.figure("tke_inlet")
ax = plt.gca()
plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold")
plt.ylabel(r"$\bar{k} (x=0)$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
legend = ax.legend(loc="best")
plt.title('Inlet TKE', fontsize=18, fontweight="bold")
plt.tight_layout()
plt.figure("sdr_inlet")
ax = plt.gca()
plt.xlabel(r"$(t-t_0) / \tau$", fontsize=22, fontweight="bold")
plt.ylabel(r"$\bar{\omega} (x=0)$", fontsize=22, fontweight="bold")
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
legend = ax.legend(loc="best")
plt.tight_layout()
###Output
_____no_output_____ |
titanic_competition.ipynb | ###Markdown
[kaggle notebook kernel](https://www.kaggle.com/allieubissetroinuxsa/titanic-competition-v1?scriptVersionId=29609674) Imports
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Loading Data
###Code
train_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/train.csv'
test_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/test.csv'
sub_URL = 'https://raw.githubusercontent.com/Data-HAWKS/titanic-competition/master/titanic-competition/gender_submission.csv'
train_df1 = pd.read_csv(train_URL, index_col='PassengerId')
test_df = pd.read_csv(test_URL, index_col='PassengerId')
sub_df = pd.read_csv(sub_URL)
###Output
_____no_output_____
###Markdown
Data Exploration
###Code
test_df.head()
#test_df.shape
train_df1.head()
train_df1.shape
train_df = pd.concat([train_df1, test_df], axis=0)  # combine train and test for joint preprocessing
print(train_df.shape)
train_df.reset_index().head()
###Output
(1309, 11)
###Markdown
Train
###Code
train_df.head()
train_df.dtypes
train_df.shape
train_df.info()
train_df.isnull().sum()
sns.heatmap(data=train_df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output
_____no_output_____
###Markdown
Imputing NaN values (method: mean). Survived countplot: Survived = 1, NOT Survived = 0.
###Code
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train_df,palette='RdBu_r')
# Survived based on Sex, so we set the parameter hue = 'Sex'
sns.countplot(x='Survived', hue='Sex' ,data=train_df,palette='RdBu_r')
###Output
_____no_output_____
###Markdown
From the above: more females survived than males.
###Code
sns.countplot(x='Survived', hue='Pclass' ,data=train_df,palette='RdBu_r')
sns.distplot(train_df['Age'].dropna(), kde=False, color='darkred',bins=30)
train_df['Age'].hist(bins=30,color='darkred',alpha=0.7)
###Output
_____no_output_____
###Markdown
The age distribution is fairly close to a normal distribution.
###Code
sns.countplot(x='SibSp', data=train_df)
plt.figure(figsize=(12,7))
sns.boxplot(x='Pclass',y='Age',data=train_df, palette='winter')
def impute_nan_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return int(train_df.groupby('Pclass')['Age'].mean()[1])
elif Pclass == 2:
return int(train_df.groupby('Pclass')['Age'].mean()[2])
else:
return int(train_df.groupby('Pclass')['Age'].mean()[3])
else:
return Age
###Output
_____no_output_____
###Markdown
applying the function to impute the Age using Pclass
###Code
train_df['Age'] = train_df[['Age','Pclass']].apply(impute_nan_age, axis=1)
sns.heatmap(data=train_df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output
_____no_output_____
###Markdown
For now, I will drop the Cabin column since it has many null values.
###Code
train = train_df.drop('Cabin',axis=1)
train.head()
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
train.drop(['Name','Sex','Embarked','Ticket'], axis=1, inplace=True)
train = pd.concat([train,sex,embark],axis=1)
train.head()
train.reset_index().head()
###Output
_____no_output_____
###Markdown
Building Classification Models
###Code
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
###Output
_____no_output_____
###Markdown
Train Test Split
###Code
x = train.drop('Survived',axis=1)
y = train['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.30, random_state=101)
###Output
_____no_output_____
###Markdown
Training & testing the model KNeighborsClassifier
###Code
classifier = KNeighborsClassifier(
n_neighbors = 5,
weights = 'distance',
metric = 'minkowski',
p=2
)
model = classifier.fit(x_train,y_train)
y_pred1 = model.predict(x_test)
###Output
_____no_output_____
###Markdown
Random Forest Classifier[scikit-learn Random Forest Classifier ](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
###Code
model_2 = RandomForestClassifier(n_estimators=100)
model_2.fit(x_train,y_train)
y_pred2 = model_2.predict(x_test)
###Output
_____no_output_____
###Markdown
accuracy_score
###Code
print(f'Random Forest Classifier accuracy score : {accuracy_score(y_true=y_test,y_pred= y_pred2)}')
print(f'KNeighbors Classifier accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred1)}')
###Output
Random Forest Classifier accuracy score : 0.8059701492537313
KNeighbors Classifier accuracy score : 0.7126865671641791
###Markdown
Submission
###Code
sub_df.head()
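# Note: y_pred3 is produced by the XGBoost cell in the 'extras' section below, so run that cell before this one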
y_final_pred = y_pred3
submission = pd.DataFrame( {
'PassengerId' : y_test.reset_index()['PassengerId'],
'Survived': y_final_pred
} )
submission.to_csv('titanic_submission.csv', index = False)
submission.head()
###Output
_____no_output_____
###Markdown
extras

```python
# importing required libraries
import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score

# read the train and test dataset
train_data = pd.read_csv('train-data.csv')
test_data = pd.read_csv('test-data.csv')

# shape of the dataset
print('Shape of training data :', train_data.shape)
print('Shape of testing data :', test_data.shape)

# Now, we need to predict the missing target variable in the test data
# target variable - Survived

# separate the independent and target variable on training data
train_x = train_data.drop(columns=['Survived'], axis=1)
train_y = train_data['Survived']

# separate the independent and target variable on testing data
test_x = test_data.drop(columns=['Survived'], axis=1)
test_y = test_data['Survived']
```

**Create the object of the XGBoost model.** You can also add other parameters and test your code here. Some settings are: *max_depth* and *n_estimators*.

```python
model = XGBClassifier()

# fit the model with the training data
model.fit(train_x, train_y)

# predict the target on the train dataset
predict_train = model.predict(train_x)
print('\nTarget on train data', predict_train)

# Accuracy Score on train dataset
accuracy_train = accuracy_score(train_y, predict_train)
print('\naccuracy_score on train dataset : ', accuracy_train)

# predict the target on the test dataset
predict_test = model.predict(test_x)
print('\nTarget on test data', predict_test)

# Accuracy Score on test dataset
accuracy_test = accuracy_score(test_y, predict_test)
print('\naccuracy_score on test dataset : ', accuracy_test)
```

[XGBoost Model in Python using scikit-learn](https://hackernoon.com/want-a-complete-guide-for-xgboost-model-in-python-using-scikit-learn-sc11f31bq)

[xgboost read the docs](https://xgboost.readthedocs.io/en/latest/index.html)
###Code
!pip install xgboost
from xgboost import XGBClassifier
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
model_3 = XGBClassifier()
model_3.fit(x_train,y_train)
y_pred3 = model_3.predict(x_test)
###Output
_____no_output_____
###Markdown
linear SVC
###Code
from sklearn import svm
clf = svm.LinearSVC(max_iter=3000, random_state=101)
clf.fit(x_train,y_train)
y_pred4 = clf.predict(x_test)
print(f'Random Forest Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred= y_pred2)}\n')
print(f'KNeighbors Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred1)} \n')
print(f'xgboost Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred3)} \n')
print(f'LinearSVC Classifier | accuracy score : {accuracy_score(y_true=y_test,y_pred=y_pred4)} \n')
###Output
Random Forest Classifier | accuracy score : 0.8059701492537313
KNeighbors Classifier | accuracy score : 0.7126865671641791
xgboost Classifier | accuracy score : 0.8283582089552238
LinearSVC Classifier | accuracy score : 0.7985074626865671
|
doc/ipynb/executed/demo_cachedjit.ipynb | ###Markdown
IPython + cachedjit decorator. We can define a cachedjit function in IPython (and Jupyter, which uses IPython). The only limitation is that the type variables have to be defined in the same cell as the function.
###Code
import numpy as np
from fluidpythran import cachedjit
# pythran import numpy as np
@cachedjit
def laplace_pythran(image):
"""Laplace operator in NumPy for 2D images."""
laplacian = (
image[:-2, 1:-1] + image[2:, 1:-1] + image[1:-1, :-2] + image[1:-1, 2:]
- 4*image[1:-1, 1:-1]
)
thresh = np.abs(laplacian) > 0.05
return thresh
from skimage.data import astronaut
from skimage.color import rgb2gray
image = astronaut()
image = rgb2gray(image)
laplace_pythran(image)
###Output
[32mINFO [0m [34mwrite Pythran signature in file /home/users/augier3pi/.fluidpythran/__cachedjit__/__ipython__2823fa65cda0fe3f58b60391a9d2e13b/laplace_pythran.pythran with types
['float64[:, :]'][0m
|
01-pandas-ipywidgets/jupyter-widget-ecosystem/notebooks/10.11-ipywebrtc.ipynb | ###Markdown
ipywebrtc. Using master (7b364018501957fb9c778c0ee3d850b88e52f83f); this will not work with v0.2.0.
###Code
import ipywebrtc as webrtc
import ipyvolume as ipv
import ipywidgets as widgets
video = webrtc.VideoStream.from_file('big-buck-bunny_trailer.webm')
video
camera = webrtc.CameraStream()
camera
fig = ipv.figure(render_continuous=True)
back = ipv.plot_plane("back", texture=video)
right = ipv.plot_plane("right", texture=camera)
ipv.show()
right.texture = fig
room = webrtc.chat(room='scipy2018', stream=fig)
back.texture = room.streams[1]
recorder = webrtc.MediaRecorder(stream=fig, filename='record')
recorder
recorder.data[:1000]
room.close()
camera.close()
video.close()
###Output
_____no_output_____ |
06_change_detection/04_map_to_image_change.ipynb | ###Markdown
Introduction - Map-to-Image Change Detection

Change detection is a *hot* topic within the Earth Observation (EO) community, particularly since the release of the Landsat archive and, more recently, the availability of the ESA Sentinel-1 and 2 data. There are several approaches to change detection and these can be categorised as:

- Map-to-Map
- Image-to-Image
- Map-to-Image
- Dense Timeseries

These approaches have different advantages and disadvantages, and this tutorial will demonstrate the implementation of a method for each type.

What is Map-to-Image Change? It is probably the least explored approach: an existing classification at the first date is required, but rather than undertaking a second independent classification, a method is used which compares the image and map to identify the change pixels and then classifies them to update the existing map. These methods **have few assumptions** but generally **expect the area of change to be small** compared to the unchanged regions. When a change occurs there is also an **assumption that the change causes the EO signal (reflectance, backscatter etc.) to change such that it is identifiable when compared to the original class**.

What are the steps? Again, this is quite simple. We need a classification and a new input image from a different date, and we will try to use that input image to find the changes between the two dates.

1. Define Imports
###Code
import os
import shutil
import rsgislib
import rsgislib.imagecalc
import rsgislib.imagecalc.calcindices
import rsgislib.changedetect.pxloutlierchng
import rsgislib.imageutils
import rsgislib.rastergis
# Imports for data visualisation
import rsgislib.tools.plotting
# Import the matplotlib plotting module and set it to work within
# a notebook environment.
%matplotlib inline
import matplotlib
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
2. Define Input Images and Directories
###Code
cls_2018_dir = "2018_cls"
if not os.path.exists(cls_2018_dir):
os.mkdir(cls_2018_dir)
cls_1997_dir = "baseline_cls"
if not os.path.exists(cls_1997_dir):
os.mkdir(cls_1997_dir)
map_to_img_chg_dir = "map_to_img_chg"
if not os.path.exists(map_to_img_chg_dir):
os.mkdir(map_to_img_chg_dir)
# File path to the 1997 classification
cls_1997_img = os.path.join(cls_1997_dir, "base_1997_class_img.kea")
# File path to the input Landsat 5 image from 1997.
input_97_img = (
"../data/chg_data/LS5TM_19970716_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif"
)
# File path to the Landsat 5 valid data mask from 1997
vld_97_img = os.path.join(cls_1997_dir, "LS5TM_19970716_vmsk.kea")
# File path to the input Landsat 8 image from 2018.
input_18_img = (
"../data/chg_data/LS8_20180608_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif"
)
# File path to the Landsat 8 valid data mask from 2018.
vld_18_img = os.path.join(cls_2018_dir, "LS8_20180608_vmsk.kea")
###Output
_____no_output_____
###Markdown
3. Calculate NDVI for 2018
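As a reminder of the index being calculated: NDVI = (NIR - Red) / (NIR + Red). The band numbers passed to `calc_ndvi` below (4 and 5) are assumed to correspond to the red and near-infrared bands of the Landsat 8 surface-reflectance stack.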
###Code
ls_2018_ndvi_img = os.path.join(map_to_img_chg_dir, "LS8_20180608_ndvi.kea")
rsgislib.imagecalc.calcindices.calc_ndvi(input_18_img, 4, 5, ls_2018_ndvi_img)
###Output
Image: ../data/chg_data/LS8_20180608_vmsk_mclds_topshad_rad_srefdem_stdsref_subset.tif
Variable 'red' is band 4
Variable 'nir' is band 5
New image width = 1281 height = 3659 bands = 1
Calculating Image Pyramids.
###Markdown
4. Identify Mangrove Change Features
###Code
out_mng_chng_msk = os.path.join(map_to_img_chg_dir, "mng_2018_map2img_chngs.kea")
plot_mng_thres_file = os.path.join(map_to_img_chg_dir, "mng_2018_map2img_chngs_plot.png")
rsgislib.changedetect.pxloutlierchng.find_class_kurt_skew_outliers(
ls_2018_ndvi_img,
cls_1997_img,
out_mng_chng_msk,
-1,
1,
0.35,
True,
10.0,
False,
img_mask_val=1, # Specify mangrove class
img_band=1,
gdalformat="KEA",
plot_thres_file=plot_mng_thres_file,
)
###Output
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 133/133 [00:00<00:00, 769.83it/s]
###Markdown
5. Identify Water Change Features
###Code
out_water_chng_msk = os.path.join(map_to_img_chg_dir, "water_2018_map2img_chngs.kea")
plot_water_thres_file = os.path.join(map_to_img_chg_dir, "water_2018_map2img_chngs_plot.png")
rsgislib.changedetect.pxloutlierchng.find_class_kurt_skew_outliers(
ls_2018_ndvi_img,
cls_1997_img,
out_water_chng_msk,
-1,
1,
0.35,
False,
10.0,
False,
img_mask_val=3, # Specify water class
img_band=1,
gdalformat="KEA",
plot_thres_file=plot_water_thres_file,
)
###Output
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 133/133 [00:00<00:00, 931.89it/s]
###Markdown
6. Update 1997 Classification to 2018
###Code
chng_2018_cls_img = os.path.join(map_to_img_chg_dir, "chng_map_2018_map2img.kea")
band_defns = []
band_defns.append(rsgislib.imagecalc.BandDefn("vld_18_msk", vld_18_img, 1))
band_defns.append(rsgislib.imagecalc.BandDefn("cls_1997", cls_1997_img, 1))
band_defns.append(rsgislib.imagecalc.BandDefn("mng_chng", out_mng_chng_msk, 1))
band_defns.append(rsgislib.imagecalc.BandDefn("wat_chng", out_water_chng_msk, 1))
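# The expression below updates the 1997 map: mangrove pixels (class 1) flagged as outliers in the
# NDVI change mask (mng_chng == 2) become water (class 3), water pixels (class 3) flagged as outliers
# (wat_chng == 2) become mangrove (class 1), and all other pixels keep their 1997 class.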
exp = """(vld_18_msk==1)&&(cls_1997==1)&&(mng_chng==2)?3:
(vld_18_msk==1)&&(cls_1997==3)&&(wat_chng==2)?1:
cls_1997
"""
rsgislib.imagecalc.band_math(
chng_2018_cls_img, exp, "KEA", rsgislib.TYPE_8UINT, band_defns
)
rsgislib.rastergis.pop_rat_img_stats(
chng_2018_cls_img, add_clr_tab=True, calc_pyramids=True, ignore_zero=True
)
###Output
Image: 2018_cls/LS8_20180608_vmsk.kea
Variable 'vld_18_msk' is band 1
Image: baseline_cls/base_1997_class_img.kea
Variable 'cls_1997' is band 1
Image: map_to_img_chg/mng_2018_map2img_chngs.kea
Variable 'mng_chng' is band 1
Image: map_to_img_chg/water_2018_map2img_chngs.kea
Variable 'wat_chng' is band 1
New image width = 1281 height = 3659 bands = 1
Get Image Min and Max.
Get Image Histogram.
Adding Histogram and Colour Table to image file
Calculating Image Pyramids.
###Markdown
7. Create Change Image
###Code
# Define output image path
chng_map_img = os.path.join(map_to_img_chg_dir, "chng_map_1997_2018_map2img.kea")
# Define the input image bands
band_defns = []
band_defns.append(rsgislib.imagecalc.BandDefn("cls_1997", cls_1997_img, 1))
band_defns.append(rsgislib.imagecalc.BandDefn("cls_2018", chng_2018_cls_img, 1))
# Expression to define change classes
exp = """(cls_1997==1)&&(cls_2018==1)?11:
(cls_1997==1)&&(cls_2018==2)?12:
(cls_1997==1)&&(cls_2018==3)?13:
(cls_1997==2)&&(cls_2018==1)?21:
(cls_1997==2)&&(cls_2018==2)?22:
(cls_1997==2)&&(cls_2018==3)?23:
(cls_1997==3)&&(cls_2018==1)?31:
(cls_1997==3)&&(cls_2018==2)?32:
(cls_1997==3)&&(cls_2018==3)?33:0
"""
# Run band_math
rsgislib.imagecalc.band_math(chng_map_img, exp, "KEA", rsgislib.TYPE_8UINT, band_defns)
# Populate stats and pyramids
rsgislib.rastergis.pop_rat_img_stats(
chng_map_img, add_clr_tab=True, calc_pyramids=True, ignore_zero=True
)
###Output
Image: baseline_cls/base_1997_class_img.kea
Variable 'cls_1997' is band 1
Image: map_to_img_chg/chng_map_2018_map2img.kea
Variable 'cls_2018' is band 1
New image width = 1281 height = 3659 bands = 1
Get Image Min and Max.
Get Image Histogram.
Adding Histogram and Colour Table to image file
Calculating Image Pyramids.
###Markdown
8. Colour and Name Change Classes
###Code
class_info_dict = dict()
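# The two-digit keys encode the transition: tens digit = 1997 class, units digit = 2018 class
# (1 = Mangrove, 2 = Terrestrial, 3 = Water), matching the band_math expression above.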
class_info_dict[11] = {
"classname": "Mangrove_Mangrove",
"red": 25,
"green": 200,
"blue": 25,
}
class_info_dict[12] = {
"classname": "Mangrove_Terrestrial",
"red": 200,
"green": 100,
"blue": 25,
}
class_info_dict[13] = {
"classname": "Mangrove_Water",
"red": 200,
"green": 25,
"blue": 200,
}
class_info_dict[21] = {
"classname": "Terrestrial_Mangrove",
"red": 125,
"green": 100,
"blue": 125,
}
class_info_dict[22] = {
"classname": "Terrestrial_Terrestrial",
"red": 125,
"green": 125,
"blue": 125,
}
class_info_dict[23] = {
"classname": "Terrestrial_Water",
"red": 125,
"green": 125,
"blue": 100,
}
class_info_dict[31] = {
"classname": "Water_Mangrove",
"red": 25,
"green": 255,
"blue": 200,
}
class_info_dict[32] = {
"classname": "Water_Terrestrial",
"red": 62,
"green": 62,
"blue": 200,
}
class_info_dict[33] = {
"classname": "Water_Water",
"red": 25,
"green": 25,
"blue": 200
}
rsgislib.rastergis.set_class_names_colours(chng_map_img, "class_names", class_info_dict)
###Output
_____no_output_____
###Markdown
9. Visual Change Result
###Code
sub_bbox = [523000, 550000, -877000, -854000]
# Get the image data using the get_gdal_thematic_raster_mpl_imshow function.
(
img_msk_data,
img_msk_coords,
lgd_msk_patches,
) = rsgislib.tools.plotting.get_gdal_thematic_raster_mpl_imshow(
chng_map_img, bbox=sub_bbox
)
# Create the matplotlib figure
fig, ax = plt.subplots(figsize=(10, 10))
# Use the imshow function to display the image data within the plot
# the extent option defines the x and y axis values.
ax.imshow(img_msk_data, extent=img_msk_coords)
###Output
Image Data Size: 900 x 767
|
NLP Project (Solutions).ipynb | ###Markdown
Natural Language Processing Project

Welcome to the NLP Project for this section of the course. In this NLP project you will be attempting to classify Yelp Reviews into 1 star or 5 star categories based off the text content in the reviews. This will be a simpler procedure than the lecture, since we will utilize the pipeline methods for more complex tasks.

We will use the [Yelp Review Data Set from Kaggle](https://www.kaggle.com/c/yelp-recsys-2013). Each observation in this dataset is a review of a particular business by a particular user.

The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business. (Higher stars is better.) In other words, it is the rating of the business by the person who wrote the review.

The "cool" column is the number of "cool" votes this review received from other Yelp users. All reviews start with 0 "cool" votes, and there is no limit to how many "cool" votes a review can receive. In other words, it is a rating of the review itself, not a rating of the business.

The "useful" and "funny" columns are similar to the "cool" column.

Let's get started! Just follow the directions below!

Imports

**Import the usual suspects. :) **
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
The Data

**Read the yelp.csv file and set it as a dataframe called yelp.**
###Code
yelp = pd.read_csv('yelp.csv')
###Output
_____no_output_____
###Markdown
** Check the head, info , and describe methods on yelp.**
###Code
yelp.head()
yelp.info()
yelp.describe()
###Output
_____no_output_____
###Markdown
**Create a new column called "text length" which is the number of words in the text column.**
###Code
yelp['text length'] = yelp['text'].apply(len)
###Output
_____no_output_____
###Markdown
EDA

Let's explore the data.

Imports

**Import the data visualization libraries if you haven't done so already.**
###Code
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
**Use FacetGrid from the seaborn library to create a grid of 5 histograms of text length based off of the star ratings. Reference the seaborn documentation for hints on this**
###Code
g = sns.FacetGrid(yelp,col='stars')
g.map(plt.hist,'text length')
###Output
_____no_output_____
###Markdown
**Create a boxplot of text length for each star category.**
###Code
sns.boxplot(x='stars',y='text length',data=yelp,palette='rainbow')
###Output
_____no_output_____
###Markdown
**Create a countplot of the number of occurrences for each type of star rating.**
###Code
sns.countplot(x='stars',data=yelp,palette='rainbow')
###Output
_____no_output_____
###Markdown
** Use groupby to get the mean values of the numerical columns, you should be able to create this dataframe with the operation:**
###Code
stars = yelp.groupby('stars').mean()
stars
###Output
_____no_output_____
###Markdown
**Use the corr() method on that groupby dataframe to produce this dataframe:**
###Code
stars.corr()
###Output
_____no_output_____
###Markdown
**Then use seaborn to create a heatmap based off that .corr() dataframe:**
###Code
sns.heatmap(stars.corr(),cmap='coolwarm',annot=True)
###Output
_____no_output_____
###Markdown
NLP Classification Task

Let's move on to the actual task. To make things a little easier, go ahead and only grab reviews that were either 1 star or 5 stars.

**Create a dataframe called yelp_class that contains the columns of yelp dataframe but for only the 1 or 5 star reviews.**
###Code
yelp_class = yelp[(yelp.stars==1) | (yelp.stars==5)]
###Output
_____no_output_____
###Markdown
** Create two objects X and y. X will be the 'text' column of yelp_class and y will be the 'stars' column of yelp_class. (Your features and target/labels)**
###Code
X = yelp_class['text']
y = yelp_class['stars']
###Output
_____no_output_____
###Markdown
**Import CountVectorizer and create a CountVectorizer object.**
###Code
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
###Output
_____no_output_____
###Markdown
** Use the fit_transform method on the CountVectorizer object and pass in X (the 'text' column). Save this result by overwriting X.**
###Code
X = cv.fit_transform(X)
###Output
_____no_output_____
###Markdown
Train Test Split

Let's split our data into training and testing data.

** Use train_test_split to split up the data into X_train, X_test, y_train, y_test. Use test_size=0.3 and random_state=101 **
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
###Output
_____no_output_____
###Markdown
Training a Model

Time to train a model!

** Import MultinomialNB and create an instance of the estimator and call it nb **
###Code
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
###Output
_____no_output_____
###Markdown
**Now fit nb using the training data.**
###Code
nb.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Predictions and Evaluations

Time to see how our model did!

**Use the predict method off of nb to predict labels from X_test.**
###Code
predictions = nb.predict(X_test)
###Output
_____no_output_____
###Markdown
** Create a confusion matrix and classification report using these predictions and y_test **
###Code
from sklearn.metrics import confusion_matrix,classification_report
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test,predictions))
###Output
[[159 69]
[ 22 976]]
precision recall f1-score support
1 0.88 0.70 0.78 228
5 0.93 0.98 0.96 998
avg / total 0.92 0.93 0.92 1226
###Markdown
**Great! Let's see what happens if we try to include TF-IDF in this process using a pipeline.**

Using Text Processing

** Import TfidfTransformer from sklearn. **
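As a quick standalone illustration (a toy sketch, separate from the exercise) of what the TF-IDF step adds on top of raw counts:

```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

toy_docs = ["good food good service", "bad food"]   # hypothetical mini-corpus
counts = CountVectorizer().fit_transform(toy_docs)  # raw term counts
tfidf = TfidfTransformer().fit_transform(counts)    # counts re-weighted by inverse document frequency
print(tfidf.toarray())
```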
###Code
from sklearn.feature_extraction.text import TfidfTransformer
###Output
_____no_output_____
###Markdown
** Import Pipeline from sklearn. **
###Code
from sklearn.pipeline import Pipeline
###Output
_____no_output_____
###Markdown
** Now create a pipeline with the following steps: CountVectorizer(), TfidfTransformer(), MultinomialNB() **
###Code
pipeline = Pipeline([
('bow', CountVectorizer()), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', MultinomialNB()), # train on TF-IDF vectors w/ Naive Bayes classifier
])
###Output
_____no_output_____
###Markdown
Using the Pipeline

**Time to use the pipeline! Remember this pipeline has all your pre-processing steps in it already, meaning we'll need to re-split the original data (remember that we overwrote X as the CountVectorized version; what we need is just the text).**

Train Test Split

**Redo the train test split on the yelp_class object.**
###Code
X = yelp_class['text']
y = yelp_class['stars']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
###Output
_____no_output_____
###Markdown
**Now fit the pipeline to the training data. Remember you can't use the same training data as last time because that data has already been vectorized. We need to pass in just the text and labels**
###Code
# May take some time
pipeline.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Predictions and Evaluation** Now use the pipeline to predict from the X_test and create a classification report and confusion matrix. You should notice strange results.**
###Code
predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
###Output
[[ 0 228]
[ 0 998]]
precision recall f1-score support
1 0.00 0.00 0.00 228
5 0.81 1.00 0.90 998
avg / total 0.66 0.81 0.73 1226
|
examples/mnist_nn.ipynb | ###Markdown
Digit recognition using TT neural networks

The TT layer is applied to the MNIST dataset.

Imports:
###Code
import torch as tn
import torch.nn as nn
try:
import torchtt as tntt
except:
print('Installing torchTT...')
%pip install git+https://github.com/ion-g-ion/torchTT
import torchtt as tntt
from torch import optim
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
device = tn.device('cuda' if tn.cuda.is_available() else 'cpu')
###Output
_____no_output_____
###Markdown
Download the dataset and store it in the 'downloads' subfolder.
###Code
train_data = datasets.MNIST(root = 'downloads', train = True, transform = ToTensor(), download = True)
test_data = datasets.MNIST(root = 'downloads', train = False, transform = ToTensor())
###Output
_____no_output_____
###Markdown
Create 2 dataloaders for the training set and the test set.
###Code
dataloader_train = tn.utils.data.DataLoader(train_data, batch_size=1000, shuffle=True, num_workers=10)
dataloader_test = tn.utils.data.DataLoader(test_data, batch_size=100, shuffle=True, num_workers=10)
###Output
_____no_output_____
###Markdown
Define the neural network architecture. It contains 2 hidden TT layers (with ReLU activation functions) followed by a linear output layer. A log-softmax is applied at the output.
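Note that each 28x28 MNIST image has 784 pixels, which matches the factorised input shape [1, 7, 4, 7, 4] of the first TT layer (1 * 7 * 4 * 7 * 4 = 784); the images are reshaped to this form before being fed to the network.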
###Code
class BasicTT(nn.Module):
def __init__(self):
super().__init__()
self.ttl1 = tntt.nn.LinearLayerTT([1,7,4,7,4], [8,10,10,10,10], [1,4,2,2,2,1])
self.ttl2 = tntt.nn.LinearLayerTT([8,10,10,10,10], [8,3,3,3,3], [1,2,2,2,2,1])
self.linear = nn.Linear(81*8, 10, dtype = tn.float32)
self.logsoftmax = nn.LogSoftmax(1)
def forward(self, x):
x = self.ttl1(x)
x = tn.relu(x)
x = self.ttl2(x)
x = tn.relu(x)
x = x.view(-1,81*8)
x = self.linear(x)
return self.logsoftmax(x)
###Output
_____no_output_____
###Markdown
Instantiate the model and choose the optimizer and the loss function.
###Code
model = BasicTT().to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
###Output
_____no_output_____
###Markdown
Start the training for 30 epochs
###Code
n_epochs = 30
for epoch in range(n_epochs):
for i,(input,label) in enumerate(dataloader_train):
input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
label = label.to(device)
optimizer.zero_grad()
output = model(input)
loss = loss_function(output, label)
loss.backward()
optimizer.step()
print('Epoch %d/%d iteration %d/%d loss %e'%(epoch+1,n_epochs,i+1,len(dataloader_train),loss))
###Output
_____no_output_____
###Markdown
Compute the accuracy over the test set.
###Code
n_correct = 0
n_total = 0
for (input,label) in dataloader_test:
input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
output = model(input).cpu()
n_correct += tn.sum(tn.max(output,1)[1] == label)
n_total += input.shape[0]
print('Test accuracy ',n_correct/n_total)
###Output
_____no_output_____ |
datasetHandle/gov/spider.ipynb | ###Markdown
Scraping public data from government websites. Scraping historical weather data for Zhangzhou.
###Code
import requests
header = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
}
res_list = []
for year in range(2020,2022):
for month in range(1,13):
url = 'https://lishi.tianqi.com/zhangzhou/{}{:0>2d}.html'.format(year,month)
# print(url)
res = requests.get(url=url,headers=header)
res_list.append(res)
from lxml import etree
# Daily weather records for 2020
day = 1
csv_str = ""
for month in range(1,13):
    dom = etree.HTML(res_list[month - 1].text)  # res_list[0] corresponds to 2020-01
values = dom.xpath('/html/body/div[7]/div[1]/div[4]/ul/li')
for one in values:
high_temp = int(one.xpath('./div[2]/text()')[0][0:-1])
low_temp = int(one.xpath('./div[3]/text()')[0][0:-1])
weather = one.xpath('./div[4]/text()')[0]
# print(day,high_temp,low_temp,weather)
line = "{},{},{},{}\n".format(day,high_temp,low_temp,weather)
csv_str = csv_str + line
day += 1
head_str = "day,high,low,weather\n"
csv_str = head_str + csv_str
# print(csv_str)
with open('weather.csv','w',encoding='utf-8') as f:
f.write(csv_str)
###Output
_____no_output_____
###Markdown
Fujian: scraping data from the Fujian Provincial Department of Ecology and Environment
###Code
import requests
import time
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
url = 'http://sthjt.fujian.gov.cn/was5/web/search'
params = {
'channelid':280067,
'sortfield':'-s4',
'classsql':'(dockind=10)',
'r':'0.3624881561901028',
'prepage':100,
'page':1
}
res_list = []
for i in range(1,177):
params['page'] = i
res = requests.get(url=url,params=params,headers=header)
res_list.append(res)
time.sleep(0.1)
print('done')
import json
docs_list = []
for one in res_list:
doc = json.loads(one.text.replace('\r','').replace('\n',''))
docs_list = docs_list + doc['docs']
print('done 1')
csv_str = ""
for i in range(len(docs_list)):
one = docs_list[i]
try:
line = "{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(one['s1'] ,one['s2'],one['s3'],
one['s4'],one['s5'],one['s6'],
one['s7'],one['f1'],one['f2'],
one['f3'],one['f4'],one['f5'],one['f6'],)
except Exception as e:
# print(e)
# print(i)
continue
csv_str = csv_str + line
# if i == 2:
# break
# print(csv_str)
print('done 2')
head_str = '水系,点位名称,断面名称,年,周,起始时间,结束时间,pH,DO,COD,TP,氨氮,总氮\n'
csv_str = head_str + csv_str
with open('water.csv','w',encoding='utf-8') as f:
f.write(csv_str)
print('done')
import pandas as pd
import numpy as np
fujian_df = pd.read_csv('data/network/gov/fujian/fujian.csv')
print(fujian_df.describe())
fujian_df['年'] = fujian_df['年'].astype(int)
for year in range(2004,2022):
year_df = fujian_df[(fujian_df['年'] >= year) & (fujian_df['年'] < year + 1) ]
print("{}年记录总数:{}".format(year,len(year_df)))
year_df = fujian_df[(fujian_df['年'] >= 2017) & (fujian_df['年'] < 2018) ]
print("记录总数:{}".format(len(year_df)))
site_list = year_df['断面名称'].unique()
print("站点总数:{}".format(len(site_list)))
for site in site_list:
one = year_df[year_df['断面名称'] == site]
print("{},{}".format(site,len(one)))
###Output
记录总数:3520
站点总数:58
姑田溪(龙岩-三明交界断面),55
尤溪(三明-南平交界断面),55
干流(对照),55
翠江河(宁化-清流交界断面),55
干流(周宁-福安交界断面),55
饮用水水源地,330
水库、湖泊,110
富屯溪(三明-南平交界断面),55
黄潭河(上杭-永定交界断面),55
西溪(龙岩-漳州交界断面),55
干流(上杭湖洋乡涧头村),55
干流(寿宁-福安交界断面),55
九龙溪(清流-永安交界断面),55
沙溪(三明-南平交界断面),55
富屯溪(邵武-顺昌交界断面),55
北溪(控制断面),55
西溪(安溪-南安交界断面),55
濉溪(建宁-泰宁交界断面),55
黄潭河(新罗-上杭交界断面),55
鱼塘溪(明溪-三元交界断面),55
建溪(武夷山-建阳交界断面),55
干流(宁德-福州交界断面),55
干流(仙游-城厢交界断面),55
北团溪(龙岩-三明交界断面),55
北溪 (龙岩-漳州交界断面),55
干流(宁德-福州交界断面),55
干流(南安-丰泽交界断面),55
北溪(新罗-漳平交界断面),55
北溪 (华安-芗城交界断面),55
北溪(厦门-漳州交界断面),55
桃溪(永春-南安交界断面),55
干流(罗源-连江交界断面),55
干流(屏南-蕉城交界断面),55
沙溪(永安-三元交界断面),55
沙溪(梅列-沙县交界断面),55
金溪(泰宁-将乐交界断面),55
富屯溪(光泽-邵武交界断面),55
富屯溪(顺昌-延平交界断面),55
建溪(建阳-建瓯交界断面),55
省界(浙-闽),55
建溪(政和-建瓯交界断面),55
均溪(大田-尤溪交界断面),55
干流(南平-宁德交界断面),55
建溪(浦城-建阳交界断面),55
建溪(松溪-政和交界断面),55
建溪(建瓯-延平交界断面),55
干流(闽侯-福州交界断面),55
大樟溪(泉州-福州交界断面),55
干流(闽江入海口),55
北溪(长泰-龙文交界断面),55
西溪(平和-南靖交界断面),55
西溪(南靖-芗城交界断面),55
干流(长汀-上杭交界断面),55
干流(上杭-永定交界断面),55
文川河(龙岩-三明交界断面),55
干流(连江-马尾交界断面),55
大樟溪(永泰-闽侯交界断面),55
干流(闽清-闽侯交界断面),55
###Markdown
山东爬取山东省生态环保厅的数据http://sthj.shandong.gov.cn/
###Code
import requests
import json
url = 'http://fb.sdem.org.cn:8801/wryfb/ajax/map.ashx'
param = {
'Method':'SelectSubList',
'stcode':'0',
'type':'WasteWaterGis',
'isall':'0'
}
header = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
# 'Referer':'http://fb.sdem.org.cn:8801/wryfb/WebGis/WasteWaterGis/HistoryData_New.aspx?SubId=29941%20&SubName=%u79D1%u6CD3%u79D1%u6280%u96C6%u56E2%u6709%u9650%u516C%u53F8',
# 'X-Requested-With':'XMLHttpRequest',
# 'Origin':'http://fb.sdem.org.cn:8801',
# 'Pragma':'no-cache',
# 'Cookie':'ASP.NET_SessionId=y5zc211bfydoziryjxzimesx; ASP.NET_SessionId_NS_Sig=oenCV6md0Dtq6Bby',
# 'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
}
res = requests.post(url=url, data=param, headers=header)
site_list = json.loads(res.text)['items']
print("站点总数:{}".format(len(site_list)))
import time
url = 'http://fb.sdem.org.cn:8801/wryfb/ajax/WasteWaterGis/WasteWaterHandler.ashx'
param = {
'Method':'GetHisChart_New',
'strID':'2326',
'strTime':'2021-07-01'
}
header = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
}
month_data_list = []
for year in range(2010,2021):
print("爬取{}年".format(year))
for month in range(1,13):
str_time = '{}-{}-01'.format(year,month)
param['strTime'] = str_time
month_res = requests.post(url=url, data=param, headers=header)
month_data = json.loads(month_res.text)
month_data_list.append(month_data)
time.sleep(0.05)
# break
# break
# print(res.text)
# doc = json.loads(res.text)
# print(doc)
###Output
爬取2010年
爬取2011年
爬取2012年
爬取2013年
爬取2014年
爬取2015年
爬取2016年
爬取2017年
爬取2018年
爬取2019年
爬取2020年
###Markdown
China National Environmental Monitoring Centre: weekly automatic water quality monitoring reports (http://www.cnemc.cn/sssj/szzdjczb/)
###Code
import requests
import time
page_list = []
for i in range(25):
if i == 0:
page = requests.get(url='http://www.cnemc.cn/sssj/szzdjczb/index.shtml')
else:
url = 'http://www.cnemc.cn/sssj/szzdjczb/index_{}.shtml'.format(i)
page = requests.get(url=url)
time.sleep(0.05)
page_list.append(page)
print('done')
# print(page.text)
from lxml import etree
import requests
import time
url_base = 'http://www.cnemc.cn/sssj/szzdjczb'
for page in page_list:
dom = etree.HTML(page.text)
for i in range(1,21):
xpath_str = '//*[@id="contentPageData"]/li[{}]/a/@href'.format(i)
line = dom.xpath(xpath_str)[0]
if line.split('.')[-1] == 'shtml':
url = url_base + line[1:]
page = requests.get(url=url)
            # fetch and parse the download page
doc_page = etree.HTML(page.text)
            # collect the links from all <a> tags
all_link = doc_page.xpath('//a/@href')
found = False
for link in all_link:
last_str = link.split('.')[-1]
if (last_str == 'doc' or last_str == 'pdf') and link[:2] == './':
found = True
                    # build the download link
download_link = "/".join(url.split("/")[:-1]) + link[1:]
doc_file = requests.get(url=download_link)
                    # download and save the file
dir = 'data/network/gov/cnemc/'
filename = dir + link[2:]
with open(filename,'wb') as f:
f.write(doc_file.content)
# print(download_link)
if not found:
print('{}没有找到下载链接'.format(url))
time.sleep(0.05)
# break
else:
print('{}可以直接下载'.format(line))
# break
import os  # used for working with the target file paths
path = "E:\\project\\mvp\\Graph-WaveNet\\data\\network\\gov\\cnemc\\"  # absolute path of the folder
files = []
for file in os.listdir(path):
    if file.endswith(".doc"):  # skip other files in the folder and only keep Word files with the ".doc" extension
        files.append(path + file)
from win32com import client as wc  # import the module
word = wc.Dispatch("Word.Application")  # start the Word application
for file in files:
    doc = word.Documents.Open(file)  # open the Word file
    doc.SaveAs("{}x".format(file), 12)  # save as a ".docx" file; the argument 12 means the docx format
    doc.Close()  # close the original Word file
word.Quit()
print("完成!")
# import docx
from docx import Document
docFile = 'data/network/gov/cnemc/2018-51.docx'
document = Document(docFile) #读入文件
table = document.tables[0] #获取文件中的表格集
table.cell(2,0).text
###Output
_____no_output_____ |
resnet_binary_model.ipynb | ###Markdown
###Code
# git clone repository
!git clone https://github.com/yasohasakii/binary_categorical.git
!mv ./binary_categorical/* ./
!rm -r ./binary_categorical ./sample_data
!pip install tensorflow-gpu
!nvidia-smi
# import modules
from __future__ import print_function
import numpy as np
import os, glob, time
from keras.layers import Input
from keras.models import Model
from keras.preprocessing import image
from keras import regularizers, optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.applications.resnet50 import ResNet50
os.environ['CUDA_VISIBLE_DEVICES']='0'
np.random.seed(1337)
# set parameters
batch_size = 32
EPOCHS=30
train_count = len(glob.glob('./PetImages/train/*/*.jpg'))
valid_count = len(glob.glob('./PetImages/test/*/*.jpg'))
# def build_model
def build_model(trainable=True):
input = Input(shape = (224,224,3),name="kfb_image")
base_model = ResNet50(include_top=False, weights='imagenet', input_tensor=input)
x = base_model.output
# for layer in base_model.layers:
# layer.trainable=False
x = GlobalAveragePooling2D()(x)
if trainable:
x = Dropout(0.5,name="dropout_1")(x)
x = Dense(256, activation='relu', kernel_regularizer=regularizers.l1(1e-4),name="dense_1")(x)
x = Dropout(0.5,name="dropout_2")(x)
x = Dense(64, activation='relu', kernel_regularizer=regularizers.l1(1e-4),name="dense_2")(x)
x = Dropout(0.5,name="dropout_3")(x)
else:
x = Dense(256, activation='relu' ,name="dense_1")(x)
x = Dense(64, activation='relu', name="dense_2")(x)
prediction = Dense(1, activation='sigmoid',name = "dense_3")(x)
model = Model(inputs=base_model.input, outputs=prediction)
# model.summary()
return model
# def train
def train(train_count,valid_count):
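    # Augmentation (rotation, shifts, shear, flips) is applied to the training generator only;
    # the validation generator just rescales pixel values to [0, 1].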
train_datagen = image.ImageDataGenerator(
rescale = 1./255,
rotation_range=40,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
valid_datagen = image.ImageDataGenerator(
rescale = 1./255)
train_generator = train_datagen.flow_from_directory(
'./PetImages/train/',
target_size=(224, 224),
batch_size=batch_size,
class_mode='binary')
validation_generator = valid_datagen.flow_from_directory(
'./PetImages/test',
target_size=(224, 224),
batch_size=batch_size,
class_mode='binary')
model = build_model()
model.compile(loss="binary_crossentropy", optimizer=optimizers.Adam(lr=1e-4,decay =1e-6), metrics=["accuracy"])
models_save_path = "./models"
if not os.path.exists(models_save_path):
os.makedirs(models_save_path)
checkpoint = ModelCheckpoint(filepath=os.path.join(models_save_path, 'resnet-{epoch:02d}-{val_acc:.4f}.h5'),
monitor='val_acc',
mode='max',
save_best_only=True,
save_weights_only=True)
print("Train files: {}, valid files: {}".format(train_count,valid_count))
print('-----------Start training-----------')
start = time.time()
history = model.fit_generator(train_generator,
steps_per_epoch=train_count // batch_size,
epochs=EPOCHS,
initial_epoch=0,
validation_data=validation_generator,
validation_steps=valid_count // batch_size,
callbacks=[checkpoint],
use_multiprocessing=False)
end = time.time()
print("train finished, cost time = {} hours".format(round((end - start) / 3600.0,3)))
return history
# def plot module
def plot_train_history(history):
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10))
plt.subplot(1,2,1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.subplot(1,2,2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig("history.png")
# def test
def test(model_path,image_dir):
model = build_model(trainable=False)
model.load_weights(model_path, by_name=True)
for layer in model.layers:
layer.trainable=False
images = glob.glob(image_dir+"/*.jpg")
images.sort()
cat=0.0
dog=0.0
for imgs in images[:2]:
print(imgs)
im = image.load_img(imgs, target_size=(224, 224))
im = image.img_to_array(im)* 1. / 255
im = np.expand_dims(im, axis=0)
out = model.predict(im)[0][0]
print(out)
if out>=0.5:
dog += 1
else:
cat += 1
print(dog+cat)
if dog>cat:
print("{}: dog, score{}, inage nums {}".format(os.path.basename(image_dir),dog/(dog+cat),len(images)))
else:
print("{}: cat, score{}, image nums {}".format(os.path.basename(image_dir),cat/(dog+cat),len(images)))
if __name__=="__main__":
history = train(train_count,valid_count)
plot_train_history(history)
test('./resnet-04-0.9722.h5','./PetImages/test/Cat')
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
###Output
_____no_output_____ |
docs/python/jupyter/findmybgc.ipynb | ###Markdown
Run as usual. If you're following along, change the filepaths in the first code block. Using `FindMyBGC()`
###Code
from socialgene.classes.findmybgc import FindMyBGC
hmm_filepath = "/home/chase/Documents/socialgene_data/test/long_cache/HMM_HASH/socialgene_all_hmms.hmm"
gbk_filepath = "/home/chase/Documents/socialgene_data/mibig_gbk_2.0//BGC0001848.gbk"
findmybgc_object = FindMyBGC()
findmybgc_object.parse(gbk_filepath=gbk_filepath)
findmybgc_object.annotate_with_pyhmmer(
hmm_filepath=hmm_filepath,
use_neo4j_precalc=False,
cpus=0,
)
###Output
_____no_output_____
###Markdown
Run the comparison (all-vs-all)
###Code
findmybgc_object.compare_proteins()
###Output
_____no_output_____
###Markdown
The results could be accessed now, but they are not very useful in this form (showing the first five results below).
###Code
findmybgc_object.protein_comparison[0:5]
###Output
_____no_output_____
###Markdown
So convert the results into a pandas dataframe
###Code
findmybgc_object.protein_comparison_to_df()
###Output
_____no_output_____
###Markdown
And view the results
###Code
findmybgc_object.protein_comparison
###Output
_____no_output_____ |
generate-backgrounds.ipynb | ###Markdown
Generating backgrounds
###Code
%matplotlib inline
from pathlib import Path
import matplotlib as mpl
from thesis_cover import *
mpl.rcParams["figure.dpi"] = 300
options = sorted(Path("data").glob("*/*.pickle"))
def generate_cover(
learner,
save_fname: str,
npoints_interp=1000,
dpi=300,
resolution=(4096, 2160),
cmap=None,
):
data = list(learner.data.items())
x_size, y_size = xy_size = (resolution[0] / dpi, resolution[1] / dpi)
fig, ax = plt.subplots(figsize=(x_size, y_size))
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
ax.set_xticks([])
ax.set_yticks([])
cmap = cmap or get_cmap("inferno", 0.15, 0.95, 1.15)
npoints_tri = len(data) // 4
if len(data) > 4000:
npoints_tri = max(npoints_tri, 4000)
im, line1, line2 = get_new_artists(
npoints_tri, learner, data, ax, xy_size, npoints_interp, cmap
)
ax.axis("off")
ax.set_xlim(-x_size / 2, x_size / 2)
ax.set_ylim(-y_size / 2, y_size / 2)
print(f"Saving {save_fname}")
if save_fname is not None:
fig.savefig(
save_fname, format=save_fname.suffix[1:], pad_inches=0, dpi=dpi,
)
else:
plt.show()
plt.close(fig)
folder = Path("background-defense")
folder.mkdir(exist_ok=True)
###Output
_____no_output_____
###Markdown
Pick one using a widget
###Code
import adaptive
from ipywidgets import Dropdown, IntText, interact
def get_background(fname, cmap, x_resolution, y_resolution):
learner = load_learner(fname)
cmap = get_cmap(cmap, 0.10, 0.85, 0.85)
generate_cover(
learner,
None,
cmap=cmap,
npoints_interp=2000,
resolution=(x_resolution, y_resolution),
)
interact(
get_background,
fname=Dropdown(options=options),
cmap=Dropdown(options=plt.colormaps(), value="inferno"),
x_resolution=IntText(value=4096),
y_resolution=IntText(value=2160),
)
###Output
_____no_output_____
###Markdown
Generate all
###Code
for i, fname in enumerate(options):
learner = load_learner(fname)
print(f"cover {i+1}, npoints: {learner.npoints}")
cmap = get_cmap("inferno", 0.10, 0.85, 0.85)
generate_cover(
learner,
fname_out(folder, fname).with_suffix(".png"),
cmap=cmap,
npoints_interp=2000,
dpi=300,
)
print()
###Output
_____no_output_____ |
Illyuvieva_Tafintseva-2.ipynb | ###Markdown
Hi, our names are *Illyuvieva Alice* and *Tafintseva Albina* and we decided to write a tutorial on the subject of the ***ARIMA model***. In this tutorial we want to illustrate how an ARIMA model can be applied in Python, how we can measure its efficiency, and how to make predictions. We have chosen COVID-19 data; this data contains information about countries from 2020-01-22 to 2021-02-01. The data is collected in such a way that all indicators are summed with the previous ones, so each day does not show the statistics of that day alone, but the overall statistics for all previous days since 2020-01-22. For each day and each country we can see the cumulative number of people with confirmed COVID-19 (up to that day), the number of people who recovered (from 2020-01-22 up to that day), and the number of people who died from 2020-01-22 up to that day. In the next cell below you can see all the libraries which are going to be used in order to upload data, modify data, construct the ARIMA model, extract train and test parts from the data, get predictions, estimate the model, visualize, and so on.
###Code
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from matplotlib import pyplot
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.regression.rolling import RollingOLS
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from pandas import DataFrame
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from math import sqrt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller
from sklearn.metrics import accuracy_score
import sklearn.metrics as sm
###Output
_____no_output_____
###Markdown
...Uploading data
###Code
csv_url = 'time-series-19-covid-combined.csv'
data = pd.read_csv(csv_url, index_col=0, parse_dates=True, squeeze=True)
data
###Output
_____no_output_____
###Markdown
Now you can see the visualized data. Therefore, it is time to tell you what task we want to solve and how we will need to modify the data in order to complete it. So, we decided that we want to explore statistics about deaths in Afghanistan and make predictions about the number of deaths in the future. We state: H0 (null hypothesis) = the number of deaths in Afghanistan is going to decrease. 1. We will take only the data concerning Afghanistan (trim the number of rows in the dataset). 2. We will drop all columns from the resulting dataset, apart from the dates and the number of deaths. 3. We are going to create a column 'deaths per day' in which deaths per day will be calculated (smart, right?) - the algorithm is pretty simple and understandable from the code. That's all for the dataset modifications, and below you can see the code for everything described above. Enjoy!
###Code
data = data.drop(['Country/Region', 'Province/State', 'Confirmed', 'Recovered'], axis = 1)
data = data.iloc[0:377]
data
new_data_deaths = list()
old_data_deaths1 = list(data['Deaths'])[1:]
old_data_deaths2 = list(data['Deaths'])[:-1]
new_data_deaths.append(list(data['Deaths'])[0])
new_data_deaths.extend(np.array(old_data_deaths1) - np.array(old_data_deaths2))
data['Deaths_per_day'] = new_data_deaths
data
data = data.drop(['Deaths'], axis = 1)
data
data.plot()
###Output
_____no_output_____
###Markdown
**Examine whether the data is stationary or not:** 1. H0: the time-series data is non-stationary. 2. Apply the adfuller() function to our time series. 3. Make a conclusion (if the p-value of the test is below the chosen significance level, e.g. 0.05, reject H0).
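A minimal sketch of that decision rule (assuming the usual 5% level; `adfuller` returns the p-value at index 1):
```python
p_value = adfuller(data['Deaths_per_day'])[1]
print('non-stationary (fail to reject H0)' if p_value > 0.05 else 'stationary (reject H0)')
```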
###Code
result_of_adfuller = adfuller(data['Deaths_per_day'])
print('p-value: %f' % result_of_adfuller[1])
###Output
p-value: 0.286124
###Markdown
The p-value is greater than the significance level => the process is non-stationary => we have to difference the series and see at what order of differencing we are able to achieve stationarity. This is also vital because when we specify our model we will have to pass order = (p, d, q), where d stands for the order of differencing. First, let's look at the original data:
###Code
plt.rcParams["figure.figsize"] = (20,3)
print(plt.plot(data['Deaths_per_day']))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa483d1d10>]
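###Markdown
As a quick reference (standard definitions), the differences shown in the next two plots are $$y'_t = y_t - y_{t-1}, \qquad y''_t = y'_t - y'_{t-1} = y_t - 2y_{t-1} + y_{t-2}.$$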
###Markdown
Now our data with the first order of differencing:
###Code
plt.rcParams["figure.figsize"] = (20,3)
difference_order_1 = data['Deaths_per_day'].diff()
print(plt.plot(difference_order_1))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa48457890>]
###Markdown
Now our data with the second order of differencing:
###Code
plt.rcParams["figure.figsize"] = (20,3)
difference_order_2 = data['Deaths_per_day'].diff().diff()
print(plt.plot(difference_order_2))
###Output
[<matplotlib.lines.Line2D object at 0x7ffa43b99810>]
###Markdown
We decided to take a differencing order of 1, as orders greater than 1 do not change much. Now we specify our model. We do not need the date column, as we are constructing ARIMA models on the time series of deaths. Let's start by finding the parameters of the model: 1. AR term (p) 2. MA term (q) 3. the differencing term d has already been found above. AR term: the required number of AR terms can be found by inspecting the Partial Autocorrelation (PACF) plot.
###Code
print(plot_acf(difference_order_1.dropna()))
print(plot_pacf(difference_order_1.dropna()))
###Output
Figure(1440x216)
Figure(1440x216)
###Markdown
Looking at the ACF and PACF plots above, we can conclude that we have an ARMA model => we have to specify both p and q. When both terms (p and q) are supposed to be nonzero, we can use the AIC (Akaike information criterion) and the BIC (Bayesian information criterion). For a better forecasting model it is preferable to use AIC; BIC is better when searching for a better explanatory model. I will write a loop which will help determine the order of the model.
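For reference, the standard definitions (with $k$ the number of estimated parameters, $\hat{L}$ the maximized likelihood and $n$ the number of observations; lower values are better): $$\mathrm{AIC} = 2k - 2\ln\hat{L}, \qquad \mathrm{BIC} = k\ln n - 2\ln\hat{L}.$$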
###Code
for p in range(6):
for q in range(6):
model_tr = SARIMAX(data, order=(p,1,q))
model_tr_fit = model_tr.fit()
print(p, q, model_tr_fit.aic, model_tr_fit.bic)
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/statespace/sarimax.py:977: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
warn('Non-invertible starting MA parameters found.'
###Markdown
The best AIC result is obtained for p = 5 and q = 4
###Code
model = SARIMAX(data, order=(5,1,4))
model_fit = model.fit()
print(model_fit.summary())
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/base/tsa_model.py:527: ValueWarning: No frequency information was provided, so inferred frequency D will be used.
% freq, ValueWarning)
###Markdown
Interpretation of our ARIMA model: $Y_t = Y_{t-1} + e_t - 0.8205\,e_{t-1}$ (for the auto-ARIMA model)
###Code
residuals = DataFrame(model_fit.resid)
residuals.plot()
pyplot.show()
residuals.plot(kind='kde')
pyplot.show()
print(residuals.describe())
print(model_fit.plot_diagnostics(figsize=(7,7)))
###Output
Figure(504x504)
###Markdown
Interpretation of residual plots: 1. Standardized residuals: they should fluctuate around zero with no obvious pattern. 2. Histogram plus estimated density: the residual distribution should be close to normal. 3. Normal Q-Q: all the dots should fall in line with the red line; any significant deviations would imply the distribution is skewed. => We can conclude that our residual distribution may be a little skewed. 4. Correlogram: the residual autocorrelations should be insignificant. **Accuracy metrics** So here are some of the commonly used accuracy metrics for time-series data: 1. Mean Absolute Percentage Error (MAPE) 2. Mean Error (ME) 3. Mean Absolute Error (MAE) 4. Mean Percentage Error (MPE) 5. Root Mean Squared Error (RMSE) 6. Min-Max Error (minmax). The standard definitions are sketched below, and then we compute these metrics for our model.
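For reference, a sketch of the definitions matching the `forecast_accuracy` function below (with $F_t$ the forecast and $A_t$ the actual value): $$\mathrm{MAPE}=\tfrac{1}{n}\sum\Big|\tfrac{F_t-A_t}{A_t}\Big|,\quad \mathrm{ME}=\tfrac{1}{n}\sum(F_t-A_t),\quad \mathrm{MAE}=\tfrac{1}{n}\sum|F_t-A_t|,$$ $$\mathrm{MPE}=\tfrac{1}{n}\sum\tfrac{F_t-A_t}{A_t},\quad \mathrm{RMSE}=\sqrt{\tfrac{1}{n}\sum(F_t-A_t)^2},\quad \mathrm{minmax}=1-\tfrac{1}{n}\sum\tfrac{\min(F_t,A_t)}{\max(F_t,A_t)}.$$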
###Code
def forecast_accuracy(forecast, actual):
mape = mean_absolute_percentage_error(actual, forecast) # MAPE
me = np.mean(forecast - actual) # ME
mae = mean_absolute_error(actual, forecast) # MAE
mpe = np.mean((forecast - actual)/actual) # MPE
rmse = sqrt(mean_squared_error(actual, forecast)) # RMSE
mins = np.amin(np.hstack([forecast[:,None],
actual[:,None]]), axis=1)
maxs = np.amax(np.hstack([forecast[:,None],
actual[:,None]]), axis=1)
minmax = 1 - np.mean(mins/maxs) # minmax
return({'mape': mape, 'me': me, 'mae': mae,
'mpe': mpe, 'rmse': rmse, 'minmax': minmax})
###Output
_____no_output_____
###Markdown
**Rolling Forecast ARIMA**
###Code
X = data.values
size = int(len(X) * 0.66)
train, test = X[0:size], X[size:len(X)]
history = list(train[:])
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=(5,1,0))
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test[t]
history.append(obs)
print('predicted = %f, expected = %f' % (yhat, obs))
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show()
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/arima_model.py:472: FutureWarning:
statsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have
been deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the .
between arima and model) and
statsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.
statsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and
is both well tested and maintained.
To silence this warning and continue using ARMA and ARIMA until they are
removed, use:
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',
FutureWarning)
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',
FutureWarning)
warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)
###Markdown
**Forecast Accuracy**
###Code
print(forecast_accuracy(np.array(predictions), np.array(test)))
pred = list()
test1 = list()
for i in range(len(test)):
pred.append(int(predictions[i]))
test1.append(int(test[i]))
print(f"Accuracy score: {accuracy_score(test1, pred)}")
print(f"R-squared: {round(sm.r2_score(test1, pred), 2)}")
###Output
Accuracy score: 0.12403100775193798
R-squared: 0.19
|
Machine Learning/Course files/stdDevVariance/StdDevVariance.ipynb | ###Markdown
Standard Deviation and Variance
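For reference, NumPy's `var()` and `std()` below use the default `ddof=0`, i.e. the population statistics $$\sigma^2=\frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2, \qquad \sigma=\sqrt{\sigma^2},$$ where $\mu$ is the mean of the sample.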
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(100.0, 50.0, 10000)
plt.hist(incomes, 50)
plt.show()
incomes.std()
incomes.var()
###Output
_____no_output_____ |
programcao_python.ipynb | ###Markdown
1 - Introdução ao Python O Python é uma das linguagens de programação mais populares do mundo. Criado no início da década de 1990, ele tem uma ampla variedade de usos, desde automatizar tarefas repetitivas e escrever aplicativos Web até criar modelos de machine learning e implementar redes neurais. Cientistas de dados, matemáticos e pesquisadores em particular gostam do Python devido à sua sintaxe avançada e fácil de entender e à grande variedade de pacotes open-source disponíveis. Pacotes são bibliotecas de códigos compartilhados comumente usados disponíveis gratuitamente para qualquer pessoa.O Python tem uma sintaxe simples, fácil de aprender que enfatiza a legibilidade. Aplicativos escritos em Python podem ser executados em praticamente qualquer computador, incluindo aqueles que executam Windows, macOS e distribuições populares do Linux. Além disso, o ecossistema contém um conjunto avançado de ferramentas de desenvolvimento para escrever, depurar e publicar aplicativos do Python.Por fim, o Python tem o suporte de uma comunidade de usuários ativa que está ávida por ajudar novos programadores a aprender o estilo do Python, em que você não apenas acerta a sintaxe, mas usa a linguagem da maneira que ela foi projetada para ser usada. 2 - Lógica de Programação Algoritmos Conjunto das regras e procedimentos lógicos perfeitamente definidos que levam à solução de um problema em um número finito de etapas.**Mas o que isso quer dizer?**Quando vamos à padaria comprar pão francês, por exemplo, precisamos criar um algoritmo para executar tal ação. Nessa sequência, nós ordenamos os atos em uma sucessão que faça sentido e seja capaz de atingir o objetivo inicial, afinal de contas, não faz sentido algum primeiro sair à rua para depois voltar e pegar a carteira, por exemplo. É importante entender também que existe mais de um algoritmo capaz de solucionar o meu problema e, desde que ele funcione, não existe certo ou errado – mas o conceito de mais ou menos eficiente é válido aqui.Seguem dois exemplos de sequências onde o intuito das duas é, no final, ter um ovo frito no prato.Enquanto o primeiro exemplo é mais cuidadoso e evita que a pessoa frite um ovo podre ou acabe espirrando óleo acidentalmente ao jogar o ovo na gordura quente, por exemplo, a segunda é mais rápida e prática. As duas estão certas porque ambas conseguem chegar ao meu objetivo final, mas, talvez, a primeira seja mais eficiente por evitar problemas ao longo dos seus procedimentos. O que são variáveis?Na matemática usamos variáveis para fazer contas por exemplo:x = 5 + 10x = 15No exemplo acima **x** é a variável e está sendo usada para armazenar algo que podemos usar depois.Na programação não é muito diferente disso, também usamos as variáveis para armazenar o que precisamos, como se fossem caixinhas para organizar tudo que vamos usar, e sabermos onde está.
###Code
numero = 5 + 10
print(numero)
caixa_de_sapatos = 15
print(caixa_de_sapatos)
###Output
15
###Markdown
No exemplo acima estamos guardando **"Sapatos"** na variável **caixa_de_sapatos** e **"Camisas"** na variável **caixa_de_camisas**Cada variável só aceita uma coisa de cada vez, se eu quiser botar **"Sapatos"** na variável **caixa_de_camisas** não vai sobrar mais espaço para as **"Camisas"**. Veja o exemplo:
###Code
caixa_de_sapatos = "Camisas"
print(caixa_de_sapatos)
###Output
Camisas
###Markdown
3 - Comandos Básicos Print Como vocês devem ter notado nos exemplo anteriores utilizamos o "print" para exibir algumas coisas na tela.O "print" é uma das muitas funções que temos no python, e a principal função dela é exibir o que chamamos de saída de dados.Normalmente utilizamos ele para exibir mensagens para o usuário final, mas ele também é uma boa ferramenta para testar se o seu código está se comportando como você planejou, te possibilitando exibir o conteúdo das variáveis enquanto o programa está funcionando(em tempo de execução).
###Code
caixa_de_fruta1 = "b"
print("Passo 1:" + caixa_de_fruta1)
caixa_de_fruta2 = caixa_de_fruta1 + "ana"
print("Passo 2:" + caixa_de_fruta2)
caixa_de_fruta4 = caixa_de_fruta2 + "na"
print("Final:" + caixa_de_fruta4)
###Output
Passo 1:b
Passo 2:bana
Final:banana
###Markdown
Type A função type exibe o tipo de um valor ou variável. O valor ou variável, que é chamado de argumento da função, tem que vir entre parênteses. É comum se dizer que uma função ‘recebe’ um valor ou mais valores e ‘retorna’ um resultado. O resultado é chamado de valor de retorno.
###Code
x = True
tipo = type(x)
print(tipo)
###Output
<class 'bool'>
###Markdown
Exercícios Crie uma variável que receba seu nome e outra que receba seu e-mail e exiba ela na tela.
###Code
#Aqui vai meu nome
nOme = "Russel Franco"
#Aqui vai meu email
emAil = "[email protected]"
#Aqui eu exibo meus dados
print(nOme)
print(emAil)
'''
Neste exemplo irei mostrar
como
funciona
os comentários de múltimplas linhas
print(123)
'''
print("Texto")
###Output
Texto
###Markdown
Importar biblioteca Uma das coisas que torna o python tão poderoso é a possibilidade de importar bibliotecas que possuem funções que não vem incluídas por padrão no python, mas podem ser incluídas conforme a sua necessidade.Como exemplo temos a biblioteca "math" que serve para fazer diversos cáculos matemáticos como por exemplo raiz quadrada.Veja o exemplo:
###Code
import math
x = math.sqrt(4)
print(x)
###Output
2.0
###Markdown
ExercícioUse a biblioteca math para realizar a raiz quadrada de 25 e exibir na tela.
###Code
##Escreva sua solução aqui
import math
raiz_quadrada = math.sqrt(25)
print(raiz_quadrada)
###Output
5.0
###Markdown
4 - Variáveis e Tipos de dados Python possui vários tipos básicos ou fundamentais: números inteiros, números de ponto flutuante, booleanos, números complexos, cadeias de caracteres, etc.Números inteirosOs tipos de variáveis em Python são inferidos automaticamente pelo interpretador, sem que haja a necessidade de se especificar o tipo da variável no momento de sua declaração. Os exemplos abaixo deixarão esses conceitos mais claros.
###Code
# Números inteiros
x = 1
y = 2
resultado = x + y
print(resultado)
print(type(resultado))
###Output
3
<class 'int'>
###Markdown
Números de ponto flutuanteSe o nome ponto flutuante te assusta, não se preocupe. Números de ponto flutuante nada mais são que números com vírgula, ou como costumamos dizer "números quebrados", constrastando com os números inteiros com os quais estamos mais acostumados. Vejamos alguns exemplos de expressões com números de ponto flutuante.
###Code
# Números de ponto flutuante
x = 1.1
y = 2.2
resultado = x + y
print(resultado)
print(type(resultado))
###Output
3.3000000000000003
<class 'float'>
###Markdown
É importante notar que se pelo menos um dos valores envolvidos em uma expressão numérica for um número de ponto flutuante, o retultado da expressão também será um número de ponto flutuante, como mostrado no exemplo abaixo.
###Code
x = 10
y = 2.5
resultado = x * y
print(resultado)
print(type(resultado))
###Output
25.0
<class 'float'>
###Markdown
Strings ou cadeias de caracteres Strings nada mais são do que sequências (cadeias) de caracteres. Em outras palavras, uma string é simplesmente uma sequência de zero ou mais letras juntas. Vejamos alguns exemplos.
###Code
string_vazia = ''
uma_letra = 'a'
varias_letras = 'abacate'
print(type(string_vazia))
print(type(uma_letra))
print(type(varias_letras))
###Output
<class 'str'>
<class 'str'>
<class 'str'>
###Markdown
Você deve ter percebido que nos exemplos acima, a sequência de letras das strings aparece entre aspas simples. Em Python, assim como em outras linguagens, precisamos das aspas para declarar uma variável do tipo string. Tanto aspas simples quanto aspas duplas funcionam. A razão da necessidade das aspas é simples: elas diferenciam uma declaração de uma variável do string de uma atribuição entre duas variáveis. O exemplo abaixo vai deixar isso mais claro.
###Code
v = 'var'
s = v
print(v)
print(s)
###Output
var
var
###Markdown
Valores booleanos* Um booleano (ou tipo ***bool***), em ciência da computação, é um tipo de dado lógico que pode ter apenas um de dois valores possíveis: verdadeiro ou falso. Em Python, condicionais booleanas são usadas para decidir quais trechos do código serão executados ou repetidas. Esses valores são úteis para representar, por exemplo, o resultado de uma comparação. Experimente:
###Code
# Criando as variáveis
a = 5
b = 6
print("Valor de a: ", a)
print("Valor de b: ", b)
c = a < b # c mostra o valor comparado entre a < b
print("Valor de c: ", c)
d = a > b # d mostra o valor comparado entre a > b
print("Valor de d: ", d)
e = a == b # e mostra o valor comparado entre a == b
print("Valor de e: ", e)
a = True
true = True
true = "True"
type(true)
###Output
_____no_output_____
###Markdown
Fiquem atentos que True e False não são a mesma coisa que strings (Textos ou Frase), esses valores são do tipo `Bool` e são usados para expressa um valor de verdade (Verdadeiro ou Falso), portanto `True`(**Bool**) não é a mesma coisa que "True"(**String**). Operadores e expressões lógicasAssim como expressões aritméticas podem ser formadas por operadores aritméticos (como 2 + 3 * 4), expressões lógicas são formadas por operadores lógicos. Para construir expressões lógicas usando operadores lógicos será possivel utilizar três operadores lógicos: and, or e not. O significado desses operadores é bastante intuitivo, uma vez que ele equivale à interpretação que fazemos deles na língua inglesa (`and` = E | `or` = Ou | `not` = Não ).1. **Operador** `and` Dados dois valores booleanos A e B, o operador lógico and resulta em True apenas quando A e B foram ambos True, e retorna False caso contrário.Execute o programa e veja o resultado, em seguida troque o valor `False` por `True` e execute novamente:
###Code
C = False
D = False # Após rodar uma primeira vez, troque este valor de False para True e execute novamente.
print ("Os valores de C e D são: ", C and D) # Comparação lógica entre as duas variaveis.
c = True
print(c)
###Output
True
###Markdown
2. **Operador** `or`Dados dois valores booleanos A e B, o operador lógico `or` resulta em `False` apenas quando A e B foram ambos False, e retorna True caso contrário.Execute o programa e veja o resultado, em seguida troque o valor `True` por `False` e execute novamente:
###Code
A = True # Após rodar uma primeira vez, troque este valor de True para False e execute novamente.
B = False
print ("Os valores de A ou B são: ", A or B)
###Output
Os valores de A ou B são: True
###Markdown
Aqui temos uma tabela que mostra como é feito a comparação utilizando o operador `or` para todas as combinações de A e B:or |A = True | A = False:------ |:-----: |:----:B = True |True |TrueB = False |True |False 3. **Operador** `not` O operador lógico not muda o valor de seu argumento, ou seja, se `not` for igual a `True`, então ele responde como `False` e se `not` for igual a `False` ele responde como `True`. 5 - Condicionais Estruturas CondicionaisNeste tópico falaremos sobre as estruturas condicionais em Python, mas antes de exibir um exemplo de estrutura condicional no Python, vamos a um pouco de teoria. Uma Estrutura de Condição, como o próprio nome já diz, verifica a condição dos argumentos passados e, executa um comando caso a condição seja verdadeira, como se pode ver no algorítimo abaixo:`SE condição (SE = IF)``ENTÃO comando`Na estrutura em Python vamos começar conhecendo os Operadores Condicionais do Python. Os Operadores Condicionais são utilizados para fazer as comparações dos valores que são passados e retornam o valor Verdadeiro ou Falso.Operador| Tipo| Valor---|---|---==| Igualdade| Verifica a igualdade entre dois valores.!=| Igualdade| Verifica a diferença entre dois valores.>| Comparação| Verificar se o valor A é maior que o valor B.<| Comparação| Verifica se o valor A é menor que o valor B.>=| Comparação| Verifica se o valor A é maior ou igual ao valor B.<=| Comparação| Verifica se o valor A é menor ou igual ao valor B.In| Seqüência| Verifica se o valor A está contido em um conjunto. 1. Estrutura Condicional SimplesAgora que já conhecemos os Operadores vamos ver como fazer uma estrutura condicional em Python. Incrementando o nosso exemplo do artigo anterior, iremos verificar se a soma dos valores que o usuário informou é maior que zero e exibir o resultado na tela.Abaixo podemos perceber a estrutura do condicional IF, execute o programa e veja o resultado, em seguida troque o valor da váriavel `valor` de **0** por **1** e execute novamente:
###Code
valor = 0
if valor > 0:
print ("Maior que Zero.")
###Output
_____no_output_____
###Markdown
Veja que quando foi executado o comando com a váriavel `valor` em valor **0**, o sistema não retornou absolutamente nada, pois como a comparação exige que a variável seja de número maior que **0** então enquanto o valor for este o sistema entenderá como valor booleano `False`, e se você alterar a váriavel `valor` para valor maior que **0** seja 1,2,3 etc, o sistema entenderá que a comparação é verdadeira ou seja `True`, pois a váriavel é maior que **0**. 2. Estrutura Condicional CompostaA Estrutura Condicional Composta executa um comando quando a condição for verdadeira e outra condição quando for falsa. Vamos melhorar o nosso exemplo anterior, agora teremos que mostrar a mensagem **"Menor que Zero"** caso o resultado da soma seja menor que zero, como podemos ver abaixo:
###Code
valor = 0
if valor > 1:
print ("Maior que um.")
else:
print ("Menor que um.")
###Output
Menor que um.
###Markdown
Agora usando uma Estrutura Condicional Composta, vemos que há resultado para ambos valores tanto como igual a **0** como também valores maiores que **0**. 3. Estrutura Condicional EncadeadasEstruturas Condicionais Encadeadas é usada quando precisamos verificar mais de uma condição, ou seja, um IF dentro de outro IF. Uma outra estrutura encadeada é como pode-se notar abaixo. Incrementando o nosso exemplo, agora teremos que exibir uma mensagem caso o valor seja igual a Zero.
###Code
valor = 5
if valor > 4:
print ("Maior que Quatro.")
elif valor > 3:
print ("Maior que Três.")
elif valor > 2:
print ("Maior que Dois.")
elif valor > 1:
print ("Maior que Um.")
else:
print ("Menor que Um.")
###Output
Menor que Um.
###Markdown
Neste exemplo podemos perceber um comando diferente, o elif. Este comando é a junção do comando ELSE+IF(SENÂOSE traduzindo para o português) que é utilizado nas Estruturas Condicionais Encadeadas. Cada condição é verificada em ordem. Se a primeira for falsa, a próxima é verificada, e assim por diante. Se uma delas for verdadeira, o ramo correspondente é executado e a instrução é encerrada. Mesmo se mais de uma condição for verdade, só o primeiro ramo verdadeiro é executado. 4. Estrutura Condicional AninhadasUma condicional também pode ser aninhada dentro de outra. Poderíamos ter escrito o exemplo na seção anterior desta forma:
###Code
valor = 0
if valor > 0:
print ("Maior que Zero.")
else:
if valor == 0:
print ("Igual a Zero.")
else:
print ("Menor que Zero.")
###Output
Igual a Zero.
###Markdown
A condicional exterior contém dois ramos. O primeiro ramo contém uma instrução simples. O segundo ramo contém outra instrução if, que tem outros dois ramos próprios. Esses dois ramos são instruções simples, embora pudessem ser instruções condicionais também.Embora a endentação das instruções evidencie a estrutura das condicionais, condicionais aninhadas são difíceis de ler rapidamente. É uma boa ideia evitá-las quando for possível.
###Code
###Output
_____no_output_____
###Markdown
6.1 - Strings Métodos de strings
###Code
string_pcdas = 'plataforma DE cieNcia de dAdos aplicada à saúde'
string_pcdas[11]
string_pcdas[11:13]
print('Maiúsculo:', string_pcdas.upper())
print()
print('Minúsculo:', string_pcdas.lower())
print()
print('Primeira letra da frase:', string_pcdas.capitalize())
print()
print('Primeira letra de cada palavra:', string_pcdas.title())
if 'CIENCIA' in string_pcdas:
print("Valor encontrado")
else:
print("Valor não encontrado")
if 'ciencia' in string_pcdas.lower():
print("Valor encontrado")
else:
print("Valor não encontrado")
if 'for' in string_pcdas.lower():
print("Valor encontrado")
else:
print("Valor não encontrado")
string_frutas = 'banana,maçã,pera,kiwi,uva,morango'
print(string_frutas.split(sep='k'))
string_espacos = 'Meu Email [email protected] '
print(string_espacos)
print(string_espacos.strip())
print(len(string_espacos))
print(len(string_espacos.strip()))
num_string = '1,564'
print(num_string.replace(',', '.'))
num_string = '1.564'
float(num_string)
num_string = '1,564'
num_string = num_string.replace(',', '.')
float(num_string)
nome = "Balthazar"
idade = 21
print(f'Olá meu nome é {nome} e tenho {idade} anos.')
###Output
_____no_output_____
###Markdown
6.2 - Listas Subconjuntos de uma lista (acesso por índices e slice notation);
###Code
lista_conteudos_do_curso = ["introdução", "python", "listas", "funções", 'pandas']
print(lista_conteudos_do_curso)
lista_conteudos_do_curso[1]
lista_conteudos_do_curso[1:4]
###Output
_____no_output_____
###Markdown
Adicionando um item à minha lista
###Code
lista_conteudos_do_curso.append("graficos")
lista_conteudos_do_curso.append("Machine Learning")
print(lista_conteudos_do_curso)
###Output
['introdução', 'python', 'listas', 'funções', 'pandas', 'graficos', 'Machine Learning']
###Markdown
Removendo um item da minha lista
###Code
lista_conteudos_do_curso.remove("funções")
print(lista_conteudos_do_curso)
lista_conteudos_do_curso.pop(0)
print(lista_conteudos_do_curso)
print(lista_conteudos_do_curso)
###Output
['python', 'listas', 'pandas', 'graficos']
###Markdown
Tamanho de uma lista
###Code
len(lista_conteudos_do_curso)
###Output
_____no_output_____
###Markdown
7 - Dicionários Estou programando um sistema de agenda de contatos telefônicos em Python. Para isso, preciso armazenar os números dos contatos. A princípio, podemos pensar em usar uma lista:
###Code
telefones = ['1234-5678', '9999-9999', '8765-4321', '8877-7788']
###Output
_____no_output_____
###Markdown
Tudo bem, temos os números de telefone armazenados. Mas… qual o sentido de termos uma lista de números soltos? De quem é o número que está na segunda posição?Precisamos, de algum modo, conectar os telefones a seus respectivos contatos. Já conhecemos um tipo que pode nos ajudar com isso a tupla:
###Code
contato = ('Yan', '1234-5678')
###Output
_____no_output_____
###Markdown
Para não precisarmos de uma variável para cada contato, podemos colocá-los direto em uma lista de contatos:
###Code
contatos_lista = [('Yan', '1234-5678'), ('Pedro', '9999-9999'),
('Ana', '8765-4321'), ('Marina', '8877-7788')]
###Output
_____no_output_____
###Markdown
Ok! Se quisermos acessar o número de telefone da Marina, podemos fazer:
###Code
print(contatos_lista[3][1])
###Output
8877-7788
###Markdown
Conseguimos! Agora, o número do Pedro: … Mas espera, qual é mesmo a posição do Pedro na nossa lista de contatos?Repare que do modo como está, mal faz diferença ter os nomes dos contatos salvos, porque só conseguimos acessar cada contato pela sua posição na lista. Será que não há um jeito melhor? Mapeando contatos com um dicionário Até agora temos uma lista de contatos em que, ao menos, cada contato tem seu nome e telefone conectados. Entretanto, por enquanto, só conseguimos acessar um contato individualmente pela sua posição na lista, e não pelo seu próprio nome.O ideal seria mapear o nome de cada contato com seu telefone, evitando outros problemas.Por exemplo, podemos falar que o contato Yan tem o número de telefone 1234-5678. Assim, quando quisermos saber qual o n de telefone do Yan, basta ir até o seu nome. Dessa forma, não precisamos decorar qual a posição na lista que o telefone se encontra, basta sabermos seu nome de contato.Veja que, nesse caso, estamos criando uma espécie de dicionário, parecido com os dicionários de língua portuguesa, ou inglesa. Nesses dicionários, temos uma chave que é a palavra que estamos a buscar, que no nosso caso é o nome de contato.Quando achamos essa palavra, podemos ver o seu significado, isto é, o valor daquela palavra na língua, que no nosso caso, é o número de telefone.Esse tipo de estrutura é muito utilizado em diversas linguagens de programação (mas normalmente tem outro nome, como array associativo. Com ela, conseguimos ter um comportamento parecido com o de dicionários.Bem, vamos falar para o Python criar um desses dicionários para a gente. No Python, usamos chaves ({}) para construir nossos dicionários. Neste caso, falamos para o Python, que a chave 'Yan' possuí (:) o valor 1234-5678 como seu telefone:
###Code
contatos = {'Yan': '1234-5678'}
print(type(contatos))
###Output
<class 'dict'>
###Markdown
E olha o tipo da variável contatos que acabamos de criar:dict - de fato um dicionário. Mas será que vamos ter que redigitar todos os dados de contatos que já colocamos em nossa lista de contatos? Também podemos criar um dicionário usando sua função construtora dict() e passando, como parâmetro, uma lista de tuplas, como em nosso caso:
###Code
contatos_lista = [('Yan', '1234-5678'), ('Pedro', '9999-9999'),
('Ana', '8765-4321'), ('Marina', '8877-7788')]
contatos = dict(contatos_lista)
print(contatos)
###Output
{'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'Marina': '8877-7788'}
###Markdown
Certo, temos nossa estrutura pronta! Mas espera aí, o nosso dicionário não está ordenado em ordem alfabética, ele não tem ordem nenhuma… Como podemos acessar seus itens? Acessando os itens de um dicionário Podemos acessar os valores dele de forma similar a como acessamos os valores de uma lista, por exemplo, com a diferença de que usamos as chaves que definimos no lugar dos índices numéricos:
###Code
print(contatos['Ana'])
###Output
8765-4321
###Markdown
Tudo bem! Até que, depois de um tempo, quis ver se eu encontrava o telefone de um velho amigo João. Fiz o seguinte:
###Code
print(contatos['João'])
###Output
_____no_output_____
###Markdown
Hum… uma exceção de tipo KeyError indicando que a chave 'João' não foi encontrada. Mas é um pouco estranho imprimir toda essa mensagem para o usuário, não é? Pode ser confuso... Será que não podemos substituir isso?Os dicionários possuem um método específico para busca de valores, o get(), no qual podemos passar como parâmetros a chave que queremos e um valor padrão para retornar caso essa chave não seja encontrada:
###Code
print(contatos.get('Yan', 'Contato não encontrado'))
print(contatos.get('João', 'Contato não encontrado'))
###Output
1234-5678
Contato não encontrado
###Markdown
Muito melhor agora!Também podemos verificar se um contato está em nosso dicionário através da palavra chave in:
###Code
print('Yan' in contatos)
###Output
True
###Markdown
Como esperado!Esses dias, achei um número solto aqui e quis verificar se ele estava em minha agenda:
###Code
print('9999-9999' in contatos)
###Output
False
###Markdown
Ué! Mas esse número está sim na agenda, é o número do Pedro! Por que será que o resultado foi False, então?Acontece que o in, usado dessa forma, verifica apenas as chaves do dicionário, não os valores. Para obtermos valores, podemos usar o método values():
###Code
print('9999-9999' in contatos.values())
###Output
True
###Markdown
Agora sim! Temos nossa estrutura de mapeamento e já conseguimos visualizar os dados dela. Mas e agora, o que mais conseguimos fazer? Adicionando valores ao dicionário Encontrei meu amigo João e, finalmente, decidi adicionar o número dele na minha agenda. Mas… como posso fazer isso com nosso dicionário de contatos? Fui tentar usar um método append(), como nas listas, e olha o que apareceu: Esse método não existe… Ainda tentei criar um outro dicionário e fazer uma soma, mas o resultado foi esse:Traceback (most recent call last): File "", line 1, in TypeError: unsupported operand type(s) for +: 'dict' and 'dict'Também não funciona! A sintaxe de adicionar um item em um dicionário é um pouco diferente de que em outros tipos do Python, mas também bastante objetiva. Por exemplo, se queremos adicionar o João no nosso dicionário de contatos, basta atribuir seu telefone na chave 'João':
###Code
contatos['João'] = '8887-7778'
print(contatos)
###Output
{'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'Marina': '8877-7788', 'João': '8887-7778'}
###Markdown
Deu certo! Removendo itens do dicionário Infelizmente, minha amiga Marina perdeu o celular e, consequentemente, não era mais dona do número salvo em meu dicionário de contatos. Precisamos, agora, apagar o item que corresponde a ela. Mas como?Uma maneira simples é usando o statement del, dessa forma:
###Code
del contatos['Marina']
print(contatos)
###Output
{'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'João': '8887-7778'}
###Markdown
Certo! Mas e se tentarmos remover um item que não existe?
###Code
del contatos['Catarina']
###Output
_____no_output_____
###Markdown
Um KeyError, como aquele que obtivemos ao tentar pegar um item que não existia! Para evitar essa exceção, também temos um método de dicionário que pode nos ajudar - o pop().O método pop(), além de remover o elemento com a chave especificada do dicionário, nos retorna o valor desse elemento. Também podemos definir um valor padrão de retorno, para caso a chave não seja encontrada:
###Code
contatos = {'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321',
'Marina': '8877-7788', 'João': '8887-7778'}
print(contatos.pop('Marina', 'Contato não encontrado'))
print(contatos.pop('Catarina', 'Contato não encontrado'))
print()
print(contatos)
###Output
8877-7788
Contato não encontrado
{'Yan': '1234-5678', 'Pedro': '9999-9999', 'Ana': '8765-4321', 'João': '8887-7778'}
###Markdown
Crie uma lista que receba sua idade e sua altura e exiba na tela
###Code
meus_dados = [32, 1.77]
meus_dados
###Output
_____no_output_____ |
Untitled-Copy1 (1).ipynb | ###Markdown
ASSIGNMENT-1 DAY-3
###Code
altitude=6000
if altitude<=1000:
print("safe to land")
elif altitude<=5000:
print("come down to 1000ft")
else:
print("go around and try later")
altitude=2500
if altitude<=1000:
print("safe to land")
elif altitude<=5000:
print("come down to 1000ft")
else:
print("go around and try later")
altitude=1000
if altitude<=1000:
print("safe to land")
elif altitude<=5000:
print("come down to 1000ft")
else:
print("go around and try later")
###Output
safe to land
###Markdown
ASSIGNMENT-2 DAY-3
###Code
lower = 1
upper = 200
for num in range(lower, upper + 1):
if num > 1:
for i in range(2,num):
if (num % i) == 0:
break
else:
print(num,"is a prime")
###Output
2 is a prime
3 is a prime
5 is a prime
7 is a prime
11 is a prime
13 is a prime
17 is a prime
19 is a prime
23 is a prime
29 is a prime
31 is a prime
37 is a prime
41 is a prime
43 is a prime
47 is a prime
53 is a prime
59 is a prime
61 is a prime
67 is a prime
71 is a prime
73 is a prime
79 is a prime
83 is a prime
89 is a prime
97 is a prime
101 is a prime
103 is a prime
107 is a prime
109 is a prime
113 is a prime
127 is a prime
131 is a prime
137 is a prime
139 is a prime
149 is a prime
151 is a prime
157 is a prime
163 is a prime
167 is a prime
173 is a prime
179 is a prime
181 is a prime
191 is a prime
193 is a prime
197 is a prime
199 is a prime
|
examples/basic/tutorial.ipynb | ###Markdown
WARP TutorialThis notebook takes the pedagogy of learn by example, showing how to use WARP to organize your project's pipeline and keep track of hyperparameters therein.
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Declaring pipesThe following cell(s) will show how to define a pipe that can be recognized by WARP.Pipes currently must be declared in external files (i.e. not within a notebook) with a strict one-pipe-per-file rule.This next cell shows the contents of the file `./example/A.py`, which defines an initial pipe for this pipeline.The following features of WARP are showcased:- **Parameters:** Parameters are intended to be tunable values necessary to reproduce a pipe's output (e.g. learning rate of a neural network). Parameters are limited to simple types (string, numeric types) as well as basic iterables (list, dict) containing simple types.The first argument of the `Parameter` constructor is always the parameter name and the keyword argument `default` can be passed to specify a default value.If no `default` argument is passed (e.g. `Parameter('text')`), then the parameter assumes the default value of the string equal to its name (concretely, the value contained in `Parameter('text')` is `'text'`).A parameter can be treated within `Main` like a variable containing the value that `Parameter` wraps.- **Parameter config files:** A config file for the pipe parameters can be specified using the `ParameterFile` class, which takes one string argument corresponding to the file path of the config file (relative to the working directory).This config file can be in yaml or json format -- the codeblock below shows a yaml example:``` contents of the file `config/A.yml`message: text_yaml```Parameters in the config file must have names that match their variable names (e.g. `message: text_yaml` will be matched to an attribute `message = Parameter(...)`).Config files can be used to override the default values for a parameter (in this case, we override the `message` string value `'text'` with `'text_yaml'`), thus you don't need to specify every parameter of the pipe if the default values suffice. All parameter values used at runtime are logged by WARP automatically.- **Product specification:** you must specify a pipe's products using the `Product` class, which takes one string argument corresponding to the file path for the product to be generated.You can treat instances of this object as the string it contains.The `Product` class accepts strings, lists, and dicts.- **Lazy imports:** the preference when declaring WARP pipes is to lazily import modules that are used within the pipe. You don't have to adhere to this rule of thumb, but doing so will significantly speedup the launching of sessions.`warp.utils.GlobalImport` provides a convenient way of doing lazy imports for your pipe, as shown in the next cell.Using `GlobalImport` will allow you to import any number of modules which will be available to any object within scope.
###Code
"""example/A.py
Creates two output files `A1.txt` and `A2.txt` that both contain the value of the `message` parameter.
The `message` parameter value is stored in the `config/A.yml` file.
"""
from warp import Pipe, Product, Parameter, ParameterFile
from warp.utils import GlobalImport
class Main(Pipe):
### parameters
config_file = ParameterFile('config/A.yml')
message = Parameter('text')
### products
products = Product([
'data/A1.txt',
'data/A2.txt'])
@staticmethod
def makedirs(path :str) -> None:
# `os` module is available due to GlobalImport
os.makedirs(path, exist_ok=True)
def run(self) -> None:
# lazy imports
with GlobalImport(globals()):
import os
self.makedirs('data')
for p in self.products:
with open(p, 'w') as f:
f.write(self.message)
###Output
_____no_output_____
###Markdown
Instantiating the graph
###Code
from warp import PipeGraph, WorkSpace
from example import A, B, C, D
pg = PipeGraph()
###Output
_____no_output_____
###Markdown
The next cell shows how external files can be included in a WARP pipeline.These kinds of nodes in the graph are considered source nodes from a flow perspective -- nodes with no parents.Note that source nodes are not a required component of a WARP pipeline, they are only a convenience for tracking externally generated data in your workflow.Intuitively, one only needs to specify a product with no pipe file and no parent_products.Here, we add a config file for a downstream pipe -- this is not necessary in practice since WARP will log parameter values later anyways.
###Code
pg.add(products='config/A.yml')
###Output
_____no_output_____
###Markdown
Next, we can add a pipe to the graph that depends on the product of previous pipe (in this case, the source pipe).We only need to name a product in the `parent_products` argument that was generated by pipe already attached to the graph.Specifying a config file as a product Since a pipe's products are always specified locally using the `Product` class (as seen above), specifying them via the `products` argument to `pg.add` is unnecessary. The only reason to specify products redundantly is for verbosity purposes, which can be handy when developing a pipeline.- **Note:** specifying products at `PipeGraph` creation time can lead to excessive verbosity -- prefer implicit product specifications, similar to parameters.
###Code
pg.add(
parent_products='config/A.yml',
pipe=A)
###Output
_____no_output_____
###Markdown
You can pass a module to the `parents` argument to automatically add all products of the pipe as dependencies.In this case, by passing `parents=A`, WARP adds `'data/A1.txt'` and `'data/A2.txt'` as dependencies of `B`.
###Code
pg.add(
parents=A,
pipe=B)
###Output
_____no_output_____
###Markdown
Since all of a pipe's products are specified as attributes, we can directly pass them in to the `parent_products` argument.This allows us to exclude certain products of an upstream pipe that we don't need.
###Code
pg.add(
parent_products=A.Main.products[1], # data/A2.txt
pipe=C)
###Output
_____no_output_____
###Markdown
**Note:** terminal pipes in the pipeline don't have to have products -- this is a choice left to the user.Here, we indicate optionality using the commented line.
###Code
pg.add(
parents=[A, C],
pipe=D)
ws = WorkSpace(
pathgraph=pg,
config_dir='./config')
###Output
_____no_output_____
###Markdown
To make sure that we're working in a fresh environment, we can clear the cache.You can clear all workspace sessions by passing the `clear_all=True` flag.You can clear the cache for a particular session by passing its id number (e.g. `ws.clear_cache(0)`).
###Code
ws.clear_cache(clear_all=True)
###Output
_____no_output_____
###Markdown
Visualizing the graphWe can visually inspect the graph we built using `view`. This can let us check for discrepencies in the graph at a glance.The `__source__` pipe corresponds to the external artifact that we attached using `pg.add(products='config/A.yml')`.Notice that `B` is a terminal node -- pipes `C` and `D` do not depend on this pipe in any way.
###Code
ws.show()
###Output
_____no_output_____
###Markdown
You can get a view that is more verbose (i.e. provides relative paths for everything):
###Code
ws.show(verbose=True)
###Output
_____no_output_____
###Markdown
Running pipes in the graphNow that we've defined our graph and instantiated our workspace, we're ready to start running pipes.
###Code
ws.methods
ws.pipes
###Output
_____no_output_____
###Markdown
We can view the source code of a pipe using `ws.view_pipe(...)`.Notice that this matches the example pipe declaration code written in the cell above.
###Code
ws.view_pipe('A')
###Output
_____no_output_____
###Markdown
We want to get the products of the `example.D` pipe but we don't want to think about which pipes we need to run in which order to get there (e.g. `example.B` is not necessary for `example.D`).We also don't want to remember which of the intermediate pipes we might need to rerun to preserve data provenance.This is the core functionality of WARP; we can use the `backfill` operation to find the ordered sequence of pipes that need to be rerun.Critically, WARP will only include a pipe in this sequence if an upstream pipe has been run more recently.When we run `ws.backfill('D')` in the following cell, we haven't actually run any of the pipes before, so WARP will choose the build sequence `A -> B -> C -> D`.
###Code
%%time
ws.backfill('D')
###Output
_____no_output_____
###Markdown
When we ran `backfill` on the pipe `D`, WARP automatically detected that the ancestral pipes of `D` were out of sync (in this case, they had never been built).In this case, WARP noticed that `D` has no dependency on `B`, so `B` was left unbuilt.We can confirm this by checking the `status` of `B`.
###Code
ws.status('B')
###Output
_____no_output_____
###Markdown
The `backfill` function is idempotent -- running backfill again does nothing.
###Code
ws.backfill('D')
###Output
_____no_output_____
###Markdown
---Suppose we want to regenerate the products of `A` with different parameter values.We can do this using `build` and passing in keyword arguments that correspond to parameter names.In this case, `A` has a parameter called `message`.
###Code
ws.build('A', message='new message')
###Output
_____no_output_____
###Markdown
If we try to build `D`, WARP will detect that the graph is now out-of-sync and will throw an error.The methodology of WARP is to put hard stops in place to prevent bugs that arise from using out-of-sync data.For dev purposes, you can force a pipe to run by passing the flag `force_build=True`.You probably shouldn't do this.
###Code
ws.build('D')
###Output
_____no_output_____
###Markdown
Per the hint, we can resync the graph via `backfill`.
###Code
ws.backfill('D')
###Output
_____no_output_____
###Markdown
Loading a sessionYou can load and inspect a past session by its id:
###Code
ws.load_session(0)
###Output
_____no_output_____
###Markdown
You can inspect metadata associated with a pipe in the session using `status`- **pipe cache directory** is the relative path to where the specified pipe's metadata and logs are cached- **Commit hash** is the hash of the git commit of the code that was used to run the pipe- **Last build** is a timestamp of when a pipe build was last initiated- **Elapsed time** is the number of seconds the last pipe build took to complete- **Parameters** shows the parameter values that were used in the last pipe build
###Code
ws.status('A')
###Output
_____no_output_____ |
week12/qlearn.ipynb | ###Markdown
Q-LearningAgent
###Code
import random
import numpy as np
#package for defining abstract base classes
from abc import ABC, abstractmethod
class AbstractQLearningAgent(ABC):
def __init__(self, env, epsilon_start, alpha, gamma):
self.env = env
self.epsilon = epsilon_start # exploration constant
self.alpha = alpha # learning rate
self.gamma = gamma # discount factor
self.actions = range(env.action_space.n) #actions
@abstractmethod
def get_q_value(self, state, action):
raise Exception("Not Implemented")
def choose_action(self, state):
#epsilon-greedy policy
if random.random() < self.epsilon:
action = random.choice(self.actions)
else:
q_values = [self.get_q_value(state, action) for action in self.actions]
action = np.argmax(q_values)
return action
@abstractmethod
def update(self, state, action, reward, next_state):
raise Exception("Not Implemented")
###Output
_____no_output_____
###Markdown
Tabular method with discretized states
###Code
class QLearningAgentTabular(AbstractQLearningAgent):
def __init__(self, env, epsilon_start, alpha, gamma, discretisations=10):
super().__init__(env, epsilon_start, alpha, gamma)
self.q_table = {}
self.discretisations = discretisations
def _discretize(self, state):
#returns the discrete_state?
low = self.env.observation_space.low
high = self.env.observation_space.high
diff = (high - low) / self.discretisations
discrete_state = (state - low) // diff
return tuple(discrete_state.tolist())
def get_q_value(self, state, action):
discrete_state = self._discretize(state)
if (discrete_state, action) not in self.q_table:
return 0.0
else:
return self.q_table[(discrete_state, action)]
def update(self, state, action, reward, next_state):
#do one learning step
discrete_state = self._discretize(state)
q_value_old = self.q_table.get((discrete_state, action))
q_value_new = reward + self.gamma * max([self.get_q_value(next_state, a) for a in self.actions])
if q_value_old is None:
self.q_table[(discrete_state, action)] = reward
else:
self.q_table[(discrete_state, action)] = q_value_old + self.alpha * (q_value_new - q_value_old)
###Output
_____no_output_____
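###Markdown
For reference, the `update()` method above implements the standard tabular Q-learning rule on the discretized state, with learning rate $\alpha$ and discount factor $\gamma$: $$Q(s,a) \leftarrow Q(s,a) + \alpha\big(r + \gamma \max_{a'} Q(s',a') - Q(s,a)\big).$$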
###Markdown
Linear Approximation
###Code
class QLearningAgentApproximator(AbstractQLearningAgent):
def __init__(self, env, epsilon, alpha, gamma, basis_functions_per_dimension=10):
super().__init__(env, epsilon, alpha, gamma)
low = env.observation_space.low
high = env.observation_space.high
xx, yy = np.meshgrid(np.linspace(low[0], high[0], basis_functions_per_dimension),
np.linspace(low[1], high[1], basis_functions_per_dimension))
radials = np.append(xx.reshape(xx.shape + (1,)), yy.reshape(yy.shape + (1,)), axis=2)
self.radials = radials.reshape((radials.size // 2, 2))
low = env.observation_space.low
high = env.observation_space.high
self.sigma_inv = 1 / (high - low) * basis_functions_per_dimension
self.weights = np.random.random((len(self.actions), basis_functions_per_dimension ** 2)) * 0.01
#shape of weights = (3, 100)
def _feature_vector(self, state):
r = self.sigma_inv * (self.radials - state)
return np.exp(-0.5 * np.sum(r * r, axis=1))
def get_q_value(self, state, action):
x = self._feature_vector(state)
return np.dot(np.transpose(x), self.weights[action])
def update(self, state, action, reward, next_state):
# do one learning step
x = self._feature_vector(state) #gradient!
approx = self.get_q_value(state, action)
target = reward + self.gamma * max([self.get_q_value(next_state, a) for a in self.actions])
#Stochastic gradient descent
self.weights[action] += self.alpha * (target - approx)*x
###Output
_____no_output_____
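###Markdown
In the approximator above, the Q-function is represented linearly as $\hat{Q}(s,a) = w_a^\top \phi(s)$, where $\phi(s)$ are Gaussian radial basis features, and `update()` takes one stochastic gradient step towards the TD target: $$w_a \leftarrow w_a + \alpha\big(r + \gamma \max_{a'} \hat{Q}(s',a') - \hat{Q}(s,a)\big)\,\phi(s).$$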
###Markdown
Main
###Code
%matplotlib notebook
import gym
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
import numpy as np
def episode(env, agent, gamma, render=False):
state = env.reset()
discounted_return = 0
done = False
time_step = 0
while not done:
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
agent.update(state, action, reward, next_state)
if render:
env.render()
discounted_return += reward*(gamma**time_step)
state = next_state
time_step +=1
return discounted_return
def train(env, agent, gamma, nr_episodes, epsilon_start, fig, ax, fig2, ax2):
returns = []
test_returns = []
for i in range(nr_episodes):
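        # linearly decay epsilon each episode, but never below a floor of 0.1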
agent.epsilon = max(0.1, agent.epsilon - epsilon_start/nr_episodes)
episode_return = episode(env, agent, gamma)
returns.append(episode_return)
if i % (nr_episodes//20) == 0:
print("episode {:5d}, return {}, epsilon {:.2f}".format(i, episode_return, agent.epsilon))
epsilon = agent.epsilon
agent.epsilon = 0
test_returns.append(sum([episode(env, agent, gamma) for _ in range(10)]) / 10.)
agent.epsilon = epsilon
ax.plot(returns)
fig.canvas.draw()
ax2.plot(test_returns)
fig2.canvas.draw()
def plot_values(env, agent):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
low = env.observation_space.low
high = env.observation_space.high
xs = np.linspace(low[0], high[0], 500)
ys = np.linspace(low[1], high[1], 500)
X, Y = np.meshgrid(xs, ys)
states = np.append(X.reshape(X.shape + (1,)), Y.reshape(Y.shape + (1,)), axis=2)
states = states.reshape((states.shape[0]*states.shape[1], 2,))
values = np.array(list(map(lambda x: max([agent.get_q_value(x, a) for a in agent.actions]), states))) #state value
Z = -values.reshape(X.shape)
# plot the negated state values (sign flipped so the cost-to-go surface is easier to read)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel("state value")
plt.show()
env = gym.make('MountainCar-v0')
eps_start = 0.9
alpha = 0.01
gamma = 0.999
fig,ax = plt.subplots(1,1)
ax.set_title("Training returns")
ax.set_xlabel('episode')
fig2,ax2 = plt.subplots(1,1)
ax2.set_title("Test returns")
ax2.set_xlabel('episode')
#agent = QLearningAgentTabular(env, eps_start, alpha, gamma)
agent = QLearningAgentApproximator(env, eps_start, alpha, gamma)
nr_episodes = 3000
train(env, agent, gamma, nr_episodes, eps_start, fig, ax, fig2, ax2)
#Plot the state values
plot_values(env, agent)
#visualize learned policy within domain for 3 episodes
agent.epsilon = 0
for _ in range(3):
episode(env, agent, gamma, render=True)
###Output
_____no_output_____ |
Summary/Python_04.ipynb | ###Markdown
The for loop
- Used to process values while iterating over an iterable object such as a list or string
- Written with the syntax shown below
- On each pass, i is set to the next item of a in order
- The for statement ends once every item has been visited
###Code
a = [1,2,3,4,5]
for i in a :
print(i, i*2)
a = [1,2,3,4,5]
for i in a :
print(i, i*2)
print('hahaa')
###Output
_____no_output_____
###Markdown
Printing the items of a string
- Strings are iterable and can be traversed in much the same way as lists
###Code
for x in 10:
print(x) # integers are not iterable, so this line raises a TypeError
a = 'hello world'
for character in a :
print(character)
###Output
_____no_output_____
###Markdown
Printing list items
###Code
a = [1,2,10,3,5,6]
for num in a :
if num % 2 == 0:
print(num/2)
else:
print(num+1)
print(num)
###Output
_____no_output_____
###Markdown
Printing dict items
- Iterating over a dictionary directly yields its keys
- keys() iterates over the keys only
- values() iterates over the values only
- items() iterates over the key, value pairs as tuples
###Code
a = {'korea':'seoul', 'japan':'tokyo'}
for k in a :
print(k, a[k])
for value in a.values():
print(value)
list(a.items())
for key , value in a.items():
print(key,value)
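# keys(), mentioned in the markdown above, can be iterated the same way:
for key in a.keys(): print(key)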
###Output
_____no_output_____
###Markdown
Using an index in a for loop
- By default, iterating over a list in a for loop yields only the values
- The code below gives access to both the index and the value (using the enumerate function)
###Code
a = [1,2,3,4,5]
for i , val in enumerate(a):
print(i, val)
a = [1,2,3,4,5]
for num in a:
print(num)
###Output
_____no_output_____
###Markdown
break
- A for loop can also be terminated early when a particular condition is met
###Code
a = [100,90,80,70,60,50]
for num in a:
if num < 80:
break
print(num)
###Output
_____no_output_____
###Markdown
continue
###Code
a = [100,90,80,70,60,50]
for num in a:
if num > 60 and num <= 70:
continue
print(num)
###Output
_____no_output_____
###Markdown
Nested loops
- Loops can be nested inside one another
- Nesting means writing another loop inside the body of a loop
- In that case, the inner loop runs in full for every iteration of the outer loop
- Loops can be nested to any depth
###Code
a = [1,2,4]
for i in a :
for j in a:
print (i*j)
###Output
_____no_output_____
###Markdown
Printing the multiplication table (gugudan)
###Code
x = [2,3,4,5,6,7,8,9]
y = [1,2,3,4,5,6,7,8,9]
for i in x :
for j in y:
print(i, 'x', j,'=',i*j)
###Output
_____no_output_____ |
notebooks/Somers2017.ipynb | ###Markdown
Somers2017

`Title`: A Measurement of Radius Inflation in the Pleiades and Its Relation to Rotation and Lithium Depletion

`Authors`: Somers & Stassun

Data is from this paper: http://iopscience.iop.org/article/10.3847/1538-3881/153/3/101/meta
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
pd.options.display.max_columns = 150
%config InlineBackend.figure_format = 'retina'
import astropy
from astropy.io import ascii
from astropy.table import Table
import numpy as np
###Output
_____no_output_____
###Markdown
Table 1: Basic Pleiades data Currently behind a paywall, requires institutional access.
###Code
#! mkdir ../data/Somers2017
#! wget http://iopscience.iop.org/1538-3881/153/3/101/suppdata/ajaa4ef4t1_mrt.txt
#! mv ajaa4ef4t1_mrt.txt ../data/Somers2017/
! head ../data/Somers2017/ajaa4ef4t1_mrt.txt
tab1 = ascii.read('../data/Somers2017/ajaa4ef4t1_mrt.txt')
#tab1.show_in_notebook(display_length=5)
df1 = tab1.to_pandas()
df1.head()
###Output
_____no_output_____
###Markdown
Table 2: Derived stellar properties
###Code
#! wget http://iopscience.iop.org/1538-3881/153/3/101/suppdata/ajaa4ef4t2_mrt.txt
#! mv ajaa4ef4t2_mrt.txt ../data/Somers2017/
tab2 = ascii.read('../data/Somers2017/ajaa4ef4t2_mrt.txt')
df2 = tab2.to_pandas()
df2.head()
df1.to_csv('../data/Somers2017/tb1.csv', index=False)
df2.to_csv('../data/Somers2017/tb2.csv', index=False)
###Output
_____no_output_____ |
notebooks/1.0_biiweeklyforecast.ipynb | ###Markdown
1.0 Import Library
###Code
import pandas as pd
import datetime
###Output
_____no_output_____
###Markdown
1.1 Load Dataset
###Code
df = pd.read_csv("../data/interim/df_complete.csv", parse_dates=['transaction_date'])
df['mnth_yr'] = df['transaction_date'].apply(lambda x: x.strftime('%B-%Y'))
df['mnth'] = df['transaction_date'].dt.month
df.head(1)
df.info()
dfx = df.groupby(['user_id','direction','mnth_yr', 'mnth']).agg(amount = ('amount_n26_currency','sum'), num_trx = ('amount_n26_currency','count')).reset_index()
dfx.head()
###Output
_____no_output_____
###Markdown
Remove customers with only 1 month of transactions
###Code
test = dfx.groupby(['user_id','direction'])['mnth_yr'].nunique().reset_index()
dfx = dfx[~dfx.user_id.isin(test[test.mnth_yr == 1]['user_id']) ]
###Output
_____no_output_____
###Markdown
2.0 Exploratory Data Analysis
###Code
def check_df(dataframe):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(3))
print("##################### Tail #####################")
print(dataframe.tail(3))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(dfx)
###Output
##################### Shape #####################
(66823, 6)
##################### Types #####################
user_id object
direction object
mnth_yr object
mnth int64
amount int64
num_trx int64
dtype: object
##################### Head #####################
user_id direction mnth_yr mnth amount \
0 000295594379774ab9ac2c78c946d615 In February-2016 2 291
1 000295594379774ab9ac2c78c946d615 In June-2016 6 115
2 000295594379774ab9ac2c78c946d615 In March-2016 3 381
num_trx
0 1
1 1
2 1
##################### Tail #####################
user_id direction mnth_yr mnth amount \
73296 fffde1ba10b4040deb651162f56c9fc4 Out June-2016 6 217
73297 fffde1ba10b4040deb651162f56c9fc4 Out March-2016 3 345
73298 fffde1ba10b4040deb651162f56c9fc4 Out May-2016 5 336
num_trx
73296 12
73297 10
73298 13
##################### NA #####################
user_id 0
direction 0
mnth_yr 0
mnth 0
amount 0
num_trx 0
dtype: int64
##################### Quantiles #####################
0.00 0.05 0.50 0.95 0.99 1.00
mnth 2.0 2.0 5.0 7.0 7.00 7.0
amount 3.0 11.0 156.0 1035.0 1887.78 8528.0
num_trx 1.0 1.0 3.0 21.0 33.00 91.0
###Markdown
TBF

3.0 Outlier Check

For outlier detection, I will use the IQR method with Q1 as the 0.05 quantile and Q3 as the 0.95 quantile. I will compute the lower and upper limits with the IQR method and check whether the amount variable contains values above/below these limits. The check returns a boolean.
###Code
def outlier_thresholds(dataframe, col_name, q1_perc=0.05, q3_perc=0.95):
"""
given dataframe, column name, q1_percentage and q3 percentage, function calculates low_limit and up_limit
"""
quartile1 = dataframe[col_name].quantile(q1_perc)
quartile3 = dataframe[col_name].quantile(q3_perc)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name, q1_perc=0.01, q3_perc=0.99):
outlier_list = []
low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1_perc=0.01, q3_perc=0.99)
if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
return True
else:
return False
check_outlier(dfx, 'amount')
dfx.groupby(["user_id","direction"]).agg({"amount": ["sum", "mean", "median", "std"]})
###Output
_____no_output_____
###Markdown
4.0 Feature Engineering

In order to search for seasonalities, the date variable will be used to derive new features and different time periods will be created.
###Code
# def create_date_features(df):
# df['month'] = df.date.dt.month
# df['quarter'] = df.date.dt.day
# df['day_of_year'] = df.date.dt.dayofyear
# df['week_of_year'] = df.date.dt.weekofyear
# # 1.1.2013 is Tuesday, so our starting point is the 2nd day of week
# df['day_of_week'] = df.date.dt.dayofweek + 1
# df['year'] = df.date.dt.year
# df["is_wknd"] = df.date.dt.weekday // 4
# df['is_month_start'] = df.date.dt.is_month_start.astype(int)
# df['is_month_end'] = df.date.dt.is_month_end.astype(int)
# return df
def create_num_days_month(m):
if m==2:
return 28
list = [1,3,5,7,8,10,12]
if m in list:
return 31
return 30
dfx["num_days_month"] = dfx['mnth'].apply(lambda x: create_num_days_month(x))
# df.rename(columns= {'transaction_date':'date'}, inplace= True)
# df = create_date_features(df)
dfx.head()
###Output
_____no_output_____
###Markdown
4.1 Add Random Noise

For small datasets like this one, random noise can be added to the values in order to avoid overfitting. I will add Gaussian random noise, normally distributed with a standard deviation of 1 and a mean of 0.
###Code
def random_noise(dataframe):
return np.random.normal(size=(len(dataframe),))
###Output
_____no_output_____
###Markdown
4.2 Lag/Shifted Features

Time series theory states that the value at time t depends strongly on the value at time t-1. That is why I will be creating shifted (lagged) versions of the amount values and adding noise.
###Code
# sort the values per user_id, direction and month so that values are shifted consistently within each group
dfx.sort_values(by=['user_id','direction', 'mnth'], axis=0, inplace=True)
dfx
# the feature name will be created dynamically with regards to the lag value for a given list of lags
def lag_features(dataframe, lags):
dataframe = dataframe.copy()
for lag in lags:
dataframe['amount_lag_' + str(lag)] = dataframe.groupby(['user_id','direction'])['amount'].transform(
lambda x: x.shift(lag)) + random_noise(dataframe)
return dataframe
dfx = lag_features(dfx, [1, 2, 3])
dfx.head()
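# Illustrative sketch of the shift within one (user_id, direction) group (amounts made up):
# monthly amounts [100, 150, 130] give amount_lag_1 = [NaN, 100, 150] and
# amount_lag_2 = [NaN, NaN, 100], before the Gaussian noise from random_noise() is added.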
###Output
_____no_output_____
###Markdown
4.3 Rolling Mean / Moving Average

In order to find possible seasonalities, I will be creating moving averages for specified time intervals. The rolling function takes the number of periods given as the window parameter and averages the values, but one of those values is the value of the current observation itself. To eliminate the current period's effect on the moving average, I take a shift of 1 before applying the rolling mean.
###Code
def roll_mean_features(dataframe, windows):
dataframe = dataframe.copy()
for window in windows:
dataframe['amount_roll_mean_' + str(window)] = dataframe.groupby(['user_id','direction'])['amount']. \
transform(
lambda x: x.shift(1).rolling(window=window, min_periods=2, win_type="triang").mean()) + random_noise(dataframe)
return dataframe
dfx = roll_mean_features(dfx, [2,3,4])
dfx.head()
###Output
_____no_output_____
###Markdown
4.4 Exponentially Weighted Mean Features

The value at time t depends strongly on the value at time t-1, so to get a better prediction the values should not be weighted equally when computing the average.
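As a reminder (the standard definition, not something computed in this notebook), for a smoothing factor $\alpha \in (0, 1]$ the exponentially weighted mean follows the recursion $$\hat{y}_t = \alpha x_t + (1 - \alpha)\hat{y}_{t-1},$$ so recent observations receive geometrically larger weights. This is what `ewm(alpha=alpha).mean()` computes for each lagged series below.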
###Code
def ewm_features(dataframe, alphas, lags):
dataframe = dataframe.copy()
for alpha in alphas:
for lag in lags:
dataframe['amount_ewm_alpha_' + str(alpha).replace(".", "") + "_lag_" + str(lag)] = \
dataframe.groupby(["user_id", "direction"])['amount']. \
transform(lambda x: x.shift(lag).ewm(alpha=alpha).mean())
return dataframe
alphas = [0.95, 0.9, 0.8, 0.7, 0.5]
lags = [1, 2, 3]
dfx = ewm_features(dfx, alphas, lags)
dfx.tail()
dfx['amount'] = np.log1p(dfx["amount"].values)
# df['sales'].head()
def smape(preds, target):
n = len(preds)
masked_arr = ~((preds == 0) & (target == 0))
preds, target = preds[masked_arr], target[masked_arr]
num = np.abs(preds-target)
denom = np.abs(preds)+np.abs(target)
smape_val = (200*np.sum(num/denom))/n
return smape_val
def lgbm_smape(preds, train_data):
labels = train_data.get_label()
smape_val = smape(np.expm1(preds), np.expm1(labels))
return 'SMAPE', smape_val, False
train = dfx.loc[(dfx["mnth"] < 7), :]
# train["date"].min(), train["date"].max()
val = dfx.loc[(dfx["mnth"] == 7) , :]
val.head(2)
cols = [col for col in train.columns if col not in ['user_id', "direction", "mnth","mnth_yr","amount","num_trx"]]
Y_train = train['amount']
X_train = train[cols]
Y_val = val['amount']
X_val = val[cols]
import lightgbm as lgb
lgb_params = {'metric': {'mae'},
'num_leaves': 10,
'learning_rate': 0.02,
'feature_fraction': 0.8,
'max_depth': 5,
'verbose': 0,
'num_boost_round': 15000,
'early_stopping_rounds': 200,
'nthread': -1}
lgbtrain = lgb.Dataset(data=X_train, label=Y_train, feature_name=cols)
lgbval = lgb.Dataset(data=X_val, label=Y_val, reference=lgbtrain, feature_name=cols)
type(lgbtrain)
model = lgb.train(lgb_params, lgbtrain,
valid_sets=[lgbtrain, lgbval],
num_boost_round=lgb_params['num_boost_round'],
early_stopping_rounds=lgb_params['early_stopping_rounds'],
feval=lgbm_smape,
verbose_eval=200)
y_pred_val = model.predict(X_val)
smape(np.expm1(y_pred_val), np.expm1(Y_val))
from sklearn.metrics import mean_squared_error
print(mean_squared_error(np.expm1(y_pred_val), np.expm1(Y_val)))
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
print(mean_absolute_percentage_error(np.expm1(Y_val), np.expm1(y_pred_val)))
def plot_lgb_importances(model,plot=True,num=10):
from matplotlib import pyplot as plt
import seaborn as sns
gain = model.feature_importance('gain')
feat_imp = pd.DataFrame({'feature': model.feature_name(),
'split': model.feature_importance('split'),
'gain': 100 * gain / gain.sum()}).sort_values('gain', ascending=False)
if plot:
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(x="gain", y="feature", data=feat_imp[0:25])
plt.title('feature')
plt.tight_layout()
plt.show()
else:
print(feat_imp.head(num))
print(feat_imp.head(num))
plot_lgb_importances(model, num=30)
# this one is the built-in plot function of LightGBM library
lgb.plot_importance(model, max_num_features=20, figsize=(10, 10), importance_type="gain")
plt.show()
np.expm1(y_pred_val).shape
data = {"students": students,
"subject": subject,
"marks": marks}
np.expm1(Y_val).shape
lgb_params = {'metric': {'mae'},
'num_leaves': 10,
'learning_rate': 0.02,
'feature_fraction': 0.8,
'max_depth': 5,
'verbose': 0,
'num_boost_round': 15000,
'early_stopping_rounds': 200,
'nthread': -1}
###Output
_____no_output_____ |
Asteroid Light Curve Examples - Part 1.ipynb | ###Markdown
Asteroid Light Curve Examples - Part 1

This notebook contains examples of deep learning techniques applied to the asteroid light curve data from http://alcdef.org.

Objectives
- Understand when a convolutional neural network (CNN) might be applicable.
- See how to apply a 1D-CNN to time-series data.
- See how to build a more complex model that takes both time-series and categorical inputs.

Parameters
###Code
# Path to the ALCDEF_ALL dataset downloaded from http://alcdef.org
# Download the full archive as a .zip file. Extract its contents to this
# directory. It should be ~14K .txt files.
data_dir = 'data/ALCDEF_ALL'
# Discard any light curves with fewer than this many samples
min_samples = 100
# Resample light curves to common number of samples
nb_samples = 100
# Discard any light curve that isn't among the nb_classes most common objects
nb_classes = 20
###Output
_____no_output_____
###Markdown
Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from scipy.signal import resample
from collections import Counter
import random
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, LSTM
from keras.utils import to_categorical
from keras.optimizers import SGD, Adam
from keras import regularizers
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, MaxPooling1D, Flatten, GlobalAveragePooling1D, Input, concatenate
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import umap
import pandas as pd
import seaborn as sns
from ml4ssa_utils import visualize_embedding, load_alcdef_data, plot_alcdef_examples, normalize_features, plot_confusion_matrix
###Output
_____no_output_____
###Markdown
Load Dataset

Load data from the Asteroid Lightcurve Photometry Database (http://alcdef.org/).
###Code
data = load_alcdef_data(
data_dir=data_dir,
min_samples=min_samples,
resample_to=nb_samples,
reduce_to_top=nb_classes
)
# Gather a list of the object names we'll be working with
names = list(set([item['OBJECTNAME'] for item in data]))
###Output
_____no_output_____
###Markdown
Visualize Examples
###Code
plot_alcdef_examples(data)
###Output
_____no_output_____
###Markdown
Generate Train and Test Data Sets
###Code
X = np.array([ item['DATA_RESAMPLED'][:,1] for item in data ])
y = np.array([ names.index(item['OBJECTNAME']) for item in data ])
# Reserve 20% of the data for testing
# Stratify the data split so that the train and test sets have the same class distribution
X_train, X_test, y_train, y_test, data_train, data_test = train_test_split(X, y, data, test_size=0.20, stratify=y)
print('Generated train and test sets with the following sizes:')
print('Train X (features) {}, y (targets) {}'.format(X_train.shape, y_train.shape))
print('Test X (features) {}, y (targets) {}'.format(X_test.shape, y_test.shape))
###Output
Generated train and test sets with the following sizes:
Train X (features) (615, 100), y (targets) (615,)
Test X (features) (154, 100), y (targets) (154,)
###Markdown
Review Class Distributions to Understand Performance of a Random Classifier

It's always helpful to understand how well a random classifier should perform. This sets a worst-case baseline. If you're doing better than this performance, you know at least something is working. If your classifier is performing worse than random, something is broken. If it's performing at the same level as random, it's either broken or you have a very hard problem (at least with your current size and distribution of training data).
###Code
class_counts = np.sum(to_categorical(y_test), axis=0)
class_proportions = class_counts / np.sum(class_counts)
max_proportion = np.max(class_proportions)
random_performance = 1./nb_classes
print('Random Performance: {:.3f}'.format(1./nb_classes))
print('Mode Collapse Performance: {:.3f}'.format(max_proportion))
print('-'*65)
for name, proportion in zip(names, class_proportions):
print('{:15} {:.3f} {}'.format(name, proportion, 'largest' if proportion == max_proportion else ''))
###Output
Random Performance: 0.050
Mode Collapse Performance: 0.110
-----------------------------------------------------------------
Eudora 0.052
Euterpe 0.039
Pales 0.039
Aline 0.039
Aurelia 0.039
Scania 0.045
Parthenope 0.104
Zelinda 0.058
Cupido 0.045
Eukrate 0.039
Thora 0.039
Ate 0.058
Lachesis 0.039
Ganymed 0.110 largest
Ounas 0.039
Polyhymnia 0.039
Norma 0.039
Melanie 0.045
Leukothea 0.052
Philia 0.039
###Markdown
Try an MLP (multi-layer perceptron) similar to the TLE example
###Code
metric='acc'
nb_classes = len(names)
model = Sequential()
model.add(Dense(units=100,activation='relu', input_shape=(100,)))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=nb_classes, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=[metric]
)
# keras is complaining that I need to evaluate the model before printing a summary
# model.predict(np.zeros((16,9)))
model.summary()
plot_confusion_matrix(model, X_test, y_test, 'Untrained MLP on Test Data', names=names)
# Prepare data to pass to model
repeats = 100
# The repeats value here is used to artificially increase the size of our training set.
# This forces keras to treat <repeats> passes through the training set as a single epoch and we
# get to avoid a huge number of progress bars and short-term variance in metrics.
train_features = normalize_features(X_train.repeat(repeats, axis=0))
train_targets = to_categorical(y_train.repeat(repeats, axis=0))
test_features = normalize_features(X_test)
test_targets = to_categorical(y_test)
# Fit model to data
model.fit(
train_features, train_targets,
validation_data=(test_features, test_targets),
epochs=10,
batch_size=16,
callbacks=[EarlyStopping(patience=3, monitor='val_loss')],
verbose=1
)
plot_confusion_matrix(model, X_test, y_test, 'Test Data', names=names)
plot_confusion_matrix(model, X_train, y_train, 'Train Data', names=names)
###Output
_____no_output_____
###Markdown
CNN Model

Now that we have two baselines (random performance and the MLP we used for TLE data), let's look at improving our performance with a different model.
###Code
metric='acc'
nb_classes = len(names)
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=5, activation='relu'))
model.add(MaxPooling1D())
model.add(Conv1D(filters=32, kernel_size=5, activation='relu'))
model.add(MaxPooling1D())
model.add(Conv1D(filters=16, kernel_size=5, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dense(units=nb_classes, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=[metric]
)
# keras is complaining that I need to evaluate the model before printing a summary
model.predict(np.zeros((16,100,1)))
model.summary()
# Prepare data to pass to model
repeats = 100
# The repeats value here is used to artificially increase the size of our training set.
# This forces keras to treat <repeats> passes through the training set as a single epoch and we
# get to avoid a huge number of progress bars and short-term variance in metrics.
train_features = normalize_features(X_train.repeat(repeats, axis=0))
train_targets = to_categorical(y_train.repeat(repeats, axis=0))
test_features = normalize_features(X_test)
test_targets = to_categorical(y_test)
# The convolutional layers will expect a "channels" dimension at the end.
train_features = np.expand_dims(train_features, axis=-1)
test_features = np.expand_dims(test_features, axis=-1)
model.fit(
train_features, train_targets,
validation_data=(test_features, test_targets),
epochs=10,
batch_size=16,
callbacks=[EarlyStopping(patience=5, monitor='val_loss')],
verbose=1
)
###Output
Train on 61500 samples, validate on 154 samples
Epoch 1/10
61500/61500 [==============================] - 1090s 18ms/step - loss: 2.2010 - acc: 0.2361 - val_loss: 2.2053 - val_acc: 0.2143
Epoch 2/10
61500/61500 [==============================] - 1129s 18ms/step - loss: 2.0066 - acc: 0.2917 - val_loss: 2.2793 - val_acc: 0.2078
Epoch 3/10
61500/61500 [==============================] - 1184s 19ms/step - loss: 1.9634 - acc: 0.3049 - val_loss: 2.4039 - val_acc: 0.2273
Epoch 4/10
61500/61500 [==============================] - 1178s 19ms/step - loss: 1.9344 - acc: 0.3122 - val_loss: 2.2558 - val_acc: 0.2143
Epoch 5/10
61500/61500 [==============================] - 1186s 19ms/step - loss: 1.9140 - acc: 0.3167 - val_loss: 2.4238 - val_acc: 0.2208
Epoch 6/10
61500/61500 [==============================] - 1181s 19ms/step - loss: 1.9041 - acc: 0.3178 - val_loss: 2.6379 - val_acc: 0.2078
###Markdown
Light Curve Embedding Based on Extracted Features
###Code
# Here we extract the intermediate features/activations from the penultimate (second-to-last) layer
layer_name = 'penultimate'
intermediate_layer_model = Model(inputs=model.input, outputs=model.layers[-2].output)
X_penultimate_test = intermediate_layer_model.predict(test_features)
visualize_embedding(X_penultimate_test, y_test)
plot_confusion_matrix(model, test_features, y_test, '', names=names)
###Output
_____no_output_____
###Markdown
Multi-Modal Input

The above models use features from a single modality (sampled light curves). In real world problems, we often have multiple data types that will be relevant to our problem. For example, we typically at least have metadata associated with sampled data. The convolutional layers were motivated by the assumption that our sampled data was translationally invariant. As we have no reason to believe this should be the case for our metadata (it's not even clear what that would mean), we'll need to think about how best to incorporate additional data.
###Code
def generate_metadata_vector(item):
'''Generate a metadata vector of the form <one-hot-encoded filter value> | <phase>.
The data appears to have 3 different filter codes and a single phase value so the metadata vector
will be of length 4.
'''
v = np.zeros(4)
filter_codes = ['V', 'R', 'C']
filter_ndx = filter_codes.index(item['FILTER'])
v[filter_ndx] = 1
v[-1] = float(item['PHASE']) / 60. # 60 was chosen as it was the largest value observed in a chunk of the data
return v
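# Quick illustrative check (a made-up item, not one from the dataset):
# generate_metadata_vector({'FILTER': 'R', 'PHASE': '12.0'}) -> array([0., 1., 0., 0.2]),
# i.e. a one-hot encoding of the filter code followed by the phase scaled by 1/60.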
metric='acc'
nb_classes = len(names)
nb_metadata_inputs = 4
cnn_input = Input(shape=(nb_samples,1), name='cnn_input')
x = Conv1D(filters=64, kernel_size=5, activation='relu')(cnn_input)
x = MaxPooling1D()(x)
x = Conv1D(filters=32, kernel_size=5, activation='relu')(x)
x = MaxPooling1D()(x)
x = Conv1D(filters=16, kernel_size=5, activation='relu')(x)
cnn_output = GlobalAveragePooling1D()(x)
metadata_input = Input(shape=(nb_metadata_inputs,), name='metadata_input')
x = Dense(units=10, activation='relu')(metadata_input)
metadata_output = Dense(units=10, activation='relu')(x)
merged = concatenate([cnn_output, metadata_output])
final_output = Dense(units=nb_classes, activation='softmax')(merged)
model = Model([cnn_input, metadata_input], final_output)
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=[metric]
)
model.summary()
# Prepare data to pass to model
repeats = 10
# The repeats value here is used to artificially increase the size of our training set.
# This forces keras to treat <repeats> passes through the training set as a single epoch and we
# get to avoid a huge number of progress bars and short-term variance in metrics.
train_features = normalize_features(X_train.repeat(repeats, axis=0))
train_targets = to_categorical(y_train.repeat(repeats, axis=0))
test_features = normalize_features(X_test)
test_targets = to_categorical(y_test)
# The convolutional layers will expect a "channels" dimension at the end.
train_features = np.expand_dims(train_features, axis=-1)
test_features = np.expand_dims(test_features, axis=-1)
# Generate the metadata features
train_metadata_features = np.stack([ generate_metadata_vector(item) for item in data_train ]).repeat(repeats, axis=0)
test_metadata_features = np.stack([ generate_metadata_vector(item) for item in data_test ])
model.fit(
[train_features, train_metadata_features], train_targets,
validation_data=([test_features, test_metadata_features], test_targets),
epochs=10,
batch_size=16,
callbacks=[EarlyStopping(patience=5, monitor='val_loss')],
verbose=1
)
###Output
Train on 6150 samples, validate on 154 samples
Epoch 1/10
6150/6150 [==============================] - 115s 19ms/step - loss: 2.4147 - acc: 0.2434 - val_loss: 2.1445 - val_acc: 0.3182
Epoch 2/10
6150/6150 [==============================] - 111s 18ms/step - loss: 1.9617 - acc: 0.3660 - val_loss: 1.9389 - val_acc: 0.3896
Epoch 3/10
6150/6150 [==============================] - 112s 18ms/step - loss: 1.7892 - acc: 0.4106 - val_loss: 1.8477 - val_acc: 0.4026
Epoch 4/10
6150/6150 [==============================] - 111s 18ms/step - loss: 1.6687 - acc: 0.4320 - val_loss: 1.8205 - val_acc: 0.3701
Epoch 5/10
6150/6150 [==============================] - 118s 19ms/step - loss: 1.5877 - acc: 0.4421 - val_loss: 1.7468 - val_acc: 0.4091
Epoch 6/10
6150/6150 [==============================] - 112s 18ms/step - loss: 1.5243 - acc: 0.4698 - val_loss: 1.7314 - val_acc: 0.4221
Epoch 7/10
6150/6150 [==============================] - 111s 18ms/step - loss: 1.4681 - acc: 0.4928 - val_loss: 1.7473 - val_acc: 0.4156
Epoch 8/10
6150/6150 [==============================] - 111s 18ms/step - loss: 1.4310 - acc: 0.5176 - val_loss: 1.7497 - val_acc: 0.3766
Epoch 9/10
6150/6150 [==============================] - 110s 18ms/step - loss: 1.3982 - acc: 0.5226 - val_loss: 1.7517 - val_acc: 0.3831
Epoch 10/10
6150/6150 [==============================] - 110s 18ms/step - loss: 1.3766 - acc: 0.5285 - val_loss: 1.7583 - val_acc: 0.3766
###Markdown
Recurrent Neural Network

A CNN processes an entire sequence of data in one step. An RNN, on the other hand, processes a sequence one element at a time. At each time step, it has access to the current sequence element and whatever information it has extracted from the preceding elements.
###Code
metric='acc'
nb_classes = len(names)
model = Sequential()
model.add(Conv1D(filters=16, kernel_size=5, activation='relu', padding='same'))
model.add(LSTM(16, return_sequences=True))
model.add(LSTM(16, return_sequences=False))
model.add(Dense(units=nb_classes, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=[metric]
)
# keras is complaining that I need to evaluate the model before printing a summary
model.predict(np.zeros((16,100,1)))
model.summary()
# Prepare data to pass to model
repeats = 10
# The repeats value here is used to artificially increase the size of our training set.
# This forces keras to treat <repeats> passes through the training set as a single epoch and we
# get to avoid a huge number of progress bars and short-term variance in metrics.
train_features = normalize_features(X_train.repeat(repeats, axis=0))
train_targets = to_categorical(y_train.repeat(repeats, axis=0))
test_features = normalize_features(X_test)
test_targets = to_categorical(y_test)
# The convolutional layers will expect a "channels" dimension at the end.
train_features = np.expand_dims(train_features, axis=-1)
test_features = np.expand_dims(test_features, axis=-1)
model.fit(
train_features, train_targets,
validation_data=(test_features, test_targets),
epochs=10,
batch_size=16,
callbacks=[EarlyStopping(patience=5, monitor='val_loss')],
verbose=1
)
###Output
Train on 6150 samples, validate on 154 samples
Epoch 1/10
6150/6150 [==============================] - 26s 4ms/step - loss: 2.4989 - acc: 0.1748 - val_loss: 2.4395 - val_acc: 0.2208
Epoch 2/10
6150/6150 [==============================] - 28s 5ms/step - loss: 2.3488 - acc: 0.2062 - val_loss: 2.3965 - val_acc: 0.2078
Epoch 3/10
6150/6150 [==============================] - 27s 4ms/step - loss: 2.2785 - acc: 0.2195 - val_loss: 2.3793 - val_acc: 0.2208
Epoch 4/10
6150/6150 [==============================] - 29s 5ms/step - loss: 2.2390 - acc: 0.2320 - val_loss: 2.3788 - val_acc: 0.1753
Epoch 5/10
6150/6150 [==============================] - 31s 5ms/step - loss: 2.2513 - acc: 0.2294 - val_loss: 2.3380 - val_acc: 0.1753
Epoch 6/10
6150/6150 [==============================] - 29s 5ms/step - loss: 2.2498 - acc: 0.2228 - val_loss: 2.4141 - val_acc: 0.1753
Epoch 7/10
6150/6150 [==============================] - 35s 6ms/step - loss: 2.2734 - acc: 0.2293 - val_loss: 2.3195 - val_acc: 0.2078
Epoch 8/10
6150/6150 [==============================] - 35s 6ms/step - loss: 2.2185 - acc: 0.2372 - val_loss: 2.2921 - val_acc: 0.2078
Epoch 9/10
6150/6150 [==============================] - 29s 5ms/step - loss: 2.2059 - acc: 0.2390 - val_loss: 2.3672 - val_acc: 0.2143
Epoch 10/10
6150/6150 [==============================] - 30s 5ms/step - loss: 2.2427 - acc: 0.2285 - val_loss: 2.2659 - val_acc: 0.2468
|
nepali-word2vec.ipynb | ###Markdown
Importing the dataset
###Code
import string
import time
# ~~~~~~~~~~~~~~~IMPORTING THE DATASET~~~~~~~~~~~~~~~~
start = time.process_time()
print("Reading the file .......")
f = open("../input/nepdata/clean.txt" , encoding= 'utf-8' , buffering= 10000)
lines = f.read().strip().split(u"।")
sentences = [sentence.translate(str.maketrans('', '', string.punctuation)) for sentence in lines]
f.close()
print(f"Total number of lines in text file {len(sentences)}")
print(f"Time required to read the file {time.process_time() - start}")
###Output
Reading the file .......
Total number of lines in text file 5891518
Time required to read the file 101.750362739
###Markdown
Processing Dataset for Training
###Code
!pip install snowballstemmer
# ~~~~~~~~~~~~~~ Getting the dataset ready for training word2vec model ~~~~~~~~~~
import re
import snowballstemmer
mainlist = list()
class Main_Data_list:
def __init__(self, dataset):
self.dataset = dataset
self.stop_word_list = []
self.mainlist = []
a_file = open("../input/stopwords/stopwords.txt", "r" ,encoding= 'utf-8')
for line in a_file:
stripped_line = line.strip()
self.stop_word_list.append(stripped_line)
a_file.close()
self.stemmer = snowballstemmer.NepaliStemmer()
def simple_tokenizer(self,text) -> list:
line = re.sub('[।]',"", text)
devanagari_range = r'[\u0900-\u097F\\]'
def getDevanagariCharCount(token):
return len(list(filter(lambda char: re.match(devanagari_range, char), (char for char in token))))
def isDevanagari(token):
return True if getDevanagariCharCount(token) >= len(token)/2 else False
tokens = list(filter(lambda t: isDevanagari(t), line.split(" ")))
return tokens
def get(self):
for i,line in enumerate(self.dataset[0:2000000]):
wordsList = self.simple_tokenizer(line)
words = [w for w in wordsList if not w in self.stop_word_list]
words = self.stemmer.stemWords(words)
if len(words) > 3:
self.mainlist.append(words)
if i % 100000 == 0:
print(f"DONE FOR {i/100000} LAKHS LINES")
return self.mainlist
final = Main_Data_list(sentences)
mainlist = final.get()
###Output
DONE FOR 0.0 LAKHS LINES
DONE FOR 1.0 LAKHS LINES
DONE FOR 2.0 LAKHS LINES
DONE FOR 3.0 LAKHS LINES
DONE FOR 4.0 LAKHS LINES
DONE FOR 5.0 LAKHS LINES
DONE FOR 6.0 LAKHS LINES
DONE FOR 7.0 LAKHS LINES
DONE FOR 8.0 LAKHS LINES
DONE FOR 9.0 LAKHS LINES
DONE FOR 10.0 LAKHS LINES
DONE FOR 11.0 LAKHS LINES
DONE FOR 12.0 LAKHS LINES
DONE FOR 13.0 LAKHS LINES
DONE FOR 14.0 LAKHS LINES
DONE FOR 15.0 LAKHS LINES
DONE FOR 16.0 LAKHS LINES
DONE FOR 17.0 LAKHS LINES
DONE FOR 18.0 LAKHS LINES
DONE FOR 19.0 LAKHS LINES
###Markdown
Training
###Code
import gensim
model = gensim.models.Word2Vec(
vector_size = 200 ,
window= 5,
min_count=2,
workers= 4
)
model.build_vocab(mainlist, progress_per=1000 )
model.train(mainlist, total_examples= model.corpus_count, epochs= model.epochs)
###Output
_____no_output_____
###Markdown
Testing
###Code
model.wv.most_similar('ठमेल')
model.wv.most_similar('चितवन')
model.save("nepaliW2V_5Million.model")
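# The saved model can be reloaded later for further queries or continued training
# (sketch; assumes the file written above is in the current working directory):
reloaded = gensim.models.Word2Vec.load("nepaliW2V_5Million.model")
reloaded.wv.most_similar('ठमेल')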
###Output
_____no_output_____ |
.ipynb_checkpoints/P1_GoogleSearch-checkpoint.ipynb | ###Markdown
Use what I've learned from the other two to test out a final scraper that can iterate through a list of several radiologists.
###Code
# Getting to a radiologist's webpage
try:
from googlesearch import search, get_random_user_agent
except ImportError:
print("No module named 'google' found")
import re
from random import randint
from time import sleep
import random
# create test list of first 15 or so names
radiologists = [
"JUSTIN OWENS",
"MATTHEW HARTMAN",
"VINCENT GRAZIANO",
"SABA HASAN",
"DANIEL MENDEZ",
"PATRICK HURLEY",
"ANDREW PICEL",
"NORNA KARP",
"DAVID SHEEHAN",
"ROBERT BURMAN",
"KEVIN SAWYER",
"RAKESH BARAK",
"MICHAEL FISHMAN",
"STEVEN REIMAN",
"BRIAN MOON"
]
site = "https://health.usnews.com/doctors/"
url_list = []
for radiologist in radiologists:
query = "site:"+site+' "'+radiologist+" radiologist"'"'
for j in search(query, num=1, stop=1, tld="co.in", pause=2, user_agent= 'Mozilla/5.0'):
sleep(randint(10,20))
url_dict = {}
url_j = j
url_dict['radiologist_name'] = radiologist
url_dict['url'] = url_j
url_list.append(url_dict)
# This is how to access the names and urls after entirely populating the list
url_list[0]['radiologist_name']
url_list[0]['url']
url_list
# Proof that it did work for the short list at one point
url_list
###Output
_____no_output_____
###Markdown
Testing 2-7-22
###Code
from bs4 import BeautifulSoup
import urllib.request
# List with google queries I want to make
dash_radiologists = []
for radiologist in radiologists:
string = radiologist
string=string.replace(' ','-')
dash_radiologists.append(string)
desired_google_queries = dash_radiologists
for query in desired_google_queries:
# Construct the HTTP query
url = 'http://google.com/search?q=' + query + '-radiologist-healthusnews'
# Set a User-Agent header to avoid a 403 error
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
response = urllib.request.urlopen( req )
html = response.read()
# Parsing response
soup = BeautifulSoup(html, 'html.parser')
# Extract the element with id "experience" from the result page
results = soup.find(id="experience")
print(results.prettify())
# Delay
sleep(randint(10,20))
from bs4 import BeautifulSoup
import urllib.request
import requests
zipcode = '30342'
url = 'https://health.usnews.com/doctors/search?specialty=Radiology&location='+zipcode
user_agent = {'User-agent' : 'Mozilla/5.0'}
page = requests.get(url, headers=user_agent)
soup = BeautifulSoup(page.text, "html.parser")
# Find all HTML relating to Education & Experience (includes med school & residency, licenses, etc.)
li_elements = soup.find_all("li", class_= "item-list__ListItemStyled-sc-18yjqdy-1 fRQMSd")
# Take a look at all the elements
for li_element in li_elements:
url_element_a = li_element.find('a')
if url_element_a is not None:
url_element = url_element_a.get('href')
print(url_element)
print() # v-space between elements
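# The hrefs above are relative paths; urllib.parse.urljoin can turn them into full profile URLs.
# (Sketch only -- it assumes the same li_elements selection as the loop above.)
from urllib.parse import urljoin
doctor_urls = [urljoin('https://health.usnews.com', li.find('a').get('href')) for li in li_elements if li.find('a') is not None]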
###Output
/doctors/michael-dille-447907
/doctors/neil-amin-862965
/doctors/pamela-donlan-256487
/doctors/pamela-donlan-256487
/doctors/john-gay-137869
/doctors/kim-gray-67337
/doctors/travis-langley-729504
/doctors/travis-langley-729504
/doctors/jason-oppenheimer-734909
/doctors/neel-patel-690987
/doctors/nirav-patel-119277
/doctors/nirav-patel-119277
/doctors/neil-shah-784970
/doctors/prashant-shankar-779331
/doctors/ashishkumar-parikh-996150
/doctors/ashishkumar-parikh-996150
/doctors/john-grattan-smith-271480
/doctors/courtney-stewart-639821
/doctors/stephen-little-270734
/doctors/stephen-little-270734
/doctors/ariane-neish-269318
/doctors/jane-share-269455
/doctors/thai-trinh-269238
/doctors/thai-trinh-269238
/doctors/amanda-bauer-483431
/doctors/richard-barlow-272286
|
handy_syntax/file.py.ipynb | ###Markdown
Write a Python Script
###Code
%%file zenofpython.py
'''new file'''
import this
import zenofpython
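# Note: %%file is an IPython cell magic that writes its cell body ('''new file''' and
# `import this`) to zenofpython.py instead of executing it; the separate `import zenofpython`
# statement then imports that file, which runs `import this` and prints the Zen of Python below.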
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
|
notebooks/magr_calibration.ipynb | ###Markdown
Load SDSS sample
###Code
from cosmodc2.sdss_colors import load_umachine_processed_sdss_catalog
sdss = load_umachine_processed_sdss_catalog()
print(sdss.keys())
###Output
/Users/aphearin/anaconda/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
Load $z=0$ baseline ${\rm UniverseMachine}$ mock with $M_{\ast}\ \&\ {\rm SFR}$
###Code
import os
# MDPl2-based mock
# dirname = "/Users/aphearin/work/random/0330/testing_mock"
# basename = "sfr_catalog_1.000000_value_added.hdf5"
# fname = os.path.join(dirname, basename)
# Bpl-based mock
dirname = "/Users/aphearin/work/random/0331"
basename = "testing_bpl_based_v4.hdf5"
fname = os.path.join(dirname, basename)
from astropy.table import Table
mock = Table.read(fname, path='data')
# Impute sSFR=0 values in the quenched sequence for plotting convenience
mock['obs_ssfr'] = mock['obs_sfr']/mock['obs_sm']
zero_mask = mock['obs_ssfr'] == 0
num_zeros = np.count_nonzero(zero_mask)
mock['obs_ssfr'][zero_mask] = 10**np.random.normal(loc=-13, scale=0.25, size=num_zeros)
print(mock.keys())
###Output
['halo_id', 'upid', 'vpeak', 'mpeak', 'mvir', 'vmax', 'sm', 'sfr', 'obs_sm', 'obs_sfr', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'hostid', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_sfr_percentile', 'sfr_percentile', 'obs_ssfr']
###Markdown
Load ${\rm protoDC2\ v3}$ at $z=0$
###Code
dirname = "/Users/aphearin/Dropbox/protoDC2/umachine_z0p1_color_mock"
basename = "umachine_z0p1_color_mock.hdf5"
fname = os.path.join(dirname, basename)
from astropy.table import Table
v3_mock = Table.read(fname, path='data')
print(v3_mock.keys())
###Output
['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'obs_sm', 'obs_sfr', 'mpeak', 'mvir', 'vmax', 'vmax_at_mpeak', 'upid', 'hostid', 'has_matching_host', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_ssfr', 'sfr_percentile_fixed_sm', 'rmag', 'sdss_petrosian_gr', 'sdss_petrosian_ri', 'size_kpc', 'dr7_photoobj_id']
###Markdown
Assign Absolute r-band magnitude
###Code
from cosmodc2.sdss_colors import median_magr_from_mstar
# y_table=[-18.8, -20.1, -22.8]
median_magr = median_magr_from_mstar(np.log10(mock['obs_sm']))
mock['restframe_extincted_sdss_abs_magr'] = np.random.normal(
loc=median_magr, scale=0.25, size=len(mock))
median_magr2 = median_magr_from_mstar(np.log10(mock['obs_sm']))
mock['rmag2'] = np.random.normal(
loc=median_magr2, scale=0.15, size=len(mock))
median_magr3 = median_magr_from_mstar(np.log10(mock['obs_sm']),
y_table=[-18.9, -20.2, -22.55])
mock['rmag3'] = np.random.normal(
loc=median_magr3, scale=0.2, size=len(mock))
from cosmodc2.sdss_colors import dim_satellites
log_mhost = np.log10(mock['host_halo_mvir'])
log_mpeak = np.log10(mock['mpeak'])
upid = mock['upid']
mock['restframe_extincted_sdss_abs_magr'] = dim_satellites(
mock['restframe_extincted_sdss_abs_magr'], log_mpeak, log_mhost, upid)
mock['rmag2'] = dim_satellites(mock['rmag2'], log_mpeak, log_mhost, upid)
mock['rmag3'] = dim_satellites(mock['rmag3'], log_mpeak, log_mhost, upid)
fig, ax = plt.subplots(1, 1)
nskip_sdss = 10
__=ax.scatter(sdss['sm'][::nskip_sdss],
sdss['restframe_extincted_sdss_abs_magr'][::nskip_sdss], s=0.1)
from scipy.stats import binned_statistic
logsm_bins = np.linspace(9, 11.7, 40)
logsm_mids = 0.5*(logsm_bins[:-1] + logsm_bins[1:])
median_magr_model1, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['restframe_extincted_sdss_abs_magr'],
bins=logsm_bins, statistic='median')
median_magr_model2, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['rmag2'],
bins=logsm_bins, statistic='median')
median_magr_model3, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['rmag3'],
bins=logsm_bins, statistic='median')
median_magr_v3, __, __ = binned_statistic(
np.log10(v3_mock['obs_sm']), v3_mock['rmag'],
bins=logsm_bins, statistic='median')
nskip_mock = 50
__=ax.plot(logsm_mids, median_magr_model1, color='blue')
__=ax.plot(logsm_mids, median_magr_model2, color='green')
__=ax.plot(logsm_mids, median_magr_model3, color='red')
__=ax.plot(logsm_mids, median_magr_v3, ':', color='k')
ylim = ax.set_ylim(-19, -23.25)
xlim = ax.set_xlim(9.5, 11.5)
ylim = ax.set_ylim(-14, -24)
xlim = ax.set_xlim(8, 12)
fig, ax = plt.subplots(1, 1)
__=ax.hist(mock['restframe_extincted_sdss_abs_magr'],
bins=50, normed=True, alpha=0.8, color='blue')
__=ax.hist(mock['rmag2'], bins=50, normed=True, alpha=0.8, color='green')
__=ax.hist(mock['rmag3'], bins=50, normed=True, alpha=0.8, color='red')
xlim = ax.set_xlim(-16, -22.8)
###Output
_____no_output_____
###Markdown
Compare cumulative number density to Zehavi+11
###Code
from cosmodc2.sdss_colors import zehavi11_cumulative_nd
lumthresh_h1p0, cumnd_sdss = zehavi11_cumulative_nd()
from cosmodc2.mock_diagnostics import cumulative_nd
volume_v4 = 250.**3.
protoDC2_littleh = 0.7
cumnd_pdc2_v4 = cumulative_nd(mock['restframe_extincted_sdss_abs_magr'],
volume_v4, protoDC2_littleh, lumthresh_h1p0)
cumnd_pdc2_v4_model2 = cumulative_nd(mock['rmag2'],
volume_v4, protoDC2_littleh, lumthresh_h1p0)
cumnd_pdc2_v4_model3 = cumulative_nd(mock['rmag3'],
volume_v4, protoDC2_littleh, lumthresh_h1p0)
volume_v3 = 250.**3.
cumnd_pdc2_v3 = cumulative_nd(v3_mock['rmag'],
volume_v3, protoDC2_littleh, lumthresh_h1p0)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True)
__=ax1.plot(lumthresh_h1p0, np.log10(cumnd_sdss), label=r'${\rm Zehavi+11}$', color='blue')
__=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4),
label=r'${\rm protoDC2\ v4}$', color='k')
__=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4_model2),
label=r'${\rm protoDC2\ v4\ model\ 2}$', color='green')
__=ax1.plot(lumthresh_h1p0, np.log10(cumnd_pdc2_v4_model3),
label=r'${\rm protoDC2\ v4\ model\ 3}$', color='red')
fracdiff_pdc2_v4 = (cumnd_pdc2_v4 - cumnd_sdss)/cumnd_sdss
fracdiff_pdc2_v4_model2 = (cumnd_pdc2_v4_model2 - cumnd_sdss)/cumnd_sdss
fracdiff_pdc2_v4_model3 = (cumnd_pdc2_v4_model3 - cumnd_sdss)/cumnd_sdss
fracdiff_pdc2_v3 = (cumnd_pdc2_v3 - cumnd_sdss)/cumnd_sdss
__=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4,
label=r'${\rm protoDC2\ v4}$', color='blue')
__=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4_model2,
label=r'${\rm protoDC2\ v4\ model\ 2}$', color='green')
__=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v4_model3,
label=r'${\rm protoDC2\ v4\ model\ 3}$', color='red')
__=ax2.plot(lumthresh_h1p0, fracdiff_pdc2_v3, ':', color='k',
label=r'${\rm protoDC2\ v3}$')
__=ax2.plot(np.linspace(-100, 100, 100), np.zeros(100), ':', color='k')
legend = ax1.legend()
xlabel = ax1.set_xlabel(r'${\rm M_{r}}$')
ylabel = ax1.set_ylabel(r'$n(<{\rm M_r})\ [(h/{\rm Mpc})^3]$')
xlim1 = ax1.set_xlim(-18, -22)
ylim2 = ax2.set_ylim(-1, 2)
fig, ax = plt.subplots(1, 1)
nskip_sdss = 10
# __=ax.scatter(sdss['sm'][::nskip_sdss],
# sdss['restframe_extincted_sdss_abs_magr'][::nskip_sdss], s=0.1)
from scipy.stats import binned_statistic
logsm_bins = np.linspace(8, 11.7, 40)
logsm_mids = 0.5*(logsm_bins[:-1] + logsm_bins[1:])
median_magr_model1, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['restframe_extincted_sdss_abs_magr'],
bins=logsm_bins, statistic='median')
median_magr_model2, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['rmag2'],
bins=logsm_bins, statistic='median')
median_magr_model3, __, __ = binned_statistic(
np.log10(mock['obs_sm']), mock['rmag3'],
bins=logsm_bins, statistic='median')
nskip_mock = 50
# __=ax.plot(logsm_mids, median_magr_model1, color='k')
# __=ax.plot(logsm_mids, median_magr_model2, color='green')
# __=ax.plot(logsm_mids, median_magr_model3, color='red')
nskip = 200
__=ax.scatter(np.log10(mock['obs_sm'][::nskip]),
mock['restframe_extincted_sdss_abs_magr'][::nskip], s=0.1)
ylim = ax.set_ylim(-15, -24)
xlim = ax.set_xlim(7, 12)
###Output
_____no_output_____
###Markdown
Compare luminosity threshold clustering to Zehavi+11
###Code
from cosmodc2.mock_diagnostics import zehavi_wp
period = 250.
x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz']
magr = mock['restframe_extincted_sdss_abs_magr']
rp_mids, wp_um_19p0 = zehavi_wp(x, y, z, vz,
period, magr, -19.0, protoDC2_littleh)
rp_mids, wp_um_19p5 = zehavi_wp(x, y, z, vz,
period, magr, -19.5, protoDC2_littleh)
rp_mids, wp_um_20p5 = zehavi_wp(x, y, z, vz,
period, magr, -20.5, protoDC2_littleh)
rp_mids, wp_um_21p5 = zehavi_wp(x, y, z, vz,
period, magr, -21.5, protoDC2_littleh)
x, y, z, vz = v3_mock['x'], v3_mock['y'], v3_mock['z'], v3_mock['vz']
magr = v3_mock['rmag']
rp_mids, wp_v3_19p0 = zehavi_wp(x, y, z, vz,
period, magr, -19.0, protoDC2_littleh)
rp_mids, wp_v3_19p5 = zehavi_wp(x, y, z, vz,
period, magr, -19.5, protoDC2_littleh)
rp_mids, wp_v3_20p5 = zehavi_wp(x, y, z, vz,
period, magr, -20.5, protoDC2_littleh)
rp_mids, wp_v3_21p5 = zehavi_wp(x, y, z, vz,
period, magr, -21.5, protoDC2_littleh)
x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz']
magr = mock['rmag2']
rp_mids, wp_um2_19p0 = zehavi_wp(x, y, z, vz,
period, magr, -19.0, protoDC2_littleh)
rp_mids, wp_um2_19p5 = zehavi_wp(x, y, z, vz,
period, magr, -19.5, protoDC2_littleh)
rp_mids, wp_um2_20p5 = zehavi_wp(x, y, z, vz,
period, magr, -20.5, protoDC2_littleh)
rp_mids, wp_um2_21p5 = zehavi_wp(x, y, z, vz,
period, magr, -21.5, protoDC2_littleh)
x, y, z, vz = mock['x'], mock['y'], mock['z'], mock['vz']
magr = mock['rmag3']
rp_mids, wp_um3_19p0 = zehavi_wp(x, y, z, vz,
period, magr, -19.0, protoDC2_littleh)
rp_mids, wp_um3_19p5 = zehavi_wp(x, y, z, vz,
period, magr, -19.5, protoDC2_littleh)
rp_mids, wp_um3_20p5 = zehavi_wp(x, y, z, vz,
period, magr, -20.5, protoDC2_littleh)
rp_mids, wp_um3_21p5 = zehavi_wp(x, y, z, vz,
period, magr, -21.5, protoDC2_littleh)
from cosmodc2.sdss_colors.sdss_measurements import rp as rp_zehavi
from cosmodc2.sdss_colors import zehavi11_clustering
wp_zehavi_18p5 = zehavi11_clustering(-18.5)
wp_zehavi_19p0 = zehavi11_clustering(-19.0)
wp_zehavi_19p5 = zehavi11_clustering(-19.5)
wp_zehavi_20p0 = zehavi11_clustering(-20.0)
wp_zehavi_20p5 = zehavi11_clustering(-20.5)
wp_zehavi_21p0 = zehavi11_clustering(-21.0)
wp_zehavi_21p5 = zehavi11_clustering(-21.5)
fig, _axes = plt.subplots(2, 2, figsize=(10, 8))
((ax1, ax2), (ax3, ax4)) = _axes
axes = ax1, ax2, ax3, ax4
for ax in axes:
__=ax.loglog()
# __=ax.plot(rp_mids, rp_mids*wp_v3_20p5, '--', color='red')
__=ax1.plot(rp_mids, rp_mids*wp_um_19p0, color='blue')
__=ax1.plot(rp_mids, rp_mids*wp_um2_19p0, '--', color='green')
__=ax1.plot(rp_mids, rp_mids*wp_um3_19p0, '--', color='red')
__=ax1.plot(rp_mids, rp_mids*wp_v3_19p0, ':', color='k')
__=ax1.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_19p0, rp_zehavi*0.2*wp_zehavi_19p0,
fmt='.', color='green')
__=ax2.plot(rp_mids, rp_mids*wp_um_19p5, color='blue')
__=ax2.plot(rp_mids, rp_mids*wp_um2_19p5, '--', color='green')
__=ax2.plot(rp_mids, rp_mids*wp_um3_19p5, '--', color='red')
__=ax2.plot(rp_mids, rp_mids*wp_v3_19p5, ':', color='k')
__=ax2.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_19p5, rp_zehavi*0.2*wp_zehavi_19p5,
fmt='.', color='green')
__=ax3.plot(rp_mids, rp_mids*wp_um_20p5, color='blue')
__=ax3.plot(rp_mids, rp_mids*wp_um2_20p5, '--', color='green')
__=ax3.plot(rp_mids, rp_mids*wp_um3_20p5, '--', color='red')
__=ax3.plot(rp_mids, rp_mids*wp_v3_20p5, ':', color='k')
__=ax3.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_20p5, rp_zehavi*0.2*wp_zehavi_20p5,
fmt='.', color='green')
__=ax4.plot(rp_mids, rp_mids*wp_um_21p5, color='blue')
__=ax4.plot(rp_mids, rp_mids*wp_um2_21p5, '--', color='green')
__=ax4.plot(rp_mids, rp_mids*wp_um3_21p5, '--', color='red')
__=ax4.plot(rp_mids, rp_mids*wp_v3_21p5, ':', color='k')
__=ax4.errorbar(rp_zehavi, rp_zehavi*wp_zehavi_21p5, rp_zehavi*0.2*wp_zehavi_21p5,
fmt='.', color='green')
title1 = ax1.set_title(r'${\rm M_{r} < -19}$')
title2 = ax2.set_title(r'${\rm M_{r} < -19.5}$')
title3 = ax3.set_title(r'${\rm M_{r} < -20.5}$')
title4 = ax4.set_title(r'${\rm M_{r} < -21.5}$')
__=ax1.xaxis.set_ticks_position('none')
__=ax1.set_xticklabels([''])
__=ax2.xaxis.set_ticks_position('none')
__=ax2.set_xticklabels([''])
__=ax2.yaxis.set_ticks_position('none')
__=ax2.set_yticklabels([''])
__=ax4.yaxis.set_ticks_position('none')
__=ax4.set_yticklabels([''])
from cosmodc2.sdss_colors.sdss_measurements import rp as rp_zehavi
from cosmodc2.sdss_colors import zehavi11_clustering
wp_zehavi_18p5 = zehavi11_clustering(-18.5)
wp_zehavi_19p0 = zehavi11_clustering(-19.0)
wp_zehavi_19p5 = zehavi11_clustering(-19.5)
wp_zehavi_20p0 = zehavi11_clustering(-20.0)
wp_zehavi_20p5 = zehavi11_clustering(-20.5)
wp_zehavi_21p0 = zehavi11_clustering(-21.0)
wp_zehavi_21p5 = zehavi11_clustering(-21.5)
fig, _axes = plt.subplots(2, 2, figsize=(10, 8))
((ax1, ax2), (ax3, ax4)) = _axes
axes = ax1, ax2, ax3, ax4
for ax in axes:
__=ax.loglog()
# __=ax.plot(rp_mids, rp_mids*wp_v3_20p5, '--', color='red')
__=ax1.plot(rp_mids, wp_um_19p0, color='green')
__=ax1.plot(rp_mids, wp_um2_19p0, '--', color='green')
__=ax1.plot(rp_mids, wp_um3_19p0, '--', color='red')
__=ax1.plot(rp_mids, wp_v3_19p0, ':', color='k')
__=ax1.errorbar(rp_zehavi, wp_zehavi_19p0, 0.2*wp_zehavi_19p0,
fmt='.', color='green')
__=ax2.plot(rp_mids, wp_um_19p5, color='green')
__=ax2.plot(rp_mids, wp_um2_19p5, '--', color='green')
__=ax2.plot(rp_mids, wp_um3_19p5, '--', color='red')
__=ax2.plot(rp_mids, wp_v3_19p5, ':', color='k')
__=ax2.errorbar(rp_zehavi, wp_zehavi_19p5, 0.2*wp_zehavi_19p5,
fmt='.', color='green')
__=ax3.plot(rp_mids, wp_um_20p5, color='green')
__=ax3.plot(rp_mids, wp_um2_20p5, '--', color='green')
__=ax3.plot(rp_mids, wp_um3_20p5, '--', color='red')
__=ax3.plot(rp_mids, wp_v3_20p5, ':', color='k')
__=ax3.errorbar(rp_zehavi, wp_zehavi_20p5, 0.2*wp_zehavi_20p5,
fmt='.', color='green')
__=ax4.plot(rp_mids, wp_um_21p5, color='green')
__=ax4.plot(rp_mids, wp_um2_21p5, '--', color='green')
__=ax4.plot(rp_mids, wp_um3_21p5, '--', color='red')
__=ax4.plot(rp_mids, wp_v3_21p5, ':', color='k')
__=ax4.errorbar(rp_zehavi, wp_zehavi_21p5, 0.2*wp_zehavi_21p5,
fmt='.', color='green')
title1 = ax1.set_title(r'${\rm M_{r} < -19}$')
title2 = ax2.set_title(r'${\rm M_{r} < -19.5}$')
title3 = ax3.set_title(r'${\rm M_{r} < -20.5}$')
title4 = ax4.set_title(r'${\rm M_{r} < -21.5}$')
__=ax1.xaxis.set_ticks_position('none')
__=ax1.set_xticklabels([''])
__=ax2.xaxis.set_ticks_position('none')
__=ax2.set_xticklabels([''])
__=ax2.yaxis.set_ticks_position('none')
__=ax2.set_yticklabels([''])
__=ax4.yaxis.set_ticks_position('none')
__=ax4.set_yticklabels([''])
# wp_um_19p0 = wp(um_pos_19p0, rp_bins, pi_max, period=250, num_threads='max')
# wp_um_20p0 = wp(um_pos_20p0, rp_bins, pi_max, period=250, num_threads='max')
# wp_um_21p0 = wp(um_pos_21p0, rp_bins, pi_max, period=250, num_threads='max')
# wp_um_21p5 = wp(um_pos_21p5, rp_bins, pi_max, period=250, num_threads='max')
print(mock.keys())
satmask = mock['upid'] != -1
cluster_mask = mock['host_halo_mvir'] > 10**14
mw_mask = (mock['host_halo_mvir'] > 10**11.75) & (mock['host_halo_mvir'] < 10**12.5)
sm_mask = (mock['obs_sm'] > 10**10) & (mock['obs_sm'] < 10**10.25)
fig, ax = plt.subplots(1, 1)
__=ax.hist(np.log10(mock['obs_ssfr'][satmask & mw_mask & sm_mask]), bins=50, normed=True,
alpha=0.8, color='blue')
__=ax.hist(np.log10(mock['obs_ssfr'][satmask & cluster_mask & sm_mask]), bins=50, normed=True,
alpha=0.8, color='red')
###Output
['halo_id', 'upid', 'vpeak', 'mpeak', 'mvir', 'vmax', 'sm', 'sfr', 'obs_sm', 'obs_sfr', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'hostid', 'host_halo_x', 'host_halo_y', 'host_halo_z', 'host_halo_vx', 'host_halo_vy', 'host_halo_vz', 'host_halo_mvir', 'host_centric_x', 'host_centric_y', 'host_centric_z', 'host_centric_vx', 'host_centric_vy', 'host_centric_vz', 'obs_sfr_percentile', 'sfr_percentile', 'obs_ssfr', 'restframe_extincted_sdss_abs_magr', 'rmag2', 'rmag3']
|
1-HelloWorld.ipynb | ###Markdown
1. Hello World

This practical is for you to get used to the Jupyter interface. The cell below prints "Hello World".
###Code
print("Hello World")
###Output
_____no_output_____
###Markdown
Printing something is a way to confirm that the setup is done and the environment is usable. Whenever we start using a new language, this is usually the first thing that we do.

Try "printing" your name through the code below.
###Code
#Try printing your name.
print(None)
###Output
_____no_output_____
###Markdown
Comments Comments are a way to document your code. A comment usually starts with a hash (#) symbol. If you try to execute the comment below, nothing happens.
###Code
#print("Hello World")
###Output
_____no_output_____ |
Deploy Steam Savings Calc.ipynb | ###Markdown
Tutorial: Creating and pushing calculated signalsHow to push calculated signals and scalars using Seeq Data Lab and manipulate worksheet items.- **Author:** Siang Lim- **Date:** June 6th 2022 Background**Steam savings** is one component in a typical refinery energy dashboard.In this notebook, we will demonstrate how to use Seeq Data Lab to calculate steam savings and push the results back to the Workbench, using the following tags in the Splitter as a demo:- **53FFR412** - Stripping Steam/Bottoms Ratio- **53FC128** - Bottoms to FCC flow CalculationsSteam savings is defined relative to a baseline steam flow to bottoms. For this tag, the baseline ratio is $R_b = 7.1$.$$\begin{align}F, \text{Steam Saved [lb/h]} &= (R_b - R_s) * F_B \\C, \text{Steam Cost [\$/(klb/h)]} &= \$13.51 \quad \text{ (from Planning dept or market prices)}\\S, \text{Savings [\$/day]} &= F \text{ [lb/h]} \cdot C \text{ [\$/(klb/h)]} \cdot 24 \text{ [h/day]} \cdot 1/1000 \text{ [(klb/lb)]}\end{align}$$Where,$$\begin{align}R_s &= \text{Current steam/bottoms ratio, 53FFR412, [unitless]} \\F_B &= \text{Bottoms flow, 53FC128, [lb/h]}\end{align}$$ Set up variables
###Code
R_b = 7.1
cost = 13.51
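# Quick sanity check of the equations above (added sketch; the ratio and flow
# below are made-up example values, not plant data): at a bottoms flow of
# 50,000 lb/h and a current ratio of 6.8, F = (7.1 - 6.8) * 50000 = 15,000 lb/h
# and S = 15 klb/h * 13.51 $/(klb/h) * 24 h/day, roughly $4,864 per day.
example_ratio, example_flow = 6.8, 50000
example_saved_lb_per_h = (R_b - example_ratio) * example_flow
example_savings_per_day = example_saved_lb_per_h / 1000 * cost * 24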
###Output
_____no_output_____
###Markdown
Seeq formulaCheck out the Seeq training courses if you need a review on Seeq formulas. **Note that:** Seeq variables must start with a letter: `$f128` is valid, `$53ffr412` is not. **First, a signal:** Steam saved is a `signal`, and will be defined as follows.
###Code
steam_saved = f"(({R_b}-$f412)*$f128/24).setUnits('lb/h')"
steam_saved
###Output
_____no_output_____
###Markdown
**Aside:** We've used the Python f-string syntax (as indicated by the `f` before the string) to embed expressions inside strings. In this case, our expression was `R_b`. For more information on f-strings, see https://realpython.com/python-f-strings/ **Second, scalar:** Steam cost is a `scalar` (it has a single value), and will be defined as:
###Code
steam_cost = f"({cost}).setunits('$/klb/h')"
steam_cost
###Output
_____no_output_____
###Markdown
**Third, another signal:** Finally, the savings per day is calculated as:
###Code
savings = f"($ss*$co*24*(1/1000)).setunits('$/day').remove(islessthan(0))"
savings
###Output
_____no_output_____
###Markdown
Variable assignment We'll need to tell Seeq what those variables are: - `$f412` - `$f128` - `$ss` - `$co` Since `$ss` and `$co` in the savings calculation depend on previously calculated values, we will need to push the calculations to Seeq in 2 separate steps. We will see how to do this below. Step 1 - Import libraries
###Code
from seeq import spy
import pandas as pd
pd.set_option('display.max_colwidth', None)
###Output
_____no_output_____
###Markdown
Step 2 - Define tags and data source Replace `YOUR PI SERVER` with your PI server.
###Code
my_items = pd.DataFrame({
'Name': ['53FFR412', '53FC128'],
'Datasource Name': 'YOUR PI SERVER'
})
my_items
###Output
_____no_output_____
###Markdown
Step 3 - Search for tags using `spy.search`
###Code
metadata_df = spy.search(my_items)
metadata_df
###Output
_____no_output_____
###Markdown
Step 4 - First, push PI tags back to Seeq WorkbenchYou can tell Seeq which workbook and worksheet you want to push the signal back to. Leave it blank and it will push it to a default SDL workbook called `Data Lab >> Data Lab Analysis` and worksheet called `From Data Lab`.More information can be found in the SPy.push documentation below using `help(spy.push)`.
###Code
help(spy.push)
workbook_ID = '7836C665-2B4C-4B36-8262-BE5230E102A5' # Change this to where you want it
worksheet_name = 'Dev 1'
push_results = spy.push(
workbook=workbook_ID,
worksheet=worksheet_name,
metadata=metadata_df)
push_results
###Output
_____no_output_____
###Markdown
> Navigate to the link above to see the results. You may need to replace the IP address with your Seeq server if it's not pointing to the right place (still trying to figure out how to fix this) WRONG: - http://`1.2.3.4`/892DF617-6104-4BFF-BCFC-6256FE4DFA7A/workbook/7836C665-2B4C-4B36-8262-BE5230E102A5/worksheet/9F20FD2B-F56E-4D04-8FCB-FF9A661EA39D RIGHT: - http://`MYSEEQSERVER`/892DF617-6104-4BFF-BCFC-6256FE4DFA7A/workbook/7836C665-2B4C-4B36-8262-BE5230E102A5/worksheet/9F20FD2B-F56E-4D04-8FCB-FF9A661EA39D Step 4 - Define calculationsIf you have multi-step calculations where a step depends on a previous calculation, you will need to break them down into several calculations.Here, we will do the steam saved and cost of steam first. In `Formula Parameters`, we are telling Seeq to grab the IDs of `53FFR412` and `53FC128` to assign tags to the variables we've defined.> Notice that we passed an entire DataFrame row to it, Seeq will automatically parse the row to find the IDs. We could've also passed the `metadata_df` row instead of `push_results`. However, we want to display all the signals (tags + calculated values), which we will see later in this notebook. Using `push_results` makes this step easier to understand (code-wise).
###Code
calc_signals = [{'Name': 'Steam Saved',
'Type': 'Signal',
'Formula': steam_saved,
'Formula Parameters': {'$f412': push_results[push_results['Name'] == '53FFR412'],
'$f128': push_results[push_results['Name'] == '53FC128'],
}
},
{'Name': 'Cost of Steam',
'Type': 'Scalar',
'Formula': steam_cost
}]
df_calcs = pd.DataFrame(calc_signals)
df_calcs
###Output
[0;31mNameError: [0mname 'steam_saved' is not defined
Error found at [0;36mline 3[0m in [0;32mcell 1[0m.
###Markdown
Step 6 - Push first 2 calculations to Seeq Now we push the first 2 calcs, then store the results as `push_results_2`.
###Code
push_results_2 = spy.push(
workbook=workbook_ID,
worksheet=worksheet_name,
metadata=df_calcs)
push_results_2
###Output
_____no_output_____
###Markdown
Step 6 - Push third calculation to Seeq Notice in `push_results_2`, we now have IDs for the steam saved and steam cost variables. Now we can push the savings signal and then store the results as `push_results_3`.
###Code
calc_signals = [{'Name': 'Savings per Day',
'Type': 'Signal',
'Formula': savings,
'Formula Parameters': {'$ss': push_results_2[push_results_2['Name'] == 'Steam Saved'],
'$co': push_results_2[push_results_2['Name'] == 'Cost of Steam'],
}
}]
df_calcs = pd.DataFrame(calc_signals)
df_calcs
push_results_3 = spy.push(
workbook=workbook_ID,
worksheet=worksheet_name,
metadata=df_calcs)
push_results_3
###Output
_____no_output_____
###Markdown
Step 7 - Getting all signals displayed (Method 1)You may have noticed that every time we pushed a signal back to the worksheet, the earlier pushed signals are no longer displayed in the Workbench. However, they are still available in the 'Recently Accessed' menu.To make Seeq display ALL signals, we just need to combine all 3 `push_results` DataFrame and push all signals together. There are, of course, other ways to do this, that may be computationally more efficient, but this method seems to be the most straightforward (code-wise, and to understand what's going on for novice users). Use `pd.concat()` to merge the 3 dataframes
###Code
df_combined = pd.concat([push_results, push_results_2, push_results_3]).reset_index(drop=True)
df_combined
push_results_final = spy.push(
workbook=workbook_ID,
worksheet=worksheet_name,
metadata=df_combined)
push_results_final
###Output
_____no_output_____
###Markdown
> Take a look at the Workbench, you will see that all 5 signals are now displayed. Step 8 - Getting all signals displayed (Method 2)Alternatively, you could also pull the worksheet using `spy.workbooks` and then modify the `display_items`.
###Code
workbooks_df = spy.workbooks.search({
'ID': workbook_ID
})
workbooks_df
workbooks = spy.workbooks.pull(workbooks_df)
workbooks
###Output
_____no_output_____
###Markdown
Check out the worksheets
###Code
workbooks[0].worksheets
###Output
_____no_output_____
###Markdown
We see that the worksheet we want is the 3rd one. Let's look at the display items:
###Code
worksheet_items = workbooks[0].worksheets[2].display_items
worksheet_items
###Output
_____no_output_____
###Markdown
It shows all 5 signals as expected. As long as we know the name and ID of a signal (tag), we can add it to the display. You could also remove a signal by removing a row here. Let's remove the first 2 signals and then push the workbook back.
###Code
new_worksheet_items = worksheet_items.loc[2:,:]
new_worksheet_items
###Output
_____no_output_____
###Markdown
Reassign the dataframe
###Code
workbooks[0].worksheets[2].display_items = new_worksheet_items
###Output
_____no_output_____
###Markdown
Then push it back to the workbench
###Code
spy.workbooks.push(workbooks)
###Output
_____no_output_____ |
3_math_for_datascience/04_Random_Variable_with_SciPy/20180223_04_09_F_distribution.ipynb | ###Markdown
F distribution An F-distributed random variable can be obtained from two chi-square random variables $\chi^2_{1,2}(n_{1, 2})$: divide each chi-square sample by its degrees-of-freedom parameter (n) and take the ratio of the two.$$\dfrac{x_1 / n_1}{x_2/ n_2} \sim F(n_1, n_2)$$(PDF)$$f(x; n_1,n_2) = \dfrac{\sqrt{\dfrac{(n_1\,x)^{n_1}\,\,n_2^{n_2}} {(n_1\,x+n_2)^{n_1+n_2}}}} {x\,\text{Beta}\!\left(\frac{n_1}{2},\frac{n_2}{2}\right)}$$ F distribution simulation
###Code
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib as mpt
import matplotlib.pyplot as plt
import seaborn as sns
xx = np.linspace(0.03, 3, 1000)
plt.hold(True)
plt.plot(xx, sp.stats.f(1,1).pdf(xx), label="F(1,1)")
plt.plot(xx, sp.stats.f(2,1).pdf(xx), label="F(2,1)")
plt.plot(xx, sp.stats.f(5,2).pdf(xx), label="F(5,2)")
plt.plot(xx, sp.stats.f(10,1).pdf(xx), label="F(10,1)")
plt.plot(xx, sp.stats.f(20,20).pdf(xx), label="F(20,20)")
plt.legend()
plt.show()
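# Added simulation sketch: draw chi-square samples, form the scaled ratio, and
# compare the empirical histogram against the F(5, 20) density from scipy.
n1, n2 = 5, 20
chi1 = np.random.chisquare(n1, size=10000)
chi2 = np.random.chisquare(n2, size=10000)
ratio = (chi1 / n1) / (chi2 / n2)
plt.figure()
plt.hist(ratio, bins=100, range=(0, 5), density=True, alpha=0.4, label="simulated ratio")
xx2 = np.linspace(0.01, 5, 500)
plt.plot(xx2, sp.stats.f(n1, n2).pdf(xx2), label="F(5,20) pdf")
plt.legend()
plt.show()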
###Output
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ipykernel_launcher.py:2: MatplotlibDeprecationWarning: pyplot.hold is deprecated.
Future behavior will be consistent with the long-time default:
plot commands add elements without first clearing the
Axes and/or Figure.
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/__init__.py:805: MatplotlibDeprecationWarning: axes.hold is deprecated. Please remove it from your matplotlibrc and/or style files.
mplDeprecation)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/rcsetup.py:155: MatplotlibDeprecationWarning: axes.hold is deprecated, will be removed in 3.0
mplDeprecation)
|
Labs/5.1-Lab2b_exercises/HCDE411-Week-5-Basic-Visualizations.ipynb | ###Markdown
Basic Data Visualizations This module shows a few different techniques for retrieving and visualizing data using pandas and matplotlib. We will also work with the original cars dataset. You will need to add that dataset to your notebook for some of these examples to work.
###Code
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
###Output
_____no_output_____
###Markdown
To retrieve some data to work with, you'll use a library called `pandas_datareader`, which allows you to connect to multiple external datasources. The documentation is here: https://pydata.github.io/pandas-datareader/ To install the library in your server, use the `pip` tool. Open a terminal session on your server (It is in the Launcher tab. You may need to start a new Launcher from the File menu.). In the terminal session type: `pip install pandas-datareader`.
###Code
import pandas_datareader.data as web
###Output
_____no_output_____
###Markdown
You will need the matplotlib library so that we can make charts. It is common practice to import it **as** `plt` - that's fewer characters to type, every time you want to access the functions. You also import `datetime`, to provide useful functions for working with dates (like getting the current time).
###Code
import matplotlib.pyplot as plt
import datetime as dt
###Output
_____no_output_____
###Markdown
Next you'll retrieve stock tickers as an easy-to-access source of data to practice with. Then create a list to store the stock tickers.
###Code
# Define the instruments to download. We would like to see Apple, Microsoft and the S&P500 index.
tickers = ['AAPL'] #, 'MSFT', 'IBM'] (you can add more tickers as a list)
# We would like all available data from 01/01/2017 until 12/31/2017.
start_date = '2017-01-01' # you can set this to whatever date you want
end_date = dt.datetime.now() # this puts the current time into a variable called end_date
# This next function creates a pandas dataframe containing the results of the DataReader query
# The 'yahoo' datasource provides the stock ticker info. (google and morningstar no longer work).
# The results are stored as a dataframe called df (nice and short!)
df = web.DataReader(tickers, data_source='yahoo', start=start_date, end=end_date)
# Inspect the first 5 rows
df.head()
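# Optional (added note): cache the download locally so later cells can be re-run
# without an internet connection; the filename here is just an example.
df.to_csv('stock_data_2017.csv')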
###Output
_____no_output_____
###Markdown
Now for the first visualization! You use the matplotlib library's plot function to access a basic line graph. It can take many parameters, but it needs at least the data to work with and plot on the y-axis, which can be requested from the column headings you just retrieved into the new dataframe. You can plot the low closing value from the 'Low' column, for example.
###Code
df.plot(y='Low')
###Output
_____no_output_____
###Markdown
Plotting multiple values is easy. Just specify which columns of the dataframe you want to plot.
###Code
df[["High", "Low"]].plot()
###Output
_____no_output_____
###Markdown
You can also change the aesthetics of the plot to meet your needs. There are a lot of pre-set styles that you can choose from (easiest) or you can make your own by modifying specific parameters of the plot function (harder). To list the available styles, use the `style.available` function.
###Code
plt.style.available
###Output
_____no_output_____
###Markdown
To use a specific style, call the `style.use` function and set the parameter to the name of the style you want. You need to call this function every time you want to change the style.
###Code
plt.style.use("fivethirtyeight") #need to reset this every time you want to change the template
df[["High", "Low"]].plot()
plt.style.use("ggplot")
df[["High", "Low"]].plot()
###Output
_____no_output_____
###Markdown
Bar Charts You can also easily plot bar charts using matplotlib. Bar charts are good representations for ranking categorical and nominal data. This example uses Google stock data to create categories of how many closing days were Poor, Good, or Stellar, depending on how they compare to the average closing value over the whole time period. Suppose you want to answer the question: *"How many closing stock prices were low, medium or high compared to the average closing price?"* To do this, you need to know the average price over that time period and to create three categories for the closing values, compared to that average. You can use Python to create categories of data from the stock prices. First get stock prices for Google (over the same time period as above). Then calculate what the average (mean) price was over that time period.
###Code
google = web.DataReader('GOOG', data_source='yahoo', start=start_date, end=end_date)
google['Close'].mean()
google
###Output
_____no_output_____
###Markdown
You can use the mean price over that period to create three categories, depending upon whether the closing price on a day was lower, near it or above it. To do this, create a function that you use to evaluate each price and set its **rank performance**. You will pass this function the price on each row of the dataframe.
###Code
def rank_performance(stock_price):
if stock_price <= 900:
return "Poor"
elif stock_price>900 and stock_price <=1200:
return "Good"
elif stock_price>1200:
return "Stellar"
###Output
_____no_output_____
###Markdown
You then run this custom function against each of the values in the **Close** column.
###Code
google['Close'].apply(rank_performance)
###Output
_____no_output_____
###Markdown
Note that the values in the dataframe haven't actually changed - calling apply() simply returned a new Series of rankings without storing it anywhere. To show the data hasn't changed, just view the object:
###Code
google
###Output
_____no_output_____
###Markdown
To finally create the bar chart of categories, you need to count how many times each ranking occurred. Conveniently, the `value_counts()` function does this. If you use dot "." notation to append this function to the other ones, you don't have to create an intermediate variable to store the counts. You can just pass along the results right on to the `.plot()` function. In this way, you are chaining the results of each step with the "dot" notation. Note the `kind` parameter sets it to a bar chart. *get google 'Close' -> apply the rank performance function -> count the results -> plot the results*
###Code
google['Close'].apply(rank_performance).value_counts().plot(kind="bar")
###Output
_____no_output_____
###Markdown
If for some reason, you wanted a horizontal bar chart, just set the `kind` parameter to `"barh"`.
###Code
google["Close"].apply(rank_performance).value_counts().plot(kind="barh")
###Output
_____no_output_____
###Markdown
Pie Charts It is similarly easy to plot categories with a pie chart, to create a part-to-whole comparison. First you load the results of the `DataReader` into a new variable to work with. Let's take Johnson & Johnson for example.
###Code
jnj = web.DataReader('JNJ', data_source='yahoo', start='2016-01-01', end=dt.datetime.now())
jnj.head()
###Output
_____no_output_____
###Markdown
How did each day's performance compare to the average? First let's find out the average:
###Code
jnj['Close'].mean()
###Output
_____no_output_____
###Markdown
We can write another custom function to determine whether each value is above or below the average price over this time period.
###Code
def above_or_below(stock_price):
if stock_price >= 128.33:
return "Above average"
else:
return "Below average"
###Output
_____no_output_____
###Markdown
You can then create a pie chart based upon the results of your custom function. Note the styling choices in this example. A full list of the styling parameters is in the matplotlib documentation: https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.pie.html
###Code
labels='above','below'
colors = ['mediumseagreen','lightcoral']
jnj["Close"].apply(above_or_below).value_counts().plot(kind='pie', legend=False, labels=labels, colors=colors)
###Output
_____no_output_____
###Markdown
Scatter Plots Scatterplots require at least two columns of data, because you need to specify which axes to compare. To try out these examples, you need my `original cars.csv` dataset, on Kaggle. Use the `read_csv()` function to create a dataframe from the file.
###Code
cars = pd.read_csv("data_cars_2004.csv")
cars # show the head and tail of this file
###Output
_____no_output_____
###Markdown
To show what a generic scatterplot might look like, you can create a bunch of random points with random sizes and colors.
###Code
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
size = (30 * np.random.rand(N))**2 # 0 to 15 point radii
plt.scatter(x, y, s=size, c=colors, alpha=0.5)
plt.show()
# DataFrame.plot can take the column names directly, so separate x/y variables
# are not needed and the plot only has to be drawn once
cars[['Hwy MPG','HP']].plot(kind='scatter', x='Hwy MPG', y='HP', alpha=0.5)
###Output
_____no_output_____
###Markdown
You can access the `size` parameter to change how big the dots are. And the `figsize` to adjust how big the graph is.
###Code
#list(cars)
size=cars[['Retail Price']] # we can use the size parameter to set the size of the marks
cars[['Hwy MPG','HP']].plot(kind='scatter', x='Hwy MPG', y='HP', alpha=0.5, s=size*.005, figsize=(12,8))
###Output
_____no_output_____
###Markdown
Distributions You can easily plot the distribution of values in an axis (i.e., column) using the `hist()` function. You can specify a list of only the columns you want.
###Code
hist=cars.hist(column='Hwy MPG')
###Output
_____no_output_____
###Markdown
You can plot several columns by passing a list to the `column` parameter.
###Code
hist=cars.hist(column=['Hwy MPG', 'HP'])
###Output
_____no_output_____
###Markdown
If you don't specify any columns, you get them all! In the example below, the figure is made larger so that the histograms don't overlap each other.
###Code
hist=cars.hist(figsize=(16,12))
###Output
_____no_output_____
###Markdown
You can customize the histogram by providing the hist() method additional parameters and matplotlib styling:
###Code
hist = cars.hist(column='Hwy MPG', bins=10, grid=False, figsize=(12,8), color='#4290be', zorder=2, rwidth=0.9)
hist = hist[0] # cars.hist returns a 2-D array of axes; take the first row so we can loop over the axes
for x in hist:
# Switch off tickmarks
x.tick_params(axis="both", which="both", bottom=False, top=True, labelbottom=True, left=False, right=False, labelleft=True)
# Draw horizontal axis lines
vals = x.get_yticks()
for tick in vals:
x.axhline(y=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)
# Set title (set to "" for no title!)
x.set_title("Cars and MPG")
# Set x-axis label
x.set_xlabel("Miles per Gallon", labelpad=20, weight='bold', size=12)
# Set y-axis label
x.set_ylabel("Number of cars", labelpad=20, weight='bold', size=12)
###Output
_____no_output_____
###Markdown
Exercises: Part 1Refer to the examples above to guide you in completing the following exercises. You may need to do some research in the Pandas or Matplotlib documentation to help you out. Exercise 1- Create a new dataframe that contains only the Name, Highway MPG rating, and Weight of each car in the dataset.- Display the last 15 entries in the dataframe- Use the new dataframe to create a bar chart that shows the number of cars for each MPG rating (e.g., there are 10 cars with an MPG of 32), with the values sorted in ascending order.
###Code
### Your code here
###Output
_____no_output_____
###Markdown
Exercise 2- Create a table that shows the names of the top 10 heaviest cars.- Make a new dataframe for only these 10 "heaviest".- Create a horizontal bar chart that shows the top 5 "heaviest" in descending order, their labels and values. It should be sized at 10 x 8. The names of the vehicles should be shown to the left of the bars. Set the x and y labels to show "weight" and "Top 5 heaviest cars" (Hint: it may be easiest to do this with a new dataframe.)
###Code
### Your code here
###Output
_____no_output_____
###Markdown
Exercise 3Flex your skills! Create your own arbitrary chart from any of the values in the Cars dataset. Document your approach in the markdown cells. Cite any external references.
###Code
### Your code here
###Output
_____no_output_____
###Markdown
Exercises: Part 2Make sure that the lab file `Sample-Superstore-Orders.csv` is stored in your notebook before continuing with the following exercises.In Exercises 4-6, we'll use the Superstore Data we saw in the Tableau data cleaning exercises to create visualizations. Exercise 4Use the source data to create a histogram of the distribution of Sales in Illinois. Use your own judgement as to how to style and present the chart.
###Code
### Your code here
###Output
_____no_output_____
###Markdown
Exercise 5Use the source data to create a time series of Sales data for Illinois, New York, and California. Your visualization should show data for all three states in the same chart. Use your own judgement as to how to style and present the chart.
###Code
### Your code here
###Output
_____no_output_____
###Markdown
Exercise 6Locate your own data file for the final exercise. It can be any .csv file from any source that you have access to or that is online. Check Canvas for a list of data sources. Use your source data to a new visualization. Explain your chart, including choice of dimensions, values, and encodings in comments or a markdown text box.**Stretch Goal for one extra point: Create an additional visualization of a type that we did not cover in class. (e.g., bar chart, box plot, etc.)**
###Code
### Your code here
###Output
_____no_output_____ |
DecisionTree/C4.5DecisionTree.ipynb | ###Markdown
Dataset source: http://archive.ics.uci.edu/ml/datasets/Heart+Disease - This dataset was chosen because its features include both categorical (discrete) and continuous types.
###Code
df = pd.read_csv("processed.cleveland.data",header=None,names=["age","sex","cp","trestbps","chol","fbs","restectg","thalach","exang","oldpeak","slope","ca","thal","num"])
df.head()
# numerical or categorical
for index in df:
print(index,len(set(df[index])))
# drop ? values
print(len(df))
index = ["?" not in row for row in df.values]
# convert to np.float
dataset = df.values[index].astype(np.float32)
print(len(dataset))
# split to X,y
X,y = dataset[:,:-1],dataset[:,-1]
y = y.astype(np.int64)
print(X.shape)
print(y.shape)
kinds = ["categorical" if len(set(col))<6 else "numerical"for col in X.T]
print(len(kinds))
from sklearn.model_selection import train_test_split
from collections import Counter
X_train,X_test,y_train,y_test = train_test_split(X,y)
def entropy(y):
precs = np.array(list(Counter(y).values()))/len(y)
ent = np.sum(-1 * precs * np.log2(precs))
return ent
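# Added sanity check: a perfectly balanced binary label set has an entropy of
# exactly 1 bit, and a single-class set has an entropy of 0.
print(entropy(np.array([0, 0, 1, 1])), entropy(np.array([1, 1, 1, 1])))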
# decide which feature to split on
def decide_feature(X,y,fas,kinds):
# fas refers to feature_available;if one feature can be splitted,this feature_available is True,else False
(n_samples,n_features) = X.shape
ers = np.zeros(n_features)
bestfvs = np.zeros(n_features)
for fi in range(n_features):
if not fas[fi]:
continue
if kinds[fi] == "categorical":
I,H = entropy(y),0
for fv,c in Counter(X[:,fi]).items():
splity = y[X[:,fi] == fv]
proc = c/n_samples
I -= proc * entropy(splity)
H += -proc * np.log2(proc)
ers[fi] = I/H
else:
for fv in list(sorted(set(X[:,fi])))[:-1]:
splity_less = y[X[:,fi] <= fv]
proc_less = len(splity_less) / n_samples
splity_more = y[X[:,fi] > fv]
proc_more = len(splity_more) / n_samples
I = -proc_less * entropy(splity_less) - proc_more * entropy(splity_more) + entropy(y)
H = -1*proc_less * np.log2(proc_less) - proc_more * np.log2(proc_more)
if I/H > ers[fi]:
ers[fi] = I/H
bestfvs[fi] = fv
return ers,bestfvs
fas = np.array([True]*len(y_train))
decide_feature(X_train,y_train,fas,kinds)
def build_tree(X,y,fas,kinds):
counts = dict(Counter(y))
if len(counts) == 1 or (fas==False).all():
result = max(counts,key=counts.get)
return {"counts":counts,"result":result}
ers,bestfvs = decide_feature(X,y,fas,kinds)
next_ = {}
bestfi = np.argmax(ers)
if kinds[bestfi] == "categorical":
fas[bestfi] = False
for fv in set(X[:,bestfi]):
index = (X[:,bestfi] == fv)
next_["{}{}".format("==",fv)] = build_tree(X[index],y[index],fas,kinds)
else:
bestfv = bestfvs[bestfi]
index_less = X[:,bestfi] <= bestfv
fas_less = fas.copy()
if len(set(X[index_less][:,bestfi])) == 1:
fas_less[bestfi] = False
next_["{}{}".format("<=",bestfv)] = build_tree(X[index_less],y[index_less],fas_less,kinds)
index_more = X[:,bestfi] > bestfv
fas_more = fas.copy()
if len(set(X[index_more][:,bestfi])) == 1:
fas_more[bestfi] = False
next_["{}{}".format(">",bestfv)] = build_tree(X[index_more],y[index_more],fas_more,kinds)
return {"fi":bestfi,"counts":counts,"result":None,"next":next_}
fas = np.array([True]*X_train.shape[-1])
tree = build_tree(X_train,y_train,fas,kinds)
tree
# build_tree above has two bugs: first, the divisor must not be zero when computing the gain ratio; second, the stopping condition needs improvement
def predict_one(x,kinds,tree):
while tree["result"] == None:
fi = tree["fi"]
fv = x[fi]
flag = False
for condition in tree["next"]:
if eval(str(fv)+condition):
tree = tree["next"][condition]
flag = True
break
if not flag:
counts = tree["counts"]
return max(counts,key=counts.get)
return tree["result"]
class C45DecisionTree:
@staticmethod
def entropy(y):
precs = np.array(list(Counter(y).values()))/len(y)
ent = np.sum(-1 * precs * np.log2(precs))
return ent
    # decide which feature to split on
def decide_feature(self,X,y,fas,kinds):
# fas refers to feature_available;if one feature can be splitted,this feature_available is True,else False
(n_samples,n_features) = X.shape
ers = np.ones(n_features) * -1
bestfvs = np.zeros(n_features)
for fi in range(n_features):
if not fas[fi]:
continue
if kinds[fi] == "categorical":
I,H = self.entropy(y),0
for fv,c in Counter(X[:,fi]).items():
splity = y[X[:,fi] == fv]
proc = c/n_samples
I -= proc * self.entropy(splity)
H += -proc * np.log2(proc)
ers[fi] = I/(H+1e-7)
else:
# print(set(X[:,fi]))
for fv in list(sorted(set(X[:,fi])))[:-1]:
splity_less = y[X[:,fi] <= fv]
proc_less = len(splity_less) / n_samples
splity_more = y[X[:,fi] > fv]
proc_more = len(splity_more) / n_samples
I = -proc_less * self.entropy(splity_less) - proc_more * self.entropy(splity_more) + self.entropy(y)
H = -1*proc_less * np.log2(proc_less) - proc_more * np.log2(proc_more)
if I/(H+1e-7) > ers[fi]:
ers[fi] = I/(H+1e-7)
bestfvs[fi] = fv
return ers,bestfvs
def build_tree(self,X,y,fas,kinds):
counts = dict(Counter(y))
result = max(counts,key=counts.get)
# print("fas",fas,"len(counts)",len(counts))
if len(counts) == 1 or (fas==False).all():
return {"counts":counts,"result":result}
ers,bestfvs = self.decide_feature(X,y,fas,kinds)
if (ers == -1).all():
return {"counts":counts,"result":result}
next_ = {}
bestfi = np.argmax(ers)
# print(bestfi,ers)
if kinds[bestfi] == "categorical":
fas[bestfi] = False
for fv in set(X[:,bestfi]):
index = (X[:,bestfi] == fv)
# print("next: {} {} {}, size:{}".format(bestfi,"==",fv,len(y[index])))
next_["{}{}".format("==",fv)] = self.build_tree(X[index],y[index],fas,kinds)
else:
bestfv = bestfvs[bestfi]
index_less = X[:,bestfi] <= bestfv
fas_less = fas.copy()
if len(set(X[index_less][:,bestfi])) == 1:
fas_less[bestfi] = False
# print("next: {} {} {}, size:{}".format(bestfi,"<=",bestfv,len(y[index_less])))
next_["{}{}".format("<=",bestfv)] = self.build_tree(X[index_less],y[index_less],fas_less,kinds)
index_more = X[:,bestfi] > bestfv
fas_more = fas.copy()
if len(set(X[index_more][:,bestfi])) == 1:
fas_more[bestfi] = False
# print("next: {} {} {}, size:{}".format(bestfi,">=",bestfv,len(y[index_more])))
next_["{}{}".format(">",bestfv)] = self.build_tree(X[index_more],y[index_more],fas_more,kinds)
return {"fi":bestfi,"counts":counts,"result":None,"next":next_}
def fit(self,X,y,kinds):
fas = np.array([True]*X.shape[-1])
self.tree = self.build_tree(X,y,fas,kinds)
def predict_one(self,x):
tree = self.tree
while tree["result"] == None:
fi = tree["fi"]
fv = x[fi]
flag = False
for condition in tree["next"]:
if eval(str(fv)+condition):
tree = tree["next"][condition]
flag = True
break
if not flag:
counts = tree["counts"]
return max(counts,key=counts.get)
return tree["result"]
    def predict(self,X):
        y_predicts = []
        for x in X:  # fixed: iterate over the argument instead of the global X_test
            y_predicts.append(self.predict_one(x))
return y_predicts
def score(self,X_test,y_test):
y_predicts = self.predict(X_test)
return np.sum(y_predicts == y_test)/ len(y_test)
mytree = C45DecisionTree()
mytree.fit(X_train,y_train,kinds)
mytree.score(X_test,y_test)
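# Added baseline (sketch): compare against scikit-learn's decision tree as a rough
# reference point; it treats every column as numeric, so the scores are not directly comparable.
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
print(clf.score(X_test, y_test))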
###Output
_____no_output_____ |
201707104056+chenhe7.25.ipynb | ###Markdown
Lists (list) - A list can store a collection of data of any size; you can think of it as a container.
###Code
def b ():
pass
a = [1,2,1,2,'ab',True,b,[1,2,3,]]
a
c = 'abd'
list(c)
a = [1,2,3,['ab']]
a
###Output
_____no_output_____
###Markdown
Let's warm up with an example first. Creating a list - a = [1,2,3,4,5] Common list operations
###Code
a = [100,200]
b = [1,2,3,4,[a]]
a in b
a = [100,200]
b = [1,2,3,4,a]
a in b
a = [1,2]
b = [1,2]
a+b
a = 'a'
a*5
a = [1,2,3,4,5,[100,200,[1000,[4000]]]]
a
a[5][2][1][0]
b = [1,2,3,4,5,6,7,8,9,10]
b
for i in range(0,10,2):
b[i]=100
b
b=[1,2,3,4,5,6,7,8,9,10]
b
for i in range(0,10,3):
print(b[i:i+2])
v = [1,2,3,[3,4]]
v
count = 0
for i in v:
if type(i)==list:
for j in i:
count = count+1
else:
count =count+1
len(v)
###Output
_____no_output_____
###Markdown
List indexing - Mylist[index] - forward (positive) and reverse (negative) indexing - always watch out for index out-of-range errors
###Code
a = [1,2,3]
max(a)
a.__iter__  # objects with this attribute can be iterated with a for loop
b = [4,3,2,1,]
b
def zwJ(b):
n = len(b)
for j in range(0,n-1):
for i in range(0,n-1-j):
if b[i]>b [i+1]:
b[i],b[i+1]=b[i+1],b[i]
zwJ(b)
print(b)
###Output
[1, 2, 3, 4]
###Markdown
List slicing - Mylist[start:end] - forward and reverse slices. List +, *, in, not in. Using a for loop to traverse elements - a for loop can traverse any iterable. EP: - traverse a list with a while loop (a short example is added at the end of the next code cell). Comparing lists - >, >=, <, <=, ==, !=. List comprehensions: [x for x in range(10)]. List methods
###Code
a = [1,2,3]
b = [100,200]
a.append(b)
a
a = [1,2,3]
b=[100,200]
b.extend(a)
c = [1,2,3,4]
c.insert(0,100)
c.insert(3,100)
c
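# Added example (referenced in the notes above): slicing and traversing a list
# with a while loop.
lst0 = [10, 20, 30, 40, 50]
print(lst0[1:4])    # slice from index 1 up to, but not including, index 4
print(lst0[::-1])   # reversed copy via slicing
i = 0
while i < len(lst0):
    print(lst0[i])
    i += 1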
###Output
_____no_output_____
###Markdown
Splitting a string into a list - split breaks the string on a delimiter you specify
###Code
c = [1,2,3,4,5,6]
for i in range(0,len(c)+3,3):
c.insert(i,100)
c
a =[]
for i in range(10):
if i%2==0:
a.append(i)
a
[x for x in range(10) if x%2==0]
lst = [30,1,2,1,0]
lst.append(40)
lst
lst.insert(1,43)
lst
lst.remove(1)
lst
lst.pop(1)
lst
lst.pop()
lst
lst.sort()
lst
lst.reverse()
lst
a =[1,2,3]
b = a
b
a[0]=100
a
c =[1,2,3]
d = c.copy()
d
e = [1,2,3,[0,11]]
e
import copy
a = [1,2,3,[100,200]]
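# Added example: copy.copy makes a shallow copy (the nested list is shared),
# while copy.deepcopy also copies the nested list.
shallow = copy.copy(a)
deep = copy.deepcopy(a)
a[3][0] = 999
print(shallow[3])   # the shallow copy sees the change
print(deep[3])      # the deep copy does not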
###Output
_____no_output_____
###Markdown
EP:
###Code
a
a.pop()
a = 'a b c d'
a.split(' ')
b = 'a!b!c!'
b.split('!')
b = 'a b c d'
b .split()
###Output
_____no_output_____
###Markdown
Copying lists - copy (shallow copy) - deepcopy, via import copy (deep copy) - http://www.pythontutor.com/visualize.html#mode=edit List sorting - sort - sorted - multi-level sorting - anonymous (lambda) functions EP: - sort the list [5,3,8,0,17] by hand, in ascending or descending order - 1
###Code
def best():
    scores = eval(input(">>"))
    Best = max(scores)
    print(Best)
    # compare each score (as a number) against the best score
    for score in scores:
        if score >= Best - 10:
            print('A')
        elif score >= Best - 20:
            print('B')
        elif score >= Best - 30:
            print('C')
        elif score >= Best - 40:
            print('D')
best()
###Output
>>[40,55,70,58]
70
A
###Markdown
- 2
###Code
yuan = [1,2,35,41,51,64,7]
print (yuan)
fan = []
print('将列表逆序输出')
for i in range(len(yuan)):
a = yuan.pop()
fan.append(a)
print (fan)
###Output
[1, 2, 35, 41, 51, 64, 7]
将列表逆序输出
[7, 64, 51, 41, 35, 2, 1]
###Markdown
- 3
###Code
def Count():
lst = eval(input('>>'))
x = eval(input('>>'))
return lst.count(x)
Count()
###Output
>>2,5,6,5,4,3,23,43,2
>>2
###Markdown
- 4 - 5 - 6
###Code
def index():
lst = eval(input('输入数字列表'))
print(lst.index(min(lst)))
index()
###Output
输入数字列表15,25,4,5,2,1,2
5
###Markdown
- 7
###Code
import random
lst =[1,2,3,4,5,6]
random.shuffle(lst)
print(lst)
def shuffle(lst):
random.shuffle(lst)
print(lst)
shuffle([1,2,3,4,5,6])
###Output
[2, 5, 4, 6, 3, 1]
###Markdown
- 8
###Code
def eliminateDuplicates(lst):
    # keep only the first occurrence of each value
    result = []
    for i in lst:
        if i not in result:
            result.append(i)
    print(result)
eliminateDuplicates([1,2,3,2,1,6,3,4,5,2])
a = [1,2,3,4,5,6,1,2,3]
a
###Output
_____no_output_____
###Markdown
- 9
###Code
def isSort(lst):
    lst = eval(input('>>'))
    # print True if the list is already in ascending order, False otherwise
    print(lst == sorted(lst))
isSort([1,1,3,4,4,5,7,9,10,30,11])
###Output
>>[1,1,3,4,4,5,7,9,10,30,11]
###Markdown
- 10
###Code
def bubbleSort(arr):
    n = len(arr)  # traverse all the array elements
for i in range(n):
for j in range(0, n-i-1):
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [64, 34, 25, 12, 22, 11, 90,20]
bubbleSort(arr)
print ("排序后的数组:")
for i in range(len(arr)):
print ("%d" %arr[i]),
###Output
排序后的数组:
11
12
20
22
25
34
64
90
|
Where-2-relocate-4-job.ipynb | ###Markdown
Where to relocate for a new job around the world In this project, I analyze the best place across the continents to start a new career as a Data Scientist, based on data from the Stack Overflow 2017 survey. 1- Where is a better place to move to find a job (North America, Asia, Europe or other places)? 2- What is the likelihood of an increase or decrease in salary? 3- What about job satisfaction?
###Code
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Dataset ViewFirst let's look at the data set in hand to get some insights and learn about available features
###Code
df = pd.read_csv('survey_results_public.csv')
df.head()
###Output
_____no_output_____
###Markdown
Dataset is made up of 154 different features and there are some Null values which should be taken care of!Now let's see the histogram of the numeric features
###Code
df.hist(figsize=(10, 10), bins=20, grid=False);
###Output
_____no_output_____
###Markdown
What is the distribution of professions among respondents? There are 5 different professions, and most respondents (70%) are professional developers, which makes this dataset well suited for the purpose of this project.
###Code
color = list('rgbkymc')
profession_vals = df['Professional'].value_counts()
print(len(profession_vals))
(profession_vals/df.shape[0]).plot(kind="bar", color=color);
###Output
5
###Markdown
Where are they from? As expected, most of them are from the USA, and if we omit India, most of the others are from Europe!
###Code
country_vals = df['Country'].value_counts()
print(len(country_vals))
(country_vals[:50]/df.shape[0]).plot(kind="bar", color=color, figsize=(15, 15));
###Output
201
###Markdown
There are 18 countries with more than 500 developers each. We can get some insight by grouping professions by country, as follows.
###Code
profession_country = df.groupby('Professional')['Country'].value_counts().unstack().fillna(0)
###Output
_____no_output_____
###Markdown
In later steps I will place each of these 18 countries in its proper Location category, since developers from these countries should influence the analysis. All these countries have more than 500 survey respondents, and as can be seen below, in each country most of them are "professional developers". "Iran" is a surprise in this list. Did you spot China, Japan and South Korea... Where are they?
###Code
profession_country_500 = profession_country.drop([col for col, val in profession_country.sum().iteritems() if val < 500], axis=1)
profession_country_500
###Output
_____no_output_____
###Markdown
I suspect the result below is because developers in East Asia are more reserved about surveys, so we have little information on developers there.
###Code
profession_country[['South Korea', 'Japan', 'China', 'Iran']]
empl_vals = df['EmploymentStatus'].value_counts()
print(len(empl_vals))
(empl_vals/df.shape[0]).plot(kind="bar", color=color );
###Output
7
###Markdown
At this point maybe it's better to think about changing your job, since most people think they are underpaid!!
###Code
country_vals = df['Overpaid'].value_counts()
print(len(country_vals))
(country_vals/df.shape[0]).plot(kind="bar", color=color );
America = ['United States', 'Canada', 'Brazil', 'Argentina']
Europe = ['Liechtenstein', 'Switzerland', 'Iceland', 'Norway', 'Israel', 'Denmark', 'Ireland', 'United Kingdom',
'Germany', 'Netherlands', 'Sweden', 'Luxembourg', 'Austria', 'Finland', 'France', 'Belgium', 'Spain',
'Italy', 'Poland', 'Czech Republic', 'Romania']
Asia = ['Iran', 'Russian Federation','Israel', 'Australia', 'New Zealand', 'Thailand', 'Singapore', 'Hong Kong',
'South Korea', 'Japan', 'China', 'Taiwan', 'Malaysia', 'India', 'Indonesia', 'Vietnam', 'Qatar', 'Oman',
'United Arab Emirates']
df['Location'] = df['Country'].apply(lambda x: 'America' if x in America else ('Europe' if x in Europe else ('Asia' if x in Asia else 'Other')))
df['Location'].value_counts().plot(kind='bar', color=color);
###Output
_____no_output_____
###Markdown
I will use the columns that I think are most suitable for this analysis, and only consider respondents who are professional developers employed full-time.
###Code
best_columns = ['Country', 'YearsCodedJob', 'EmploymentStatus', 'CareerSatisfaction', 'JobSatisfaction', 'JobSeekingStatus', 'HoursPerWeek', 'Salary', 'Location', 'Overpaid']
df = pd.DataFrame(df.query("Professional == 'Professional developer' and EmploymentStatus == 'Employed full-time'"))[best_columns]
df
overpaid_map = {
'Greatly underpaid' : 1,
'Somewhat underpaid' : 2,
'Neither underpaid nor overpaid' : 3,
'Somewhat overpaid' : 4,
'Greatly overpaid' : 5,
np.nan: np.nan
}
df['Overpaid'] = df['Overpaid'].apply(lambda x: np.nan if x == np.nan else overpaid_map[x] )
df_comp = df.groupby(['Location','YearsCodedJob']).mean()
year_map = {'1 to 2 years' : 1,
'10 to 11 years' : 10,
'11 to 12 years' : 11,
'12 to 13 years' : 12,
'13 to 14 years' : 13,
'14 to 15 years' : 14,
'15 to 16 years' : 15,
'16 to 17 years' : 16,
'17 to 18 years' : 17,
'18 to 19 years' : 18,
'19 to 20 years' : 19,
'2 to 3 years' : 2,
'20 or more years' : 20,
'3 to 4 years' : 3,
'4 to 5 years' : 4,
'5 to 6 years' : 5,
'6 to 7 years' : 6,
'7 to 8 years' : 7,
'8 to 9 years' : 8,
'9 to 10 years' : 9,
'Less than a year' : 0}
df_comp = df_comp.reset_index()
df_comp
df_comp['YearsCodedJob'] = df_comp['YearsCodedJob'].apply(lambda x: np.nan if x == np.nan else year_map[x])
df_comp['YearsCodedJob'] = pd.to_numeric(df_comp['YearsCodedJob'])
df_comp = df_comp.sort_values(by='YearsCodedJob')
df_comp.set_index('YearsCodedJob', inplace=True)
df_comp.groupby('Location')['Salary'].plot(legend=True, figsize=(10, 10));
plt.title("Range of Salary between different areas");
plt.xlabel('YearsCodedJob');
plt.ylabel('Average Salary');
df_comp.groupby('Location')['Overpaid'].plot(legend=True, figsize=(10, 10))
plt.title("Who thinks that they are Overpaid?");
plt.xlabel('YearsCodedJob')
plt.ylabel('Overpaid');
df_comp.groupby('Location').mean().CareerSatisfaction
df_comp.groupby('Location').mean().JobSatisfaction
df_comp.groupby('Location').mean().Salary/12
plt.figure(figsize=(10, 8), dpi=80)
plt.scatter(df_comp.groupby('Location').mean().CareerSatisfaction, df_comp.groupby('Location').mean().JobSatisfaction, df_comp.groupby('Location').mean().Salary/12, c=['red','green','blue','yellow'])
plt.title('Comparison of Career and Job Satisfaction\n(Red: America; Green: Asia; Blue: Europe; yellow: Other)')
plt.xlabel('Career Satisfaction')
plt.ylabel('Job Satisfaction');
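# Added check (sketch): quantify the relationship shown above with the pearsonr
# function imported at the top; there are only four regional means, so treat it as indicative.
loc_means = df_comp.groupby('Location').mean()
print(pearsonr(loc_means['CareerSatisfaction'], loc_means['JobSatisfaction']))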
###Output
_____no_output_____ |
proyecto_6.ipynb | ###Markdown
Imports
###Code
fig, ax = plt.subplots(figsize=(14,8))
sns.heatmap(basededatosimportaciones, cmap='jet')
exportaciones=pd.read_excel("/content/export.xlsx")
basededatosexportaciones=exportaciones.set_index("paises")
basededatosexportaciones
###Output
_____no_output_____
###Markdown
Exports
###Code
fig, ax = plt.subplots(figsize=(14,8))
sns.heatmap(basededatosexportaciones, cmap='cool')
balanza=pd.read_excel("/content/balanza.xlsx")
basededatosbalanza=balanza.set_index("paises")
basededatosbalanza
###Output
_____no_output_____
###Markdown
Trade Balance
###Code
fig, ax = plt.subplots(figsize=(14,8))
sns.heatmap(basededatosbalanza, cmap='inferno_r')
###Output
_____no_output_____ |
examples/example-project_movie-likes.ipynb | ###Markdown
2017-2018 Girls Who Code final project example: movies on FacebookBy: Rucheng Diao, diaorchThis iPython notebook hosts the code for final project example for [Girls Who Code, DCMB UofM](http://umich.edu/~girlswc/) for school year 2017 - 2018. The [project data comes from Kaggle](https://www.kaggle.com/nazimamzz/imdb-dataset-of-5000-movie-posters/data). Input dataThis part is for import of Pandas and input data from a previously downloaded data set.
###Code
import pandas as pd
movieData = pd.read_csv('data/20170827-movie_stats-imdb_5000_movie_dataset_kaggle/movie stats - movie_metadata.csv')
# preview of first several lines of data
movieData.head()
# checking what kinds of data there is in the table
print(movieData.columns)
# what are the range of title years of the movies
movieData['title_year'].describe()
###Output
_____no_output_____
###Markdown
Facebook likes vs title yearThis part is plotting the number of Facebook likes vs the title years of the movies, colored by the number of voted IMDB users.
###Code
import matplotlib.pyplot as plt
# iPython notebook setting: show figures inline
%matplotlib inline
# matplotlib setting: figure size
plt.rcParams['figure.figsize'] = (16, 6)
# values on x axis
x = movieData.title_year
# values on y axis
y = movieData.movie_facebook_likes
# values for color scale
c = movieData.num_voted_users
# plotting the scatter plot, using the x-s, y-s, and color values as set above
fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r")
# setting label for x-axis, including text and font size
plt.xlabel("Title year", fontsize = 15)
# setting label for y-axis, including text and font size
plt.ylabel("Number of movie Facebook likes", fontsize = 15)
# setting title for the whole figure, including text and font size
plt.title("Relationship between title year and Facebook likes of movie", fontsize = 20)
# setting title for color bar
plt.colorbar(fig).set_label('Voted users', rotation = 270)
x = movieData.title_year
y = movieData.movie_facebook_likes
c = movieData.num_voted_users
fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r")
# adding a vertical line for year 2004
plt.axvline(x = 2004, color = 'k', linestyle = '--')
plt.xlabel("Title year", fontsize = 15)
plt.ylabel("Number of movie Facebook likes", fontsize = 15)
plt.title("Relationship between title year and Facebook likes of movie", fontsize = 20)
plt.colorbar(fig).set_label('Voted users', rotation = 270)
###Output
_____no_output_____
###Markdown
What are the best liked movies pre- and post-Facebook? We noticed that there are two dots in the figure above that have very high Facebook like counts, one of which is before Facebook came around, and the other one after. We might want to find out what those movies are.
###Code
# How to find the dot that has over 250000 likes and is post Facebook?
# Why do we need only one of the criteria?
# finding movie that has over 250000 likes
findDot_post = movieData[(movieData.movie_facebook_likes > 250000)]
print(findDot_post.to_string())
# How to find the dot that has over 100000 likes and is pre Facebook?
# Why do we need both criteria this time?
# finding movie that has over 100000 likes AND the title year is before 2000
findDot_pre = movieData[(movieData.movie_facebook_likes > 100000) & (movieData.title_year < 2000)]
print(findDot_pre.to_string())
###Output
movie_title color director_name num_critic_for_reviews duration director_facebook_likes actor_3_facebook_likes actor_2_name actor_1_facebook_likes gross genres actor_1_name num_voted_users cast_total_facebook_likes actor_3_name facenumber_in_poster plot_keywords movie_imdb_link num_user_for_reviews language country content_rating budget title_year actor_2_facebook_likes imdb_score aspect_ratio movie_facebook_likes
1937 The Shawshank Redemption Color Frank Darabont 199.0 142.0 0.0 461.0 Jeffrey DeMunn 11000.0 28341469.0 Crime|Drama Morgan Freeman 1689764 13495 Bob Gunton 0.0 escape from prison|first person narration|pris... http://www.imdb.com/title/tt0111161/?ref_=fn_t... 4144.0 English USA R 25000000.0 1994.0 745.0 9.3 1.85 108000
###Markdown
We are also curious which is the earliest movie that has a non-zero Facebook like count:
###Code
findDot_first = movieData[(movieData.movie_facebook_likes > 0) & (movieData.title_year <=1925)]
print(findDot_first.to_string())
###Output
movie_title color director_name num_critic_for_reviews duration director_facebook_likes actor_3_facebook_likes actor_2_name actor_1_facebook_likes gross genres actor_1_name num_voted_users cast_total_facebook_likes actor_3_name facenumber_in_poster plot_keywords movie_imdb_link num_user_for_reviews language country content_rating budget title_year actor_2_facebook_likes imdb_score aspect_ratio movie_facebook_likes
4810 Intolerance: Love's Struggle Throughout the Ages Black and White D.W. Griffith 69.0 123.0 204.0 9.0 Mae Marsh 436.0 NaN Drama|History|War Lillian Gish 10718 481 Walter Long 1.0 huguenot|intolerance|medicis|protestant|wedding http://www.imdb.com/title/tt0006864/?ref_=fn_t... 88.0 NaN USA Not Rated 385907.0 1916.0 22.0 8.0 1.33 691
4885 The Big Parade Black and White King Vidor 48.0 151.0 54.0 6.0 Renée Adorée 81.0 NaN Drama|Romance|War John Gilbert 4849 108 Claire Adams 0.0 chewing gum|climbing a tree|france|translation... http://www.imdb.com/title/tt0015624/?ref_=fn_t... 45.0 NaN USA Not Rated 245000.0 1925.0 12.0 8.3 1.33 226
###Markdown
What does the distribution of Facebook likes of movies look like?The histogram of movie Facebook likes is plotted below. And the summary statistics are printed too. It is also found that a lot of movies have 0 Facebook likes, according to the shape of movie data table and the movie data table of non-zero Facebook likes.
###Code
# plotting histogram of Facebook likes of movies
plt.hist(movieData.movie_facebook_likes, bins = 200, color = 'r')
plt.xlabel("Movie Facebook likes", fontsize = 15)
plt.xlabel("Number of movies", fontsize = 15)
plt.title("Distribution of movie Facebook likes", fontsize = 20)
# checking the summary statistics of Facebook likes for movies
movieData['movie_facebook_likes'].describe()
movieData.shape
# another way to check the summary statistics individually is to use funcions in Numpy
import numpy as np
# checking the median value of movie Facebook likes
movieFacebookLikesMedian = np.median(movieData['movie_facebook_likes'])
print(movieFacebookLikesMedian)
# subsetting data set, keeping only entries that have non-zero Facebook likes
movieDataNonzero = movieData.loc[movieData.movie_facebook_likes != 0]
# checking the dimension/shape of the subset data
movieDataNonzero.shape
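# Added check: what fraction of all movies have zero recorded Facebook likes?
# This supports the later observation that almost half of the entries are zero.
print((movieData.movie_facebook_likes == 0).mean())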
###Output
_____no_output_____
###Markdown
Is there a relationship between movie Facebook likes and the gross income the movie makes? Here the relationship between movie Facebook likes and the gross that the movie made is explored.
###Code
x = movieData.movie_facebook_likes
y = movieData.gross
c = movieData.num_voted_users
fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r")
plt.xlabel("Movie Facebook likes", fontsize = 15)
plt.ylabel("Movie gross", fontsize = 15)
plt.title("Relationship between movie Facebook likes and gross", fontsize = 20)
plt.colorbar(fig)
###Output
_____no_output_____
###Markdown
**But**, is it reasonable to assume that 7 million dollars made in 1990 is the same as 7 million dollars made in 2010? Advanced content:
###Code
# new data input: CPI Urban from 1913 to 2016
cpi = {1913:9.9, 1914:10, 1915:10.1, 1916:10.9, 1917:12.8, 1918:15.1, 1919:17.3, 1920:20, 1921:17.9, 1922:16.8, 1923:17.1, 1924:17.1, 1925:17.5, 1926:17.7, 1927:17.4, 1928:17.1, 1929:17.1, 1930:16.7, 1931:15.2, 1932:13.7, 1933:13, 1934:13.4, 1935:13.7, 1936:13.9, 1937:14.4, 1938:14.1, 1939:13.9, 1940:14, 1941:14.7, 1942:16.3, 1943:17.3, 1944:17.6, 1945:18, 1946:19.5, 1947:22.3, 1948:24.1, 1949:23.8, 1950:24.1, 1951:26, 1952:26.5, 1953:26.7, 1954:26.9, 1955:26.8, 1956:27.2, 1957:28.1, 1958:28.9, 1959:29.1, 1960:29.6, 1961:29.9, 1962:30.2, 1963:30.6, 1964:31, 1965:31.5, 1966:32.4, 1967:33.4, 1968:34.8, 1969:36.7, 1970:38.8, 1971:40.5, 1972:41.8, 1973:44.4, 1974:49.3, 1975:53.8, 1976:56.9, 1977:60.6, 1978:65.2, 1979:72.6, 1980:82.4, 1981:90.9, 1982:96.5, 1983:99.6, 1984:103.9, 1985:107.6, 1986:109.6, 1987:113.6, 1988:118.3, 1989:124, 1990:130.7, 1991:136.2, 1992:140.3, 1993:144.5, 1994:148.2, 1995:152.4, 1996:156.9, 1997:160.5, 1998:163, 1999:166.6, 2000:172.2, 2001:177.1, 2002:179.9, 2003:184, 2004:188.9, 2005:195.3, 2006:201.6, 2007:207.3, 2008:215.303, 2009:214.537, 2010:218.056, 2011:224.939, 2012:229.594, 2013:232.957, 2014:236.736, 2015:237.017, 2016:240.007}
# normalizing the gross income using CPI data
import math
movie_gross_normalized = []
for i in range(0, len(movieData)):
one_movie_year = movieData['title_year'].iloc[i]
# there are NaN-s in title year information
if (math.isnan(one_movie_year)):
one_movie_gross_normalized = np.NaN
else:
# using the dictionary of data to get the CPI data of the title year
movie_cpi = cpi[one_movie_year]
# normalizing the gross income of movie using the CPI of the year
one_movie_gross_normalized = movieData['gross'].iloc[i] / movie_cpi
# adding the calculated normalization result to the recording list
movie_gross_normalized.append(one_movie_gross_normalized)
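# A more idiomatic pandas alternative to the loop above (added sketch): look up each
# title year in the cpi dict and divide; missing or NaN years become NaN automatically.
movie_gross_normalized_vec = movieData['gross'] / movieData['title_year'].map(
    lambda year: cpi.get(int(year), np.nan) if pd.notnull(year) else np.nan)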
x = movieData.movie_facebook_likes
y = movie_gross_normalized
c = movieData.num_voted_users
fig = plt.scatter(x, y, alpha = 0.5, c = c, cmap = "autumn_r")
plt.xlabel("Movie Facebook likes", fontsize = 15)
plt.ylabel("Movie gross", fontsize = 15)
plt.title("Relationship between movie Facebook likes and gross", fontsize = 20)
plt.colorbar(fig)
###Output
_____no_output_____
###Markdown
Is a one-face poster the key to success for a movie? Hypothesis: movies whose posters show only one face are more successful, here measured by Facebook likes. Explore the data to find out if this is true.
###Code
# importing a plotting package called seaborn
import seaborn as sns
# reseting the graph size
plt.rcParams['figure.figsize'] = (16, 6)
# using seaborn to plot a box plot
sns.boxplot(data = movieData, x = 'facenumber_in_poster', y = 'movie_facebook_likes', palette = 'Set3')
# setting label for x-axis
plt.xlabel("Number of faces in poster", fontsize = 15)
# setting label for y-axis
plt.ylabel("Movie Facebook likes, non-zero", fontsize = 15)
# setting title for the whole figure
plt.title("Relationship between number of faces in poster and movie Facebook likes", fontsize = 20)
###Output
_____no_output_____
###Markdown
We can't really see anything because the boxes are "squished" to the bottom. Why so? That is possibly because almost half of the movies have 0 likes, from what we have discovered above. So let's try again removing movies with 0 likes.
###Code
# using seaborn to plot a box plot
sns.boxplot(data = movieDataNonzero, x = 'facenumber_in_poster', y = 'movie_facebook_likes', palette = 'Set3')
# setting label for x-axis
plt.xlabel("Number of faces in poster", fontsize = 15)
# setting label for y-axis
plt.ylabel("Movie Facebook likes, non-zero", fontsize = 15)
# setting title for the whole figure
plt.title("Relationship between number of faces in poster and movie Facebook likes", fontsize = 20)
# counting the appearance of each possible value
facenumber_freq = movieDataNonzero.facenumber_in_poster.value_counts()
print(facenumber_freq)
# finding the index - possible values - from the frequency table above
facenumber_index = facenumber_freq.index
# plotting a bar plot using seaborn
sns.barplot(x = facenumber_index, y = facenumber_freq, palette = 'Set3')
plt.xlabel("Number of faces in movie poster", fontsize = 15)
plt.ylabel("Frequency", fontsize = 15)
plt.title("Distribution of number of faces in movie poster", fontsize = 20)
###Output
_____no_output_____ |
project_wrangle_act.ipynb | ###Markdown
Project: Twitter Archive of WeRateDogs Chloe Xue June, 2019 Table of ContentsIntroductionData Wrangling Part I. Gathering Data Part II. Accessing Data Data Quality and Tidiness Part III. Cleaning Data Part IV. Storing Data Exploratory Data Analysis Part V. Analyzing and Visualizations Conclusions Introduction > In this project, the dataset I analyze is the tweet archive of Twitter user [@dog_rates](https://twitter.com/dog_rates), also known as [WeRateDogs](https://en.wikipedia.org/wiki/WeRateDogs). This twitter account rates people's dogs with ratings on denominator of 10 along with comments. This archive contains basic tweet data (tweet ID, timestamp, text, etc.) for all 5000+ of tweets from August 1, 2017. > The objective of this project is wrangling WeRateDog Twitter data to draw interesting and trustworthy analyses and visualizations . The main focus on my project is performing data wrangling from a variety of sources and in variety of formats, in the process of gathering, accessing and cleaning using Python and its libiraries. > Introducing three datasets: - Twittwer Archive. Originally the enhanced Twitter archive contains 5000+ tweets with basic tweet data. It is being filtered for tweets with ratings only along with information like dog name, stage, etc. - Image Prediction. This dataset is created by running every image in the WeRateDogs Twitter Archive through a neural network that can classify top three predictions(corresponding to the most confident predictions) on breeds of dogs alongside each tweet ID, image URL and the image number.- Retweets and Favorites.Querying from Twitter's API, this dataset generates retweet count and favorite count of each tweet ID in WeRateDogs Twitter Archive.
###Code
import pandas as pd
import numpy as np
import requests
import tweepy
import json
import re
import matplotlib.pyplot as plt
import seaborn as sb
import statsmodels.api as sm
from scipy.stats.stats import pearsonr
%matplotlib inline
###Output
/opt/conda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
from pandas.core import datetools
###Markdown
Data Wrangling Part I. Gathering Data 1.a archive: WeRateDogs Twitter Archive dataset, which is given on hand. Use 'read_csv' to read data. 1.b image: Image Predictions dataset, which is hosted on Udacity's servers. Download programmatically using the requests library.1.c tweets_data: Retweet count and favorite count dataset, which is generated by querying Twitter's API.
###Code
# 1.a Read WeRateDogs archive data.
archive = pd.read_csv('twitter-archive-enhanced.csv')
# 1.b Use request library to read image prediction data.
response = requests.get('https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv')
with open('image_predictions.tsv', mode = 'wb') as file:
file.write(response.content)
# Read tsv data
image = pd.read_csv('image_predictions.tsv', delimiter = '\t')
#1.c Use Tweepy to query API
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit = True,wait_on_rate_limit_notify = True)
# Write JSON data into tweet_json.txt
with open('tweet_json.txt', 'a', encoding = 'utf8') as f:
for tweet_id in archive['tweet_id']:
try:
tweet = api.get_status(tweet_id, tweet_mode = 'extended')
json.dump(tweet._json, f)
f.write('\n')
except:
continue
# Create a empty list for append each tweet info into it
tweets_data = []
tweet_json = open('tweet_json.txt', 'r')
for line in tweet_json:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
tweet_json.close()
print(tweets_data[0])
tweets = pd.DataFrame()
tweets['id'] = list(map(lambda tweet: tweet['id'],tweets_data))
tweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'],tweets_data))
tweets['favorite_count'] = list(map(lambda tweet: tweet['favorite_count'],tweets_data))
tweets.head()
###Output
_____no_output_____
###Markdown
Part II. Assessing Data Properties of data include Quality and Tidiness. Quality (issues with content) Dimensions:- Completeness- Validity- Accuracy- ConsistencyTidiness (issues with structure) Dimensions:- Each variable forms a column- Each observation forms a row- Each type of observational unit forms a table
###Code
# check information, head and tail of archive.
archive.info()
archive.head(10)
archive.tail(10)
# check ratings on denominator and numerator.
archive.rating_denominator.value_counts()
archive.rating_numerator.value_counts()
# Retweet in archive is not considered, so check numbers of retweets.
archive.retweeted_status_id.isna().value_counts()
archive.in_reply_to_status_id.isna().value_counts()
# Check if there are missing values in expanded_urls, which means tweets do not included an image.
archive.expanded_urls.isnull().value_counts()
# check information, head of image.
image.info()
image.head(10)
# Check if all tweet_id has images.
image.img_num.isnull().value_counts()
# Check information and head of tweets.
tweets.info()
tweets.head()
# Check if there are duplicates in three datasets.
print(archive.duplicated().value_counts())
print(image.duplicated().value_counts())
print(tweets.duplicated().value_counts())
# Check the length of three datasets to see if the tweet counts match.
print('archive counts = {}'.format(len(archive)))
print('image counts = {}'.format(len(image)))
print('tweets data count = {}'.format(len(tweets_data)))
###Output
archive counts = 2356
image counts = 2075
tweets data count = 2335
###Markdown
Quality:- archive: Including 181 retweets and 78 replies that are unnecessary. (1)- archive: 'expanded_url' has missing values. That means the tweet doesn't include an image. Ratings will not be considered without images. (2)- archive: 'rating_denominator' column has values not equal to 10. (3)- archive: 'rating_numerator' column has unexpected values. (4)- archive: Data type for timestamp is not correct. (5)- archive: 'source' column is not clean. (6)- image: Dog breed names have delimiters that need to be cleaned. Names should be capitalized. (8)- tweets: Data type for retweet_count and favorite_count should be integer. (9)- Counts of tweets are inconsistent across the three datasets. (10) Tidiness:- archive: 'doggo','floofer','pupper','puppo' columns refer to four categories of dog stage, which should be under one column: 'dog_stage'. (7)- image: Aggregate p1, p2 and p3 to classify each image into dog_type (Dog, Might Dog and Not Dog) and dog_breed (with best confidence). (8)- tweets: This can be merged into the archive data. The three datasets should be merged into one final dataset. (10) > Note: The following cleaning process follows a logical order instead of the order of the issue statements above. Some cleaning steps solve multiple issues at once, which makes the process easier to follow. The final dataset will reflect all issues being resolved. Part III. Cleaning Data
###Code
# Make copies of original dataset.
archive_clean = archive.copy()
image_clean = image.copy()
tweets_clean = tweets.copy()
###Output
_____no_output_____
###Markdown
(1) Define Archive: Retweets and replies are not considered. Delete the 181 retweets and 78 replies in the archive. Code
###Code
archive_clean = archive_clean[archive_clean.retweeted_status_id.isna()]
archive_clean = archive_clean[archive_clean.in_reply_to_status_id.isna()]
# Drop in_reply_to_status_id and in_reply_to_user_id columns.
# Drop retweet status columns.
archive_clean.drop(columns = ['in_reply_to_status_id', 'in_reply_to_user_id','retweeted_status_id',
'retweeted_status_user_id','retweeted_status_timestamp'], inplace = True)
###Output
_____no_output_____
###Markdown
Test
###Code
archive_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2097 entries, 0 to 2355
Data columns (total 12 columns):
tweet_id 2097 non-null int64
timestamp 2097 non-null object
source 2097 non-null object
text 2097 non-null object
expanded_urls 2094 non-null object
rating_numerator 2097 non-null int64
rating_denominator 2097 non-null int64
name 2097 non-null object
doggo 2097 non-null object
floofer 2097 non-null object
pupper 2097 non-null object
puppo 2097 non-null object
dtypes: int64(3), object(9)
memory usage: 213.0+ KB
###Markdown
(2) Define Archive: Missing values in 'expanded_urls' mean those tweets do not include images, so they are not considered. Delete the rows with missing values in this column. Code
###Code
archive_clean = archive_clean[archive_clean.expanded_urls.notnull()]
###Output
_____no_output_____
###Markdown
Test
###Code
print(archive_clean.expanded_urls.isnull().value_counts())
# Drop expanded_urls column.
archive_clean.drop(columns = ['expanded_urls'], inplace = True)
archive_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2094 entries, 0 to 2355
Data columns (total 11 columns):
tweet_id 2094 non-null int64
timestamp 2094 non-null object
source 2094 non-null object
text 2094 non-null object
rating_numerator 2094 non-null int64
rating_denominator 2094 non-null int64
name 2094 non-null object
doggo 2094 non-null object
floofer 2094 non-null object
pupper 2094 non-null object
puppo 2094 non-null object
dtypes: int64(3), object(8)
memory usage: 196.3+ KB
###Markdown
(3) Define Archive: Ratings always have a denominator of 10. Clean rating_denominator column with value not equal to 10. Code
###Code
archive_clean.rating_denominator.value_counts()
# Remove the urls in text:
archive_clean['text'] = archive_clean['text'].str.split('http').str[0]
# Create a sub-dataset with denominator not equal to 10.
df1 = archive_clean[archive_clean.rating_denominator != 10]
# Shrink df1 with coloumns only containing comments and ratings.
df1 = df1[['tweet_id','text','rating_numerator','rating_denominator']]
pd.options.display.max_rows
pd.set_option('display.max_colwidth', -1)
df1
# Reading directly from the text, we first fix some obvious rating errors.
archive_clean.loc[archive_clean.tweet_id == 740373189193256964, ['rating_numerator','rating_denominator']] = [14,10]
archive_clean.loc[archive_clean.tweet_id == 722974582966214656, ['rating_numerator','rating_denominator']] = [13,10]
archive_clean.loc[archive_clean.tweet_id == 716439118184652801, ['rating_numerator','rating_denominator']] = [11,10]
archive_clean.loc[archive_clean.tweet_id == 682962037429899265, ['rating_numerator','rating_denominator']] = [10,10]
archive_clean.loc[archive_clean.tweet_id == 666287406224695296, ['rating_numerator','rating_denominator']] = [9,10]
# Work on the rest abnormal ratings.
df1 = archive_clean[archive_clean.rating_denominator != 10]
df1 = df1[['tweet_id','text','rating_numerator','rating_denominator']]
pd.options.display.max_rows
pd.set_option('display.max_colwidth', -1)
df1
# Calculate ratio for these ratings and reflect ratios with denominator is 10 for each tweet.
df1['rating_score'] = df1['rating_numerator'] / df1['rating_denominator']
df1['new_rating_numerator'] = df1['rating_score'] * 10
df1
# Change the ratings with rounding integers.
archive_clean.loc[archive_clean.tweet_id == 820690176645140481, ['rating_numerator','rating_denominator']] = [12,10]
# There is no actual rating for tweet 810984652412424192, so we change it to 10/10
archive_clean.loc[archive_clean.tweet_id == 810984652412424192, ['rating_numerator','rating_denominator']] = [10,10]
archive_clean.loc[archive_clean.tweet_id == 758467244762497024, ['rating_numerator','rating_denominator']] = [11,10]
archive_clean.loc[archive_clean.tweet_id == 731156023742988288, ['rating_numerator','rating_denominator']] = [12,10]
archive_clean.loc[archive_clean.tweet_id == 713900603437621249, ['rating_numerator','rating_denominator']] = [11,10]
archive_clean.loc[archive_clean.tweet_id == 710658690886586372, ['rating_numerator','rating_denominator']] = [10,10]
archive_clean.loc[archive_clean.tweet_id == 709198395643068416, ['rating_numerator','rating_denominator']] = [9,10]
archive_clean.loc[archive_clean.tweet_id == 704054845121142784, ['rating_numerator','rating_denominator']] = [12,10]
archive_clean.loc[archive_clean.tweet_id == 697463031882764288, ['rating_numerator','rating_denominator']] = [11,10]
archive_clean.loc[archive_clean.tweet_id == 684222868335505415, ['rating_numerator','rating_denominator']] = [11,10]
archive_clean.loc[archive_clean.tweet_id == 677716515794329600, ['rating_numerator','rating_denominator']] = [12,10]
archive_clean.loc[archive_clean.tweet_id == 675853064436391936, ['rating_numerator','rating_denominator']] = [11,10]
###Output
_____no_output_____
###Markdown
Test
###Code
archive_clean.rating_denominator.value_counts()
###Output
_____no_output_____
###Markdown
(4) Define Archive: Rating_numerator has unexpected value. Clean Rating_numerator with numbers too big or lower than 10. Code
###Code
archive_clean.rating_numerator.value_counts()
# Check the unexpected numbers that are greater than 14.
df2 = archive_clean[archive_clean.rating_numerator > 14]
df2 = df2[['tweet_id','text','rating_numerator','rating_denominator']]
pd.options.display.max_rows
pd.set_option('display.max_colwidth', -1)
df2
# We will round 9.75/10, 11.27/10 and 11.26/10 to closest integer.
archive_clean.loc[archive_clean.tweet_id == 786709082849828864, 'rating_numerator'] = 10
archive_clean.loc[archive_clean.tweet_id == 778027034220126208, 'rating_numerator'] = 11
archive_clean.loc[archive_clean.tweet_id == 680494726643068929, 'rating_numerator'] = 11
# For tweets with rating 1776/10 and 420/10, we can't get accurate information so we want to leave it as 10/10
archive_clean.loc[archive_clean.tweet_id == 749981277374128128, 'rating_numerator'] = 10
archive_clean.loc[archive_clean.tweet_id == 670842764863651840, 'rating_numerator'] = 10
# Check the unexpected numbers that are less than 10.
df3 = archive_clean[archive_clean.rating_numerator < 10]
df3 = df3[['tweet_id','text','rating_numerator','rating_denominator']]
pd.options.display.max_rows
pd.set_option('display.max_colwidth', -1)
df3
# Fix the first rating, which has an obvious error.
archive_clean.loc[archive_clean.tweet_id == 883482846933004288, ['rating_numerator','rating_denominator']] = [14,10]
###Output
_____no_output_____
###Markdown
Though the numerator should always be greater than 10, after reading the comments I figured out that the lower ratings are real, except that some ratings are low because the pictures are not dogs. So I decided to keep the low ratings, but I would like to dig deeper into numerators lower than 5.
###Code
df4 = archive_clean[archive_clean.rating_numerator < 5]
df4 = df4[['tweet_id','text','rating_numerator','rating_denominator']]
pd.options.display.max_rows
pd.set_option('display.max_colwidth', -1)
df4
# Fix the first rating, which has an obvious error.
archive_clean.loc[archive_clean.tweet_id == 695064344191721472, ['rating_numerator','rating_denominator']] = [13,10]
###Output
_____no_output_____
###Markdown
Test
###Code
archive_clean.rating_numerator.value_counts()
###Output
_____no_output_____
###Markdown
(5) Define Archive: Data type for 'timestamp' is not correct. It should be datetime. Code
###Code
archive_clean['timestamp'] = archive_clean['timestamp'].str.split('+').str[0]
archive_clean.timestamp = pd.to_datetime(archive_clean.timestamp)
###Output
_____no_output_____
###Markdown
Test
###Code
archive_clean.info()
archive_clean.timestamp.head(2)
###Output
_____no_output_____
###Markdown
(6) Define Archive: Source column is messy and needs to be cleaned. Extract only the source name from the strings. Code
###Code
archive_clean.source.value_counts()
archive_clean['source'] = archive_clean['source'].apply(lambda x: re.search('rel="nofollow">(.*)</a>', x).group(1))
# Change the data type of source to categorical.
archive_clean['source'] = pd.Categorical(archive_clean['source'])
###Output
_____no_output_____
###Markdown
Test
###Code
print(archive_clean.source.value_counts())
archive_clean.info()
archive_clean.head(2)
###Output
Twitter for iPhone 1962
Vine - Make a Scene 91
Twitter Web Client 30
TweetDeck 11
Name: source, dtype: int64
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2094 entries, 0 to 2355
Data columns (total 11 columns):
tweet_id 2094 non-null int64
timestamp 2094 non-null datetime64[ns]
source 2094 non-null category
text 2094 non-null object
rating_numerator 2094 non-null int64
rating_denominator 2094 non-null int64
name 2094 non-null object
doggo 2094 non-null object
floofer 2094 non-null object
pupper 2094 non-null object
puppo 2094 non-null object
dtypes: category(1), datetime64[ns](1), int64(3), object(6)
memory usage: 182.2+ KB
###Markdown
(7) Define Archive: 'doggo','floofer','pupper','puppo' columns refer to four categories of dog stage, that should be under one column: 'dog_stage'. Code
###Code
archive_clean.columns
# Define how many tweets with no reference of any four of dog stages.
archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None')
+ (archive_clean.pupper!='None') + (archive_clean.puppo!='None') == 0].shape
# Define how many tweets with more than one reference on dog stages.
archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None')
+ (archive_clean.pupper!='None') + (archive_clean.puppo!='None') > 1].shape
# Create a sub dataset with no reference on dog stages (all four columns are 'none'), creating a new column 'dog_stage'.
# Assign values to 'None'.
all_none = archive_clean[(archive_clean.doggo != 'None') + (archive_clean.floofer!='None')
+ (archive_clean.pupper!='None') + (archive_clean.puppo!='None') == 0]
all_none = all_none.assign(dog_stage = 'None')
all_none = all_none.drop(['doggo', 'floofer','pupper', 'puppo'],axis = 1)
# Use melt function to unpivot four columns under a new column 'dog_stage' with values are not 'None'.
archive_clean = pd.melt(archive_clean,
id_vars = ['tweet_id', 'timestamp', 'source', 'text','rating_numerator', 'rating_denominator', 'name'],
value_vars = ['doggo', 'floofer','pupper', 'puppo'],var_name = 'dog_stage', value_name = 'Bool')
archive_clean = archive_clean[archive_clean.Bool != 'None']
del archive_clean['Bool']
# Append all_none sub dataset to archive_clean.
archive_clean = archive_clean.append(all_none)
archive_clean = archive_clean.reset_index(drop = True)
# Check if there are duplicates.
archive_clean[archive_clean.tweet_id.duplicated()]
# Clean the duplicates. Check the length to see if it matches with before.
archive_clean = archive_clean.drop_duplicates('tweet_id')
archive_clean.shape
###Output
_____no_output_____
###Markdown
Test
###Code
archive_clean['dog_stage'] = pd.Categorical(archive_clean['dog_stage'])
archive_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2094 entries, 0 to 2104
Data columns (total 8 columns):
tweet_id 2094 non-null int64
timestamp 2094 non-null datetime64[ns]
source 2094 non-null category
text 2094 non-null object
rating_numerator 2094 non-null int64
rating_denominator 2094 non-null int64
name 2094 non-null object
dog_stage 2094 non-null category
dtypes: category(2), datetime64[ns](1), int64(3), object(2)
memory usage: 119.0+ KB
###Markdown
(8) Define image: Aggregate p1, p2 and p3 to classify each image into dog_type(Dog, Might Dog and Not Dog), dog_breed(with best confidence). Code
###Code
image_clean = image_clean.drop(columns = ['jpg_url','img_num'])
image_clean.shape
image_clean.sample(5)
###Output
_____no_output_____
###Markdown
My approach for classifying predictions:1. If all top three predictions are 'true' for dog, dog_type is Dog. The breed is the p1 prediction, with the best confidence.2. If all top three predictions are 'false' for dog, dog_type is Not Dog. The breed is None, with the p1 confidence.3. If there are mixed 'true' and 'false' predictions, compare the summed true confidence and false confidence and identify a threshold: if the true confidence is higher than that value, dog_type is Dog. Otherwise, dog_type is Might Dog. (A condensed sketch of this rule appears at the top of the next cell.)
###Code
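# A condensed sketch of the classification rule above (hypothetical helper, not used by the
# cells below). The 0.464286 threshold is the Q3 of false_conf among the mixed predictions,
# computed later in this section.
def classify_prediction(true_conf, false_conf, threshold = 0.464286):
    if false_conf == 0:
        return 'Dog'        # all three predictions agree it is a dog
    if true_conf == 0:
        return 'Not Dog'    # all three predictions agree it is not a dog
    return 'Dog' if true_conf > threshold else 'Might Dog'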
# Sum all true p_conf for each tweet.
image_clean['true_conf'] = (image_clean['p1_conf'].where(image_clean['p1_dog'] == True, 0)
+ image_clean['p2_conf'].where(image_clean['p2_dog'] == True, 0)
+ image_clean['p3_conf'].where(image_clean['p3_dog'] == True, 0))
# Sum all false p_conf seperately for each tweet.
image_clean['false_conf'] = (image_clean['p1_conf'].where(image_clean['p1_dog'] == False, 0)
+ image_clean['p2_conf'].where(image_clean['p2_dog'] == False, 0)
+ image_clean['p3_conf'].where(image_clean['p3_dog'] == False, 0))
image_clean.sample(5)
image_clean.true_conf.describe()
image_clean.false_conf.describe()
# Create a sub dataset for all true for dog.
all_true = image_clean[image_clean.false_conf == 0]
# Create three new columns: dog_type, dog_breed and confidence. Assign values.
all_true = all_true.assign(dog_type = 'Dog')
all_true = all_true.assign(dog_breed = all_true.p1)
all_true = all_true.assign(confidence = all_true.p1_conf)
# Create a sub dataset for all false for dog.
all_false = image_clean[image_clean.true_conf == 0]
# Create three new columns: dog_type, dog_breed and confidence. Assign values.
all_false = all_false.assign(dog_type = 'Not Dog')
all_false = all_false.assign(dog_breed = 'Unknown')
all_false = all_false.assign(confidence = all_false.p1_conf)
# Create a sub dataset for mixed true and false.
mix = image_clean.loc[(image_clean['true_conf'] != 0) & (image_clean['false_conf'] != 0)]
# Confirm the length of all three sub datasets matches with the original image dataset.
print(len(all_true) + len(all_false) + len(mix))
print(len(image_clean))
# Check the statistics for true confidence.
mix.true_conf.describe()
# Check the statistics for false confidence.
mix.false_conf.describe()
# If the true confidence is greater than false-conf Q3, check the range of false-conf, make sure they all fall below the threshold.
mix[mix.true_conf > 0.464286].false_conf.describe()
# Create a sub dataset mix_true with true-conf is greater than the threshold. Assign values.
mix_true = mix[mix.true_conf > 0.464286]
mix_true = mix_true.assign(dog_type = 'Dog')
mix_true = mix_true.assign(dog_breed = mix_true.p1)
mix_true = mix_true.assign(confidence = mix_true.p1_conf)
# Create a sub dataset mix_might with true-conf is lower than the threshold. Assign values.
mix_might = mix[mix.true_conf <= 0.464286]
mix_might = mix_might.assign(dog_type = 'Might Dog')
mix_might = mix_might.assign(dog_breed = 'Unknown')
mix_might = mix_might.assign(confidence = mix_might.false_conf)
# Append all sub datasets.
image_clean = all_true.append([mix_true,mix_might,all_false])
# Change data type for dog_type and dog_breed to categorical.
image_clean['dog_type'] = pd.Categorical(image_clean['dog_type'])
image_clean['dog_breed'] = pd.Categorical(image_clean['dog_breed'])
# Make sure the length is not changed.
image_clean.shape
# Round confidence to four digits decimals.
image_clean = image_clean.round({ 'confidence': 4})
# Clean the delimiter of dog breed name.
image_clean['bre'] = image_clean['dog_breed'].astype(str).str.split('_')
image_clean['dog_breed'] = image_clean['bre'].apply(' '.join)
image_clean['bre'] = image_clean['dog_breed'].astype(str).str.split('-')
image_clean['dog_breed'] = image_clean['bre'].apply(' '.join)
# Caplitalize each word of dog breed name.
image_clean['dog_breed'] = image_clean['dog_breed'].str.title()
# Drop the p1, p2 and p3 columns.
image_clean = image_clean.drop(columns = ['p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf','p2_dog',
'p3','p3_conf', 'p3_dog', 'true_conf', 'false_conf', 'bre'])
###Output
_____no_output_____
###Markdown
Test
###Code
image_clean.info()
image_clean.sample(5)
###Output
_____no_output_____
###Markdown
(9) Define tweets: Data type for retweet_count and favorite_count should be integer. Code
###Code
tweets = tweets.astype({"retweet_count": int, "favorite_count": int})
###Output
_____no_output_____
###Markdown
Test
###Code
tweets.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2335 entries, 0 to 2334
Data columns (total 3 columns):
id 2335 non-null int64
retweet_count 2335 non-null int64
favorite_count 2335 non-null int64
dtypes: int64(3)
memory usage: 54.8 KB
###Markdown
(10) Define Combine three dataframes together to get the final table. Code
###Code
archive_clean.info()
image_clean.info()
tweets.info()
tweets = tweets.rename(columns = {'id':'tweet_id'})
tweets.info()
# Check the length of three datasets.
print(archive_clean.shape)
print(image_clean.shape)
print(tweets.shape)
###Output
(2094, 8)
(2075, 4)
(2335, 3)
###Markdown
I want to keep all the valid ratings and image predictions. Therefore the final dataset should have a length of 2094 rows.
###Code
# Merge archive and image dataset first.
s1 = pd.merge(archive_clean,image_clean, on = ['tweet_id','tweet_id'], how = 'left')
# Merge tweets dataset
ratedogs = pd.merge(s1, tweets, on = 'tweet_id', how = 'left')
ratedogs.shape
###Output
_____no_output_____
###Markdown
Test
###Code
ratedogs.info()
ratedogs.sample(5)
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2094 entries, 0 to 2093
Data columns (total 13 columns):
tweet_id 2094 non-null int64
timestamp 2094 non-null datetime64[ns]
source 2094 non-null category
text 2094 non-null object
rating_numerator 2094 non-null int64
rating_denominator 2094 non-null int64
name 2094 non-null object
dog_stage 2094 non-null category
dog_type 1971 non-null category
dog_breed 1971 non-null object
confidence 1971 non-null float64
retweet_count 2089 non-null float64
favorite_count 2089 non-null float64
dtypes: category(3), datetime64[ns](1), float64(3), int64(3), object(3)
memory usage: 186.6+ KB
###Markdown
(11) Define Final clean-up: re-arrange the columns in logical order and fix data type for 'retweet_count' and 'favorite_count'. Code
###Code
ratedogs = ratedogs[['tweet_id','timestamp','text','rating_numerator','rating_denominator',
'name','dog_type','dog_breed','confidence','dog_stage','retweet_count',
'favorite_count','source']]
ratedogs.retweet_count = ratedogs.retweet_count.fillna(0).astype(int)
ratedogs.favorite_count = ratedogs.favorite_count.fillna(0).astype(int)
ratedogs.info()
ratedogs.sample(10)
###Output
_____no_output_____
###Markdown
Part IV. Storing Data
###Code
# Store dataframe into csv file and make a copy.
ratedogs.to_csv('twitter_archive_master.csv', encoding = 'utf-8', index = False)
ratedogs_clean = pd.read_csv('twitter_archive_master.csv')
ratedogs_clean.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis Part V. Analyzing and Visualizations
###Code
def rating_distribution():
"""
First plot I would like to look at the distribution of WeRateDogs rating scores.
Since denominator is always 10, I use rating_denominator as variable measuring rating scores.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
# plotting
plt.figure(figsize = [8,6])
bin_edges = np.arange(0, df['rating_numerator'].max()+1,1);
plt.hist(data = df, x = 'rating_numerator', bins = bin_edges);
plt.xlabel('Rating Score')
plt.ylabel('Tweet Count')
plt.title('WeRateDogs Rating Distribution')
    rating_string = ['I use a basic histogram to plot the rating score distribution.',
                     ' As you can see, the rating score has a long-tailed, left-skewed distribution with few scores below 10.',
                     ' A large proportion of rating scores falls in the range 10 to 13.',
                     ' Scores can be as low as 0, which could be considered unique cases, such as when the picture is not a dog.']
print((''.join(rating_string)))
def correlation_plot():
"""
For this plot, I first want to find out the bivariate correlation between retweet count and favorite count.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
# plotting
plt.figure(figsize = [8,6])
sb.set(color_codes = True)
sb.regplot(x = "retweet_count", y = "favorite_count", data = df)
plt.xlabel('retweet count')
plt.ylabel('favorite count')
plt.title('Correlation between Retweet and Favorite')
r = pearsonr(np.array(df.retweet_count),np.array(df.favorite_count))[0]
    correlation_string = ['I use a basic scatter plot to visualize the correlation between retweet count and favorite count.',
                          ' As you can see, retweet count and favorite count are strongly positively correlated with a Pearson Correlation Coefficient = {:.4f}'.format(r),
                          '. This means that as the retweet count grows, the favorite count increases.',
                          ' Most tweets are retweeted and favorited up to 20,000 and 50,000 times respectively.',
                          ' The slope of the fitted line suggests that people tend to favorite a tweet rather than retweet it.']
print((''.join(correlation_string)))
def rating_prediction1():
"""
I would like to find if two predictor variables, retweet and favorite are significant to predict
rating score using linear regression model.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
# Use linear regression model to identify if retweet and favorite count could significantly predict rating score.
df['intercept'] = 1
# Downgrade scale of retweet and favorite to 1000 to match with scale of rating score.
df[['retweet_count','favorite_count']] /= 1000
lm = sm.OLS(df['rating_numerator'], df[['intercept','retweet_count','favorite_count']])
results = lm.fit()
print(results.summary())
    prediction_string1 = ['Reading from the model results, the p-values for the two predictor variables, retweet and favorite, are zero.',
                          ' This can be interpreted as retweet and favorite count being significant predictors of rating score.',
                          ' The coefficient of favorite count is 0.1375, which means that for every 1,000 additional favorites,',
                          ' I would expect the rating score to increase by an average of 0.1375.',
                          ' I was surprised that the coefficient for retweet is negative.']
print((''.join(prediction_string1)))
def rating_prediction2():
"""
    I would like to get an intuitive visual of the relationships between retweet/favorite and rating.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
# plotting
plt.figure(figsize = [10,5])
df.groupby('rating_numerator')[['retweet_count','favorite_count']].mean().plot()
plt.xlabel('Rating Score');
plt.ylabel('Count Numbers');
plt.legend(['retweet', 'favorite']);
plt.title('Average Retweet and Favorite Count for Rating Scores');
plt.show()
prediction_string2 = ['Two interesting findings from the plot: One is when the rating score is zero,',
' favorite count reaches about 23000 which is rare.',
' Second finding is, with rating score is 9 and above, favorite and retweet count are increasing,',
' as rating score is increasing.', ' Users tend to favorite a tweet rather than retweet.']
print((''.join(prediction_string2)))
def dogstage_plot():
"""
Analyzing on dog stage data, I would like to see the comparisons on ratings,
retweet and favorite counts for four different dog stages. I use bar chart for this plot.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
df.loc[df.dog_stage == 'None', 'dog_stage'] = None
dogstage = df.groupby('dog_stage')[['retweet_count','favorite_count','rating_numerator']].mean()
dogstage = dogstage.reset_index()
dogstage.rename(columns={"retweet_count": "retweet", "favorite_count": "favorite","rating_numerator":"rating"}, inplace = True)
# scale down 1000 times for retweet and favorite count numbers to get them match with rating score.
dogstage[['retweet','favorite']] /= 1000
    # Reorder the dog stages from youngest to oldest. floofer refers to the amount of fur, so I leave it last.
dogstage.dog_stage = pd.Categorical(dogstage.dog_stage,categories = ["pupper","puppo","doggo","floofer"], ordered = True)
dogstage.sort_values('dog_stage', inplace = True)
dogstage = pd.melt(dogstage, id_vars = 'dog_stage', value_vars = ['retweet','favorite','rating'],
value_name = 'Numbers')
# plotting
plt.figure(figsize = [10,5])
ax = sb.barplot(data = dogstage, x = 'dog_stage',y = 'Numbers', hue = 'variable')
ax.set_xlabel('Dog Stages');
ax.set_ylabel('Average Numbers');
ax.set_title('Average Retweet, Favorite and Rating Count for Different Dog Stages');
stage_string = ['I use bar plot for this multivariate exploration.', ' pupper is the youngest dog stage and doggo is the oldest stage.',
' As you can see, teenager dog (puppo) is most popular from all three dimensions. ',
' The popularity towards furry dog (floofer) is neutral.']
print((''.join(stage_string)))
def source_plot():
"""
Analyzing on source data, I would like to see proportions of four different sources.
I use pie chart for this plot.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
source = df['source'].value_counts()
x = np.array(source.index)
y = np.array(source)
percent = 100.*y/y.sum()
# plotting
patches, texts = plt.pie(y, startangle = 90)
labels = ['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(x, percent)]
sort_legend = True
plt.legend(patches, labels, loc = 'center left', bbox_to_anchor = (-0.1, 1.), fontsize = 8)
plt.show()
    source_string = ['I use a pie chart for this source exploration.',' The largest proportion of tweets comes from Twitter for iPhone, by a large margin.',
                    ' The other three sources, Vine, Web Client and TweetDeck, make up small proportions of WeRateDogs tweet sources.']
print((''.join(source_string)))
def breed_ranking():
"""
Given the prediction of dog breed, I would like to see the ranking of dog breed in the perspectives of
rating and tweet count. I use a bar chart for this plot.
"""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
df.loc[df.dog_breed == 'Unknown', 'dog_breed'] = None
most_count_breed = df.dog_breed.value_counts().nlargest(20)
most_rating_breed = df.groupby('dog_breed')['rating_numerator'].mean().nlargest(20)
# plotting
plt.figure(figsize = [12,20])
plt.subplot(2,1,1)
most_count_breed.plot(kind = 'barh',color = (0.2, 0.4, 0.6, 0.6))
plt.xlabel('Number of Tweet Count')
plt.ylabel('Dog Breed')
plt.title('WeRateDogs Top 20 Tweeted Dog Breeds')
plt.gca().invert_yaxis()
plt.subplot(2,1,2)
most_rating_breed.plot(kind = 'barh',color = (0.2, 0.4, 0.6, 0.6))
plt.xlabel('Average Rating on Dog Breed')
plt.ylabel('Dog Breed')
plt.title('WeRateDogs Top 20 Rated Dog Breeds')
plt.gca().invert_yaxis()
breed_string = ['I use bar chart for this breed ranking.',
' As you can see, Golden Retriever, Pembroke are stars in dog breed as they appear in both ranking.']
print((''.join(breed_string)))
def prediction_ranking():
"""
For this plot, I would like to find out the most confidently predicted dog breed ranking.
And the least confidently predicted dog breed ranking."""
# data setup
df = pd.read_csv('twitter_archive_master.csv')
df_high10 = df.groupby('dog_breed').confidence.mean().sort_values(ascending=True).iloc[-10:]
df_low10 = df.groupby('dog_breed').confidence.mean().sort_values(ascending=False).iloc[-10:]
# plotting
plt.figure(figsize = [12,12])
plt.subplot(2,1,1)
df_high10.plot(kind = 'barh', color = (0.2, 0.4, 0.6, 0.6))
plt.xlabel('Confidence Level')
plt.ylabel('Dog Breed')
plt.title('Top 10 Most Confidently Predicted Dog Breed')
plt.subplot(2,1,2)
df_low10.plot(kind = 'barh', color = (0.2, 0.4, 0.6, 0.6))
plt.xlabel('Confidence Level')
plt.ylabel('Dog Breed')
plt.title('10 Least Confidently Predicted Dog Breed')
plt.show()
pre_rank_string = ['Breeds with "Terrier" are predicted with a low confidence level. ']
print((''.join(pre_rank_string)))
###Output
_____no_output_____
###Markdown
Plot 1. Rating Distribution: How does 'WeRateDogs' rate dogs on scores?
###Code
rating_distribution()
###Output
I use a basic histogram to plot the rating score distribution. As you can see, the rating score has a long-tailed, left-skewed distribution with few scores below 10. A large proportion of rating scores falls in the range 10 to 13. Scores can be as low as 0, which could be considered unique cases, such as when the picture is not a dog.
###Markdown
Plot 2. Correlation Plot: If a tweet is retweeted a lot, is it being favorited a lot at the same time?
###Code
correlation_plot()
###Output
I use a basic scatter plot to visualize the correlation between retweet count and favorite count. As you can see, retweet count and favorite count are strongly positively correlated with a Pearson Correlation Coefficient = 0.9272. This means that as the retweet count grows, the favorite count increases. Most tweets are retweeted and favorited up to 20,000 and 50,000 times respectively. The slope of the fitted line suggests that people tend to favorite a tweet rather than retweet it.
###Markdown
Analysis on Retweet and Favorite in Predicting Rating Score Using Linear Regression: If a tweet is retweeted and favorited a lot, does that mean it has a high rating?
###Code
rating_prediction1()
###Output
OLS Regression Results
==============================================================================
Dep. Variable: rating_numerator R-squared: 0.180
Model: OLS Adj. R-squared: 0.179
Method: Least Squares F-statistic: 229.8
Date: Sun, 23 Jun 2019 Prob (F-statistic): 6.05e-91
Time: 02:28:49 Log-Likelihood: -4356.8
No. Observations: 2094 AIC: 8720.
Df Residuals: 2091 BIC: 8737.
Df Model: 2
Covariance Type: nonrobust
==================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------
intercept 9.9793 0.052 191.861 0.000 9.877 10.081
retweet_count -0.2055 0.024 -8.462 0.000 -0.253 -0.158
favorite_count 0.1375 0.009 15.224 0.000 0.120 0.155
==============================================================================
Omnibus: 722.533 Durbin-Watson: 1.872
Prob(Omnibus): 0.000 Jarque-Bera (JB): 2626.560
Skew: -1.690 Prob(JB): 0.00
Kurtosis: 7.323 Cond. No. 19.8
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
Reading from the model results, the p-values for the two predictor variables, retweet and favorite, are zero. This can be interpreted as retweet and favorite count being significant predictors of rating score. The coefficient of favorite count is 0.1375, which means that for every 1,000 additional favorites, I would expect the rating score to increase by an average of 0.1375. I was surprised that the coefficient for retweet is negative.
###Markdown
Plot 3. Retweet/Favorite on Rating Score Plot: Will highly rated dogs cause a big trend in retweets and favorites?
###Code
rating_prediction2()
###Output
_____no_output_____
###Markdown
Plot 4. Popularity on Three Dimensions (Retweet, Favorite and Rating) by Dog Stage: Do people like young dogs or old dogs better?
###Code
dogstage_plot()
###Output
I use bar plot for this multivariate exploration. pupper is the youngest dog stage and doggo is the oldest stage. As you can see, teenager dog (puppo) is most popular from all three dimensions. The popularity towards furry dog (floofer) is neutral.
###Markdown
Plot 5. Where are tweets coming from?
###Code
source_plot()
###Output
_____no_output_____
###Markdown
Plot 6. What are the dog breeds that are tweeted most and rated highest on WeRateDogs?
###Code
breed_ranking()
###Output
I use bar chart for this breed ranking. As you can see, Golden Retriever, Pembroke are stars in dog breed as they appear in both ranking.
###Markdown
Plot 7. What kinds of dog breeds can be predicted confidently?
###Code
prediction_ranking()
###Output
_____no_output_____ |
Practical Data Science/Practical Data Science - Duke University/4_pandas/Series.ipynb | ###Markdown
Exercise 1Use the code below to get started:
###Code
import pandas as pd
gdppercap = pd.Series([34605, 34493, 12393, 44200, 10041,
58138, 4709, 49284, 10109, 42536],
index=['Bahrain', 'Belgium', 'Bulgaria',
'Ireland', 'Macedonia', 'Norway',
'Paraguay', 'Singapore',
'South Africa', 'Switzerland']
)
###Output
_____no_output_____
###Markdown
Exercise 2Find the mean, median, minimum and maximum values of GDP per capita in this data.
###Code
gdppercap.describe()
gdppercap.median()
# (another option to get the median)
import numpy as np
np.percentile(gdppercap, 50)
###Output
_____no_output_____
###Markdown
Exercise 3Programmatically, determine which country in our data has the highest income per capita, and which has the lowest income per capita. Hint: Country names form the index for this Series, so to get country names you’ll need to access the index.
###Code
gdppercap.sort_values()
# Lowest GDP
gdppercap.sort_values().iloc[:1]
# Highest GDP
gdppercap.sort_values().iloc[-1:]
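# Alternative sketch: read the country names straight off the index, as the hint suggests.
lowest_country = gdppercap.idxmin()    # country with the lowest GDP per capita
highest_country = gdppercap.idxmax()   # country with the highest GDP per capita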
###Output
_____no_output_____
###Markdown
Exercise 4Get Python to print out the names of all the countries that have GDP per capitas less than $20,000.
###Code
cond = gdppercap < 20000
gdppercap[cond].sort_values()
###Output
_____no_output_____
###Markdown
Exercise 5Get Python to print out the GDP per capita of Switzerland
###Code
gdppercap['Switzerland']
###Output
_____no_output_____
###Markdown
Exercise 6 Calculate the Gini coefficient for our income data. (Formula provided in the exercise page)**HINT 1**: Be careful with 0-indexing! Python counts from 0, but mathematical formulas count from 1!**HINT 2**: I’m gonna make you calculate Gini coefficients again later, so maybe you should write a function to do this and make life easier later? 
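Since the formula image from the exercise page is not reproduced here, this is the index-based form that the solution below implements (with incomes sorted ascending, $y_1 \le y_2 \le \dots \le y_n$): $$G = \frac{2\sum_{i=1}^{n} i\,y_i}{n\sum_{i=1}^{n} y_i} - \frac{n+1}{n}$$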
###Code
def calculate_gini(input_series):
input_copy = input_series.copy()
# Prepare the data
input_df = input_copy.sort_values().reset_index().reset_index()
n = len(input_copy)
input_df.columns = ['i','country','y']
input_df['i'] += 1
# Calculate Gini
parte1_num = 2*np.sum(input_df['i']*input_df['y'])
parte1_den = n*np.sum(input_df['y'])
part2 = (n+1)/n
gini = parte1_num/parte1_den - part2
return gini
gini = calculate_gini(gdppercap)
gini
###Output
_____no_output_____
###Markdown
Exercise 7Using this data on average growth rates in GDP per capita, and assuming growth rates from 2000 to 2018 continue into the future, estimate what our Gini Coefficient may look like in 2025 (remembering that income in our data is from 2008, so we’re extrapolating ahead 17 years)?
###Code
avg_growth = pd.Series([-0.29768835, 0.980299584, 4.52991925,
3.686556736, 2.621416804, 0.775132075,
2.015489468, 3.345793635, 1.349993318,
0.982775018],
index=['Bahrain', 'Belgium', 'Bulgaria',
'Ireland', 'Macedonia', 'Norway',
'Paraguay', 'Singapore',
'South Africa', 'Switzerland']
)
###Output
_____no_output_____
###Markdown
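The figure that originally appeared here is presumably the compound-growth formula; the extrapolation used in the solution below is $$\text{income}_{2025} = \text{income}_{2008} \times \left(1 + \frac{g}{100}\right)^{17},$$ where $g$ is each country's average annual growth rate in percent.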

###Code
future_gdppercap = gdppercap * (1 + avg_growth/100)**17
future_gdppercap
future_gini = calculate_gini(future_gdppercap)
future_gini
# EXTRA: Using the same growth numbers, how would the next 50 years look?
gini_values = []
for i in range(50):
gini_values.append(calculate_gini(gdppercap * (1 + avg_growth/100)**i))
pd.Series(gini_values).plot()
###Output
_____no_output_____ |
yucheng_ner/tplinker_ner/Evaluation.ipynb | ###Markdown
Load Data
###Code
test_data_dict = {}
for file_name, path in test_data_path_dict.items():
test_data_dict[file_name] = json.load(open(path, "r", encoding = "utf-8"))
###Output
_____no_output_____
###Markdown
Split
###Code
# init tokenizers
if use_bert:
bert_tokenizer = BertTokenizerFast.from_pretrained(bert_config["path"], add_special_tokens = False, do_lower_case = False)
word2idx = json.load(open(word2idx_path, "r", encoding = "utf-8"))
word_tokenizer = WordTokenizer(word2idx)
# preprocessor
tokenizer4preprocess = bert_tokenizer if use_bert else word_tokenizer
preprocessor = Preprocessor(tokenizer4preprocess, use_bert)
def split(data, max_seq_len, sliding_len, data_name = "train"):
'''
split into short texts
'''
max_tok_num = 0
for sample in tqdm(data, "calculating the max token number of {}".format(data_name)):
text = sample["text"]
tokens = preprocessor.tokenize(text)
max_tok_num = max(max_tok_num, len(tokens))
print("max token number of {}: {}".format(data_name, max_tok_num))
if max_tok_num > max_seq_len:
print("max token number of {} is greater than the setting, need to split!".format(data_name, data_name, max_seq_len))
short_data = preprocessor.split_into_short_samples(data,
max_seq_len,
sliding_len = sliding_len,
data_type = "test")
else:
short_data = data
max_seq_len = max_tok_num
print("max token number of {} is less than the setting, no need to split!".format(data_name, data_name, max_tok_num))
return short_data, max_seq_len
# all_data = []
# for data in list(test_data_dict.values()):
# all_data.extend(data)
# max_tok_num = 0
# for sample in tqdm(all_data, desc = "Calculate the max token number"):
# tokens = tokenize(sample["text"])
# max_tok_num = max(len(tokens), max_tok_num)
# split_test_data = False
# if max_tok_num > config["max_test_seq_len"]:
# split_test_data = True
# print("max_tok_num: {}, lagger than max_test_seq_len: {}, test data will be split!".format(max_tok_num, config["max_test_seq_len"]))
# else:
# print("max_tok_num: {}, less than or equal to max_test_seq_len: {}, no need to split!".format(max_tok_num, config["max_test_seq_len"]))
# max_seq_len = min(max_tok_num, config["max_test_seq_len"])
# if config["force_split"]:
# split_test_data = True
# print("force to split the test dataset!")
ori_test_data_dict = copy.deepcopy(test_data_dict)
test_data_dict = {}
max_seq_len_all_data = []
for file_name, data in ori_test_data_dict.items():
split_data, max_seq_len_this_data = split(data, max_seq_len, sliding_len, file_name)
max_seq_len_all_data.append(max_seq_len_this_data)
test_data_dict[file_name] = split_data
max_seq_len = max(max_seq_len_all_data)
print("final max_seq_len is {}".format(max_seq_len))
for filename, short_data in test_data_dict.items():
print("example number of {}: {}".format(filename, len(short_data)))
###Output
_____no_output_____
###Markdown
Decoder(Tagger)
###Code
meta = json.load(open(meta_path, "r", encoding = "utf-8"))
tags = meta["tags"]
if meta["visual_field_rec"] > handshaking_kernel_config["visual_field"]:
handshaking_kernel_config["visual_field"] = meta["visual_field_rec"]
print("Recommended visual_field is greater than current visual_field, reset to rec val: {}".format(handshaking_kernel_config["visual_field"]))
handshaking_tagger = HandshakingTaggingScheme(tags, max_seq_len, handshaking_kernel_config["visual_field"])
###Output
_____no_output_____
###Markdown
Character indexing
###Code
char2idx = json.load(open(char2idx_path, "r", encoding = "utf-8"))
def text2char_indices(text, max_seq_len = -1):
char_ids = []
chars = list(text)
for c in chars:
if c not in char2idx:
char_ids.append(char2idx['<UNK>'])
else:
char_ids.append(char2idx[c])
if len(char_ids) < max_seq_len:
char_ids.extend([char2idx['<PAD>']] * (max_seq_len - len(char_ids)))
if max_seq_len != -1:
char_ids = torch.tensor(char_ids[:max_seq_len]).long()
return char_ids
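# Usage note: e.g. text2char_indices("dog rates", max_seq_len = 16) would return a length-16
# LongTensor, with characters missing from char2idx mapped to '<UNK>' and the tail padded with '<PAD>'.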
###Output
_____no_output_____
###Markdown
Dataset
###Code
class MyDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
# max word num, max subword num, max char num
def cal_max_tok_num(data, tokenizer):
max_tok_num = 0
for example in data:
text = example["text"]
max_tok_num = max(max_tok_num, len(tokenizer.tokenize(text)))
return max_tok_num
all_data = []
for data in list(test_data_dict.values()):
all_data.extend(data)
max_word_num = cal_max_tok_num(all_data, word_tokenizer)
print("max_word_num: {}".format(max_word_num))
if use_bert:
max_subword_num = cal_max_tok_num(all_data, bert_tokenizer)
print("max_subword_num: {}".format(max_subword_num))
subword_tokenizer = bert_tokenizer if use_bert else None
data_maker = DataMaker(handshaking_tagger, word_tokenizer, subword_tokenizer, text2char_indices,
max_word_num, max_subword_num, max_char_num_in_tok)
###Output
_____no_output_____
###Markdown
Model
###Code
if char_encoder_config is not None:
char_encoder_config["char_size"] = len(char2idx)
if word_encoder_config is not None:
word_encoder_config["word2idx"] = word2idx
ent_extractor = TPLinkerNER(char_encoder_config,
word_encoder_config,
flair_config,
handshaking_kernel_config,
enc_hidden_size,
activate_enc_fc,
len(tags),
bert_config,
)
ent_extractor = ent_extractor.to(device)
###Output
_____no_output_____
###Markdown
Metrics
###Code
metrics = Metrics(handshaking_tagger)
###Output
_____no_output_____
###Markdown
Prediction
###Code
# get model state paths
model_state_dir = config["model_state_dict_dir"]
target_run_ids = set(config["run_ids"])
run_id2model_state_paths = {}
for root, dirs, files in os.walk(model_state_dir):
for file_name in files:
run_id = root.split("-")[-1]
if re.match(".*model_state.*\.pt", file_name) and run_id in target_run_ids:
if run_id not in run_id2model_state_paths:
run_id2model_state_paths[run_id] = []
model_state_path = os.path.join(root, file_name)
run_id2model_state_paths[run_id].append(model_state_path)
def get_last_k_paths(path_list, k):
path_list = sorted(path_list, key = lambda x: int(re.search("(\d+)", x.split("/")[-1]).group(1)))
# pprint(path_list)
return path_list[-k:]
# only last k models
k = config["last_k_model"]
for run_id, path_list in run_id2model_state_paths.items():
run_id2model_state_paths[run_id] = get_last_k_paths(path_list, k)
print("Following model states will be loaded: ")
pprint(run_id2model_state_paths)
def filter_duplicates(ent_list):
ent_memory_set = set()
filtered_ent_list = []
for ent in ent_list:
ent_memory = "{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"])
if ent_memory not in ent_memory_set:
filtered_ent_list.append(ent)
ent_memory_set.add(ent_memory)
return filtered_ent_list
def predict(test_dataloader, ori_test_data):
'''
test_data: if split, it would be samples with subtext
ori_test_data: the original data has not been split, used to get original text here
'''
pred_sample_list = []
for batch_test_data in tqdm(test_dataloader, desc = "Predicting"):
sample_list = batch_test_data["sample_list"]
tok2char_span_list = batch_test_data["tok2char_span_list"]
del batch_test_data["sample_list"]
del batch_test_data["tok2char_span_list"]
for k, v in batch_test_data.items():
if k not in {"padded_sents"}:
batch_test_data[k] = v.to(device)
with torch.no_grad():
batch_pred_shaking_outputs = ent_extractor(**batch_test_data)
batch_pred_shaking_tag = (batch_pred_shaking_outputs > 0.).long()
for ind in range(len(sample_list)):
sample = sample_list[ind]
text = sample["text"]
text_id = sample["id"]
tok2char_span = tok2char_span_list[ind]
pred_shaking_tag = batch_pred_shaking_tag[ind]
tok_offset, char_offset = 0, 0
tok_offset, char_offset = (sample["tok_offset"], sample["char_offset"]) if "char_offset" in sample else (0, 0)
ent_list = handshaking_tagger.decode_ent(text,
pred_shaking_tag,
tok2char_span,
tok_offset = tok_offset,
char_offset = char_offset)
pred_sample_list.append({
"text": text,
"id": text_id,
"entity_list": ent_list,
})
# merge
text_id2ent_list = {}
for sample in pred_sample_list:
text_id = sample["id"]
if text_id not in text_id2ent_list:
text_id2ent_list[text_id] = sample["entity_list"]
else:
text_id2ent_list[text_id].extend(sample["entity_list"])
text_id2text = {sample["id"]:sample["text"] for sample in ori_test_data}
merged_pred_sample_list = []
for text_id, ent_list in text_id2ent_list.items():
merged_pred_sample_list.append({
"id": text_id,
"text": text_id2text[text_id],
"entity_list": filter_duplicates(ent_list),
})
return merged_pred_sample_list
def get_test_prf(pred_sample_list, gold_test_data, pattern = "only_head"):
text_id2gold_n_pred = {}
for sample in gold_test_data:
text_id = sample["id"]
text_id2gold_n_pred[text_id] = {
"gold_entity_list": sample["entity_list"],
}
for sample in pred_sample_list:
text_id = sample["id"]
text_id2gold_n_pred[text_id]["pred_entity_list"] = sample["entity_list"]
correct_num, pred_num, gold_num = 0, 0, 0
for gold_n_pred in text_id2gold_n_pred.values():
gold_ent_list = gold_n_pred["gold_entity_list"]
pred_ent_list = gold_n_pred["pred_entity_list"] if "pred_entity_list" in gold_n_pred else []
if pattern == "only_head_index":
gold_ent_set = set(["{}\u2E80{}".format(ent["char_span"][0], ent["type"]) for ent in gold_ent_list])
pred_ent_set = set(["{}\u2E80{}".format(ent["char_span"][0], ent["type"]) for ent in pred_ent_list])
elif pattern == "whole_span":
gold_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["char_span"][0], ent["char_span"][1], ent["type"]) for ent in gold_ent_list])
pred_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["char_span"][0], ent["char_span"][1], ent["type"]) for ent in pred_ent_list])
elif pattern == "whole_text":
gold_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in gold_ent_list])
pred_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in pred_ent_list])
for ent_str in pred_ent_set:
if ent_str in gold_ent_set:
correct_num += 1
pred_num += len(pred_ent_set)
gold_num += len(gold_ent_set)
# print((correct_num, pred_num, gold_num))
prf = metrics.get_scores(correct_num, pred_num, gold_num)
return prf
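# Note (assumption about the helper): metrics.get_scores presumably turns these counts into
# micro precision/recall/F1, i.e. roughly:
#   precision = correct_num / pred_num
#   recall    = correct_num / gold_num
#   f1        = 2 * precision * recall / (precision + recall)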
# predict
res_dict = {}
predict_statistics = {}
for file_name, short_data in test_data_dict.items():
ori_test_data = ori_test_data_dict[file_name]
indexed_test_data = data_maker.get_indexed_data(short_data, data_type = "test")
test_dataloader = DataLoader(MyDataset(indexed_test_data),
batch_size = batch_size,
shuffle = False,
num_workers = 6,
drop_last = False,
collate_fn = lambda data_batch: data_maker.generate_batch(data_batch, data_type = "test"),
)
# iter all model state dicts
for run_id, model_path_list in run_id2model_state_paths.items():
save_dir4run = os.path.join(save_res_dir, run_id)
if config["save_res"] and not os.path.exists(save_dir4run):
os.makedirs(save_dir4run)
for model_state_path in model_path_list:
res_num = re.search("(\d+)", model_state_path.split("/")[-1]).group(1)
save_path = os.path.join(save_dir4run, "{}_res_{}.json".format(file_name, res_num))
if os.path.exists(save_path):
pred_sample_list = [json.loads(line) for line in open(save_path, "r", encoding = "utf-8")]
print("{} already exists, load it directly!".format(save_path))
else:
# load model state
model_state_dict = torch.load(model_state_path)
# if used paralell train, need to rm prefix "module."
new_model_state_dict = OrderedDict()
for key, v in model_state_dict.items():
key = re.sub("module\.", "", key)
new_model_state_dict[key] = v
ent_extractor.load_state_dict(new_model_state_dict)
ent_extractor.eval()
print("run_id: {}, model state {} loaded".format(run_id, model_state_path.split("/")[-1]))
# predict
pred_sample_list = predict(test_dataloader, ori_test_data)
res_dict[save_path] = pred_sample_list
predict_statistics[save_path] = len([s for s in pred_sample_list if len(s["entity_list"]) > 0])
pprint(predict_statistics)
# score
if config["score"]:
filepath2scores = {}
for file_path, pred_samples in res_dict.items():
file_name = re.match("(.*?)_res_\d+.json", file_path.split("/")[-1]).group(1)
gold_test_data = ori_test_data_dict[file_name]
prf = get_test_prf(pred_samples, gold_test_data, pattern = config["correct"])
filepath2scores[file_path] = prf
print("---------------- Results -----------------------")
pprint(filepath2scores)
# check char span
for path, res in res_dict.items():
for sample in tqdm(res, "check character level span"):
text = sample["text"]
for ent in sample["entity_list"]:
assert ent["text"] == text[ent["char_span"][0]:ent["char_span"][1]]
# save
if config["save_res"]:
for path, res in res_dict.items():
with open(path, "w", encoding = "utf-8") as file_out:
for sample in tqdm(res, desc = "Output"):
if len(sample["entity_list"]) == 0:
continue
json_line = json.dumps(sample, ensure_ascii = False)
file_out.write("{}\n".format(json_line))
###Output
_____no_output_____ |
Sampling_effort_by_MPA.ipynb | ###Markdown
Sampling effort by MPAHere I've tried to capture the number of times target MPAs have been sampled through time, and what the sample size was, across projects and data types.**Resources:**https://docs.google.com/spreadsheets/d/1SIb_n9VoAS-GFKKpfBRCVKRseAXQT-4_dgC_rsWK-84/editgid=0
###Code
## Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import sys
sys.executable
## CCFRP
ccfrp = pd.read_csv('CCFRP\\CCFRP_derived_effort_table.csv')
print(ccfrp.shape)
ccfrp
# Load site table
ccfrp_site = pd.read_csv('CCFRP\\CCFRP_location_table.csv')
print(ccfrp_site.shape)
ccfrp_site
# ----- Calculate the number of years in which each MPA was sampled
ccfrp_years_per_MPA = ccfrp.groupby(['Area', 'MPA_Status'], as_index=False)['Year'].nunique()
# Sort
ccfrp_years_per_MPA.sort_values(['Area', 'MPA_Status'], inplace=True)
# Map area names with MPA names
names = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
names.dropna(inplace=True)
name_dict = dict(zip(names['Area'], names['CA_MPA_name_short']))
# Replace
ccfrp_years_per_MPA['Area'].replace(name_dict, inplace=True)
# Save
ccfrp_years_per_MPA.to_csv('CCFRP\\ccfrp_years_per_MPA.csv', index=False)
# ----- Calculate the number of grid cells sampled in each MPA per year (then take the min and max across years)
ccfrp_grid_cells_per_year = ccfrp.groupby(['Area', 'MPA_Status', 'Year'], as_index=False)['ID_Cell_per_Trip'].nunique()
# Find min and max
ccfrp_grid_cells_per_year = ccfrp_grid_cells_per_year.groupby(['Area', 'MPA_Status'], as_index=False).agg({'ID_Cell_per_Trip':[min, max]})
ccfrp_grid_cells_per_year.columns = ['Area', 'MPA_Status', 'Min_samples', 'Max_samples']
# Sort
ccfrp_grid_cells_per_year.sort_values(['Area', 'MPA_Status'], inplace=True)
# Replace
ccfrp_grid_cells_per_year['Area'].replace(name_dict, inplace=True)
# Save
ccfrp_grid_cells_per_year.to_csv('CCFRP\\ccfrp_grid_cells_per_year.csv', index=False)
ccfrp_grid_cells_per_year = ccfrp.groupby(['Area', 'MPA_Status', 'Year'], as_index=False)['ID_Cell_per_Trip'].nunique()
ccfrp_grid_cells_per_year
test = ccfrp_grid_cells_per_year.groupby(['Area', 'MPA_Status'], as_index=False).agg({'ID_Cell_per_Trip':[min, max]})
test
test.columns = ['a', 'b', 'c', 'd']
test
test = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
test.drop_duplicates(inplace=True)
test.dropna(inplace=True)
test.loc[test['Area'] == 'Anacapa Island', 'CA_MPA_name_short'] = 'Anacapa Island SMR or Anacapa Island SMCA'
test.drop_duplicates(inplace=True)
test = pd.DataFrame({'Area':ccfrp['Area'], 'CA_MPA_name_short':ccfrp['CA_MPA_name_short']})
test.dropna(inplace=True)
dict(zip(test['Area'], test['CA_MPA_name_short']))
###Output
_____no_output_____ |
tutorial/06 - Aerodynamics/03 - Interfaces to External Aerodynamics Tools/02 - XFoil.ipynb | ###Markdown
XFOIL OverviewXFOIL is a design and analysis tool for subsonic airfoils developed by Mark Drela at MIT.The [XFOIL website](https://web.mit.edu/drela/Public/web/xfoil/) contains more info. SetupAs with the previous AVL tutorial, a copy of the XFOIL executable must be somewhere on your computer in order to use it with AeroSandbox.Download a copy of the executable from the [XFOIL website](https://web.mit.edu/drela/Public/web/xfoil/) for your operating system. Place it anywhere on your computer, and remember the filepath to the executable. Running XFOIL from AeroSandboxFirst, we'll do some imports. We'll also do some stuff that is only necessary for this tutorial to run correctly in a browser - ignore this following code block. (Basically, it's to make unit testing of tutorials happy.)
###Code
import aerosandbox as asb
import aerosandbox.numpy as np
from shutil import which
xfoil_is_present = which('xfoil') is not None
###Output
_____no_output_____
###Markdown
Next, we'll define an airfoil to analyze:
###Code
airfoil = asb.Airfoil("dae51") # Geometry will be automatically pulled from UIUC database (local).
###Output
_____no_output_____
###Markdown
And draw it:
###Code
from aerosandbox.tools.pretty_plots import plt, show_plot, set_ticks # sets some nice defaults
fig, ax = plt.subplots()
airfoil.draw(show=False)
set_ticks(0.1, 0.05, 0.1, 0.05)
show_plot()
###Output
_____no_output_____
###Markdown
Now, let's analyze it:
###Code
if xfoil_is_present: # Ignore this; just for tutorial purposes.
analysis = asb.XFoil(
airfoil=airfoil,
Re=3e5,
xfoil_command="xfoil",
# If XFOIL is not on your PATH, then set xfoil_command to the filepath to your XFOIL executable.
)
point_analysis = analysis.alpha(
alpha=3
)
from pprint import pprint
print("\nPoint analysis:")
pprint(point_analysis)
sweep_analysis = analysis.alpha(
alpha=np.linspace(0, 15, 6)
)
print("\nSweep analysis:")
pprint(sweep_analysis)
cl_analysis = analysis.cl(
cl=1.2
)
print("\nFixed-CL analysis:")
pprint(cl_analysis)
###Output
Point analysis:
{'Bot_Xtr': array([1.]),
'CD': array([0.00873]),
'CDp': array([0.00318]),
'CL': array([0.8065]),
'CM': array([-0.1026]),
'Top_Xtr': array([0.6764]),
'alpha': array([3.])}
Sweep analysis:
{'Bot_Xtr': array([1., 1., 1., 1., 1., 1.]),
'CD': array([0.008 , 0.00873, 0.01112, 0.02211, 0.04114, 0.08114]),
'CDp': array([0.00319, 0.00318, 0.00522, 0.01426, 0.03433, 0.07667]),
'CL': array([0.4719, 0.8065, 1.1205, 1.3277, 1.4138, 1.3851]),
'CM': array([-0.1039, -0.1026, -0.0987, -0.0819, -0.0568, -0.0459]),
'Top_Xtr': array([0.7891, 0.6764, 0.4618, 0.0438, 0.0213, 0.017 ]),
'alpha': array([ 0., 3., 6., 9., 12., 15.])}
Fixed-CL analysis:
{'Bot_Xtr': array([1.]),
'CD': array([0.01303]),
'CDp': array([0.00666]),
'CL': array([1.2]),
'CM': array([-0.0957]),
'Top_Xtr': array([0.3296]),
'alpha': array([6.902])}
###Markdown
We can use this to plot polars:
###Code
if xfoil_is_present: # Ignore this; just for tutorial purposes.
fig, ax = plt.subplots(2, 2, figsize=(8, 8))
Re = 250e3
alpha_inputs = np.linspace(-15, 15, 150)
xf_run = asb.XFoil(airfoil, Re=Re, max_iter=20, timeout=None).alpha(alpha_inputs)
xa = xf_run["alpha"]
xCL = xf_run["CL"]
xCD = xf_run["CD"]
plt.sca(ax[0, 0])
plt.plot(xa, xCL, ".-")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Lift Coefficient $C_L$ [-]")
plt.sca(ax[0, 1])
plt.plot(xa, xCD, ".-")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Drag Coefficient $C_D$ [-]")
plt.ylim(0, 0.05)
plt.sca(ax[1, 0])
plt.plot(xCD, xCL, ".-")
plt.xlabel(r"Drag Coefficient $C_D$ [-]")
plt.ylabel(r"Lift Coefficient $C_L$ [-]")
plt.xlim(0, 0.05)
plt.sca(ax[1, 1])
plt.plot(xa, xCL / xCD, ".-")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Aerodynamic Efficiency $C_L / C_D$ [-]")
from aerosandbox.tools.string_formatting import eng_string
show_plot(f"Aerodynamic Performance of Airfoil '{airfoil.name}' at $\\mathrm{{Re}}={eng_string(Re)}$, from XFoil")
###Output
_____no_output_____ |
Plot_tool/散布図.ipynb | ###Markdown
Scatter Plot
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from matplotlib import font_manager
font_manager._rebuild()
# os.getcwd()
counter = 0
df = pd.read_csv("csv/high_voltage.csv", encoding="UTF-8")
df.head()
x1 = df["d"]
y1 = df["vk"]
x2 = df["pin_d"]
y2 = df["vc"]
x3 = df["plate_d"]
y3 = df["plate_v"]
# ycos = df["cosx"]
# plt.rcParams["font.family"] = "Source Han Code JP"
# plt.rcParams['font.family'] ='sans-serif', "IPAexGothic"  # font to use
plt.rcParams["font.sans-serif"] = "IPAexGothic"
plt.rcParams['xtick.direction'] = 'in'  # x-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['ytick.direction'] = 'in'  # y-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['xtick.major.width'] = 1.0  # line width of x-axis major ticks
plt.rcParams['ytick.major.width'] = 1.0  # line width of y-axis major ticks
plt.rcParams['font.size'] = 12  # font size
plt.rcParams['axes.linewidth'] = 1.0  # axis edge (spine) line width, i.e. thickness of the plot frame
plt.figure(figsize=(5.5,5))
# Axis ticks
plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False)
# plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%.3f'))  # show y-axis values to 3 decimal places
# plt.gca().xaxis.get_major_formatter().set_useOffset(False)  # show axis numbers without an offset (e.g. +1.05e9)
# plt.rcParams["xtick.labelsize"] = 12
# plt.rcParams["ytick.labelsize"] = 12
# plt.rcParams["legend.fontsize"] = 12
# plt.plot(x,y2,"-",label="Theoretical value")
plt.scatter(x1,y1,label="Charging voltage (theoretical)",marker=".")
plt.scatter(x1,y2,label="Charging voltage (measured)" ,marker=".")
# plt.scatter(x3,y3,label="Plate-plate electrode" ,marker=".")
plt.plot(x1,y1,"-",linestyle="dotted")
plt.plot(x1,y2,"-",linestyle="dotted")
# plt.plot(x3,y3,"-",linestyle="dotted")
# plt.plot(x1,y2,"-",linestyle="dotted")
# plt.plot(x3,y3,"-",label="Plate-plate electrode")
# plt.plot(x1,y2,"-",label="Corrected discharge voltage Vk",linestyle='dashed')
# plt.plot(x3,y3,"-",label="m=12,ΔT=0.0025",linestyle='dashdot')
# plt.plot(x,y3,"s-",label="3 term")
# plt.plot(x,ycos,"+-",label="cos x")
plt.xlabel("Gap length [mm]")
plt.ylabel("Charging voltage [kV]")
# plt.xlim(0,1)
# plt.ylabel("angle[rad]")
plt.legend()
plt.tight_layout()  # keeps elements from overlapping and fits the plot within the figure size
# plt.savefig('figname.pdf', transparent=True)
# plt.savefig('cos_taylor.png', transparent=True, dpi=300)
plt.savefig('output_' + str(counter) + '.png',dpi=300)
print("output_" + str(counter) + ".png")
counter += 1
###Output
output_0.png
###Markdown
Plot with two y-axes
###Code
plt.rcParams['font.family'] ='sans-serif'  # font to use
plt.rcParams['xtick.direction'] = 'in'  # x-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['ytick.direction'] = 'in'  # y-axis tick marks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['xtick.major.width'] = 1.0  # line width of x-axis major ticks
plt.rcParams['ytick.major.width'] = 1.0  # line width of y-axis major ticks
plt.rcParams['font.size'] = 12  # font size
plt.rcParams['axes.linewidth'] = 1.0  # axis edge (spine) line width, i.e. thickness of the plot frame
fig = plt.figure(figsize=(5.8,5.5))
# fig = plt.figure(figsize=(5.8,5))
# Axis ticks
plt.gca().yaxis.set_tick_params(which='both', direction='in',bottom=True, top=False, left=True, right=False)
# First (left) axis
ax1 = fig.add_subplot(111)
ln1 = ax1.plot(x1,y1,"-",label="Theta Euler method")  # `x` was undefined in this notebook; assuming x1 from the cell above
# Second (right) axis
ax2 = ax1.twinx()
ln2 = ax2.plot(x1,y2,"-",label="Omega Euler method",color="orange")  # likewise assuming x1
# Legend
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2,bbox_to_anchor=(0, 1), loc='lower left')
# ax1.legend(h1+h2, l1+l2)
# Axis labels
ax1.set_xlabel("time")
ax1.set_ylabel("angle[rad]")
ax2.set_ylabel("angular velocity[rad/s]")
plt.tight_layout()  # keeps elements from overlapping and fits the plot within the figure size
plt.savefig('output_' + str(counter) + '.png',dpi=300)
print("output_" + str(counter) + ".png")
counter += 1
###Output
/home/hiroya/Documents/Jupyter-Notebook/.venv/lib/python3.6/site-packages/ipykernel_launcher.py:15: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
from ipykernel import kernelapp as app
|
Chapter 7 - Ensemble Learning and Random Forests .ipynb | ###Markdown
Bagging: each predictor is trained on a different random sample of the training set drawn with replacement; the ensemble then aggregates their predictions (the statistical mode for classification, the average for regression).
###Code
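# --- NOTE (not in the original notebook): the cells defining X, y and the train/test split are not
# --- in this excerpt. A minimal, hedged reconstruction, assuming the two-moons toy dataset that the
# --- decision-boundary plot below (axes [-1.5, 2.45, -1, 1.5]) suggests:
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
X, y = make_moons(n_samples=500, noise=0.30, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)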
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag_clf = BaggingClassifier(
DecisionTreeClassifier(), n_estimators=500,
max_samples=100, bootstrap=True, n_jobs=-1) # n_jobs tells sklearn how many CPU cores to use, -1 being all cores
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, y_pred))
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
y_pred_tree = tree_clf.predict(X_test)
print(accuracy_score(y_test, y_pred_tree))
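# --- Hedged illustration (not in the original notebook): for classification, the ensemble's output
# --- is essentially the statistical mode (majority vote) of its trees, as described in the markdown
# --- above. sklearn actually averages class probabilities when the base estimator supports
# --- predict_proba, so the agreement below may be slightly under 1.0.
import numpy as np
per_tree_preds = np.array([tree.predict(X_test) for tree in bag_clf.estimators_])
majority_vote = (per_tree_preds.mean(axis=0) > 0.5).astype(int)  # two classes (0/1), so mean > 0.5 = majority
print("agreement with bag_clf.predict:", np.mean(majority_vote == y_pred))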
import numpy as np
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
from matplotlib.colors import ListedColormap
def plot_decision_boundary(clf, X, y, axes=[-1.5, 2.45, -1, 1.5], alpha=0.5, contour=True):
x1s = np.linspace(axes[0], axes[1], 100)
x2s = np.linspace(axes[2], axes[3], 100)
x1, x2 = np.meshgrid(x1s, x2s)
X_new = np.c_[x1.ravel(), x2.ravel()]
y_pred = clf.predict(X_new).reshape(x1.shape)
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
if contour:
custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", alpha=alpha)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", alpha=alpha)
plt.axis(axes)
plt.xlabel(r"$x_1$", fontsize=18)
plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
fix, axes = plt.subplots(ncols=2, figsize=(10,4), sharey=True)
plt.sca(axes[0])
plot_decision_boundary(tree_clf, X, y)
plt.title("Decision Tree", fontsize=14)
plt.sca(axes[1])
plot_decision_boundary(bag_clf, X, y)
plt.title("Decision Trees with Bagging", fontsize=14)
plt.ylabel("")
plt.show()
###Output
_____no_output_____
###Markdown
Out of Bag (OOB) evaluation
###Code
bag_clf = BaggingClassifier(
DecisionTreeClassifier(), n_estimators=500,
bootstrap=True, n_jobs=-1, oob_score=True) # OOB score True ensures the avaibility
bag_clf.fit(X_train, y_train)
# this is the accuracy estimated on the out-of-bag instances, i.e. the training instances a given predictor never saw because of sampling with replacement (a free validation set)
bag_clf.oob_score_
y_pred = bag_clf.predict(X_test)
accuracy_score(y_test, y_pred)
# to check the OOB decision function (estimated class probabilities) for each training instance
bag_clf.oob_decision_function_
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
rnd_clf.fit(X_train, y_train)
y_pred_rf = rnd_clf.predict(X_test)
accuracy_score(y_test, y_pred_rf)
# A similar setup with bagging would look like this
bag_clf = BaggingClassifier(
DecisionTreeClassifier(splitter='random', max_leaf_nodes=16),
n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
accuracy_score(y_test, y_pred)
# Extra-Trees (Extremely Randomized Trees) are also a good option: they use random split thresholds instead of searching for the best ones, trading a bit more bias for lower variance, and may perform better than Random Forests at times
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
from sklearn.datasets import load_iris
iris = load_iris()
rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
rnd_clf.fit(iris['data'], iris['target'])
for name, score in zip(iris['feature_names'], rnd_clf.feature_importances_):
print(name, score)
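# --- Hedged extra (not in the original notebook): the same importances as a quick bar chart.
import matplotlib.pyplot as plt
plt.barh(iris['feature_names'], rnd_clf.feature_importances_)
plt.xlabel("Feature importance")
plt.tight_layout()
plt.show()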
###Output
sepal length (cm) 0.10496305024303643
sepal width (cm) 0.021002451182639954
petal length (cm) 0.41921921153180375
petal width (cm) 0.45481528704251983
###Markdown
AdaBoost
###Code
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), n_estimators=200,
algorithm='SAMME.R', learning_rate=0.5) # SAMME is sklearn's multiclass version of AdaBoost; SAMME.R ("R" for "Real") relies on class probabilities rather than hard predictions and generally performs better
ada_clf.fit(X_train, y_train)
y_ada_pred = ada_clf.predict(X_test)
accuracy_score(y_test, y_ada_pred)
###Output
_____no_output_____
###Markdown
Gradient Boosting
###Code
# Let's create a basic quadratic dataset with some noise
np.random.seed(42)
X = np.random.rand(100, 1) - 0.5
y = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)
from sklearn.tree import DecisionTreeRegressor
tree_reg1 = DecisionTreeRegressor(max_depth=2)
tree_reg1.fit(X, y)
y
# Let's train a second regressor on the residual errors
y2 = y - tree_reg1.predict(X)
tree_reg2 = DecisionTreeRegressor(max_depth=2)
tree_reg2.fit(X, y2)
y2
# Let's train a third predictor the same way
y3 = y2 - tree_reg2.predict(X)
tree_reg3 = DecisionTreeRegressor(max_depth=2)
tree_reg3.fit(X, y3)
y3
X_new = np.array([[0.8]])
# Now let's add up the three trees to make predictions
y_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))
y_pred
from sklearn.ensemble import GradientBoostingRegressor
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0)
gbrt.fit(X, y)
y_gbrt_pred = gbrt.predict(X_new)
y_gbrt_pred
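# --- Hedged check (not in the original notebook): with the same three trees and learning_rate=1.0,
# --- sklearn's GradientBoostingRegressor should give a prediction close to the manual
# --- residual-fitting ensemble above (not identical, since sklearn starts from a constant initial
# --- prediction rather than fitting the first tree directly on y).
print("manual ensemble:", y_pred, "  sklearn GBRT:", y_gbrt_pred)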
# Let's plot how these two models fit to the training data at each stage
def plot_predictions(regressors, X, y, axes, label=None, style="r-", data_style="b.", data_label=None):
x1 = np.linspace(axes[0], axes[1], 500)
y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)
plt.plot(X[:, 0], y, data_style, label=data_label)
plt.plot(x1, y_pred, style, linewidth=2, label=label)
if label or data_label:
plt.legend(loc="upper center", fontsize=16)
plt.axis(axes)
plt.figure(figsize=(15,15))
plt.subplot(321)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h_1(x_1)$", style="g-", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Residuals and tree predictions", fontsize=16)
plt.subplot(322)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Ensemble predictions", fontsize=16)
plt.subplot(323)
plot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_2(x_1)$", style="g-", data_style="k+", data_label="Residuals")
plt.ylabel("$y - h_1(x_1)$", fontsize=16)
plt.subplot(324)
plot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1)$")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.subplot(325)
plot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_3(x_1)$", style="g-", data_style="k+")
plt.ylabel("$y - h_1(x_1) - h_2(x_1)$", fontsize=16)
plt.xlabel("$x_1$", fontsize=16)
plt.subplot(326)
plot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$")
plt.xlabel("$x_1$", fontsize=16)
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
Example of how we can use early stopping to find the optimal number of trees, so that we don't overfit the model
###Code
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
X_train, X_val, y_train, y_val = train_test_split(X, y)
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120)
gbrt.fit(X_train, y_train)
errors = [mean_squared_error(y_val, y_pred)
for y_pred in gbrt.staged_predict(X_val)] # staged_predict returns an iterator over the predictions made by the ensemble at each stage (tree) of the training
bst_n_estimators = np.argmin(errors) + 1
gbrt_best = GradientBoostingRegressor(max_depth=2, n_estimators=bst_n_estimators)
gbrt_best.fit(X_train, y_train)
# Let's visualize the validation error and the best model's fit
min_error = np.min(errors)
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(errors, "b.-")
plt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], "k--")
plt.plot([0, 120], [min_error, min_error], "k--")
plt.plot(bst_n_estimators, min_error, "ko")
plt.text(bst_n_estimators, min_error*1.2, "Minimum", ha="center", fontsize=14)
plt.axis([0, 120, 0, 0.01])
plt.xlabel("Number of trees")
plt.ylabel("Error", fontsize=16)
plt.title("Validation error", fontsize=14)
plt.subplot(122)
plot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])
plt.title("Best model (%d trees)" % bst_n_estimators, fontsize=14)
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.xlabel("$x_1$", fontsize=16)
plt.show()
# Let's train again with incremental (warm-start) fitting and stop automatically once the validation error has not improved for 5 consecutive iterations
gbrt = GradientBoostingRegressor(max_depth=2, warm_start=True)
min_val_error = float('inf')
error_going_up = 0
for n_estimators in range(1, 120):
gbrt.n_estimators = n_estimators
gbrt.fit(X_train, y_train)
y_pred = gbrt.predict(X_val)
val_error = mean_squared_error(y_val, y_pred)
if val_error < min_val_error:
min_val_error = val_error
error_going_up = 0
else:
error_going_up += 1
if error_going_up == 5:
break # early stopping
gbrt
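# --- Hedged extra (not in the original notebook): report where the incremental training stopped.
print("training stopped at n_estimators =", gbrt.n_estimators,
      "(best was around", gbrt.n_estimators - error_going_up, ") with min validation MSE =", min_val_error)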
###Output
_____no_output_____
###Markdown
XGBoost
###Code
import xgboost
xgb_reg = xgboost.XGBRegressor()
xgb_reg.fit(X_train, y_train)
y_xgb_pred = xgb_reg.predict(X_val)
y_xgb_pred
y_val
# Early stopping is easy with XGBoost
xgb_reg.fit(X_train, y_train,
eval_set=[(X_val, y_val)], early_stopping_rounds=2)
y_pred = xgb_reg.predict(X_val)
y_pred
###Output
[0] validation_0-rmse:0.22709
[1] validation_0-rmse:0.17088
[2] validation_0-rmse:0.13266
[3] validation_0-rmse:0.10603
[4] validation_0-rmse:0.08793
[5] validation_0-rmse:0.07539
[6] validation_0-rmse:0.06511
[7] validation_0-rmse:0.05926
[8] validation_0-rmse:0.05582
[9] validation_0-rmse:0.05352
[10] validation_0-rmse:0.05235
[11] validation_0-rmse:0.05179
[12] validation_0-rmse:0.05150
[13] validation_0-rmse:0.05110
[14] validation_0-rmse:0.05083
[15] validation_0-rmse:0.05115
[16] validation_0-rmse:0.05068
[17] validation_0-rmse:0.05079
[18] validation_0-rmse:0.05063
[19] validation_0-rmse:0.05078
|
NLP/Learn_by_deeplearning.ai/Course 1 - Classification and Vector Spaces/Labs/Week 1/C1-W1-L2-Building and Visualizing word frequencies.ipynb | ###Markdown
Building and Visualizing word frequencies

In this lab, we will focus on the `build_freqs()` helper function and visualizing a dataset fed into it. In our goal of tweet sentiment analysis, this function will build a dictionary where we can look up how many times a word appears in the lists of positive or negative tweets. This will be very helpful when extracting the features of the dataset in the week's programming assignment. Let's see how this function is implemented under the hood in this notebook.

Setup

Let's import the required libraries for this lab:
###Code
import nltk
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
%config InlineBackend.figure_format='svg'
###Output
_____no_output_____
###Markdown
Import some helper functions that we provided in the utils.py file:

* `process_tweet()`: Cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
* `build_freqs()`: This counts how often a word in the 'corpus' (the entire set of tweets) was associated with a positive label `1` or a negative label `0`. It then builds the `freqs` dictionary, where each key is a `(word,label)` tuple, and the value is the count of its frequency within the corpus of tweets.
###Code
# download the stopwords for the process_tweet function
nltk.download('stopwords')
# import our convenience functions
from utils import process_tweet
###Output
[nltk_data] Error loading stopwords: <urlopen error [SSL:
[nltk_data] CERTIFICATE_VERIFY_FAILED] certificate verify failed:
[nltk_data] unable to get local issuer certificate (_ssl.c:1076)>
###Markdown
Load the NLTK sample dataset

As in the previous lab, we will be using the [Twitter dataset from NLTK](http://www.nltk.org/howto/twitter.html#Using-a-Tweet-Corpus).
###Code
# select the lists of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
# concatenate the lists, 1st part is the positive tweets followed by the negative
tweets = all_positive_tweets + all_negative_tweets
# let's see how many tweets we have
print("Number of tweets: ", len(tweets))
###Output
Number of tweets: 10000
###Markdown
Next, we will build a labels array that matches the sentiments of our tweets. This data type works pretty much like a regular list but is optimized for computations and manipulation. The `labels` array will be composed of 10000 elements. The first 5000 will be filled with `1` labels denoting positive sentiments, and the next 5000 will be `0` labels denoting the opposite. We can do this easily with a series of operations provided by the `numpy` library:

* `np.ones()` - create an array of 1's
* `np.zeros()` - create an array of 0's
* `np.append()` - concatenate arrays
###Code
labels = np.append(np.ones((len(all_positive_tweets))), np.zeros((len(all_negative_tweets))))
###Output
_____no_output_____
###Markdown
Dictionaries

In Python, a dictionary is a mutable and indexed collection. It stores items as key-value pairs and uses [hash tables](https://en.wikipedia.org/wiki/Hash_table) underneath to allow practically constant time lookups. In NLP, dictionaries are essential because they enable fast retrieval of items or containment checks even with thousands of entries in the collection.

Word frequency dictionary

Now that we know the building blocks, let's finally take a look at the **build_freqs()** function in **utils.py**. This is the function that creates the dictionary containing the word counts from each corpus.
###Code
def build_freqs(tweets, ys):
    """Build a dict mapping each (word, label) pair to its count over the whole corpus."""
    yslist = np.squeeze(ys).tolist()
    freqs = {}
    for y, tweet in zip(yslist, tweets):
        for word in process_tweet(tweet):
            pair = (word, y)
            if pair in freqs:
                freqs[pair] += 1
            else:
                freqs[pair] = 1
    return freqs
###Output
_____no_output_____
###Markdown
Now, it is time to use the dictionary returned by the `build_freqs()` function. First, let us feed our `tweets` and `labels` lists then print a basic report:
###Code
# create frequency dictionary
freqs = build_freqs(tweets, labels)
# check data type
print(f'type(freqs) = {type(freqs)}')
# check length of the dictionary
print(f'len(freqs) = {len(freqs)}')
# Now print the frequency of each word depending on its class.
print(freqs)
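# --- Hedged extra (not in the original lab): because freqs is a hash-table-backed dict keyed by
# --- (word, label) tuples, lookups and containment checks are practically constant time, e.g.:
print("positive count for 'happi':", freqs.get(('happi', 1.0), 0))
print("negative count for 'happi':", freqs.get(('happi', 0.0), 0))
print("('happi', 1.0) in freqs:", ('happi', 1.0) in freqs)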
###Output
{('followfriday', 1.0): 25, ('top', 1.0): 32, ('engag', 1.0): 7, ('member', 1.0): 16, ('commun', 1.0): 33, ('week', 1.0): 83, (':)', 1.0): 3568, ('hey', 1.0): 76, ('jame', 1.0): 7, ('odd', 1.0): 2, (':/', 1.0): 5, ('pleas', 1.0): 97, ('call', 1.0): 37, ('contact', 1.0): 7, ('centr', 1.0): 2, ('02392441234', 1.0): 1, ('abl', 1.0): 8, ('assist', 1.0): 1, ('mani', 1.0): 33, ('thank', 1.0): 620, ('listen', 1.0): 16, ('last', 1.0): 47, ('night', 1.0): 68, ('bleed', 1.0): 2, ('amaz', 1.0): 51, ('track', 1.0): 5, ('scotland', 1.0): 2, ('congrat', 1.0): 21, ('yeaaah', 1.0): 1, ('yipppi', 1.0): 1, ('accnt', 1.0): 2, ('verifi', 1.0): 2, ('rqst', 1.0): 1, ('succeed', 1.0): 1, ('got', 1.0): 69, ('blue', 1.0): 9, ('tick', 1.0): 1, ('mark', 1.0): 1, ('fb', 1.0): 6, ('profil', 1.0): 2, ('15', 1.0): 5, ('day', 1.0): 246, ('one', 1.0): 129, ('irresist', 1.0): 2, ('flipkartfashionfriday', 1.0): 17, ('like', 1.0): 233, ('keep', 1.0): 68, ('love', 1.0): 400, ('custom', 1.0): 4, ('wait', 1.0): 70, ('long', 1.0): 36, ('hope', 1.0): 141, ('enjoy', 1.0): 75, ('happi', 1.0): 211, ('friday', 1.0): 116, ('lwwf', 1.0): 1, ('second', 1.0): 10, ('thought', 1.0): 29, ('’', 1.0): 21, ('enough', 1.0): 18, ('time', 1.0): 127, ('dd', 1.0): 1, ('new', 1.0): 143, ('short', 1.0): 7, ('enter', 1.0): 9, ('system', 1.0): 2, ('sheep', 1.0): 1, ('must', 1.0): 18, ('buy', 1.0): 11, ('jgh', 1.0): 4, ('go', 1.0): 148, ('bayan', 1.0): 1, (':D', 1.0): 629, ('bye', 1.0): 7, ('act', 1.0): 8, ('mischiev', 1.0): 1, ('etl', 1.0): 1, ('layer', 1.0): 1, ('in-hous', 1.0): 1, ('wareh', 1.0): 1, ('app', 1.0): 16, ('katamari', 1.0): 1, ('well', 1.0): 81, ('…', 1.0): 38, ('name', 1.0): 18, ('impli', 1.0): 1, (':p', 1.0): 137, ('influenc', 1.0): 18, ('big', 1.0): 33, ('...', 1.0): 289, ('juici', 1.0): 3, ('selfi', 1.0): 12, ('follow', 1.0): 381, ('perfect', 1.0): 24, ('alreadi', 1.0): 28, ('know', 1.0): 145, ("what'", 1.0): 17, ('great', 1.0): 171, ('opportun', 1.0): 23, ('junior', 1.0): 2, ('triathlet', 1.0): 1, ('age', 1.0): 2, ('12', 1.0): 5, ('13', 1.0): 6, ('gatorad', 1.0): 1, ('seri', 1.0): 5, ('get', 1.0): 206, ('entri', 1.0): 4, ('lay', 1.0): 4, ('greet', 1.0): 5, ('card', 1.0): 8, ('rang', 1.0): 3, ('print', 1.0): 3, ('today', 1.0): 108, ('job', 1.0): 41, (':-)', 1.0): 692, ("friend'", 1.0): 3, ('lunch', 1.0): 5, ('yummm', 1.0): 1, ('nostalgia', 1.0): 1, ('tb', 1.0): 2, ('ku', 1.0): 1, ('id', 1.0): 8, ('conflict', 1.0): 1, ('help', 1.0): 41, ("here'", 1.0): 25, ('screenshot', 1.0): 3, ('work', 1.0): 110, ('hi', 1.0): 173, ('liv', 1.0): 2, ('hello', 1.0): 59, ('need', 1.0): 78, ('someth', 1.0): 28, ('u', 1.0): 175, ('fm', 1.0): 2, ('twitter', 1.0): 29, ('—', 1.0): 27, ('sure', 1.0): 58, ('thing', 1.0): 69, ('dm', 1.0): 39, ('x', 1.0): 72, ("i'v", 1.0): 35, ('heard', 1.0): 9, ('four', 1.0): 5, ('season', 1.0): 9, ('pretti', 1.0): 20, ('dope', 1.0): 2, ('penthous', 1.0): 1, ('obv', 1.0): 1, ('gobigorgohom', 1.0): 1, ('fun', 1.0): 58, ("y'all", 1.0): 3, ('yeah', 1.0): 47, ('suppos', 1.0): 7, ('lol', 1.0): 64, ('chat', 1.0): 13, ('bit', 1.0): 20, ('youth', 1.0): 19, ('💅', 1.0): 1, ('🏽', 1.0): 2, ('💋', 1.0): 2, ('seen', 1.0): 10, ('year', 1.0): 43, ('rest', 1.0): 12, ('goe', 1.0): 7, ('quickli', 1.0): 3, ('bed', 1.0): 16, ('music', 1.0): 21, ('fix', 1.0): 10, ('dream', 1.0): 20, ('spiritu', 1.0): 1, ('ritual', 1.0): 1, ('festiv', 1.0): 8, ('népal', 1.0): 1, ('begin', 1.0): 4, ('line-up', 1.0): 4, ('left', 1.0): 13, ('see', 1.0): 184, ('sarah', 1.0): 4, ('send', 1.0): 22, ('us', 1.0): 109, ('email', 1.0): 26, ('[email protected]', 1.0): 1, 
("we'll", 1.0): 20, ('asap', 1.0): 5, ('kik', 1.0): 22, ('hatessuc', 1.0): 1, ('32429', 1.0): 1, ('kikm', 1.0): 1, ('lgbt', 1.0): 2, ('tinder', 1.0): 1, ('nsfw', 1.0): 1, ('akua', 1.0): 1, ('cumshot', 1.0): 1, ('come', 1.0): 70, ('hous', 1.0): 7, ('nsn_supplement', 1.0): 1, ('effect', 1.0): 4, ('press', 1.0): 1, ('releas', 1.0): 11, ('distribut', 1.0): 1, ('result', 1.0): 2, ('link', 1.0): 18, ('remov', 1.0): 3, ('pressreleas', 1.0): 1, ('newsdistribut', 1.0): 1, ('bam', 1.0): 44, ('bestfriend', 1.0): 50, ('lot', 1.0): 87, ('warsaw', 1.0): 44, ('<3', 1.0): 134, ('x46', 1.0): 1, ('everyon', 1.0): 58, ('watch', 1.0): 46, ('documentari', 1.0): 1, ('earthl', 1.0): 2, ('youtub', 1.0): 13, ('support', 1.0): 27, ('buuut', 1.0): 1, ('oh', 1.0): 53, ('look', 1.0): 137, ('forward', 1.0): 29, ('visit', 1.0): 30, ('next', 1.0): 48, ('letsgetmessi', 1.0): 1, ('jo', 1.0): 1, ('make', 1.0): 99, ('feel', 1.0): 46, ('better', 1.0): 52, ('never', 1.0): 36, ('anyon', 1.0): 11, ('kpop', 1.0): 1, ('flesh', 1.0): 1, ('good', 1.0): 238, ('girl', 1.0): 44, ('best', 1.0): 65, ('wish', 1.0): 37, ('reason', 1.0): 13, ('epic', 1.0): 2, ('soundtrack', 1.0): 1, ('shout', 1.0): 12, ('ad', 1.0): 14, ('video', 1.0): 34, ('playlist', 1.0): 5, ('would', 1.0): 84, ('dear', 1.0): 17, ('jordan', 1.0): 1, ('okay', 1.0): 39, ('fake', 1.0): 2, ('gameplay', 1.0): 2, (';)', 1.0): 27, ('haha', 1.0): 53, ('im', 1.0): 51, ('kid', 1.0): 18, ('stuff', 1.0): 13, ('exactli', 1.0): 6, ('product', 1.0): 12, ('line', 1.0): 6, ('etsi', 1.0): 1, ('shop', 1.0): 16, ('check', 1.0): 52, ('vacat', 1.0): 6, ('recharg', 1.0): 1, ('normal', 1.0): 6, ('charger', 1.0): 2, ('asleep', 1.0): 9, ('talk', 1.0): 45, ('sooo', 1.0): 6, ('someon', 1.0): 34, ('text', 1.0): 18, ('ye', 1.0): 77, ('bet', 1.0): 6, ("he'll", 1.0): 4, ('fit', 1.0): 3, ('hear', 1.0): 33, ('speech', 1.0): 1, ('piti', 1.0): 3, ('green', 1.0): 3, ('garden', 1.0): 7, ('midnight', 1.0): 1, ('sun', 1.0): 6, ('beauti', 1.0): 50, ('canal', 1.0): 1, ('dasvidaniya', 1.0): 1, ('till', 1.0): 18, ('scout', 1.0): 1, ('sg', 1.0): 1, ('futur', 1.0): 13, ('wlan', 1.0): 1, ('pro', 1.0): 5, ('confer', 1.0): 1, ('asia', 1.0): 1, ('chang', 1.0): 24, ('lollipop', 1.0): 1, ('🍭', 1.0): 1, ('nez', 1.0): 1, ('agnezmo', 1.0): 1, ('oley', 1.0): 1, ('mama', 1.0): 1, ('stand', 1.0): 8, ('stronger', 1.0): 1, ('god', 1.0): 20, ('misti', 1.0): 1, ('babi', 1.0): 20, ('cute', 1.0): 26, ('woohoo', 1.0): 3, ("can't", 1.0): 43, ('sign', 1.0): 11, ('yet', 1.0): 13, ('still', 1.0): 48, ('think', 1.0): 63, ('mka', 1.0): 5, ('liam', 1.0): 8, ('access', 1.0): 3, ('welcom', 1.0): 73, ('stat', 1.0): 60, ('arriv', 1.0): 67, ('1', 1.0): 75, ('unfollow', 1.0): 63, ('via', 1.0): 69, ('surpris', 1.0): 10, ('figur', 1.0): 5, ('happybirthdayemilybett', 1.0): 1, ('sweet', 1.0): 19, ('talent', 1.0): 5, ('2', 1.0): 58, ('plan', 1.0): 27, ('drain', 1.0): 1, ('gotta', 1.0): 5, ('timezon', 1.0): 1, ('parent', 1.0): 5, ('proud', 1.0): 12, ('least', 1.0): 16, ('mayb', 1.0): 18, ('sometim', 1.0): 13, ('grade', 1.0): 4, ('al', 1.0): 4, ('grand', 1.0): 4, ('manila_bro', 1.0): 2, ('chosen', 1.0): 1, ('let', 1.0): 68, ('around', 1.0): 17, ('..', 1.0): 128, ('side', 1.0): 15, ('world', 1.0): 27, ('eh', 1.0): 2, ('take', 1.0): 43, ('care', 1.0): 18, ('final', 1.0): 30, ('fuck', 1.0): 26, ('weekend', 1.0): 75, ('real', 1.0): 21, ('x45', 1.0): 1, ('join', 1.0): 23, ('hushedcallwithfraydo', 1.0): 1, ('gift', 1.0): 8, ('yeahhh', 1.0): 1, ('hushedpinwithsammi', 1.0): 2, ('event', 1.0): 8, ('might', 1.0): 27, ('luv', 1.0): 6, ('realli', 1.0): 79, 
('appreci', 1.0): 31, ('share', 1.0): 46, ('wow', 1.0): 22, ('tom', 1.0): 5, ('gym', 1.0): 4, ('monday', 1.0): 9, ('invit', 1.0): 17, ('scope', 1.0): 5, ('friend', 1.0): 61, ('nude', 1.0): 2, ('sleep', 1.0): 45, ('birthday', 1.0): 74, ('want', 1.0): 96, ('t-shirt', 1.0): 3, ('cool', 1.0): 38, ('haw', 1.0): 1, ('phela', 1.0): 1, ('mom', 1.0): 10, ('obvious', 1.0): 2, ('princ', 1.0): 1, ('charm', 1.0): 1, ('stage', 1.0): 2, ('luck', 1.0): 30, ('tyler', 1.0): 2, ('hipster', 1.0): 1, ('glass', 1.0): 5, ('marti', 1.0): 2, ('glad', 1.0): 43, ('done', 1.0): 54, ('afternoon', 1.0): 10, ('read', 1.0): 34, ('kahfi', 1.0): 1, ('finish', 1.0): 17, ('ohmyg', 1.0): 1, ('yaya', 1.0): 3, ('dub', 1.0): 2, ('stalk', 1.0): 2, ('ig', 1.0): 3, ('gondooo', 1.0): 1, ('moo', 1.0): 2, ('tologooo', 1.0): 1, ('becom', 1.0): 10, ('detail', 1.0): 10, ('zzz', 1.0): 1, ('xx', 1.0): 42, ('physiotherapi', 1.0): 1, ('hashtag', 1.0): 5, ('💪', 1.0): 1, ('monica', 1.0): 1, ('miss', 1.0): 27, ('sound', 1.0): 23, ('morn', 1.0): 101, ("that'", 1.0): 67, ('x43', 1.0): 1, ('definit', 1.0): 23, ('tri', 1.0): 44, ('tonight', 1.0): 20, ('took', 1.0): 8, ('advic', 1.0): 6, ('treviso', 1.0): 1, ('concert', 1.0): 24, ('citi', 1.0): 27, ('countri', 1.0): 23, ("i'll", 1.0): 90, ('start', 1.0): 61, ('fine', 1.0): 10, ('gorgeou', 1.0): 12, ('xo', 1.0): 2, ('oven', 1.0): 3, ('roast', 1.0): 2, ('garlic', 1.0): 1, ('oliv', 1.0): 1, ('oil', 1.0): 4, ('dri', 1.0): 5, ('tomato', 1.0): 1, ('basil', 1.0): 1, ('centuri', 1.0): 1, ('tuna', 1.0): 1, ('right', 1.0): 47, ('back', 1.0): 98, ('atchya', 1.0): 1, ('even', 1.0): 35, ('almost', 1.0): 10, ('chanc', 1.0): 6, ('cheer', 1.0): 20, ('po', 1.0): 4, ('ice', 1.0): 6, ('cream', 1.0): 6, ('agre', 1.0): 16, ('100', 1.0): 8, ('heheheh', 1.0): 2, ('that', 1.0): 13, ('point', 1.0): 13, ('stay', 1.0): 25, ('home', 1.0): 31, ('soon', 1.0): 47, ('promis', 1.0): 6, ('web', 1.0): 4, ('whatsapp', 1.0): 5, ('volta', 1.0): 1, ('funcionar', 1.0): 1, ('com', 1.0): 2, ('iphon', 1.0): 7, ('jailbroken', 1.0): 1, ('later', 1.0): 16, ('34', 1.0): 3, ('min', 1.0): 9, ('leia', 1.0): 1, ('appear', 1.0): 3, ('hologram', 1.0): 1, ('r2d2', 1.0): 1, ('w', 1.0): 18, ('messag', 1.0): 10, ('obi', 1.0): 1, ('wan', 1.0): 3, ('sit', 1.0): 8, ('luke', 1.0): 6, ('inter', 1.0): 1, ('3', 1.0): 32, ('ucl', 1.0): 1, ('arsen', 1.0): 2, ('small', 1.0): 4, ('team', 1.0): 29, ('pass', 1.0): 12, ('🚂', 1.0): 1, ('dewsburi', 1.0): 2, ('railway', 1.0): 1, ('station', 1.0): 4, ('dew', 1.0): 1, ('west', 1.0): 3, ('yorkshir', 1.0): 2, ('430', 1.0): 1, ('smh', 1.0): 2, ('9:25', 1.0): 1, ('live', 1.0): 26, ('strang', 1.0): 4, ('imagin', 1.0): 5, ('megan', 1.0): 1, ('masaantoday', 1.0): 6, ('a4', 1.0): 3, ('shweta', 1.0): 1, ('tripathi', 1.0): 1, ('5', 1.0): 17, ('20', 1.0): 6, ('kurta', 1.0): 3, ('half', 1.0): 7, ('number', 1.0): 13, ('wsalelov', 1.0): 16, ('ah', 1.0): 13, ('larri', 1.0): 3, ('anyway', 1.0): 16, ('kinda', 1.0): 13, ('goood', 1.0): 4, ('life', 1.0): 49, ('enn', 1.0): 1, ('could', 1.0): 32, ('warmup', 1.0): 1, ('15th', 1.0): 2, ('bath', 1.0): 7, ('dum', 1.0): 2, ('andar', 1.0): 1, ('ram', 1.0): 1, ('sampath', 1.0): 1, ('sona', 1.0): 1, ('mohapatra', 1.0): 1, ('samantha', 1.0): 1, ('edward', 1.0): 1, ('mein', 1.0): 1, ('tulan', 1.0): 1, ('razi', 1.0): 2, ('wah', 1.0): 2, ('josh', 1.0): 1, ('alway', 1.0): 67, ('smile', 1.0): 62, ('pictur', 1.0): 12, ('16.20', 1.0): 1, ('giveitup', 1.0): 1, ('given', 1.0): 3, ('ga', 1.0): 3, ('subsidi', 1.0): 1, ('initi', 1.0): 4, ('propos', 1.0): 3, ('delight', 1.0): 7, ('yesterday', 1.0): 7, ('x42', 1.0): 
1, ('lmaoo', 1.0): 2, ('song', 1.0): 22, ('ever', 1.0): 23, ('shall', 1.0): 6, ('littl', 1.0): 31, ('throwback', 1.0): 3, ('outli', 1.0): 1, ('island', 1.0): 5, ('cheung', 1.0): 1, ('chau', 1.0): 1, ('mui', 1.0): 1, ('wo', 1.0): 1, ('total', 1.0): 9, ('differ', 1.0): 11, ('kfckitchentour', 1.0): 2, ('kitchen', 1.0): 4, ('clean', 1.0): 1, ("i'm", 1.0): 183, ('cusp', 1.0): 1, ('test', 1.0): 7, ('water', 1.0): 8, ('reward', 1.0): 1, ('arummzz', 1.0): 2, ("let'", 1.0): 23, ('drive', 1.0): 11, ('travel', 1.0): 20, ('yogyakarta', 1.0): 3, ('jeep', 1.0): 3, ('indonesia', 1.0): 4, ('instamood', 1.0): 3, ('wanna', 1.0): 30, ('skype', 1.0): 3, ('may', 1.0): 22, ('nice', 1.0): 98, ('friendli', 1.0): 2, ('pretend', 1.0): 2, ('film', 1.0): 9, ('congratul', 1.0): 15, ('winner', 1.0): 4, ('cheesydelight', 1.0): 1, ('contest', 1.0): 6, ('address', 1.0): 10, ('guy', 1.0): 60, ('market', 1.0): 5, ('24/7', 1.0): 1, ('14', 1.0): 1, ('hour', 1.0): 27, ('leav', 1.0): 12, ('without', 1.0): 12, ('delay', 1.0): 2, ('actual', 1.0): 19, ('easi', 1.0): 9, ('guess', 1.0): 14, ('train', 1.0): 10, ('wd', 1.0): 1, ('shift', 1.0): 5, ('engin', 1.0): 2, ('etc', 1.0): 2, ('sunburn', 1.0): 1, ('peel', 1.0): 2, ('blog', 1.0): 31, ('huge', 1.0): 11, ('warm', 1.0): 6, ('☆', 1.0): 3, ('complet', 1.0): 11, ('triangl', 1.0): 2, ('northern', 1.0): 1, ('ireland', 1.0): 2, ('sight', 1.0): 1, ('smthng', 1.0): 2, ('fr', 1.0): 3, ('hug', 1.0): 13, ('xoxo', 1.0): 3, ('uu', 1.0): 1, ('jaann', 1.0): 1, ('topnewfollow', 1.0): 2, ('connect', 1.0): 14, ('wonder', 1.0): 35, ('made', 1.0): 53, ('fluffi', 1.0): 1, ('insid', 1.0): 8, ('pirouett', 1.0): 1, ('moos', 1.0): 1, ('trip', 1.0): 14, ('philli', 1.0): 1, ('decemb', 1.0): 3, ("i'd", 1.0): 20, ('dude', 1.0): 6, ('x41', 1.0): 1, ('question', 1.0): 17, ('flaw', 1.0): 1, ('pain', 1.0): 9, ('negat', 1.0): 1, ('strength', 1.0): 3, ('went', 1.0): 12, ('solo', 1.0): 4, ('move', 1.0): 12, ('fav', 1.0): 13, ('nirvana', 1.0): 1, ('smell', 1.0): 2, ('teen', 1.0): 3, ('spirit', 1.0): 3, ('rip', 1.0): 3, ('ami', 1.0): 4, ('winehous', 1.0): 1, ('coupl', 1.0): 9, ('tomhiddleston', 1.0): 1, ('elizabetholsen', 1.0): 1, ('yaytheylookgreat', 1.0): 1, ('goodnight', 1.0): 24, ('vid', 1.0): 11, ('wake', 1.0): 12, ('gonna', 1.0): 21, ('shoot', 1.0): 6, ('itti', 1.0): 2, ('bitti', 1.0): 2, ('teeni', 1.0): 2, ('bikini', 1.0): 3, ('much', 1.0): 89, ('4th', 1.0): 4, ('togeth', 1.0): 7, ('end', 1.0): 20, ('xfile', 1.0): 1, ('content', 1.0): 4, ('rain', 1.0): 21, ('fabul', 1.0): 5, ('fantast', 1.0): 13, ('♡', 1.0): 20, ('jb', 1.0): 1, ('forev', 1.0): 5, ('belieb', 1.0): 3, ('nighti', 1.0): 1, ('bug', 1.0): 3, ('bite', 1.0): 1, ('bracelet', 1.0): 2, ('idea', 1.0): 26, ('foundri', 1.0): 1, ('game', 1.0): 27, ('sens', 1.0): 7, ('pic', 1.0): 27, ('ef', 1.0): 1, ('phone', 1.0): 19, ('woot', 1.0): 2, ('derek', 1.0): 1, ('use', 1.0): 44, ('parkshar', 1.0): 1, ('gloucestershir', 1.0): 1, ('aaaahhh', 1.0): 1, ('man', 1.0): 23, ('traffic', 1.0): 2, ('stress', 1.0): 8, ('reliev', 1.0): 1, ("how'r", 1.0): 1, ('arbeloa', 1.0): 1, ('turn', 1.0): 15, ('17', 1.0): 4, ('omg', 1.0): 15, ('say', 1.0): 61, ('europ', 1.0): 1, ('rise', 1.0): 2, ('find', 1.0): 23, ('hard', 1.0): 12, ('believ', 1.0): 9, ('uncount', 1.0): 1, ('coz', 1.0): 3, ('unlimit', 1.0): 1, ('cours', 1.0): 18, ('teamposit', 1.0): 1, ('aldub', 1.0): 2, ('☕', 1.0): 3, ('rita', 1.0): 2, ('info', 1.0): 13, ("we'd", 1.0): 4, ('way', 1.0): 46, ('boy', 1.0): 21, ('x40', 1.0): 1, ('true', 1.0): 22, ('sethi', 1.0): 2, ('high', 1.0): 7, ('exe', 1.0): 1, ('skeem', 1.0): 1, ('saam', 
1.0): 1, ('peopl', 1.0): 48, ('polit', 1.0): 2, ('izzat', 1.0): 1, ('wese', 1.0): 1, ('trust', 1.0): 9, ('khawateen', 1.0): 1, ('k', 1.0): 9, ('sath', 1.0): 2, ('mana', 1.0): 1, ('kar', 1.0): 1, ('deya', 1.0): 1, ('sort', 1.0): 9, ('smart', 1.0): 5, ('hair', 1.0): 12, ('tbh', 1.0): 5, ('jacob', 1.0): 2, ('g', 1.0): 10, ('upgrad', 1.0): 6, ('tee', 1.0): 2, ('famili', 1.0): 19, ('person', 1.0): 19, ('two', 1.0): 22, ('convers', 1.0): 6, ('onlin', 1.0): 7, ('mclaren', 1.0): 1, ('fridayfeel', 1.0): 5, ('tgif', 1.0): 10, ('squar', 1.0): 1, ('enix', 1.0): 1, ('bissmillah', 1.0): 1, ('ya', 1.0): 23, ('allah', 1.0): 3, ("we'r", 1.0): 29, ('socent', 1.0): 1, ('startup', 1.0): 2, ('drop', 1.0): 9, ('your', 1.0): 3, ('arnd', 1.0): 1, ('town', 1.0): 5, ('basic', 1.0): 4, ('piss', 1.0): 3, ('cup', 1.0): 4, ('also', 1.0): 35, ('terribl', 1.0): 2, ('complic', 1.0): 1, ('discuss', 1.0): 3, ('snapchat', 1.0): 36, ('lynettelow', 1.0): 1, ('kikmenow', 1.0): 3, ('snapm', 1.0): 2, ('hot', 1.0): 24, ('amazon', 1.0): 1, ('kikmeguy', 1.0): 3, ('defin', 1.0): 2, ('grow', 1.0): 7, ('sport', 1.0): 4, ('rt', 1.0): 12, ('rakyat', 1.0): 1, ('write', 1.0): 13, ('sinc', 1.0): 15, ('mention', 1.0): 24, ('fli', 1.0): 5, ('fish', 1.0): 3, ('promot', 1.0): 5, ('post', 1.0): 21, ('cyber', 1.0): 1, ('ourdaughtersourprid', 1.0): 5, ('mypapamyprid', 1.0): 2, ('papa', 1.0): 2, ('coach', 1.0): 2, ('posit', 1.0): 8, ('kha', 1.0): 1, ('atleast', 1.0): 2, ('x39', 1.0): 1, ('mango', 1.0): 1, ("lassi'", 1.0): 1, ("monty'", 1.0): 1, ('marvel', 1.0): 2, ('though', 1.0): 19, ('suspect', 1.0): 3, ('meant', 1.0): 3, ('24', 1.0): 4, ('hr', 1.0): 2, ('touch', 1.0): 15, ('kepler', 1.0): 4, ('452b', 1.0): 5, ('chalna', 1.0): 1, ('hai', 1.0): 11, ('thankyou', 1.0): 14, ('hazel', 1.0): 1, ('food', 1.0): 6, ('brooklyn', 1.0): 1, ('pta', 1.0): 2, ('awak', 1.0): 10, ('okayi', 1.0): 2, ('awww', 1.0): 15, ('ha', 1.0): 23, ('doc', 1.0): 1, ('splendid', 1.0): 1, ('spam', 1.0): 1, ('folder', 1.0): 1, ('amount', 1.0): 1, ('nigeria', 1.0): 1, ('claim', 1.0): 1, ('rted', 1.0): 1, ('leg', 1.0): 5, ('hurt', 1.0): 8, ('bad', 1.0): 18, ('mine', 1.0): 14, ('saturday', 1.0): 8, ('thaaank', 1.0): 1, ('puhon', 1.0): 1, ('happinesss', 1.0): 1, ('tnc', 1.0): 1, ('prior', 1.0): 1, ('notif', 1.0): 2, ('fat', 1.0): 1, ('co', 1.0): 1, ('probabl', 1.0): 9, ('ate', 1.0): 4, ('yuna', 1.0): 2, ('tamesid', 1.0): 1, ('´', 1.0): 3, ('googl', 1.0): 6, ('account', 1.0): 19, ('scouser', 1.0): 1, ('everyth', 1.0): 13, ('zoe', 1.0): 2, ('mate', 1.0): 7, ('liter', 1.0): 6, ("they'r", 1.0): 12, ('samee', 1.0): 1, ('edgar', 1.0): 1, ('updat', 1.0): 13, ('log', 1.0): 4, ('bring', 1.0): 17, ('abe', 1.0): 1, ('meet', 1.0): 34, ('x38', 1.0): 1, ('sigh', 1.0): 3, ('dreamili', 1.0): 1, ('pout', 1.0): 1, ('eye', 1.0): 14, ('quacketyquack', 1.0): 7, ('funni', 1.0): 19, ('happen', 1.0): 16, ('phil', 1.0): 1, ('em', 1.0): 3, ('del', 1.0): 1, ('rodder', 1.0): 1, ('els', 1.0): 10, ('play', 1.0): 46, ('newest', 1.0): 1, ('gamejam', 1.0): 1, ('irish', 1.0): 2, ('literatur', 1.0): 2, ('inaccess', 1.0): 2, ("kareena'", 1.0): 2, ('fan', 1.0): 30, ('brain', 1.0): 13, ('dot', 1.0): 11, ('braindot', 1.0): 11, ('fair', 1.0): 5, ('rush', 1.0): 1, ('either', 1.0): 11, ('brandi', 1.0): 1, ('18', 1.0): 5, ('carniv', 1.0): 1, ('men', 1.0): 10, ('put', 1.0): 17, ('mask', 1.0): 3, ('xavier', 1.0): 1, ('forneret', 1.0): 1, ('jennif', 1.0): 1, ('site', 1.0): 9, ('free', 1.0): 37, ('50.000', 1.0): 3, ('8', 1.0): 10, ('ball', 1.0): 7, ('pool', 1.0): 5, ('coin', 1.0): 5, ('edit', 1.0): 7, ('trish', 1.0): 1, ('♥', 
1.0): 19, ('grate', 1.0): 5, ('three', 1.0): 10, ('comment', 1.0): 8, ('wakeup', 1.0): 1, ('besid', 1.0): 2, ('dirti', 1.0): 2, ('sex', 1.0): 6, ('lmaooo', 1.0): 1, ('😤', 1.0): 2, ('loui', 1.0): 4, ("he'", 1.0): 11, ('throw', 1.0): 3, ('caus', 1.0): 15, ('inspir', 1.0): 7, ('ff', 1.0): 48, ('twoof', 1.0): 3, ('gr8', 1.0): 1, ('wkend', 1.0): 3, ('kind', 1.0): 24, ('exhaust', 1.0): 2, ('word', 1.0): 20, ('cheltenham', 1.0): 1, ('area', 1.0): 4, ('kale', 1.0): 1, ('crisp', 1.0): 1, ('ruin', 1.0): 5, ('x37', 1.0): 1, ('open', 1.0): 12, ('worldwid', 1.0): 2, ('outta', 1.0): 1, ('sfvbeta', 1.0): 1, ('vantast', 1.0): 1, ('xcylin', 1.0): 1, ('bundl', 1.0): 1, ('show', 1.0): 28, ('internet', 1.0): 2, ('price', 1.0): 4, ('realisticli', 1.0): 1, ('pay', 1.0): 8, ('net', 1.0): 1, ('educ', 1.0): 1, ('power', 1.0): 7, ('weapon', 1.0): 1, ('nelson', 1.0): 1, ('mandela', 1.0): 1, ('recent', 1.0): 9, ('j', 1.0): 3, ('chenab', 1.0): 1, ('flow', 1.0): 5, ('pakistan', 1.0): 2, ('incredibleindia', 1.0): 1, ('teenchoic', 1.0): 10, ('choiceinternationalartist', 1.0): 9, ('superjunior', 1.0): 9, ('caught', 1.0): 4, ('first', 1.0): 50, ('salmon', 1.0): 3, ('super-blend', 1.0): 1, ('project', 1.0): 6, ('[email protected]', 1.0): 1, ('awesom', 1.0): 42, ('stream', 1.0): 14, ('alma', 1.0): 1, ('mater', 1.0): 1, ('highschoolday', 1.0): 1, ('clientvisit', 1.0): 1, ('faith', 1.0): 3, ('christian', 1.0): 1, ('school', 1.0): 9, ('lizaminnelli', 1.0): 1, ('upcom', 1.0): 2, ('uk', 1.0): 4, ('😄', 1.0): 5, ('singl', 1.0): 6, ('hill', 1.0): 4, ('everi', 1.0): 26, ('beat', 1.0): 10, ('wrong', 1.0): 10, ('readi', 1.0): 25, ('natur', 1.0): 1, ('pefumeri', 1.0): 1, ('workshop', 1.0): 3, ('neal', 1.0): 1, ('yard', 1.0): 1, ('covent', 1.0): 1, ('tomorrow', 1.0): 40, ('fback', 1.0): 27, ('indo', 1.0): 1, ('harmo', 1.0): 1, ('americano', 1.0): 1, ('rememb', 1.0): 16, ('aww', 1.0): 10, ('head', 1.0): 14, ('saw', 1.0): 19, ('dark', 1.0): 6, ('handshom', 1.0): 1, ('juga', 1.0): 1, ('hurray', 1.0): 1, ('hate', 1.0): 13, ('cant', 1.0): 15, ('decid', 1.0): 4, ('save', 1.0): 12, ('list', 1.0): 15, ('hiya', 1.0): 4, ('exec', 1.0): 1, ('[email protected]', 1.0): 1, ('photo', 1.0): 19, ('thx', 1.0): 15, ('4', 1.0): 24, ('china', 1.0): 2, ('homosexu', 1.0): 1, ('hyungbot', 1.0): 1, ('give', 1.0): 48, ('fam', 1.0): 5, ('mind', 1.0): 23, ('timetunnel', 1.0): 1, ('1982', 1.0): 1, ('quit', 1.0): 13, ('radio', 1.0): 5, ('set', 1.0): 11, ('heart', 1.0): 11, ('hiii', 1.0): 2, ('jack', 1.0): 3, ('ili', 1.0): 5, ('✨', 1.0): 4, ('domino', 1.0): 1, ('pub', 1.0): 1, ('heat', 1.0): 1, ('prob', 1.0): 5, ('sorri', 1.0): 22, ('hastili', 1.0): 1, ('type', 1.0): 6, ('came', 1.0): 7, ('pakistani', 1.0): 1, ('x36', 1.0): 1, ('3point', 1.0): 1, ('dreamteam', 1.0): 1, ('gooo', 1.0): 2, ('bailey', 1.0): 2, ('pbb', 1.0): 4, ('737gold', 1.0): 3, ('drank', 1.0): 2, ('old', 1.0): 13, ('gotten', 1.0): 2, ('1/2', 1.0): 1, ('welsh', 1.0): 1, ('wale', 1.0): 3, ('yippe', 1.0): 1, ('💟', 1.0): 4, ('bro', 1.0): 24, ('lord', 1.0): 4, ('michael', 1.0): 4, ("u'r", 1.0): 1, ('ure', 1.0): 1, ('bigot', 1.0): 1, ('usual', 1.0): 6, ('front', 1.0): 4, ('squat', 1.0): 1, ('dobar', 1.0): 1, ('dan', 1.0): 5, ('brand', 1.0): 8, ('heavi', 1.0): 2, ('musicolog', 1.0): 1, ('2015', 1.0): 16, ('spend', 1.0): 2, ('marathon', 1.0): 1, ('iflix', 1.0): 2, ('offici', 1.0): 10, ('graduat', 1.0): 3, ('cri', 1.0): 9, ('__', 1.0): 1, ('yep', 1.0): 9, ('expert', 1.0): 4, ('bisexu', 1.0): 1, ('minal', 1.0): 1, ('aidzin', 1.0): 1, ('yo', 1.0): 7, ('pi', 1.0): 1, ('cook', 1.0): 2, ('book', 1.0): 21, ('dinner', 
1.0): 7, ('tough', 1.0): 2, ('choic', 1.0): 8, ('other', 1.0): 12, ('chill', 1.0): 6, ('smu', 1.0): 1, ('oval', 1.0): 1, ('basketbal', 1.0): 1, ('player', 1.0): 4, ('whahahaha', 1.0): 1, ('soamaz', 1.0): 1, ('moment', 1.0): 12, ('onto', 1.0): 3, ('a5', 1.0): 1, ('wardrob', 1.0): 2, ('user', 1.0): 3, ('teamr', 1.0): 1, ('appar', 1.0): 6, ('depend', 1.0): 2, ('greatli', 1.0): 1, ('design', 1.0): 21, ('ahhh', 1.0): 1, ('7th', 1.0): 1, ('cinepambata', 1.0): 1, ('mechan', 1.0): 1, ('form', 1.0): 4, ('download', 1.0): 10, ('ur', 1.0): 38, ('swisher', 1.0): 1, ('cop', 1.0): 1, ('ducktail', 1.0): 1, ('surreal', 1.0): 3, ('exposur', 1.0): 1, ('sotw', 1.0): 1, ('halesowen', 1.0): 1, ('blackcountryfair', 1.0): 1, ('street', 1.0): 1, ('assess', 1.0): 1, ('mental', 1.0): 4, ('bodi', 1.0): 15, ('ooz', 1.0): 1, ('appeal', 1.0): 1, ('amassiveoverdoseofship', 1.0): 1, ('latest', 1.0): 5, ('isi', 1.0): 1, ('chan', 1.0): 1, ('c', 1.0): 9, ('note', 1.0): 6, ('pkwalasawa', 1.0): 1, ('gemma', 1.0): 1, ('orlean', 1.0): 1, ('fever', 1.0): 2, ('geskenya', 1.0): 1, ('obamainkenya', 1.0): 1, ('magicalkenya', 1.0): 1, ('greatkenya', 1.0): 1, ('allgoodthingsk', 1.0): 1, ('anim', 1.0): 6, ('umaru', 1.0): 1, ('singer', 1.0): 4, ('ship', 1.0): 8, ('order', 1.0): 17, ('room', 1.0): 5, ('car', 1.0): 6, ('gone', 1.0): 5, ('hahaha', 1.0): 14, ('stori', 1.0): 11, ('relat', 1.0): 4, ('label', 1.0): 1, ('worst', 1.0): 3, ('batch', 1.0): 1, ('princip', 1.0): 1, ('due', 1.0): 3, ('march', 1.0): 1, ('wooftast', 1.0): 2, ('receiv', 1.0): 8, ('necessari', 1.0): 1, ('regret', 1.0): 4, ('rn', 1.0): 4, ('whatev', 1.0): 5, ('hat', 1.0): 1, ('success', 1.0): 6, ('abstin', 1.0): 1, ('wtf', 1.0): 3, ("there'", 1.0): 11, ('thrown', 1.0): 1, ('middl', 1.0): 2, ('repeat', 1.0): 3, ('relentlessli', 1.0): 1, ('approxim', 1.0): 1, ('oldschool', 1.0): 1, ('runescap', 1.0): 1, ('daaay', 1.0): 1, ('jumma_mubarik', 1.0): 1, ('frnd', 1.0): 1, ('stay_bless', 1.0): 1, ('bless', 1.0): 12, ('pussycat', 1.0): 1, ('main', 1.0): 7, ('launch', 1.0): 4, ('pretoria', 1.0): 1, ('fahrinahmad', 1.0): 1, ('tengkuaaronshah', 1.0): 1, ('eksperimencinta', 1.0): 1, ('tykkäsin', 1.0): 1, ('videosta', 1.0): 1, ('month', 1.0): 13, ('hoodi', 1.0): 2, ('eeep', 1.0): 1, ('yay', 1.0): 16, ('sohappyrightnow', 1.0): 1, ('mmm', 1.0): 1, ('azz-set', 1.0): 1, ('babe', 1.0): 9, ('feedback', 1.0): 11, ('gain', 1.0): 6, ('valu', 1.0): 2, ('peac', 1.0): 8, ('refresh', 1.0): 5, ('manthan', 1.0): 1, ('tune', 1.0): 5, ('fresh', 1.0): 6, ('mother', 1.0): 5, ('determin', 1.0): 2, ('maxfreshmov', 1.0): 2, ('loneliest', 1.0): 1, ('tattoo', 1.0): 3, ('friday.and', 1.0): 1, ('magnific', 1.0): 2, ('e', 1.0): 5, ('achiev', 1.0): 2, ('rashmi', 1.0): 1, ('dedic', 1.0): 2, ('happyfriday', 1.0): 6, ('nearli', 1.0): 4, ('retweet', 1.0): 35, ('alert', 1.0): 1, ('da', 1.0): 5, ('dang', 1.0): 2, ('rad', 1.0): 2, ('fanart', 1.0): 1, ('massiv', 1.0): 1, ('niamh', 1.0): 1, ('fennel', 1.0): 1, ('journal', 1.0): 1, ('land', 1.0): 2, ('copi', 1.0): 5, ('past', 1.0): 7, ('tweet', 1.0): 61, ('yesss', 1.0): 5, ('ariana', 1.0): 2, ('selena', 1.0): 2, ('gomez', 1.0): 1, ('tomlinson', 1.0): 1, ('payn', 1.0): 1, ('caradelevingn', 1.0): 1, ('🌷', 1.0): 1, ('trade', 1.0): 3, ('tire', 1.0): 5, ('nope', 1.0): 7, ('appli', 1.0): 6, ('iamca', 1.0): 1, ('found', 1.0): 15, ('afti', 1.0): 1, ('goodmorn', 1.0): 8, ('prokabaddi', 1.0): 1, ('koel', 1.0): 1, ('mallick', 1.0): 1, ('recit', 1.0): 4, ('nation', 1.0): 3, ('anthem', 1.0): 1, ('6', 1.0): 23, ('yournaturallead', 1.0): 1, ('youngnaturallead', 1.0): 1, ('mon', 1.0): 3, 
('27juli', 1.0): 1, ('cumbria', 1.0): 1, ('flockstar', 1.0): 1, ('thur', 1.0): 2, ('30juli', 1.0): 1, ('itv', 1.0): 1, ('sleeptight', 1.0): 1, ('haveagoodday', 1.0): 1, ('septemb', 1.0): 5, ('perhap', 1.0): 3, ('bb', 1.0): 4, ('full', 1.0): 19, ('album', 1.0): 6, ('fulli', 1.0): 2, ('intend', 1.0): 1, ('possibl', 1.0): 7, ('attack', 1.0): 3, ('>:d', 1.0): 4, ('bird', 1.0): 4, ('teamadmicro', 1.0): 1, ('fridaydownpour', 1.0): 1, ('clear', 1.0): 4, ('rohit', 1.0): 1, ('queen', 1.0): 8, ('otwolgrandtrail', 1.0): 3, ('sheer', 1.0): 1, ('fact', 1.0): 8, ('obama', 1.0): 1, ('innumer', 1.0): 1, ('presid', 1.0): 2, ('ni', 1.0): 3, ('shauri', 1.0): 1, ('yako', 1.0): 1, ('memotohat', 1.0): 1, ('sunday', 1.0): 9, ('pamper', 1.0): 2, ("t'wa", 1.0): 1, ('cabincrew', 1.0): 1, ('interview', 1.0): 5, ('langkawi', 1.0): 1, ('1st', 1.0): 1, ('august', 1.0): 7, ('fulfil', 1.0): 5, ('fantasi', 1.0): 6, ('👉', 1.0): 6, ('ex-tweleb', 1.0): 1, ('apart', 1.0): 2, ('makeov', 1.0): 1, ('brilliantli', 1.0): 1, ('happyyi', 1.0): 1, ('birthdaaayyy', 1.0): 2, ('kill', 1.0): 3, ('interest', 1.0): 20, ('internship', 1.0): 3, ('program', 1.0): 5, ('sadli', 1.0): 1, ('career', 1.0): 3, ('page', 1.0): 9, ('issu', 1.0): 10, ('sad', 1.0): 5, ('overwhelmingli', 1.0): 1, ('aha', 1.0): 2, ('beaut', 1.0): 2, ('♬', 1.0): 2, ('win', 1.0): 16, ('deo', 1.0): 1, ('faaabul', 1.0): 1, ('freebiefriday', 1.0): 4, ('aluminiumfre', 1.0): 1, ('stayfresh', 1.0): 1, ('john', 1.0): 6, ('worri', 1.0): 18, ('navig', 1.0): 1, ('thnk', 1.0): 1, ('progrmr', 1.0): 1, ('9pm', 1.0): 1, ('9am', 1.0): 2, ('hardli', 1.0): 1, ('rose', 1.0): 4, ('emot', 1.0): 3, ('poetri', 1.0): 1, ('frequentfly', 1.0): 1, ('break', 1.0): 10, ('apolog', 1.0): 4, ('kb', 1.0): 1, ('londondairi', 1.0): 1, ('icecream', 1.0): 2, ('experi', 1.0): 7, ('cover', 1.0): 9, ('sin', 1.0): 1, ('excit', 1.0): 33, (":')", 1.0): 2, ('xxx', 1.0): 15, ('jim', 1.0): 1, ('chuckl', 1.0): 1, ('cake', 1.0): 10, ('doh', 1.0): 1, ('500', 1.0): 2, ('subscrib', 1.0): 2, ('reach', 1.0): 1, ('scorch', 1.0): 1, ('summer', 1.0): 17, ('younger', 1.0): 4, ('woman', 1.0): 4, ('stamina', 1.0): 1, ('expect', 1.0): 6, ('anyth', 1.0): 22, ('less', 1.0): 8, ('tweeti', 1.0): 1, ('fab', 1.0): 12, ('dont', 1.0): 13, ('-->', 1.0): 2, ('10', 1.0): 16, ('loner', 1.0): 3, ('introduc', 1.0): 3, ('vs', 1.0): 4, ('alter', 1.0): 1, ('understand', 1.0): 6, ('spread', 1.0): 8, ('problem', 1.0): 19, ('supa', 1.0): 1, ('dupa', 1.0): 1, ('near', 1.0): 6, ('dartmoor', 1.0): 1, ('gold', 1.0): 7, ('colour', 1.0): 4, ('ok', 1.0): 38, ('someday', 1.0): 4, ('r', 1.0): 14, ('dii', 1.0): 1, ('n', 1.0): 17, ('forget', 1.0): 17, ('si', 1.0): 4, ('smf', 1.0): 1, ('ft', 1.0): 4, ('japanes', 1.0): 3, ('import', 1.0): 5, ('kitti', 1.0): 1, ('match', 1.0): 6, ('stationari', 1.0): 1, ('draw', 1.0): 6, ('close', 1.0): 14, ('broken', 1.0): 3, ('specialis', 1.0): 4, ('thermal', 1.0): 4, ('imag', 1.0): 6, ('survey', 1.0): 4, ('–', 1.0): 14, ('south', 1.0): 2, ('korea', 1.0): 3, ('scamper', 1.0): 1, ('slept', 1.0): 4, ('alarm', 1.0): 1, ("ain't", 1.0): 5, ('mad', 1.0): 4, ('chweina', 1.0): 1, ('xd', 1.0): 4, ('jotzh', 1.0): 1, ('wast', 1.0): 7, ('place', 1.0): 21, ('worth', 1.0): 11, ('coat', 1.0): 3, ('beforehand', 1.0): 1, ('tho', 1.0): 12, ('foh', 1.0): 2, ('outsid', 1.0): 5, ('holiday', 1.0): 11, ('menac', 1.0): 1, ('jojo', 1.0): 2, ('ta', 1.0): 2, ('accept', 1.0): 1, ('admin', 1.0): 2, ('lukri', 1.0): 1, ('😘', 1.0): 10, ('momma', 1.0): 2, ('bear', 1.0): 2, ('❤', 1.0): 29, ('️', 1.0): 20, ('redid', 1.0): 1, ('8th', 1.0): 1, ('v.ball', 1.0): 1, 
('atm', 1.0): 4, ('build', 1.0): 8, ('pack', 1.0): 8, ('suitcas', 1.0): 2, ('hang-copi', 1.0): 1, ('translat', 1.0): 1, ("dostoevsky'", 1.0): 1, ('voucher', 1.0): 2, ('bugatti', 1.0): 1, ('bra', 1.0): 3, ('مطعم_هاشم', 1.0): 1, ('yummi', 1.0): 3, ('a7la', 1.0): 1, ('bdayt', 1.0): 1, ('mnwreeen', 1.0): 1, ('jazz', 1.0): 2, ('truck', 1.0): 1, ('x34', 1.0): 1, ('speak', 1.0): 8, ('pbevent', 1.0): 1, ('hq', 1.0): 1, ('add', 1.0): 22, ('yoona', 1.0): 1, ('hairpin', 1.0): 1, ('otp', 1.0): 1, ('collect', 1.0): 7, ('mastership', 1.0): 1, ('honey', 1.0): 4, ('paindo', 1.0): 1, ('await', 1.0): 1, ('report', 1.0): 3, ('manni', 1.0): 1, ('asshol', 1.0): 3, ('brijresid', 1.0): 1, ('structur', 1.0): 1, ('156', 1.0): 1, ('unit', 1.0): 3, ('encompass', 1.0): 1, ('bhk', 1.0): 1, ('flat', 1.0): 2, ('91', 1.0): 2, ('975-580-', 1.0): 1, ('444', 1.0): 1, ('honor', 1.0): 2, ('curri', 1.0): 2, ('clash', 1.0): 1, ('milano', 1.0): 1, ('👌', 1.0): 1, ('followback', 1.0): 6, (':-d', 1.0): 5, ('legit', 1.0): 1, ('loser', 1.0): 5, ('gass', 1.0): 1, ('dead', 1.0): 4, ('starsquad', 1.0): 4, ('⭐', 1.0): 3, ('news', 1.0): 25, ('utc', 1.0): 1, ('flume', 1.0): 1, ('kaytranada', 1.0): 1, ('alunageorg', 1.0): 1, ('ticket', 1.0): 12, ('km', 1.0): 1, ('certainti', 1.0): 1, ('solv', 1.0): 2, ('faster', 1.0): 3, ('👊', 1.0): 2, ('hurri', 1.0): 5, ('totem', 1.0): 1, ('somewher', 1.0): 5, ('alic', 1.0): 4, ('dog', 1.0): 6, ('cat', 1.0): 5, ('goodwynsgoodi', 1.0): 1, ('ugh', 1.0): 1, ('fade', 1.0): 2, ('moan', 1.0): 1, ('leed', 1.0): 1, ('jozi', 1.0): 1, ('wasnt', 1.0): 2, ('fifth', 1.0): 2, ('avail', 1.0): 10, ('tix', 1.0): 2, ('pa', 1.0): 2, ('ba', 1.0): 2, ('ng', 1.0): 2, ('atl', 1.0): 1, ('coldplay', 1.0): 1, ('favorit', 1.0): 14, ('scientist', 1.0): 1, ('yellow', 1.0): 2, ('atla', 1.0): 1, ('yein', 1.0): 1, ('selo', 1.0): 1, ('jabongatpumaurbanstamped', 1.0): 4, ('an', 1.0): 3, ('7', 1.0): 8, ('waiter', 1.0): 1, ('bill', 1.0): 5, ('sir', 1.0): 12, ('titl', 1.0): 2, ('pocket', 1.0): 1, ('wrip', 1.0): 1, ('jean', 1.0): 1, ('conni', 1.0): 2, ('crew', 1.0): 3, ('staff', 1.0): 2, ('sweetan', 1.0): 1, ('ask', 1.0): 37, ('mum', 1.0): 2, ('beg', 1.0): 2, ('soprano', 1.0): 1, ('ukrain', 1.0): 2, ('x33', 1.0): 1, ('olli', 1.0): 2, ('disney.art', 1.0): 1, ('elmoprinssi', 1.0): 1, ('salsa', 1.0): 1, ('danc', 1.0): 2, ('tell', 1.0): 25, ('truth', 1.0): 4, ('pl', 1.0): 8, ('4-6', 1.0): 1, ('2nd', 1.0): 5, ('blogiversari', 1.0): 1, ('review', 1.0): 9, ('cuti', 1.0): 6, ('bohol', 1.0): 1, ('briliant', 1.0): 1, ('v', 1.0): 9, ('key', 1.0): 3, ('annual', 1.0): 1, ('far', 1.0): 19, ('spin', 1.0): 2, ('voic', 1.0): 3, ('\U000fe334', 1.0): 1, ('yeheyi', 1.0): 1, ('pinya', 1.0): 1, ('whoooah', 1.0): 1, ('tranc', 1.0): 1, ('lover', 1.0): 4, ('subject', 1.0): 7, ('physic', 1.0): 1, ('stop', 1.0): 15, ('ब', 1.0): 1, ('matter', 1.0): 6, ('jungl', 1.0): 1, ('accommod', 1.0): 1, ('secret', 1.0): 9, ('behind', 1.0): 3, ('sandroforceo', 1.0): 2, ('ceo', 1.0): 11, ('1month', 1.0): 11, ('swag', 1.0): 1, ('mia', 1.0): 1, ('workinprogress', 1.0): 1, ('choos', 1.0): 2, ('finnigan', 1.0): 1, ('loyal', 1.0): 2, ('royal', 1.0): 2, ('fotoset', 1.0): 1, ('reus', 1.0): 1, ('seem', 1.0): 10, ('somebodi', 1.0): 1, ('sell', 1.0): 1, ('young', 1.0): 3, ('muntu', 1.0): 1, ('anoth', 1.0): 23, ('gem', 1.0): 2, ('falco', 1.0): 1, ('supersmash', 1.0): 1, ('hotnsexi', 1.0): 1, ('friskyfriday', 1.0): 1, ('beach', 1.0): 4, ('movi', 1.0): 24, ('crop', 1.0): 2, ('nash', 1.0): 1, ('tissu', 1.0): 1, ('chocol', 1.0): 7, ('tea', 1.0): 6, ('hannib', 1.0): 3, ('episod', 1.0): 5, ('hotb', 
1.0): 1, ('bush', 1.0): 2, ('classicassur', 1.0): 1, ('thrill', 1.0): 2, ('intern', 1.0): 2, ('assign', 1.0): 1, ('aerial', 1.0): 1, ('camera', 1.0): 6, ('oper', 1.0): 1, ('boom', 1.0): 3, ('hong', 1.0): 1, ('kong', 1.0): 1, ('ferri', 1.0): 1, ('central', 1.0): 2, ('girlfriend', 1.0): 4, ('after-work', 1.0): 1, ('drink', 1.0): 8, ('dj', 1.0): 3, ('resto', 1.0): 1, ('drinkt', 1.0): 1, ('koffi', 1.0): 1, ('a6', 1.0): 1, ('stargat', 1.0): 1, ('atlanti', 1.0): 1, ('muaahhh', 1.0): 1, ('ohh', 1.0): 3, ('hii', 1.0): 2, ('🙈', 1.0): 1, ('di', 1.0): 5, ('nagsend', 1.0): 1, ('yung', 1.0): 1, ('ko', 1.0): 4, ('ulit', 1.0): 3, ('🎉', 1.0): 5, ('🎈', 1.0): 1, ('ugli', 1.0): 4, ('legget', 1.0): 1, ('qui', 1.0): 1, ('per', 1.0): 1, ('la', 1.0): 8, ('mar', 1.0): 1, ('encourag', 1.0): 3, ('employ', 1.0): 5, ('board', 1.0): 5, ('sticker', 1.0): 1, ('sponsor', 1.0): 4, ('prize', 1.0): 3, ('(:', 1.0): 1, ('milo', 1.0): 1, ('aurini', 1.0): 1, ('juicebro', 1.0): 1, ('pillar', 1.0): 2, ('respect', 1.0): 2, ('boii', 1.0): 1, ('smashingbook', 1.0): 1, ('bibl', 1.0): 2, ('ill', 1.0): 6, ('sick', 1.0): 4, ('lamo', 1.0): 1, ('fangirl', 1.0): 3, ('platon', 1.0): 1, ('scienc', 1.0): 5, ('resid', 1.0): 2, ('servicewithasmil', 1.0): 1, ('bloodlin', 1.0): 1, ('huski', 1.0): 1, ('obituari', 1.0): 1, ('advert', 1.0): 1, ('goofingaround', 1.0): 1, ('bollywood', 1.0): 1, ('giveaway', 1.0): 6, ('dah', 1.0): 2, ('noth', 1.0): 15, ('bitter', 1.0): 2, ('anger', 1.0): 1, ('hatr', 1.0): 2, ('toward', 1.0): 2, ('pure', 1.0): 2, ('indiffer', 1.0): 1, ('suit', 1.0): 5, ('zach', 1.0): 1, ('codi', 1.0): 2, ('deliv', 1.0): 3, ('ac', 1.0): 1, ('excel', 1.0): 6, ('produc', 1.0): 1, ('boggl', 1.0): 1, ('fatigu', 1.0): 1, ('baareeq', 1.0): 1, ('gamedev', 1.0): 2, ('hobbi', 1.0): 1, ('tweenie_fox', 1.0): 1, ('click', 1.0): 3, ('accessori', 1.0): 1, ('tamang', 1.0): 1, ('hinala', 1.0): 1, ('niam', 1.0): 1, ('selfiee', 1.0): 1, ('especi', 1.0): 4, ('lass', 1.0): 1, ('ale', 1.0): 1, ('swim', 1.0): 3, ('bout', 1.0): 3, ('goodby', 1.0): 5, ('feminist', 1.0): 1, ('fought', 1.0): 1, ('snobbi', 1.0): 1, ('bitch', 1.0): 3, ('carolin', 1.0): 2, ('mighti', 1.0): 1, ('🔥', 1.0): 1, ('threw', 1.0): 2, ('hbd', 1.0): 1, ('follback', 1.0): 19, ('jog', 1.0): 1, ('remot', 1.0): 2, ('newli', 1.0): 1, ('ebay', 1.0): 2, ('store', 1.0): 15, ('disneyinfin', 1.0): 1, ('starwar', 1.0): 1, ('charact', 1.0): 3, ('preorder', 1.0): 1, ('starter', 1.0): 1, ('hit', 1.0): 13, ('snap', 1.0): 4, ('homi', 1.0): 3, ('bought', 1.0): 4, ('skin', 1.0): 8, ('bday', 1.0): 11, ('chant', 1.0): 2, ('jai', 1.0): 1, ('itali', 1.0): 2, ('fast', 1.0): 4, ('heeeyyy', 1.0): 1, ('woah', 1.0): 3, ('★', 1.0): 5, ('😊', 1.0): 11, ('whenev', 1.0): 4, ('ang', 1.0): 2, ('kiss', 1.0): 4, ('philippin', 1.0): 2, ('packag', 1.0): 3, ('bruis', 1.0): 1, ('rib', 1.0): 2, ('😀', 1.0): 2, ('😁', 1.0): 6, ('😂', 1.0): 17, ('😃', 1.0): 1, ('😅', 1.0): 1, ('😉', 1.0): 2, ('tombraid', 1.0): 1, ('hype', 1.0): 1, ('thejuiceinthemix', 1.0): 1, ('rela', 1.0): 1, ('low', 1.0): 6, ('prioriti', 1.0): 1, ('harri', 1.0): 5, ('bc', 1.0): 9, ('collaps', 1.0): 2, ('chaotic', 1.0): 1, ('cosa', 1.0): 1, ('<---', 1.0): 2, ('alliter', 1.0): 1, ('oppayaa', 1.0): 1, ("how'", 1.0): 4, ('natgeo', 1.0): 1, ('lick', 1.0): 1, ('elbow', 1.0): 2, ('. 
.', 1.0): 2, ('“', 1.0): 7, ('emu', 1.0): 1, ('stoke', 1.0): 1, ('woke', 1.0): 5, ("people'", 1.0): 3, ('approv', 1.0): 6, ("god'", 1.0): 2, ('jisung', 1.0): 1, ('sunshin', 1.0): 7, ('mm', 1.0): 6, ('nicola', 1.0): 1, ('brighten', 1.0): 2, ('helen', 1.0): 3, ('brian', 1.0): 3, ('2-3', 1.0): 1, ('australia', 1.0): 5, ('ol', 1.0): 2, ('bone', 1.0): 1, ('creak', 1.0): 1, ('abuti', 1.0): 1, ('tweetland', 1.0): 1, ('android', 1.0): 3, ('xma', 1.0): 2, ('skyblock', 1.0): 1, ('bcaus', 1.0): 1, ('2009', 1.0): 1, ('die', 1.0): 10, ('twitch', 1.0): 5, ('sympathi', 1.0): 1, ('laugh', 1.0): 5, ('unniee', 1.0): 1, ('nuka', 1.0): 1, ('penacova', 1.0): 1, ('djset', 1.0): 1, ('edm', 1.0): 1, ('kizomba', 1.0): 1, ('latinhous', 1.0): 1, ('housemus', 1.0): 3, ('portug', 1.0): 1, ('wild', 1.0): 2, ('ride', 1.0): 6, ('anytim', 1.0): 6, ('tast', 1.0): 5, ('yer', 1.0): 2, ('mtn', 1.0): 2, ('maganda', 1.0): 1, ('mistress', 1.0): 2, ('saphir', 1.0): 1, ('busi', 1.0): 19, ('4000', 1.0): 1, ('instagram', 1.0): 7, ('among', 1.0): 5, ('coconut', 1.0): 1, ('sambal', 1.0): 1, ('mussel', 1.0): 1, ('recip', 1.0): 5, ('kalin', 1.0): 1, ('mixcloud', 1.0): 1, ('sarcasm', 1.0): 2, ('chelsea', 1.0): 3, ('he', 1.0): 2, ('useless', 1.0): 2, ('thursday', 1.0): 2, ('hang', 1.0): 3, ('hehe', 1.0): 10, ('said', 1.0): 16, ('benson', 1.0): 1, ('facebook', 1.0): 5, ('solid', 1.0): 1, ('16/17', 1.0): 1, ('30', 1.0): 3, ('°', 1.0): 1, ('😜', 1.0): 2, ('maryhick', 1.0): 1, ('kikmeboy', 1.0): 7, ('photooftheday', 1.0): 4, ('musicbiz', 1.0): 2, ('sheskindahot', 1.0): 1, ('fleekil', 1.0): 1, ('mbalula', 1.0): 1, ('africa', 1.0): 1, ('mexican', 1.0): 1, ('scar', 1.0): 1, ('offic', 1.0): 8, ('donut', 1.0): 2, ('foiegra', 1.0): 2, ('despit', 1.0): 2, ('weather', 1.0): 9, ('wed', 1.0): 5, ('toni', 1.0): 2, ('stark', 1.0): 1, ('incred', 1.0): 7, ('poem', 1.0): 2, ('bubbl', 1.0): 3, ('dale', 1.0): 1, ('billion', 1.0): 1, ('magic', 1.0): 5, ('op', 1.0): 3, ('cast', 1.0): 1, ('vote', 1.0): 9, ('elect', 1.0): 1, ('jcreport', 1.0): 1, ('piggin', 1.0): 1, ('botan', 1.0): 2, ('soap', 1.0): 4, ('late', 1.0): 13, ('upload', 1.0): 5, ('freshli', 1.0): 1, ('3week', 1.0): 1, ('heal', 1.0): 1, ('tobi-bro', 1.0): 1, ('isp', 1.0): 1, ('steel', 1.0): 1, ('wednesday', 1.0): 1, ('swear', 1.0): 3, ('met', 1.0): 4, ('earlier', 1.0): 4, ('cam', 1.0): 3, ('😭', 1.0): 2, ('except', 1.0): 2, ("masha'allah", 1.0): 1, ('french', 1.0): 5, ('wwat', 1.0): 2, ('franc', 1.0): 5, ('yaaay', 1.0): 3, ('beirut', 1.0): 2, ('coffe', 1.0): 11, ('panda', 1.0): 6, ('eonni', 1.0): 2, ('favourit', 1.0): 13, ('soda', 1.0): 1, ('fuller', 1.0): 1, ('shit', 1.0): 13, ('healthi', 1.0): 2, ('💓', 1.0): 2, ('rettweet', 1.0): 3, ('mvg', 1.0): 1, ('valuabl', 1.0): 1, ('madrid', 1.0): 3, ('sore', 1.0): 6, ('bergerac', 1.0): 1, ('u21', 1.0): 1, ('individu', 1.0): 2, ('adam', 1.0): 1, ("beach'", 1.0): 1, ('suicid', 1.0): 1, ('squad', 1.0): 1, ('fond', 1.0): 1, ('christoph', 1.0): 2, ('cocki', 1.0): 1, ('prove', 1.0): 3, ("attitude'", 1.0): 1, ('improv', 1.0): 3, ('suggest', 1.0): 6, ('date', 1.0): 12, ('inde', 1.0): 10, ('intellig', 1.0): 3, ('strong', 1.0): 7, ('cs', 1.0): 2, ('certain', 1.0): 2, ('exam', 1.0): 5, ('forgot', 1.0): 3, ('home-bas', 1.0): 1, ('knee', 1.0): 4, ('sale', 1.0): 3, ('fleur', 1.0): 1, ('dress', 1.0): 10, ('readystock_hijabmart', 1.0): 1, ('idr', 1.0): 2, ('325.000', 1.0): 1, ('200.000', 1.0): 1, ('tompolo', 1.0): 1, ('aim', 1.0): 1, ('cannot', 1.0): 4, ('buyer', 1.0): 3, ('disappoint', 1.0): 1, ('paper', 1.0): 4, ('slack', 1.0): 1, ('crack', 1.0): 1, ('particularli', 1.0): 2, 
('strike', 1.0): 1, ('31', 1.0): 1, ('mam', 1.0): 2, ('feytyaz', 1.0): 1, ('instant', 1.0): 1, ('stiffen', 1.0): 1, ('ricky_feb', 1.0): 1, ('grindea', 1.0): 1, ('courier', 1.0): 1, ('crypt', 1.0): 1, ('arma', 1.0): 1, ('record', 1.0): 5, ('gosh', 1.0): 2, ('limbo', 1.0): 1, ('orchard', 1.0): 1, ('art', 1.0): 10, ('super', 1.0): 15, ('karachi', 1.0): 2, ('ka', 1.0): 4, ('venic', 1.0): 1, ('sever', 1.0): 3, ('part', 1.0): 15, ('wit', 1.0): 2, ('accumul', 1.0): 1, ('maroon', 1.0): 1, ('cocktail', 1.0): 4, ('0-100', 1.0): 1, ('quick', 1.0): 7, ('1100d', 1.0): 1, ('auto-focu', 1.0): 1, ('manual', 1.0): 2, ('vein', 1.0): 1, ('crackl', 1.0): 1, ('glaze', 1.0): 1, ('layout', 1.0): 3, ('bomb', 1.0): 4, ('social', 1.0): 4, ('websit', 1.0): 8, ('pake', 1.0): 1, ('joim', 1.0): 1, ('feed', 1.0): 4, ('troop', 1.0): 1, ('mail', 1.0): 3, ('[email protected]', 1.0): 1, ('prrequest', 1.0): 1, ('journorequest', 1.0): 1, ('the_madstork', 1.0): 1, ('shaun', 1.0): 1, ('bot', 1.0): 4, ('chloe', 1.0): 2, ('actress', 1.0): 3, ('away', 1.0): 13, ('wick', 1.0): 9, ('hola', 1.0): 1, ('juan', 1.0): 1, ('houston', 1.0): 1, ('tx', 1.0): 2, ('jenni', 1.0): 1, ("year'", 1.0): 2, ('stumbl', 1.0): 1, ('upon', 1.0): 1, ('prob.nic', 1.0): 1, ('choker', 1.0): 1, ('btw', 1.0): 12, ('seouljin', 1.0): 1, ('photoset', 1.0): 3, ('sadomasochistsparadis', 1.0): 1, ('wynter', 1.0): 1, ('bottom', 1.0): 3, ('outtak', 1.0): 1, ('sadomasochist', 1.0): 1, ('paradis', 1.0): 1, ('ty', 1.0): 8, ('bbi', 1.0): 3, ('clip', 1.0): 1, ('lose', 1.0): 6, ('cypher', 1.0): 1, ('amen', 1.0): 2, ('x32', 1.0): 1, ('plant', 1.0): 4, ('allow', 1.0): 4, ('corner', 1.0): 3, ('addict', 1.0): 4, ('gurl', 1.0): 1, ('suck', 1.0): 9, ('special', 1.0): 8, ('owe', 1.0): 1, ('daniel', 1.0): 2, ('ape', 1.0): 1, ('saar', 1.0): 1, ('ahead', 1.0): 4, ('vers', 1.0): 1, ('butterfli', 1.0): 1, ('bonu', 1.0): 2, ('fill', 1.0): 5, ('tear', 1.0): 1, ('laughter', 1.0): 2, ('5so', 1.0): 6, ('yummmyyi', 1.0): 1, ('eat', 1.0): 6, ('dosa', 1.0): 1, ('easier', 1.0): 2, ('unless', 1.0): 3, ('achi', 1.0): 2, ('youuu', 1.0): 2, ('bawi', 1.0): 1, ('ako', 1.0): 1, ('queenesth', 1.0): 1, ('sharp', 1.0): 2, ('yess', 1.0): 1, ('poldi', 1.0): 1, ('cimbom', 1.0): 1, ('buddi', 1.0): 7, ('bruhhh', 1.0): 1, ('daddi', 1.0): 2, ('”', 1.0): 5, ('knowledg', 1.0): 2, ('attent', 1.0): 4, ('1tb', 1.0): 1, ('bank', 1.0): 1, ('credit', 1.0): 4, ('depart', 1.0): 2, ('anz', 1.0): 1, ('extrem', 1.0): 3, ('offshor', 1.0): 1, ('absolut', 1.0): 9, ('classic', 1.0): 3, ('gottolovebank', 1.0): 1, ('yup', 1.0): 6, ('in-shaa-allah', 1.0): 1, ('dua', 1.0): 1, ('thru', 1.0): 2, ('aameen', 1.0): 2, ('4/5', 1.0): 1, ('coca', 1.0): 1, ('cola', 1.0): 1, ('fanta', 1.0): 1, ('pepsi', 1.0): 1, ('sprite', 1.0): 1, ('all', 1.0): 1, ('sweeeti', 1.0): 1, (';-)', 1.0): 3, ('welcometweet', 1.0): 2, ('psygustokita', 1.0): 4, ('setup', 1.0): 1, ('wet', 1.0): 3, ('feet', 1.0): 3, ('carpet', 1.0): 1, ('judgment', 1.0): 1, ('hypocrit', 1.0): 1, ('narcissist', 1.0): 1, ('jumpsuit', 1.0): 1, ('bt', 1.0): 2, ('denim', 1.0): 1, ('verg', 1.0): 1, ('owl', 1.0): 1, ('constant', 1.0): 1, ('run', 1.0): 12, ('sia', 1.0): 1, ('count', 1.0): 7, ('brilliant', 1.0): 9, ('teacher', 1.0): 1, ('compar', 1.0): 2, ('religion', 1.0): 1, ('rant', 1.0): 1, ('student', 1.0): 6, ('bencher', 1.0): 1, ('1/5', 1.0): 1, ('porsch', 1.0): 1, ('paddock', 1.0): 1, ('budapestgp', 1.0): 1, ('johnyherbert', 1.0): 1, ('roll', 1.0): 5, ('porschesupercup', 1.0): 1, ('koyal', 1.0): 1, ('melodi', 1.0): 1, ('unexpect', 1.0): 4, ('creat', 1.0): 8, ('memori', 1.0): 3, ('35', 
1.0): 1, ('ep', 1.0): 3, ('catch', 1.0): 10, ('wirh', 1.0): 1, ('arc', 1.0): 1, ('x31', 1.0): 1, ('wolv', 1.0): 2, ('desir', 1.0): 1, ('ameen', 1.0): 1, ('kca', 1.0): 1, ('votejkt', 1.0): 1, ('48id', 1.0): 1, ('helpinggroupdm', 1.0): 1, ('quot', 1.0): 6, ('weird', 1.0): 5, ('dp', 1.0): 1, ('wife', 1.0): 5, ('poor', 1.0): 4, ('chick', 1.0): 1, ('guid', 1.0): 3, ('zonzofox', 1.0): 3, ('bhaiya', 1.0): 1, ('brother', 1.0): 4, ('lucki', 1.0): 10, ('patti', 1.0): 1, ('elabor', 1.0): 1, ('kuch', 1.0): 1, ('rate', 1.0): 1, ('merdeka', 1.0): 1, ('palac', 1.0): 2, ('hotel', 1.0): 5, ('plusmil', 1.0): 1, ('servic', 1.0): 7, ('hahahaa', 1.0): 1, ('mean', 1.0): 25, ('nex', 1.0): 2, ('safe', 1.0): 5, ('gwd', 1.0): 1, ('she', 1.0): 2, ('okok', 1.0): 1, ('33', 1.0): 4, ('idiot', 1.0): 1, ('chaerin', 1.0): 1, ('unni', 1.0): 1, ('viabl', 1.0): 1, ('altern', 1.0): 3, ('nowaday', 1.0): 2, ('ip', 1.0): 1, ('tombow', 1.0): 1, ('abt', 1.0): 2, ('friyay', 1.0): 2, ('smug', 1.0): 1, ('marrickvil', 1.0): 1, ('public', 1.0): 3, ('ten', 1.0): 1, ('ago', 1.0): 8, ('eighteen', 1.0): 1, ('auvssscr', 1.0): 1, ('ncaaseason', 1.0): 1, ('slow', 1.0): 2, ('popsicl', 1.0): 1, ('soft', 1.0): 2, ('melt', 1.0): 1, ('mouth', 1.0): 2, ('thankyouuu', 1.0): 1, ('dianna', 1.0): 1, ('ngga', 1.0): 1, ('usah', 1.0): 1, ('dipikirin', 1.0): 1, ('elah', 1.0): 1, ('easili', 1.0): 1, ("who'", 1.0): 9, ('entp', 1.0): 1, ('killin', 1.0): 1, ('meme', 1.0): 1, ('worthi', 1.0): 1, ('shot', 1.0): 6, ('emon', 1.0): 1, ('decent', 1.0): 2, ('outdoor', 1.0): 1, ('rave', 1.0): 1, ('dv', 1.0): 1, ('aku', 1.0): 1, ('bakal', 1.0): 1, ('liat', 1.0): 1, ('kak', 1.0): 2, ('merri', 1.0): 1, ('tv', 1.0): 5, ('outfit', 1.0): 3, ('--->', 1.0): 1, ('fashionfriday', 1.0): 1, ('angle.nelson', 1.0): 1, ('cheap', 1.0): 1, ('mymonsoonstori', 1.0): 2, ('tree', 1.0): 2, ('lotion', 1.0): 1, ('moistur', 1.0): 1, ('monsoon', 1.0): 1, ('whoop', 1.0): 6, ('romant', 1.0): 2, ('valencia', 1.0): 1, ('daaru', 1.0): 1, ('parti', 1.0): 12, ('chaddi', 1.0): 1, ('wonderful.great', 1.0): 1, ('trim', 1.0): 1, ('pube', 1.0): 1, ('es', 1.0): 2, ('mi', 1.0): 5, ('tio', 1.0): 1, ('sinaloa', 1.0): 1, ('arr', 1.0): 1, ('stylish', 1.0): 1, ('trendi', 1.0): 1, ('kim', 1.0): 5, ('fabfriday', 1.0): 2, ('facetim', 1.0): 4, ('calum', 1.0): 3, ('constantli', 1.0): 1, ('announc', 1.0): 1, ('filbarbarian', 1.0): 1, ('beer', 1.0): 3, ('arm', 1.0): 3, ('testicl', 1.0): 1, ('light', 1.0): 13, ('katerina', 1.0): 1, ('maniataki', 1.0): 1, ('ahh', 1.0): 5, ('alright', 1.0): 6, ('worthwhil', 1.0): 3, ('judg', 1.0): 2, ('tech', 1.0): 2, ('window', 1.0): 7, ('stupid', 1.0): 8, ('plugin', 1.0): 1, ('bass', 1.0): 1, ('slap', 1.0): 1, ('6pm', 1.0): 1, ('door', 1.0): 3, ('vip', 1.0): 1, ('gener', 1.0): 4, ('seat', 1.0): 2, ('earli', 1.0): 9, ('london', 1.0): 9, ('toptravelcentar', 1.0): 1, ('ttctop', 1.0): 1, ('lux', 1.0): 1, ('luxurytravel', 1.0): 1, ('beograd', 1.0): 1, ('srbija', 1.0): 1, ('putovanja', 1.0): 1, ('wendi', 1.0): 2, ('provid', 1.0): 4, ('drainag', 1.0): 1, ('homebound', 1.0): 1, ('hahahay', 1.0): 1, ('yeeeah', 1.0): 1, ('moar', 1.0): 2, ('kitteh', 1.0): 1, ('incom', 1.0): 1, ('tower', 1.0): 2, ('yippee', 1.0): 1, ('scrummi', 1.0): 1, ('bio', 1.0): 5, ('mcpe', 1.0): 1, ('->', 1.0): 1, ('vainglori', 1.0): 1, ('driver', 1.0): 1, ('6:01', 1.0): 1, ('lilydal', 1.0): 1, ('fss', 1.0): 1, ('rais', 1.0): 3, ('magicalmysterytour', 1.0): 1, ('chek', 1.0): 2, ('rule', 1.0): 2, ('weebli', 1.0): 1, ('donetsk', 1.0): 1, ('earth', 1.0): 7, ('personalis', 1.0): 1, ('wrap', 1.0): 2, ('stationeri', 1.0): 1, 
('adrian', 1.0): 1, ('parcel', 1.0): 2, ('tuesday', 1.0): 7, ('pri', 1.0): 3, ('80', 1.0): 3, ('wz', 1.0): 1, ('pattern', 1.0): 1, ('cut', 1.0): 3, ('buttonhol', 1.0): 1, ('4mi', 1.0): 1, ('famou', 1.0): 1, ('client', 1.0): 1, ('p', 1.0): 3, ('aliv', 1.0): 2, ('trial', 1.0): 1, ('spm', 1.0): 1, ('dinooo', 1.0): 1, ('cardio', 1.0): 1, ('steak', 1.0): 1, ('cue', 1.0): 1, ('laptop', 1.0): 1, ('guinea', 1.0): 1, ('pig', 1.0): 1, ('salamat', 1.0): 1, ('sa', 1.0): 6, ('mga', 1.0): 1, ('nag.greet', 1.0): 1, ('guis', 1.0): 1, ('godbless', 1.0): 2, ('crush', 1.0): 3, ('appl', 1.0): 4, ('deserv', 1.0): 11, ('charl', 1.0): 1, ('workhard', 1.0): 1, ('model', 1.0): 7, ('forrit', 1.0): 1, ('bread', 1.0): 2, ('bacon', 1.0): 2, ('butter', 1.0): 2, ('afang', 1.0): 2, ('soup', 1.0): 2, ('semo', 1.0): 2, ('brb', 1.0): 1, ('forc', 1.0): 2, ('doesnt', 1.0): 5, ('tato', 1.0): 1, ('bulat', 1.0): 1, ('concern', 1.0): 1, ('snake', 1.0): 1, ('perform', 1.0): 3, ('con', 1.0): 1, ('todayyy', 1.0): 1, ('max', 1.0): 2, ('gaza', 1.0): 1, ('bbb', 1.0): 1, ('pc', 1.0): 3, ('22', 1.0): 2, ('legal', 1.0): 1, ('ditch', 1.0): 2, ('tori', 1.0): 1, ('bajrangibhaijaanhighestweek', 1.0): 6, ("s'okay", 1.0): 1, ('andi', 1.0): 2, ('you-and', 1.0): 1, ('return', 1.0): 3, ('tuitutil', 1.0): 1, ('bud', 1.0): 2, ('learn', 1.0): 8, ('takeaway', 1.0): 1, ('instead', 1.0): 7, ('1hr', 1.0): 1, ('genial', 1.0): 1, ('competit', 1.0): 1, ('yosh', 1.0): 1, ('procrastin', 1.0): 1, ('plu', 1.0): 4, ('kfc', 1.0): 2, ('itun', 1.0): 1, ('dedicatedfan', 1.0): 1, ('💜', 1.0): 7, ('daft', 1.0): 1, ('teeth', 1.0): 1, ('troubl', 1.0): 1, ('huxley', 1.0): 1, ('basket', 1.0): 2, ('ben', 1.0): 2, ('sent', 1.0): 8, ('gamer', 1.0): 3, ('activ', 1.0): 5, ('120', 1.0): 2, ('distanc', 1.0): 2, ('suitabl', 1.0): 1, ('stockholm', 1.0): 1, ('zack', 1.0): 1, ('destroy', 1.0): 1, ('heel', 1.0): 2, ('claw', 1.0): 1, ('q', 1.0): 2, ('blond', 1.0): 2, ('box', 1.0): 3, ('cheerio', 1.0): 1, ('seed', 1.0): 4, ('cutest', 1.0): 2, ('ffback', 1.0): 2, ('spotifi', 1.0): 3, ("we'v", 1.0): 7, ('vc', 1.0): 1, ('tgp', 1.0): 1, ('race', 1.0): 5, ('averag', 1.0): 2, ("joe'", 1.0): 1, ('bluejay', 1.0): 1, ('vinylbear', 1.0): 1, ('pal', 1.0): 1, ('furbabi', 1.0): 1, ('luff', 1.0): 1, ('mega', 1.0): 4, ('retail', 1.0): 4, ('boot', 1.0): 2, ('whsmith', 1.0): 1, ('ps3', 1.0): 1, ('shannon', 1.0): 1, ('na', 1.0): 9, ('redecor', 1.0): 1, ('bob', 1.0): 3, ('elli', 1.0): 4, ('mairi', 1.0): 1, ('workout', 1.0): 6, ('impair', 1.0): 1, ('uggghhh', 1.0): 1, ('dam', 1.0): 2, ('dun', 1.0): 2, ('eczema', 1.0): 1, ('suffer', 1.0): 4, ('ndee', 1.0): 1, ('pleasur', 1.0): 14, ('publiliu', 1.0): 1, ('syru', 1.0): 1, ('fear', 1.0): 1, ('death', 1.0): 3, ('dread', 1.0): 1, ('fell', 1.0): 3, ('fuk', 1.0): 1, ('unblock', 1.0): 1, ('tweak', 1.0): 2, ('php', 1.0): 1, ('fall', 1.0): 10, ('oomf', 1.0): 1, ('pippa', 1.0): 1, ('hschool', 1.0): 1, ('bu', 1.0): 3, ('cardi', 1.0): 1, ('everyday', 1.0): 3, ('everytim', 1.0): 3, ('hk', 1.0): 1, ("why'd", 1.0): 1, ('acorn', 1.0): 1, ('origin', 1.0): 7, ('c64', 1.0): 1, ('cpu', 1.0): 1, ('consider', 1.0): 1, ('advanc', 1.0): 1, ('onair', 1.0): 1, ('bay', 1.0): 1, ('hold', 1.0): 6, ('river', 1.0): 3, ('0878 0388', 1.0): 1, ('1033', 1.0): 1, ('0272 3306', 1.0): 1, ('70', 1.0): 5, ('rescu', 1.0): 1, ('mutt', 1.0): 1, ('confirm', 1.0): 3, ('deliveri', 1.0): 3, ('switch', 1.0): 2, ('lap', 1.0): 1, ('optim', 1.0): 1, ('lu', 1.0): 1, (':|', 1.0): 1, ('tweetofthedecad', 1.0): 1, (':P', 1.0): 1, ('class', 1.0): 5, ('happiest', 1.0): 2, ('bbmme', 1.0): 3, ('pin', 1.0): 4, 
('7df9e60a', 1.0): 1, ('bbm', 1.0): 2, ('bbmpin', 1.0): 2, ('addmeonbbm', 1.0): 1, ('addm', 1.0): 1, ("today'", 1.0): 3, ('menu', 1.0): 1, ('marri', 1.0): 3, ('glenn', 1.0): 1, ('what', 1.0): 4, ('height', 1.0): 1, ("sculptor'", 1.0): 1, ('ti5', 1.0): 1, ('dota', 1.0): 3, ('nudg', 1.0): 1, ('spot', 1.0): 5, ('tasti', 1.0): 1, ('hilli', 1.0): 1, ('cycl', 1.0): 6, ('england', 1.0): 4, ('scotlandismass', 1.0): 1, ('gen', 1.0): 2, ('vikk', 1.0): 1, ('fna', 1.0): 1, ('mombasa', 1.0): 1, ('tukutanemombasa', 1.0): 1, ('100reasonstovisitmombasa', 1.0): 1, ('karibumombasa', 1.0): 1, ('hanbin', 1.0): 1, ('certainli', 1.0): 4, ('goosnight', 1.0): 1, ('kindli', 1.0): 4, ('familiar', 1.0): 2, ('jealou', 1.0): 4, ('tent', 1.0): 2, ('yea', 1.0): 2, ('cozi', 1.0): 1, ('phenomen', 1.0): 2, ('collab', 1.0): 2, ('gave', 1.0): 4, ('birth', 1.0): 1, ('behav', 1.0): 2, ('monster', 1.0): 1, ('spree', 1.0): 4, ('000', 1.0): 1, ('tank', 1.0): 6, ('outstand', 1.0): 1, ('donat', 1.0): 3, ('h', 1.0): 4, ('contestkiduniya', 1.0): 2, ('mfundo', 1.0): 1, ('och', 1.0): 1, ('hun', 1.0): 4, ('inner', 1.0): 2, ('nerd', 1.0): 2, ('tame', 1.0): 2, ('insidi', 1.0): 1, ('logic', 1.0): 1, ('math', 1.0): 1, ('channel', 1.0): 5, ('continu', 1.0): 4, ('doubt', 1.0): 3, ('300', 1.0): 2, ('sub', 1.0): 2, ('200', 1.0): 3, ('forgiven', 1.0): 1, ('manner', 1.0): 1, ('yhooo', 1.0): 1, ('ngi', 1.0): 1, ('mood', 1.0): 7, ('push', 1.0): 1, ('limit', 1.0): 6, ('obakeng', 1.0): 1, ('goat', 1.0): 1, ('alhamdullilah', 1.0): 1, ('pebbl', 1.0): 1, ('engross', 1.0): 1, ('bing', 1.0): 2, ('scream', 1.0): 2, ('whole', 1.0): 7, ('wide', 1.0): 2, ('🌎', 1.0): 2, ('😧', 1.0): 1, ('wat', 1.0): 2, ('muahhh', 1.0): 1, ('pausetim', 1.0): 1, ('drift', 1.0): 1, ('loos', 1.0): 3, ('campaign', 1.0): 4, ('kickstart', 1.0): 1, ('articl', 1.0): 9, ('jenna', 1.0): 1, ('bellybutton', 1.0): 5, ('inni', 1.0): 4, ('outi', 1.0): 4, ('havent', 1.0): 4, ('delish', 1.0): 1, ('joselito', 1.0): 1, ('freya', 1.0): 1, ('nth', 1.0): 1, ('latepost', 1.0): 1, ('lupet', 1.0): 1, ('mo', 1.0): 2, ('eric', 1.0): 3, ('askaman', 1.0): 1, ('150', 1.0): 1, ('0345', 1.0): 2, ('454', 1.0): 1, ('111', 1.0): 1, ('webz', 1.0): 1, ('oop', 1.0): 5, ("they'll", 1.0): 6, ('realis', 1.0): 2, ('anymor', 1.0): 3, ('carmel', 1.0): 1, ('decis', 1.0): 5, ('matt', 1.0): 6, ('@commoncultur', 1.0): 1, ('@connorfranta', 1.0): 1, ('honestli', 1.0): 3, ('explain', 1.0): 3, ('relationship', 1.0): 4, ('pick', 1.0): 15, ('tessnzach', 1.0): 1, ('paperboy', 1.0): 1, ('honest', 1.0): 3, ('reassur', 1.0): 1, ('guysss', 1.0): 3, ('mubank', 1.0): 2, ("dongwoo'", 1.0): 1, ('bright', 1.0): 2, ('tommorow', 1.0): 3, ('newyork', 1.0): 1, ('lolll', 1.0): 1, ('twinx', 1.0): 1, ('16', 1.0): 2, ('path', 1.0): 1, ('firmansyahbl', 1.0): 1, ('procedur', 1.0): 1, ('grim', 1.0): 1, ('fandango', 1.0): 1, ('ordinari', 1.0): 1, ('extraordinari', 1.0): 1, ('bo', 1.0): 2, ('birmingham', 1.0): 1, ('oracl', 1.0): 1, ('samosa', 1.0): 1, ('firebal', 1.0): 1, ('shoe', 1.0): 4, ('serv', 1.0): 1, ('sushi', 1.0): 2, ('shoeshi', 1.0): 1, ('�', 1.0): 2, ('lymond', 1.0): 1, ('philippa', 1.0): 2, ('novel', 1.0): 1, ('tara', 1.0): 3, ('. . 
.', 1.0): 2, ('aur', 1.0): 2, ('han', 1.0): 1, ('imran', 1.0): 3, ('khan', 1.0): 7, ('63', 1.0): 1, ('agaaain', 1.0): 1, ('doli', 1.0): 1, ('siregar', 1.0): 1, ('ninh', 1.0): 1, ('size', 1.0): 5, ('geekiest', 1.0): 1, ('geek', 1.0): 2, ('wallet', 1.0): 3, ('request', 1.0): 4, ('media', 1.0): 4, ('ralli', 1.0): 1, ('rotat', 1.0): 3, ('direct', 1.0): 3, ('eek', 1.0): 1, ('red', 1.0): 6, ('beij', 1.0): 1, ('meni', 1.0): 1, ('tebrik', 1.0): 1, ('etdi', 1.0): 1, ('700', 1.0): 1, ('💗', 1.0): 2, ('rod', 1.0): 1, ('embrac', 1.0): 1, ('actor', 1.0): 1, ('aplomb', 1.0): 1, ('foreveralon', 1.0): 2, ('mysumm', 1.0): 1, ('01482', 1.0): 1, ('333505', 1.0): 1, ('hahahaha', 1.0): 2, ('wear', 1.0): 6, ('uniform', 1.0): 1, ('evil', 1.0): 1, ('owww', 1.0): 1, ('choo', 1.0): 1, ('chweet', 1.0): 1, ('shorthair', 1.0): 1, ('oscar', 1.0): 1, ('realiz', 1.0): 7, ('harmoni', 1.0): 1, ('deneriveri', 1.0): 1, ('506', 1.0): 1, ('kiksext', 1.0): 5, ('kikkomansabor', 1.0): 2, ('killer', 1.0): 1, ('henessydiari', 1.0): 1, ('journey', 1.0): 4, ('band', 1.0): 4, ('plz', 1.0): 5, ('convo', 1.0): 3, ('11', 1.0): 5, ('vault', 1.0): 1, ('expand', 1.0): 2, ('vinni', 1.0): 1, ('money', 1.0): 9, ('hahahahaha', 1.0): 2, ('50cent', 1.0): 1, ('repay', 1.0): 1, ('debt', 1.0): 2, ('evet', 1.0): 1, ('wifi', 1.0): 3, ('lifestyl', 1.0): 1, ('qatarday', 1.0): 1, ('. ..', 1.0): 3, ('🌞', 1.0): 3, ('girli', 1.0): 1, ('india', 1.0): 4, ('innov', 1.0): 1, ('volunt', 1.0): 2, ('saran', 1.0): 1, ('drama', 1.0): 3, ('genr', 1.0): 1, ('romanc', 1.0): 1, ('comedi', 1.0): 1, ('leannerin', 1.0): 1, ('19', 1.0): 7, ('porno', 1.0): 1, ('l4l', 1.0): 3, ('weloveyounamjoon', 1.0): 1, ('homey', 1.0): 1, ('kenya', 1.0): 1, ('roller', 1.0): 2, ('coaster', 1.0): 1, ('aspect', 1.0): 1, ('najam', 1.0): 1, ('confess', 1.0): 2, ('pricelessantiqu', 1.0): 1, ('takesonetoknowon', 1.0): 1, ('extra', 1.0): 5, ('ucount', 1.0): 1, ('ji', 1.0): 3, ('turkish', 1.0): 1, ('knew', 1.0): 8, ('crap', 1.0): 1, ('burn', 1.0): 3, ('80x', 1.0): 1, ('airlin', 1.0): 1, ('sexi', 1.0): 10, ('yello', 1.0): 1, ('gail', 1.0): 1, ('yael', 1.0): 1, ('lesson', 1.0): 4, ('en', 1.0): 1, ('mano', 1.0): 1, ('hand', 1.0): 4, ('manag', 1.0): 6, ('prettiest', 1.0): 1, ('reader', 1.0): 4, ('dnt', 1.0): 1, ('ideal', 1.0): 2, ('weekli', 1.0): 2, ('idol', 1.0): 3, ('pose', 1.0): 2, ('shortlist', 1.0): 1, ('dominion', 1.0): 2, ('picnic', 1.0): 2, ('tmrw', 1.0): 3, ('nobodi', 1.0): 2, ('jummamubarak', 1.0): 1, ('shower', 1.0): 3, ('shalwarkameez', 1.0): 1, ('itter', 1.0): 1, ('offer', 1.0): 8, ('jummapray', 1.0): 1, ('af', 1.0): 8, ('display', 1.0): 1, ('enabl', 1.0): 1, ('compani', 1.0): 4, ('peep', 1.0): 4, ('tweep', 1.0): 2, ('folow', 1.0): 1, ('2k', 1.0): 1, ('ohhh', 1.0): 4, ('teaser', 1.0): 2, ('airec', 1.0): 1, ('009', 1.0): 1, ('acid', 1.0): 1, ('mous', 1.0): 2, ('31st', 1.0): 2, ('includ', 1.0): 5, ('robin', 1.0): 1, ('rough', 1.0): 4, ('control', 1.0): 1, ('remix', 1.0): 5, ('fave', 1.0): 3, ('toss', 1.0): 1, ('ladi', 1.0): 8, ('🐑', 1.0): 1, ('librari', 1.0): 3, ('mr2', 1.0): 1, ('climb', 1.0): 1, ('cuddl', 1.0): 1, ('jilla', 1.0): 1, ('headlin', 1.0): 1, ('2017', 1.0): 1, ('jumma', 1.0): 5, ('mubarik', 1.0): 2, ('spent', 1.0): 2, ('congratz', 1.0): 1, ('contribut', 1.0): 3, ('2.0', 1.0): 2, ('yuppiiee', 1.0): 1, ('alienthought', 1.0): 1, ('happyalien', 1.0): 1, ('crowd', 1.0): 2, ('loudest', 1.0): 2, ('gari', 1.0): 1, ('particular', 1.0): 1, ('attract', 1.0): 1, ('supprt', 1.0): 1, ('savag', 1.0): 1, ('cleans', 1.0): 1, ('scam', 1.0): 1, ('ridden', 1.0): 1, ('vyapam', 1.0): 2, ('renam', 
1.0): 1, ('wave', 1.0): 2, ('couch', 1.0): 1, ('dodg', 1.0): 1, ('explan', 1.0): 2, ('bag', 1.0): 4, ('sanza', 1.0): 1, ('yaa', 1.0): 3, ('slr', 1.0): 1, ('som', 1.0): 1, ('honour', 1.0): 1, ('heheh', 1.0): 1, ('view', 1.0): 16, ('explor', 1.0): 2, ('wayanadan', 1.0): 1, ('forest', 1.0): 1, ('wayanad', 1.0): 1, ('srijith', 1.0): 1, ('whisper', 1.0): 1, ('lie', 1.0): 4, ('pokemon', 1.0): 1, ('dazzl', 1.0): 1, ('urself', 1.0): 2, ('doubl', 1.0): 2, ('flare', 1.0): 1, ('black', 1.0): 4, ('9', 1.0): 3, ('51', 1.0): 1, ('brows', 1.0): 1, ('bore', 1.0): 9, ('femal', 1.0): 2, ('tour', 1.0): 8, ('delv', 1.0): 2, ('muchhh', 1.0): 1, ('tmr', 1.0): 1, ('breakfast', 1.0): 4, ('gl', 1.0): 1, ("tonight'", 1.0): 2, ('):', 1.0): 7, ('litey', 1.0): 1, ('manuella', 1.0): 1, ('abhi', 1.0): 2, ('tak', 1.0): 2, ('nhi', 1.0): 2, ('dekhi', 1.0): 1, ('promo', 1.0): 3, ('se', 1.0): 4, ('xpax', 1.0): 1, ('lisa', 1.0): 2, ('aboard', 1.0): 3, ('institut', 1.0): 1, ('nc', 1.0): 2, ('chees', 1.0): 4, ('overload', 1.0): 1, ('pizza', 1.0): 1, ('•', 1.0): 3, ('mcfloat', 1.0): 1, ('fudg', 1.0): 3, ('sanda', 1.0): 1, ('munchkin', 1.0): 1, ("d'd", 1.0): 1, ('granni', 1.0): 1, ('baller', 1.0): 1, ('lil', 1.0): 4, ('chain', 1.0): 1, ('everybodi', 1.0): 1, ('ought', 1.0): 1, ('jay', 1.0): 3, ('[email protected]', 1.0): 1, ('79x', 1.0): 1, ('champion', 1.0): 1, ('letter', 1.0): 2, ('uniqu', 1.0): 2, ('affaraid', 1.0): 1, ('dearslim', 1.0): 2, ('role', 1.0): 2, ('billi', 1.0): 2, ('lab', 1.0): 1, ('ovh', 1.0): 2, ('maxi', 1.0): 2, ('bunch', 1.0): 1, ('acc', 1.0): 2, ('sprit', 1.0): 1, ('you', 1.0): 1, ('til', 1.0): 2, ('hammi', 1.0): 1, ('freedom', 1.0): 2, ('pistol', 1.0): 1, ('unlock', 1.0): 1, ('bemeapp', 1.0): 1, ('thumb', 1.0): 1, ('beme', 1.0): 1, ('bemecod', 1.0): 1, ('proudtobem', 1.0): 1, ('round', 1.0): 2, ('calm', 1.0): 5, ('kepo', 1.0): 1, ('luckili', 1.0): 1, ('clearli', 1.0): 2, ('دعمم', 1.0): 1, ('للعودة', 1.0): 1, ('للحياة', 1.0): 1, ('heiyo', 1.0): 2, ('dudafti', 1.0): 1, ('breaktym', 1.0): 1, ('fatal', 1.0): 1, ('danger', 1.0): 1, ('term', 1.0): 2, ('health', 1.0): 2, ('outrag', 1.0): 1, ('645k', 1.0): 1, ('muna', 1.0): 1, ('magstart', 1.0): 1, ('salut', 1.0): 3, ('→', 1.0): 1, ('thq', 1.0): 1, ('contin', 1.0): 1, ('thalaivar', 1.0): 1, ('£', 1.0): 7, ('heiya', 1.0): 2, ('grab', 1.0): 3, ('30.000', 1.0): 2, ('av', 1.0): 1, ('gd', 1.0): 3, ('wknd', 1.0): 1, ('ear', 1.0): 12, ("y'day", 1.0): 1, ('hxh', 1.0): 1, ('badass', 1.0): 2, ('killua', 1.0): 1, ('scene', 1.0): 2, ('78x', 1.0): 1, ('unappreci', 1.0): 1, ('graciou', 1.0): 1, ('nailedit', 1.0): 1, ('ourdisneyinfin', 1.0): 1, ('mari', 1.0): 3, ('jillmil', 1.0): 1, ('webcam', 1.0): 2, ('elfindelmundo', 1.0): 1, ('mainli', 1.0): 1, ('favour', 1.0): 1, ('dancetast', 1.0): 1, ('satyajit', 1.0): 1, ("ray'", 1.0): 1, ('porosh', 1.0): 1, ('pathor', 1.0): 1, ('situat', 1.0): 3, ('goldbug', 1.0): 1, ('wine', 1.0): 3, ('bottl', 1.0): 2, ('spill', 1.0): 2, ('jazmin', 1.0): 3, ('bonilla', 1.0): 3, ('15000', 1.0): 1, ('star', 1.0): 9, ('hollywood', 1.0): 3, ('rofl', 1.0): 3, ('shade', 1.0): 1, ('grey', 1.0): 1, ('netsec', 1.0): 1, ('kev', 1.0): 1, ('sister', 1.0): 6, ('told', 1.0): 6, ('unlist', 1.0): 1, ('hickey', 1.0): 1, ('dad', 1.0): 5, ('hock', 1.0): 1, ('mamma', 1.0): 1, ('human', 1.0): 5, ('be', 1.0): 1, ('mere', 1.0): 1, ('holist', 1.0): 1, ('cosmovis', 1.0): 1, ('narrow-mind', 1.0): 1, ('charg', 1.0): 3, ('cess', 1.0): 1, ('alix', 1.0): 1, ('quan', 1.0): 1, ('tip', 1.0): 5, ('naaahhh', 1.0): 1, ('duh', 1.0): 2, ('emesh', 1.0): 1, ('hilari', 1.0): 4, ('kath', 1.0): 
3, ('kia', 1.0): 1, ('@vauk', 1.0): 1, ('tango', 1.0): 1, ('tracerequest', 1.0): 2, ('dassi', 1.0): 1, ('fwm', 1.0): 1, ('selamat', 1.0): 1, ('nichola', 1.0): 2, ('malta', 1.0): 1, ('gto', 1.0): 1, ('tomorrowland', 1.0): 1, ('incal', 1.0): 1, ('shob', 1.0): 1, ('incomplet', 1.0): 1, ('barkada', 1.0): 1, ('silverston', 1.0): 1, ('pull', 1.0): 1, ('bookstor', 1.0): 1, ('ganna', 1.0): 1, ('hillari', 1.0): 1, ('clinton', 1.0): 1, ('court', 1.0): 2, ('notic', 1.0): 11, ('slice', 1.0): 2, ('life-so', 1.0): 1, ('hidden', 1.0): 1, ('untap', 1.0): 1, ('mca', 1.0): 2, ('gettin', 1.0): 1, ('hella', 1.0): 1, ('wana', 1.0): 1, ('bandz', 1.0): 1, ('hell', 1.0): 4, ('donington', 1.0): 1, ('park', 1.0): 8, ('24/25', 1.0): 1, ('x30', 1.0): 1, ('merci', 1.0): 1, ('bien', 1.0): 1, ('pitbul', 1.0): 1, ('777x', 1.0): 1, ('fri', 1.0): 3, ('annyeong', 1.0): 1, ('oppa', 1.0): 7, ('indonesian', 1.0): 1, ('elf', 1.0): 3, ('flight', 1.0): 2, ('bf', 1.0): 2, ('jennyjean', 1.0): 1, ('kikchat', 1.0): 1, ('sabadodeganarseguidor', 1.0): 1, ('sexysasunday', 1.0): 2, ('marseil', 1.0): 1, ('ganda', 1.0): 1, ('fnaf', 1.0): 5, ('steam', 1.0): 1, ('assur', 1.0): 2, ('current', 1.0): 7, ('goin', 1.0): 1, ('sweeti', 1.0): 4, ('strongest', 1.0): 1, ("spot'", 1.0): 1, ('barnstapl', 1.0): 1, ('bideford', 1.0): 1, ('abit', 1.0): 1, ('road', 1.0): 5, ('rocro', 1.0): 1, ('13glodyysbro', 1.0): 1, ('hire', 1.0): 1, ('2ne1', 1.0): 1, ('aspetti', 1.0): 1, ('chicken', 1.0): 4, ('chip', 1.0): 3, ('cupboard', 1.0): 1, ('empti', 1.0): 2, ('jami', 1.0): 2, ('ian', 1.0): 2, ('latin', 1.0): 5, ('asian', 1.0): 5, ('version', 1.0): 8, ('va', 1.0): 1, ('642', 1.0): 1, ('kikgirl', 1.0): 5, ('orgasm', 1.0): 1, ('phonesex', 1.0): 1, ('spacer', 1.0): 1, ('felic', 1.0): 1, ('smoak', 1.0): 1, ('👓', 1.0): 1, ('💘', 1.0): 3, ('children', 1.0): 3, ('psychopath', 1.0): 1, ('spoil', 1.0): 1, ('dimpl', 1.0): 1, ('contempl', 1.0): 1, ('indi', 1.0): 2, ('rout', 1.0): 4, ('jsl', 1.0): 1, ('76x', 1.0): 1, ('gotcha', 1.0): 1, ('kina', 1.0): 1, ('donna', 1.0): 3, ('reachabl', 1.0): 1, ('jk', 1.0): 1, ('s02e04', 1.0): 1, ('air', 1.0): 7, ('naggi', 1.0): 1, ('anal', 1.0): 1, ('child', 1.0): 3, ('vidcon', 1.0): 2, ('anxiou', 1.0): 1, ('shake', 1.0): 2, ('10:30', 1.0): 1, ('smoke', 1.0): 3, ('white', 1.0): 4, ('grandpa', 1.0): 4, ('prolli', 1.0): 1, ('stash', 1.0): 2, ('closer-chas', 1.0): 1, ('spec', 1.0): 1, ('leagu', 1.0): 3, ('chase', 1.0): 1, ('wall', 1.0): 3, ('angel', 1.0): 4, ('mochamichel', 1.0): 1, ('iph', 1.0): 4, ('0ne', 1.0): 4, ('simpli', 1.0): 3, ('bi0', 1.0): 8, ('x29', 1.0): 1, ('there', 1.0): 2, ('background', 1.0): 2, ('maggi', 1.0): 1, ('afraid', 1.0): 3, ('mull', 1.0): 1, ('nil', 1.0): 1, ('glasgow', 1.0): 2, ('netbal', 1.0): 1, ('thistl', 1.0): 1, ('thistlelov', 1.0): 1, ('minecraft', 1.0): 7, ('drew', 1.0): 3, ('delici', 1.0): 3, ('muddl', 1.0): 1, ('racket', 1.0): 2, ('isol', 1.0): 1, ('fa', 1.0): 1, ('particip', 1.0): 2, ('icecreammast', 1.0): 1, ('group', 1.0): 10, ('huhu', 1.0): 3, ('shet', 1.0): 1, ('desk', 1.0): 1, ('o_o', 1.0): 1, ('orz', 1.0): 1, ('problemmm', 1.0): 1, ('75x', 1.0): 1, ('english', 1.0): 4, ('yeeaayi', 1.0): 1, ('alhamdulillah', 1.0): 1, ('amin', 1.0): 1, ('weed', 1.0): 1, ('crowdfund', 1.0): 1, ('goal', 1.0): 2, ('walk', 1.0): 12, ('hellooo', 1.0): 2, ('select', 1.0): 1, ('lynn', 1.0): 1, ('buffer', 1.0): 2, ('button', 1.0): 2, ('compos', 1.0): 1, ('fridayfun', 1.0): 1, ('non-filipina', 1.0): 1, ('ejayst', 1.0): 1, ('state', 1.0): 2, ('le', 1.0): 2, ('stan', 1.0): 1, ('lee', 1.0): 2, ('discoveri', 1.0): 1, ('cousin', 1.0): 
5, ('1400', 1.0): 1, ('yr', 1.0): 2, ('teleport', 1.0): 1, ('shahid', 1.0): 1, ('afridi', 1.0): 1, ('tou', 1.0): 1, ('mahnor', 1.0): 1, ('baloch', 1.0): 1, ('nikki', 1.0): 2, ('flower', 1.0): 4, ('blackfli', 1.0): 1, ('courgett', 1.0): 1, ('wont', 1.0): 5, ('affect', 1.0): 2, ('fruit', 1.0): 5, ('italian', 1.0): 1, ('netfilx', 1.0): 1, ('unmarri', 1.0): 1, ('finger', 1.0): 6, ('rock', 1.0): 10, ('wielli', 1.0): 1, ('paul', 1.0): 2, ('barcod', 1.0): 1, ('charlott', 1.0): 1, ('thta', 1.0): 1, ('trailblazerhonor', 1.0): 1, ('labour', 1.0): 3, ('leader', 1.0): 3, ('alot', 1.0): 2, ('agayhippiehippi', 1.0): 1, ('exercis', 1.0): 2, ('ginger', 1.0): 1, ('x28', 1.0): 1, ('teach', 1.0): 2, ('awar', 1.0): 1, ('::', 1.0): 4, ('portsmouth', 1.0): 1, ('sonal', 1.0): 1, ('hungri', 1.0): 2, ('hmmm', 1.0): 4, ('pedant', 1.0): 1, ('98', 1.0): 1, ('kit', 1.0): 2, ('ack', 1.0): 1, ('hih', 1.0): 1, ('choir', 1.0): 1, ('rosidbinr', 1.0): 1, ('duke', 1.0): 2, ('earl', 1.0): 1, ('tau', 1.0): 1, ('orayt', 1.0): 1, ('knw', 1.0): 1, ('block', 1.0): 3, ('dikha', 1.0): 1, ('reh', 1.0): 1, ('adolf', 1.0): 1, ('hitler', 1.0): 1, ('obstacl', 1.0): 1, ('exist', 1.0): 2, ('surrend', 1.0): 2, ('terrif', 1.0): 1, ('advaddict', 1.0): 1, ('_15', 1.0): 1, ('jimin', 1.0): 1, ('notanapolog', 1.0): 3, ('map', 1.0): 2, ('inform', 1.0): 5, ('0.7', 1.0): 1, ('motherfuck', 1.0): 1, ("david'", 1.0): 1, ('damn', 1.0): 3, ('colleg', 1.0): 2, ('24th', 1.0): 3, ('steroid', 1.0): 1, ('alansmithpart', 1.0): 1, ('servu', 1.0): 1, ('bonasio', 1.0): 1, ("doido'", 1.0): 1, ('task', 1.0): 2, ('deleg', 1.0): 1, ('aaahhh', 1.0): 1, ('jen', 1.0): 2, ('virgin', 1.0): 5, ('non-mapbox', 1.0): 1, ('restrict', 1.0): 1, ('mapbox', 1.0): 1, ('basemap', 1.0): 1, ('contractu', 1.0): 1, ('research', 1.0): 1, ('seafood', 1.0): 1, ('weltum', 1.0): 1, ('teh', 1.0): 1, ('deti', 1.0): 1, ('huh', 1.0): 2, ('=D', 1.0): 2, ('annoy', 1.0): 2, ('katmtan', 1.0): 1, ('swan', 1.0): 1, ('fandom', 1.0): 3, ('blurri', 1.0): 1, ('besok', 1.0): 1, ('b', 1.0): 8, ('urgent', 1.0): 3, ('within', 1.0): 4, ('dorset', 1.0): 1, ('goddess', 1.0): 1, ('blast', 1.0): 1, ('shitfac', 1.0): 1, ('soul', 1.0): 4, ('sing', 1.0): 5, ('disney', 1.0): 1, ('doug', 1.0): 3, ('28', 1.0): 2, ('bnte', 1.0): 1, ('hain', 1.0): 2, (';p', 1.0): 1, ('shiiitt', 1.0): 1, ('case', 1.0): 9, ('rm35', 1.0): 1, ('negooo', 1.0): 1, ('male', 1.0): 1, ('madelin', 1.0): 1, ('nun', 1.0): 1, ('mornin', 1.0): 2, ('yapster', 1.0): 1, ('pli', 1.0): 1, ('icon', 1.0): 2, ('alchemist', 1.0): 1, ('x27', 1.0): 1, ('dayz', 1.0): 1, ('preview', 1.0): 1, ('thug', 1.0): 1, ('lmao', 1.0): 3, ('sharethelov', 1.0): 2, ('highvalu', 1.0): 2, ('halsey', 1.0): 1, ('30th', 1.0): 1, ('anniversari', 1.0): 5, ('folk', 1.0): 10, ('bae', 1.0): 6, ('repli', 1.0): 5, ('complain', 1.0): 3, ('rude', 1.0): 3, ('bond', 1.0): 4, ('nigg', 1.0): 1, ('readingr', 1.0): 1, ('wordoftheweek', 1.0): 1, ('wotw', 1.0): 1, ('4:18', 1.0): 1, ('est', 1.0): 1, ('earn', 1.0): 1, ('jess', 1.0): 2, ('surri', 1.0): 1, ('botani', 1.0): 1, ('gel', 1.0): 1, ('alison', 1.0): 1, ('lsa', 1.0): 1, ('respons', 1.0): 7, ('fron', 1.0): 1, ('debbi', 1.0): 1, ('carol', 1.0): 2, ('patient', 1.0): 4, ('discharg', 1.0): 1, ('loung', 1.0): 1, ('walmart', 1.0): 1, ('balanc', 1.0): 2, ('studi', 1.0): 6, ('hayley', 1.0): 2, ('shoulder', 1.0): 1, ('pad', 1.0): 2, ('mount', 1.0): 1, ('inquisitor', 1.0): 1, ('cosplay', 1.0): 4, ('cosplayprogress', 1.0): 1, ('mike', 1.0): 3, ('dunno', 1.0): 2, ('insecur', 1.0): 2, ('nh', 1.0): 1, ('devolut', 1.0): 1, ('patriot', 1.0): 1, ('halla', 1.0): 
1, ('ark', 1.0): 1, ("jiyeon'", 1.0): 1, ('buzz', 1.0): 2, ('burnt', 1.0): 1, ('mist', 1.0): 4, ('opi', 1.0): 1, ('avoplex', 1.0): 1, ('nail', 1.0): 3, ('cuticl', 1.0): 1, ('replenish', 1.0): 1, ('15ml', 1.0): 1, ('seriou', 1.0): 2, ('submiss', 1.0): 1, ('lb', 1.0): 2, ('cherish', 1.0): 2, ('flip', 1.0): 1, ('learnt', 1.0): 2, ('backflip', 1.0): 2, ('jumpgiant', 1.0): 1, ('foampit', 1.0): 1, ('usa', 1.0): 3, ('pamer', 1.0): 1, ('thk', 1.0): 1, ('actuallythough', 1.0): 1, ('craft', 1.0): 2, ('session', 1.0): 3, ('mehtab', 1.0): 1, ('aunti', 1.0): 1, ('gc', 1.0): 1, ('yeeew', 1.0): 1, ('pre', 1.0): 3, ('lan', 1.0): 1, ('yeey', 1.0): 1, ('arrang', 1.0): 1, ('doodl', 1.0): 2, ('comic', 1.0): 1, ('summon', 1.0): 1, ('none', 1.0): 1, ('🙅', 1.0): 1, ('lycra', 1.0): 1, ('vincent', 1.0): 1, ('couldnt', 1.0): 1, ('roy', 1.0): 1, ('bg', 1.0): 1, ('img', 1.0): 1, ('circl', 1.0): 1, ('font', 1.0): 1, ('deathofgrass', 1.0): 1, ('loan', 1.0): 2, ('lawnmow', 1.0): 1, ('popular', 1.0): 2, ('charismat', 1.0): 1, ('man.h', 1.0): 1, ('thrive', 1.0): 1, ('economi', 1.0): 1, ('burst', 1.0): 2, ('georgi', 1.0): 1, ('x26', 1.0): 1, ('million', 1.0): 4, ('fl', 1.0): 1, ('kindest', 1.0): 2, ('iceland', 1.0): 1, ('crazi', 1.0): 4, ('landscap', 1.0): 2, ('yok', 1.0): 1, ('lah', 1.0): 1, ('concordia', 1.0): 1, ('reunit', 1.0): 1, ('xxxibmchll', 1.0): 1, ('sea', 1.0): 4, ('prettier', 1.0): 2, ('imitatia', 1.0): 1, ('oe', 1.0): 1, ('michel', 1.0): 1, ('comeback', 1.0): 1, ('gross', 1.0): 1, ('treat', 1.0): 5, ('equal', 1.0): 2, ('injustic', 1.0): 1, ('femin', 1.0): 1, ('ineedfeminismbecaus', 1.0): 1, ('forgotten', 1.0): 3, ('stuck', 1.0): 4, ('recommend', 1.0): 4, ('redhead', 1.0): 1, ('wacki', 1.0): 1, ('rather', 1.0): 5, ('waytoliveahappylif', 1.0): 1, ('hoxton', 1.0): 1, ('holborn', 1.0): 1, ('karen', 1.0): 2, ('wag', 1.0): 2, ('bum', 1.0): 1, ('wwooo', 1.0): 1, ('nite', 1.0): 3, ('laiten', 1.0): 1, ('arond', 1.0): 1, ('1:30', 1.0): 1, ('consid', 1.0): 3, ('matur', 1.0): 3, ('journeyp', 1.0): 2, ('foam', 1.0): 1, ("lady'", 1.0): 1, ('mob', 1.0): 1, ('fals', 1.0): 1, ('bulletin', 1.0): 1, ('spring', 1.0): 1, ('fiesta', 1.0): 1, ('nois', 1.0): 2, ('awuuu', 1.0): 1, ('aich', 1.0): 1, ('sept', 1.0): 2, ('rudramadevi', 1.0): 1, ('anushka', 1.0): 1, ('gunashekar', 1.0): 1, ('harryxhood', 1.0): 1, ('upset', 1.0): 1, ('ooh', 1.0): 1, ('humanist', 1.0): 1, ('magazin', 1.0): 2, ('usernam', 1.0): 1, ('rape', 1.0): 1, ('csrrace', 1.0): 1, ('lack', 1.0): 6, ('hygien', 1.0): 1, ('tose', 1.0): 1, ('cloth', 1.0): 1, ('temperatur', 1.0): 1, ('planet', 1.0): 2, ('brave', 1.0): 2, ('ge', 1.0): 1, ('2015kenya', 1.0): 1, ('ryan', 1.0): 4, ('tidi', 1.0): 2, ('hagergang', 1.0): 1, ('chanhun', 1.0): 1, ('photoshoot', 1.0): 1, ('afteral', 1.0): 1, ('sadkaay', 1.0): 1, ('thark', 1.0): 1, ('peak', 1.0): 1, ('heatwav', 1.0): 1, ('lower', 1.0): 1, ('standard', 1.0): 2, ('x25', 1.0): 1, ('recruit', 1.0): 2, ('doom', 1.0): 1, ('nasti', 1.0): 1, ('affili', 1.0): 1, ('>:)', 1.0): 2, ('64', 1.0): 2, ('74', 1.0): 1, ('40', 1.0): 4, ('00', 1.0): 1, ('hall', 1.0): 2, ('ted', 1.0): 3, ('pixgram', 1.0): 2, ('creativ', 1.0): 2, ('slideshow', 1.0): 1, ('nibbl', 1.0): 2, ('ivi', 1.0): 1, ('sho', 1.0): 1, ('superpow', 1.0): 2, ('obsess', 1.0): 2, ('oth', 1.0): 1, ('third', 1.0): 2, ('ngarepfollbackdarinabilahjkt', 1.0): 1, ('48', 1.0): 1, ('sunglass', 1.0): 1, ('jacki', 1.0): 2, ('sunni', 1.0): 6, ('style', 1.0): 5, ('jlo', 1.0): 1, ('jlover', 1.0): 1, ('turkey', 1.0): 1, ('goodafternoon', 1.0): 2, ('collag', 1.0): 2, ('furri', 1.0): 2, ('bruce', 1.0): 2, 
('kunoriforceo', 1.0): 8, ('aayegi', 1.0): 1, ('tim', 1.0): 2, ('wiw', 1.0): 1, ('bip', 1.0): 1, ('zareen', 1.0): 1, ('daisi', 1.0): 1, ("b'coz", 1.0): 1, ('kart', 1.0): 1, ('mak', 1.0): 1, ('∗', 1.0): 2, ('lega', 1.0): 1, ('spag', 1.0): 1, ('boat', 1.0): 2, ('outboard', 1.0): 1, ('spell', 1.0): 4, ('reboard', 1.0): 1, ('fire', 1.0): 2, ('offboard', 1.0): 1, ('sn16', 1.0): 1, ('9dg', 1.0): 1, ('bnf', 1.0): 1, ('50', 1.0): 1, ('jason', 1.0): 1, ('rob', 1.0): 2, ('feb', 1.0): 1, ('victoriasecret', 1.0): 1, ('finland', 1.0): 1, ('helsinki', 1.0): 1, ('airport', 1.0): 3, ('plane', 1.0): 2, ('beyond', 1.0): 4, ('ont', 1.0): 1, ('tii', 1.0): 1, ('lng', 1.0): 2, ('yan', 1.0): 2, ("u'll", 1.0): 2, ('steve', 1.0): 2, ('bell', 1.0): 1, ('prescott', 1.0): 1, ('leadership', 1.0): 2, ('cartoon', 1.0): 1, ('upsid', 1.0): 2, ('statement', 1.0): 1, ('selamathariraya', 1.0): 1, ('lovesummertim', 1.0): 1, ('dumont', 1.0): 1, ('jax', 1.0): 1, ('jone', 1.0): 1, ('awesomee', 1.0): 1, ('x24', 1.0): 1, ('geoff', 1.0): 1, ('amazingli', 1.0): 1, ('talant', 1.0): 1, ('vsco', 1.0): 2, ('thanki', 1.0): 2, ('hash', 1.0): 1, ('tag', 1.0): 5, ('ifimeetanalien', 1.0): 1, ('bff', 1.0): 4, ('section', 1.0): 3, ('follbaaack', 1.0): 1, ('az', 1.0): 1, ('cauliflow', 1.0): 1, ('attempt', 1.0): 1, ('prinsesa', 1.0): 1, ('yaaah', 1.0): 2, ('law', 1.0): 3, ('toy', 1.0): 2, ('sonaaa', 1.0): 1, ('beautiful', 1.0): 2, ("josephine'", 1.0): 1, ('mirror', 1.0): 3, ('cretaperfect', 1.0): 2, ('4me', 1.0): 2, ('cretaperfectsuv', 1.0): 2, ('creta', 1.0): 1, ('load', 1.0): 1, ('telecom', 1.0): 2, ('judi', 1.0): 1, ('superb', 1.0): 1, ('slightli', 1.0): 1, ('rakna', 1.0): 1, ('ew', 1.0): 1, ('whose', 1.0): 1, ('fifa', 1.0): 1, ('lineup', 1.0): 1, ('surviv', 1.0): 2, ('p90x', 1.0): 1, ('p90', 1.0): 1, ('dishoom', 1.0): 2, ('rajnigandha', 1.0): 1, ('minju', 1.0): 1, ('rapper', 1.0): 1, ('lead', 1.0): 2, ('vocal', 1.0): 1, ('yujin', 1.0): 1, ('visual', 1.0): 2, ('makna', 1.0): 1, ('jane', 1.0): 2, ('hah', 1.0): 4, ('hawk', 1.0): 2, ('greatest', 1.0): 2, ('histori', 1.0): 2, ('along', 1.0): 6, ('talkback', 1.0): 1, ('process', 1.0): 4, ('featur', 1.0): 4, ('mostli', 1.0): 1, ("cinema'", 1.0): 1, ('defend', 1.0): 2, ('fashion', 1.0): 2, ('atroc', 1.0): 1, ('pandimension', 1.0): 1, ('manifest', 1.0): 1, ('argo', 1.0): 1, ('ring', 1.0): 4, ('640', 1.0): 1, ('nad', 1.0): 1, ('plezzz', 1.0): 1, ('asthma', 1.0): 1, ('inhal', 1.0): 1, ('breath', 1.0): 3, ('goodluck', 1.0): 1, ('hunger', 1.0): 1, ('mockingjay', 1.0): 1, ('thehungergam', 1.0): 1, ('ador', 1.0): 4, ('x23', 1.0): 1, ('reina', 1.0): 1, ('felt', 1.0): 3, ('excus', 1.0): 2, ('attend', 1.0): 2, ('whn', 1.0): 1, ('andr', 1.0): 1, ('mamayang', 1.0): 1, ('11pm', 1.0): 1, ('1d', 1.0): 2, ('89.9', 1.0): 1, ('powi', 1.0): 1, ('shropshir', 1.0): 1, ('border', 1.0): 1, ("school'", 1.0): 1, ('san', 1.0): 2, ('diego', 1.0): 1, ('jump', 1.0): 2, ('sourc', 1.0): 3, ('appeas', 1.0): 1, ('¦', 1.0): 1, ('aj', 1.0): 1, ('action', 1.0): 1, ('grunt', 1.0): 1, ('sc', 1.0): 1, ('anti-christ', 1.0): 1, ('m8', 1.0): 1, ('ju', 1.0): 1, ('halfway', 1.0): 1, ('ex', 1.0): 2, ('postiv', 1.0): 2, ('opinion', 1.0): 3, ('avi', 1.0): 1, ('dare', 1.0): 4, ('corridor', 1.0): 1, ('👯', 1.0): 2, ('neither', 1.0): 2, ('rundown', 1.0): 1, ('yah', 1.0): 4, ('leviboard', 1.0): 1, ('kleper', 1.0): 1, (':(', 1.0): 1, ('impecc', 1.0): 2, ('setokido', 1.0): 1, ('shoulda', 1.0): 3, ('hippo', 1.0): 1, ('materialist', 1.0): 1, ('showpo', 1.0): 1, ('cough', 1.0): 6, ('@artofsleepingin', 1.0): 1, ('x22', 1.0): 1, ('☺', 1.0): 5, ('makesm', 
1.0): 1, ('santorini', 1.0): 1, ('escap', 1.0): 2, ('beatport', 1.0): 1, ('🏻', 1.0): 3, ('trmdhesit', 1.0): 2, ('manuel', 1.0): 1, ('vall', 1.0): 1, ('king', 1.0): 3, ('seven', 1.0): 2, ('kingdom', 1.0): 2, ('andal', 1.0): 1, ('taught', 1.0): 1, ('hide', 1.0): 3, ('privaci', 1.0): 1, ('wise', 1.0): 1, ('natsuki', 1.0): 1, ('often', 1.0): 2, ('catchi', 1.0): 1, ('neil', 1.0): 2, ('emir', 1.0): 2, ('brill', 1.0): 1, ('urquhart', 1.0): 1, ('castl', 1.0): 1, ('simpl', 1.0): 2, ('shatter', 1.0): 2, ('contrast', 1.0): 1, ('educampakl', 1.0): 1, ('rotorua', 1.0): 1, ('pehli', 1.0): 1, ('phir', 1.0): 1, ('somi', 1.0): 1, ('burfday', 1.0): 1, ('univers', 1.0): 3, ('santo', 1.0): 1, ('toma', 1.0): 1, ('norh', 1.0): 1, ('dialogu', 1.0): 2, ('chainsaw', 1.0): 2, ('amus', 1.0): 1, ('awe', 1.0): 1, ('protect', 1.0): 2, ('pop', 1.0): 5, ('2ish', 1.0): 1, ('fahad', 1.0): 1, ('bhai', 1.0): 3, ('iqrar', 1.0): 1, ('waseem', 1.0): 1, ('abroad', 1.0): 2, ('movie', 1.0): 1, ('chef', 1.0): 1, ('grogol', 1.0): 1, ('long-dist', 1.0): 1, ('rhi', 1.0): 1, ('pwrfl', 1.0): 1, ('benefit', 1.0): 2, ('b2b', 1.0): 1, ('b2c', 1.0): 1, ("else'", 1.0): 2, ('soo', 1.0): 2, ('enterprison', 1.0): 1, ('schoolsoutforsumm', 1.0): 1, ('fellow', 1.0): 4, ('juggl', 1.0): 1, ('purrtho', 1.0): 1, ('catho', 1.0): 1, ('catami', 1.0): 1, ('fourfivesecond', 1.0): 4, ('deaf', 1.0): 4, ('drug', 1.0): 1, ('alcohol', 1.0): 1, ('apexi', 1.0): 3, ('crystal', 1.0): 3, ('meth', 1.0): 1, ('champagn', 1.0): 1, ('fc', 1.0): 1, ('streamer', 1.0): 1, ('juic', 1.0): 1, ('correct', 1.0): 1, ('portrait', 1.0): 1, ('izumi', 1.0): 1, ('fugiwara', 1.0): 1, ('clonmel', 1.0): 1, ('vibrant', 1.0): 1, ('estim', 1.0): 1, ('server', 1.0): 2, ('quiet', 1.0): 1, ('yey', 1.0): 1, ("insha'allah", 1.0): 1, ('wil', 1.0): 1, ('x21', 1.0): 1, ('trend', 1.0): 3, ('akshaymostlovedsuperstarev', 1.0): 1, ('indirect', 1.0): 1, ('askurban', 1.0): 1, ('lyka', 1.0): 2, ('nap', 1.0): 4, ('aff', 1.0): 1, ('unam', 1.0): 1, ('jonginuh', 1.0): 1, ('forecast', 1.0): 2, ('10am', 1.0): 2, ('5am', 1.0): 1, ('sooth', 1.0): 1, ('vii', 1.0): 1, ('sweetheart', 1.0): 1, ('freak', 1.0): 3, ('zayn', 1.0): 3, ('fucker', 1.0): 1, ('pet', 1.0): 2, ('illustr', 1.0): 1, ('wohoo', 1.0): 1, ('gleam', 1.0): 1, ('paint', 1.0): 4, ('deal', 1.0): 2, ('prime', 1.0): 2, ('minist', 1.0): 2, ('sunjam', 1.0): 1, ('industri', 1.0): 1, ('present', 1.0): 7, ('practic', 1.0): 3, ('proactiv', 1.0): 1, ('environ', 1.0): 1, ('unreal', 1.0): 1, ('zain', 1.0): 1, ('zac', 1.0): 1, ('isaac', 1.0): 1, ('oss', 1.0): 1, ('frank', 1.0): 1, ('iero', 1.0): 1, ('phase', 1.0): 2, ('david', 1.0): 1, ('beginn', 1.0): 1, ('shine', 1.0): 3, ('sunflow', 1.0): 2, ('tommarow', 1.0): 1, ('yall', 1.0): 2, ('rank', 1.0): 2, ('birthdaymonth', 1.0): 1, ('vianey', 1.0): 1, ('juli', 1.0): 11, ('birthdaygirl', 1.0): 1, ("town'", 1.0): 1, ('andrew', 1.0): 2, ('checkout', 1.0): 2, ('otwol', 1.0): 1, ('awhil', 1.0): 1, ('x20', 1.0): 1, ('all-tim', 1.0): 1, ('julia', 1.0): 1, ('robert', 1.0): 1, ('awwhh', 1.0): 1, ('bulldog', 1.0): 1, ('unfortun', 1.0): 2, ('02079', 1.0): 1, ('490', 1.0): 1, ('132', 1.0): 1, ('born', 1.0): 2, ('fightstickfriday', 1.0): 1, ('extravag', 1.0): 2, ('tearout', 1.0): 1, ('selekt', 1.0): 1, ('yoot', 1.0): 1, ('cross', 1.0): 3, ('gudday', 1.0): 1, ('dave', 1.0): 5, ('haileyhelp', 1.0): 1, ('eid', 1.0): 2, ('mubarak', 1.0): 5, ('brotheeerrr', 1.0): 1, ('adventur', 1.0): 5, ('tokyo', 1.0): 2, ('kansai', 1.0): 1, ('l', 1.0): 4, ('upp', 1.0): 2, ('om', 1.0): 1, ('60', 1.0): 1, ('minut', 1.0): 7, ('data', 1.0): 1, ('jesu', 
1.0): 5, ('amsterdam', 1.0): 2, ('3rd', 1.0): 3, ('nextweek', 1.0): 1, ('booti', 1.0): 2, ('bcuz', 1.0): 1, ('step', 1.0): 3, ('option', 1.0): 3, ('stabl', 1.0): 1, ('sturdi', 1.0): 1, ('lukkke', 1.0): 1, ('again.ensoi', 1.0): 1, ('tc', 1.0): 1, ('madam', 1.0): 1, ('siddi', 1.0): 1, ('unknown', 1.0): 2, ('roomi', 1.0): 1, ('gn', 1.0): 2, ('gf', 1.0): 2, ('consent', 1.0): 1, ('mister', 1.0): 2, ('vine', 1.0): 2, ('peyton', 1.0): 1, ('nagato', 1.0): 1, ('yuki-chan', 1.0): 1, ('shoushitsu', 1.0): 1, ('archdbanterburi', 1.0): 3, ('experttradesmen', 1.0): 1, ('banter', 1.0): 1, ('quiz', 1.0): 1, ('tradetalk', 1.0): 1, ('floof', 1.0): 1, ('face', 1.0): 13, ('muahah', 1.0): 1, ('x19', 1.0): 1, ('anticip', 1.0): 1, ('jd', 1.0): 1, ('laro', 1.0): 1, ('tayo', 1.0): 1, ('answer', 1.0): 8, ('ht', 1.0): 1, ('angelica', 1.0): 1, ('anghel', 1.0): 1, ('aa', 1.0): 3, ('kkk', 1.0): 1, ('macbook', 1.0): 1, ('rehears', 1.0): 1, ('youthcelebr', 1.0): 1, ('mute', 1.0): 1, ('29th', 1.0): 1, ('gohf', 1.0): 4, ('vegetarian', 1.0): 1, ("she'll", 1.0): 1, ('gooday', 1.0): 3, ('101', 1.0): 3, ('12000', 1.0): 1, ('oshieer', 1.0): 1, ('realreview', 1.0): 1, ('happycustom', 1.0): 1, ('realoshi', 1.0): 1, ('dealsuthaonotebachao', 1.0): 1, ('bigger', 1.0): 2, ('dime', 1.0): 1, ('uhuh', 1.0): 1, ('🎵', 1.0): 3, ('code', 1.0): 4, ('pleasant', 1.0): 2, ('on-board', 1.0): 1, ('raheel', 1.0): 1, ('flyhigh', 1.0): 1, ('bother', 1.0): 2, ('everett', 1.0): 1, ('taylor', 1.0): 1, ('ha-ha', 1.0): 1, ('peachyloan', 1.0): 1, ('fridayfreebi', 1.0): 1, ('noe', 1.0): 1, ('yisss', 1.0): 1, ('bindingofissac', 1.0): 1, ('xboxon', 1.0): 1, ('consol', 1.0): 1, ('justin', 1.0): 2, ('gladli', 1.0): 1, ('son', 1.0): 4, ('morocco', 1.0): 1, ('peru', 1.0): 1, ('nxt', 1.0): 1, ('bp', 1.0): 1, ('resort', 1.0): 1, ('x18', 1.0): 1, ('havuuulovey', 1.0): 1, ('uuu', 1.0): 1, ('possitv', 1.0): 1, ('hopey', 1.0): 1, ('throwbackfriday', 1.0): 1, ('christen', 1.0): 1, ('ki', 1.0): 1, ('yaad', 1.0): 1, ('gayi', 1.0): 1, ('opossum', 1.0): 1, ('belat', 1.0): 5, ('yeahh', 1.0): 2, ('kuffar', 1.0): 1, ('comput', 1.0): 5, ('cell', 1.0): 1, ('diarrhea', 1.0): 1, ('immigr', 1.0): 1, ('lice', 1.0): 1, ('goictiv', 1.0): 1, ('70685', 1.0): 1, ('tagsforlik', 1.0): 4, ('trapmus', 1.0): 1, ('hotmusicdeloco', 1.0): 1, ('kinick', 1.0): 1, ('01282', 1.0): 2, ('452096', 1.0): 1, ('shadi', 1.0): 1, ('reserv', 1.0): 3, ('tkt', 1.0): 1, ('likewis', 1.0): 4, ('overgener', 1.0): 1, ('ikr', 1.0): 1, ('😍', 1.0): 2, ('consumer', 1.0): 1, ('fic', 1.0): 2, ('ouch', 1.0): 2, ('slip', 1.0): 1, ('disc', 1.0): 1, ('thw', 1.0): 1, ('chute', 1.0): 1, ('chalut', 1.0): 1, ('replay', 1.0): 1, ('iplay', 1.0): 1, ('11am', 1.0): 3, ('unneed', 1.0): 1, ('megamoh', 1.0): 1, ('7/29', 1.0): 1, ('tool', 1.0): 2, ('zealand', 1.0): 1, ('pile', 1.0): 2, ('dump', 1.0): 1, ('couscou', 1.0): 3, ("women'", 1.0): 2, ('fiction', 1.0): 1, ('wahahaah', 1.0): 1, ('x17', 1.0): 1, ('orhan', 1.0): 1, ('pamuk', 1.0): 1, ('hero', 1.0): 3, ('canopi', 1.0): 1, ('mapl', 1.0): 2, ('syrup', 1.0): 1, ('farm', 1.0): 2, ('stephani', 1.0): 2, ('💖', 1.0): 2, ('congrtaualt', 1.0): 1, ('philea', 1.0): 1, ('club', 1.0): 4, ('inc', 1.0): 1, ('photograph', 1.0): 2, ('phonegraph', 1.0): 1, ('srsli', 1.0): 1, ('10:17', 1.0): 1, ('ripaaa', 1.0): 1, ('banat', 1.0): 1, ('ray', 1.0): 1, ('dept', 1.0): 1, ('hospit', 1.0): 3, ('grt', 1.0): 1, ('infograph', 1.0): 1, ("o'clock", 1.0): 2, ('habit', 1.0): 1, ('1dfor', 1.0): 1, ('roadtrip', 1.0): 1, ('19:30', 1.0): 1, ('ifc', 1.0): 1, ('whip', 1.0): 1, ('lilsisbro', 1.0): 1, ('pre-ord', 1.0): 2, 
("pixar'", 1.0): 2, ('steelbook', 1.0): 1, ('hmm', 1.0): 2, ('pegel', 1.0): 1, ('lemess', 1.0): 1, ('kyle', 1.0): 2, ('paypal', 1.0): 1, ('oct', 1.0): 1, ('tud', 1.0): 1, ('jst', 1.0): 2, ('humphrey', 1.0): 1, ('yell', 1.0): 2, ('erm', 1.0): 1, ('breach', 1.0): 1, ('lemon', 1.0): 2, ('yogurt', 1.0): 2, ('pot', 1.0): 1, ('discov', 1.0): 2, ('liquoric', 1.0): 1, ('pud', 1.0): 1, ('cajun', 1.0): 1, ('spice', 1.0): 1, ('yum', 1.0): 2, ('cajunchicken', 1.0): 1, ('infinit', 1.0): 2, ('fight', 1.0): 4, ('gern', 1.0): 1, ('cikaaa', 1.0): 1, ('maaf', 1.0): 1, ('telat', 1.0): 1, ('ngucapinnya', 1.0): 1, ('maaay', 1.0): 1, ('x16', 1.0): 1, ('viparita', 1.0): 1, ('karani', 1.0): 1, ('legsupthewal', 1.0): 1, ('unwind', 1.0): 1, ('coco', 1.0): 3, ('comfi', 1.0): 1, ('jalulu', 1.0): 1, ('rosh', 1.0): 1, ('gla', 1.0): 1, ('pallavi', 1.0): 1, ('nairobi', 1.0): 1, ('hrdstellobama', 1.0): 1, ('region', 1.0): 2, ('civil', 1.0): 1, ('societi', 1.0): 2, ('globe', 1.0): 1, ('hajur', 1.0): 1, ('yayi', 1.0): 2, ("must'v", 1.0): 1, ('nerv', 1.0): 1, ('prelim', 1.0): 1, ('costacc', 1.0): 1, ('nwb', 1.0): 1, ('shud', 1.0): 1, ('cold', 1.0): 2, ('hmu', 1.0): 2, ('cala', 1.0): 1, ('brush', 1.0): 1, ('ego', 1.0): 1, ('wherev', 1.0): 1, ('interact', 1.0): 2, ('dongsaeng', 1.0): 1, ('chorong', 1.0): 1, ('friendship', 1.0): 1, ('impress', 1.0): 3, ('dragon', 1.0): 2, ('duck', 1.0): 5, ('mix', 1.0): 5, ('cheetah', 1.0): 1, ('wagga', 1.0): 2, ('coursework', 1.0): 1, ('lorna', 1.0): 1, ('scan', 1.0): 1, ('x12', 1.0): 2, ('canva', 1.0): 2, ('iqbal', 1.0): 1, ('ima', 1.0): 1, ('hon', 1.0): 1, ('aja', 1.0): 1, ('besi', 1.0): 1, ('chati', 1.0): 1, ('phulani', 1.0): 1, ('swasa', 1.0): 1, ('bahari', 1.0): 1, ('jiba', 1.0): 1, ('mumbai', 1.0): 1, ('gujarat', 1.0): 1, ('distrub', 1.0): 1, ('otherwis', 1.0): 5, ('190cr', 1.0): 1, ('inspit', 1.0): 1, ('highest', 1.0): 1, ('holder', 1.0): 1, ('threaten', 1.0): 1, ('daili', 1.0): 2, ('basi', 1.0): 1, ('vr', 1.0): 1, ('angelo', 1.0): 1, ('quezon', 1.0): 1, ('sweatpant', 1.0): 1, ('farbridg', 1.0): 1, ('segalakatakata', 1.0): 1, ('nixu', 1.0): 1, ('begun', 1.0): 1, ('flint', 1.0): 1, ('🍰', 1.0): 5, ('separ', 1.0): 1, ('criticis', 1.0): 1, ('gestur', 1.0): 1, ('pedal', 1.0): 1, ('stroke', 1.0): 1, ('caro', 1.0): 1, ('deposit', 1.0): 1, ('secur', 1.0): 2, ('shock', 1.0): 1, ('coff', 1.0): 2, ('tenerina', 1.0): 1, ('auguri', 1.0): 1, ('iso', 1.0): 1, ('certif', 1.0): 1, ('paralyz', 1.0): 1, ('anxieti', 1.0): 1, ("it'd", 1.0): 1, ('develop', 1.0): 3, ('spain', 1.0): 2, ('def', 1.0): 1, ('bantim', 1.0): 1, ('fail', 1.0): 5, ('2ban', 1.0): 1, ('x15', 1.0): 1, ('awkward', 1.0): 2, ('ab', 1.0): 1, ('gale', 1.0): 1, ('founder', 1.0): 1, ('loveyaaah', 1.0): 1, ('⅛', 1.0): 1, ('⅞', 1.0): 1, ('∞', 1.0): 1, ('specialist', 1.0): 1, ('aw', 1.0): 3, ('babyyi', 1.0): 1, ('djstruthmat', 1.0): 1, ('re-cap', 1.0): 1, ('flickr', 1.0): 1, ('tack', 1.0): 2, ('zephbot', 1.0): 1, ('hhahahahaha', 1.0): 1, ('blew', 1.0): 2, ('entir', 1.0): 2, ('vega', 1.0): 3, ('strip', 1.0): 1, ('hahahahahhaha', 1.0): 1, ("callie'", 1.0): 1, ('puppi', 1.0): 1, ('owner', 1.0): 2, ('callinganimalabusehotlineasap', 1.0): 1, ('gorefiend', 1.0): 1, ('mythic', 1.0): 1, ('remind', 1.0): 6, ('9:00', 1.0): 1, ('▪', 1.0): 2, ('️bea', 1.0): 1, ('miller', 1.0): 2, ('lockscreen', 1.0): 1, ('mbf', 1.0): 1, ('keesh', 1.0): 1, ("yesterday'", 1.0): 1, ('groupi', 1.0): 1, ('bebe', 1.0): 1, ('sizam', 1.0): 1, ('color', 1.0): 5, ('invoic', 1.0): 1, ('kanina', 1.0): 1, ('pong', 1.0): 1, ('umaga', 1.0): 1, ('browser', 1.0): 1, ('typic', 1.0): 2, 
('pleass', 1.0): 5, ('leeteuk', 1.0): 1, ('pearl', 1.0): 1, ('thusi', 1.0): 1, ('pour', 1.0): 1, ('milk', 1.0): 2, ('tgv', 1.0): 1, ('pari', 1.0): 5, ('austerlitz', 1.0): 1, ('bloi', 1.0): 1, ('mile', 1.0): 3, ('chateau', 1.0): 1, ('de', 1.0): 1, ('marai', 1.0): 1, ('taxi', 1.0): 1, ('x14', 1.0): 1, ('nom', 1.0): 1, ('enji', 1.0): 1, ('hater', 1.0): 3, ('purchas', 1.0): 2, ('specially-mark', 1.0): 1, ('custard', 1.0): 1, ('sm', 1.0): 1, ('on-pack', 1.0): 1, ('instruct', 1.0): 1, ('tile', 1.0): 1, ('downstair', 1.0): 1, ('kelli', 1.0): 1, ('greek', 1.0): 2, ('petra', 1.0): 1, ('shadowplayloui', 1.0): 1, ('mutual', 1.0): 2, ('cuz', 1.0): 4, ('liveonstream', 1.0): 1, ('lani', 1.0): 1, ('graze', 1.0): 1, ('pride', 1.0): 1, ('bristolart', 1.0): 1, ('in-app', 1.0): 1, ('ensur', 1.0): 1, ('item', 1.0): 2, ('screw', 1.0): 1, ('amber', 1.0): 2, ('43', 1.0): 1, ('hpc', 1.0): 1, ('wip', 1.0): 2, ('sw', 1.0): 1, ('newsround', 1.0): 1, ('hound', 1.0): 1, ('7:40', 1.0): 1, ('ada', 1.0): 1, ('racist', 1.0): 1, ('hulk', 1.0): 1, ('tight', 1.0): 2, ('prayer', 1.0): 3, ('pardon', 1.0): 1, ('phl', 1.0): 1, ('abu', 1.0): 2, ('dhabi', 1.0): 1, ('hihihi', 1.0): 1, ('teamjanuaryclaim', 1.0): 1, ('godonna', 1.0): 1, ('msg', 1.0): 2, ('bowwowchicawowwow', 1.0): 1, ('settl', 1.0): 1, ('dkt', 1.0): 1, ('porch', 1.0): 1, ('uber', 1.0): 2, ('mobil', 1.0): 4, ('applic', 1.0): 3, ('giggl', 1.0): 2, ('bare', 1.0): 3, ('wind', 1.0): 2, ('kahlil', 1.0): 1, ('gibran', 1.0): 1, ('flash', 1.0): 1, ('stiff', 1.0): 1, ('upper', 1.0): 1, ('lip', 1.0): 1, ('britain', 1.0): 1, ('latmon', 1.0): 1, ('endeavour', 1.0): 1, ('ann', 1.0): 2, ('joy', 1.0): 4, ('os', 1.0): 1, ('exploit', 1.0): 1, ('ign', 1.0): 2, ('au', 1.0): 1, ('pubcast', 1.0): 1, ('tengaman', 1.0): 1, ('21', 1.0): 2, ('celebratio', 1.0): 1, ('women', 1.0): 1, ('instal', 1.0): 2, ('glorifi', 1.0): 1, ('infirm', 1.0): 1, ('silli', 1.0): 1, ('suav', 1.0): 1, ('gentlemen', 1.0): 1, ('monthli', 1.0): 1, ('mileag', 1.0): 1, ('target', 1.0): 2, ('samsung', 1.0): 1, ('qualiti', 1.0): 3, ('ey', 1.0): 1, ('beth', 1.0): 2, ('gangster', 1.0): 1, ("athena'", 1.0): 1, ('fanci', 1.0): 1, ('wellington', 1.0): 1, ('rich', 1.0): 2, ('christina', 1.0): 1, ('newslett', 1.0): 1, ('zy', 1.0): 1, ('olur', 1.0): 1, ('x13', 1.0): 1, ('flawless', 1.0): 1, ('reaction', 1.0): 2, ('hayli', 1.0): 1, ('edwin', 1.0): 1, ('elvena', 1.0): 1, ('emc', 1.0): 1, ('rubber', 1.0): 3, ('swearword', 1.0): 1, ('infect', 1.0): 1, ('10:16', 1.0): 1, ('wrote', 1.0): 3, ('gan', 1.0): 1, ('brotherhood', 1.0): 1, ('wolf', 1.0): 5, ('pill', 1.0): 1, ('nocturn', 1.0): 1, ('rrp', 1.0): 1, ('18.99', 1.0): 1, ('13.99', 1.0): 1, ('jah', 1.0): 1, ('wobbl', 1.0): 1, ('retard', 1.0): 1, ('50notif', 1.0): 1, ('check-up', 1.0): 1, ('pun', 1.0): 1, ('elit', 1.0): 1, ('camillu', 1.0): 1, ('pleasee', 1.0): 1, ('spare', 1.0): 1, ('tyre', 1.0): 2, ('joke', 1.0): 3, ('ahahah', 1.0): 1, ('shame', 1.0): 1, ('abandon', 1.0): 1, ('disagre', 1.0): 2, ('nowher', 1.0): 2, ('contradict', 1.0): 1, ('chao', 1.0): 1, ('contain', 1.0): 1, ('cranium', 1.0): 1, ('sneaker', 1.0): 1, ('nike', 1.0): 1, ('nikeorigin', 1.0): 1, ('nikeindonesia', 1.0): 1, ('pierojogg', 1.0): 1, ('skoy', 1.0): 1, ('winter', 1.0): 2, ('falkland', 1.0): 1, ('jamie-le', 1.0): 1, ('congraaat', 1.0): 1, ('hooh', 1.0): 1, ('chrome', 1.0): 1, ('storm', 1.0): 1, ('thunderstorm', 1.0): 1, ('circuscircu', 1.0): 1, ('omgg', 1.0): 1, ('tdi', 1.0): 1, ('(-:', 1.0): 2, ('peter', 1.0): 1, ('expel', 1.0): 2, ('boughi', 1.0): 1, ('kernel', 1.0): 1, ('paralysi', 1.0): 1, ('liza', 1.0): 1, 
('lol.hook', 1.0): 1, ('vampir', 1.0): 2, ('diari', 1.0): 3, ('twice', 1.0): 1, ('thanq', 1.0): 2, ('goodwil', 1.0): 1, ('vandr', 1.0): 1, ('ash', 1.0): 1, ('debat', 1.0): 3, ('solar', 1.0): 1, ('6-5', 1.0): 1, ('shown', 1.0): 1, ('ek', 1.0): 1, ('taco', 1.0): 2, ('mexico', 1.0): 2, ('viva', 1.0): 1, ('méxico', 1.0): 1, ('burger', 1.0): 3, ('thebestangkapuso', 1.0): 1, ('lighter', 1.0): 1, ('tooth', 1.0): 2, ('korean', 1.0): 2, ('netizen', 1.0): 1, ('crueler', 1.0): 1, ('eleph', 1.0): 1, ('marula', 1.0): 1, ('tdif', 1.0): 1, ('shoutout', 1.0): 1, ('shortli', 1.0): 1, ('itsamarvelth', 1.0): 1, ("japan'", 1.0): 1, ('artist', 1.0): 1, ('homework', 1.0): 1, ('marco', 1.0): 1, ('herb', 1.0): 1, ('pm', 1.0): 3, ('self', 1.0): 1, ('esteem', 1.0): 1, ('patienc', 1.0): 1, ('sobtian', 1.0): 1, ('cowork', 1.0): 1, ('deathli', 1.0): 1, ('hallow', 1.0): 1, ('supernatur', 1.0): 1, ('consult', 1.0): 1, ('himach', 1.0): 1, ('2.25', 1.0): 1, ('asham', 1.0): 1, ('where.do.i.start', 1.0): 1, ('moviemarathon', 1.0): 1, ('skill', 1.0): 4, ('shadow', 1.0): 1, ('own', 1.0): 1, ('pair', 1.0): 3, ("it'll", 1.0): 6, ('cortez', 1.0): 1, ('superstar', 1.0): 1, ('tthank', 1.0): 1, ('colin', 1.0): 1, ('luxuou', 1.0): 1, ('tarryn', 1.0): 1, ('hbdme', 1.0): 1, ('yeeeyyy', 1.0): 1, ('barsostay', 1.0): 1, ('males', 1.0): 1, ('independ', 1.0): 1, ('sum', 1.0): 1, ('debacl', 1.0): 1, ('perfectli', 1.0): 1, ('longer', 1.0): 2, ('amyjackson', 1.0): 1, ('omegl', 1.0): 2, ('countrymus', 1.0): 1, ('five', 1.0): 2, ("night'", 1.0): 2, ("freddy'", 1.0): 2, ('demo', 1.0): 2, ('pump', 1.0): 2, ('fanboy', 1.0): 1, ('thegrandad', 1.0): 1, ('sidni', 1.0): 1, ('remarriag', 1.0): 1, ('occas', 1.0): 1, ('languag', 1.0): 1, ('java', 1.0): 1, ("php'", 1.0): 1, ('notion', 1.0): 1, ('refer', 1.0): 1, ('confus', 1.0): 3, ('ohioan', 1.0): 1, ('stick', 1.0): 2, ('doctor', 1.0): 3, ('offlin', 1.0): 1, ('thesim', 1.0): 1, ('mb', 1.0): 1, ('meaningless', 1.0): 1, ('common', 1.0): 1, ('celebr', 1.0): 9, ('muertosatfring', 1.0): 1, ('emul', 1.0): 1, ('brought', 1.0): 1, ('enemi', 1.0): 2, ('relax', 1.0): 3, ('ou', 1.0): 1, ('pink', 1.0): 2, ('cc', 1.0): 2, ('meooowww', 1.0): 1, ('barkkkiiidee', 1.0): 1, ('bark', 1.0): 1, ('x11', 1.0): 1, ('routin', 1.0): 4, ('alek', 1.0): 1, ('awh', 1.0): 2, ('kumpul', 1.0): 1, ('cantik', 1.0): 1, ('ganteng', 1.0): 1, ('kresna', 1.0): 1, ('jelli', 1.0): 1, ('simon', 1.0): 1, ('lesley', 1.0): 3, ('blood', 1.0): 2, ('panti', 1.0): 1, ('lion', 1.0): 1, ('artworkbyli', 1.0): 1, ('judo', 1.0): 1, ('daredevil', 1.0): 2, ('despond', 1.0): 1, ('re-watch', 1.0): 1, ('welcoma.hav', 1.0): 1, ('favor', 1.0): 5, ('tridon', 1.0): 1, ('21pic', 1.0): 1, ('master', 1.0): 3, ('nim', 1.0): 1, ("there'r", 1.0): 1, ('22pic', 1.0): 1, ('kebun', 1.0): 1, ('ubud', 1.0): 1, ('ladyposs', 1.0): 1, ('xoxoxo', 1.0): 1, ('sneak', 1.0): 3, ('peek', 1.0): 2, ('inbox', 1.0): 1, ('happyweekend', 1.0): 1, ('therealgolden', 1.0): 1, ('47', 1.0): 1, ('girlfriendsmya', 1.0): 1, ('ppl', 1.0): 2, ('closest', 1.0): 1, ('njoy', 1.0): 1, ('followingg', 1.0): 1, ('privat', 1.0): 1, ('pusher', 1.0): 1, ('stun', 1.0): 4, ('wooohooo', 1.0): 1, ('cuss', 1.0): 1, ('teenag', 1.0): 1, ('ace', 1.0): 1, ('sauc', 1.0): 3, ('livi', 1.0): 1, ('fowl', 1.0): 1, ('oliviafowl', 1.0): 1, ('891', 1.0): 1, ('burnout', 1.0): 1, ('johnforceo', 1.0): 1, ('matthew', 1.0): 1, ('provok', 1.0): 1, ('indiankultur', 1.0): 1, ('oppos', 1.0): 1, ('biker', 1.0): 1, ('lyk', 1.0): 1, ('gud', 1.0): 4, ('weight', 1.0): 6, ('bcu', 1.0): 1, ('rubbish', 1.0): 1, ('veggi', 1.0): 2, ('steph', 1.0): 
1, ('nj', 1.0): 1, ('x10', 1.0): 1, ('cohes', 1.0): 1, ('gossip', 1.0): 2, ('alex', 1.0): 3, ('heswifi', 1.0): 1, ('7am', 1.0): 1, ('wub', 1.0): 1, ('cerbchan', 1.0): 1, ('jarraaa', 1.0): 1, ('morrrn', 1.0): 1, ('snooz', 1.0): 1, ('clicksco', 1.0): 1, ('gay', 1.0): 4, ('lesbian', 1.0): 2, ('rigid', 1.0): 1, ('theocrat', 1.0): 1, ('wing', 1.0): 1, ('fundamentalist', 1.0): 1, ('islamist', 1.0): 1, ('brianaaa', 1.0): 1, ('brianazabrocki', 1.0): 1, ('sky', 1.0): 2, ('batb', 1.0): 1, ('clap', 1.0): 3, ('whilst', 1.0): 1, ('aki', 1.0): 1, ('thencerest', 1.0): 2, ('547', 1.0): 2, ('indiemus', 1.0): 5, ('sexyjudi', 1.0): 3, ('pussi', 1.0): 4, ('sexo', 1.0): 3, ('humid', 1.0): 1, ('87', 1.0): 1, ('sloppi', 1.0): 1, ("second'", 1.0): 1, ('stock', 1.0): 3, ('marmit', 1.0): 2, ('x9', 1.0): 1, ('nic', 1.0): 3, ('taft', 1.0): 1, ('finalist', 1.0): 1, ('lotteri', 1.0): 1, ('award', 1.0): 3, ('usagi', 1.0): 1, ('looov', 1.0): 1, ('wowww', 1.0): 2, ('💙', 1.0): 8, ('💚', 1.0): 8, ('💕', 1.0): 12, ('lepa', 1.0): 1, ('sembuh', 1.0): 1, ('sibuk', 1.0): 1, ('balik', 1.0): 1, ('kin', 1.0): 1, ('gotham', 1.0): 1, ('sunnyday', 1.0): 1, ('dudett', 1.0): 1, ('cost', 1.0): 1, ('flippin', 1.0): 1, ('fortun', 1.0): 1, ('divinediscont', 1.0): 1, (';}', 1.0): 1, ('amnot', 1.0): 1, ('autofollow', 1.0): 3, ('teamfollowback', 1.0): 4, ('geer', 1.0): 1, ('bat', 1.0): 2, ('mz', 1.0): 1, ('yang', 1.0): 2, ('deennya', 1.0): 1, ('jehwan', 1.0): 1, ('11:00', 1.0): 1, ('ashton', 1.0): 1, ('✧', 1.0): 12, ('。', 1.0): 4, ('chelni', 1.0): 2, ('datz', 1.0): 1, ('jeremi', 1.0): 1, ('fmt', 1.0): 1, ('dat', 1.0): 3, ('heartbeat', 1.0): 1, ('clutch', 1.0): 1, ('🐢', 1.0): 2, ('besteverdoctorwhoepisod', 1.0): 1, ('relev', 1.0): 1, ('puke', 1.0): 1, ('proper', 1.0): 1, ('x8', 1.0): 1, ('sublimin', 1.0): 1, ('eatmeat', 1.0): 1, ('brewproject', 1.0): 1, ('lovenafianna', 1.0): 1, ('mr', 1.0): 7, ('lewi', 1.0): 1, ('clock', 1.0): 1, ('3:02', 1.0): 2, ('muslim', 1.0): 1, ('prophet', 1.0): 1, ('غردلي', 1.0): 4, ('is.h', 1.0): 1, ('mistak', 1.0): 4, ('understood', 1.0): 1, ('politician', 1.0): 1, ('argu', 1.0): 1, ('intellect', 1.0): 1, ('shiva', 1.0): 1, ('mp3', 1.0): 1, ('standrew', 1.0): 1, ('sandcastl', 1.0): 1, ('ewok', 1.0): 1, ('nate', 1.0): 2, ('brawl', 1.0): 1, ('rear', 1.0): 1, ('nake', 1.0): 1, ('choke', 1.0): 1, ('heck', 1.0): 1, ('gun', 1.0): 2, ('associ', 1.0): 1, ('um', 1.0): 1, ('endow', 1.0): 1, ('ai', 1.0): 1, ('sikandar', 1.0): 1, ('pti', 1.0): 1, ('standwdik', 1.0): 1, ('westandwithik', 1.0): 1, ('starbuck', 1.0): 2, ('logo', 1.0): 2, ('renew', 1.0): 1, ('chariti', 1.0): 1, ('جمعة_مباركة', 1.0): 1, ('hoki', 1.0): 1, ('biz', 1.0): 1, ('non', 1.0): 1, ('america', 1.0): 1, ('california', 1.0): 1, ('01:16', 1.0): 1, ('45gameplay', 1.0): 2, ('ilovey', 1.0): 2, ('vex', 1.0): 1, ('iger', 1.0): 1, ('leicaq', 1.0): 1, ('leica', 1.0): 1, ('dudee', 1.0): 1, ('persona', 1.0): 1, ('yepp', 1.0): 1, ('5878e503', 1.0): 1, ('x7', 1.0): 1, ('greg', 1.0): 1, ('posey', 1.0): 1, ('miami', 1.0): 1, ('james_yammouni', 1.0): 1, ('breakdown', 1.0): 1, ('materi', 1.0): 2, ('thorin', 1.0): 1, ('hunt', 1.0): 1, ('choroo', 1.0): 1, ('nahi', 1.0): 2, ('aztec', 1.0): 1, ('princess', 1.0): 2, ('raini', 1.0): 1, ('kingfish', 1.0): 1, ('chinua', 1.0): 1, ('acheb', 1.0): 1, ('intellectu', 1.0): 2, ('liquid', 1.0): 1, ('melbournetrip', 1.0): 1, ('taxikitchen', 1.0): 1, ('nooow', 1.0): 2, ('mcdo', 1.0): 1, ('everywher', 1.0): 2, ('dreamer', 1.0): 1, ('tanisha', 1.0): 1, ('1nonli', 1.0): 1, ('attitud', 1.0): 1, ('kindl', 1.0): 2, ('flame', 1.0): 1, ('convict', 1.0): 1, 
('bar', 1.0): 1, ('repath', 1.0): 2, ('adi', 1.0): 1, ('stefani', 1.0): 1, ('sg1', 1.0): 1, ('lightbox', 1.0): 1, ('ran', 1.0): 2, ('incorrect', 1.0): 1, ('apologist', 1.0): 1, ('x6', 1.0): 1, ('vuli', 1.0): 1, ('01:15', 1.0): 1, ('batman', 1.0): 1, ('pearson', 1.0): 1, ('reput', 1.0): 2, ('nikkei', 1.0): 1, ('woodford', 1.0): 1, ('vscocam', 1.0): 1, ('vscoph', 1.0): 1, ('vscogood', 1.0): 1, ('vscophil', 1.0): 1, ('vscocousin', 1.0): 1, ('yaap', 1.0): 1, ('urwelc', 1.0): 1, ('neon', 1.0): 1, ('pant', 1.0): 1, ('haaa', 1.0): 1, ('will', 1.0): 2, ('auspost', 1.0): 1, ('openfollow', 1.0): 1, ('rp', 1.0): 2, ('eng', 1.0): 1, ('yūjō-cosplay', 1.0): 1, ('luxembourg', 1.0): 1, ('bunni', 1.0): 1, ('broadcast', 1.0): 1, ('needa', 1.0): 1, ('gal', 1.0): 3, ('bend', 1.0): 3, ('heaven', 1.0): 2, ('score', 1.0): 2, ('januari', 1.0): 1, ('hanabutl', 1.0): 1, ('kikhorni', 1.0): 1, ('interraci', 1.0): 1, ('makeup', 1.0): 1, ('chu', 1.0): 1, ("weekend'", 1.0): 1, ('punt', 1.0): 1, ('horserac', 1.0): 1, ('hors', 1.0): 2, ('horseracingtip', 1.0): 1, ('guitar', 1.0): 1, ('cocoar', 1.0): 1, ('brief', 1.0): 1, ('introduct', 1.0): 1, ('earliest', 1.0): 1, ('indian', 1.0): 1, ('subcontin', 1.0): 1, ('bfr', 1.0): 1, ('maurya', 1.0): 1, ('jordanian', 1.0): 1, ('00962778381', 1.0): 1, ('838', 1.0): 1, ('tenyai', 1.0): 1, ('hee', 1.0): 2, ('ss', 1.0): 1, ('semi', 1.0): 1, ('atp', 1.0): 2, ('wimbledon', 1.0): 2, ('feder', 1.0): 1, ('nadal', 1.0): 1, ('monfil', 1.0): 1, ('handsom', 1.0): 2, ('cilic', 1.0): 3, ('firm', 1.0): 1, ('potenti', 1.0): 3, ('nyc', 1.0): 1, ('chillin', 1.0): 2, ('tail', 1.0): 2, ('kitten', 1.0): 1, ('garret', 1.0): 1, ('baz', 1.0): 1, ('leo', 1.0): 2, ('xst', 1.0): 1, ('centrifug', 1.0): 1, ('etern', 1.0): 3, ('forgiv', 1.0): 2, ('kangin', 1.0): 1, ('بندر', 1.0): 1, ('العنزي', 1.0): 1, ('kristin', 1.0): 1, ('cass', 1.0): 1, ('surajettan', 1.0): 1, ('kashi', 1.0): 1, ('ashwathi', 1.0): 1, ('mommi', 1.0): 2, ('tirth', 1.0): 1, ('brambhatt', 1.0): 1, ('snooker', 1.0): 1, ('compens', 1.0): 1, ('theoper', 1.0): 1, ('479', 1.0): 1, ('premiostumundo', 1.0): 2, ('philosoph', 1.0): 1, ('x5', 1.0): 1, ('graphic', 1.0): 2, ('level', 1.0): 1, ('aug', 1.0): 3, ('excl', 1.0): 1, ('raw', 1.0): 1, ('weeni', 1.0): 1, ('annoyingbabi', 1.0): 1, ('lazi', 1.0): 2, ('cosi', 1.0): 1, ('client_amends_edit', 1.0): 1, ('_5_final_final_fin', 1.0): 1, ('pdf', 1.0): 1, ('mauliat', 1.0): 1, ('ito', 1.0): 2, ('okkay', 1.0): 1, ('knock', 1.0): 3, ("soloist'", 1.0): 1, ('ryu', 1.0): 1, ('saera', 1.0): 1, ('pinkeu', 1.0): 1, ('angri', 1.0): 3, ('screencap', 1.0): 1, ('jonghyun', 1.0): 1, ('seungyeon', 1.0): 1, ('cnblue', 1.0): 1, ('mbc', 1.0): 1, ('wgm', 1.0): 1, ('masa', 1.0): 2, ('entrepreneurship', 1.0): 1, ('empow', 1.0): 1, ('limpopo', 1.0): 1, ('pict', 1.0): 1, ('norapowel', 1.0): 1, ('hornykik', 1.0): 2, ('livesex', 1.0): 1, ('pumpkin', 1.0): 1, ('thrice', 1.0): 1, ('patron', 1.0): 1, ('ventur', 1.0): 1, ('deathcur', 1.0): 1, ('boob', 1.0): 1, ('blame', 1.0): 1, ('dine', 1.0): 1, ('modern', 1.0): 1, ('grill', 1.0): 1, ('disk', 1.0): 1, ('nt4', 1.0): 1, ('iirc', 1.0): 1, ('ux', 1.0): 1, ('refin', 1.0): 1, ('zdp', 1.0): 1, ('didnt', 1.0): 2, ('justic', 1.0): 1, ('daw', 1.0): 1, ('tine', 1.0): 1, ('gensan', 1.0): 1, ('frightl', 1.0): 1, ('undead', 1.0): 1, ('plush', 1.0): 1, ('cushion', 1.0): 1, ('nba', 1.0): 3, ('2k15', 1.0): 3, ('mypark', 1.0): 3, ('chronicl', 1.0): 4, ('gryph', 1.0): 3, ('volum', 1.0): 3, ('ellen', 1.0): 1, ('degener', 1.0): 1, ('shirt', 1.0): 1, ('mint', 1.0): 1, ('superdri', 1.0): 1, ('berangkaat', 
1.0): 1, ('lagiii', 1.0): 1, ('siguro', 1.0): 1, ('un', 1.0): 1, ('kesa', 1.0): 1, ('lotsa', 1.0): 2, ('organis', 1.0): 2, ('4am', 1.0): 1, ('fingers-cross', 1.0): 1, ('deep', 1.0): 1, ('htaccess', 1.0): 1, ('file', 1.0): 2, ('adf', 1.0): 1, ('womad', 1.0): 1, ('gran', 1.0): 1, ('canaria', 1.0): 1, ('gig', 1.0): 1, ('twist', 1.0): 1, ('youv', 1.0): 1, ('teamnatur', 1.0): 1, ('huni', 1.0): 1, ('yayayayay', 1.0): 1, ('yt', 1.0): 2, ('convent', 1.0): 1, ('brighton', 1.0): 1, ('slay', 1.0): 1, ('nicknam', 1.0): 1, ('babygirl', 1.0): 1, ('regard', 1.0): 2, ('himmat', 1.0): 1, ('karain', 1.0): 2, ('baat', 1.0): 1, ('meri', 1.0): 1, ('hotee-mi', 1.0): 1, ('uncl', 1.0): 1, ('tongu', 1.0): 1, ('pronounc', 1.0): 1, ('nativ', 1.0): 1, ('american', 1.0): 2, ('proverb', 1.0): 1, ('lovabl', 1.0): 1, ('yesha', 1.0): 1, ('montoya', 1.0): 1, ('eagerli', 1.0): 1, ('payment', 1.0): 1, ('suprem', 1.0): 1, ('leon', 1.0): 1, ('ks', 1.0): 2, ('randi', 1.0): 1, ('9bi', 1.0): 1, ('physiqu', 1.0): 1, ('shave', 1.0): 1, ('uncut', 1.0): 1, ('boi', 1.0): 1, ('cheapest', 1.0): 1, ('regular', 1.0): 3, ('printer', 1.0): 3, ('nz', 1.0): 1, ('larg', 1.0): 4, ('format', 1.0): 1, ('10/10', 1.0): 1, ('senior', 1.0): 1, ('raid', 1.0): 2, ('conserv', 1.0): 1, ('batteri', 1.0): 1, ('comfort', 1.0): 2, ('swt', 1.0): 1, ('[email protected]', 1.0): 1, ('localgaragederbi', 1.0): 1, ('campu', 1.0): 1, ('subgam', 1.0): 1, ('faceit', 1.0): 1, ('snpcaht', 1.0): 1, ('hakhakhak', 1.0): 1, ('t___t', 1.0): 1, ("kyungsoo'", 1.0): 1, ('3d', 1.0): 2, ('properti', 1.0): 2, ('agent', 1.0): 1, ('accur', 1.0): 1, ('descript', 1.0): 1, ('theori', 1.0): 1, ('x4', 1.0): 1, ('15.90', 1.0): 1, ('yvett', 1.0): 1, ('author', 1.0): 2, ('mwf', 1.0): 1, ('programm', 1.0): 1, ('taal', 1.0): 1, ('lake', 1.0): 1, ('2emt', 1.0): 1, ('«', 1.0): 2, ('scurri', 1.0): 1, ('agil', 1.0): 1, ('solut', 1.0): 1, ('sme', 1.0): 1, ('omar', 1.0): 1, ('biggest', 1.0): 5, ('kamaal', 1.0): 1, ('amm', 1.0): 1, ('3am', 1.0): 1, ('hopehousekid', 1.0): 1, ('pitmantrain', 1.0): 1, ('walkersmithway', 1.0): 1, ('keepitloc', 1.0): 2, ('sehun', 1.0): 1, ('se100lead', 1.0): 1, ('unev', 1.0): 1, ('sofa', 1.0): 1, ('surf', 1.0): 1, ('cunt', 1.0): 1, ('rescoop', 1.0): 1, ('multiraci', 1.0): 1, ('fk', 1.0): 1, ('narrow', 1.0): 1, ('warlock', 1.0): 1, ('balloon', 1.0): 3, ('mj', 1.0): 1, ('madison', 1.0): 1, ('beonknockknock', 1.0): 1, ('con-gradu', 1.0): 1, ('gent', 1.0): 1, ('bitchfac', 1.0): 1, ('😒', 1.0): 1, ('organ', 1.0): 1, ('12pm', 1.0): 2, ('york', 1.0): 2, ('nearest', 1.0): 1, ('lendal', 1.0): 1, ('pikami', 1.0): 1, ('captur', 1.0): 1, ('fulton', 1.0): 1, ('sheen', 1.0): 1, ('baloney', 1.0): 1, ('unvarnish', 1.0): 1, ('laid', 1.0): 2, ('thick', 1.0): 1, ('blarney', 1.0): 1, ('flatteri', 1.0): 1, ('thin', 1.0): 1, ('sachin', 1.0): 1, ('unimport', 1.0): 1, ('context', 1.0): 1, ('dampen', 1.0): 1, ('yu', 1.0): 1, ('rocket', 1.0): 1, ('narendra', 1.0): 1, ('modi', 1.0): 1, ('aaaand', 1.0): 1, ("team'", 1.0): 1, ('macauley', 1.0): 1, ('howev', 1.0): 3, ('x3', 1.0): 1, ('wheeen', 1.0): 1, ('heechul', 1.0): 1, ('toast', 1.0): 2, ('coffee-weekday', 1.0): 1, ('9-11', 1.0): 1, ('sail', 1.0): 1, ("friday'", 1.0): 1, ('commerci', 1.0): 1, ('insur', 1.0): 1, ('requir', 1.0): 2, ('lookfortheo', 1.0): 1, ('cl', 1.0): 1, ('thou', 1.0): 1, ('april', 1.0): 2, ('airforc', 1.0): 1, ('clark', 1.0): 1, ('field', 1.0): 1, ('pampanga', 1.0): 1, ('troll', 1.0): 1, ('⚡', 1.0): 1, ('brow', 1.0): 1, ('oili', 1.0): 1, ('maricarljanah', 1.0): 1, ('6:15', 1.0): 1, ('degre', 1.0): 3, ('fahrenheit', 1.0): 1, ('🍸', 
1.0): 7, ('╲', 1.0): 4, ('─', 1.0): 8, ('╱', 1.0): 5, ('🍤', 1.0): 4, ('╭', 1.0): 4, ('╮', 1.0): 4, ('┓', 1.0): 2, ('┳', 1.0): 1, ('┣', 1.0): 1, ('╰', 1.0): 3, ('╯', 1.0): 3, ('┗', 1.0): 2, ('┻', 1.0): 1, ('stool', 1.0): 1, ('toppl', 1.0): 1, ('findyourfit', 1.0): 1, ('prefer', 1.0): 2, ('whomosexu', 1.0): 1, ('stack', 1.0): 1, ('pandora', 1.0): 3, ('digitalexet', 1.0): 1, ('digitalmarket', 1.0): 1, ('sociamedia', 1.0): 1, ('nb', 1.0): 1, ('bom', 1.0): 1, ('dia', 1.0): 1, ('todo', 1.0): 1, ('forklift', 1.0): 1, ('warehous', 1.0): 1, ('worker', 1.0): 1, ('lsceen', 1.0): 1, ('immatur', 1.0): 1, ('gandhi', 1.0): 1, ('grassi', 1.0): 1, ('feetblog', 1.0): 2, ('daughter', 1.0): 3, ('4yr', 1.0): 1, ('old-porridg', 1.0): 1, ('fiend', 1.0): 1, ('2nite', 1.0): 1, ('comp', 1.0): 1, ('vike', 1.0): 1, ('t20blast', 1.0): 1, ('np', 1.0): 1, ('tax', 1.0): 1, ('ooohh', 1.0): 1, ('petjam', 1.0): 1, ('virtual', 1.0): 2, ('pounc', 1.0): 1, ('bentek', 1.0): 1, ('agn', 1.0): 1, ('[email protected]', 1.0): 1, ('sam', 1.0): 3, ('fruiti', 1.0): 1, ('vodka', 1.0): 2, ('sellyourcarin', 1.0): 2, ('5word', 1.0): 2, ('chaloniklo', 1.0): 2, ('pic.twitter.com/jxz2lbv6o', 1.0): 1, ("paperwhite'", 1.0): 1, ('laser-lik', 1.0): 1, ('focu', 1.0): 1, ('ghost', 1.0): 3, ('tagsforlikesapp', 1.0): 2, ('instagood', 1.0): 2, ('tbt', 1.0): 1, ('socket', 1.0): 1, ('spanner', 1.0): 1, ('😴', 1.0): 1, ('pglcsgo', 1.0): 1, ('x2', 1.0): 1, ('tend', 1.0): 1, ('crave', 1.0): 1, ('slower', 1.0): 1, ('sjw', 1.0): 1, ('cakehamp', 1.0): 1, ('glow', 1.0): 2, ('yayyy', 1.0): 1, ('merced', 1.0): 1, ('hood', 1.0): 1, ('badg', 1.0): 1, ('host', 1.0): 1, ('drone', 1.0): 1, ('blow', 1.0): 1, ('ignor', 1.0): 1, ('retali', 1.0): 1, ('bolling', 1.0): 1, ("where'", 1.0): 1, ('denmark', 1.0): 1, ('whitey', 1.0): 1, ('cultur', 1.0): 2, ('course', 1.0): 1, ('intro', 1.0): 2, ('graphicdesign', 1.0): 1, ('videograph', 1.0): 1, ('space', 1.0): 2, ("ted'", 1.0): 1, ('bogu', 1.0): 1, ('1000', 1.0): 1, ('hahahaaah', 1.0): 1, ('owli', 1.0): 1, ('afternon', 1.0): 1, ('whangarei', 1.0): 1, ('kati', 1.0): 2, ('paulin', 1.0): 1, ('traffick', 1.0): 1, ('wors', 1.0): 3, ('henc', 1.0): 1, ('express', 1.0): 1, ('wot', 1.0): 1, ('hand-lett', 1.0): 1, ('roof', 1.0): 1, ('eas', 1.0): 1, ('2/2', 1.0): 1, ('sour', 1.0): 1, ('dough', 1.0): 1, ('egypt', 1.0): 1, ('hubbi', 1.0): 2, ('sakin', 1.0): 1, ('six', 1.0): 1, ('christma', 1.0): 2, ('avril', 1.0): 1, ('n04j', 1.0): 1, ('25', 1.0): 1, ('prosecco', 1.0): 1, ('pech', 1.0): 1, ('micro', 1.0): 1, ('catspj', 1.0): 1, ('4:15', 1.0): 1, ('lazyweekend', 1.0): 1, ('overdu', 1.0): 1, ('mice', 1.0): 1, ('💃', 1.0): 3, ('jurass', 1.0): 1, ('ding', 1.0): 1, ('nila', 1.0): 1, ('8)', 1.0): 1, ('cooki', 1.0): 1, ('shir', 1.0): 1, ('0', 1.0): 3, ('hale', 1.0): 1, ('cheshir', 1.0): 1, ('decor', 1.0): 1, ('lemm', 1.0): 2, ('rec', 1.0): 1, ('ingat', 1.0): 1, ('din', 1.0): 2, ('mono', 1.0): 1, ('kathryn', 1.0): 1, ('jr', 1.0): 1, ('hsr', 1.0): 1, ('base', 1.0): 3, ('major', 1.0): 1, ('sugarrush', 1.0): 1, ('knit', 1.0): 1, ('partli', 1.0): 1, ('homegirl', 1.0): 1, ('nanci', 1.0): 1, ('fenja', 1.0): 1, ('aapk', 1.0): 1, ('benchmark', 1.0): 1, ('ke', 1.0): 1, ('hisaab', 1.0): 1, ('ho', 1.0): 1, ('gaya', 1.0): 1, ('ofc', 1.0): 1, ('rtss', 1.0): 1, ('hwait', 1.0): 1, ('titanfal', 1.0): 1, ('xbox', 1.0): 2, ('ultim', 1.0): 2, ('gastronomi', 1.0): 1, ('newblogpost', 1.0): 1, ('foodiefriday', 1.0): 1, ('foodi', 1.0): 1, ('yoghurt', 1.0): 1, ('pancak', 1.0): 2, ('sabah', 1.0): 3, ('kapima', 1.0): 1, ('gelen', 1.0): 1, ('guzel', 1.0): 1, ('bir', 1.0): 1, 
('hediy', 1.0): 1, ('thanx', 1.0): 1, ('💞', 1.0): 2, ('visa', 1.0): 1, ('parisa', 1.0): 1, ('epiphani', 1.0): 1, ('lit', 1.0): 1, ('em-con', 1.0): 1, ('swore', 1.0): 1, ('0330 333 7234', 1.0): 1, ('kianweareproud', 1.0): 1, ('distract', 1.0): 1, ('dayofarch', 1.0): 1, ('10-20', 1.0): 1, ('bapu', 1.0): 1, ('ivypowel', 1.0): 1, ('newmus', 1.0): 1, ('sexchat', 1.0): 1, ('🍅', 1.0): 1, ('pathway', 1.0): 1, ('balkan', 1.0): 1, ('gypsi', 1.0): 1, ('mayhem', 1.0): 1, ('burek', 1.0): 1, ('meat', 1.0): 1, ('gibanica', 1.0): 1, ('pie', 1.0): 1, ('surrey', 1.0): 1, ('afterward', 1.0): 1, ('10.30', 1.0): 1, ('tempor', 1.0): 1, ('void', 1.0): 1, ('stem', 1.0): 1, ('sf', 1.0): 1, ('ykr', 1.0): 1, ('sparki', 1.0): 1, ('40mm', 1.0): 1, ('3.5', 1.0): 1, ('gr', 1.0): 1, ('rockfish', 1.0): 1, ('topwat', 1.0): 1, ('twitlong', 1.0): 1, ('me.so', 1.0): 1, ('jummah', 1.0): 3, ('durood', 1.0): 1, ('pak', 1.0): 1, ('cjradacomateada', 1.0): 2, ('supris', 1.0): 1, ('debut', 1.0): 1, ('shipper', 1.0): 1, ('asid', 1.0): 1, ('housem', 1.0): 1, ('737bigatingconcert', 1.0): 1, ('jedzjabłka', 1.0): 1, ('pijjabłka', 1.0): 1, ('polish', 1.0): 1, ('cider', 1.0): 1, ('mustread', 1.0): 1, ('cricket', 1.0): 1, ('5pm', 1.0): 1, ('queri', 1.0): 2, ('abbi', 1.0): 1, ('sumedh', 1.0): 1, ('sunnah', 1.0): 2, ('عن', 1.0): 2, ('quad', 1.0): 1, ('bike', 1.0): 1, ('carri', 1.0): 2, ('proprieti', 1.0): 1, ('chronic', 1.0): 1, ('superday', 1.0): 1, ('chocolatey', 1.0): 1, ('yasu', 1.0): 1, ('ooooh', 1.0): 1, ('hallo', 1.0): 2, ('dylan', 1.0): 2, ('laura', 1.0): 1, ('patric', 1.0): 2, ('keepin', 1.0): 1, ('mohr', 1.0): 1, ('guest', 1.0): 1, ("o'neal", 1.0): 1, ('tk', 1.0): 1, ('lua', 1.0): 1, ('stone', 1.0): 2, ('quicker', 1.0): 1, ('diet', 1.0): 1, ('sosweet', 1.0): 1, ('nominier', 1.0): 1, ('und', 1.0): 1, ('hardcor', 1.0): 1, ('😌', 1.0): 1, ('ff__special', 1.0): 1, ('acha', 1.0): 2, ('banda', 1.0): 1, ('✌', 1.0): 2, ('bhi', 1.0): 2, ('krta', 1.0): 1, ('beautifully-craft', 1.0): 1, ('mockingbird', 1.0): 1, ('diploma', 1.0): 1, ('blend', 1.0): 3, ('numbero', 1.0): 1, ('lolz', 1.0): 1, ('ambros', 1.0): 1, ('gwinett', 1.0): 1, ('bierc', 1.0): 1, ('ravag', 1.0): 1, ('illadvis', 1.0): 1, ('marriag', 1.0): 1, ('stare', 1.0): 1, ('cynic', 1.0): 2, ('yahuda', 1.0): 1, ('nosmet', 1.0): 1, ('poni', 1.0): 1, ('cuuut', 1.0): 1, ("f'ing", 1.0): 1, ('vacant', 1.0): 1, ('hauc', 1.0): 1, ('lovesss', 1.0): 1, ('hiss', 1.0): 1, ('overnight', 1.0): 1, ('cornish', 1.0): 1, ('all-clear', 1.0): 1, ('raincoat', 1.0): 1, ('measur', 1.0): 1, ('wealth', 1.0): 1, ('invest', 1.0): 2, ('garbi', 1.0): 1, ('wash', 1.0): 2, ('refuel', 1.0): 1, ('dunedin', 1.0): 1, ('kall', 1.0): 1, ('rakhi', 1.0): 1, ('12th', 1.0): 2, ('repres', 1.0): 3, ('slovenia', 1.0): 1, ('fridg', 1.0): 2, ('ludlow', 1.0): 1, ('28th', 1.0): 1, ('selway', 1.0): 1, ('submit', 1.0): 1, ('spanish', 1.0): 2, ('90210', 1.0): 1, ('oitnb', 1.0): 1, ('prepar', 1.0): 3, ('condit', 1.0): 1, ('msged', 1.0): 1, ('chiquito', 1.0): 1, ('ohaha', 1.0): 1, ('delhi', 1.0): 1, ('95', 1.0): 1, ('webtogsaward', 1.0): 1, ('grace', 1.0): 2, ('sheffield', 1.0): 1, ('tramlin', 1.0): 1, ('tl', 1.0): 2, ('hack', 1.0): 1, ('lad', 1.0): 1, ('beeepin', 1.0): 1, ('duper', 1.0): 1, ('handl', 1.0): 1, ('critiqu', 1.0): 1, ('contectu', 1.0): 1, ('ultor', 1.0): 2, ('mamaya', 1.0): 1, ('loiyal', 1.0): 1, ('para', 1.0): 1, ('truthfulwordsof', 1.0): 1, ('beanatividad', 1.0): 1, ('nknkkpagpapakumbaba', 1.0): 1, ('birthdaypres', 1.0): 1, ('compliment', 1.0): 1, ('swerv', 1.0): 1, ('goodtim', 1.0): 1, ('sinist', 1.0): 1, ('scare', 1.0): 1, 
('tryna', 1.0): 1, ('anonym', 1.0): 1, ('dipsatch', 1.0): 1, ('aunt', 1.0): 1, ('dagga', 1.0): 1, ('burket', 1.0): 1, ('2am', 1.0): 1, ('twine', 1.0): 1, ("diane'", 1.0): 1, ('happybirthday', 1.0): 1, ('thanksss', 1.0): 1, ('randomli', 1.0): 1, ('buckinghampalac', 1.0): 1, ('chibi', 1.0): 1, ('maker', 1.0): 1, ('timog', 1.0): 1, ('18th', 1.0): 1, ('otw', 1.0): 1, ('kami', 1.0): 1, ('feelinggood', 1.0): 1, ('demand', 1.0): 2, ('naman', 1.0): 1, ('barkin', 1.0): 1, ('yeap', 1.0): 2, ('onkey', 1.0): 1, ('umma', 1.0): 1, ('pervert', 1.0): 1, ('onyu', 1.0): 1, ('appa', 1.0): 1, ('luci', 1.0): 1, ('horribl', 1.0): 1, ('quantum', 1.0): 1, ('greater', 1.0): 1, ('blockchain', 1.0): 1, ('nowplay', 1.0): 1, ('loftey', 1.0): 1, ('routt', 1.0): 1, ('assia', 1.0): 1, ('.\n.\n.', 1.0): 1, ('joint', 1.0): 1, ('futurereleas', 1.0): 1, ("look'", 1.0): 1, ('scari', 1.0): 1, ('murder', 1.0): 1, ('mysteri', 1.0): 1, ('comma', 1.0): 1, ("j'", 1.0): 1, ('hunni', 1.0): 2, ('diva', 1.0): 1, ('emili', 1.0): 3, ('nathan', 1.0): 1, ('medit', 1.0): 1, ('alumni', 1.0): 1, ('mba', 1.0): 1, ('foto', 1.0): 1, ('what-is-your-fashion', 1.0): 1, ('lorenangel', 1.0): 1, ('kw', 1.0): 2, ('tellanoldjokeday', 1.0): 1, ('reqd', 1.0): 1, ('specul', 1.0): 1, ('consist', 1.0): 4, ('tropic', 1.0): 1, ('startupph', 1.0): 1, ('zodiac', 1.0): 1, ('rapunzel', 1.0): 1, ('therver', 1.0): 1, ('85552', 1.0): 1, ('bestoftheday', 1.0): 1, ('oralsex', 1.0): 1, ('carli', 1.0): 1, ('happili', 1.0): 1, ('contract', 1.0): 1, ('matsu_bouzu', 1.0): 1, ('sonic', 1.0): 2, ('videogam', 1.0): 1, ('harana', 1.0): 1, ('belfast', 1.0): 1, ('danni', 1.0): 1, ('rare', 1.0): 1, ('sponsorship', 1.0): 1, ('aswel', 1.0): 1, ('gigi', 1.0): 1, ('nick', 1.0): 1, ('austin', 1.0): 1, ('youll', 1.0): 1, ('weak', 1.0): 4, ('10,000', 1.0): 1, ('bravo', 1.0): 1, ('iamamonst', 1.0): 1, ('rxthedailysurveyvot', 1.0): 1, ('broke', 1.0): 1, ('ass', 1.0): 1, ('roux', 1.0): 1, ('walkin', 1.0): 1, ('audienc', 1.0): 2, ('pfb', 1.0): 1, ('jute', 1.0): 1, ('walangmakakapigilsakin', 1.0): 1, ('lori', 1.0): 1, ('ehm', 1.0): 1, ('trick', 1.0): 1, ('baekhyun', 1.0): 1, ('eyesmil', 1.0): 1, ('borrow', 1.0): 1, ('knive', 1.0): 1, ('thek', 1.0): 1, ('eventu', 1.0): 1, ('reaapear', 1.0): 1, ('kno', 1.0): 1, ('whet', 1.0): 1, ('gratti', 1.0): 1, ('shorter', 1.0): 1, ('tweetin', 1.0): 1, ('inshallah', 1.0): 1, ('banana', 1.0): 1, ('raspberri', 1.0): 2, ('healthylifestyl', 1.0): 1, ('aint', 1.0): 2, ('skate', 1.0): 1, ('analyz', 1.0): 1, ('varieti', 1.0): 1, ('4:13', 1.0): 1, ('insomnia', 1.0): 1, ('medic', 1.0): 1, ('opposit', 1.0): 1, ('everlast', 1.0): 1, ('yoga', 1.0): 1, ('massag', 1.0): 2, ('osteopath', 1.0): 1, ('trainer', 1.0): 1, ('sharm', 1.0): 1, ('al_master_band', 1.0): 1, ('tbc', 1.0): 1, ('unives', 1.0): 1, ('architectur', 1.0): 1, ('random', 1.0): 1, ('isnt', 1.0): 1, ('typo', 1.0): 1, ('snark', 1.0): 1, ('lession', 1.0): 1, ('drunk', 1.0): 1, ('bruuh', 1.0): 1, ('2week', 1.0): 1, ('50europ', 1.0): 1, ('🇫', 1.0): 4, ('🇷', 1.0): 4, ('iov', 1.0): 1, ('accord', 1.0): 1, ('mne', 1.0): 1, ('pchelok', 1.0): 1, ('ja', 1.0): 1, ('=:', 1.0): 2, ('sweetest', 1.0): 1, ('comet', 1.0): 1, ('ahah', 1.0): 1, ('candi', 1.0): 2, ('axio', 1.0): 1, ('rabbit', 1.0): 2, ('nutshel', 1.0): 1, ('taken', 1.0): 1, ('letshavecocktailsafternuclai', 1.0): 1, ('malik', 1.0): 1, ('umair', 1.0): 1, ('canon', 1.0): 1, ('gang', 1.0): 1, ('grind', 1.0): 1, ('thoracicbridg', 1.0): 1, ('5minut', 1.0): 1, ('nonscript', 1.0): 1, ('password', 1.0): 1, ('shoshannavassil', 1.0): 1, ('addmeonsnapchat', 1.0): 1, ('dmme', 
1.0): 1, ('mpoint', 1.0): 2, ('soph', 1.0): 1, ('anot', 1.0): 1, ('liao', 1.0): 2, ('ord', 1.0): 1, ('lor', 1.0): 1, ('sibei', 1.0): 1, ('xialan', 1.0): 1, ('thnx', 1.0): 1, ('malfunct', 1.0): 1, ('clown', 1.0): 1, ('joker', 1.0): 1, ('\U000fec00', 1.0): 1, ('nigth', 1.0): 1, ('estoy', 1.0): 1, ('escuchando', 1.0): 1, ('elsewher', 1.0): 1, ('bipolar', 1.0): 1, ('hahahahahahahahahahahahahaha', 1.0): 1, ('yoohoo', 1.0): 1, ('bajrangibhaijaanstorm', 1.0): 1, ('superhappi', 1.0): 1, ('doll', 1.0): 1, ('energi', 1.0): 1, ('f', 1.0): 3, ("m'dear", 1.0): 1, ('emma', 1.0): 2, ('alrd', 1.0): 1, ('dhan', 1.0): 2, ('satguru', 1.0): 1, ('tera', 1.0): 1, ('aasra', 1.0): 1, ('pita', 1.0): 1, ('keeo', 1.0): 1, ('darl', 1.0): 2, ('akarshan', 1.0): 1, ('sweetpea', 1.0): 1, ('gluten', 1.0): 1, ('pastri', 1.0): 2, ('highfiv', 1.0): 1, ('artsi', 1.0): 1, ('verbal', 1.0): 1, ('kaaa', 1.0): 1, ('oxford', 1.0): 2, ('wahoo', 1.0): 1, ('anchor', 1.0): 1, ('partnership', 1.0): 1, ('robbenisland', 1.0): 1, ('whale', 1.0): 1, ('aquat', 1.0): 1, ('safari', 1.0): 1, ('garru', 1.0): 1, ('liara', 1.0): 1, ('appoint', 1.0): 1, ('burnley', 1.0): 1, ('453', 1.0): 1, ('110', 1.0): 2, ('49', 1.0): 1, ('footbal', 1.0): 1, ('fm15', 1.0): 1, ('fmfamili', 1.0): 1, ('aamir', 1.0): 1, ('difficult', 1.0): 1, ('medium', 1.0): 1, ('nva', 1.0): 1, ('minuet', 1.0): 1, ('gamec', 1.0): 1, ('headrest', 1.0): 1, ('pit', 1.0): 1, ('spoken', 1.0): 1, ('advis', 1.0): 1, ('paypoint', 1.0): 1, ('deepthroat', 1.0): 1, ('truli', 1.0): 3, ('bee', 1.0): 2, ('upward', 1.0): 1, ('bound', 1.0): 1, ('movingonup', 1.0): 1, ('aitor', 1.0): 1, ('sn', 1.0): 1, ('ps4', 1.0): 2, ('jawad', 1.0): 1, ('presal', 1.0): 1, ('betcha', 1.0): 1, ('dumb', 1.0): 2, ('butt', 1.0): 1, ('qualki', 1.0): 1, ('808', 1.0): 1, ('milf', 1.0): 1, ('4like', 1.0): 1, ('sexysaturday', 1.0): 1, ('vw', 1.0): 1, ('umpfff', 1.0): 1, ('ca', 1.0): 1, ('domg', 1.0): 1, ('nanti', 1.0): 1, ('difollow', 1.0): 1, ('stubborn', 1.0): 1, ('nothavingit', 1.0): 1, ('klee', 1.0): 1, ('hem', 1.0): 1, ('congrad', 1.0): 1, ('accomplish', 1.0): 1, ('kfcroleplay', 1.0): 3, ('tregaron', 1.0): 1, ('boar', 1.0): 1, ('sweati', 1.0): 1, ('glyon', 1.0): 1, ('🚮', 1.0): 1, ("tee'", 1.0): 1, ('johnni', 1.0): 1, ('utub', 1.0): 1, ("video'", 1.0): 1, ('loss', 1.0): 1, ('combin', 1.0): 2, ('pigeon', 1.0): 1, ('fingerscross', 1.0): 1, ('photobomb', 1.0): 1, ('90', 1.0): 1, ('23', 1.0): 1, ('gimm', 1.0): 1, ('definetli', 1.0): 1, ('exit', 1.0): 1, ('bom-dia', 1.0): 1, ('apod', 1.0): 1, ('ultraviolet', 1.0): 1, ('m31', 1.0): 1, ('jul', 1.0): 1, ('oooh', 1.0): 1, ('yawn', 1.0): 1, ('ftw', 1.0): 1, ('maman', 1.0): 1, ('afterznoon', 1.0): 1, ('tweeep', 1.0): 1, ('abp', 1.0): 2, ('kiya', 1.0): 1, ('van', 1.0): 1, ('olymp', 1.0): 1, ('😷', 1.0): 1, ('classi', 1.0): 1, ('attach', 1.0): 1, ('equip', 1.0): 1, ('bobbl', 1.0): 1, ('anu', 1.0): 1, ('mh3', 1.0): 1, ('patch', 1.0): 1, ('psp', 1.0): 1, ('huffpost', 1.0): 1, ('tribut', 1.0): 1, ('h_eartshapedbox', 1.0): 1, ('magictrikband', 1.0): 1, ('magictrik', 1.0): 2, ('roommat', 1.0): 1, ('tami', 1.0): 1, ('b3dk', 1.0): 1, ('7an', 1.0): 1, ('ank', 1.0): 1, ('purpos', 1.0): 1, ('struggl', 1.0): 1, ('eagl', 1.0): 1, ('oceana', 1.0): 1, ('idk', 1.0): 3, ('med', 1.0): 1, ('fridayfauxpa', 1.0): 1, ('subtl', 1.0): 1, ('hint', 1.0): 1, ('prim', 1.0): 1, ('algorithm', 1.0): 1, ('iii', 1.0): 1, ('rosa', 1.0): 1, ('yvw', 1.0): 1, ('here', 1.0): 1, ('boost', 1.0): 1, ('unforgett', 1.0): 1, ('humor', 1.0): 1, ("mum'", 1.0): 1, ('hahahhaah', 1.0): 1, ('sombrero', 1.0): 1, ('lost', 1.0): 2, 
('spammer', 1.0): 1, ('proceed', 1.0): 1, ('entertain', 1.0): 1, ('100k', 1.0): 1, ('mileston', 1.0): 1, ('judith', 1.0): 1, ('district', 1.0): 1, ('council', 1.0): 1, ('midar', 1.0): 1, ('gender', 1.0): 1, ('ilysm', 1.0): 1, ('zen', 1.0): 1, ('neat', 1.0): 1, ('rider', 1.0): 1, ('fyi', 1.0): 1, ('dig', 1.0): 2, ('👱', 1.0): 1, ('👽', 1.0): 1, ('🌳', 1.0): 1, ('suspici', 1.0): 1, ('calori', 1.0): 1, ('harder', 1.0): 1, ('jessica', 1.0): 1, ('carina', 1.0): 1, ('francisco', 1.0): 1, ('teret', 1.0): 1, ('potassium', 1.0): 1, ('rehydr', 1.0): 1, ('drinkitallup', 1.0): 1, ('thirstquench', 1.0): 1, ('tapir', 1.0): 1, ('calf', 1.0): 1, ('mealtim', 1.0): 1, ('uhc', 1.0): 1, ('scale', 1.0): 1, ('network', 1.0): 1, ('areal', 1.0): 1, ('extremesport', 1.0): 1, ('quadbik', 1.0): 1, ('bloggersrequir', 1.0): 1, ('bloggersw', 1.0): 1, ('brainer', 1.0): 1, ('mse', 1.0): 1, ('fund', 1.0): 1, ('nooowww', 1.0): 1, ('lile', 1.0): 1, ('tid', 1.0): 1, ('tmi', 1.0): 1, ('deploy', 1.0): 1, ('jule', 1.0): 1, ('betti', 1.0): 1, ('hddc', 1.0): 1, ('salman', 1.0): 1, ('pthht', 1.0): 1, ('lfc', 1.0): 3, ('tope', 1.0): 1, ('xxoo', 1.0): 2, ('russia', 1.0): 2, ('silver-wash', 1.0): 1, ('fritillari', 1.0): 1, ('moon', 1.0): 1, ('ap', 1.0): 2, ('trash', 1.0): 2, ('clever', 1.0): 1, ("thank'", 1.0): 1, ('keven', 1.0): 1, ('pastim', 1.0): 1, ('ashramcal', 1.0): 1, ('ontrack', 1.0): 1, ('german', 1.0): 1, ('subtitl', 1.0): 1, ('pinter', 1.0): 1, ('morninggg', 1.0): 1, ('🐶', 1.0): 1, ('pete', 1.0): 1, ('awesome-o', 1.0): 1, ('multipl', 1.0): 1, ('cya', 1.0): 1, ('harrog', 1.0): 1, ('jet', 1.0): 1, ('supplier', 1.0): 1, ('req', 1.0): 1, ('fridayloug', 1.0): 1, ('4thstreetmus', 1.0): 1, ('hawaii', 1.0): 1, ('kick', 1.0): 1, ('deepli', 1.0): 1, ('[email protected]', 1.0): 1, ('thousand', 1.0): 2, ('newspap', 1.0): 1, ('lew', 1.0): 1, ('nah', 1.0): 1, ('fallout', 1.0): 2, ('technic', 1.0): 1, ('gunderson', 1.0): 1, ('europa', 1.0): 1, ('thoroughli', 1.0): 1, ('script', 1.0): 1, ('overtak', 1.0): 1, ('motorway', 1.0): 1, ('thu', 1.0): 1, ('niteflirt', 1.0): 1, ('hbu', 1.0): 2, ('bowl', 1.0): 1, ('chri', 1.0): 2, ('niall', 1.0): 2, ('94', 1.0): 1, ('ik', 1.0): 1, ('stydia', 1.0): 1, ('nawazuddin', 1.0): 1, ('siddiqu', 1.0): 1, ('nomnomnom', 1.0): 1, ('dukefreebiefriday', 1.0): 1, ('z', 1.0): 1, ('insyaallah', 1.0): 1, ('ham', 1.0): 1, ('villa', 1.0): 1, ('brum', 1.0): 1, ('deni', 1.0): 1, ('vagina', 1.0): 1, ('rli', 1.0): 1, ('izzi', 1.0): 1, ('mitch', 1.0): 1, ('minn', 1.0): 1, ('recently.websit', 1.0): 1, ('coolingtow', 1.0): 1, ('soon.thank', 1.0): 1, ('showinginterest', 1.0): 1, ('multicolor', 1.0): 1, ('wid', 1.0): 1, ('wedg', 1.0): 1, ('motiv', 1.0): 1, ('nnnnot', 1.0): 1, ("gf'", 1.0): 1, ('bluesidemenxix', 1.0): 1, ('ardent', 1.0): 1, ('mooorn', 1.0): 1, ('wuppert', 1.0): 1, ('fridayfunday', 1.0): 1, ('re-sign', 1.0): 1, ('chalkhil', 1.0): 1, ('midday', 1.0): 1, ('carter', 1.0): 1, ('remedi', 1.0): 1, ('atrack', 1.0): 1, ('christ', 1.0): 1, ('badminton', 1.0): 1, ("littl'un", 1.0): 1, ('ikprideofpak', 1.0): 1, ('janjua', 1.0): 1, ('pimpl', 1.0): 1, ('forehead', 1.0): 1, ('volcano', 1.0): 1, ('mag', 1.0): 1, ('miryenda', 1.0): 1, ("technology'", 1.0): 1, ('touchétoday', 1.0): 1, ('idownload', 1.0): 1, ('25ish', 1.0): 1, ('snowbal', 1.0): 1, ('nd', 1.0): 1, ('expir', 1.0): 1, ('6gb', 1.0): 1, ('loveu', 1.0): 1, ('morefuninthephilippin', 1.0): 1, ('laho', 1.0): 1, ('caramoan', 1.0): 1, ('kareem', 1.0): 1, ('surah', 1.0): 1, ('kahaf', 1.0): 1, ('melani', 1.0): 1, ('bosch', 1.0): 1, ('machin', 1.0): 1, ("week'", 1.0): 1, 
('refollow', 1.0): 1, ('😎', 1.0): 1, ('💁', 1.0): 1, ('relaps', 1.0): 1, ('prada', 1.0): 2, ('punjabiswillgetit', 1.0): 1, ('hitter', 1.0): 1, ('mass', 1.0): 2, ('shoud', 1.0): 1, ('1:12', 1.0): 1, ('ughtm', 1.0): 1, ('545', 1.0): 1, ('kissm', 1.0): 1, ('likeforfollow', 1.0): 1, ('overwhelm', 1.0): 1, ('groupmat', 1.0): 1, ('75', 1.0): 2, ('kyunk', 1.0): 1, ('aitchison', 1.0): 1, ('curvi', 1.0): 1, ('mont', 1.0): 1, ('doa', 1.0): 1, ('header', 1.0): 1, ('speaker', 1.0): 3, ('avoid', 1.0): 1, ('laboratori', 1.0): 1, ('idc', 1.0): 1, ('fuckin', 1.0): 2, ('wooo', 1.0): 2, ('neobyt', 1.0): 1, ('pirat', 1.0): 1, ('takedown', 1.0): 1, ('indirag', 1.0): 1, ('judiciari', 1.0): 1, ('commit', 1.0): 4, ('govt', 1.0): 1, ('polici', 1.0): 1, ('rbi', 1.0): 1, ('similar', 1.0): 1, ("thought'", 1.0): 1, ('progress', 1.0): 1, ('transfer', 1.0): 1, ('gg', 1.0): 1, ('defenit', 1.0): 1, ('nofx', 1.0): 1, ('friskyfiday', 1.0): 1, ('yipee', 1.0): 1, ('shed', 1.0): 1, ('incent', 1.0): 1, ('vege', 1.0): 1, ('marin', 1.0): 1, ('gz', 1.0): 1, ('rajeev', 1.0): 1, ('hvng', 1.0): 1, ('funfil', 1.0): 1, ('friday.it', 1.0): 1, ('ws', 1.0): 1, ('reali', 1.0): 1, ('diff', 1.0): 1, ('kabir.fel', 1.0): 1, ('dresden', 1.0): 1, ('germani', 1.0): 1, ('plot', 1.0): 1, ('tdf', 1.0): 1, ('🍷', 1.0): 2, ('☀', 1.0): 2, ('🚲', 1.0): 2, ('minion', 1.0): 2, ('slot', 1.0): 1, ("b'day", 1.0): 1, ('isabella', 1.0): 1, ('okeyyy', 1.0): 1, ('vddd', 1.0): 1, (');', 1.0): 1, ('selfee', 1.0): 1, ('insta', 1.0): 1, ('🙆', 1.0): 1, ('🙌', 1.0): 1, ('😛', 1.0): 1, ('🐒', 1.0): 1, ('😝', 1.0): 1, ('hhahhaaa', 1.0): 1, ('jeez', 1.0): 1, ('teamcannib', 1.0): 1, ('teamspacewhalingisthebest', 1.0): 1, ('fitfa', 1.0): 1, ('identifi', 1.0): 1, ('pharmaci', 1.0): 1, ('verylaterealis', 1.0): 1, ('iwishiknewbett', 1.0): 1, ('satisfi', 1.0): 1, ('ess-aych-eye-te', 1.0): 1, ('supposedli', 1.0): 1, ('👍', 1.0): 1, ('immedi', 1.0): 1, ("foxy'", 1.0): 1, ('instrument', 1.0): 1, ('alon', 1.0): 2, ('goldcoast', 1.0): 1, ('lelomustfal', 1.0): 1, ('meal', 1.0): 1, ('5g', 1.0): 1, ('liker', 1.0): 1, ('newdress', 1.0): 1, ('resist', 1.0): 1, ('fot', 1.0): 1, ('troy', 1.0): 1, ('twitterfollowerswhatsup', 1.0): 1, ('happyfriedday', 1.0): 1, ('keepsafealway', 1.0): 1, ('loveyeah', 1.0): 1, ('emojasp_her', 1.0): 1, ('vanilla', 1.0): 1, ('sidemen', 1.0): 1, ('yaaayyy', 1.0): 1, ('friendaaa', 1.0): 1, ('bulb', 1.0): 5, ('corn', 1.0): 6, ('1tbps4', 1.0): 1, ('divin', 1.0): 1, ('wheeli', 1.0): 1, ('bin', 1.0): 1, ('ubericecream', 1.0): 1, ('messengerforaday', 1.0): 1, ('kyli', 1.0): 1, ('toilet', 1.0): 1, ('ikaw', 1.0): 1, ('musta', 1.0): 1, ('cheatmat', 1.0): 1, ('kyuhyun', 1.0): 1, ('ghanton', 1.0): 1, ('easy.get', 1.0): 1, ('5:30', 1.0): 1, ('therein', 1.0): 1, ('majalah', 1.0): 1, ('dominiqu', 1.0): 1, ('lamp', 1.0): 1, ('a-foot', 1.0): 1, ('revamp', 1.0): 1, ('brainchild', 1.0): 1, ('confid', 1.0): 1, ('confin', 1.0): 1, ('colorado', 1.0): 1, ('goodyear', 1.0): 1, ('upto', 1.0): 1, ('cashback', 1.0): 1, ('yourewelcom', 1.0): 1, ('nightli', 1.0): 1, ('simpin', 1.0): 1, ('sketchbook', 1.0): 1, ('4wild', 1.0): 1, ('colorpencil', 1.0): 1, ('cray', 1.0): 1, ('6:30', 1.0): 1, ('imma', 1.0): 3, ('ob', 1.0): 1, ('11h', 1.0): 1, ('kino', 1.0): 1, ('adult', 1.0): 1, ('kardamena', 1.0): 1, ('samo', 1.0): 1, ('greec', 1.0): 1, ('caesar', 1.0): 1, ('salad', 1.0): 1, ('tad', 1.0): 1, ('bland', 1.0): 1, ('respond', 1.0): 1, ('okk', 1.0): 1, ('den', 1.0): 1, ('allov', 1.0): 1, ('hangout', 1.0): 1, ('whoever', 1.0): 1, ('tourist', 1.0): 1, ('♌', 1.0): 1, ('kutiyapanti', 1.0): 1, ('profession', 
1.0): 1, ('boomshot', 1.0): 1, ('fuh', 1.0): 1, ('yeeey', 1.0): 1, ('donot', 1.0): 1, ('expos', 1.0): 1, ('lipstick', 1.0): 1, ('cran', 1.0): 1, ('prayr', 1.0): 1, ('හෙල', 1.0): 1, ('හවුල', 1.0): 1, ('onemochaonelov', 1.0): 1, ('southpaw', 1.0): 1, ('geniu', 1.0): 1, ('stroma', 1.0): 1, ('🔴', 1.0): 1, ('younow', 1.0): 1, ('jonah', 1.0): 1, ('jareddd', 1.0): 1, ('postcod', 1.0): 1, ('talkmobil', 1.0): 1, ('huha', 1.0): 1, ('transform', 1.0): 1, ('sword', 1.0): 3, ('misread', 1.0): 1, ('richard', 1.0): 1, ('ibiza', 1.0): 1, ('birthdaymoneyforjesusjuic', 1.0): 1, ('ytb', 1.0): 1, ('tutori', 1.0): 1, ('construct', 1.0): 2, ('critic', 1.0): 1, ('ganesha', 1.0): 1, ('textur', 1.0): 1, ('photographi', 1.0): 1, ('hinduism', 1.0): 1, ('hindugod', 1.0): 1, ('elephantgod', 1.0): 1, ('selfish', 1.0): 1, ('bboy', 1.0): 1, ('cardgam', 1.0): 1, ('pixelart', 1.0): 1, ('gamedesign', 1.0): 1, ('indiedev', 1.0): 1, ('pixel_daili', 1.0): 1, ('plateau', 1.0): 1, ('laguna', 1.0): 1, ('tha', 1.0): 4, ('bahot', 1.0): 1, ('baje', 1.0): 1, ('raat', 1.0): 1, ('liya', 1.0): 1, ('hath', 1.0): 1, ('ghant', 1.0): 1, ('itna', 1.0): 2, ('bana', 1.0): 1, ('paya', 1.0): 1, ('uta', 1.0): 1, ('manga', 1.0): 1, ('jamuna', 1.0): 1, ('\\:', 1.0): 1, ('swiftma', 1.0): 1, ('trion', 1.0): 1, ('forum', 1.0): 1, ('b-day', 1.0): 1, ('disgust', 1.0): 1, ('commodor', 1.0): 1, ('annabel', 1.0): 1, ('bridg', 1.0): 1, ('quest', 1.0): 1, ('borderland', 1.0): 1, ('wanderrook', 1.0): 1, ('gm', 1.0): 1, ('preciou', 1.0): 2, ('mizz', 1.0): 1, ('bleedgreen', 1.0): 1, ('sophia', 1.0): 1, ('chicago', 1.0): 1, ('honeymoon', 1.0): 1, ("da'esh", 1.0): 1, ('co-ord', 1.0): 1, ('fsa', 1.0): 1, ('estat', 1.0): 1, ("when'", 1.0): 1, ('dusti', 1.0): 1, ('tunisia', 1.0): 2, ("class'", 1.0): 1, ('irrit', 1.0): 1, ('fiverr', 1.0): 1, ('gina', 1.0): 1, ('soproud', 1.0): 1, ('enought', 1.0): 1, ('hole', 1.0): 1, ('melbourneburg', 1.0): 1, ('arianna', 1.0): 1, ('esai', 1.0): 1, ('rotterdam', 1.0): 1, ('jordi', 1.0): 1, ('clasi', 1.0): 1, ('horni', 1.0): 1, ('salon', 1.0): 1, ('bleach', 1.0): 1, ('olaplex', 1.0): 1, ('damag', 1.0): 1, ('teamwork', 1.0): 1, ('zitecofficestori', 1.0): 1, ('다쇼', 1.0): 1, ('colleagu', 1.0): 1, ('eb', 1.0): 1, ("t'would", 1.0): 1, ('tweetup', 1.0): 1, ('detect', 1.0): 1, ('jonathancreek', 1.0): 1, ('dvr', 1.0): 1, ('kat', 1.0): 1, ('rarer', 1.0): 1, ('okkk', 1.0): 1, ('frend', 1.0): 1, ('milt', 1.0): 1, ('mario', 1.0): 1, ('rewatch', 1.0): 1, ('1600', 1.0): 1, ('sige', 1.0): 1, ('punta', 1.0): 1, ('kayo', 1.0): 1, ('nooo', 1.0): 1, ('prompt', 1.0): 1, ('t-mobil', 1.0): 1, ('orang', 1.0): 1, ('ee', 1.0): 1, ('teapot', 1.0): 1, ('hotter', 1.0): 1, ('»', 1.0): 1, ('londoutrad', 1.0): 1, ('kal', 1.0): 1, ('wayward', 1.0): 1, ('pine', 1.0): 1, ('muscl', 1.0): 1, ('ilikeit', 1.0): 1, ('belong', 1.0): 1, ('watford', 1.0): 1, ('enterpris', 1.0): 1, ('cube', 1.0): 1, ('particp', 1.0): 1, ('saudi', 1.0): 1, ('arabia', 1.0): 1, ('recogn', 1.0): 1, ('fanbas', 1.0): 3, ('bailona', 1.0): 3, ('responsibilti', 1.0): 1, ('sunlight', 1.0): 1, ('tiger', 1.0): 1, ('elev', 1.0): 1, ('horror', 1.0): 1, ('bitchesss', 1.0): 1, ('shitti', 1.0): 1, ('squash', 1.0): 1, ('becca', 1.0): 1, ('delta', 1.0): 1, ('nut', 1.0): 1, ('yun', 1.0): 1, ('joe', 1.0): 1, ('dirt', 1.0): 1, ('sharon', 1.0): 1, ('medicin', 1.0): 1, ('ttyl', 1.0): 1, ('gav', 1.0): 1, ('linda', 1.0): 1, ('3hr', 1.0): 1, ('tym', 1.0): 2, ('dieback', 1.0): 1, ('endit', 1.0): 1, ('minecon', 1.0): 1, ('sere', 1.0): 1, ('joerin', 1.0): 1, ('joshan', 1.0): 1, ('tandem', 1.0): 1, ('ligao', 1.0): 1, 
('albay', 1.0): 1, ('bcyc', 1.0): 1, ('lnh', 1.0): 1, ('sat', 1.0): 1, ('honorari', 1.0): 1, ('alac', 1.0): 1, ('skelo_ghost', 1.0): 1, ('madadagdagan', 1.0): 1, ('bmc', 1.0): 1, ('11:11', 1.0): 2, ('embarrass', 1.0): 1, ('entropi', 1.0): 1, ('evolut', 1.0): 2, ('loop', 1.0): 1, ('eva', 1.0): 1, ('camden', 1.0): 1, ('uhh', 1.0): 1, ('scoup', 1.0): 1, ('jren', 1.0): 1, ('nuest', 1.0): 1, ('lovelayyy', 1.0): 1, ('kidney', 1.0): 1, ('neuer', 1.0): 1, ('spray', 1.0): 1, ('[email protected]', 1.0): 1, ('uni', 1.0): 1, ('uff', 1.0): 1, ('karhi', 1.0): 1, ('thi', 1.0): 1, ('juaquin', 1.0): 1, ('v3nzor99', 1.0): 1, ('shell', 1.0): 1, ('heyi', 1.0): 1, ('flavor', 1.0): 1, ('thakyou', 1.0): 1, ('beatriz', 1.0): 1, ('cancel', 1.0): 1, ('puff', 1.0): 1, ('egg', 1.0): 2, ('tart', 1.0): 1, ('chai', 1.0): 1, ('mtr', 1.0): 1, ('alyssa', 1.0): 1, ('rub', 1.0): 1, ('tummi', 1.0): 1, ('zelda', 1.0): 1, ('ive', 1.0): 1, ('🎂', 1.0): 1, ('jiva', 1.0): 1, ('🍹', 1.0): 1, ('🍻', 1.0): 1, ('mubbarak', 1.0): 1, ('deborah', 1.0): 1, ('coupon', 1.0): 1, ('colourdeb', 1.0): 1, ('purpl', 1.0): 1, ("chippy'", 1.0): 1, ('vessel', 1.0): 1, ('ps', 1.0): 2, ('vintag', 1.0): 1, ('✫', 1.0): 4, ('˚', 1.0): 4, ('·', 1.0): 4, ('✵', 1.0): 4, ('⊹', 1.0): 4, ('1710', 1.0): 1, ('gooffeanotter', 1.0): 1, ('kiksex', 1.0): 1, ('mugshot', 1.0): 1, ('token', 1.0): 1, ('maritimen', 1.0): 1, ('rh', 1.0): 1, ('tatton', 1.0): 1, ('jump_julia', 1.0): 1, ('malema', 1.0): 1, ('fren', 1.0): 1, ('nuf', 1.0): 1, ('teas', 1.0): 1, ('alien', 1.0): 2, ('closer', 1.0): 1, ('monitor', 1.0): 1, ('kimmi', 1.0): 1, ("channel'", 1.0): 1, ('planetbollywoodnew', 1.0): 1, ('epi', 1.0): 1, ('tricki', 1.0): 1, ('be-shak', 1.0): 1, ('chenoweth', 1.0): 1, ('oodl', 1.0): 1, ('hailey', 1.0): 1, ('craźi', 1.0): 1, ('sęxxxÿ', 1.0): 1, ('cøôl', 1.0): 1, ('runway', 1.0): 1, ('gooodnight', 1.0): 1, ('iv', 1.0): 1, ('ri', 1.0): 1, ('jayci', 1.0): 1, ('karaok', 1.0): 1, ('ltsw', 1.0): 1, ('giant', 1.0): 1, ('1709', 1.0): 1, ('refus', 1.0): 1, ('collagen', 1.0): 1, ('2win', 1.0): 1, ('hopetowin', 1.0): 1, ('inventori', 1.0): 1, ('loveforfood', 1.0): 1, ('foodforthought', 1.0): 1, ('thoughtfortheday', 1.0): 1, ('carp', 1.0): 1, ('diem', 1.0): 1, ('nath', 1.0): 1, ('ning', 1.0): 1, ('although', 1.0): 1, ('harm', 1.0): 1, ('stormi', 1.0): 1, ('sync', 1.0): 1, ('devic', 1.0): 1, ('mess', 1.0): 1, ('nylon', 1.0): 1, ('gvb', 1.0): 1, ('cd', 1.0): 1, ('mountain.titl', 1.0): 1, ('unto', 1.0): 1, ('theworldwouldchang', 1.0): 1, ('categori', 1.0): 1, ('mah', 1.0): 1, ('panel', 1.0): 1, ("i'am", 1.0): 1, ('80-1', 1.0): 1, ('1708', 1.0): 1, ('neenkin', 1.0): 1, ('masterpiec', 1.0): 1, ('debit', 1.0): 1, ('beagl', 1.0): 1, ('♫', 1.0): 1, ('feat', 1.0): 1, ('charli', 1.0): 1, ('puth', 1.0): 1, ('wiz', 1.0): 1, ('khalifa', 1.0): 1, ('svu', 1.0): 1, ('darker', 1.0): 1, ('berni', 1.0): 1, ('henri', 1.0): 1, ('trap', 1.0): 1, ('tommi', 1.0): 1, ("vivian'", 1.0): 1, ('transpar', 1.0): 1, ('bitcoin', 1.0): 1, ('insight', 1.0): 1, ('ping', 1.0): 1, ('masquerad', 1.0): 1, ('zorroreturm', 1.0): 1, ('1707', 1.0): 1, ('pk', 1.0): 1, ('hay', 1.0): 1, ('jacquelin', 1.0): 1, ('passion', 1.0): 1, ('full-fledg', 1.0): 1, ('workplac', 1.0): 1, ('venu', 1.0): 1, ('lago', 1.0): 1, ('luxord', 1.0): 1, ('potato', 1.0): 1, ('hundr', 1.0): 1, ('cite', 1.0): 1, ('academ', 1.0): 1, ('pokiri', 1.0): 1, ('1nenokkadin', 1.0): 1, ('heritag', 1.0): 1, ('wood', 1.0): 1, ('beleaf', 1.0): 1, ('spnfamili', 1.0): 1, ('spn', 1.0): 1, ('alwayskeepfight', 1.0): 1, ('jaredpadalecki', 1.0): 1, ('jensenackl', 1.0): 1, 
('peasant', 1.0): 2, ('ahahha', 1.0): 1, ('distant', 1.0): 1, ('shout-out', 1.0): 1, ('adulthood', 1.0): 1, ('hopeless', 0.0): 2, ('tmr', 0.0): 3, (':(', 0.0): 4571, ('everyth', 0.0): 17, ('kid', 0.0): 20, ('section', 0.0): 3, ('ikea', 0.0): 1, ('cute', 0.0): 43, ('shame', 0.0): 19, ("i'm", 0.0): 343, ('nearli', 0.0): 3, ('19', 0.0): 8, ('2', 0.0): 41, ('month', 0.0): 23, ('heart', 0.0): 27, ('slide', 0.0): 1, ('wast', 0.0): 5, ('basket', 0.0): 1, ('“', 0.0): 15, ('hate', 0.0): 57, ('japanes', 0.0): 4, ('call', 0.0): 29, ('bani', 0.0): 2, ('”', 0.0): 11, ('dang', 0.0): 2, ('start', 0.0): 44, ('next', 0.0): 40, ('week', 0.0): 56, ('work', 0.0): 133, ('oh', 0.0): 92, ('god', 0.0): 15, ('babi', 0.0): 47, ('face', 0.0): 20, ('make', 0.0): 102, ('smile', 0.0): 10, ('neighbour', 0.0): 1, ('motor', 0.0): 1, ('ask', 0.0): 29, ('said', 0.0): 33, ('updat', 0.0): 11, ('search', 0.0): 3, ('sialan', 0.0): 1, ('athabasca', 0.0): 2, ('glacier', 0.0): 2, ('1948', 0.0): 1, (':-(', 0.0): 493, ('jasper', 0.0): 1, ('jaspernationalpark', 0.0): 1, ('alberta', 0.0): 1, ('explorealberta', 0.0): 1, ('…', 0.0): 16, ('realli', 0.0): 131, ('good', 0.0): 101, ('g', 0.0): 8, ('idea', 0.0): 10, ('never', 0.0): 57, ('go', 0.0): 224, ('meet', 0.0): 31, ('mare', 0.0): 1, ('ivan', 0.0): 1, ('happi', 0.0): 25, ('trip', 0.0): 11, ('keep', 0.0): 34, ('safe', 0.0): 5, ('see', 0.0): 124, ('soon', 0.0): 45, ('tire', 0.0): 50, ('hahahah', 0.0): 3, ('knee', 0.0): 2, ('replac', 0.0): 4, ('get', 0.0): 232, ('day', 0.0): 149, ('ouch', 0.0): 3, ('relat', 0.0): 2, ('sweet', 0.0): 7, ('n', 0.0): 21, ('sour', 0.0): 2, ('kind', 0.0): 11, ('bi-polar', 0.0): 1, ('peopl', 0.0): 75, ('life', 0.0): 33, ('...', 0.0): 331, ('cuz', 0.0): 4, ('full', 0.0): 16, ('pleass', 0.0): 2, ('im', 0.0): 129, ('sure', 0.0): 31, ('tho', 0.0): 28, ('feel', 0.0): 158, ('stupid', 0.0): 8, ("can't", 0.0): 180, ('seem', 0.0): 15, ('grasp', 0.0): 1, ('basic', 0.0): 2, ('digit', 0.0): 8, ('paint', 0.0): 3, ('noth', 0.0): 26, ("i'v", 0.0): 77, ('research', 0.0): 1, ('help', 0.0): 54, ('lord', 0.0): 2, ('lone', 0.0): 9, ('someon', 0.0): 57, ('talk', 0.0): 45, ('guy', 0.0): 62, ('girl', 0.0): 28, ('assign', 0.0): 5, ('project', 0.0): 3, ('😩', 0.0): 14, ('want', 0.0): 246, ('play', 0.0): 48, ('video', 0.0): 23, ('game', 0.0): 28, ('watch', 0.0): 77, ('movi', 0.0): 24, ('choreograph', 0.0): 1, ('hard', 0.0): 35, ('email', 0.0): 10, ('link', 0.0): 12, ('still', 0.0): 124, ('say', 0.0): 63, ('longer', 0.0): 12, ('avail', 0.0): 13, ('cri', 0.0): 46, ('bc', 0.0): 50, ('miss', 0.0): 301, ('mingm', 0.0): 1, ('much', 0.0): 139, ('sorri', 0.0): 148, ('mom', 0.0): 13, ('far', 0.0): 18, ('away', 0.0): 28, ("we'r", 0.0): 30, ('truli', 0.0): 5, ('flight', 0.0): 6, ('friend', 0.0): 39, ('happen', 0.0): 51, ('sad', 0.0): 123, ('dog', 0.0): 17, ('pee', 0.0): 2, ('’', 0.0): 27, ('bag', 0.0): 8, ('take', 0.0): 49, ('newwin', 0.0): 1, ('15', 0.0): 10, ('doushit', 0.0): 1, ('late', 0.0): 27, ('suck', 0.0): 23, ('sick', 0.0): 43, ('plan', 0.0): 17, ('first', 0.0): 27, ('gundam', 0.0): 1, ('night', 0.0): 46, ('nope', 0.0): 6, ('dollar', 0.0): 1, ('😭', 0.0): 29, ('listen', 0.0): 18, ('back', 0.0): 122, ('old', 0.0): 16, ('show', 0.0): 26, ('know', 0.0): 131, ('weird', 0.0): 10, ('got', 0.0): 104, ('u', 0.0): 193, ('leav', 0.0): 42, ('might', 0.0): 11, ('give', 0.0): 36, ('pale', 0.0): 2, ('imit', 0.0): 1, ('went', 0.0): 32, ('sea', 0.0): 1, ('massiv', 0.0): 4, ('fuck', 0.0): 58, ('rash', 0.0): 1, ('bodi', 0.0): 12, ('pain', 0.0): 21, ('thing', 0.0): 52, ('ever', 0.0): 30, ('home', 0.0): 63, 
('hi', 0.0): 34, ('absent', 0.0): 1, ('gran', 0.0): 2, ('knew', 0.0): 6, ('care', 0.0): 20, ('tell', 0.0): 26, ('love', 0.0): 152, ('wish', 0.0): 91, ('would', 0.0): 70, ('sequel', 0.0): 1, ('busi', 0.0): 28, ('sa', 0.0): 15, ('school', 0.0): 32, ('time', 0.0): 166, ('yah', 0.0): 3, ('xx', 0.0): 18, ('ouucchhh', 0.0): 1, ('one', 0.0): 148, ('wisdom', 0.0): 2, ('teeth', 0.0): 6, ('come', 0.0): 91, ('frighten', 0.0): 1, ('case', 0.0): 6, ('pret', 0.0): 1, ('wkwkw', 0.0): 1, ('verfi', 0.0): 1, ('activ', 0.0): 6, ('forget', 0.0): 8, ('follow', 0.0): 262, ('member', 0.0): 6, ('thank', 0.0): 107, ('join', 0.0): 8, ('goodby', 0.0): 14, ('´', 0.0): 4, ('chain', 0.0): 1, ('—', 0.0): 26, ('sentir-s', 0.0): 1, ('incompleta', 0.0): 1, ('okay', 0.0): 38, ('..', 0.0): 108, ('wednesday', 0.0): 5, ('marvel', 0.0): 1, ('thwart', 0.0): 1, ('awh', 0.0): 3, ("what'", 0.0): 15, ('chanc', 0.0): 16, ('zant', 0.0): 1, ('need', 0.0): 106, ('someth', 0.0): 28, ('x', 0.0): 39, ("when'", 0.0): 1, ('birthday', 0.0): 23, ('worst', 0.0): 14, ('part', 0.0): 11, ('bad', 0.0): 73, ('audraesar', 0.0): 1, ('sushi', 0.0): 3, ('pic', 0.0): 15, ('tl', 0.0): 8, ('drive', 0.0): 16, ('craaazzyy', 0.0): 2, ('pop', 0.0): 3, ('like', 0.0): 228, ('helium', 0.0): 1, ('balloon', 0.0): 1, ('climatechang', 0.0): 5, ('cc', 0.0): 6, ("california'", 0.0): 1, ('power', 0.0): 6, ('influenti', 0.0): 1, ('air', 0.0): 3, ('pollut', 0.0): 1, ('watchdog', 0.0): 1, ('califor', 0.0): 1, ('elhaida', 0.0): 1, ('rob', 0.0): 2, ('juri', 0.0): 1, ('came', 0.0): 16, ('10th', 0.0): 1, ('televot', 0.0): 1, ('idaho', 0.0): 2, ('restrict', 0.0): 2, ('fish', 0.0): 2, ('despit', 0.0): 2, ('region', 0.0): 2, ('drought-link', 0.0): 1, ('die-of', 0.0): 1, ('abrupt', 0.0): 1, ('climat', 0.0): 1, ('chang', 0.0): 27, ('may', 0.0): 16, ('doom', 0.0): 2, ('mammoth', 0.0): 1, ('megafauna', 0.0): 1, ('sc', 0.0): 3, ("australia'", 0.0): 1, ('dirtiest', 0.0): 2, ('station', 0.0): 3, ('consid', 0.0): 5, ('clean', 0.0): 6, ('energi', 0.0): 3, ('biomass', 0.0): 1, ("ain't", 0.0): 5, ('easi', 0.0): 6, ('green', 0.0): 7, ('golf', 0.0): 1, ('cours', 0.0): 7, ('california', 0.0): 1, ('ulti', 0.0): 1, ('well', 0.0): 56, ('mine', 0.0): 12, ('gonna', 0.0): 51, ('sexi', 0.0): 14, ('prexi', 0.0): 1, ('kindergarten', 0.0): 1, ('hungri', 0.0): 19, ('cant', 0.0): 47, ('find', 0.0): 53, ('book', 0.0): 20, ('sane', 0.0): 1, ('liter', 0.0): 15, ('three', 0.0): 7, ('loung', 0.0): 1, ('event', 0.0): 4, ('turn', 0.0): 17, ('boss', 0.0): 5, ('hozier', 0.0): 1, ("that'", 0.0): 61, ('true', 0.0): 22, ('soooner', 0.0): 1, ('ahh', 0.0): 7, ('fam', 0.0): 3, ('respectlost', 0.0): 1, ('hypercholesteloremia', 0.0): 1, ('ok', 0.0): 33, ('look', 0.0): 100, ('gift', 0.0): 11, ('calibraska', 0.0): 1, ('actual', 0.0): 24, ('genuin', 0.0): 2, ('contend', 0.0): 1, ('head', 0.0): 23, ('alway', 0.0): 56, ('hurt', 0.0): 41, ('stay', 0.0): 24, ('lmao', 0.0): 13, ('older', 0.0): 5, ('sound', 0.0): 19, ('upset', 0.0): 11, ('infinit', 0.0): 10, ('ao', 0.0): 1, ('stick', 0.0): 1, ('8th', 0.0): 1, ('either', 0.0): 13, ('seriou', 0.0): 8, ('yun', 0.0): 1, ('eh', 0.0): 4, ('room', 0.0): 11, ('way', 0.0): 42, ('hot', 0.0): 15, ('havent', 0.0): 11, ('found', 0.0): 11, ('handsom', 0.0): 2, ('jack', 0.0): 3, ('draw', 0.0): 2, ('shit', 0.0): 36, ('cut', 0.0): 14, ('encor', 0.0): 4, ('4thwin', 0.0): 4, ('baymax', 0.0): 1, ('french', 0.0): 4, ('mixer', 0.0): 1, ('💜', 0.0): 6, ('wft', 0.0): 1, ('awesom', 0.0): 5, ('replay', 0.0): 1, ('parti', 0.0): 15, ('promot', 0.0): 3, ('music', 0.0): 16, ('bank', 0.0): 9, ('short', 0.0): 11, 
('boy', 0.0): 18, ('order', 0.0): 16, ('receiv', 0.0): 7, ('hub', 0.0): 1, ('nearest', 0.0): 1, ('deliv', 0.0): 3, ('today', 0.0): 108, ('1/2', 0.0): 3, ('mum', 0.0): 14, ('loud', 0.0): 2, ('final', 0.0): 35, ('parasyt', 0.0): 1, ('alll', 0.0): 1, ('zayniscomingbackonjuli', 0.0): 23, ('26', 0.0): 24, ('bye', 0.0): 8, ('era', 0.0): 1, ('。', 0.0): 3, ('ω', 0.0): 1, ('」', 0.0): 2, ('∠', 0.0): 2, ('):', 0.0): 6, ('nathann', 0.0): 1, ('💕', 0.0): 7, ('hug', 0.0): 29, ('😊', 0.0): 9, ('beauti', 0.0): 11, ('dieididieieiei', 0.0): 1, ('stage', 0.0): 15, ('mean', 0.0): 43, ('hello', 0.0): 13, ('lion', 0.0): 3, ('think', 0.0): 75, ('screw', 0.0): 4, ('netflix', 0.0): 5, ('chill', 0.0): 7, ('di', 0.0): 7, ('ervin', 0.0): 1, ('ohh', 0.0): 8, ('yeah', 0.0): 41, ('hope', 0.0): 102, ('accept', 0.0): 2, ('offer', 0.0): 10, ('desper', 0.0): 2, ('year', 0.0): 46, ('snapchat', 0.0): 79, ('amargolonnard', 0.0): 2, ('kikhorni', 0.0): 13, ('snapm', 0.0): 4, ('tagsforlik', 0.0): 5, ('batalladelosgallo', 0.0): 2, ('webcamsex', 0.0): 4, ('ugh', 0.0): 26, ('stream', 0.0): 24, ('duti', 0.0): 3, ("u'v", 0.0): 1, ('gone', 0.0): 24, ('alien', 0.0): 1, ('aww', 0.0): 21, ('wanna', 0.0): 94, ('sorka', 0.0): 1, ('funer', 0.0): 4, ('text', 0.0): 15, ('phone', 0.0): 34, ('sunni', 0.0): 1, ('nonexist', 0.0): 1, ('wowza', 0.0): 1, ('fah', 0.0): 1, ('taylor', 0.0): 3, ('crop', 0.0): 1, ('boo', 0.0): 5, ('count', 0.0): 7, ('new', 0.0): 51, ('guitar', 0.0): 1, ('jonghyun', 0.0): 1, ('hyung', 0.0): 1, ('pleas', 0.0): 275, ('predict', 0.0): 2, ('sj', 0.0): 3, ('nomin', 0.0): 1, ('vs', 0.0): 4, ('pl', 0.0): 45, ('dude', 0.0): 12, ('calm', 0.0): 3, ('brace', 0.0): 5, ('sir', 0.0): 5, ('plu', 0.0): 4, ('4', 0.0): 18, ('shock', 0.0): 3, ('omggg', 0.0): 2, ('yall', 0.0): 4, ('deserv', 0.0): 8, ('whenev', 0.0): 3, ('spend', 0.0): 8, ('smoke', 0.0): 3, ('end', 0.0): 40, ('fall', 0.0): 16, ('asleep', 0.0): 25, ('1', 0.0): 26, ('point', 0.0): 14, ('close', 0.0): 20, ('grand', 0.0): 1, ('whyyi', 0.0): 7, ('long', 0.0): 38, ('must', 0.0): 15, ('annoy', 0.0): 11, ('evan', 0.0): 1, ('option', 0.0): 3, ('opt', 0.0): 1, ("who'", 0.0): 7, ('giveaway', 0.0): 3, ('muster', 0.0): 1, ('merch', 0.0): 4, ('ah', 0.0): 18, ('funni', 0.0): 6, ('drink', 0.0): 7, ('savanna', 0.0): 1, ('straw', 0.0): 1, ('ignor', 0.0): 16, ('yester', 0.0): 1, ('afternoon', 0.0): 3, ('sleep', 0.0): 90, ('ye', 0.0): 48, ('sadli', 0.0): 11, ('when', 0.0): 2, ('album', 0.0): 16, ('last', 0.0): 72, ('chocol', 0.0): 8, ('consum', 0.0): 1, ('werk', 0.0): 1, ('morn', 0.0): 31, ('foreal', 0.0): 1, ('wesen', 0.0): 1, ('uwes', 0.0): 1, ('mj', 0.0): 1, ('😂', 0.0): 24, ('catch', 0.0): 9, ('onlin', 0.0): 20, ('enough', 0.0): 24, ('haha', 0.0): 30, ("he'", 0.0): 23, ('bosen', 0.0): 1, ('die', 0.0): 21, ('egg', 0.0): 4, ('benni', 0.0): 1, ('sometim', 0.0): 16, ('followback', 0.0): 6, ('huhu', 0.0): 17, ('understand', 0.0): 15, ('badli', 0.0): 12, ('scare', 0.0): 16, ('>:(', 0.0): 47, ('al', 0.0): 4, ('kati', 0.0): 3, ('zaz', 0.0): 1, ('ami', 0.0): 2, ('lot', 0.0): 27, ('diari', 0.0): 1, ('read', 0.0): 20, ('rehash', 0.0): 1, ('websit', 0.0): 7, ('mushroom', 0.0): 1, ('piec', 0.0): 4, ('except', 0.0): 5, ('reach', 0.0): 3, ('anyway', 0.0): 12, ('vicki', 0.0): 1, ('omg', 0.0): 63, ('wtf', 0.0): 13, ('lip', 0.0): 3, ('virgin', 0.0): 2, ('your', 0.0): 8, ('45', 0.0): 1, ('hahah', 0.0): 6, ('ninasti', 0.0): 1, ('tsktsk', 0.0): 1, ('oppa', 0.0): 4, ('wont', 0.0): 9, ('dick', 0.0): 5, ('kawaii', 0.0): 1, ('manli', 0.0): 1, ('xbox', 0.0): 3, ('alreadi', 0.0): 52, ('comfi', 0.0): 1, ('bed', 0.0): 12, 
('youu', 0.0): 2, ('sigh', 0.0): 13, ('lol', 0.0): 43, ('potato', 0.0): 1, ('fri', 0.0): 7, ('guess', 0.0): 14, ("y'all", 0.0): 2, ('ugli', 0.0): 9, ('asf', 0.0): 1, ('huh', 0.0): 7, ('eish', 0.0): 1, ('ive', 0.0): 11, ('quit', 0.0): 9, ('lost', 0.0): 25, ('twitter', 0.0): 30, ('mojo', 0.0): 1, ('dont', 0.0): 53, ('mara', 0.0): 1, ('neh', 0.0): 2, ('fever', 0.0): 7, ('<3', 0.0): 25, ('poor', 0.0): 35, ('bb', 0.0): 7, ('abl', 0.0): 22, ('associ', 0.0): 1, ('councillor', 0.0): 1, ('confer', 0.0): 2, ('weekend', 0.0): 25, ('skype', 0.0): 6, ('account', 0.0): 20, ('hack', 0.0): 8, ('contact', 0.0): 7, ('creat', 0.0): 2, ('tweet', 0.0): 35, ('spree', 0.0): 4, ('na', 0.0): 29, ('sholong', 0.0): 1, ('reject', 0.0): 7, ('propos', 0.0): 2, ('gee', 0.0): 1, ('fli', 0.0): 10, ('gidi', 0.0): 1, ('pamper', 0.0): 1, ('lago', 0.0): 1, ('ehn', 0.0): 1, ('arrest', 0.0): 1, ('girlfriend', 0.0): 2, ('he', 0.0): 3, ('nice', 0.0): 19, ('person', 0.0): 15, ('idk', 0.0): 26, ('anybodi', 0.0): 7, ('song', 0.0): 27, ('disappear', 0.0): 1, ('itun', 0.0): 3, ('daze', 0.0): 1, ('confus', 0.0): 8, ('surviv', 0.0): 5, ('fragment', 0.0): 1, ("would'v", 0.0): 2, ('forc', 0.0): 2, ('horribl', 0.0): 9, ('weather', 0.0): 29, ('us', 0.0): 43, ('could', 0.0): 69, ('walao', 0.0): 1, ('kb', 0.0): 1, ('send', 0.0): 12, ('ill', 0.0): 16, ('djderek', 0.0): 1, ('mani', 0.0): 29, ('fun', 0.0): 32, ('gig', 0.0): 3, ('absolut', 0.0): 6, ('legend', 0.0): 3, ('wait', 0.0): 43, ('till', 0.0): 8, ('saturday', 0.0): 10, ('homework', 0.0): 2, ('pa', 0.0): 8, ('made', 0.0): 23, ('da', 0.0): 5, ('greek', 0.0): 2, ('tragedi', 0.0): 1, ('rain', 0.0): 43, ('gym', 0.0): 6, ('💪', 0.0): 2, ('🏻', 0.0): 4, ('🐒', 0.0): 1, ('what', 0.0): 8, ('wrong', 0.0): 33, ('struck', 0.0): 1, ('anymor', 0.0): 20, ('belgium', 0.0): 4, ('fabian', 0.0): 2, ('delph', 0.0): 6, ('fallen', 0.0): 3, ('hide', 0.0): 4, ('drake', 0.0): 1, ('silent', 0.0): 1, ('hear', 0.0): 33, ('rest', 0.0): 21, ('peac', 0.0): 5, ('mo', 0.0): 4, ('tonight', 0.0): 24, ('t20blast', 0.0): 1, ('ahhh', 0.0): 5, ('wake', 0.0): 21, ('mumma', 0.0): 2, ('7', 0.0): 16, ('dead', 0.0): 10, ('tomorrow', 0.0): 34, ("i'll", 0.0): 41, ('high', 0.0): 8, ('low', 0.0): 8, ('pray', 0.0): 13, ('appropri', 0.0): 1, ('. . 
.', 0.0): 2, ('awak', 0.0): 10, ('woke', 0.0): 14, ('upp', 0.0): 1, ('dm', 0.0): 23, ('luke', 0.0): 6, ('hey', 0.0): 26, ('babe', 0.0): 19, ('across', 0.0): 4, ('hindi', 0.0): 1, ('reaction', 0.0): 1, ('5s', 0.0): 1, ('run', 0.0): 15, ('space', 0.0): 5, ('tbh', 0.0): 14, ('disabl', 0.0): 2, ('pension', 0.0): 1, ('ptsd', 0.0): 1, ('imposs', 0.0): 4, ('physic', 0.0): 7, ('financi', 0.0): 2, ('nooo', 0.0): 16, ('broke', 0.0): 9, ('soo', 0.0): 3, ('amaz', 0.0): 16, ('toghet', 0.0): 1, ('around', 0.0): 20, ('p', 0.0): 5, ('hold', 0.0): 9, ('anoth', 0.0): 27, ('septemb', 0.0): 2, ('21st', 0.0): 2, ('snsd', 0.0): 2, ('interact', 0.0): 2, ('anna', 0.0): 5, ('akana', 0.0): 1, ('askip', 0.0): 1, ("t'exist", 0.0): 1, ('channel', 0.0): 6, ('owner', 0.0): 1, ('decid', 0.0): 10, ('broadcast', 0.0): 6, ('kei', 0.0): 2, ('rate', 0.0): 4, ('se', 0.0): 2, ('notic', 0.0): 26, ('exist', 0.0): 2, ('traffic', 0.0): 5, ('terribl', 0.0): 12, ('eye', 0.0): 12, ('small', 0.0): 9, ('kate', 0.0): 2, ('spade', 0.0): 1, ('pero', 0.0): 3, ('walang', 0.0): 1, ('maganda', 0.0): 1, ('aw', 0.0): 42, ('seen', 0.0): 23, ('agesss', 0.0): 1, ('add', 0.0): 26, ('corinehurleigh', 0.0): 1, ('snapchatm', 0.0): 6, ('instagram', 0.0): 4, ('addmeonsnapchat', 0.0): 2, ('sf', 0.0): 3, ('quot', 0.0): 6, ('kiksext', 0.0): 6, ('bum', 0.0): 2, ('zara', 0.0): 1, ('trouser', 0.0): 1, ('effect', 0.0): 4, ('spanish', 0.0): 1, ("it'okay", 0.0): 1, ('health', 0.0): 2, ('luck', 0.0): 6, ('freed', 0.0): 1, ('rock', 0.0): 3, ('orcalov', 0.0): 1, ('tri', 0.0): 65, ('big', 0.0): 21, ('cuddl', 0.0): 8, ('lew', 0.0): 1, ('kiss', 0.0): 4, ('em', 0.0): 1, ('crave', 0.0): 8, ('banana', 0.0): 4, ('crumbl', 0.0): 1, ('mcflurri', 0.0): 1, ('cabl', 0.0): 1, ('car', 0.0): 17, ('brother', 0.0): 10, ("venus'", 0.0): 1, ('concept', 0.0): 4, ('rli', 0.0): 5, ('tea', 0.0): 7, ('tagal', 0.0): 2, ("we'v", 0.0): 3, ('appoint', 0.0): 1, ("i'd", 0.0): 11, ('sinc', 0.0): 35, ("there'", 0.0): 18, ('milk', 0.0): 3, ('left', 0.0): 26, ('cereal', 0.0): 2, ('film', 0.0): 6, ('date', 0.0): 7, ('previou', 0.0): 2, ('73', 0.0): 2, ('user', 0.0): 1, ('everywher', 0.0): 6, ('fansign', 0.0): 1, ('photo', 0.0): 15, ('expens', 0.0): 7, ('zzzz', 0.0): 1, ('let', 0.0): 37, ('sun', 0.0): 10, ('yet', 0.0): 33, ("bff'", 0.0): 1, ('extrem', 0.0): 3, ('stress', 0.0): 10, ('anyth', 0.0): 19, ('win', 0.0): 27, ("deosn't", 0.0): 1, ('liverpool', 0.0): 2, ('pool', 0.0): 3, ('though', 0.0): 57, ('bro', 0.0): 3, ('great', 0.0): 22, ('news', 0.0): 21, ('self', 0.0): 1, ('esteem', 0.0): 1, ('lowest', 0.0): 1, ('better', 0.0): 36, ('tacki', 0.0): 1, ('taken', 0.0): 9, ('man', 0.0): 32, ('lucki', 0.0): 16, ('charm', 0.0): 1, ('haaretz', 0.0): 1, ('israel', 0.0): 1, ('syria', 0.0): 2, ('continu', 0.0): 1, ('develop', 0.0): 5, ('chemic', 0.0): 1, ('weapon', 0.0): 2, ('offici', 0.0): 3, ('wsj', 0.0): 2, ('rep', 0.0): 1, ('bt', 0.0): 4, ('mr', 0.0): 9, ('wong', 0.0): 1, ('confisc', 0.0): 1, ('art', 0.0): 4, ('thought', 0.0): 31, ('icepack', 0.0): 1, ('dose', 0.0): 2, ('killer', 0.0): 2, ('board', 0.0): 1, ('whimper', 0.0): 1, ('fan', 0.0): 17, ('senpai', 0.0): 1, ('buttsex', 0.0): 1, ('joke', 0.0): 8, ('headlin', 0.0): 1, ("dn't", 0.0): 1, ('brk', 0.0): 1, (":'(", 0.0): 13, ('hit', 0.0): 7, ('voic', 0.0): 9, ('falsetto', 0.0): 1, ('zone', 0.0): 2, ('leannerin', 0.0): 1, ('hornykik', 0.0): 17, ('loveofmylif', 0.0): 2, ('dmme', 0.0): 2, ('pussi', 0.0): 2, ('newmus', 0.0): 3, ('sexo', 0.0): 2, ('s2', 0.0): 1, ('spain', 0.0): 4, ('delay', 0.0): 5, ('kill', 0.0): 22, ('singl', 0.0): 10, ('untruth', 0.0): 1, 
('cross', 0.0): 4, ('countri', 0.0): 6, ('ij', 0.0): 1, ('💥', 0.0): 1, ('✨', 0.0): 1, ('💫', 0.0): 1, ('bear', 0.0): 2, ('littl', 0.0): 21, ('apart', 0.0): 7, ('live', 0.0): 37, ('soshi', 0.0): 1, ('didnt', 0.0): 24, ('buttt', 0.0): 2, ('congrat', 0.0): 2, ('sunday', 0.0): 8, ('friday', 0.0): 12, ('shoulda', 0.0): 1, ('move', 0.0): 12, ('w', 0.0): 22, ('caus', 0.0): 16, ("they'r", 0.0): 14, ('heyyy', 0.0): 1, ('yeol', 0.0): 2, ('solo', 0.0): 6, ('dancee', 0.0): 1, ('inter', 0.0): 1, ('nemanja', 0.0): 1, ('vidic', 0.0): 1, ('roma', 0.0): 1, ("mom'", 0.0): 2, ('linguist', 0.0): 1, ("dad'", 0.0): 1, ('comput', 0.0): 6, ('scientist', 0.0): 1, ('dumbest', 0.0): 1, ('famili', 0.0): 9, ('broken', 0.0): 11, ('ice', 0.0): 35, ('cream', 0.0): 32, ('pour', 0.0): 1, ('crash', 0.0): 6, ('scienc', 0.0): 1, ('resourc', 0.0): 1, ('vehicl', 0.0): 5, ('ate', 0.0): 10, ('ayex', 0.0): 1, ('eat', 0.0): 27, ('swear', 0.0): 6, ('lamon', 0.0): 1, ('scroll', 0.0): 1, ('curv', 0.0): 2, ('😉', 0.0): 1, ('cement', 0.0): 1, ('cast', 0.0): 5, ('10.3', 0.0): 1, ('k', 0.0): 9, ('sign', 0.0): 9, ('zayn', 0.0): 8, ('bot', 0.0): 1, ('plz', 0.0): 3, ('mention', 0.0): 9, ('jmu', 0.0): 1, ('camp', 0.0): 7, ('teas', 0.0): 3, ('sweetest', 0.0): 1, ('awuna', 0.0): 1, ('mbulelo', 0.0): 1, ('match', 0.0): 7, ('pig', 0.0): 2, ('although', 0.0): 5, ('crackl', 0.0): 1, ('nois', 0.0): 3, ('plug', 0.0): 2, ('fuse', 0.0): 1, ('dammit', 0.0): 3, ('tip', 0.0): 2, ('carlton', 0.0): 2, ('aflblueshawk', 0.0): 2, ("alex'", 0.0): 1, ('hous', 0.0): 16, ('motorsport', 0.0): 1, ('seri', 0.0): 3, ('disc', 0.0): 1, ('right', 0.0): 51, ('cheeki', 0.0): 1, ('j', 0.0): 1, ('instead', 0.0): 4, ('seo', 0.0): 1, ('nl', 0.0): 1, ('bud', 0.0): 1, ('christi', 0.0): 1, ('xo', 0.0): 1, ('niec', 0.0): 1, ('summer', 0.0): 19, ('bloodi', 0.0): 2, ('sandwhich', 0.0): 1, ('buset', 0.0): 1, ('discrimin', 0.0): 4, ('five', 0.0): 5, ('learn', 0.0): 5, ('pregnanc', 0.0): 2, ('foot', 0.0): 5, ('f', 0.0): 4, ('matern', 0.0): 1, ('kick', 0.0): 6, ('domesticviol', 0.0): 1, ('law', 0.0): 4, ('domest', 0.0): 1, ('violenc', 0.0): 2, ('victim', 0.0): 4, ('98fm', 0.0): 1, ('exactli', 0.0): 5, ('unfortun', 0.0): 21, ('yesterday', 0.0): 13, ('uk', 0.0): 9, ('govern', 0.0): 1, ('sapiosexu', 0.0): 1, ('damn', 0.0): 29, ('beta', 0.0): 4, ('12', 0.0): 8, ('hour', 0.0): 35, ('world', 0.0): 17, ('hulk', 0.0): 3, ('hogan', 0.0): 3, ('scrub', 0.0): 1, ('wwe', 0.0): 2, ('histori', 0.0): 2, ('iren', 0.0): 4, ('mistak', 0.0): 6, ('naa', 0.0): 1, ('sold', 0.0): 6, ('h_my_k', 0.0): 1, ('lose', 0.0): 7, ('valentin', 0.0): 2, ('et', 0.0): 3, ("r'ship", 0.0): 1, ('btwn', 0.0): 1, ('homo', 0.0): 2, ('biphob', 0.0): 2, ('comment', 0.0): 4, ('certain', 0.0): 6, ('disciplin', 0.0): 2, ('incl', 0.0): 2, ('european', 0.0): 3, ('lang', 0.0): 6, ('lit', 0.0): 2, ('educ', 0.0): 2, ('fresherstofin', 0.0): 1, ('💔', 0.0): 3, ('dream', 0.0): 24, ('gettin', 0.0): 2, ('realist', 0.0): 4, ('thx', 0.0): 1, ('real', 0.0): 21, ('isnt', 0.0): 7, ('prefer', 0.0): 4, ('benzema', 0.0): 2, ('hahahahahaah', 0.0): 1, ('donno', 0.0): 1, ('korean', 0.0): 2, ('languag', 0.0): 5, ('russian', 0.0): 2, ('waaa', 0.0): 1, ('eidwithgrof', 0.0): 1, ('boreddd', 0.0): 1, ('mug', 0.0): 3, ('piss', 0.0): 3, ('tiddler', 0.0): 1, ('silli', 0.0): 2, ('least', 0.0): 15, ('card', 0.0): 7, ('chorong', 0.0): 1, ('leader', 0.0): 1, ('에이핑크', 0.0): 3, ('더쇼', 0.0): 4, ('clan', 0.0): 1, ('slot', 0.0): 2, ('open', 0.0): 16, ('pfff', 0.0): 1, ('privat', 0.0): 2, ('bugbounti', 0.0): 1, ('self-xss', 0.0): 1, ('host', 0.0): 2, ('header', 0.0): 3, 
('poison', 0.0): 3, ('code', 0.0): 8, ('execut', 0.0): 1, ('ktksbye', 0.0): 1, ('connect', 0.0): 3, ('compani', 0.0): 3, ('alert', 0.0): 2, ('cancel', 0.0): 10, ('uber', 0.0): 3, ('everyon', 0.0): 26, ('els', 0.0): 4, ('offic', 0.0): 7, ('ahahah', 0.0): 1, ('petit', 0.0): 1, ('relationship', 0.0): 4, ('height', 0.0): 2, ('cost', 0.0): 1, ('600', 0.0): 2, ('£', 0.0): 6, ('secur', 0.0): 4, ('odoo', 0.0): 2, ('8', 0.0): 11, ('partner', 0.0): 2, ('commun', 0.0): 2, ('spirit', 0.0): 3, ('jgh', 0.0): 2, ('effin', 0.0): 1, ('facebook', 0.0): 4, ('anyon', 0.0): 17, ("else'", 0.0): 1, ('box', 0.0): 8, ('ap', 0.0): 3, ('stori', 0.0): 13, ('london', 0.0): 12, ('imagin', 0.0): 2, ('elsewher', 0.0): 1, ('someday', 0.0): 1, ('ben', 0.0): 3, ('provid', 0.0): 3, ('name', 0.0): 15, ('branch', 0.0): 1, ('visit', 0.0): 12, ('address', 0.0): 3, ('concern', 0.0): 3, ('welsh', 0.0): 1, ('pod', 0.0): 1, ('juli', 0.0): 12, ('laura', 0.0): 4, ('insid', 0.0): 10, ('train', 0.0): 12, ('D;', 0.0): 1, ('talk-kama', 0.0): 1, ('hawako', 0.0): 1, ('waa', 0.0): 1, ('kimaaani', 0.0): 1, ('prisss', 0.0): 1, ('baggag', 0.0): 2, ('claim', 0.0): 3, ('plane', 0.0): 2, ('niamh', 0.0): 1, ('forev', 0.0): 10, ('hmmm', 0.0): 2, ('sugar', 0.0): 3, ('rare', 0.0): 1, ('paper', 0.0): 16, ('town', 0.0): 14, ('score', 0.0): 3, ('stuck', 0.0): 8, ('agh', 0.0): 2, ('middl', 0.0): 7, ('undercoverboss', 0.0): 1, ('تكفى', 0.0): 1, ('10', 0.0): 8, ('job', 0.0): 13, ('cat', 0.0): 17, ('forgotten', 0.0): 3, ('yep', 0.0): 5, ('stop', 0.0): 43, ('ach', 0.0): 2, ('wrist', 0.0): 1, ('nake', 0.0): 3, ('forgot', 0.0): 14, ('bracelet', 0.0): 3, ('ligo', 0.0): 1, ('dozen', 0.0): 1, ('parent', 0.0): 8, ('children', 0.0): 2, ('shark', 0.0): 2, ('selfi', 0.0): 6, ('heartach', 0.0): 1, ('zayniscomingback', 0.0): 3, ('mix', 0.0): 2, ('sweden', 0.0): 1, ('breath', 0.0): 4, ('moment', 0.0): 14, ('word', 0.0): 16, ('elmhurst', 0.0): 1, ('fc', 0.0): 1, ('etid', 0.0): 1, ("chillin'with", 0.0): 1, ('father', 0.0): 2, ('istanya', 0.0): 1, ('2suppli', 0.0): 1, ('extra', 0.0): 3, ('infrastructur', 0.0): 2, ('teacher', 0.0): 2, ('doctor', 0.0): 4, ('nurs', 0.0): 2, ('paramed', 0.0): 1, ('countless', 0.0): 1, ('2cope', 0.0): 1, ('bore', 0.0): 23, ('plea', 0.0): 2, ('arian', 0.0): 1, ('hahahaha', 0.0): 6, ('slr', 0.0): 1, ('kendal', 0.0): 1, ('kyli', 0.0): 3, ("kylie'", 0.0): 1, ('manila', 0.0): 3, ('jeebu', 0.0): 1, ('reabsorbt', 0.0): 1, ('tooth', 0.0): 2, ('abscess', 0.0): 1, ('threaten', 0.0): 2, ('affect', 0.0): 1, ('front', 0.0): 6, ('crown', 0.0): 1, ('ooouch', 0.0): 1, ('barney', 0.0): 1, ("be'", 0.0): 1, ('yo', 0.0): 4, ('later', 0.0): 14, ('realis', 0.0): 6, ('problemat', 0.0): 1, ('expect', 0.0): 5, ('proud', 0.0): 8, ('mess', 0.0): 7, ('maa', 0.0): 2, ('without', 0.0): 25, ('bangalor', 0.0): 1, ('awww', 0.0): 23, ('lui', 0.0): 1, ('manzano', 0.0): 1, ('shaaa', 0.0): 1, ('super', 0.0): 11, ('7th', 0.0): 1, ('conven', 0.0): 1, ('2:30', 0.0): 2, ('pm', 0.0): 8, ('forward', 0.0): 6, ('delet', 0.0): 5, ('turkey', 0.0): 1, ('bomb', 0.0): 3, ('isi', 0.0): 1, ('allow', 0.0): 9, ('usa', 0.0): 2, ('use', 0.0): 43, ('airfield', 0.0): 1, ('jet', 0.0): 1, ("jack'", 0.0): 1, ('spam', 0.0): 6, ('sooo', 0.0): 16, ('☺', 0.0): 3, ("mommy'", 0.0): 1, ('reason', 0.0): 8, ('overweight', 0.0): 1, ('sigeg', 0.0): 1, ('habhab', 0.0): 1, ('masud', 0.0): 1, ('kaha', 0.0): 1, ('ko', 0.0): 10, ('akong', 0.0): 1, ('un', 0.0): 1, ('hella', 0.0): 4, ('matter', 0.0): 4, ('pala', 0.0): 1, ('hahaha', 0.0): 11, ('lesson', 0.0): 1, ('dolphin', 0.0): 1, ('xxx', 0.0): 12, ('holi', 0.0): 2, 
('anythin', 0.0): 1, ('trend', 0.0): 6, ('radio', 0.0): 4, ('sing', 0.0): 5, ('bewar', 0.0): 1, ('agonis', 0.0): 1, ('experi', 0.0): 2, ('ahead', 0.0): 3, ('modimo', 0.0): 1, ('ho', 0.0): 3, ('tseba', 0.0): 1, ('wena', 0.0): 1, ('fela', 0.0): 1, ('emot', 0.0): 8, ('hubbi', 0.0): 1, ('delight', 0.0): 1, ('return', 0.0): 6, ('bill', 0.0): 6, ('nowt', 0.0): 1, ('wors', 0.0): 8, ('willi', 0.0): 1, ('gon', 0.0): 1, ('vomit', 0.0): 1, ('famou', 0.0): 5, ('bowl', 0.0): 1, ('devast', 0.0): 1, ('titan', 0.0): 1, ('ae', 0.0): 1, ('mark', 0.0): 2, ('hair', 0.0): 21, ('shini', 0.0): 1, ('wavi', 0.0): 1, ('emo', 0.0): 2, ('germani', 0.0): 4, ('load', 0.0): 9, ('shed', 0.0): 2, ('ha', 0.0): 7, ('bheyp', 0.0): 1, ('ayemso', 0.0): 1, ('ear', 0.0): 5, ('swell', 0.0): 2, ('sm', 0.0): 7, ('fb', 0.0): 7, ('remind', 0.0): 3, ('abt', 0.0): 3, ('womad', 0.0): 1, ('wut', 0.0): 1, ('hell', 0.0): 11, ('viciou', 0.0): 1, ('circl', 0.0): 1, ('surpris', 0.0): 5, ('ticket', 0.0): 12, ('codi', 0.0): 1, ('simpson', 0.0): 1, ('concert', 0.0): 11, ('singapor', 0.0): 4, ('august', 0.0): 5, ('pooo', 0.0): 2, ('bh3', 0.0): 1, ('enter', 0.0): 1, ('pitchwar', 0.0): 1, ('chap', 0.0): 1, ("mine'", 0.0): 1, ('transcript', 0.0): 1, ("apma'", 0.0): 1, ('shoulder', 0.0): 2, ('bitch', 0.0): 11, ('competit', 0.0): 1, ("it'll", 0.0): 3, ('fine', 0.0): 6, ('timw', 0.0): 1, ('acc', 0.0): 8, ('rude', 0.0): 11, ('vitamin', 0.0): 1, ('e', 0.0): 9, ('oil', 0.0): 1, ('massag', 0.0): 5, ('everyday', 0.0): 7, ('healthier', 0.0): 1, ('easier', 0.0): 3, ('stretch', 0.0): 1, ('choos', 0.0): 7, ('blockjam', 0.0): 1, ("schedule'", 0.0): 1, ('whack', 0.0): 1, ('kik', 0.0): 69, ('thelock', 0.0): 1, ('76', 0.0): 1, ('sex', 0.0): 6, ('omegl', 0.0): 4, ('coupl', 0.0): 2, ('travel', 0.0): 11, ('hotgirl', 0.0): 2, ('2009', 0.0): 1, ('3', 0.0): 37, ('ghantay', 0.0): 1, ('light', 0.0): 8, ('nai', 0.0): 1, ('hay', 0.0): 8, ('deni', 0.0): 1, ('ruin', 0.0): 11, ('laguna', 0.0): 1, ('exit', 0.0): 2, ('gomen', 0.0): 1, ('heck', 0.0): 5, ('fair', 0.0): 12, ('grew', 0.0): 2, ('half', 0.0): 10, ('inch', 0.0): 2, ('two', 0.0): 19, ('problem', 0.0): 7, ('suuuper', 0.0): 1, ('65', 0.0): 1, ('sale', 0.0): 8, ('inact', 0.0): 8, ('orphan', 0.0): 1, ('black', 0.0): 12, ('earlier', 0.0): 9, ('whaaat', 0.0): 5, ('kaya', 0.0): 2, ('naaan', 0.0): 1, ('paus', 0.0): 1, ('randomli', 0.0): 1, ('app', 0.0): 13, ('3:30', 0.0): 1, ('walk', 0.0): 7, ('inglewood', 0.0): 1, ('ummm', 0.0): 4, ('anxieti', 0.0): 3, ('readi', 0.0): 12, ('also', 0.0): 19, ('charcoal', 0.0): 1, ('til', 0.0): 5, ('mid-end', 0.0): 1, ('aug', 0.0): 1, ('noooo', 0.0): 1, ('heard', 0.0): 6, ('rip', 0.0): 12, ('rodfanta', 0.0): 1, ('wasp', 0.0): 2, ('sting', 0.0): 1, ('avert', 0.0): 1, ('bug', 0.0): 3, ('(:', 0.0): 7, ('exo', 0.0): 2, ('seekli', 0.0): 1, ('riptito', 0.0): 1, ('manbearpig', 0.0): 1, ('cannot', 0.0): 7, ('grow', 0.0): 3, ('shorter', 0.0): 1, ('academ', 0.0): 1, ('free', 0.0): 19, ('exclus', 0.0): 2, ('unfair', 0.0): 7, ('esp', 0.0): 4, ('regard', 0.0): 1, ('current', 0.0): 7, ('bleak', 0.0): 1, ('german', 0.0): 1, ('chart', 0.0): 2, ('situat', 0.0): 2, ('entri', 0.0): 4, ('even', 0.0): 70, ('top', 0.0): 6, ('100', 0.0): 8, ('pfft', 0.0): 1, ('place', 0.0): 18, ('white', 0.0): 7, ('wash', 0.0): 1, ('polaroid', 0.0): 1, ('newbethvideo', 0.0): 1, ('greec', 0.0): 2, ('xur', 0.0): 2, ('imi', 0.0): 3, ('fill', 0.0): 1, ('♡', 0.0): 11, ('♥', 0.0): 22, ('xoxoxo', 0.0): 1, ('pictur', 0.0): 17, ('stud', 0.0): 1, ('hund', 0.0): 1, ('6', 0.0): 14, ('kikchat', 0.0): 9, ('amazon', 0.0): 5, ('3.4', 0.0): 1, ('yach', 
0.0): 1, ('telat', 0.0): 1, ('huvvft', 0.0): 1, ('zoo', 0.0): 2, ('fieldtrip', 0.0): 1, ('touch', 0.0): 5, ('yan', 0.0): 1, ('posit', 0.0): 2, ('king', 0.0): 1, ('futur', 0.0): 4, ('sizw', 0.0): 1, ('write', 0.0): 13, ('20', 0.0): 9, ('result', 0.0): 3, ('km', 0.0): 2, ('four', 0.0): 4, ('shift', 0.0): 5, ('aaahhh', 0.0): 2, ('boredom', 0.0): 1, ('en', 0.0): 1, ('aint', 0.0): 7, ('who', 0.0): 1, ('sins', 0.0): 1, ('that', 0.0): 13, ('somehow', 0.0): 2, ('tini', 0.0): 4, ('ball', 0.0): 2, ('barbel', 0.0): 1, ('owww', 0.0): 2, ('amsterdam', 0.0): 1, ('luv', 0.0): 2, ('💖', 0.0): 4, ('ps', 0.0): 3, ('looong', 0.0): 1, ('especi', 0.0): 4, (':/', 0.0): 11, ('lap', 0.0): 1, ('litro', 0.0): 1, ('shepherd', 0.0): 2, ('lami', 0.0): 1, ('mayb', 0.0): 27, ('relax', 0.0): 3, ('lungomar', 0.0): 1, ('pesaro', 0.0): 1, ('giachietittiwed', 0.0): 1, ('igersoftheday', 0.0): 1, ('summertim', 0.0): 1, ('nose', 0.0): 7, ('bruis', 0.0): 1, ('lil', 0.0): 8, ('snake', 0.0): 3, ('journey', 0.0): 2, ('scarf', 0.0): 1, ('au', 0.0): 3, ('afford', 0.0): 7, ('fridayfeel', 0.0): 1, ('earli', 0.0): 12, ('money', 0.0): 24, ('chicken', 0.0): 5, ('woe', 0.0): 4, ('nigga', 0.0): 3, ('motn', 0.0): 1, ('make-up', 0.0): 1, ('justic', 0.0): 1, ('import', 0.0): 4, ('sit', 0.0): 5, ('mind', 0.0): 7, ('buy', 0.0): 17, ('limit', 0.0): 4, ('ver', 0.0): 1, ('normal', 0.0): 5, ('edit', 0.0): 7, ('huhuhu', 0.0): 3, ('stack', 0.0): 1, ("m'ladi", 0.0): 1, ('j8', 0.0): 1, ('j11', 0.0): 1, ('m20', 0.0): 1, ('jk', 0.0): 5, ('acad', 0.0): 1, ('schedul', 0.0): 9, ('nowww', 0.0): 1, ('cop', 0.0): 1, ('jame', 0.0): 4, ('window', 0.0): 6, ('hugh', 0.0): 2, ('paw', 0.0): 1, ('muddi', 0.0): 1, ('distract', 0.0): 1, ('heyi', 0.0): 1, ('otherwis', 0.0): 3, ('picnic', 0.0): 1, ('24', 0.0): 11, ('cupcak', 0.0): 2, ('talaga', 0.0): 1, ('best', 0.0): 22, ('femal', 0.0): 3, ('poppin', 0.0): 1, ('joc', 0.0): 1, ('playin', 0.0): 1, ('saw', 0.0): 19, ('fix', 0.0): 10, ('coldplay', 0.0): 1, ('media', 0.0): 1, ('player', 0.0): 3, ('fail', 0.0): 10, ('subj', 0.0): 1, ('sobrang', 0.0): 1, ('bv', 0.0): 1, ('zamn', 0.0): 1, ('line', 0.0): 8, ('afropunk', 0.0): 1, ('fest', 0.0): 1, ('brooklyn', 0.0): 2, ('id', 0.0): 5, ('put', 0.0): 14, ('50', 0.0): 5, ('madrid', 0.0): 7, ('shithous', 0.0): 1, ('cutest', 0.0): 2, ('danc', 0.0): 6, ('ur', 0.0): 26, ('arm', 0.0): 3, ('rais', 0.0): 1, ('hand', 0.0): 12, ('ladder', 0.0): 2, ('told', 0.0): 11, ('climb', 0.0): 3, ('success', 0.0): 4, ('nerv', 0.0): 1, ('wrack', 0.0): 1, ('test', 0.0): 8, ('booset', 0.0): 1, ('restart', 0.0): 1, ('assassin', 0.0): 1, ('creed', 0.0): 1, ('ii', 0.0): 1, ('heap', 0.0): 1, ('fell', 0.0): 10, ('daughter', 0.0): 1, ('begin', 0.0): 4, ('ps3', 0.0): 1, ('ankl', 0.0): 4, ('step', 0.0): 5, ('puddl', 0.0): 2, ('wear', 0.0): 5, ('slipper', 0.0): 1, ('eve', 0.0): 1, ('bbi', 0.0): 6, ('sararoc', 0.0): 1, ('angri', 0.0): 5, ('pretti', 0.0): 15, ('fnaf', 0.0): 1, ('holiday', 0.0): 20, ('cheer', 0.0): 6, ('😘', 0.0): 11, ('anywayhedidanicejob', 0.0): 1, ('😞', 0.0): 3, ('3am', 0.0): 2, ('other', 0.0): 7, ('local', 0.0): 3, ('cruis', 0.0): 1, ('done', 0.0): 24, ('doubl', 0.0): 4, ('wail', 0.0): 1, ('manual', 0.0): 2, ('wheelchair', 0.0): 1, ('check', 0.0): 19, ('fit', 0.0): 3, ('nh', 0.0): 3, ('26week', 0.0): 1, ('sbenu', 0.0): 1, ('sasin', 0.0): 1, ('team', 0.0): 14, ('anarchi', 0.0): 1, ('af', 0.0): 14, ('candl', 0.0): 1, ('forehead', 0.0): 4, ('medicin', 0.0): 3, ('welcom', 0.0): 5, ('oop', 0.0): 4, ('hoya', 0.0): 3, ('mah', 0.0): 2, ('a', 0.0): 1, ('nobodi', 0.0): 10, ('awhil', 0.0): 2, ('ago', 0.0): 20, 
('b', 0.0): 10, ('hush', 0.0): 2, ('gurli', 0.0): 1, ('bring', 0.0): 9, ('purti', 0.0): 1, ('mouth', 0.0): 5, ('closer', 0.0): 2, ('shiver', 0.0): 1, ('solut', 0.0): 1, ('paid', 0.0): 8, ('properli', 0.0): 2, ('gol', 0.0): 1, ('pea', 0.0): 1, ('english', 0.0): 9, ('mental', 0.0): 4, ('tierd', 0.0): 2, ('third', 0.0): 1, ("eye'", 0.0): 1, ('thnkyouuu', 0.0): 1, ('carolin', 0.0): 1, ('neither', 0.0): 6, ('figur', 0.0): 6, ('mirror', 0.0): 1, ('highlight', 0.0): 2, ('pure', 0.0): 3, ('courag', 0.0): 1, ('bit', 0.0): 15, ('fishi', 0.0): 1, ('idek', 0.0): 1, ('apink', 0.0): 5, ('perform', 0.0): 8, ('bulet', 0.0): 1, ('gendut', 0.0): 1, ('noo', 0.0): 5, ('race', 0.0): 3, ('hotwheel', 0.0): 1, ('ms', 0.0): 1, ('patch', 0.0): 1, ('typic', 0.0): 2, ('ahaha', 0.0): 1, ('lay', 0.0): 2, ('wine', 0.0): 1, ('glass', 0.0): 3, ("where'", 0.0): 4, ('akon', 0.0): 1, ('somewher', 0.0): 5, ('nightmar', 0.0): 7, ('ya', 0.0): 15, ('mino', 0.0): 2, ('crazyyi', 0.0): 1, ('thooo', 0.0): 1, ('zz', 0.0): 1, ('airport', 0.0): 7, ('straight', 0.0): 4, ('soundcheck', 0.0): 1, ('hmm', 0.0): 4, ('antagonist', 0.0): 1, ('ob', 0.0): 1, ('phantasi', 0.0): 1, ('star', 0.0): 4, ('ip', 0.0): 1, ('issu', 0.0): 11, ('bruce', 0.0): 1, ('sleepdepriv', 0.0): 1, ('tiredashel', 0.0): 1, ('4aspot', 0.0): 1, ("kinara'", 0.0): 1, ('awami', 0.0): 1, ('question', 0.0): 9, ('niqqa', 0.0): 1, ('answer', 0.0): 14, ('mockingjay', 0.0): 1, ('slow', 0.0): 9, ('pb.contest', 0.0): 1, ('cycl', 0.0): 2, ('aarww', 0.0): 1, ('lmbo', 0.0): 1, ('dangit', 0.0): 1, ('ohmygod', 0.0): 1, ('scenario', 0.0): 1, ('tooo', 0.0): 2, ('duck', 0.0): 1, ('baechyyi', 0.0): 1, ('okayyy', 0.0): 1, ('noon', 0.0): 3, ('drag', 0.0): 5, ('serious', 0.0): 11, ('misundersrand', 0.0): 1, ('chal', 0.0): 1, ('raha', 0.0): 1, ('hai', 0.0): 11, ('yhm', 0.0): 1, ('edsa', 0.0): 2, ('jasmingarrick', 0.0): 2, ('kikmeguy', 0.0): 5, ('webcam', 0.0): 2, ('milf', 0.0): 1, ('nakamaforev', 0.0): 3, ('kiksex', 0.0): 7, ("unicef'", 0.0): 1, ('fu', 0.0): 1, ('alon', 0.0): 16, ('manag', 0.0): 13, ('stephen', 0.0): 1, ('street', 0.0): 2, ('35', 0.0): 1, ('min', 0.0): 7, ('appear', 0.0): 2, ('record', 0.0): 6, ('coz', 0.0): 4, ('frustrat', 0.0): 6, ('sent', 0.0): 9, ('interest', 0.0): 9, ('woza', 0.0): 1, ('promis', 0.0): 4, ('senight', 0.0): 1, ('468', 0.0): 1, ('kikmeboy', 0.0): 9, ('gay', 0.0): 6, ('teen', 0.0): 7, ('amateur', 0.0): 5, ('hotscratch', 0.0): 1, ('sell', 0.0): 8, ('sock', 0.0): 6, ('150-160', 0.0): 1, ('peso', 0.0): 1, ('gotta', 0.0): 8, ('pay', 0.0): 8, ('degrassi', 0.0): 1, ('4-6', 0.0): 1, ('bcz', 0.0): 1, ('kat', 0.0): 3, ('chem', 0.0): 2, ('onscreen', 0.0): 1, ('ofscreen', 0.0): 1, ('kinda', 0.0): 10, ('pak', 0.0): 4, ('class', 0.0): 10, ('monthli', 0.0): 1, ('roll', 0.0): 4, ('band', 0.0): 2, ('throw', 0.0): 2, ('ironi', 0.0): 2, ('rhisfor', 0.0): 1, ('500', 0.0): 2, ('bestoftheday', 0.0): 3, ('chat', 0.0): 9, ('camsex', 0.0): 5, ('unfollow', 0.0): 11, ('particular', 0.0): 1, ('support', 0.0): 26, ('bae', 0.0): 11, ('poopi', 0.0): 1, ('pip', 0.0): 1, ('post', 0.0): 12, ('felt', 0.0): 6, ('uff', 0.0): 1, ('1.300', 0.0): 1, ('credit', 0.0): 3, ('glue', 0.0): 1, ('factori', 0.0): 1, ('kuchar', 0.0): 1, ('fast', 0.0): 7, ('graduat', 0.0): 3, ('up', 0.0): 2, ('definit', 0.0): 3, ('uni', 0.0): 2, ('ee', 0.0): 1, ('tommi', 0.0): 1, ('georgia', 0.0): 2, ('bout', 0.0): 2, ('instant', 0.0): 1, ('transmiss', 0.0): 1, ('malik', 0.0): 1, ('orang', 0.0): 2, ('suma', 0.0): 1, ('shouldeeerr', 0.0): 1, ('outfit', 0.0): 5, ('age', 0.0): 8, ('repack', 0.0): 3, ('group', 0.0): 4, ('charl', 
0.0): 1, ('grown', 0.0): 2, ('rememb', 0.0): 17, ('dy', 0.0): 1, ('rihanna', 0.0): 1, ('red', 0.0): 4, ('ging', 0.0): 2, ('boot', 0.0): 4, ('closest', 0.0): 3, ('nike', 0.0): 1, ('adida', 0.0): 1, ('inform', 0.0): 4, ('[email protected]', 0.0): 1, ('set', 0.0): 13, ('ifeely', 0.0): 1, ('harder', 0.0): 2, ('usual', 0.0): 7, ('ratbaglat', 0.0): 1, ('second', 0.0): 5, ('semest', 0.0): 2, ('gin', 0.0): 1, ('gut', 0.0): 12, ('reynold', 0.0): 1, ('dessert', 0.0): 2, ('season', 0.0): 9, ('villag', 0.0): 1, ('differ', 0.0): 10, ('citi', 0.0): 11, ('unit', 0.0): 3, ('oppress', 0.0): 1, ('mass', 0.0): 2, ('wat', 0.0): 5, ('afghanistn', 0.0): 1, ('war', 0.0): 2, ('tore', 0.0): 1, ('sunggyu', 0.0): 5, ('injur', 0.0): 7, ('plaster', 0.0): 2, ('rtd', 0.0): 1, ('loui', 0.0): 4, ('harri', 0.0): 10, ('5so', 0.0): 7, ('crowd', 0.0): 1, ('stadium', 0.0): 4, ('welder', 0.0): 1, ('ghost', 0.0): 1, ('hogo', 0.0): 1, ('vishaya', 0.0): 1, ('adu', 0.0): 1, ('bjp', 0.0): 1, ('madatt', 0.0): 1, ('anta', 0.0): 1, ('vishwa', 0.0): 1, ('ne', 0.0): 3, ('illa', 0.0): 1, ('wua', 0.0): 1, ('picki', 0.0): 1, ('finger', 0.0): 8, ('favourit', 0.0): 9, ('mutual', 0.0): 2, ('gn', 0.0): 1, ('along', 0.0): 3, ('ass', 0.0): 9, ('thent', 0.0): 1, ('423', 0.0): 1, ('sabadodeganarseguidor', 0.0): 2, ('sexual', 0.0): 4, ('sync', 0.0): 2, ('plug.dj', 0.0): 1, ('peel', 0.0): 1, ('suspems', 0.0): 1, ('cope', 0.0): 3, ('offroad', 0.0): 1, ('adventur', 0.0): 1, ('there', 0.0): 5, ('harvest', 0.0): 1, ('machineri', 0.0): 1, ('inapropri', 0.0): 1, ('weav', 0.0): 2, ('nowher', 0.0): 3, ('decent', 0.0): 2, ('invest', 0.0): 2, ('scottish', 0.0): 1, ('footbal', 0.0): 3, ('dire', 0.0): 2, ('nomoney', 0.0): 1, ('nawf', 0.0): 1, ('sum', 0.0): 2, ('becho', 0.0): 1, ('danni', 0.0): 3, ('eng', 0.0): 2, ("let'", 0.0): 5, ('overli', 0.0): 2, ('lab', 0.0): 1, ('ty', 0.0): 3, ('zap', 0.0): 1, ('distress', 0.0): 1, ('shot', 0.0): 6, ('cinema', 0.0): 4, ('louisianashoot', 0.0): 1, ('laugh', 0.0): 7, ('har', 0.0): 3, ("how'", 0.0): 5, ('chum', 0.0): 1, ('ncc', 0.0): 1, ('ph', 0.0): 2, ('balik', 0.0): 1, ('naman', 0.0): 1, ('kayo', 0.0): 1, ('itong', 0.0): 1, ('shirt', 0.0): 3, ('thaaat', 0.0): 1, ('ctto', 0.0): 1, ('expir', 0.0): 3, ('bi', 0.0): 2, ('tough', 0.0): 2, ('11', 0.0): 4, ('3:33', 0.0): 2, ('jfc', 0.0): 1, ('bio', 0.0): 3, ('bodo', 0.0): 1, ('amat', 0.0): 1, ('quick', 0.0): 5, ('yelaaa', 0.0): 1, ('dublin', 0.0): 2, ('potter', 0.0): 1, ('marathon', 0.0): 3, ('balanc', 0.0): 2, ('warm', 0.0): 5, ('comic', 0.0): 5, ('pine', 0.0): 1, ('keybind', 0.0): 1, ('featur', 0.0): 4, ('wild', 0.0): 2, ('warfar', 0.0): 1, ('control', 0.0): 2, ('diagnos', 0.0): 1, ('wiv', 0.0): 1, ("scheuermann'", 0.0): 1, ('diseas', 0.0): 3, ('bone', 0.0): 1, ('rlyhurt', 0.0): 1, ('howdo', 0.0): 1, ('georgesampson', 0.0): 1, ('stand', 0.0): 6, ('signal', 0.0): 3, ('reckon', 0.0): 1, ('t20', 0.0): 1, ('action', 0.0): 2, ('taunton', 0.0): 1, ('vacat', 0.0): 3, ('excit', 0.0): 6, ('justiceforsandrabland', 0.0): 2, ('sandrabland', 0.0): 6, ('disturb', 0.0): 1, ('women', 0.0): 5, ('happpi', 0.0): 1, ('justinbieb', 0.0): 4, ('daianerufato', 0.0): 3, ('ilysm', 0.0): 3, ('2015', 0.0): 12, ('07:34', 0.0): 1, ('delphi', 0.0): 2, ('weak', 0.0): 2, ('dom', 0.0): 2, ('techniqu', 0.0): 1, ('minc', 0.0): 2, ('complet', 0.0): 9, ('symphoni', 0.0): 1, ('joe', 0.0): 3, ('co', 0.0): 6, ('wth', 0.0): 2, ('aisyhhh', 0.0): 1, ('bald', 0.0): 1, ('14', 0.0): 3, ('seungchan', 0.0): 1, ('aigooo', 0.0): 1, ('riri', 0.0): 1, ('origin', 0.0): 6, ('depend', 0.0): 2, ('vet', 0.0): 1, ('major', 0.0): 2, 
('va', 0.0): 1, ('kept', 0.0): 2, ('lumin', 0.0): 1, ('follback', 0.0): 2, ('treat', 0.0): 5, ('v', 0.0): 6, ('product', 0.0): 4, ('letter', 0.0): 1, ('z', 0.0): 5, ('uniqu', 0.0): 2, ('refresh', 0.0): 1, ('popular', 0.0): 1, ('bebee', 0.0): 2, ('lt', 0.0): 1, ('inaccuraci', 0.0): 1, ('inaccur', 0.0): 1, ('worri', 0.0): 8, ('burn', 0.0): 4, ('rn', 0.0): 17, ('tragic', 0.0): 1, ('joy', 0.0): 2, ('sam', 0.0): 4, ('rush', 0.0): 2, ('toronto', 0.0): 1, ('stuart', 0.0): 1, ("party'", 0.0): 2, ('iyalaya', 0.0): 1, ('shade', 0.0): 3, ('round', 0.0): 3, ('clock', 0.0): 2, (';(', 0.0): 6, ('happier', 0.0): 1, ('h', 0.0): 8, ('ubusi', 0.0): 1, ('le', 0.0): 3, ('fifa', 0.0): 1, ('gymnast', 0.0): 1, ('aahhh', 0.0): 1, ('noggin', 0.0): 1, ('bump', 0.0): 1, ('feelslikeanidiot', 0.0): 1, ('pregnant', 0.0): 2, ('woman', 0.0): 5, ('dearli', 0.0): 1, ('sunshin', 0.0): 4, ('suk', 0.0): 2, ('pumpkin', 0.0): 1, ('scone', 0.0): 1, ('outnumb', 0.0): 1, ('vidcon', 0.0): 10, ('eri', 0.0): 1, ('geez', 0.0): 1, ('preciou', 0.0): 4, ('hive', 0.0): 1, ('vote', 0.0): 7, ('vietnam', 0.0): 1, ('decemb', 0.0): 2, ('dunt', 0.0): 1, ('ikr', 0.0): 3, ('sob', 0.0): 3, ('buff', 0.0): 1, ('leg', 0.0): 4, ('toni', 0.0): 1, ('deactiv', 0.0): 6, ('bra', 0.0): 2, ("shady'", 0.0): 1, ('isibaya', 0.0): 1, ('special', 0.0): 3, ('❤', 0.0): 21, ('️', 0.0): 19, ('😓', 0.0): 2, ('slept', 0.0): 5, ('colder', 0.0): 1, ('took', 0.0): 9, ('med', 0.0): 1, ('sausag', 0.0): 1, ('adio', 0.0): 1, ('cold', 0.0): 15, ('sore', 0.0): 9, ('ew', 0.0): 3, ('h8', 0.0): 1, ('messeng', 0.0): 2, ('shittier', 0.0): 1, ('leno', 0.0): 1, ('ident', 0.0): 1, ('crisi', 0.0): 2, ('roommat', 0.0): 1, ('knock', 0.0): 3, ('nighter', 0.0): 3, ('bird', 0.0): 2, ('flew', 0.0): 2, ('thru', 0.0): 2, ('derek', 0.0): 3, ('tour', 0.0): 7, ('wetherspoon', 0.0): 1, ('pub', 0.0): 1, ('polic', 0.0): 4, ('frank', 0.0): 2, ('ocean', 0.0): 4, ('releas', 0.0): 8, ('ff', 0.0): 4, ('lisah', 0.0): 2, ('kikm', 0.0): 8, ('eboni', 0.0): 2, ('weloveyounamjoon', 0.0): 1, ('gave', 0.0): 8, ('dress', 0.0): 6, ('polka', 0.0): 1, ('dot', 0.0): 2, ('ndi', 0.0): 1, ('yum', 0.0): 1, ('feed', 0.0): 3, ('leftov', 0.0): 2, ('side', 0.0): 6, ('cs', 0.0): 2, ('own', 0.0): 1, ('walnut', 0.0): 1, ('whip', 0.0): 1, ('wife', 0.0): 6, ('boah', 0.0): 1, ('madi', 0.0): 2, ('def', 0.0): 3, ('manga', 0.0): 1, ('giant', 0.0): 3, ('aminormalyet', 0.0): 1, ('cooki', 0.0): 2, ('breakfast', 0.0): 5, ('clutch', 0.0): 1, ('poorli', 0.0): 6, ('tummi', 0.0): 6, ('pj', 0.0): 1, ('groan', 0.0): 1, ('nou', 0.0): 1, ('adam', 0.0): 2, ('ken', 0.0): 1, ('sara', 0.0): 2, ('sister', 0.0): 4, ('accid', 0.0): 2, ('sort', 0.0): 7, ('mate', 0.0): 2, ('pick', 0.0): 12, ('rang', 0.0): 4, ('fk', 0.0): 2, ('freak', 0.0): 5, ('describ', 0.0): 1, ('eric', 0.0): 2, ('prydz', 0.0): 1, ('sister-in-law', 0.0): 1, ('instal', 0.0): 2, ('seat', 0.0): 4, ('bought', 0.0): 6, ('rear-end', 0.0): 1, ("everyone'", 0.0): 4, ('trash', 0.0): 2, ('boob', 0.0): 3, ('whilst', 0.0): 3, ('stair', 0.0): 1, ('childhood', 0.0): 1, ('toothsensit', 0.0): 4, ('size', 0.0): 9, ('ke', 0.0): 3, ('shem', 0.0): 2, ('trust', 0.0): 2, ('awel', 0.0): 1, ('drunk', 0.0): 2, ('weekendofmad', 0.0): 1, ('🍹', 0.0): 3, ('🍸', 0.0): 1, ('cb', 0.0): 1, ('dancer', 0.0): 1, ('choregraph', 0.0): 1, ('626-430-8715', 0.0): 1, ('messag', 0.0): 8, ('repli', 0.0): 14, ('hoe', 0.0): 1, ('xd', 0.0): 7, ('xiu', 0.0): 1, ('nk', 0.0): 1, ('gi', 0.0): 2, ('uss', 0.0): 1, ('eliss', 0.0): 1, ('ksoo', 0.0): 2, ('session', 0.0): 5, ('tat', 0.0): 1, ('bcoz', 0.0): 1, ('bet', 0.0): 10, ('rancho', 0.0): 
1, ('imperi', 0.0): 1, ('de', 0.0): 1, ('silang', 0.0): 1, ('subdivis', 0.0): 1, ('center', 0.0): 1, ('39', 0.0): 1, ('cornwal', 0.0): 1, ('verit', 0.0): 1, ('prize', 0.0): 2, ('regular', 0.0): 3, ('workout', 0.0): 1, ('spin', 0.0): 1, ('base', 0.0): 1, ('upon', 0.0): 1, ('penni', 0.0): 1, ('ebook', 0.0): 1, ('фотосет', 0.0): 1, ('addicted-to-analsex', 0.0): 1, ('sweetbj', 0.0): 2, ('blowjob', 0.0): 1, ('mhhh', 0.0): 1, ('sed', 0.0): 1, ('sg', 0.0): 1, ('dinner', 0.0): 4, ('bless', 0.0): 2, ('mee', 0.0): 2, ('enviou', 0.0): 1, ('eonni', 0.0): 1, ('lovey', 0.0): 1, ('dovey', 0.0): 1, ('dongsaeng', 0.0): 1, ('workin', 0.0): 1, ('tuesday', 0.0): 4, ('schade', 0.0): 3, ('belfast', 0.0): 1, ('jealou', 0.0): 9, ('jacob', 0.0): 5, ('isco', 0.0): 4, ('peni', 0.0): 1, ('everi', 0.0): 16, ('convers', 0.0): 6, ('wonder', 0.0): 11, ('soul', 0.0): 5, ('nation', 0.0): 2, ('louisiana', 0.0): 4, ('lafayett', 0.0): 2, ('matteroftheheart', 0.0): 1, ('waduh', 0.0): 1, ('pant', 0.0): 3, ('suspend', 0.0): 2, ('believ', 0.0): 14, ('teenag', 0.0): 2, ('clich', 0.0): 1, ('youuu', 0.0): 5, ('rma', 0.0): 1, ('jersey', 0.0): 2, ('fake', 0.0): 4, ('jaclintil', 0.0): 1, ('model', 0.0): 9, ('likeforlik', 0.0): 7, ('mpoint', 0.0): 4, ('hotfmnoaidilforariana', 0.0): 2, ('ran', 0.0): 5, ('fuckkk', 0.0): 1, ('jump', 0.0): 3, ('justin', 0.0): 3, ('finish', 0.0): 14, ('sanum', 0.0): 1, ('llaollao', 0.0): 1, ('foood', 0.0): 1, ('ubericecream', 0.0): 14, ('glare', 0.0): 1, ('vine', 0.0): 3, ('tweetin', 0.0): 1, ('mood', 0.0): 3, ('elbow', 0.0): 1, ('choreo', 0.0): 1, ('offens', 0.0): 2, ('yeyi', 0.0): 1, ('hd', 0.0): 2, ('brow', 0.0): 1, ('kit', 0.0): 6, ('slightli', 0.0): 2, ('monday', 0.0): 10, ('sux', 0.0): 1, ('enjoy', 0.0): 9, ('nothaveld', 0.0): 1, ('765', 0.0): 1, ('edm', 0.0): 1, ('likeforfollow', 0.0): 3, ('hannib', 0.0): 3, ('mosquito', 0.0): 2, ('bite', 0.0): 5, ('kinki', 0.0): 1, ('hsould', 0.0): 1, ('justget', 0.0): 1, ('marri', 0.0): 2, ('la', 0.0): 11, ('shuffl', 0.0): 4, ('int', 0.0): 1, ('buckl', 0.0): 1, ('spring', 0.0): 1, ('millz', 0.0): 1, ('aski', 0.0): 2, ('awusasho', 0.0): 1, ('unlucki', 0.0): 2, ('driver', 0.0): 7, ('briefli', 0.0): 1, ('spot', 0.0): 4, ('144p', 0.0): 1, ('brook', 0.0): 1, ('crack', 0.0): 2, ('@', 0.0): 5, ('maverickgam', 0.0): 4, ('07:32', 0.0): 1, ('07:25', 0.0): 1, ('max', 0.0): 3, ('file', 0.0): 2, ('extern', 0.0): 2, ('sd', 0.0): 1, ('via', 0.0): 1, ('airdroid', 0.0): 1, ('android', 0.0): 2, ('4.4+', 0.0): 1, ('googl', 0.0): 5, ('alright', 0.0): 3, ('cramp', 0.0): 2, ('unstan', 0.0): 1, ('tay', 0.0): 2, ('ngeze', 0.0): 1, ('cocktaili', 0.0): 1, ('classi', 0.0): 1, ('07:24', 0.0): 1, ('✈', 0.0): 2, ('️2', 0.0): 1, ('raini', 0.0): 2, ('☔', 0.0): 2, ('peter', 0.0): 1, ('pen', 0.0): 1, ('spare', 0.0): 1, ('guest', 0.0): 2, ('barcelona', 0.0): 2, ('bilbao', 0.0): 1, ('booti', 0.0): 2, ('sharyl', 0.0): 1, ('shane', 0.0): 2, ('ta', 0.0): 1, ('giddi', 0.0): 1, ('d1', 0.0): 1, ('zipper', 0.0): 1, ('beyond', 0.0): 1, ('repair', 0.0): 4, ('iphon', 0.0): 5, ('upgrad', 0.0): 1, ('april', 0.0): 1, ('2016', 0.0): 1, ('cont', 0.0): 2, ('england', 0.0): 4, ('wore', 0.0): 2, ('greet', 0.0): 5, ('tempt', 0.0): 2, ('whole', 0.0): 16, ('pack', 0.0): 6, ('oreo', 0.0): 2, ('strength', 0.0): 1, ('wifi', 0.0): 5, ('network', 0.0): 4, ('within', 0.0): 3, ('lolipop', 0.0): 1, ('kebab', 0.0): 1, ('klappertart', 0.0): 1, ('cake', 0.0): 10, ('moodbost', 0.0): 2, ('shoot', 0.0): 6, ('unprepar', 0.0): 1, ('sri', 0.0): 1, ('dresscod', 0.0): 1, ('door', 0.0): 6, ('iam', 0.0): 2, ('dnt', 0.0): 1, ('stab', 0.0): 3, 
('meh', 0.0): 3, ('wrocilam', 0.0): 1, ('otp', 0.0): 3, ('5', 0.0): 14, ('looww', 0.0): 1, ('recov', 0.0): 2, ('wayn', 0.0): 2, ('insur', 0.0): 3, ('loss', 0.0): 3, ('stolen', 0.0): 2, ('accident', 0.0): 1, ('damag', 0.0): 5, ('devic', 0.0): 3, ('warranti', 0.0): 1, ('centr', 0.0): 2, ('👌', 0.0): 1, ('lmfaoo', 0.0): 1, ('accur', 0.0): 2, ('fra', 0.0): 4, ('aliv', 0.0): 2, ('steel', 0.0): 2, ('otamendi', 0.0): 1, ('ny', 0.0): 2, ('🚖', 0.0): 1, ('🗽', 0.0): 1, ('🌃', 0.0): 1, ('stealth', 0.0): 2, ('bastard', 0.0): 2, ('inc', 0.0): 3, ('steam', 0.0): 2, ('therapi', 0.0): 1, ('exhaust', 0.0): 3, ('lie', 0.0): 7, ('total', 0.0): 11, ('block', 0.0): 11, ('choic', 0.0): 5, ('switzerland', 0.0): 1, ('kfc', 0.0): 1, ('common', 0.0): 4, ('th', 0.0): 5, ('wolrd', 0.0): 1, ('fyn', 0.0): 1, ('drop', 0.0): 10, ('state', 0.0): 4, ('3g', 0.0): 2, ('christ', 0.0): 1, ('scale', 0.0): 1, ('deck', 0.0): 1, ('chair', 0.0): 4, ('yk', 0.0): 1, ('resi', 0.0): 1, ('memori', 0.0): 5, ('nude', 0.0): 4, ('bruh', 0.0): 3, ('prepar', 0.0): 3, ('lock', 0.0): 2, ('view', 0.0): 7, ('fbc', 0.0): 3, ('mork', 0.0): 1, ('873', 0.0): 1, ('kikgirl', 0.0): 13, ('premiostumundo', 0.0): 2, ('hotspotwithdanri', 0.0): 1, ('hospit', 0.0): 3, ('food', 0.0): 18, ('sone', 0.0): 1, ('produc', 0.0): 1, ('potag', 0.0): 1, ('tomato', 0.0): 1, ('blight', 0.0): 1, ('sheffield', 0.0): 1, ('mych', 0.0): 1, ('shiiit', 0.0): 2, ('screenshot', 0.0): 4, ('prompt', 0.0): 1, ('areadi', 0.0): 1, ('similar', 0.0): 4, ('soulmat', 0.0): 1, ('canon', 0.0): 1, ('zzz', 0.0): 2, ('britain', 0.0): 1, ('😁', 0.0): 3, ('mana', 0.0): 2, ('hw', 0.0): 1, ('jouch', 0.0): 1, ('por', 0.0): 1, ('que', 0.0): 1, ('liceooo', 0.0): 1, ('30', 0.0): 3, ('minut', 0.0): 6, ('pass', 0.0): 13, ('ayala', 0.0): 1, ('tunnel', 0.0): 2, ('thatscold', 0.0): 1, ('80', 0.0): 1, ('snap', 0.0): 3, ('lourd', 0.0): 1, ('bang', 0.0): 3, ('anywher', 0.0): 4, ('water', 0.0): 8, ('road', 0.0): 1, ('showbox', 0.0): 1, ('naruto', 0.0): 1, ('cartoon', 0.0): 1, ('companion', 0.0): 2, ('skinni', 0.0): 3, ('fat', 0.0): 4, ('bare', 0.0): 6, ('dubai', 0.0): 3, ('calum', 0.0): 1, ('ashton', 0.0): 1, ('✧', 0.0): 8, ('。', 0.0): 8, ('chelni', 0.0): 4, ('disappoint', 0.0): 13, ('everybodi', 0.0): 5, ('due', 0.0): 14, ('laribuggi', 0.0): 1, ('medic', 0.0): 1, ('nutella', 0.0): 1, ("could'v", 0.0): 3, ('siriu', 0.0): 1, ('goat', 0.0): 4, ('frudg', 0.0): 1, ('mike', 0.0): 1, ('cloth', 0.0): 6, ('stuff', 0.0): 11, ('sat', 0.0): 3, ('number', 0.0): 6, ('ring', 0.0): 1, ('bbz', 0.0): 1, ('angek', 0.0): 1, ('sbali', 0.0): 1, ('euuuwww', 0.0): 2, ('lunch', 0.0): 10, ('construct', 0.0): 3, ('worker', 0.0): 3, ('1k', 0.0): 3, ('style', 0.0): 4, ('nell', 0.0): 1, ('ik', 0.0): 2, ('death', 0.0): 3, ('jaysu', 0.0): 1, ('toast', 0.0): 1, ('insecur', 0.0): 2, ('buti', 0.0): 1, ('ure', 0.0): 2, ('poop', 0.0): 1, ('gorgeou', 0.0): 2, ('angel', 0.0): 2, ('rome', 0.0): 1, ('throat', 0.0): 10, ('llama', 0.0): 1, ('urself', 0.0): 2, ('getwellsoonamb', 0.0): 1, ('heath', 0.0): 2, ('ledger', 0.0): 1, ('appl', 0.0): 3, ('permiss', 0.0): 2, ('2-0', 0.0): 1, ('lead', 0.0): 3, ('supersport', 0.0): 1, ('milkshak', 0.0): 1, ('witcher', 0.0): 1, ('papertown', 0.0): 1, ('bale', 0.0): 1, ('9', 0.0): 5, ('méxico', 0.0): 1, ('bahay', 0.0): 1, ('bahayan', 0.0): 1, ('magisa', 0.0): 1, ('sadlyf', 0.0): 1, ('bunso', 0.0): 1, ('sleeep', 0.0): 4, ('astonvilla', 0.0): 1, ('berigaud', 0.0): 1, ('bakar', 0.0): 1, ('club', 0.0): 4, ('dear', 0.0): 11, ('allerg', 0.0): 4, ('depress', 0.0): 5, ("blaine'", 0.0): 1, ('acoust', 0.0): 2, ('version', 0.0): 5, 
('excus', 0.0): 3, ('hernia', 0.0): 3, ('toxin', 0.0): 1, ('freedom', 0.0): 1, ('organ', 0.0): 2, ('ariel', 0.0): 1, ('slap', 0.0): 1, ('slam', 0.0): 1, ('bee', 0.0): 1, ('unknown', 0.0): 2, ('finddjderek', 0.0): 1, ('smell', 0.0): 3, ('uuughhh', 0.0): 1, ('grabe', 0.0): 5, ('ka', 0.0): 5, ('where', 0.0): 1, ('gf', 0.0): 3, ('james_yammouni', 0.0): 1, ('smi', 0.0): 1, ('nemesi', 0.0): 1, ('rule', 0.0): 1, ('doesnt', 0.0): 2, ('appeal', 0.0): 1, ('neeein', 0.0): 1, ('saaad', 0.0): 3, ('less', 0.0): 3, ('hang', 0.0): 7, ('creas', 0.0): 1, ('tan', 0.0): 3, ('dalla', 0.0): 4, ('suppos', 0.0): 7, ('infront', 0.0): 2, ('beato', 0.0): 1, ('tim', 0.0): 2, ('prob', 0.0): 5, ('minha', 0.0): 1, ('deleici', 0.0): 1, ('hr', 0.0): 2, ('pcb', 0.0): 1, ('ep', 0.0): 5, ('peregrin', 0.0): 1, ('8.40', 0.0): 1, ('pigeon', 0.0): 1, ('feet', 0.0): 3, ('tram', 0.0): 1, ('hav', 0.0): 2, ('spent', 0.0): 5, ('outsid', 0.0): 9, ('apt', 0.0): 1, ('build', 0.0): 3, ('key', 0.0): 3, ('bldg', 0.0): 1, ('wrote', 0.0): 3, ('dark', 0.0): 5, ('swan', 0.0): 1, ('fifth', 0.0): 2, ('mmmm', 0.0): 1, ('avi', 0.0): 4, ('nicki', 0.0): 1, ('fucjikg', 0.0): 1, ('disgust', 0.0): 6, ('buynotanapologyonitun', 0.0): 1, ('aval', 0.0): 1, ('denmark', 0.0): 1, ('nw', 0.0): 2, ('sch', 0.0): 2, ('share', 0.0): 11, ('jeslyn', 0.0): 1, ('72', 0.0): 4, ('root', 0.0): 2, ('kuch', 0.0): 1, ('nahi', 0.0): 1, ('hua', 0.0): 2, ('newbi', 0.0): 1, ('crap', 0.0): 3, ('miracl', 0.0): 1, ('4th', 0.0): 1, ('linda', 0.0): 1, ('click', 0.0): 1, ('pin', 0.0): 2, ('wing', 0.0): 3, ('epic', 0.0): 2, ('page', 0.0): 6, ('ang', 0.0): 8, ('ganda', 0.0): 1, ('💗', 0.0): 4, ('nux', 0.0): 1, ('hinanap', 0.0): 1, ('ako', 0.0): 1, ('uy', 0.0): 1, ('sched', 0.0): 1, ('anyar', 0.0): 1, ('entertain', 0.0): 2, ('typa', 0.0): 3, ('buddi', 0.0): 2, ('transpar', 0.0): 1, ('photoshop', 0.0): 2, ('planner', 0.0): 1, ('helppp', 0.0): 2, ('wearig', 0.0): 1, ('dri', 0.0): 2, ('alot', 0.0): 3, ('bu', 0.0): 5, ('prey', 0.0): 1, ('gross', 0.0): 5, ('drain', 0.0): 3, ('ausfailia', 0.0): 1, ('snow', 0.0): 3, ('footi', 0.0): 3, ('2nd', 0.0): 5, ('row', 0.0): 3, ("m'", 0.0): 2, ('kitkat', 0.0): 2, ('bday', 0.0): 7, ('😢', 0.0): 8, ('suger', 0.0): 1, ('olivia', 0.0): 2, ('audit', 0.0): 1, ('american', 0.0): 1, ('idol', 0.0): 2, ('injuri', 0.0): 2, ('appendix', 0.0): 1, ('burst', 0.0): 2, ('append', 0.0): 1, ('yeahh', 0.0): 2, ('fack', 0.0): 2, ('nhl', 0.0): 1, ('khami', 0.0): 2, ('favorit', 0.0): 4, ('rise', 0.0): 3, ('reaali', 0.0): 1, ('ja', 0.0): 2, ('naomi', 0.0): 1, ('modern', 0.0): 1, ('contemporari', 0.0): 1, ('slack', 0.0): 1, ('565', 0.0): 1, ('blond', 0.0): 2, ('jahat', 0.0): 3, ('discount', 0.0): 1, ('thorp', 0.0): 2, ('park', 0.0): 7, ('esnho', 0.0): 1, ('node', 0.0): 1, ('advanc', 0.0): 4, ('directx', 0.0): 1, ('workshop', 0.0): 1, ('p2', 0.0): 1, ('upload', 0.0): 2, ('remov', 0.0): 5, ('blackberri', 0.0): 1, ('shitti', 0.0): 1, ('mobil', 0.0): 2, ('povertyyouareevil', 0.0): 1, ('struggl', 0.0): 4, ('math', 0.0): 1, ('emm', 0.0): 1, ('data', 0.0): 6, ('elgin', 0.0): 1, ('vava', 0.0): 1, ('makati', 0.0): 1, ('💛', 0.0): 4, ('baon', 0.0): 1, ('soup', 0.0): 3, ('soak', 0.0): 1, ('bread', 0.0): 2, ('mush', 0.0): 1, ("they'd", 0.0): 2, ('matt', 0.0): 2, ('ouat', 0.0): 1, ('beach', 0.0): 5, ('blinkin', 0.0): 1, ('unblock', 0.0): 1, ('headack', 0.0): 1, ('tension', 0.0): 1, ('erit', 0.0): 1, ('perspect', 0.0): 1, ('wed', 0.0): 4, ('playlist', 0.0): 2, ('endlessli', 0.0): 1, ('blush', 0.0): 1, ('bat', 0.0): 1, ('kiddo', 0.0): 1, ('rumbel', 0.0): 1, ('overwhelm', 0.0): 1, ('thrown', 
0.0): 2, ('irrespons', 0.0): 1, ('pakighinabi', 0.0): 1, ('pinkfinit', 0.0): 1, ('beb', 0.0): 2, ('migrain', 0.0): 2, ('almost', 0.0): 11, ('coyot', 0.0): 1, ('outta', 0.0): 1, ('mad', 0.0): 11, ('😒', 0.0): 3, ('headach', 0.0): 9, ('인피니트', 0.0): 2, ('save', 0.0): 6, ('baechu', 0.0): 1, ('calibraskaep', 0.0): 3, ('r', 0.0): 19, ('fanci', 0.0): 2, ('yt', 0.0): 3, ('purchas', 0.0): 2, ('elgato', 0.0): 1, ('ant', 0.0): 2, ('unexpect', 0.0): 2, ('bestfriend', 0.0): 9, ('faint', 0.0): 1, ('bp', 0.0): 1, ('appar', 0.0): 5, ('shower', 0.0): 3, ('subway', 0.0): 1, ('cool', 0.0): 5, ('prayer', 0.0): 2, ('fragil', 0.0): 1, ('huge', 0.0): 3, ('gap', 0.0): 1, ('plot', 0.0): 2, ('bungi', 0.0): 1, ('folk', 0.0): 1, ('raspberri', 0.0): 1, ('pi', 0.0): 1, ('shoe', 0.0): 2, ('woohyun', 0.0): 2, ('guilti', 0.0): 1, ('monica', 0.0): 2, ('davao', 0.0): 1, ('luckyyi', 0.0): 1, ('confid', 0.0): 1, ('eunha', 0.0): 1, ('misplac', 0.0): 1, ('den', 0.0): 1, ('dae', 0.0): 1, ('bap', 0.0): 1, ('likewis', 0.0): 1, ('liam', 0.0): 1, ('dylan', 0.0): 3, ('huehu', 0.0): 1, ('rice', 0.0): 1, ('krispi', 0.0): 1, ('marshmallow', 0.0): 2, ('srsli', 0.0): 7, ('birmingham', 0.0): 1, ('m5m6junction', 0.0): 1, ('soulsurvivor', 0.0): 1, ('stafford', 0.0): 1, ('progress', 0.0): 1, ('mixtur', 0.0): 1, ("they'v", 0.0): 4, ('practic', 0.0): 1, ('lage', 0.0): 1, ('ramd', 0.0): 1, ('lesbian', 0.0): 3, ('oralsex', 0.0): 4, ('munchkin', 0.0): 1, ('juja', 0.0): 1, ('murugan', 0.0): 1, ('handl', 0.0): 3, ('dia', 0.0): 2, ('bgtau', 0.0): 1, ('harap', 0.0): 1, ('bagi', 0.0): 1, ('aminn', 0.0): 1, ('fraand', 0.0): 1, ('😬', 0.0): 2, ('bigbang', 0.0): 2, ('steak', 0.0): 1, ('younger', 0.0): 2, ('sian', 0.0): 2, ('pizza', 0.0): 7, ('5am', 0.0): 5, ('nicoleapag', 0.0): 1, ('makeup', 0.0): 4, ('hellish', 0.0): 1, ('thirstyyi', 0.0): 1, ('chesti', 0.0): 1, ('dad', 0.0): 9, ("nando'", 0.0): 1, ('22', 0.0): 3, ('bow', 0.0): 2, ('queen', 0.0): 3, ('brave', 0.0): 1, ('hen', 0.0): 1, ('leed', 0.0): 9, ('rdd', 0.0): 1, ('dissip', 0.0): 1, ('. 
.', 0.0): 1, ('pump', 0.0): 2, ('capee', 0.0): 1, ('japan', 0.0): 2, ('random', 0.0): 1, ('young', 0.0): 5, ('outliv', 0.0): 1, ('x-ray', 0.0): 1, ('dental', 0.0): 1, ('spine', 0.0): 1, ('relief', 0.0): 1, ('popol', 0.0): 1, ('stomach', 0.0): 8, ('frog', 0.0): 2, ('brad', 0.0): 1, ('gen.ad', 0.0): 1, ('price', 0.0): 5, ('negoti', 0.0): 3, ('huhuhuhuhu', 0.0): 1, ('bbmadeinmanila', 0.0): 1, ('findavip', 0.0): 1, ('boyirl', 0.0): 1, ('yasss', 0.0): 1, ('6th', 0.0): 1, ('june', 0.0): 3, ('lain', 0.0): 1, ('diffici', 0.0): 1, ('custom', 0.0): 1, ('internet', 0.0): 9, ('near', 0.0): 9, ('speed', 0.0): 2, ('escap', 0.0): 1, ('rapist', 0.0): 1, ('commit', 0.0): 2, ('crime', 0.0): 1, ('bachpan', 0.0): 1, ('ki', 0.0): 2, ('yaadein', 0.0): 1, ('finnair', 0.0): 1, ('heathrow', 0.0): 1, ('norwegian', 0.0): 1, (':\\', 0.0): 1, ('batteri', 0.0): 3, ('upvot', 0.0): 4, ('keeno', 0.0): 1, ('whatthefuck', 0.0): 1, ('grotti', 0.0): 1, ('attent', 0.0): 1, ('seeker', 0.0): 1, ('moral', 0.0): 1, ('fern', 0.0): 1, ('mimi', 0.0): 1, ('bali', 0.0): 1, ('she', 0.0): 4, ('pleasee', 0.0): 3, ('brb', 0.0): 1, ('lowbat', 0.0): 1, ('otwolgrandtrail', 0.0): 4, ('funk', 0.0): 1, ('wewanticecream', 0.0): 1, ('sweat', 0.0): 2, ('eugh', 0.0): 1, ('speak', 0.0): 4, ('occasion', 0.0): 1, ("izzy'", 0.0): 1, ('dorm', 0.0): 1, ('choppi', 0.0): 1, ('paul', 0.0): 1, ('switch', 0.0): 4, ("infinite'", 0.0): 2, ('5:30', 0.0): 2, ('cayton', 0.0): 1, ('bay', 0.0): 2, ('emma', 0.0): 2, ('jen', 0.0): 1, ('darcey', 0.0): 1, ('connor', 0.0): 1, ('spoke', 0.0): 1, ('nail', 0.0): 2, ('biggest', 0.0): 3, ('blue', 0.0): 5, ('bottl', 0.0): 3, ('roommateexperi', 0.0): 1, ('yup', 0.0): 4, ('avoid', 0.0): 2, ('ic', 0.0): 1, ('te', 0.0): 1, ('auto-followback', 0.0): 1, ('asian', 0.0): 2, ('puppi', 0.0): 3, ('ljp', 0.0): 1, ('1/5', 0.0): 1, ('nowday', 0.0): 1, ('attach', 0.0): 2, ('beat', 0.0): 2, ('numb', 0.0): 1, ('dentist', 0.0): 3, ('misss', 0.0): 2, ('muchhh', 0.0): 1, ('youtub', 0.0): 5, ('rid', 0.0): 3, ('tab', 0.0): 2, ('uca', 0.0): 1, ('onto', 0.0): 2, ('track', 0.0): 3, ('bigtim', 0.0): 1, ('rumor', 0.0): 3, ('warmest', 0.0): 1, ('chin', 0.0): 2, ('tickl', 0.0): 1, ('♫', 0.0): 1, ('zikra', 0.0): 1, ('lusi', 0.0): 1, ('hasya', 0.0): 1, ('nugget', 0.0): 3, ('som', 0.0): 1, ('lu', 0.0): 1, ('olymp', 0.0): 1, ("millie'", 0.0): 1, ('guinea', 0.0): 1, ('lewi', 0.0): 1, ('748292', 0.0): 1, ("we'll", 0.0): 8, ('ano', 0.0): 2, ('22stan', 0.0): 1, ('24/7', 0.0): 2, ('thankyou', 0.0): 2, ('kanina', 0.0): 2, ('breakdown', 0.0): 2, ('mag', 0.0): 2, ('hatee', 0.0): 1, ('leas', 0.0): 1, ('written', 0.0): 2, ('hurri', 0.0): 4, ('attempt', 0.0): 1, ('6g', 0.0): 1, ('unsuccess', 0.0): 1, ('earlob', 0.0): 1, ('sue', 0.0): 1, ('dreari', 0.0): 1, ('denis', 0.0): 1, ('muriel', 0.0): 1, ('ahouré', 0.0): 1, ('pr', 0.0): 1, ('brand', 0.0): 1, ('imag', 0.0): 4, ('opportun', 0.0): 1, ('po', 0.0): 1, ('beg', 0.0): 2, ("kath'd", 0.0): 1, ('respond', 0.0): 2, ('chop', 0.0): 1, ('wbu', 0.0): 1, ('yess', 0.0): 2, ('kme', 0.0): 1, ('tom', 0.0): 4, ('cram', 0.0): 1, ('–', 0.0): 1, ('curiou', 0.0): 1, ('on-board', 0.0): 1, ('announc', 0.0): 3, ('trespass', 0.0): 1, ('fr', 0.0): 3, ('clandestin', 0.0): 1, ('muller', 0.0): 1, ('obviou', 0.0): 1, ('mufc', 0.0): 1, ('colour', 0.0): 4, ('stu', 0.0): 2, ('movie', 0.0): 1, ('buddyyi', 0.0): 1, ('feelgoodfriday', 0.0): 1, ('forest', 0.0): 1, ('6:30', 0.0): 1, ('babysit', 0.0): 1, ('opix', 0.0): 1, ('805', 0.0): 1, ('pilllow', 0.0): 1, ('fool', 0.0): 1, ('brag', 0.0): 1, ('skrillah', 0.0): 1, ('drown', 0.0): 2, ('gue', 0.0): 1, 
('report', 0.0): 4, ('eventu', 0.0): 1, ('north', 0.0): 1, ('west', 0.0): 2, ('kitti', 0.0): 1, ('sjkao', 0.0): 1, ('mm', 0.0): 2, ('srri', 0.0): 1, ('honma', 0.0): 1, ('yeh', 0.0): 1, ('walay', 0.0): 1, ('bhi', 0.0): 2, ('bohat', 0.0): 1, ('wailay', 0.0): 1, ('hain', 0.0): 2, ('pre-season', 0.0): 1, ('friendli', 0.0): 3, ('pe', 0.0): 3, ('itna', 0.0): 2, ('shor', 0.0): 1, ('machaya', 0.0): 1, ('mein', 0.0): 1, ('samjha', 0.0): 1, ('cup', 0.0): 3, ('note', 0.0): 2, ('😄', 0.0): 1, ('👍', 0.0): 1, ('😔', 0.0): 7, ('sirkay', 0.0): 1, ('wali', 0.0): 1, ('pyaaz', 0.0): 1, ('daal', 0.0): 2, ('onion', 0.0): 1, ('vinegar', 0.0): 1, ('cook', 0.0): 3, ('tutori', 0.0): 1, ('soho', 0.0): 1, ('wobbl', 0.0): 1, ('server', 0.0): 4, ('ciao', 0.0): 1, ('masaan', 0.0): 1, ('muv', 0.0): 1, ('beast', 0.0): 2, ('hayst', 0.0): 1, ('cr', 0.0): 1, ('hnnn', 0.0): 1, ('fluffi', 0.0): 2, ('comeback', 0.0): 3, ('korea', 0.0): 1, ('wow', 0.0): 10, ('act', 0.0): 4, ('optimis', 0.0): 1, ('soniii', 0.0): 1, ('kahaaa', 0.0): 1, ('shave', 0.0): 3, ('tryna', 0.0): 3, ('healthi', 0.0): 2, ('freez', 0.0): 3, ('fml', 0.0): 4, ('jacket', 0.0): 1, ('sleepi', 0.0): 4, ('cyber', 0.0): 1, ('bulli', 0.0): 2, ('racial', 0.0): 2, ('scari', 0.0): 6, ('hall', 0.0): 1, ('stockholm', 0.0): 1, ('loool', 0.0): 3, ('bunch', 0.0): 3, ('among', 0.0): 1, ('__', 0.0): 2, ('busier', 0.0): 1, ('onward', 0.0): 1, ('ol', 0.0): 2, ('coincid', 0.0): 1, ('imac', 0.0): 1, ('launch', 0.0): 2, ('gram', 0.0): 1, ('nearer', 0.0): 1, ('blain', 0.0): 2, ('darren', 0.0): 2, ('layout', 0.0): 3, ('fuuuck', 0.0): 2, ('jesu', 0.0): 1, ('gishwh', 0.0): 1, ('exclud', 0.0): 1, ('unless', 0.0): 4, ('c', 0.0): 7, ('angelica', 0.0): 1, ('pull', 0.0): 5, ('colleg', 0.0): 5, ('movement', 0.0): 1, ('frou', 0.0): 1, ('vaccin', 0.0): 1, ('armor', 0.0): 2, ('legendari', 0.0): 1, ('cash', 0.0): 2, ('effort', 0.0): 2, ('nat', 0.0): 2, ('brake', 0.0): 1, ('grumpi', 0.0): 4, ('wreck', 0.0): 1, ('decis', 0.0): 2, ('gahhh', 0.0): 1, ('teribl', 0.0): 1, ('kilig', 0.0): 1, ('togeth', 0.0): 7, ('weaker', 0.0): 1, ('shravan', 0.0): 1, ('tv', 0.0): 4, ('stooop', 0.0): 1, ('gi-guilti', 0.0): 1, ('akooo', 0.0): 1, ('imveryverysorri', 0.0): 1, ('cd', 0.0): 1, ('grey', 0.0): 3, ('basenam', 0.0): 1, ('path', 0.0): 1, ('theme', 0.0): 2, ('cigar', 0.0): 1, ('speaker', 0.0): 1, ('volum', 0.0): 1, ('promethazin', 0.0): 1, ('zopiclon', 0.0): 1, ('addit', 0.0): 1, ('quetiapin', 0.0): 1, ('modifi', 0.0): 1, ('prescript', 0.0): 1, ('greska', 0.0): 1, ('macedonian', 0.0): 1, ('slovak', 0.0): 1, ('hike', 0.0): 1, ('certainli', 0.0): 2, ('browser', 0.0): 2, ('os', 0.0): 1, ('zokay', 0.0): 1, ('accent', 0.0): 1, ('b-but', 0.0): 1, ('gintama', 0.0): 1, ('shinsengumi', 0.0): 1, ('chapter', 0.0): 1, ('andi', 0.0): 1, ('crappl', 0.0): 1, ('agre', 0.0): 5, ('ftw', 0.0): 2, ('phandroid', 0.0): 1, ('tline', 0.0): 1, ('orchestra', 0.0): 1, ('ppl', 0.0): 5, ('rehears', 0.0): 1, ('bittersweet', 0.0): 1, ('eunji', 0.0): 1, ('bakit', 0.0): 4, ('121st', 0.0): 1, ("yesterday'", 0.0): 1, ('rt', 0.0): 8, ('ehdar', 0.0): 1, ('pegea', 0.0): 1, ('panga', 0.0): 1, ('dosto', 0.0): 1, ('nd', 0.0): 1, ('real_liam_payn', 0.0): 1, ('retweet', 0.0): 5, ('3/10', 0.0): 1, ('dmed', 0.0): 1, ('ad', 0.0): 1, ('yay', 0.0): 3, ('23', 0.0): 2, ('alreaddyyi', 0.0): 1, ('luceleva', 0.0): 1, ('21', 0.0): 1, ('porno', 0.0): 3, ('countrymus', 0.0): 4, ('sexysasunday', 0.0): 2, ('naeun', 0.0): 1, ('goal', 0.0): 5, ("son'", 0.0): 1, ('kidney', 0.0): 2, ('printer', 0.0): 1, ('ink', 0.0): 2, ('asham', 0.0): 3, ('ihatesomepeopl', 0.0): 1, ('tabl', 
0.0): 2, ('0-2', 0.0): 1, ('brain', 0.0): 2, ('hard-wir', 0.0): 1, ('canadian', 0.0): 1, ('acn', 0.0): 2, ('gulo', 0.0): 1, ('kandekj', 0.0): 1, ('rize', 0.0): 1, ('meydan', 0.0): 1, ('experienc', 0.0): 2, ('fcking', 0.0): 1, ('crei', 0.0): 1, ('stabl', 0.0): 1, ('dormmat', 0.0): 1, ('pre', 0.0): 3, ('bo3', 0.0): 1, ('cod', 0.0): 2, ('redeem', 0.0): 1, ('invalid', 0.0): 1, ('wag', 0.0): 1, ('hopia', 0.0): 1, ('campaign', 0.0): 2, ('editor', 0.0): 1, ('reveal', 0.0): 2, ('booo', 0.0): 2, ('extens', 0.0): 1, ('rightnow', 0.0): 1, ('btu', 0.0): 1, ('karaok', 0.0): 1, ('licenc', 0.0): 1, ('apb', 0.0): 2, ('mbf', 0.0): 1, ('kpop', 0.0): 2, ('hahahaokay', 0.0): 1, ('basara', 0.0): 1, ('capcom', 0.0): 3, ('pc', 0.0): 2, ('url', 0.0): 2, ('web', 0.0): 2, ('site', 0.0): 6, ('design', 0.0): 3, ('grumbl', 0.0): 2, ('migrant', 0.0): 1, ('daddi', 0.0): 4, ('legit', 0.0): 1, ('australia', 0.0): 3, ('awsm', 0.0): 1, ('entir', 0.0): 5, ('tmw', 0.0): 1, ('uwu', 0.0): 1, ('jinki', 0.0): 1, ('taem', 0.0): 1, ('gif', 0.0): 2, ('cambridg', 0.0): 1, ('viath', 0.0): 1, ('brilliant', 0.0): 1, ('cypru', 0.0): 1, ('wet', 0.0): 10, ('30th', 0.0): 1, ('zayncomebackto', 0.0): 2, ('1d', 0.0): 6, ('senior', 0.0): 2, ('spazz', 0.0): 1, ('soobin', 0.0): 1, ('27', 0.0): 1, ('unmarri', 0.0): 1, ('float', 0.0): 3, ('pressur', 0.0): 3, ('winter', 0.0): 4, ('lifetim', 0.0): 2, ('hiondsh', 0.0): 1, ('58543', 0.0): 1, ('kikmenow', 0.0): 9, ('sexdat', 0.0): 2, ("demi'", 0.0): 1, ('junjou', 0.0): 2, ('romantica', 0.0): 1, ('cruel', 0.0): 1, ('privileg', 0.0): 2, ('mixtap', 0.0): 2, ('convinc', 0.0): 3, ('friex', 0.0): 1, ('taco', 0.0): 2, ('europ', 0.0): 2, ('shaylan', 0.0): 1, ('4:20', 0.0): 1, ('ylona', 0.0): 1, ('nah', 0.0): 4, ('notanapolog', 0.0): 3, ('ouh', 0.0): 1, ('tax', 0.0): 4, ('ohhh', 0.0): 2, ('nm', 0.0): 1, ('term', 0.0): 1, ('apolog', 0.0): 3, ('encanta', 0.0): 1, ('vale', 0.0): 1, ('osea', 0.0): 1, ('bea', 0.0): 1, ('♛', 0.0): 210, ('》', 0.0): 210, ('beli̇ev', 0.0): 35, ('wi̇ll', 0.0): 35, ('justi̇n', 0.0): 35, ('x15', 0.0): 35, ('350', 0.0): 4, ('see', 0.0): 35, ('me', 0.0): 35, ('40', 0.0): 3, ('dj', 0.0): 2, ('net', 0.0): 2, ('349', 0.0): 1, ('baek', 0.0): 1, ('tight', 0.0): 1, ('dunwan', 0.0): 1, ('suan', 0.0): 1, ('ba', 0.0): 3, ('haiz', 0.0): 1, ('otw', 0.0): 1, ('trade', 0.0): 3, ('venic', 0.0): 1, ('348', 0.0): 1, ('strong', 0.0): 6, ('adult', 0.0): 3, ('347', 0.0): 1, ('tree', 0.0): 3, ('hill', 0.0): 1, ('😕', 0.0): 1, ('com', 0.0): 1, ('insonia', 0.0): 1, ('346', 0.0): 1, ('rick', 0.0): 1, ('ross', 0.0): 1, ('wallet', 0.0): 4, ('empti', 0.0): 3, ('heartbreak', 0.0): 2, ('episod', 0.0): 11, ('345', 0.0): 1, ('milli', 0.0): 1, (':)', 0.0): 2, ('diff', 0.0): 1, ('persona', 0.0): 1, ('golden', 0.0): 1, ('scene', 0.0): 1, ('advert', 0.0): 1, ('determin', 0.0): 2, ('roseburi', 0.0): 1, ('familyhom', 0.0): 1, ('daw', 0.0): 2, ('344', 0.0): 1, ('monkey', 0.0): 1, ('yea', 0.0): 2, ('343', 0.0): 1, ('sweeti', 0.0): 2, ('erica', 0.0): 1, ('istg', 0.0): 1, ('lick', 0.0): 1, ('jackson', 0.0): 4, ('nsbzhdnxndamal', 0.0): 1, ('342', 0.0): 1, ('11:15', 0.0): 1, ('2hour', 0.0): 1, ('11:25', 0.0): 1, ('341', 0.0): 1, ('fandom', 0.0): 2, ('mahilig', 0.0): 1, ('mam-bulli', 0.0): 1, ('mtaani', 0.0): 1, ('tunaita', 0.0): 1, ('viazi', 0.0): 1, ('choma', 0.0): 1, ('laid', 0.0): 1, ('celebr', 0.0): 3, ('7am', 0.0): 1, ('jerk', 0.0): 1, ('lah', 0.0): 2, ('magic', 0.0): 1, ('menil', 0.0): 1, ('340', 0.0): 1, ("kam'", 0.0): 1, ('meee', 0.0): 1, ('diz', 0.0): 1, ('biooo', 0.0): 1, ('ay', 0.0): 1, ('taray', 0.0): 1, ('yumu-youtub', 
0.0): 1, ('339', 0.0): 1, ('parijat', 0.0): 1, ('willmissyouparijat', 0.0): 1, ('abroad', 0.0): 2, ('jolli', 0.0): 1, ('scotland', 0.0): 2, ('338', 0.0): 1, ('mcnugget', 0.0): 1, ('sophi', 0.0): 5, ('feedback', 0.0): 4, ('met', 0.0): 7, ('caramello', 0.0): 2, ('koala', 0.0): 1, ('bar', 0.0): 1, ('suckmejimin', 0.0): 1, ('337', 0.0): 1, ('sucki', 0.0): 2, ('laughter', 0.0): 1, ('pou', 0.0): 1, ('goddamn', 0.0): 1, ('bark', 0.0): 1, ('nje', 0.0): 1, ('blast', 0.0): 1, ('hun', 0.0): 4, ('dbn', 0.0): 2, ('🎀', 0.0): 1, ('336', 0.0): 1, ('hardest', 0.0): 1, ('335', 0.0): 1, ('pledg', 0.0): 1, ('realiz', 0.0): 7, ('viber', 0.0): 1, ('mwah', 0.0): 1, ('estat', 0.0): 1, ('crush', 0.0): 1, ('lansi', 0.0): 1, ('334', 0.0): 1, ('hp', 0.0): 4, ('waah', 0.0): 1, ('miami', 0.0): 1, ('vandag', 0.0): 1, ('kgola', 0.0): 1, ('neng', 0.0): 1, ('eintlik', 0.0): 1, ('porn', 0.0): 2, ('4like', 0.0): 5, ('repost', 0.0): 2, ('333', 0.0): 4, ('magpi', 0.0): 1, ('22.05', 0.0): 1, ('15-24', 0.0): 1, ('05.15', 0.0): 1, ('coach', 0.0): 2, ('ador', 0.0): 1, ('chswiyfxcskcalum', 0.0): 1, ('nvm', 0.0): 2, ('lemm', 0.0): 1, ('quiet', 0.0): 3, ('foof', 0.0): 1, ('332', 0.0): 1, ('casilla', 0.0): 1, ('manchest', 0.0): 3, ('xi', 0.0): 1, ('rmtour', 0.0): 1, ('heavi', 0.0): 3, ('irl', 0.0): 2, ('blooper', 0.0): 2, ('huhuhuhu', 0.0): 1, ('na-tak', 0.0): 1, ('sorta', 0.0): 1, ('unfriend', 0.0): 1, ('greysonch', 0.0): 1, ('sandwich', 0.0): 4, ('bell', 0.0): 1, ('sebastian', 0.0): 1, ('rewatch', 0.0): 1, ('s4', 0.0): 1, ('ser', 0.0): 1, ('past', 0.0): 5, ('heart-break', 0.0): 1, ('outdat', 0.0): 1, ('m4', 0.0): 1, ('abandon', 0.0): 1, ('theater', 0.0): 1, ('smh', 0.0): 6, ('7-3', 0.0): 1, ('7.30-', 0.0): 1, ('ekk', 0.0): 1, ('giriboy', 0.0): 1, ('harriet', 0.0): 1, ('gegu', 0.0): 1, ('gray', 0.0): 1, ('truth', 0.0): 4, ('tbt', 0.0): 1, ('331', 0.0): 1, ('roof', 0.0): 2, ('indian', 0.0): 2, ('polit', 0.0): 3, ('blame', 0.0): 3, ('68', 0.0): 1, ('repres', 0.0): 1, ('corbyn', 0.0): 1, ("labour'", 0.0): 1, ('fortun', 0.0): 1, ('icecream', 0.0): 3, ('cuti', 0.0): 2, ('ry', 0.0): 1, ('lfccw', 0.0): 1, ('5ever', 0.0): 1, ('america', 0.0): 3, ('ontheroadagain', 0.0): 1, ('halaaang', 0.0): 1, ('reciev', 0.0): 1, ('flip', 0.0): 4, ('flop', 0.0): 1, ('caesarspalac', 0.0): 1, ('socialreward', 0.0): 1, ('requir', 0.0): 2, ('cali', 0.0): 1, ('fuckboy', 0.0): 1, ('330', 0.0): 1, ('deliveri', 0.0): 3, ('chrompet', 0.0): 1, ('easili', 0.0): 2, ('immun', 0.0): 1, ('system', 0.0): 3, ('lush', 0.0): 1, ('bathtub', 0.0): 1, ('php', 0.0): 1, ('mysql', 0.0): 1, ('libmysqlclient-dev', 0.0): 1, ('dev', 0.0): 2, ('pleasanton', 0.0): 1, ('wala', 0.0): 1, ('329', 0.0): 1, ('quickli', 0.0): 2, ('megan', 0.0): 1, ('heed', 0.0): 2, ('328', 0.0): 1, ('gwss', 0.0): 1, ('thankyouu', 0.0): 1, ('charad', 0.0): 1, ('becom', 0.0): 5, ('piano', 0.0): 2, ('327', 0.0): 1, ('complaint', 0.0): 2, ('yell', 0.0): 2, ('whatsoev', 0.0): 2, ('pete', 0.0): 1, ('wentz', 0.0): 1, ('shogi', 0.0): 1, ('blameshoghicp', 0.0): 1, ('classmat', 0.0): 1, ('troubl', 0.0): 1, ('fixedgearfrenzi', 0.0): 1, ('dispatch', 0.0): 1, ('theyr', 0.0): 2, ('hat', 0.0): 2, ("shamuon'", 0.0): 1, ('tokyo', 0.0): 1, ('toe', 0.0): 2, ('horrend', 0.0): 2, ("someone'", 0.0): 2, ('326', 0.0): 1, ('hasb', 0.0): 1, ('atti', 0.0): 1, ('muji', 0.0): 1, ('sirf', 0.0): 1, ('sensibl', 0.0): 1, ('etc', 0.0): 2, ('brum', 0.0): 1, ('cyclerevolut', 0.0): 1, ('caaannnttt', 0.0): 1, ('payment', 0.0): 3, ('overdrawn', 0.0): 1, ('tbf', 0.0): 1, ('complain', 0.0): 2, ('perfum', 0.0): 1, ('sampl', 0.0): 1, ('chanel', 0.0): 1, 
('burberri', 0.0): 1, ('prada', 0.0): 1, ('325', 0.0): 1, ('noesss', 0.0): 1, ('topgear', 0.0): 1, ('worthi', 0.0): 1, ('bridesmaid', 0.0): 1, ("tomorrow'", 0.0): 2, ('gather', 0.0): 1, ('sudden', 0.0): 4, ('324', 0.0): 1, ('randomrestart', 0.0): 1, ('randomreboot', 0.0): 1, ('lumia', 0.0): 1, ('windowsphon', 0.0): 1, ("microsoft'", 0.0): 1, ('mañana', 0.0): 1, ('male', 0.0): 1, ('rap', 0.0): 1, ('sponsor', 0.0): 3, ('striker', 0.0): 2, ('lvg', 0.0): 1, ('behind', 0.0): 3, ('refurbish', 0.0): 1, ('cintiq', 0.0): 1, ("finnick'", 0.0): 1, ('askfinnick', 0.0): 1, ('contain', 0.0): 1, ('hairi', 0.0): 1, ('323', 0.0): 1, ('buri', 0.0): 1, ('omaygad', 0.0): 1, ('vic', 0.0): 1, ('surgeri', 0.0): 4, ('amber', 0.0): 8, ('tt.tt', 0.0): 1, ('hyper', 0.0): 2, ('vega', 0.0): 2, ('322', 0.0): 1, ('imiss', 0.0): 1, ('321', 0.0): 1, ('320', 0.0): 1, ('know.for', 0.0): 1, ('prepaid', 0.0): 1, ('none', 0.0): 4, ('319', 0.0): 1, ('grandma', 0.0): 1, ("grandpa'", 0.0): 1, ('farm', 0.0): 1, ('cow', 0.0): 1, ('sheep', 0.0): 1, ('hors', 0.0): 3, ('fruit', 0.0): 2, ('veget', 0.0): 1, ('puke', 0.0): 2, ('deliri', 0.0): 1, ('motilium', 0.0): 1, ('shite', 0.0): 1, ('318', 0.0): 1, ('schoolwork', 0.0): 1, ("phoebe'", 0.0): 1, ('317', 0.0): 1, ('pothol', 0.0): 1, ('316', 0.0): 1, ('notif', 0.0): 3, ('1,300', 0.0): 1, ('robyn', 0.0): 1, ('necklac', 0.0): 1, ('rachel', 0.0): 1, ('bhai', 0.0): 1, ('ramzan', 0.0): 1, ('crosss', 0.0): 1, ('clapham', 0.0): 1, ('investig', 0.0): 2, ('sth', 0.0): 1, ('essenti', 0.0): 1, ('photoshooot', 0.0): 1, ('austin', 0.0): 1, ('mahon', 0.0): 1, ('shut', 0.0): 3, ('andam', 0.0): 1, ('memor', 0.0): 1, ('cotton', 0.0): 1, ('candi', 0.0): 3, ('stock', 0.0): 3, ('swallow', 0.0): 1, ('snot', 0.0): 1, ('choke', 0.0): 1, ('taknottem', 0.0): 1, ('477', 0.0): 1, ('btob', 0.0): 2, ('percentag', 0.0): 1, ('shoshannavassil', 0.0): 1, ('swift', 0.0): 1, ('flat', 0.0): 3, ('a9', 0.0): 2, ('wsalelov', 0.0): 5, ('sexyjan', 0.0): 1, ('horni', 0.0): 2, ('goodmus', 0.0): 4, ('debut', 0.0): 3, ('lart', 0.0): 1, ('sew', 0.0): 1, ('skyfal', 0.0): 1, ('premier', 0.0): 1, ('yummi', 0.0): 2, ('manteca', 0.0): 1, ("she'd", 0.0): 2, ('probabl', 0.0): 8, ('shiatsu', 0.0): 1, ('heat', 0.0): 1, ('risk', 0.0): 3, ('edward', 0.0): 1, ('hopper', 0.0): 1, ('eyyah', 0.0): 1, ('utd', 0.0): 2, ('born', 0.0): 1, ('1-0', 0.0): 1, ('cart', 0.0): 1, ('shop', 0.0): 10, ('log', 0.0): 2, ('aaa', 0.0): 2, ('waifu', 0.0): 1, ('break', 0.0): 8, ('breakup', 0.0): 3, ('bother', 0.0): 3, ('bia', 0.0): 1, ('syndrom', 0.0): 1, ('shi', 0.0): 1, ('bias', 0.0): 1, ('pixel', 0.0): 2, ('weh', 0.0): 2, ('area', 0.0): 4, ('maymay', 0.0): 1, ('magpaalam', 0.0): 1, ('tf', 0.0): 3, ('subtitl', 0.0): 1, ('oitnb', 0.0): 1, ('backstori', 0.0): 1, ('jeremi', 0.0): 1, ('kyle', 0.0): 1, ('gimm', 0.0): 2, ('meal', 0.0): 3, ('neat-o', 0.0): 1, ('wru', 0.0): 1, ('scissor', 0.0): 1, ('creation', 0.0): 1, ('public', 0.0): 1, ('amtir', 0.0): 1, ('imysm', 0.0): 2, ('tut', 0.0): 1, ('trop', 0.0): 2, ('tard', 0.0): 1, ('deadlin', 0.0): 1, ('31', 0.0): 2, ('st', 0.0): 3, ('child', 0.0): 4, ('oct', 0.0): 2, ('bush', 0.0): 2, ('premiun', 0.0): 1, ('notcool', 0.0): 1, ('2/3', 0.0): 2, ('lahat', 0.0): 2, ('ng', 0.0): 4, ('araw', 0.0): 1, ('nage', 0.0): 1, ('gyu', 0.0): 4, ('lmfaooo', 0.0): 2, ('download', 0.0): 3, ('leagu', 0.0): 1, ('mashup', 0.0): 1, ('eu', 0.0): 1, ('lc', 0.0): 1, ('typo', 0.0): 2, ('itali', 0.0): 1, ('yass', 0.0): 1, ('christma', 0.0): 2, ('rel', 0.0): 1, ('yr', 0.0): 3, ('sydney', 0.0): 1, ('mb', 0.0): 1, ('perf', 0.0): 2, ('programm', 0.0): 1, 
('bff', 0.0): 2, ('hashtag', 0.0): 1, ('omfg', 0.0): 4, ('exercis', 0.0): 2, ('combat', 0.0): 1, ('dosent', 0.0): 1, ("sod'", 0.0): 1, ('20min', 0.0): 1, ('request', 0.0): 2, ('yahoo', 0.0): 2, ('yodel', 0.0): 2, ('jokingli', 0.0): 1, ('regret', 0.0): 5, ('starbuck', 0.0): 3, ('lynettelow', 0.0): 1, ('interraci', 0.0): 3, ("today'", 0.0): 3, ('tgif', 0.0): 1, ('gahd', 0.0): 1, ('26th', 0.0): 1, ('discov', 0.0): 1, ('12.00', 0.0): 1, ('obyun', 0.0): 1, ('unni', 0.0): 4, ('wayhh', 0.0): 1, ('preval', 0.0): 1, ('controversi', 0.0): 1, ('🍵', 0.0): 2, ('☕', 0.0): 1, ('tube', 0.0): 1, ('strike', 0.0): 3, ('meck', 0.0): 1, ('mcfc', 0.0): 1, ('fresh', 0.0): 1, ('ucan', 0.0): 1, ('anxiou', 0.0): 1, ('poc', 0.0): 1, ('specif', 0.0): 2, ('sinhala', 0.0): 1, ('billionair', 0.0): 1, ('1645', 0.0): 1, ('island', 0.0): 3, ('1190', 0.0): 1, ('maldiv', 0.0): 1, ('dheena', 0.0): 1, ('fasgadah', 0.0): 1, ('alvadhaau', 0.0): 1, ('countdown', 0.0): 1, ('function', 0.0): 3, ('desktop', 0.0): 1, ('evelineconrad', 0.0): 1, ('facetim', 0.0): 4, ('kikmsn', 0.0): 2, ('selfshot', 0.0): 2, ('panda', 0.0): 1, ('backkk', 0.0): 1, ('transfer', 0.0): 3, ('dan', 0.0): 2, ('dull', 0.0): 1, ('overcast', 0.0): 1, ('folder', 0.0): 1, ('truck', 0.0): 2, ('missin', 0.0): 2, ('hangin', 0.0): 1, ('wiff', 0.0): 1, ('dept', 0.0): 1, ('cherri', 0.0): 1, ('bakewel', 0.0): 1, ('collect', 0.0): 3, ('teal', 0.0): 1, ('sect', 0.0): 1, ('tennunb', 0.0): 1, ('rather', 0.0): 4, ('skip', 0.0): 1, ('doomsday', 0.0): 1, ('neglect', 0.0): 1, ('posti', 0.0): 1, ('goodnight', 0.0): 1, ('donat', 0.0): 3, ('ship', 0.0): 6, ('bellami', 0.0): 1, ('raven', 0.0): 2, ('clark', 0.0): 1, ('helmi', 0.0): 1, ('uh', 0.0): 5, ('cnt', 0.0): 1, ('whereisthesun', 0.0): 1, ('summerismiss', 0.0): 1, ('longgg', 0.0): 1, ('ridicul', 0.0): 4, ('stocko', 0.0): 1, ('lucozad', 0.0): 1, ('explos', 0.0): 1, ('beh', 0.0): 2, ('half-rememb', 0.0): 1, ("melody'", 0.0): 1, ('recal', 0.0): 2, ('level', 0.0): 3, ('target', 0.0): 1, ('difficult', 0.0): 4, ('mile', 0.0): 1, ('pfb', 0.0): 1, ('nate', 0.0): 2, ('expo', 0.0): 2, ('jisoo', 0.0): 1, ('chloe', 0.0): 2, ('anon', 0.0): 2, ('mager', 0.0): 1, ('wi', 0.0): 1, ('knw', 0.0): 1, ('wht', 0.0): 1, ('distant', 0.0): 1, ('buffer', 0.0): 2, ('insan', 0.0): 1, ('charli', 0.0): 1, ('finland', 0.0): 3, ('gana', 0.0): 1, ('studio', 0.0): 3, ('arch', 0.0): 1, ('lyin', 0.0): 1, ('kian', 0.0): 3, ('supercar', 0.0): 1, ('gurgaon', 0.0): 1, ('locat', 0.0): 7, ('9:15', 0.0): 1, ('satir', 0.0): 1, ('gener', 0.0): 2, ('peanut', 0.0): 3, ('butter', 0.0): 1, ('garden', 0.0): 2, ('beer', 0.0): 1, ('viner', 0.0): 1, ('palembang', 0.0): 1, ('sorrryyi', 0.0): 1, ('fani', 0.0): 1, ('hahahahaha', 0.0): 2, ('boner', 0.0): 1, ('merci', 0.0): 1, ('yuki', 0.0): 1, ('2500k', 0.0): 1, ('mari', 0.0): 1, ('jake', 0.0): 1, ('gyllenha', 0.0): 1, ('impact', 0.0): 1, ("ledger'", 0.0): 1, ('btw', 0.0): 5, ('cough', 0.0): 4, ('hunni', 0.0): 1, ('b4', 0.0): 1, ('deplet', 0.0): 1, ('mbasa', 0.0): 1, ('client', 0.0): 3, ('ray', 0.0): 1, ('aah', 0.0): 1, ('type', 0.0): 2, ('suit', 0.0): 5, ('pa-copi', 0.0): 1, ('proper', 0.0): 2, ('biom', 0.0): 1, ('mosqu', 0.0): 1, ('smelli', 0.0): 1, ('taxi', 0.0): 4, ('emptier', 0.0): 1, ("ciara'", 0.0): 1, ("everything'", 0.0): 1, ('clip', 0.0): 2, ('tall', 0.0): 2, ('gladli', 0.0): 1, ('intent', 0.0): 1, ('amb', 0.0): 1, ("harry'", 0.0): 2, ('jean', 0.0): 2, ('mayday', 0.0): 1, ('parad', 0.0): 2, ('lyf', 0.0): 1, ('13th', 0.0): 1, ('anim', 0.0): 4, ('kingdom', 0.0): 1, ('chri', 0.0): 7, ('brown', 0.0): 4, ('riski', 0.0): 1, 
('cologn', 0.0): 1, ('duo', 0.0): 3, ('ballad', 0.0): 2, ('bish', 0.0): 2, ('intern', 0.0): 2, ('brought', 0.0): 1, ('yumyum', 0.0): 1, ("cathy'", 0.0): 1, ('missyou', 0.0): 1, ('rubi', 0.0): 2, ('rose', 0.0): 2, ('tou', 0.0): 1, ('main', 0.0): 1, ('pora', 0.0): 1, ('stalk', 0.0): 3, ('karlia', 0.0): 1, ('khatam', 0.0): 2, ('bandi', 0.0): 1, ('👑', 0.0): 1, ('pyaari', 0.0): 1, ('gawd', 0.0): 1, ('understood', 0.0): 1, ('review', 0.0): 3, ('massi', 0.0): 1, ('thatselfiethough', 0.0): 1, ('loop', 0.0): 1, ('ofc', 0.0): 1, ('pict', 0.0): 1, ('caught', 0.0): 1, ('aishhh', 0.0): 1, ('viewer', 0.0): 1, ('exam', 0.0): 5, ('sighsss', 0.0): 1, ('burnt', 0.0): 2, ('toffe', 0.0): 2, ('honesti', 0.0): 1, ('cheatday', 0.0): 1, ('protein', 0.0): 1, ('sissi', 0.0): 1, ('tote', 0.0): 1, ('slowli', 0.0): 1, ('church', 0.0): 2, ('pll', 0.0): 1, ('sel', 0.0): 1, ('beth', 0.0): 2, ('serbia', 0.0): 1, ('serbian', 0.0): 1, ('selen', 0.0): 1, ('motav', 0.0): 1, ('💋', 0.0): 2, ('zayyyn', 0.0): 1, ('momma', 0.0): 1, ('happend', 0.0): 1, ('imper', 0.0): 1, ('trmdhesit', 0.0): 1, ('pana', 0.0): 1, ('quickest', 0.0): 2, ('blood', 0.0): 5, ('sake', 0.0): 1, ('hamstr', 0.0): 1, ('rodwel', 0.0): 1, ('trace', 0.0): 1, ('artist', 0.0): 4, ('tp', 0.0): 1, ('powder', 0.0): 1, ('wider', 0.0): 1, ('honestli', 0.0): 4, ('comfort', 0.0): 3, ('bruno', 0.0): 1, ('1.8', 0.0): 1, ('ed', 0.0): 7, ('croke', 0.0): 2, ('deal', 0.0): 6, ('toll', 0.0): 1, ('packag', 0.0): 1, ('shape', 0.0): 1, ('unluckiest', 0.0): 1, ('bettor', 0.0): 1, ('nstp', 0.0): 1, ('sem', 0.0): 2, ('chipotl', 0.0): 1, ('chick-fil-a', 0.0): 1, ('stole', 0.0): 3, ('evet', 0.0): 1, ('ramadhan', 0.0): 1, ('eid', 0.0): 4, ('stexpert', 0.0): 1, ('ripstegi', 0.0): 1, ('nickyyi', 0.0): 1, ('¿', 0.0): 1, ('centralis', 0.0): 1, ('discontinu', 0.0): 1, ('sniff', 0.0): 1, ("i't", 0.0): 1, ('glad', 0.0): 2, ('fab', 0.0): 2, ('theres', 0.0): 1, ('cred', 0.0): 1, ('t_t', 0.0): 1, ('elimin', 0.0): 1, ('teamzip', 0.0): 1, ('smtm', 0.0): 1, ('assingn', 0.0): 1, ('editi', 0.0): 1, ('nakaka', 0.0): 1, ('beastmod', 0.0): 1, ('gaaawd', 0.0): 1, ('jane', 0.0): 1, ('mango', 0.0): 1, ('colombia', 0.0): 1, ('yot', 0.0): 1, ('labyo', 0.0): 1, ('pano', 0.0): 1, ('nalamannn', 0.0): 1, ('hardhead', 0.0): 1, ('cell', 0.0): 1, ("zach'", 0.0): 1, ('burger', 0.0): 2, ('xpress', 0.0): 1, ('hopkin', 0.0): 1, ('melatonin', 0.0): 1, ('2-4', 0.0): 1, ('nap', 0.0): 2, ('wide', 0.0): 2, ('task', 0.0): 1, ('9pm', 0.0): 1, ('hahaah', 0.0): 1, ('frequent', 0.0): 1, ('jail', 0.0): 2, ('weirddd', 0.0): 1, ('donghyuk', 0.0): 1, ('stan', 0.0): 1, ('bek', 0.0): 1, ('13', 0.0): 4, ('reynoldsgrl', 0.0): 1, ('ole', 0.0): 1, ('beardi', 0.0): 1, ('kaussi', 0.0): 1, ('bummer', 0.0): 3, ('fightingmciren', 0.0): 1, ("michael'", 0.0): 1, ('�', 0.0): 21, ('miser', 0.0): 2, ('💦', 0.0): 1, ('yoga', 0.0): 2, ('🌞', 0.0): 1, ('💃', 0.0): 1, ('🏽', 0.0): 1, ('shouldv', 0.0): 1, ('saffron', 0.0): 1, ('peasant', 0.0): 1, ('wouldv', 0.0): 1, ('nfinit', 0.0): 1, ('admin_myung', 0.0): 1, ('slp', 0.0): 1, ('saddest', 0.0): 2, ('laomma', 0.0): 2, ('kebaya', 0.0): 1, ('bandung', 0.0): 1, ('indonesia', 0.0): 1, ('7df89150', 0.0): 1, ('whatsapp', 0.0): 2, ('62', 0.0): 1, ('08962464174', 0.0): 1, ('laomma_coutur', 0.0): 1, ('haizzz', 0.0): 1, ('urghhh', 0.0): 1, ('working-on-a-tight-schedul', 0.0): 1, ('ganbarimasu', 0.0): 1, ('livid', 0.0): 1, ('whammi', 0.0): 1, ('quuuee', 0.0): 1, ('friooo', 0.0): 1, ('ladi', 0.0): 4, ('stereo', 0.0): 1, ('chwang', 0.0): 1, ('lorm', 0.0): 1, ('823', 0.0): 1, ('rp', 0.0): 1, ('indiemus', 0.0): 10, 
('unhappi', 0.0): 2, ('jennyjean', 0.0): 1, ('elfindelmundo', 0.0): 2, ('lolzz', 0.0): 1, ('dat', 0.0): 4, ('corey', 0.0): 1, ('appreci', 0.0): 2, ('weekli', 0.0): 2, ('mahirap', 0.0): 1, ('nash', 0.0): 1, ('gosh', 0.0): 6, ('noodl', 0.0): 1, ('veeerri', 0.0): 1, ('rted', 0.0): 2, ('orig', 0.0): 1, ('starholicxx', 0.0): 1, ('07:17', 0.0): 2, ('@the', 0.0): 1, ('notr', 0.0): 1, ('hwi', 0.0): 1, ('niall', 0.0): 5, ('fraud', 0.0): 1, ('diplomaci', 0.0): 1, ('fittest', 0.0): 1, ('zero', 0.0): 1, ('toler', 0.0): 2, ('gurl', 0.0): 1, ('notion', 0.0): 1, ('pier', 0.0): 1, ('approach', 0.0): 1, ('rattl', 0.0): 1, ('robe', 0.0): 1, ('emphasi', 0.0): 1, ('vocal', 0.0): 1, ('chose', 0.0): 1, ('erm', 0.0): 1, ('abby.can', 0.0): 1, ('persuad', 0.0): 1, ('lyric', 0.0): 1, ("emily'", 0.0): 1, ('odd', 0.0): 3, ('possibl', 0.0): 8, ('elect', 0.0): 2, ('kamiss', 0.0): 1, ('mwa', 0.0): 1, ('mommi', 0.0): 3, ('scream', 0.0): 1, ('fight', 0.0): 2, ('cafe', 0.0): 2, ('melbourn', 0.0): 1, ('anyonnee', 0.0): 1, ('loner', 0.0): 1, ('fricken', 0.0): 2, ('rito', 0.0): 1, ('friendzon', 0.0): 1, ('panel', 0.0): 1, ('repeat', 0.0): 2, ('audienc', 0.0): 1, ('hsm', 0.0): 1, ('canario', 0.0): 1, ('hotel', 0.0): 8, ('ukiss', 0.0): 1, ('faith', 0.0): 2, ('kurt', 0.0): 1, ("fatma'm", 0.0): 1, ('alex', 0.0): 4, ('swag', 0.0): 1, ('lmfao', 0.0): 2, ('flapjack', 0.0): 1, ('countthecost', 0.0): 1, ('ihop', 0.0): 1, ('infra', 0.0): 1, ('lq', 0.0): 1, ('knive', 0.0): 1, ('sotir', 0.0): 1, ('mybrainneedstoshutoff', 0.0): 1, ('macci', 0.0): 1, ('chees', 0.0): 7, ('25', 0.0): 2, ('tend', 0.0): 1, ('510', 0.0): 1, ('silicon', 0.0): 1, ('cover', 0.0): 2, ('kbye', 0.0): 1, ('ini', 0.0): 1, ('anytim', 0.0): 1, ('citizen', 0.0): 1, ('compar', 0.0): 2, ('rank', 0.0): 1, ('mcountdown', 0.0): 2, ('5h', 0.0): 1, ('thapelo', 0.0): 1, ('op', 0.0): 1, ('civ', 0.0): 1, ('wooden', 0.0): 1, ('mic', 0.0): 1, ('embarrass', 0.0): 2, ('translat', 0.0): 3, ('daili', 0.0): 3, ('mecha-totem', 0.0): 1, ('nak', 0.0): 1, ('tgk', 0.0): 1, ('townsss', 0.0): 1, ('jokid', 0.0): 1, ('rent', 0.0): 2, ('degre', 0.0): 1, ('inconsider', 0.0): 2, ('softbal', 0.0): 1, ('appli', 0.0): 1, ('tomcat', 0.0): 1, ('chel', 0.0): 1, ('jemma', 0.0): 1, ('detail', 0.0): 4, ('list', 0.0): 4, ('matchi', 0.0): 2, ('elsa', 0.0): 1, ('postpon', 0.0): 1, ('karin', 0.0): 1, ('honey', 0.0): 2, ('vist', 0.0): 1, ('unhealthi', 0.0): 1, ('propa', 0.0): 1, ('knockin', 0.0): 1, ('bacon', 0.0): 1, ('market', 0.0): 2, ('pre-holiday', 0.0): 1, ('diet', 0.0): 1, ('meani', 0.0): 1, ('deathbybaconsmel', 0.0): 1, ('init', 0.0): 2, ('destin', 0.0): 1, ('victoria', 0.0): 2, ('luna', 0.0): 1, ('krystal', 0.0): 1, ('sarajevo', 0.0): 1, ('haix', 0.0): 2, ('sp', 0.0): 1, ('student', 0.0): 4, ('wii', 0.0): 2, ('bayonetta', 0.0): 1, ('101', 0.0): 1, ('doabl', 0.0): 1, ('drove', 0.0): 1, ('agenc', 0.0): 1, ('story.miss', 0.0): 1, ('everon', 0.0): 1, ('jp', 0.0): 1, ('mamabear', 0.0): 1, ('imintoh', 0.0): 1, ('underr', 0.0): 1, ("slovakia'", 0.0): 1, ('D:', 0.0): 6, ('saklap', 0.0): 1, ('grade', 0.0): 2, ('rizal', 0.0): 1, ('lib', 0.0): 1, ('discuss', 0.0): 1, ('advisori', 0.0): 1, ('period', 0.0): 2, ('dit', 0.0): 1, ('du', 0.0): 1, ('harsh', 0.0): 2, ('ohgod', 0.0): 1, ('abligaverin', 0.0): 2, ('photooftheday', 0.0): 2, ('sexygirlbypreciouslemmi', 0.0): 3, ('ripsandrabland', 0.0): 1, ('edel', 0.0): 1, ('salam', 0.0): 1, ('mubark', 0.0): 1, ('dong', 0.0): 3, ('tammirossm', 0.0): 4, ('speck', 0.0): 1, ('abbymil', 0.0): 2, ('18', 0.0): 8, ('ion', 0.0): 1, ('5min', 0.0): 1, ('hse', 0.0): 1, ('noob', 0.0): 1, 
('nxt', 0.0): 1, ('2week', 0.0): 1, ('300', 0.0): 3, ('fck', 0.0): 2, ('nae', 0.0): 2, ('deep', 0.0): 3, ('human', 0.0): 3, ('whit', 0.0): 1, ('van', 0.0): 4, ('bristol', 0.0): 1, ('subserv', 0.0): 1, ('si', 0.0): 4, ('oo', 0.0): 1, ('tub', 0.0): 1, ('penyfan', 0.0): 1, ('forecast', 0.0): 2, ('breconbeacon', 0.0): 1, ('tittheir', 0.0): 1, ('42', 0.0): 1, ('hotti', 0.0): 3, ('uu', 0.0): 2, ('rough', 0.0): 1, ('fuzzi', 0.0): 1, ('san', 0.0): 3, ('antonio', 0.0): 1, ('kang', 0.0): 1, ('junhe', 0.0): 1, ('couldv', 0.0): 1, ('pz', 0.0): 1, ('somerset', 0.0): 1, ('given', 0.0): 2, ('sunburnt', 0.0): 1, ('safer', 0.0): 1, ('k3g', 0.0): 1, ('input', 0.0): 1, ('gamestomp', 0.0): 1, ('desc', 0.0): 1, ("angelo'", 0.0): 1, ('yna', 0.0): 1, ('psygustokita', 0.0): 2, ('fiver', 0.0): 1, ('toward', 0.0): 1, ('sakho', 0.0): 1, ('threat', 0.0): 1, ('goalscor', 0.0): 1, ('10:59', 0.0): 1, ('11.00', 0.0): 1, ('sham', 0.0): 1, ('tricki', 0.0): 1, ('baao', 0.0): 1, ('nisrina', 0.0): 1, ('crazi', 0.0): 8, ('ladygaga', 0.0): 1, ("you'", 0.0): 2, ('pari', 0.0): 2, ('marrish', 0.0): 1, ("otp'", 0.0): 1, ('6:15', 0.0): 1, ('edomnt', 0.0): 1, ('qih', 0.0): 1, ('shxb', 0.0): 1, ('1000', 0.0): 1, ('chilton', 0.0): 1, ('mother', 0.0): 2, ('obsess', 0.0): 1, ('creepi', 0.0): 2, ('josh', 0.0): 1, ('boohoo', 0.0): 1, ('fellow', 0.0): 2, ('tweep', 0.0): 1, ('roar', 0.0): 1, ('victori', 0.0): 1, ('tweepsmatchout', 0.0): 1, ('nein', 0.0): 3, ('404', 0.0): 1, ('midnight', 0.0): 2, ('willlow', 0.0): 1, ('hbd', 0.0): 1, ('sowwi', 0.0): 1, ('3000', 0.0): 1, ('grind', 0.0): 1, ('gear', 0.0): 1, ('0.001', 0.0): 1, ('meant', 0.0): 6, ('portrait', 0.0): 1, ('mode', 0.0): 2, ('fact', 0.0): 4, ('11:11', 0.0): 4, ('shanzay', 0.0): 1, ('salabrati', 0.0): 1, ('journo', 0.0): 1, ('lure', 0.0): 1, ('gang', 0.0): 1, ('twist', 0.0): 1, ('mashaket', 0.0): 1, ('pet', 0.0): 2, ('bapak', 0.0): 1, ('royal', 0.0): 2, ('prima', 0.0): 1, ('mune', 0.0): 1, ('874', 0.0): 1, ('plisss', 0.0): 1, ('elf', 0.0): 1, ('teenchoic', 0.0): 5, ('choiceinternationalartist', 0.0): 5, ('superjunior', 0.0): 5, ("he'll", 0.0): 1, ('sunway', 0.0): 1, ('petal', 0.0): 1, ('jaya', 0.0): 1, ('selangor', 0.0): 1, ('glow', 0.0): 1, ('huhuu', 0.0): 1, ('congratul', 0.0): 2, ('margo', 0.0): 1, ('konga', 0.0): 1, ('ni', 0.0): 4, ('wa', 0.0): 2, ('ode', 0.0): 1, ('disvirgin', 0.0): 1, ('bride', 0.0): 3, ('yulin', 0.0): 1, ('meat', 0.0): 1, ('festiv', 0.0): 2, ('imma', 0.0): 2, ('syawal', 0.0): 1, ('lapar', 0.0): 1, ('foundat', 0.0): 1, ('clash', 0.0): 2, ('facil', 0.0): 1, ('dh', 0.0): 2, ('chalet', 0.0): 1, ('suay', 0.0): 1, ('anot', 0.0): 1, ('bugger', 0.0): 1, ('एक', 0.0): 1, ('बार', 0.0): 1, ('फिर', 0.0): 1, ('सेँ', 0.0): 1, ('धोखा', 0.0): 1, ('chandauli', 0.0): 1, ('majhwar', 0.0): 1, ('railway', 0.0): 1, ('tito', 0.0): 2, ('tita', 0.0): 1, ('cousin', 0.0): 3, ('critic', 0.0): 1, ('condit', 0.0): 1, ('steal', 0.0): 1, ('narco', 0.0): 1, ('regen', 0.0): 1, ('unfav', 0.0): 2, ('benadryl', 0.0): 1, ('offlin', 0.0): 1, ('arent', 0.0): 1, ('msg', 0.0): 1, ('yg', 0.0): 1, ('gg', 0.0): 3, ('sxrew', 0.0): 1, ('dissappear', 0.0): 1, ('swap', 0.0): 1, ('bleed', 0.0): 1, ('ishal', 0.0): 1, ('mi', 0.0): 2, ('thaank', 0.0): 1, ('jhezz', 0.0): 1, ('sneak', 0.0): 3, ('soft', 0.0): 1, ('defenc', 0.0): 1, ('defens', 0.0): 1, ('nrltigersroost', 0.0): 1, ('indiana', 0.0): 2, ('hibb', 0.0): 1, ('biblethump', 0.0): 1, ('rlyyi', 0.0): 1, ('septum', 0.0): 1, ('pierc', 0.0): 2, ('goood', 0.0): 1, ('hiya', 0.0): 1, ('fire', 0.0): 1, ('venom', 0.0): 1, ('carriag', 0.0): 1, ('pink', 0.0): 1, 
('fur-trim', 0.0): 1, ('stetson', 0.0): 1, ('error', 0.0): 4, ('59', 0.0): 1, ('xue', 0.0): 1, ('midori', 0.0): 1, ('sakit', 0.0): 2, ('mateo', 0.0): 1, ('hawk', 0.0): 2, ('bartend', 0.0): 1, ('surf', 0.0): 1, ('despair', 0.0): 1, ('insta', 0.0): 1, ('promo', 0.0): 1, ('iwantin', 0.0): 1, ('___', 0.0): 2, ('fault', 0.0): 3, ('goodluck', 0.0): 1, ('pocket', 0.0): 1, ('[email protected]', 0.0): 1, ('benedictervent', 0.0): 1, ('content', 0.0): 1, ('221b', 0.0): 1, ('popcorn', 0.0): 3, ('joyc', 0.0): 1, ('ooop', 0.0): 1, ('spotifi', 0.0): 1, ('paalam', 0.0): 1, ('sazbal', 0.0): 1, ('incid', 0.0): 1, ('aaahh', 0.0): 1, ('gooo', 0.0): 1, ("stomach'", 0.0): 1, ('growl', 0.0): 1, ('beard', 0.0): 1, ('nooop', 0.0): 1, ('🎉', 0.0): 3, ('ding', 0.0): 3, ('hundr', 0.0): 1, ('meg', 0.0): 1, ("verity'", 0.0): 1, ('rupert', 0.0): 1, ('amin', 0.0): 1, ('studi', 0.0): 2, ('pleaaas', 0.0): 1, ('👆', 0.0): 2, ('woaah', 0.0): 1, ('solvo', 0.0): 1, ('twin', 0.0): 2, ("friday'", 0.0): 1, ('lego', 0.0): 1, ('barefoot', 0.0): 1, ('twelvyy', 0.0): 1, ('boaz', 0.0): 1, ('myhil', 0.0): 1, ('takeov', 0.0): 1, ('wba', 0.0): 1, ("taeyeon'", 0.0): 1, ('derp', 0.0): 1, ('pd', 0.0): 1, ('zoom', 0.0): 2, ("sunny'", 0.0): 1, ('besst', 0.0): 1, ('plagu', 0.0): 1, ('pit', 0.0): 1, ('rich', 0.0): 1, ('sight', 0.0): 1, ('frail', 0.0): 1, ('lotteri', 0.0): 1, ('ride', 0.0): 2, ('twurkin', 0.0): 1, ('razzist', 0.0): 1, ('tumblr', 0.0): 1, ('shek', 0.0): 1, ('609', 0.0): 1, ('mugshot', 0.0): 1, ('attend', 0.0): 3, ('plsss', 0.0): 4, ('taissa', 0.0): 1, ('farmiga', 0.0): 1, ('robert', 0.0): 1, ('qualiti', 0.0): 1, ('daniel', 0.0): 1, ('latest', 0.0): 3, ('softwar', 0.0): 1, ('restor', 0.0): 2, ('momo', 0.0): 2, ('pharma', 0.0): 1, ('immov', 0.0): 1, ('messi', 0.0): 1, ('ansh', 0.0): 1, ('f1', 0.0): 1, ('billion', 0.0): 1, ('rand', 0.0): 1, ('bein', 0.0): 1, ('tla', 0.0): 1, ('tweng', 0.0): 1, ('gene', 0.0): 1, ('up.com', 0.0): 1, ('counti', 0.0): 2, ('cooler', 0.0): 1, ('minhyuk', 0.0): 1, ('gold', 0.0): 2, ('1900', 0.0): 1, ('😪', 0.0): 3, ('yu', 0.0): 1, ('hz', 0.0): 2, ('selena', 0.0): 2, ('emta', 0.0): 1, ('hatigii', 0.0): 1, ('b2aa', 0.0): 1, ('yayyy', 0.0): 1, ('anesthesia', 0.0): 1, ('penrith', 0.0): 1, ('emu', 0.0): 1, ('plain', 0.0): 1, ('staff', 0.0): 3, ('untouch', 0.0): 1, ('brienn', 0.0): 1, ('lsh', 0.0): 1, ('gunna', 0.0): 1, ('former', 0.0): 1, ('darn', 0.0): 1, ('allah', 0.0): 4, ('pakistan', 0.0): 2, ('juudiciari', 0.0): 1, ("horton'", 0.0): 1, ('dunkin', 0.0): 1, ('socialis', 0.0): 1, ('cara', 0.0): 1, ("delevingne'", 0.0): 1, ('fear', 0.0): 1, ('drug', 0.0): 1, ('lace', 0.0): 1, ('fank', 0.0): 1, ('takfaham', 0.0): 1, ('ufff', 0.0): 1, ('sr', 0.0): 2, ('dard', 0.0): 1, ('katekyn', 0.0): 1, ('ehh', 0.0): 1, ('yeahhh', 0.0): 2, ('hacharatt', 0.0): 1, ('niwll', 0.0): 1, ('defin', 0.0): 1, ('wit', 0.0): 2, ('goa', 0.0): 1, ('lini', 0.0): 1, ('kasi', 0.0): 3, ('rhd', 0.0): 1, ('1st', 0.0): 3, ('wae', 0.0): 1, ('subsid', 0.0): 1, ('20th', 0.0): 1, ('anniversari', 0.0): 1, ('youngja', 0.0): 1, ('harumph', 0.0): 1, ('soggi', 0.0): 1, ('weed', 0.0): 1, ('ireland', 0.0): 3, ('sakura', 0.0): 1, ('flavour', 0.0): 1, ('chokki', 0.0): 1, ('🌸', 0.0): 1, ('unavail', 0.0): 2, ('richard', 0.0): 2, ('laptop', 0.0): 2, ('satya', 0.0): 1, ('aditya', 0.0): 1, ('🍜', 0.0): 3, ('vibrat', 0.0): 1, ('an', 0.0): 2, ('cu', 0.0): 1, ('dhaka', 0.0): 1, ('jam', 0.0): 1, ('shall', 0.0): 2, ('cornetto', 0.0): 3, ('noseble', 0.0): 1, ('nintendo', 0.0): 3, ('wew', 0.0): 1, ('ramo', 0.0): 1, ('ground', 0.0): 2, ('shawn', 0.0): 1, ('mend', 0.0): 1, 
('l', 0.0): 2, ('dinghi', 0.0): 1, ('skye', 0.0): 1, ('store', 0.0): 3, ('descript', 0.0): 2, ('colleagu', 0.0): 2, ('gagal', 0.0): 2, ('txt', 0.0): 1, ('sim', 0.0): 1, ('nooot', 0.0): 1, ('notch', 0.0): 1, ('tht', 0.0): 2, ('starv', 0.0): 4, ('\U000fe196', 0.0): 1, ('pyjama', 0.0): 1, ('swifti', 0.0): 1, ('sorna', 0.0): 1, ('lurgi', 0.0): 1, ('jim', 0.0): 2, ('6gb', 0.0): 1, ('fenestoscop', 0.0): 1, ('etienn', 0.0): 1, ('bandana', 0.0): 3, ('bigger', 0.0): 2, ('vagina', 0.0): 1, ('suriya', 0.0): 1, ('dangl', 0.0): 1, ('mjhe', 0.0): 2, ('aaj', 0.0): 1, ('tak', 0.0): 3, ('kisi', 0.0): 1, ('kiya', 0.0): 1, ('eyesight', 0.0): 1, ('25x30', 0.0): 1, ('aftenoon', 0.0): 1, ('booor', 0.0): 1, ('uuu', 0.0): 1, ('boyfriend', 0.0): 8, ('freebiefriday', 0.0): 1, ('garag', 0.0): 1, ('michael', 0.0): 1, ('obvious', 0.0): 1, ('denim', 0.0): 1, ('somebodi', 0.0): 1, ('ce', 0.0): 1, ('gw', 0.0): 1, ('anatomi', 0.0): 1, ('no1', 0.0): 1, ("morisette'", 0.0): 1, ('flash', 0.0): 1, ('non-trial', 0.0): 1, ('sayhernam', 0.0): 1, ('lootcrat', 0.0): 1, ('item', 0.0): 1, ('inca', 0.0): 1, ('trail', 0.0): 1, ('sandboard', 0.0): 1, ('derbi', 0.0): 1, ('coffe', 0.0): 1, ('unabl', 0.0): 3, ('signatur', 0.0): 1, ('dish', 0.0): 1, ('unfamiliar', 0.0): 1, ('kitchen', 0.0): 3, ('coldest', 0.0): 1, ("old'", 0.0): 1, ('14518344', 0.0): 1, ('61', 0.0): 1, ('thirdwheel', 0.0): 1, ('lovebird', 0.0): 1, ('nth', 0.0): 1, ('imo', 0.0): 1, ('familiar', 0.0): 1, ('@juliettemaughan', 0.0): 1, ('copi', 0.0): 1, ('sensiesha', 0.0): 1, ('eldest', 0.0): 1, ('netbal', 0.0): 1, ('😟', 0.0): 1, ('keedz', 0.0): 1, ('taybigail', 0.0): 1, ('jordan', 0.0): 1, ('tournament', 0.0): 1, ('goin', 0.0): 1, ('ps4', 0.0): 3, ('kink', 0.0): 1, ('charger', 0.0): 1, ('streak', 0.0): 1, ('scorch', 0.0): 1, ('srski', 0.0): 1, ('tdc', 0.0): 1, ('egypt', 0.0): 1, ('in-sensit', 0.0): 1, ('cooper', 0.0): 3, ('invit', 0.0): 1, ('donna', 0.0): 1, ('thurston', 0.0): 1, ('collin', 0.0): 1, ('quietli', 0.0): 2, ('kennel', 0.0): 1, ('911', 0.0): 1, ('pluckersss', 0.0): 1, ('gion', 0.0): 1, ('886', 0.0): 1, ('nsfw', 0.0): 1, ('kidschoiceaward', 0.0): 1, ('ming', 0.0): 1, ('pbr', 0.0): 1, ('shoutout', 0.0): 1, ('periscop', 0.0): 1, ('ut', 0.0): 1, ('shawti', 0.0): 1, ('naw', 0.0): 4, ("sterling'", 0.0): 1, ('9muse', 0.0): 1, ('hrryok', 0.0): 2, ('asap', 0.0): 2, ('wnt', 0.0): 1, ('9:30', 0.0): 1, ('9:48', 0.0): 1, ('9/11', 0.0): 1, ('bueno', 0.0): 1, ('receptionist', 0.0): 1, ('ella', 0.0): 2, ('goe', 0.0): 4, ('ketchup', 0.0): 1, ('tasteless', 0.0): 1, ('deantd', 0.0): 1, ('justgotkanekifi', 0.0): 1, ('notgonnabeactivefor', 0.0): 1, ('2weeksdontmissittoomuch', 0.0): 1, ('2013', 0.0): 1, ('disney', 0.0): 2, ('vlog', 0.0): 1, ('swim', 0.0): 1, ('turtl', 0.0): 2, ('cnn', 0.0): 2, ('straplin', 0.0): 1, ('theatr', 0.0): 1, ('guncontrol', 0.0): 1, ('stung', 0.0): 2, ('tweak', 0.0): 1, ("thát'", 0.0): 1, ('powerpoint', 0.0): 1, ('present', 0.0): 5, ('diner', 0.0): 1, ('no-no', 0.0): 1, ('hind', 0.0): 1, ('circuit', 0.0): 1, ('secondari', 0.0): 1, ('sodder', 0.0): 1, ('perhap', 0.0): 2, ('mobitel', 0.0): 1, ('colin', 0.0): 1, ('playstat', 0.0): 2, ('charg', 0.0): 4, ('exp', 0.0): 1, ('misspelt', 0.0): 1, ('wan', 0.0): 1, ('hyungwon', 0.0): 2, ('alarm', 0.0): 1, ('needicecreamnow', 0.0): 1, ('shake', 0.0): 1, ('repeatedli', 0.0): 1, ('nu-uh', 0.0): 1, ('jace', 0.0): 1, ('mostest', 0.0): 1, ('vip', 0.0): 1, ('urgh', 0.0): 1, ('consol', 0.0): 1, ("grigson'", 0.0): 1, ('carrot', 0.0): 1, ('>:-(', 0.0): 4, ('sunburn', 0.0): 1, ('ughh', 0.0): 2, ('enabl', 0.0): 1, ('otter', 0.0): 1, 
('protect', 0.0): 1, ('argh', 0.0): 1, ('pon', 0.0): 1, ('otl', 0.0): 2, ('sleepov', 0.0): 2, ('jess', 0.0): 2, ('bebe', 0.0): 1, ('fabina', 0.0): 1, ("barrista'", 0.0): 1, ('plant', 0.0): 3, ('pup', 0.0): 2, ('brolli', 0.0): 1, ('mere', 0.0): 2, ('nhi', 0.0): 1, ('dey', 0.0): 2, ('serv', 0.0): 1, ('kepo', 0.0): 1, ('bitin', 0.0): 1, ('pretzel', 0.0): 1, ('bb17', 0.0): 1, ('bblf', 0.0): 1, ('fuckin', 0.0): 1, ('vanilla', 0.0): 1, ('latt', 0.0): 1, ('skulker', 0.0): 1, ('thread', 0.0): 1, ('hungrrryyi', 0.0): 1, ('icloud', 0.0): 1, ('ipod', 0.0): 3, ('hallyu', 0.0): 1, ('buuut', 0.0): 1, ('über', 0.0): 1, ('oki', 0.0): 2, ('8p', 0.0): 1, ('champagn', 0.0): 1, ('harlo', 0.0): 1, ('torrentialrain', 0.0): 1, ('lloyd', 0.0): 1, ('asshol', 0.0): 1, ('clearli', 0.0): 2, ('knowww', 0.0): 2, ('runni', 0.0): 1, ('sehun', 0.0): 1, ('sweater', 0.0): 1, ('intoler', 0.0): 2, ('xenophob', 0.0): 1, ('wtfff', 0.0): 1, ('tone', 0.0): 1, ('wasnt', 0.0): 1, ('1pm', 0.0): 2, ('fantasi', 0.0): 1, ('newer', 0.0): 1, ('pish', 0.0): 1, ('comparison', 0.0): 1, ('remast', 0.0): 1, ('fe14', 0.0): 1, ('icon', 0.0): 2, ('strawberri', 0.0): 1, ('loos', 0.0): 1, ('kapatidkongpogi', 0.0): 1, ('steph', 0.0): 1, ('mel', 0.0): 1, ('longest', 0.0): 1, ('carmen', 0.0): 1, ('login', 0.0): 1, ('respons', 0.0): 3, ('00128835', 0.0): 1, ('wingstop', 0.0): 1, ('budg', 0.0): 1, ('fuq', 0.0): 1, ('ilhoon', 0.0): 1, ('ganteng', 0.0): 1, ('simpl', 0.0): 1, ('getthescoop', 0.0): 1, ('hearess', 0.0): 1, ('677', 0.0): 1, ('txt_shot', 0.0): 1, ('standbi', 0.0): 1, ('inatal', 0.0): 1, ('zenmat', 0.0): 1, ('namecheck', 0.0): 1, ('whistl', 0.0): 1, ('junmyeon', 0.0): 1, ('ddi', 0.0): 1, ('arini', 0.0): 1, ('je', 0.0): 1, ('bright', 0.0): 2, ('igbo', 0.0): 1, ('blamehoney', 0.0): 1, ('whhr', 0.0): 1, ('juan', 0.0): 1, ('snuggl', 0.0): 1, ('internship', 0.0): 1, ('usag', 0.0): 1, ('warn', 0.0): 1, ('vertigo', 0.0): 1, ('panic', 0.0): 1, ('attack', 0.0): 4, ('dual', 0.0): 1, ('carriageway', 0.0): 1, ('aragalang', 0.0): 1, ('08', 0.0): 1, ('tam', 0.0): 1, ('bose', 0.0): 1, ('theo', 0.0): 1, ('anymoree', 0.0): 1, ('rubbish', 0.0): 1, ('cactu', 0.0): 1, ('sorrri', 0.0): 1, ('bowel', 0.0): 1, ('nasti', 0.0): 2, ('tumour', 0.0): 1, ('faster', 0.0): 1, ('puffi', 0.0): 1, ('eyelid', 0.0): 1, ('musica', 0.0): 1, ('dota', 0.0): 1, ('4am', 0.0): 1, ('campsit', 0.0): 1, ('miah', 0.0): 1, ('hahay', 0.0): 1, ('churro', 0.0): 1, ('montana', 0.0): 2, ('reign', 0.0): 1, ('exampl', 0.0): 1, ('inflat', 0.0): 1, ('sic', 0.0): 1, ('reset', 0.0): 1, ('entlerbountli', 0.0): 1, ('tinder', 0.0): 3, ('dirtykik', 0.0): 2, ('sexcam', 0.0): 3, ('spray', 0.0): 1, ('industri', 0.0): 1, ('swollen', 0.0): 1, ('distanc', 0.0): 2, ('jojo', 0.0): 1, ('postcod', 0.0): 1, ('kafi', 0.0): 1, ('din', 0.0): 1, ('mene', 0.0): 1, ('aj', 0.0): 1, ('koi', 0.0): 1, ('rewert', 0.0): 1, ('bunta', 0.0): 1, ('warnaaa', 0.0): 1, ('tortur', 0.0): 2, ('field', 0.0): 1, ('wall', 0.0): 2, ('iran', 0.0): 1, ('irand', 0.0): 1, ('us-iran', 0.0): 1, ('nuclear', 0.0): 1, ("mit'", 0.0): 1, ('expert', 0.0): 1, ('sever', 0.0): 3, ('li', 0.0): 1, ('s2e12', 0.0): 1, ('rumpi', 0.0): 1, ('gallon', 0.0): 1, ('ryan', 0.0): 1, ('secret', 0.0): 2, ('dandia', 0.0): 1, ('rbi', 0.0): 1, ('cage', 0.0): 2, ('parrot', 0.0): 1, ('1li', 0.0): 1, ('commiss', 0.0): 1, ('cag', 0.0): 1, ('stripe', 0.0): 2, ('gujarat', 0.0): 1, ('tear', 0.0): 3, ('ily.melani', 0.0): 1, ('unlik', 0.0): 2, ('talent', 0.0): 2, ('deepxcap', 0.0): 1, ('doin', 0.0): 3, ('5:08', 0.0): 1, ('thesi', 0.0): 11, ('belieb', 0.0): 2, ('gtg', 0.0): 1, 
('compet', 0.0): 1, ('vv', 0.0): 1, ('respect', 0.0): 5, ('opt-out', 0.0): 1, ('vam', 0.0): 1, ('spece', 0.0): 1, ('ell', 0.0): 1, ('articl', 0.0): 1, ('sexyameli', 0.0): 1, ('fineandyu', 0.0): 1, ('gd', 0.0): 1, ('flesh', 0.0): 1, ('daft', 0.0): 1, ('imsorri', 0.0): 1, ('aku', 0.0): 1, ('chelsea', 0.0): 2, ('koe', 0.0): 1, ('emyu', 0.0): 1, ('confetti', 0.0): 1, ('bf', 0.0): 2, ('sini', 0.0): 1, ('dipoppo', 0.0): 1, ('hop', 0.0): 2, ('bestweekend', 0.0): 1, ('okay-ish', 0.0): 1, ('html', 0.0): 1, ('geneva', 0.0): 1, ('patml', 0.0): 1, ('482', 0.0): 1, ('orgasm', 0.0): 3, ('abouti', 0.0): 1, ('797', 0.0): 1, ('reaalli', 0.0): 1, ('aldub', 0.0): 1, ('nila', 0.0): 1, ('smart', 0.0): 1, ('meter', 0.0): 1, ('display', 0.0): 1, ('unansw', 0.0): 1, ('bri', 0.0): 1, ('magcon', 0.0): 1, ('sinuend', 0.0): 1, ('kak', 0.0): 1, ('laper', 0.0): 2, ('rage', 0.0): 1, ('loser', 0.0): 1, ('brendon', 0.0): 1, ("urie'", 0.0): 1, ('sumer', 0.0): 1, ('repackag', 0.0): 1, (":'d", 0.0): 1, ('matthew', 0.0): 1, ('yongb', 0.0): 1, ('sued', 0.0): 1, ('suprem', 0.0): 1, ('warm-up', 0.0): 1, ('arriv', 0.0): 4, ('brill', 0.0): 1, ('120', 0.0): 1, ('rub', 0.0): 1, ('belli', 0.0): 1, ('jannatul', 0.0): 1, ('ferdou', 0.0): 1, ('ekta', 0.0): 1, ('kharap', 0.0): 1, ('manush', 0.0): 1, ('mart', 0.0): 2, ('gua', 0.0): 1, ('can', 0.0): 1, ("khloe'", 0.0): 1, ('nhe', 0.0): 1, ('yar', 0.0): 1, ('minkyuk', 0.0): 1, ('hol', 0.0): 1, ('isol', 0.0): 1, ('hk', 0.0): 1, ('sensor', 0.0): 1, ('broker', 0.0): 1, ('wna', 0.0): 1, ('flaviana', 0.0): 1, ('chickmt', 0.0): 1, ('123', 0.0): 1, ('letsfootbal', 0.0): 2, ('atk', 0.0): 2, ('greymind', 0.0): 2, ('43', 0.0): 2, ('gayl', 0.0): 2, ('cricket', 0.0): 3, ('2-3', 0.0): 2, ('mood-dump', 0.0): 1, ('livestream', 0.0): 1, ('gotten', 0.0): 1, ('felton', 0.0): 1, ('veriti', 0.0): 1, ("standen'", 0.0): 1, ('shortli', 0.0): 1, ('😆', 0.0): 2, ('takoyaki', 0.0): 1, ('piti', 0.0): 1, ('aisyah', 0.0): 1, ('ffvi', 0.0): 1, ('youtu.be/2_gpctsojkw', 0.0): 1, ('donutsss', 0.0): 1, ('50p', 0.0): 1, ('grate', 0.0): 1, ('spars', 0.0): 1, ('dd', 0.0): 1, ('lagi', 0.0): 1, ('rider', 0.0): 1, ('pride', 0.0): 1, ('hueee', 0.0): 1, ('password', 0.0): 1, ('thingi', 0.0): 1, ('georg', 0.0): 1, ('afraid', 0.0): 2, ('chew', 0.0): 2, ('toy', 0.0): 1, ('stella', 0.0): 1, ('threw', 0.0): 2, ('theaccidentalcoupl', 0.0): 1, ('smooth', 0.0): 1, ('handov', 0.0): 1, ('spick', 0.0): 1, ('bebii', 0.0): 1, ('happenend', 0.0): 1, ('dr', 0.0): 1, ('balm', 0.0): 1, ('hmph', 0.0): 1, ('bubba', 0.0): 2, ('floor', 0.0): 3, ('georgi', 0.0): 1, ('oi', 0.0): 1, ('bengali', 0.0): 1, ('masterchef', 0.0): 1, ('whatchya', 0.0): 1, ('petrol', 0.0): 1, ('diesel', 0.0): 1, ('wardrob', 0.0): 1, ('awe', 0.0): 1, ('cock', 0.0): 1, ('nyquil', 0.0): 1, ('poootek', 0.0): 1, ('1,500', 0.0): 1, ('bobbl', 0.0): 1, ('leak', 0.0): 1, ('thermo', 0.0): 1, ('classic', 0.0): 1, ('ti5', 0.0): 1, ('12th', 0.0): 1, ('skate', 0.0): 1, ('tae', 0.0): 1, ('kita', 0.0): 4, ('ia', 0.0): 1, ('pkwalasawa', 0.0): 1, ('india', 0.0): 1, ('corrupt', 0.0): 2, ('access', 0.0): 2, ('anything.sur', 0.0): 1, ('info', 0.0): 6, ('octob', 0.0): 1, ('mubank', 0.0): 2, ('ene', 0.0): 2, ('3k', 0.0): 1, ('zehr', 0.0): 1, ('khani', 0.0): 1, ('groceri', 0.0): 1, ('hubba', 0.0): 1, ('bubbl', 0.0): 1, ('gum', 0.0): 2, ('closet', 0.0): 1, ('jhalak', 0.0): 1, ('. ..', 0.0): 2, ('bakwa', 0.0): 1, ('. 
...', 0.0): 1, ('seehiah', 0.0): 1, ('goy', 0.0): 1, ('nacho', 0.0): 1, ('braid', 0.0): 2, ('initi', 0.0): 1, ('ruth', 0.0): 1, ('boong', 0.0): 1, ('recommend', 0.0): 3, ('gta', 0.0): 1, ('cwnt', 0.0): 1, ('trivia', 0.0): 1, ('belat', 0.0): 1, ('rohingya', 0.0): 1, ('muslim', 0.0): 2, ('indict', 0.0): 1, ('traffick', 0.0): 1, ('thailand', 0.0): 1, ('asia', 0.0): 1, ('rumbl', 0.0): 1, ('kumbl', 0.0): 1, ('scold', 0.0): 1, ('phrase', 0.0): 1, ('includ', 0.0): 1, ('tag', 0.0): 2, ('melt', 0.0): 1, ('tfw', 0.0): 1, ('jest', 0.0): 1, ('offend', 0.0): 2, ('sleepingwithsiren', 0.0): 1, ('17th', 0.0): 1, ('bringmethehorizon', 0.0): 1, ('18th', 0.0): 2, ('carva', 0.0): 1, ('regularli', 0.0): 2, ('sympathi', 0.0): 1, ('revamp', 0.0): 1, ('headphon', 0.0): 1, ('cunt', 0.0): 1, ('wacha', 0.0): 1, ('niend', 0.0): 1, ('bravo', 0.0): 1, ('2hr', 0.0): 1, ('13m', 0.0): 1, ('kk', 0.0): 2, ('calibraksaep', 0.0): 2, ('darlin', 0.0): 1, ('stun', 0.0): 1, ("doedn't", 0.0): 1, ('meaning', 0.0): 1, ('horrif', 0.0): 2, ('scoup', 0.0): 2, ('paypal', 0.0): 3, ('sweedi', 0.0): 1, ('nam', 0.0): 1, ("sacconejoly'", 0.0): 1, ('bethesda', 0.0): 1, ('fallout', 0.0): 1, ('minecon', 0.0): 1, ('perfect', 0.0): 2, ('katee', 0.0): 1, ('iloveyouu', 0.0): 1, ('linux', 0.0): 1, ('nawww', 0.0): 1, ('chikka', 0.0): 1, ('ug', 0.0): 1, ('rata', 0.0): 1, ('soonest', 0.0): 1, ('mwamwa', 0.0): 1, ('faggot', 0.0): 1, ('doubt', 0.0): 2, ('fyi', 0.0): 1, ('profil', 0.0): 1, ('nicest', 0.0): 1, ('mehendi', 0.0): 1, ('dash', 0.0): 1, ('bookmark', 0.0): 1, ('whay', 0.0): 1, ('shaa', 0.0): 1, ('prami', 0.0): 1, ('😚', 0.0): 4, ('ngee', 0.0): 1, ('ann', 0.0): 1, ('crikey', 0.0): 2, ('snit', 0.0): 1, ('nathanielhinanakit', 0.0): 1, ('naya', 0.0): 1, ('spinni', 0.0): 1, ('wheel', 0.0): 2, ('albeit', 0.0): 1, ('athlet', 0.0): 1, ('gfriend', 0.0): 2, ('yung', 0.0): 2, ('fugli', 0.0): 1, ('💞', 0.0): 4, ('jongda', 0.0): 1, ('hardli', 0.0): 2, ('tlist', 0.0): 1, ('budget', 0.0): 1, ('pabebegirl', 0.0): 1, ('pabeb', 0.0): 2, ('alter', 0.0): 1, ('sandra', 0.0): 2, ('bland', 0.0): 2, ('storifi', 0.0): 1, ('abbi', 0.0): 2, ('mtvhottest', 0.0): 1, ('gaga', 0.0): 1, ('rib', 0.0): 1, ('😵', 0.0): 1, ('hulkamania', 0.0): 1, ('unlov', 0.0): 1, ('lazi', 0.0): 3, ('ihhh', 0.0): 1, ('stackar', 0.0): 1, ('basil', 0.0): 1, ('remedi', 0.0): 1, ('ov', 0.0): 2, ('raiz', 0.0): 1, ('nvr', 0.0): 1, ('gv', 0.0): 1, ('up.wt', 0.0): 1, ('wt', 0.0): 1, ('imran', 0.0): 2, ('achiev', 0.0): 1, ('thr', 0.0): 1, ('soln', 0.0): 1, ("sister'", 0.0): 1, ('hong', 0.0): 1, ('kong', 0.0): 1, ('31st', 0.0): 1, ('pipe', 0.0): 1, ('sept', 0.0): 2, ('lawn', 0.0): 1, ("cupid'", 0.0): 1, ('torn', 0.0): 1, ('retain', 0.0): 1, ('clown', 0.0): 2, ('lipstick', 0.0): 1, ('haiss', 0.0): 1, ('todayi', 0.0): 1, ('thoo', 0.0): 1, ('everday', 0.0): 1, ('hangout', 0.0): 2, ('steven', 0.0): 2, ('william', 0.0): 1, ('umboh', 0.0): 1, ('goodafternoon', 0.0): 1, ('jadin', 0.0): 1, ('thiz', 0.0): 1, ('iz', 0.0): 1, ('emeg', 0.0): 1, ('kennat', 0.0): 1, ('reunit', 0.0): 1, ('abi', 0.0): 1, ('arctic', 0.0): 1, ('chicsirif', 0.0): 1, ('structur', 0.0): 1, ('cumbia', 0.0): 1, ('correct', 0.0): 1, ('badlif', 0.0): 1, ('4-5', 0.0): 2, ('kaslkdja', 0.0): 1, ('3wk', 0.0): 1, ('flower', 0.0): 1, ('feverfew', 0.0): 1, ('weddingflow', 0.0): 1, ('diyflow', 0.0): 1, ('fitn', 0.0): 1, ('worth', 0.0): 4, ('wolverin', 0.0): 1, ('khan', 0.0): 1, ('innoc', 0.0): 1, ('🙏', 0.0): 1, ('🎂', 0.0): 2, ('memem', 0.0): 2, ('krystoria', 0.0): 1, ('snob', 0.0): 1, ('zumba', 0.0): 1, ('greekcrisi', 0.0): 1, ('remain', 0.0): 1, ('dutch', 
0.0): 1, ('legibl', 0.0): 2, ('isra', 0.0): 1, ('passport', 0.0): 1, ('froze', 0.0): 1, ('theori', 0.0): 1, ('23rd', 0.0): 1, ('24th', 0.0): 1, ('stomachach', 0.0): 1, ('slice', 0.0): 1, ('ཀ', 0.0): 1, ('again', 0.0): 1, ('otani', 0.0): 1, ('3-0', 0.0): 1, ('3rd', 0.0): 3, ('bottom', 0.0): 2, ('niaaa', 0.0): 1, ('2/4', 0.0): 1, ('scheme', 0.0): 2, ('fckin', 0.0): 1, ('hii', 0.0): 1, ('vin', 0.0): 1, ('plss', 0.0): 1, ('rpli', 0.0): 1, ('rat', 0.0): 3, ('bollywood', 0.0): 1, ('mac', 0.0): 1, ('backup', 0.0): 2, ('lune', 0.0): 1, ('robinhood', 0.0): 1, ('robinhoodi', 0.0): 1, ('🚙', 0.0): 1, ('💚', 0.0): 1, ('docopenhagen', 0.0): 1, ('setter', 0.0): 1, ('swipe', 0.0): 1, ('bbygurl', 0.0): 1, ('neil', 0.0): 1, ('caribbean', 0.0): 1, ('6yr', 0.0): 1, ('jabongatpumaurbanstamped', 0.0): 2, ('takraw', 0.0): 1, ('fersure', 0.0): 1, ('angi', 0.0): 1, ('sheriff', 0.0): 1, ('aaag', 0.0): 1, ("i'mo", 0.0): 1, ('sulk', 0.0): 1, ('selfish', 0.0): 1, ('trick', 0.0): 2, ('nonc', 0.0): 1, ('pad', 0.0): 1, ('bison', 0.0): 1, ('motiv', 0.0): 2, ("q'don", 0.0): 1, ('cheat', 0.0): 2, ('stomp', 0.0): 1, ('aaaaaaaaah', 0.0): 1, ('kany', 0.0): 1, ('mama', 0.0): 1, ('jdjdjdjd', 0.0): 1, ("jimin'", 0.0): 1, ('fancaf', 0.0): 1, ('waffl', 0.0): 1, ('87.7', 0.0): 1, ('2fm', 0.0): 1, ('himseek', 0.0): 1, ('kissm', 0.0): 1, ('akua', 0.0): 1, ('glo', 0.0): 1, ('cori', 0.0): 1, ('monteith', 0.0): 1, ('often', 0.0): 1, ('hashbrown', 0.0): 1, ('💘', 0.0): 2, ('pg', 0.0): 1, ('msc', 0.0): 1, ('hierro', 0.0): 1, ('shirleycam', 0.0): 1, ('phonesex', 0.0): 2, ('pal', 0.0): 1, ('111', 0.0): 1, ('gilet', 0.0): 1, ('cheek', 0.0): 1, ('squishi', 0.0): 1, ('lahhh', 0.0): 1, ('eon', 0.0): 1, ('sunris', 0.0): 1, ('beeti', 0.0): 1, ('697', 0.0): 1, ('kikkomansabor', 0.0): 1, ('getaway', 0.0): 1, ('crimin', 0.0): 1, ('amiibo', 0.0): 1, ('batman', 0.0): 1, ('habe', 0.0): 1, ('siannn', 0.0): 1, ('march', 0.0): 1, ('2017', 0.0): 1, ('chuckin', 0.0): 1, ('ampsha', 0.0): 1, ('nia', 0.0): 1, ('strap', 0.0): 1, ('dz9055', 0.0): 1, ('entlead', 0.0): 1, ('590', 0.0): 1, ('twice', 0.0): 5, ('07:02', 0.0): 1, ('ifsc', 0.0): 1, ('mayor', 0.0): 1, ('biodivers', 0.0): 1, ('taxonom', 0.0): 1, ('collabor', 0.0): 1, ('speci', 0.0): 1, ('discoveri', 0.0): 1, ('collar', 0.0): 1, ('3:03', 0.0): 1, ('belt', 0.0): 1, ('smith', 0.0): 2, ('eyelin', 0.0): 1, ('therefor', 0.0): 1, ('netherland', 0.0): 1, ('el', 0.0): 1, ('jeb', 0.0): 1, ('blacklivesmatt', 0.0): 1, ('slogan', 0.0): 1, ('msnbc', 0.0): 1, ('jebbush', 0.0): 1, ('famish', 0.0): 1, ('marino', 0.0): 1, ('qualifi', 0.0): 2, ('suzi', 0.0): 1, ('skirt', 0.0): 1, ('tama', 0.0): 1, ('warrior', 0.0): 2, ('wound', 0.0): 1, ('iraq', 0.0): 1, ('be', 0.0): 2, ('camara', 0.0): 1, ('coveral', 0.0): 1, ('happili', 0.0): 1, ('sneezi', 0.0): 1, ('rogerwatch', 0.0): 1, ('stalker', 0.0): 1, ('velvet', 0.0): 1, ('tradit', 0.0): 1, ("people'", 0.0): 1, ('beheaviour', 0.0): 1, ("robert'", 0.0): 1, ('.\n.', 0.0): 2, ('aaron', 0.0): 1, ('jelous', 0.0): 1, ('mtg', 0.0): 1, ('thoughtseiz', 0.0): 1, ('playabl', 0.0): 1, ('oldi', 0.0): 1, ('goodi', 0.0): 1, ('mcg', 0.0): 1, ('inspirit', 0.0): 1, ('shine', 0.0): 1, ('ise', 0.0): 1, ('assum', 0.0): 2, ('waist', 0.0): 2, ('guin', 0.0): 1, ('venu', 0.0): 1, ('evil', 0.0): 1, ('pepper', 0.0): 1, ('thessidew', 0.0): 1, ('877', 0.0): 1, ('genesi', 0.0): 1, ('mexico', 0.0): 2, ('novemb', 0.0): 1, ('mash', 0.0): 1, ('whattsap', 0.0): 1, ('inuyasha', 0.0): 2, ('outfwith', 0.0): 1, ('myungsoo', 0.0): 1, ('organis', 0.0): 1, ('satisfi', 0.0): 1, ('wah', 0.0): 1, ('challo', 0.0): 1, ('pliss', 
0.0): 1, ('juliana', 0.0): 1, ('enrol', 0.0): 1, ('darlen', 0.0): 1, ('emoji', 0.0): 2, ('brisban', 0.0): 1, ('merlin', 0.0): 1, ('nawwwe', 0.0): 1, ('hyperbulli', 0.0): 1, ('tong', 0.0): 1, ('nga', 0.0): 1, ('seatmat', 0.0): 1, ('rajud', 0.0): 1, ('barkada', 0.0): 1, ('ore', 0.0): 1, ('kayla', 0.0): 1, ('ericavan', 0.0): 1, ('jong', 0.0): 1, ('dongwoo', 0.0): 1, ('photocard', 0.0): 1, ('wh', 0.0): 1, ('dw', 0.0): 1, ('tumor', 0.0): 1, ('vivian', 0.0): 1, ('mmsmalubhangsakit', 0.0): 1, ('jillcruz', 0.0): 2, ('lgbt', 0.0): 3, ('qt', 0.0): 1, ('19th', 0.0): 1, ('toss', 0.0): 1, ('co-work', 0.0): 1, ('mia', 0.0): 1, ('push', 0.0): 4, ('dare', 0.0): 2, ('unsettl', 0.0): 1, ('gh', 0.0): 1, ('18c', 0.0): 1, ('rlli', 0.0): 2, ('hamster', 0.0): 2, ('sheeran', 0.0): 2, ('preform', 0.0): 2, ('monash', 0.0): 1, ('hitmark', 0.0): 1, ('glitch', 0.0): 1, ('safaa', 0.0): 1, ("selena'", 0.0): 1, ('galat', 0.0): 1, ('tum', 0.0): 1, ('ab', 0.0): 5, ('non', 0.0): 1, ('lrka', 0.0): 1, ('bna', 0.0): 1, ('kia', 0.0): 1, ('bhook', 0.0): 1, ('jai', 0.0): 1, ('social', 0.0): 2, ('afterschool', 0.0): 1, ('bilal', 0.0): 1, ('ashraf', 0.0): 1, ('icu', 0.0): 1, ('thanksss', 0.0): 1, ('annnd', 0.0): 1, ('winchest', 0.0): 1, ('{:', 0.0): 1, ('grepe', 0.0): 1, ('grepein', 0.0): 1, ('panem', 0.0): 1, ('lover', 0.0): 1, ('sulli', 0.0): 1, ('cpm', 0.0): 1, ('condemn', 0.0): 1, ('✔', 0.0): 1, ('occur', 0.0): 1, ('unagi', 0.0): 1, ('7elw', 0.0): 1, ('mesh', 0.0): 1, ('beyt', 0.0): 1, ('3a2ad', 0.0): 1, ('fluent', 0.0): 1, ('varsiti', 0.0): 1, ('sengenza', 0.0): 1, ('context', 0.0): 1, ('movnat', 0.0): 1, ('yield', 0.0): 1, ('nbhero', 0.0): 1, ("it'd", 0.0): 1, ('background', 0.0): 1, ('agov', 0.0): 1, ('brasileirao', 0.0): 2, ('abus', 0.0): 1, ('unpar', 0.0): 1, ('bianca', 0.0): 1, ('bun', 0.0): 1, ('dislik', 0.0): 1, ('burdensom', 0.0): 1, ('clear', 0.0): 2, ('amelia', 0.0): 1, ('melon', 0.0): 2, ('useless', 0.0): 1, ('soccer', 0.0): 2, ('interview', 0.0): 2, ('thursday', 0.0): 1, ('nevermind', 0.0): 1, ('jeon', 0.0): 1, ('claw', 0.0): 1, ('thigh', 0.0): 2, ('traction', 0.0): 1, ('damnit', 0.0): 1, ('pri', 0.0): 1, ('pv', 0.0): 2, ('reliv', 0.0): 1, ('nyc', 0.0): 2, ('klm', 0.0): 1, ('11am', 0.0): 1, ("mcd'", 0.0): 1, ('hung', 0.0): 1, ('bam', 0.0): 1, ('seventh', 0.0): 1, ('splendour', 0.0): 1, ('swedish', 0.0): 1, ('metal', 0.0): 1, ('häirførc', 0.0): 1, ('givecodpieceach', 0.0): 1, ('alic', 0.0): 3, ('stile', 0.0): 1, ('explain', 0.0): 3, ('ili', 0.0): 1, ('pragu', 0.0): 1, ('sadi', 0.0): 1, ('charact', 0.0): 1, ('915', 0.0): 1, ('hayee', 0.0): 2, ('patwari', 0.0): 1, ('mam', 0.0): 1, ("ik'", 0.0): 1, ('vision', 0.0): 2, ('ga', 0.0): 1, ('awhhh', 0.0): 1, ('nalang', 0.0): 1, ('hehe', 0.0): 1, ('albanian', 0.0): 1, ('curs', 0.0): 2, ('tava', 0.0): 1, ('chara', 0.0): 1, ('teteh', 0.0): 1, ('verri', 0.0): 1, ('shatter', 0.0): 2, ('sb', 0.0): 1, ('nawe', 0.0): 1, ('bulldog', 0.0): 1, ('macho', 0.0): 1, ('puriti', 0.0): 1, ('kwento', 0.0): 1, ('nakakapikon', 0.0): 1, ('nagbabasa', 0.0): 1, ('blog', 0.0): 2, ('cancer', 0.0): 1, (':-\\', 0.0): 1, ('jonatha', 0.0): 4, ('beti', 0.0): 4, ('sogok', 0.0): 1, ('premium', 0.0): 2, ('instrument', 0.0): 1, ('howev', 0.0): 1, ('dastardli', 0.0): 1, ('swine', 0.0): 1, ('envelop', 0.0): 1, ('pipol', 0.0): 1, ('tad', 0.0): 1, ('wiper', 0.0): 2, ('supposedli', 0.0): 1, ('kernel', 0.0): 1, ('intel', 0.0): 1, ('mega', 0.0): 1, ('bent', 0.0): 1, ('socket', 0.0): 1, ('pcgame', 0.0): 1, ('pcupgrad', 0.0): 1, ('brainwash', 0.0): 2, ('smosh', 0.0): 1, ('plawnew', 0.0): 1, ('837', 0.0): 1, ('aswel', 
0.0): 1, ('litter', 0.0): 1, ('mensch', 0.0): 1, ('sepanx', 0.0): 1, ('pci', 0.0): 1, ('caerphilli', 0.0): 1, ('omw', 0.0): 1, ('😍', 0.0): 1, ('hahdhdhshh', 0.0): 1, ('growinguppoor', 0.0): 1, ('🇺', 0.0): 2, ('🇸', 0.0): 2, ("bangtan'", 0.0): 1, ('taimoor', 0.0): 1, ('meray', 0.0): 1, ('dost', 0.0): 1, ('tya', 0.0): 1, ('refollow', 0.0): 1, ('dumb', 0.0): 2, ('butt', 0.0): 1, ('pissbabi', 0.0): 1, ('plank', 0.0): 1, ('inconsist', 0.0): 1, ('moor', 0.0): 1, ('bin', 0.0): 1, ('osx', 0.0): 1, ('chrome', 0.0): 1, ('voiceov', 0.0): 1, ('devo', 0.0): 1, ('hulkhogan', 0.0): 1, ('unpleas', 0.0): 1, ('daaamn', 0.0): 1, ('dada', 0.0): 1, ('fulli', 0.0): 1, ('spike', 0.0): 1, ("panic'", 0.0): 1, ('22nd', 0.0): 1, ('south', 0.0): 2, ('africa', 0.0): 2, ('190', 0.0): 2, ('lizardz', 0.0): 1, ('deepli', 0.0): 1, ('emerg', 0.0): 1, ('engin', 0.0): 1, ('dormtel', 0.0): 1, ('scho', 0.0): 1, ('siya', 0.0): 1, ('onee', 0.0): 1, ('carri', 0.0): 1, ('7pm', 0.0): 1, ('feta', 0.0): 1, ('blaaaz', 0.0): 1, ('nausea', 0.0): 1, ('awar', 0.0): 1, ('top-up', 0.0): 1, ('sharknado', 0.0): 1, ('erni', 0.0): 1, ('ezoo', 0.0): 1, ('lilybutl', 0.0): 1, ('seduc', 0.0): 2, ('powai', 0.0): 1, ('neighbor', 0.0): 1, ('delhi', 0.0): 1, ('unsaf', 0.0): 1, ('halo', 0.0): 1, ('fred', 0.0): 1, ('gaon', 0.0): 1, ('infnt', 0.0): 1, ('elig', 0.0): 1, ('acub', 0.0): 1, ("why'd", 0.0): 1, ('bullshit', 0.0): 2, ('hanaaa', 0.0): 1, ('jn', 0.0): 1, ('tau', 0.0): 1, ('basta', 0.0): 1, ('sext', 0.0): 1, ('addm', 0.0): 1, ('hotmusicdeloco', 0.0): 2, ('dhi', 0.0): 1, ('👉', 0.0): 1, ('8ball', 0.0): 1, ('fakmarey', 0.0): 1, ('doo', 0.0): 2, ('six', 0.0): 3, ('flag', 0.0): 1, ('fulltim', 0.0): 1, ('awkward', 0.0): 1, ('beet', 0.0): 1, ('juic', 0.0): 1, ('dci', 0.0): 1, ('granddad', 0.0): 1, ('minion', 0.0): 3, ('bucket', 0.0): 1, ('kapan', 0.0): 1, ('udah', 0.0): 1, ('dihapu', 0.0): 1, ('hilang', 0.0): 1, ('dari', 0.0): 1, ('muka', 0.0): 1, ('bumi', 0.0): 1, ('narrow', 0.0): 1, ('gona', 0.0): 2, ('chello', 0.0): 1, ('gate', 0.0): 1, ('guard', 0.0): 1, ('crepe', 0.0): 1, ('forsaken', 0.0): 1, ('kanin', 0.0): 1, ('hypixel', 0.0): 1, ('grrr', 0.0): 1, ('thestruggleisr', 0.0): 1, ('geek', 0.0): 1, ('gamer', 0.0): 2, ('afterbirth', 0.0): 1, ("apink'", 0.0): 1, ('overperhatian', 0.0): 1, ('son', 0.0): 1, ('pox', 0.0): 1, ('ahm', 0.0): 1, ('karli', 0.0): 1, ('kloss', 0.0): 1, ('goofi', 0.0): 1, ('pcd', 0.0): 1, ('antagonis', 0.0): 1, ('writer', 0.0): 1, ('nudg', 0.0): 1, ('delv', 0.0): 1, ('grandad', 0.0): 1, ("gray'", 0.0): 1, ('followk', 0.0): 1, ('suggest', 0.0): 2, ('pace', 0.0): 1, ('maker', 0.0): 1, ('molli', 0.0): 1, ('higher', 0.0): 1, ('ceremoni', 0.0): 1, ('christin', 0.0): 1, ('moodi', 0.0): 1, ('throwback', 0.0): 1, ('fav', 0.0): 3, ('barb', 0.0): 1, ('creasi', 0.0): 1, ('deputi', 0.0): 1, ('tast', 0.0): 1, ("banana'", 0.0): 1, ('saludo', 0.0): 1, ('dissapoint', 0.0): 1, ('😫', 0.0): 1, ('<--', 0.0): 1, ("bae'", 0.0): 1, ('pimpl', 0.0): 2, ('amount', 0.0): 2, ('tdi', 0.0): 1, ('pamela', 0.0): 1, ('mini', 0.0): 1, ('mast', 0.0): 1, ('intermitt', 0.0): 1, ('servic', 0.0): 3, ('janniecam', 0.0): 1, ('musicbiz', 0.0): 1, ('braxton', 0.0): 1, ('pro', 0.0): 2, ('urban', 0.0): 1, ('unpreced', 0.0): 1, ('tebow', 0.0): 1, ('okaaay', 0.0): 1, ('sayanggg', 0.0): 1, ('housework', 0.0): 1, ('bust', 0.0): 2, ('disneyland', 0.0): 1, ('thoma', 0.0): 1, ('tommyy', 0.0): 1, ('billi', 0.0): 1, ('kevin', 0.0): 1, ('clifton', 0.0): 1, ('strictli', 0.0): 1, ('nsc', 0.0): 1, ('mat', 0.0): 1, ('0', 0.0): 1, ('awhh', 0.0): 1, ('ram', 0.0): 2, ('voucher', 0.0): 1, 
('smadvow', 0.0): 1, ('544', 0.0): 1, ('acdc', 0.0): 1, ('aker', 0.0): 1, ('gmail', 0.0): 1, ('sprevelink', 0.0): 1, ('633', 0.0): 1, ('lana', 0.0): 2, ('loveyoutilltheendcart', 0.0): 1, ('sfv', 0.0): 1, ('6/7', 0.0): 1, ('winner', 0.0): 1, ('20/1', 0.0): 1, ('david', 0.0): 1, ('rosi', 0.0): 1, ('hayoung', 0.0): 1, ('nlb', 0.0): 1, ('@_', 0.0): 1, ('tayo', 0.0): 1, ('forth', 0.0): 1, ('suspect', 0.0): 1, ('mening', 0.0): 1, ('viral', 0.0): 1, ('tonsil', 0.0): 1, ('😷', 0.0): 1, ('😝', 0.0): 1, ('babyy', 0.0): 2, ('cushion', 0.0): 1, ('😿', 0.0): 1, ('💓', 0.0): 2, ('weigh', 0.0): 1, ('keen', 0.0): 1, ('petrofac', 0.0): 1, (';-)', 0.0): 1, ('wig', 0.0): 1, ("mark'", 0.0): 1, ('pathet', 0.0): 1, ('burden.say', 0.0): 1, ('itchi', 0.0): 1, ('cheaper', 0.0): 1, ('malaysia', 0.0): 1, ('130', 0.0): 1, ('snapchattimg', 0.0): 1, ('😏', 0.0): 4, ('sin', 0.0): 1, ('lor', 0.0): 1, ('dedic', 0.0): 1, ('worriedli', 0.0): 1, ('stare', 0.0): 1, ('toneadi', 0.0): 1, ('46532', 0.0): 1, ('snapdirti', 0.0): 1, ('sheskindahot', 0.0): 1, ('corps', 0.0): 1, ('taeni', 0.0): 1, ('fyeah', 0.0): 1, ('andromeda', 0.0): 1, ('yunni', 0.0): 1, ('whdjwksja', 0.0): 1, ('ziam', 0.0): 1, ('100k', 0.0): 1, ('spoil', 0.0): 1, ('curtain', 0.0): 1, ('watchabl', 0.0): 1, ('migrin', 0.0): 1, ('gdce', 0.0): 1, ('gamescom', 0.0): 1, ("do't", 0.0): 1, ('parcel', 0.0): 1, ('num', 0.0): 1, ('oooouch', 0.0): 1, ('pinki', 0.0): 1, ('👣', 0.0): 1, ('podiatrist', 0.0): 1, ('gusto', 0.0): 1, ("rodic'", 0.0): 1, ("one'", 0.0): 1, ('adoohh', 0.0): 1, ('b-butt', 0.0): 1, ('tigermilk', 0.0): 1, ('east', 0.0): 1, ('dulwich', 0.0): 1, ('intens', 0.0): 1, ('kagami', 0.0): 1, ('kuroko', 0.0): 1, ('sana', 0.0): 2, ('makita', 0.0): 1, ('spooki', 0.0): 1, ('smol', 0.0): 1, ('bean', 0.0): 1, ('fagan', 0.0): 1, ('meadowhal', 0.0): 1, ('lola', 0.0): 1, ('nadalaw', 0.0): 1, ('labyu', 0.0): 1, ('jot', 0.0): 1, ('ivypowel', 0.0): 1, ('homeslic', 0.0): 1, ('emoticon', 0.0): 2, ('eyebrow', 0.0): 1, ('prettylook', 0.0): 1, ('whitney', 0.0): 1, ('houston', 0.0): 1, ('aur', 0.0): 1, ('shamil', 0.0): 1, ('tonn', 0.0): 1, ('statu', 0.0): 1, ('→', 0.0): 1, ('suddenli', 0.0): 2, ('alli', 0.0): 2, ('wrap', 0.0): 1, ('neck', 0.0): 1, ('heartbroken', 0.0): 1, ('chover', 0.0): 1, ('cebu', 0.0): 1, ('lechon', 0.0): 1, ('kitten', 0.0): 2, ('jannygreen', 0.0): 2, ('suicid', 0.0): 2, ('forgiv', 0.0): 1, ('conno', 0.0): 1, ('brooo', 0.0): 1, ('rout', 0.0): 1, ('lovebox', 0.0): 1, ('prod', 0.0): 1, ('osad', 0.0): 1, ('scam', 0.0): 1, ('itb', 0.0): 1, ('omigod', 0.0): 1, ('ehem', 0.0): 1, ('ala', 0.0): 1, ('yeke', 0.0): 1, ('jumpa', 0.0): 1, ('😋', 0.0): 1, ('ape', 0.0): 1, ('1.2', 0.0): 1, ('map', 0.0): 1, ('namin', 0.0): 1, ('govt', 0.0): 1, ('e-petit', 0.0): 1, ('pretend', 0.0): 1, ('irk', 0.0): 1, ('ruess', 0.0): 1, ('program', 0.0): 1, ('aigoo', 0.0): 1, ('doujin', 0.0): 1, ('killua', 0.0): 1, ('ginggon', 0.0): 1, ('guys.al', 0.0): 1, ('ytd', 0.0): 1, ('pdapaghimok', 0.0): 1, ('flexibl', 0.0): 1, ('sheet', 0.0): 1, ('nanaman', 0.0): 1, ('pinay', 0.0): 1, ('pie', 0.0): 1, ('jadi', 0.0): 1, ('langsung', 0.0): 1, ('flasback', 0.0): 1, ('franc', 0.0): 1, (':|', 0.0): 1, ('lo', 0.0): 1, ('nicknam', 0.0): 1, ('involv', 0.0): 1, ('scrape', 0.0): 1, ('pile', 0.0): 1, ('sare', 0.0): 1, ('bandar', 0.0): 1, ('varg', 0.0): 1, ('hammer', 0.0): 1, ('lolo', 0.0): 1, ('xbsbabnb', 0.0): 1, ('stilll', 0.0): 1, ('apma', 0.0): 2, ('leadership', 0.0): 1, ('wakeupgop', 0.0): 1, ('mv', 0.0): 1, ('bull', 0.0): 1, ('trafficcc', 0.0): 1, ('oscar', 0.0): 1, ('pornographi', 0.0): 1, ('slutsham', 0.0): 1, 
('ect', 0.0): 1, ('poland', 0.0): 1, ('faraway', 0.0): 1, ('700', 0.0): 1, ('800', 0.0): 1, ('cgi', 0.0): 1, ('pun', 0.0): 1, ("x'", 0.0): 1, ('osaka', 0.0): 1, ('junior', 0.0): 1, ('aytona', 0.0): 1, ('hala', 0.0): 1, ('mathird', 0.0): 1, ('jkjk', 0.0): 1, ('backtrack', 0.0): 1, ('util', 0.0): 1, ('pat', 0.0): 1, ('jay', 0.0): 2, ('broh', 0.0): 1, ('calll', 0.0): 1, ('icaru', 0.0): 1, ('awn', 0.0): 1, ('bach', 0.0): 1, ('court', 0.0): 1, ('landlord', 0.0): 1, ("mp'", 0.0): 1, ('dame', 0.0): 1, ('gossip', 0.0): 1, ('purpl', 0.0): 2, ('tie', 0.0): 1, ('ishii', 0.0): 1, ('clara', 0.0): 1, ('yile', 0.0): 1, ('whatev', 0.0): 1, ('stil', 0.0): 1, ('sidharth', 0.0): 1, ('ndabenhl', 0.0): 1, ('doggi', 0.0): 1, ('antag', 0.0): 1, ('41', 0.0): 1, ('thu', 0.0): 1, ('jenner', 0.0): 1, ('troubleshoot', 0.0): 1, ("convo'", 0.0): 1, ('dem', 0.0): 1, ('tix', 0.0): 2, ('automat', 0.0): 1, ('redirect', 0.0): 1, ('gigi', 0.0): 1, ('carter', 0.0): 1, ('corn', 0.0): 2, ('chip', 0.0): 2, ('nnnooo', 0.0): 1, ('cz', 0.0): 1, ('gorilla', 0.0): 1, ('hbm', 0.0): 1, ('humid', 0.0): 1, ('admir', 0.0): 1, ('consist', 0.0): 1, ('jason', 0.0): 1, ("shackell'", 0.0): 1, ('podcast', 0.0): 1, ('envi', 0.0): 1, ('twer', 0.0): 1, ('782', 0.0): 1, ('hahaahahahaha', 0.0): 1, ('sm1', 0.0): 1, ('mutil', 0.0): 1, ('robot', 0.0): 1, ('destroy', 0.0): 1, ('freakin', 0.0): 1, ('haestarr', 0.0): 1, ('😀', 0.0): 3, ('audio', 0.0): 1, ('snippet', 0.0): 1, ('brotherhood', 0.0): 1, ('mefd', 0.0): 1, ('diana', 0.0): 1, ('master', 0.0): 1, ('led', 0.0): 1, ('award', 0.0): 1, ('meowkd', 0.0): 1, ('complic', 0.0): 1, ("c'mon", 0.0): 1, ("swimmer'", 0.0): 1, ('leh', 0.0): 1, ('corner', 0.0): 1, ('didnot', 0.0): 1, ('usanel', 0.0): 2, ('nathan', 0.0): 1, ('micha', 0.0): 1, ('fave', 0.0): 2, ('creep', 0.0): 1, ('throughout', 0.0): 1, ('whose', 0.0): 1, ('ave', 0.0): 1, ('tripl', 0.0): 1, ('lectur', 0.0): 1, ('2-5', 0.0): 1, ('jaw', 0.0): 1, ('quarter', 0.0): 1, ('soni', 0.0): 1, ('followmeaaron', 0.0): 1, ('tzelumxoxo', 0.0): 1, ('drank', 0.0): 1, ('mew', 0.0): 1, ('indic', 0.0): 1, ('ouliv', 0.0): 1, ('70748', 0.0): 1, ('viernesderolenahot', 0.0): 1, ('longmorn', 0.0): 1, ('tobermori', 0.0): 1, ('32', 0.0): 1, ('tail', 0.0): 1, ('recuerda', 0.0): 1, ('tanto', 0.0): 1, ('bath', 0.0): 1, ('muna', 0.0): 1, ('await', 0.0): 1, ('urslef', 0.0): 1, ('lime', 0.0): 1, ('truckload', 0.0): 1, ('favour', 0.0): 2, ('spectat', 0.0): 1, ('sail', 0.0): 1, ("w'end", 0.0): 1, ('bbc', 0.0): 1, ('‘', 0.0): 1, ('foil', 0.0): 1, ('ac45', 0.0): 1, ('catamaran', 0.0): 1, ('peli', 0.0): 1, ('829', 0.0): 1, ('sextaatequemfimseguesdvcomvalentino', 0.0): 1, ('befor', 0.0): 1, ('valu', 0.0): 1, ('cinnamon', 0.0): 1, ('mtap', 0.0): 1, ('peng', 0.0): 1, ('frozen', 0.0): 1, ('bagu', 0.0): 1, ('emang', 0.0): 1, ('engg', 0.0): 1, ('cmc', 0.0): 1, ('mage', 0.0): 1, ('statement', 0.0): 1, ('moodsw', 0.0): 1, ('termin', 0.0): 1, ('men', 0.0): 1, ('peep', 0.0): 1, ('multipl', 0.0): 1, ('mef', 0.0): 1, ('rebound', 0.0): 1, ('pooor', 0.0): 1, ('2am', 0.0): 1, ('perpetu', 0.0): 1, ('bitchfac', 0.0): 1, ('clever', 0.0): 1, ('iceland', 0.0): 1, ('zayn_come_back_we_miss_y', 0.0): 1, ('pmsl', 0.0): 1, ('mianh', 0.0): 1, ('milkeu', 0.0): 1, ('lrt', 0.0): 1, ('bambam', 0.0): 1, ('soda', 0.0): 1, ('payback', 0.0): 1, ('87000', 0.0): 1, ('jobe', 0.0): 1, ('muchi', 0.0): 1, ('🎈', 0.0): 1, ('bathroom', 0.0): 1, ('lagg', 0.0): 1, ('banget', 0.0): 1, ('novel', 0.0): 1, ("there'd", 0.0): 1, ('invis', 0.0): 1, ('scuttl', 0.0): 1, ('worm', 0.0): 1, ('bauuukkk', 0.0): 1, ('jessica', 0.0): 1, ('5:15', 
0.0): 1, ('argument', 0.0): 1, ('couldnt', 0.0): 2, ('yepp', 0.0): 1, ('😺', 0.0): 1, ('💒', 0.0): 1, ('💎', 0.0): 1, ('feelin', 0.0): 1, ('biscuit', 0.0): 1, ('slather', 0.0): 1, ('jsut', 0.0): 1, ('belov', 0.0): 1, ('grandmoth', 0.0): 1, ('princess', 0.0): 2, ('babee', 0.0): 1, ('demn', 0.0): 1, ('hotaisndonwyvauwjoqhsjsnaihsuswtf', 0.0): 1, ('sia', 0.0): 1, ('niram', 0.0): 1, ('geng', 0.0): 1, ('fikri', 0.0): 1, ('tirtagangga', 0.0): 1, ('char', 0.0): 1, ('font', 0.0): 2, ('riprishikeshwari', 0.0): 1, ('creamist', 0.0): 1, ('challeng', 0.0): 1, ('substitut', 0.0): 1, ('skin', 0.0): 1, ('cplt', 0.0): 1, ('cp', 0.0): 1, ('hannah', 0.0): 1, ('💙', 0.0): 1, ('opu', 0.0): 1, ('inner', 0.0): 1, ('pleasur', 0.0): 1, ('bbq', 0.0): 1, ('33', 0.0): 1, ('lolliv', 0.0): 1, ('split', 0.0): 3, ('collat', 0.0): 2, ('spilt', 0.0): 2, ('quitkarwaoyaaro', 0.0): 1, ('deacti̇v', 0.0): 1, ('2.5', 0.0): 1, ('g2a', 0.0): 1, ('sherep', 0.0): 1, ('nemen', 0.0): 1, ('behey', 0.0): 1, ('motherfuck', 0.0): 1, ('tattoo', 0.0): 1, ('reec', 0.0): 1, ('vm', 0.0): 1, ('deth', 0.0): 2, ('lest', 0.0): 1, ('gp', 0.0): 1, ('departur', 0.0): 1, ('wipe', 0.0): 1, ('yuck', 0.0): 1, ('ystrday', 0.0): 1, ('seolhyun', 0.0): 1, ('drama', 0.0): 1, ('spici', 0.0): 1, ('owl', 0.0): 1, ('mumbai', 0.0): 1, ("pj'", 0.0): 1, ('wallpap', 0.0): 1, ('cba', 0.0): 1, ('hotter', 0.0): 1, ('rec', 0.0): 1, ('gotdamn', 0.0): 1, ('baaack', 0.0): 1, ('honest', 0.0): 1, ('srw', 0.0): 1, ('mobag', 0.0): 1, ('dunno', 0.0): 1, ('stroke', 0.0): 1, ('gnr', 0.0): 1, ('backstag', 0.0): 1, ('slash', 0.0): 1, ('prolli', 0.0): 1, ('bunni', 0.0): 1, ('sooner', 0.0): 1, ('analyst', 0.0): 1, ('expedia', 0.0): 1, ('bellevu', 0.0): 1, ('prison', 0.0): 1, ('alcohol', 0.0): 1, ('huhuh', 0.0): 1, ('heartburn', 0.0): 1, ('awalmu', 0.0): 1, ('njareeem', 0.0): 1, ('maggi', 0.0): 1, ('psycho', 0.0): 1, ('wahhh', 0.0): 1, ('abudhabi', 0.0): 1, ('hiby', 0.0): 1, ('shareyoursumm', 0.0): 1, ('b8', 0.0): 1, ('must.b', 0.0): 1, ('dairi', 0.0): 1, ('produxt', 0.0): 1, ('lactos', 0.0): 2, ('midland', 0.0): 1, ('knacker', 0.0): 1, ('footag', 0.0): 1, ('lifeless', 0.0): 1, ('shell', 0.0): 1, ('44', 0.0): 1, ('7782', 0.0): 1, ('pengen', 0.0): 1, ('girlll', 0.0): 1, ('tsunami', 0.0): 1, ('indi', 0.0): 1, ('nick', 0.0): 1, ('tirad', 0.0): 1, ('stoop', 0.0): 1, ('lower', 0.0): 1, ('role', 0.0): 1, ('thunder', 0.0): 1, ('paradis', 0.0): 1, ('habit', 0.0): 1, ('facad', 0.0): 1, ('democraci', 0.0): 1, ('brat', 0.0): 1, ('tb', 0.0): 1, ("o'", 0.0): 1, ('bade', 0.0): 1, ('fursat', 0.0): 1, ('usey', 0.0): 2, ('banaya', 0.0): 1, ('uppar', 0.0): 1, ('waal', 0.0): 1, ('ney', 0.0): 1, ('afso', 0.0): 1, ('hums', 0.0): 1, ('dur', 0.0): 1, ('wo', 0.0): 1, ("who'd", 0.0): 1, ('naruhina', 0.0): 1, ('namee', 0.0): 1, ('haiqal', 0.0): 1, ('360hr', 0.0): 1, ('picc', 0.0): 1, ('instor', 0.0): 1, ('pre-vot', 0.0): 1, ('5th', 0.0): 1, ('usernam', 0.0): 1, ('minho', 0.0): 1, ('durian', 0.0): 1, ('strudel', 0.0): 1, ('tsk', 0.0): 1, ('marin', 0.0): 1, ('kailan', 0.0): 1, ('separ', 0.0): 1, ('payday', 0.0): 1, ('payhour', 0.0): 1, ('immedi', 0.0): 1, ('natur', 0.0): 1, ('pre-ord', 0.0): 1, ('fwm', 0.0): 1, ('guppi', 0.0): 1, ('poorkid', 0.0): 1, ('lack', 0.0): 1, ('misunderstood', 0.0): 1, ('cuddli', 0.0): 1, ('scratch', 0.0): 1, ('thumb', 0.0): 1, ('compens', 0.0): 1, ('kirkiri', 0.0): 1, ('phase', 0.0): 1, ('wonho', 0.0): 1, ('visual', 0.0): 1, ("='(", 0.0): 1, ('mission', 0.0): 1, ('pap', 0.0): 1, ('danzel', 0.0): 1, ('craft', 0.0): 1, ('devil', 0.0): 1, ('phil', 0.0): 1, ('sheff', 0.0): 1, ('york', 0.0): 1, 
('visa', 0.0): 1, ('gim', 0.0): 1, ('bench', 0.0): 1, ('harm', 0.0): 1, ('yolo', 0.0): 1, ('bloat', 0.0): 1, ('olli', 0.0): 1, ('alterni', 0.0): 1, ('earth', 0.0): 1, ('influenc', 0.0): 1, ('overal', 0.0): 1, ('continent', 0.0): 1, ('🔫', 0.0): 1, ('tank', 0.0): 1, ('thirsti', 0.0): 1, ('konami', 0.0): 1, ('polici', 0.0): 1, ('ranti', 0.0): 1, ('atm', 0.0): 1, ('pervers', 0.0): 1, ('bylfnnz', 0.0): 1, ('ban', 0.0): 1, ('failsatlif', 0.0): 1, ('press', 0.0): 1, ('duper', 0.0): 1, ('waaah', 0.0): 1, ('jaebum', 0.0): 1, ('ahmad', 0.0): 1, ('maslan', 0.0): 1, ('hull', 0.0): 1, ('misser', 0.0): 1}
###Markdown
Table of word countsWe will select a set of words that we would like to visualize, and store their positive and negative counts in a small table that is easy to work with later.
###Code
# select some words to appear in the report. we will assume that each word is unique (i.e. no duplicates)
keys = ['happi', 'merri', 'nice', 'good', 'bad', 'sad', 'mad', 'best', 'pretti',
'❤', ':)', ':(', '😒', '😬', '😄', '😍', '♛',
'song', 'idea', 'power', 'play', 'magnific']
data = []
for word in keys:
    # look up the positive (label 1) and negative (label 0) counts for this word
    pos = freqs.get((word, 1), 0)
    neg = freqs.get((word, 0), 0)
    data.append([word, pos, neg])
data
###Output
_____no_output_____
###Markdown
We can then use a scatter plot to inspect this table visually. Instead of plotting the raw counts, we will plot them on a logarithmic scale to take into account the wide discrepancies between the raw counts (e.g. `:)` has 3568 counts in the positive class while only 2 in the negative). The red line marks the boundary between the positive and negative areas. Words close to the red line can be classified as neutral.
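As a side note, a word's distance from that red line can also be expressed directly as a number, its log-count ratio. A minimal sketch (reusing the `freqs` dictionary built earlier in this notebook; the +1 smoothing is my own choice, not part of the original code):

```python
import numpy as np

def log_ratio(word, freqs):
    # positive tweets are labelled 1 and negative tweets 0 in the freqs dictionary
    pos = freqs.get((word, 1), 0)
    neg = freqs.get((word, 0), 0)
    # +1 keeps the ratio finite when a word appears in only one class
    return np.log((pos + 1) / (neg + 1))

# large positive values sit in the positive region below the red line,
# large negative values sit above it, and values near 0 are roughly neutral
```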
###Code
fig,ax=plt.subplots(figsize=(8,8))
x=np.log([x[1] + 1 for x in data])
y=np.log([x[2] + 1 for x in data])
ax.scatter(x,y,s=10,c='b')
plt.xlabel("Log Positive count")
plt.ylabel("Log Negative count")
ax.tick_params(direction='in')
for i in range(0, len(data)):
ax.annotate(data[i][0], (x[i], y[i]), fontsize=12)
ax.plot([0, 9], [0, 9], color = 'red') # Plot the red line that divides the 2 areas.
###Output
_____no_output_____ |
Daily Practice/UpSampling2DPractice.ipynb | ###Markdown
###Code
from numpy import asarray
from keras.models import Sequential
from keras.layers import UpSampling2D
# a 2x2 single-channel input array
X = asarray([[4, 7],
             [9, 14]])
print(X)
# reshape to the (samples, rows, cols, channels) format Keras expects
X = X.reshape((1, 2, 2, 1))
model = Sequential()
# with no arguments, UpSampling2D doubles rows and columns: 2x2 -> 4x4
model.add(UpSampling2D(input_shape=(2, 2, 1)))
# a second layer scales by 4 in each direction: 4x4 -> 16x16
model.add(UpSampling2D(size = (4,4)))
model.summary()
yhat = model.predict(X)
# each original value is now repeated across an 8x8 block of the 16x16 output
yhat = yhat.reshape((16, 16))
print(yhat)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import UpSampling2D
from keras.layers import Conv2D
model = Sequential()
model.add(Dense(128 * 5 * 5, input_dim=100))
model.add(Reshape((5, 5, 128)))
model.add(UpSampling2D())
model.add(Conv2D(1, (3,3), padding='same'))
model.summary()
# example of using the transpose convolutional layer
from numpy import asarray
from keras.models import Sequential
from keras.layers import Conv2DTranspose
X = asarray([[21, 32],
             [33, 46]])
print(X)
X = X.reshape((1, 2, 2, 1))
model = Sequential()
# a 1x1 transpose convolution with stride 2 doubles the spatial size: 2x2 -> 4x4
model.add(Conv2DTranspose(1, (1,1), strides=(2,2), input_shape=(2, 2, 1)))
model.summary()
# fix the single kernel weight to 1 and the bias to 0 so the output simply
# spreads the input values onto a grid with zeros in between
weights = [asarray([[[[1]]]]), asarray([0])]
model.set_weights(weights)
yhat = model.predict(X)
yhat = yhat.reshape((4, 4))
print(yhat)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Conv2DTranspose
from keras.layers import Conv2D
model = Sequential()
model.add(Dense(128 * 5 * 5, input_dim=100))
model.add(Reshape((5, 5, 128)))
model.add(Conv2DTranspose(1, (3,3), strides=(2,2), padding='same'))
model.summary()
###Output
Model: "sequential_12"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) (None, 3200) 323200
_________________________________________________________________
reshape_1 (Reshape) (None, 5, 5, 128) 0
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 10, 10, 1) 1153
=================================================================
Total params: 324,353
Trainable params: 324,353
Non-trainable params: 0
_________________________________________________________________
|
LDA_topic_modeling.ipynb | ###Markdown
topic modeling: how much weight does a given word carry across the whole collection of sentences? -> check it as a number. [SVD](https://wikidocs.net/30707) latent semantic analysis. The dataset is 15 years of collected news headlines.
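For reference, the SVD (latent semantic analysis) route linked above would look roughly like the sketch below once a document-term matrix `X` has been built (such as the TF-IDF matrix created further down); it is only an aside, since this notebook uses LDA instead:

```python
from sklearn.decomposition import TruncatedSVD

# X: a (documents x terms) matrix, e.g. the TF-IDF matrix produced by TfidfVectorizer below
svd = TruncatedSVD(n_components=4)
doc_topic = svd.fit_transform(X)   # document-to-topic weights
topic_term = svd.components_       # topic-to-term weights
```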
###Code
!curl -O https://raw.githubusercontent.com/franciscadias/data/master/abcnews-date-text.csv
import pandas as pd
df_data = pd.read_csv('./abcnews-date-text.csv')
df_data= df_data.head(10000)
head_text = df_data[['headline_text']]
type(head_text)
###Output
_____no_output_____
###Markdown
word_tokenize
###Code
import nltk
nltk.download('punkt')
###Output
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Package punkt is already up-to-date!
###Markdown
pandas `map()` / `apply()`: `head_text.apply(function)` applies a function row by row, for example `head_text.apply(lambda row: nltk.word_tokenize(row['headline_text']), axis=1)`
###Code
head_text['title_text'] = head_text.apply(lambda row:nltk.word_tokenize(row['headline_text']),axis=1)
head_text.head(3)
from nltk.corpus import stopwords
nltk.download('stopwords')
stop = stopwords.words('english')
print(stop)
###Output
['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
###Markdown
The original loop-based approach (before rewriting it with a lambda):
```python
def stopword(x):
    result = []
    for word in x:
        if len(word) > 3:
            if word not in stop:
                result.append(word)
    return result

def callStopWorld(head_text):
    titles = []
    for x in head_text['title_text']:
        titles.append(stopword(x))
    return titles
```
The same filtering can be done directly with `head_text['title_text'].apply`, as in the next cell.
###Code
# 람다(apply) 로 변형
head_text['title']= head_text['title_text'].apply(lambda x: [word for word in x if (len(word) > 3) if ( word not in stop) ])
head_text.head(5)
type(head_text.head(5))
head_text['title'][3]
###Output
_____no_output_____
###Markdown
```python
tokens = []
for i in range(len(head_text)):
    tokens.append(head_text['title'][i])
tokens[3:5]
# Result:
# [['staff', 'aust', 'strike', 'rise'], ['strike', 'affect', 'australian', 'travellers']]
```
Appending the token lists directly keeps each headline as a list of words; the next cell joins each list into a single string instead.
###Code
tokens = []
for i in range(len(head_text)):
tokens.append(' '.join(head_text['title'][i]))
tokens[3:5]
###Output
_____no_output_____
###Markdown
TfidfVectorizer
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(max_features=1000)
X = tfidf.fit_transform(tokens)
X.shape
###Output
_____no_output_____
###Markdown
The values are filled in according to each word's weight (its TF-IDF score, reflecting how distinctive the word is), not raw counts.
###Code
X[4].toarray()
###Output
_____no_output_____
###Markdown
LatentDirichletAllocation
###Code
from sklearn.decomposition import LatentDirichletAllocation
###Output
_____no_output_____
###Markdown
max_iter=10 plays a role similar to the number of epochs
###Code
# lda_model = LatentDirichletAllocation()
lda_model = LatentDirichletAllocation(n_components=4)
lda_top = lda_model.fit_transform(X)
###Output
_____no_output_____
###Markdown
components_ is a numpy array that holds the importance (weight) of each word for each topic
###Code
lda_model.components_.shape,lda_model.components_
terms = tfidf.get_feature_names()
###Output
_____no_output_____
###Markdown
The headlines have now been grouped into topics (the number is set by n_components, 4 in the cell above); the cell below prints the top terms for each topic
###Code
n = 5
for idx, topic in enumerate(lda_model.components_):
print([(terms[i], topic[i]) for i in topic.argsort()[:-n-1:-1]])
###Output
_____no_output_____ |
Greedy Algorithms Minimum Spanning Trees and Dynamic Programming/Programming Assignment 3/DynamicProgramming.ipynb | ###Markdown
3. In this programming problem you'll code up the dynamic programming algorithm for computing a maximum-weight independent set of a path graph. Download the text file below. [mwis.txt](https://github.com/SSQ/Coursera-Stanford-Greedy-Algorithms-Minimum-Spanning-Trees-and-Dynamic-Programming/blob/master/Programming%20Assignment%203/mwis.txt) This file describes the weights of the vertices in a path graph (with the weights listed in the order in which vertices appear in the path). It has the following format: [number_of_vertices] [weight of first vertex] [weight of second vertex] ... For example, the third line of the file is "6395702," indicating that the weight of the second vertex of the graph is 6395702. Your task in this problem is to run the dynamic programming algorithm (and the reconstruction procedure) from lecture on this data set. The question is: of the vertices 1, 2, 3, 4, 17, 117, 517, and 997, which ones belong to the maximum-weight independent set? (By "vertex 1" we mean the first vertex of the graph---there is no vertex 0.) In the box below, enter an 8-bit string, where the ith bit should be 1 if the ith of these 8 vertices is in the maximum-weight independent set, and 0 otherwise. For example, if you think that the vertices 1, 4, 17, and 517 are in the maximum-weight independent set and the other four vertices are not, then you should enter the string 10011010 in the box below.
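As a reminder (my own restatement, not part of the assignment text), the recurrence from lecture is A[i] = max(A[i-1], A[i-2] + w_i), where w_i is the weight of the i-th vertex and A[i] is the value of a maximum-weight independent set of the first i vertices. A minimal value-only sketch:

```python
def mwis_value(weights):
    # weights: vertex weights in path order; returns only the optimal total weight
    prev2, prev1 = 0, 0                              # A[i-2] and A[i-1]
    for w in weights:
        prev2, prev1 = prev1, max(prev1, prev2 + w)  # A[i] = max(A[i-1], A[i-2] + w_i)
    return prev1

# e.g. mwis_value([1, 4, 5, 4]) == 8  (take the 2nd and 4th vertices)
```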
###Code
import numpy as np
# get the file path
file_path = 'mwis.txt'
# file_path = 'test 1.txt'
# convert text file to np.array type
path_graph_data = np.loadtxt(file_path)
int_path_graph_data = path_graph_data.astype(int)
A = {}
A[-1] = 0
A[0] = 0
A[1] = int_path_graph_data[0]
for i in range(2, len(int_path_graph_data)):
    A[i] = int(max(A[i-1], A[i-2] + int_path_graph_data[i-1]))  # int handles arbitrarily large values in Python 3 (long existed only in Python 2)
print(A[2])
S = set()
position = len(int_path_graph_data)
while position >= 1:
if A[position - 1] >= (A[position - 2] + int_path_graph_data[position-1]):
position = position - 1
else:
S.add(position)
position = position - 2
print('S: ')
print(S)
judge_node = [1, 2, 3, 4, 17, 117, 517, 997]
for x in judge_node:
print(1 if x in S else 0)
###Output
1
0
1
0
0
1
1
0
|
.ipynb_checkpoints/flower-classifier-checkpoint.ipynb | ###Markdown
The Amazing Flower Classifier!You need to know whether you're being given chamomile, tulip, rose, sunflower, or dandelion, and you need an answer fast? Then you've come to the right place. Take a pic of the flower in question and click 'upload' to classify it. (Important: this only handles chamomile, tulip, rose, sunflower, and dandelion flowers. It will **not** give a sensible answer for other flowers.)
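This notebook only runs inference; it assumes an `export.pkl` file produced by a separate training notebook. As a rough sketch (an assumption about that training side, with illustrative names, not code from this project):

```python
# hypothetical training-side cell (illustrative only)
# learn = cnn_learner(dls, resnet18, metrics=error_rate)
# learn.fine_tune(4)
# learn.export('export.pkl')   # writes the file that load_learner() reads below
```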
###Code
# imports needed to run this cell standalone (assumption: the original notebook loaded these in an earlier cell)
from fastai.vision.all import *
import ipywidgets as widgets
from ipywidgets import VBox
from IPython.display import display

path = Path()
learn_inf = load_learner(path/'export.pkl', cpu=True)
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()
def on_click(change):
img = PILImage.create(btn_upload.data[-1])
out_pl.clear_output()
with out_pl: display(img.to_thumb(128,128))
pred,pred_idx,probs = learn_inf.predict(img)
lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
btn_upload.observe(on_click, names=['data'])
display(VBox([widgets.Label('Select your flower!'), btn_upload, out_pl, lbl_pred]))
###Output
_____no_output_____ |
analysis/cluster_c9orf72.ipynb | ###Markdown
C9orf72 AnalysisThis notebook analyzes the cluster membership for c9orf72 patients from AALS
###Code
%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
%load_ext autoreload
%autoreload 2
from analysis_utils import *
import mogp
from pathlib import Path
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=2)
sns.set_style("white")
col_palette = ['#176D9C', '#DBA137','#86AF49' ,'#df473c','#ae3c60', '#82b4bb']
parameters = {'pdf.fonttype': 42}
plt.rcParams.update(parameters)
df_c9 = pd.read_csv('data/raw_data/aals/v_NB_IATI_ALS_Gene_Mutations.csv', index_col='SubjectUID')
df_c9.index = df_c9.index + '_aals'
df_c9['c9orf72'] = df_c9['c9orf72'].map({'1':1, '2':0, '.':0}) #only count positive c9orf72 results
# select best MAP model
exp_path = Path('data/model_data/1_alsfrsr_all')
cur_proj = 'aals'
cur_model_path = exp_path / 'results' / 'rbf'
cur_data_path = exp_path / 'data_{}_min3_alsfrst.pkl'.format(cur_proj)
cur_model_suffix = 'model_{}_min3_alsfrst'.format(cur_proj)
cur_model = get_map_model(cur_model_path, cur_model_suffix, num_seeds=5)
cur_data = joblib.load(cur_data_path)
df_clust_memb = pd.DataFrame(zip(cur_data['SI'], cur_model.z), columns=['subj_id', 'cluster_id']).set_index('subj_id')
df_clust_memb = df_clust_memb.join(df_c9)
c9_freq = pd.DataFrame()
c9_freq['c9pos']=df_clust_memb.groupby('cluster_id')['c9orf72'].sum()
clust_size = df_clust_memb['cluster_id'].value_counts()
clust_size.name = 'clust_size'
c9_freq = c9_freq.join(clust_size)
c9_freq['freq'] = c9_freq['c9pos'] / c9_freq['clust_size']
c9_freq.sort_values(by='freq', ascending=False, inplace=True)
# Visualize clusters with highest proportions of c9orf72 patients
clust_size_thresh = 10
fig, ax = plt.subplots(figsize=(10,5))
vis_clust = c9_freq[c9_freq['clust_size']>clust_size_thresh].index[0:3]
disp_freq = []
for j, cur_k in enumerate(vis_clust):
cur_disp_freq = '{:.2f}%'.format(c9_freq.loc[cur_k]['freq']*100)
disp_freq.append(cur_disp_freq)
_, num_pat = plot_mogp_by_clust(ax, cur_model, cur_data, cur_k, data_flag=False, data_col='k', model_flag=True, model_col=col_palette[j], model_alpha=0.2, gpy_pad=0.5)
ax.get_legend().remove()
ind_lis = np.where(np.in1d(cur_data['SI'], df_clust_memb[(df_clust_memb['cluster_id']==cur_k)&(df_clust_memb['c9orf72']==1)].index))
_ = ax.plot(cur_data['XA'][ind_lis].T[1:], cur_data['YA'][ind_lis].T[1:], 'o-', color='k', alpha=0.75)
_ = format_mogp_axs(ax)
# Edit legend
handles, labels = ax.get_legend_handles_labels()
handles = [(x, matplotlib.patches.Patch(color=x.get_color(), alpha=0.25, linewidth=0)) for x in handles]
labels = disp_freq
_ = ax.legend(handles=handles, labels=labels, frameon=False)
_ = ax.set_ylabel('ALSFRS-R Total')
_ = ax.set_xlabel('Time since Symptom Onset (Years)')
c9_freq[c9_freq['clust_size']>clust_size_thresh]
###Output
_____no_output_____ |
nlp/doc2vec-sample.ipynb | ###Markdown
IMDB Movies Dataset Analysis using Doc2Vec Preprocessing Step
###Code
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
import string
wordnet_lemmatizer = WordNetLemmatizer()
stopwords = nltk.corpus.stopwords.words('english')
def readSentenceAndPreprocess(sentence):
# Remove stopwords
record = unicode(sentence.lower(), "utf-8")
record = word_tokenize(record)
record = [wordnet_lemmatizer.lemmatize(x) for x in record if x not in string.punctuation]
sentence = [w for w in record if w not in stopwords]
return sentence
###Output
_____no_output_____
###Markdown
Loading the model
###Code
from gensim import models
model = models.Doc2Vec.load('imdb-doc2vec.model')
###Output
_____no_output_____
###Markdown
Keyword-Test Paragraph Similarity Matrix
###Code
from gensim import models
import pandas
import numpy as np
# get top 10 similar words to keyword from model
keywords = [["lion","king"], ["batman"],["pokemon"],["matrix"],["darth","vader"]]
print "Top 10 Similar words:"
for keyword in keywords:
print "\n" + '_'.join(keyword)
print pandas.DataFrame(model.wv.most_similar(positive=keyword, topn=10), columns=['Word','Score'])
###Output
Top 10 Similar words:
lion_king
Word Score
0 rafiki 0.330048
1 simba 0.314177
2 matata 0.284346
3 pumbaa 0.282482
4 sabella 0.282138
5 timon 0.267771
6 1½ 0.266465
7 1/2\ 0.248916
8 hakuna 0.236737
9 marge 0.235552
batman
Word Score
0 batgirl 0.354142
1 penguin 0.295907
2 btas 0.272044
3 batwoman 0.271066
4 wb 0.268728
5 o'hearn 0.258377
6 gotham 0.246168
7 joker 0.243499
8 nightwing 0.242217
9 bartram 0.232965
pokemon
Word Score
0 celebi 0.475934
1 suicune 0.431710
2 4ever 0.345238
3 things\ 0.265295
4 pikachu 0.231380
5 vol 0.227041
6 miramax 0.223460
7 misty 0.217513
8 brock 0.216896
9 lugia 0.216481
matrix
Word Score
0 reloaded 0.282041
1 morpheus 0.237963
2 simulation 0.213727
3 kaiser 0.192410
4 iconography 0.192259
5 j.j. 0.184706
6 \special\ 0.182963
7 trickery 0.182767
8 truth\ 0.182744
9 renaissance 0.182124
darth_vader
Word Score
0 leia 0.487074
1 sith 0.454924
2 palpatine 0.446418
3 yoda 0.440748
4 endor 0.413773
5 lando 0.410392
6 tatooine 0.405396
7 jedi 0.372744
8 chewbacca 0.361869
9 skywalker 0.355867
|
.ipynb_checkpoints/FDA_Assignment-checkpoint.ipynb | ###Markdown
Fundamentals of Data Analysis Assignment Semester 2, October 2018, Eimear Butler This assignment focuses on the four data sets known as the Anscombe Quartet. I have been assigned questions in relation to the data set and have been asked to detail my response in this Jupyter Notebook. The four questions posed are as follows; 1. Explain the background to the dataset – who created it, when it was created, and any speculation you can find regarding how it might have been created. The Anscombe Quartet data was published in 1973 [1] by Francis Anscombe, an English statistician. Anscombe opens his article with a line that both sets the scene and summarises the objective of his research and article; > "Graphs are essential to good statistical analysis." He goes further during his introduction to challenge the thinking of the time that numbers are precise while graphs were perceived as "rough", and states that; > "A computer should make *both* calculations *and* graphs. Both sorts of output should be studied; each will contribute to understanding." > "Graphs can have various purposes such as: i) to help us perceive and appreciate some broad features of the data ii) to let us look behind those broad features and see what else is there." The use of computers for creating graphs was not as common as it is today, and Anscombe aimed to use the four data sets he created as evidence that this is an essential part of investigating all data. > "Most kinds of statistical calculation rest on assumptions about the behavior of the data. Those assumptions may be false, and then the calculations may be misleading. We ought always to try to check whether the assumptions are reasonably correct; and if they are wrong we ought to be able to perceive in what ways they are wrong. Graphs are very valuable for these purposes." Anscombe proceeds to outline the four data sets which ultimately prove his point [1]. Notice how Anscombe even presents the data set highlighting how the *x* values are exactly the same for the first 3 data sets and the fourth contains ten values of 8 with only one value of 19. The *y* values then vary, and no obvious pattern is seen between them. Anscombe goes on to explain the significance of the four data sets: when analysing the values only, they have very similar characteristics; however, when the values are plotted in a simple scatter plot, they are seen to be very different. In this assignment, I intend to test Anscombe's data to see if I can reproduce his results within this Jupyter Notebook. Regarding the way in which Anscombe created these data sets, this remains a mystery; however, today there are other examples available to us demonstrating how different data sets can produce very similar descriptive statistics but completely different graphs. The Datasaurus Dozen [2] is a fantastic demonstration of just how different data sets can be while maintaining the same key descriptive statistical values. The authors produced a video and gif file [2] as part of their publication to bring it to life, shown here: As you will have spotted, this dataset even uses Alberto Cairo's Datasaurus [3] as well as other somewhat symmetrical shapes while keeping the descriptive statistics the same to 2 decimal points. Cairo's Datasaurus is now used widely and is available as a csv file through his blog post [3] to recreate as I have done below.
###Code
import pandas as pd #import pandas functionality
import numpy as np #import numpy functionality
import matplotlib.pyplot as plt #import matplotlib functionalities
d = pd.read_csv('https://raw.githubusercontent.com/eimearbutler7/FundamentalsDA/master/z_Datasaurus_data.csv')
plt.plot(d.loc[:,'x'],d.loc[:,'y'],'o',color='black') # create graph using all x and y data values. Represent them with black circles
plt.show() # show the plot
###Output
_____no_output_____
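###Markdown
 As a quick check (a small addition that simply reuses the `d` DataFrame loaded above), we can print the summary statistics that the Datasaurus Dozen keeps fixed -- the means, the standard deviations and the x-y correlation:
###Code
# summary statistics of the Datasaurus point cloud (uses the DataFrame d from the cell above)
print(d.mean())
print(d.std())
print(d['x'].corr(d['y']))
###Output
_____no_output_____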
###Markdown
Justin Matejka and George Fitzmaurice started with the descriptive statistics* of the Datasaurus and then created a technique where they could feed in a target shape to their program and test the repeated movement of data points towards the new shape to see if they still met the overall descriptive statistics to within two decimal points4. This required running the process a number of times and, in his blog post, Matejka quotes the movement towards the circle shape needing 200,000 small movements4. \* *means, standard deviations, and correlations*They then used a system of simulated annealing to optimise the process and reduce the number of points needing to be tested. Simulated annealing works to optimise combinatorial methods and is described as: >"...unlikely to find the optimum solution, [however,] it can often find a very good solution, even in the presence of noisy data.5."An example of this technique in action is the *Travelling Salesman* problem where the salesman knows he needs to visit numerous locations scattered across an area but needs to find the shortest route to visit them all5. This challenge is well demonstrated in the following gif6 where the different combinations for all 125 points are processed to find the optimal route. 2. Plot the interesting aspects of the dataset.
###Code
#Load the .csv dataset from my git hub repository using pandas
df = pd.read_csv('https://raw.githubusercontent.com/eimearbutler7/FundamentalsDA/master/z_Anscombes.csv')
df #view the data
import seaborn as sns #import seaborn functionalities
sns.pairplot(df, hue='dataset') #seaborn's pairplot function allows us a quick overview of the data separating each data set by colour
###Output
_____no_output_____
###Markdown
Instantly using the seaborn `.pairplot` function the significant differences between the data sets can be seen. Let's go further and pull out each data set and analyse it individually to see its shape:
###Code
#create subsets of each data set 'x' yalues
x = df.loc[:,'x']
x1 = df.loc[:10,'x']
x2 = df.loc[11:21,'x']
x3 = df.loc[22:32,'x']
x4 = df.loc[33:43,'x']
#can call x1, x2, x3 or x4 here to test the data is correct before proceeding
#create subsets of each data set 'y' yalues
y = df.loc[:,'y']
y1 = df.loc[:10,'y']
y2 = df.loc[11:21,'y']
y3 = df.loc[22:32,'y']
y4 = df.loc[33:43,'y']
#can call y1, y2, y3 or y4 here to test the data is correct before proceeding
###Output
_____no_output_____
###Markdown
Using the numpy `.polyfit` function, I can also determine the *m* and *c* values for each plot where *m* is the slope of the line and *c* the value of *y* where the line crosses the *y* axis (i.e. where *x* is 0). This will allow me to add a "best fit" line to the graph.
###Code
(m1,c1) = np.polyfit(x1,y1,1) #use polyfit to determine the least-squares polynomial fit, where 1 is the degree of the fitted polynomial
(m2,c2) = np.polyfit(x2,y2,1) #repeat for x2,y2
(m3,c3) = np.polyfit(x3,y3,1) #repeat for x3,y3
(m4,c4) = np.polyfit(x4,y4,1) #repeat for x4,y4
#where the resuting m and c are values in the equation of a straight line (y=mx+c)
out = [(m1,c1),(m2,c2),(m3,c3),(m4,c4)]
out
# I can now plot the data with the best fit line using the method provided by Ian McLoughlan (https://raw.githubusercontent.com/ianmcloughlin/jupyter-teaching-notebooks/master/simple-linear-regression.ipynb)
plt.plot(x1, y1, 'k.', label='Original data')
plt.plot(x1, m1 * x1 + c1, 'b-', label='Best fit line') #using m and c values determined above
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x1') #set name for x axis
plt.ylabel('y1') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x2,y2
plt.plot(x2, y2, 'k.', label='Original data')
plt.plot(x2, m2 * x2 + c2, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x2') #set name for x axis
plt.ylabel('y2') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x3,y3
plt.plot(x3, y3, 'k.', label='Original data')
plt.plot(x3, m3 * x3 + c3, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x3') #set name for x axis
plt.ylabel('y3') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
# I can repeat for x4,y4
plt.plot(x4, y4, 'k.', label='Original data')
plt.plot(x4, m4 * x4 + c4, 'b-', label='Best fit line')
plt.xlim([0,20]) #set graph limits for x axis
plt.ylim([0,20]) #set graph limits for y axis
plt.xlabel('x4') #set name for x axis
plt.ylabel('y4') #set name for y axis
plt.legend() # add a legend
plt.show() #show the plot inline
###Output
_____no_output_____
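###Markdown
 As a compact alternative (a small addition that only reuses the values computed above), the four panels can also be drawn in a single 2x2 grid with a loop:
###Code
# plot all four data sets and their best fit lines in one figure, reusing the (m,c) pairs in `out`
fig, axes = plt.subplots(2, 2, figsize=(10, 8))
for ax, xi, yi, (mi, ci) in zip(axes.flat, [x1, x2, x3, x4], [y1, y2, y3, y4], out):
    ax.plot(xi, yi, 'k.')
    ax.plot(xi, mi * xi + ci, 'b-')
    ax.set_xlim([0, 20])
    ax.set_ylim([0, 20])
plt.show()
###Output
_____no_output_____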
###Markdown
Clearly all 4 graphs are very different even though they are fitted by an almost identical "best fit" line. 3. Calculate the descriptive statistics of the variables in the dataset Instantly using the `.describe` function we confirm some of Anscombe's descriptive statistics table.
###Code
df.describe() #also is useful to give an instant overview of the high level descriptive statistics
###Output
_____no_output_____
###Markdown
Lets go further and pull out each data set and analyse individually to see if I can confirm his descriptive statistics findings. Please note, where possible, results for all 4 datasets are presented in one list to allow easy comparison instead of being run in individual cells. - Number of observations (*n*) = 4 sets of 11 values- Mean of the x’s ($\bar{x}$) = 9.0 (also confirmed below for all 4 sets separately)
###Code
x_mean = [np.mean(x1),np.mean(x2), np.mean(x3), np.mean(x4)] #show list of mean x values for each data set
print('Mean of X values is:', x_mean)
y_mean = [np.mean(y1),np.mean(y2), np.mean(y3), np.mean(y4)] #show list of mean y values for each data set
print('Mean of Y values is:', y_mean)
###Output
Mean of Y values is: [7.500909090909093, 7.500909090909091, 7.500000000000001, 7.50090909090909]
###Markdown
The sample variance of the data is also consistent although Anscombe does not include this in his published table.
###Code
x_variance = [np.var(x1,ddof=1), np.var(x2,ddof=1), np.var(x3,ddof=1), np.var(x4,ddof=1)] #show list of x variance values for each data set where ddof=1 was added using advice from https://stackoverflow.com/questions/41204400/what-is-the-difference-between-numpy-var-and-statistics-variance-in-python
print('Variance for X values are:', x_variance)
y_variance = [np.var(y1,ddof=1), np.var(y2,ddof=1), np.var(y3,ddof=1), np.var(y4,ddof=1)] #show list of y variance values for each data set where ddof=1 was added using advice from https://stackoverflow.com/questions/41204400/what-is-the-difference-between-numpy-var-and-statistics-variance-in-python
print('Variance for Y values are:', y_variance)
###Output
Variance for Y values are: [4.127269090909091, 4.127629090909091, 4.12262, 4.12324909090909]
###Markdown
Anscombe has instead represented variance using the R2 coefficient which I can also determine for each data set using the `stats.linregress` function:
###Code
from scipy import stats #import stats function within scipy
slope_1, intercept_1, r_value_1, p_value_1, std_err_1 = stats.linregress(x1, y1) #use example from scipy manual to find r value (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html)
slope_2, intercept_2, r_value_2, p_value_2, std_err_2 = stats.linregress(x2, y2) #repeat for other data sets
slope_3, intercept_3, r_value_3, p_value_3, std_err_3 = stats.linregress(x3, y3) #repeat for other data sets
slope_4, intercept_4, r_value_4, p_value_4, std_err_4 = stats.linregress(x4, y4) #repeat for other data sets
print("R Squared Value for all 4 sets", r_value_1**2, r_value_2**2, r_value_3**2, r_value_4**2)
###Output
R Squared Value for all 4 sets 0.666542459508775 0.6662420337274844 0.6663240410665591 0.6667072568984652
###Markdown
Again we see consistent results, with R2 values matching to 3 decimal places, and likewise with the Standard Deviation:
###Code
x_SD = [np.std(x1), np.std(x2), np.std(x3), np.std(x4)] #show list of standard distribution for x values in each data set
print('Standard Deviation of X Values are:', x_SD)
y_SD = [np.std(y1), np.std(y2), np.std(y3), np.std(y4)] #show list of standard distribution for y values in each data set
print('Standard Deviation of Y Values are:', y_SD)
###Output
Standard Deviation of Y Values are: [1.937024215108669, 1.93710869148962, 1.9359329439927313, 1.9360806451340837]
###Markdown
I can also now calculate the Sum of Squares value using the calculation Anscombe gave us himself in his overview table, the sum of squares of (x - $\bar{x}$):
###Code
#Anscombe gives us the sum of squares of (x - mean); note that sum(x**2) - n*mean**2 is an equivalent form
x_sum_of_squares = [np.sum((x1**2)-(np.mean(x1)**2)), np.sum((x2**2)-(np.mean(x2)**2)), np.sum((x3**2)-(np.mean(x3)**2)), np.sum((x4**2)-(np.mean(x4)**2))]
print('X value Sum of Squares:', x_sum_of_squares) #results printed in a list to allow easy comparison
###Output
X value Sum of Squares: [110.0, 110.0, 110.0, 110.0]
###Markdown
Residual Sum of Squares and Regression Sum of Squares Attempts **Expected Result: Residual sum of squares of y = 13.75 (9 d.f.), Regression sum of squares = 27.50 (1 d.f.)** In order to calculate these values, first I must refer to the equations used to obtain them. The following diagram9 has been very useful to understand them. **NOTE:** Explained Sum of Squares (ESS) is also known as Regression Sum of Squares8 I aim to find Total Sum of Squares (TSS), Residual Sum of Squares (RSS) and Explained Sum of Squares (ESS) below and then test if TSS = RSS + ESS7.First I create a list of "predicted" 'y' values (i.e. what the best fit line 'y' value) at the same 'x' values to try and compare the 2 and therefore establish the Residual Sum of Squares (RSS).
###Code
y1_best = m1*(x1) + c1 #I already know m, x and c so I can use the equation of the line to find y
y2_best = m2*(x2) + c2
y3_best = m3*(x3) + c3
y4_best = m4*(x4) + c4
y1_best #this works and gives me the y values for the best fit line
plt.plot(x1,y1_best,color='black') #lets plot it and check
plt.plot(x2,y2_best,color='red')
plt.plot(x3,y3_best,color='yellow')
plt.plot(x4,y4_best,color='green')
plt.xlim([0,20])
plt.ylim([0,20])
plt.show() # we an see that all the lines are consistent and therefore this are the predicted y values.
RSS1 = sum((y1-y1_best)**2) #Residual Sum of Squares equation from diagram (https://i.stack.imgur.com/FOzPq.png)
RSS2 = sum((y2-y2_best)**2)
RSS3 = sum((y3-y3_best)**2)
RSS4 = sum((y4-y4_best)**2)
RSS_list = [RSS1, RSS2, RSS3, RSS4]
print('RSS of each data set is:', RSS_list)
###Output
RSS of each data set is: [13.762689999999996, 13.776290909090909, 13.756191818181817, 13.742490000000005]
###Markdown
This result is fairly consistent across the 4 data sets and consistent with Anscombe's result of 13.75. I cannot find evidence that the "cost" function within the Machine Learning environment, which also measures how close the values are to the best fit line, is the exact same value as the RSS. However, they are very similar: in calculating the cost for the first data set, I get a result that is only 0.01 away from Anscombe's overall result (13.75) and matches the first dataset's RSS to five decimal places (13.76268).
###Code
cost = lambda m1,c1: np.sum([(y1[i] - m1 * x1[i] - c1)**2 for i in range(x1.size)]) #using Ian McLoughlan's formula (https://github.com/ianmcloughlin/jupyter-teaching-notebooks/raw/master/simple-linear-regression.ipynb)
print("Cost of first dataset is: %8.2f" % (cost(m1, c1)))
###Output
Cost of first dataset is: 13.76
###Markdown
To confirm the statement in the diagram that TSS = RSS + ESS I calculate TSS and ESS and test the equation
###Code
TSS1 = sum((y1-np.mean(y1))**2) #equation for Total sum of squares (https://i.stack.imgur.com/FOzPq.png)
TSS2 = sum((y2-np.mean(y2))**2) #repeat for other datasets
TSS3 = sum((y3-np.mean(y3))**2) #repeat for other datasets
TSS4 = sum((y4-np.mean(y4))**2) #repeat for other datasets
TSS_list = [TSS1, TSS2, TSS3, TSS4]
print('TSS of each data set is:', TSS_list)
ESS1 = sum((y1_best-np.mean(y1))**2) #Explained Sum of Squares
ESS2 = sum((y2_best-np.mean(y2))**2)
ESS3 = sum((y3_best-np.mean(y3))**2)
ESS4 = sum((y4_best-np.mean(y4))**2)
ESS_list = [ESS1, ESS2, ESS3, ESS4]
print('ESS of each data set is:', ESS_list)
Equation_success = [RSS1 + ESS1 == TSS1, RSS2 + ESS2 == TSS2, RSS3 + ESS3 == TSS3, RSS4 + ESS4 == TSS4] #tested to see if the results were "adding up" - they are!!
Equation_success
###Output
_____no_output_____
###Markdown
Although the equality check returns False for data sets 3 and 4 (a floating-point rounding effect), they give the same overall value to at least 10 decimal places.
###Code
RSS3 + ESS3 #this is correct to at least 10 decimal places
RSS4 + ESS4 #this is correct to at least 10 decimal places
###Output
_____no_output_____
###Markdown
Summary Going back to Anscombe's original table, I have now seen the following to be true (to at least 2 decimal places): | **Parameter** | **Anscombe's Result** | **Reproduced Here** |--- | --- | --- ||Number of observations (*n*)| 11 | Yes| Mean of the *x*'s ($\bar{x}$) | 9.0 | Yes| Mean of the *y*'s ($\bar{y}$) | 7.5 | Yes| Regression coefficient(*b*1) of *y* on *x* | 0.5 | Yes| Equation of regression line *y* | 3 + 0.5 *x* | Yes| Sum of squares of *x* - ($\bar{x}$) | 110.0 | Yes| Multiple *R*2 | 0.667 | Yes| Residual sum of squares of *y* | 13.75 (9 d.f.) | Yes| Regression sum of squares | 27.50 (1 d.f.) | Yes No conclusive confirmation could be made of the following parameter however, my work to try is in the back-up section of the notebook: - Estimated standard error of *b*1 - 0.118 4. Explain why the dataset is interesting, referring to the plots and statistics above. In summary, these four sets of data are fascinating as they produce almost identical descriptive statistics and even have portions of their values that are the very same (i.e. sets 1, 2 and 3 all have the same *x* values). Yet once plotted, they each create very different shaped curves. Francis Anscombe set out to prove the worth of plotting data *as well as* analyzing it at the numerical level. I think he made a very strong statement as these data sets and his message continue to be analysed and discussed at length today. As discussed in section 1, the research by Matejka and Fitzmaurice, only published in 2017, has ensured this subject will continue to be discussed for some time to come. This concept and warning continues to be more and more relevant as data set size increases and data becomes even more vital in making decisions both in business and the wider society. References 1. F. J. Anscombe (1973). Graphs in Statistical Analysis. The American Statistician, 27(1):17-21. 2. Same Stats, Different Graphs: Generating Datasets with Varied Appearance and Identical Statistics through Simulated Annealing (The Datasaurus Dozen), J.Matejka and G.Fitzmaurice 2017 https://dl.acm.org/citation.cfm?doid=3025453.3025912 3. The original Datasaurus, Alberto Cairo, 2016 http://www.thefunctionalart.com/2016/08/download-datasaurus-never-trust-summary.html 4. Supplementary blogpost on The Datasaurus Dozen by J.Matejka 2017 https://www.autodeskresearch.com/publications/samestats 5. http://mathworld.wolfram.com/SimulatedAnnealing.html 6. https://upload.wikimedia.org/wikipedia/commons/1/10/Travelling_salesman_problem_solved_with_simulated_annealing.gif 7. https://en.wikipedia.org/wiki/Residual_sum_of_squares 8. https://en.wikipedia.org/wiki/Explained_sum_of_squares 9. https://stats.stackexchange.com/questions/265869/confused-with-residual-sum-of-squares-and-total-sum-of-squares Back Up Estimated Standard Error Attempt **Expected result: Estimated standard error of b1 - 0.118**Standard Error of the *Mean* (SEM) = standard deviation of the sample divided by the square root of n (the sample size).https://www.statsdirect.com/help/basic_descriptive_statistics/standard_deviation.html
###Code
SEM = (np.std(x1)/np.sqrt(11))
SEM
SEx = np.std(x1, ddof=1) / np.sqrt(len(x1))   # same idea using the sample (n-1) standard deviation
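# --- illustrative addition (not in the original notebook) ---
# The usual OLS formula for the standard error of the slope b1 is
# sqrt( (RSS / (n - 2)) / sum((x - mean(x))**2) ); reusing RSS1 and x1 from the cells above:
se_b1 = np.sqrt((RSS1 / (len(x1) - 2)) / np.sum((x1 - np.mean(x1))**2))
se_b1   # approximately 0.118 for the first data set, in line with the 0.118 in Anscombe's table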
###Output
_____no_output_____ |
PythonIntroduction/part_1.ipynb | ###Markdown
Introduction to Python Programming Tobias Micklitz & Carsten Hensel     Python program files * Python code is usually stored in text files with the file ending "`.py`": myprogram.py* Every line in a Python program file is assumed to be a Python statement, or part thereof. * The only exceptions are comment lines, which start with the character `#` (optionally preceded by an arbitrary number of white-space characters, i.e., tabs or spaces). Comment lines are usually ignored by the Python interpreter. * To run our Python program from the command line we use: $ python myprogram.py* On UNIX systems it is common to define the path to the interpreter on the first line of the program: #!/usr/bin/env python If we do, and if we additionally set the script file to be executable, we can run the program like this: $ myprogram.py Jupyter notebooks (IPython) * This file - a Jupyter notebook - does not follow the standard pattern with Python code in a text file. * Instead, a Jupyter notebook is stored as a file in the [JSON](http://en.wikipedia.org/wiki/JSON) format. * The advantage is that we can mix formatted text, Python code and code output. * It requires the Jupyter notebook server to run it though, and therefore isn't a stand-alone Python program as described above. * Other than that, there is no difference between the Python code that goes into a program file or a Jupyter notebook.   Modules * Most of the functionality in Python is provided by *modules*. * The **Python Standard Library** is a large collection of modules that provides *cross-platform* implementations of common facilities such as access to the operating system, file I/O, string management, network communication, and much more. References * The Python Language Reference: http://docs.python.org/3/reference/index.html * The Python Standard Library: http://docs.python.org/3/library/ * To use a module in a Python program it first has to be imported. * A module can be imported using the `import` statement. How to use a Module?
###Code
# how to use the math module
import math
###Output
_____no_output_____
###Markdown
This includes the whole module and makes it available for use later in the program. For example, we can do:
###Code
import math
x = math.cos(2 * math.pi) # note the 'math' prefix telling the system
# where cos and pi are 'located'
print(x)
###Output
1.0
###Markdown
Alternatively, we can chose to import all symbols (functions and variables) in a module to the current namespace (so that we don't need to use the prefix "`math.`" every time we use something from the `math` module:
###Code
from math import * # instead of import math
x = cos(2 * pi) # no math prefix needed
print(x)
###Output
1.0
###Markdown
* This pattern can be very convenient. * But in large programs that include many modules it is often a good idea to keep the symbols from each module in their own **namespaces**. * This would eliminate potentially confusing problems with namespace collisions. As a third alternative, we can choose to import only a few selected symbols from a module by explicitly listing which ones we want to import instead of using the wildcard character `*`:
###Code
from math import cos, pi
x = cos(2 * pi)
print(x)
###Output
1.0
###Markdown
Looking at what a module contains, and its documentation Once a module is imported, we can list the symbols it provides using the `dir` function:
###Code
import math
print(dir(math))
###Output
['__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc']
###Markdown
And using the function `help` we can get a description of each function (almost .. not all functions have docstrings, as they are technically called, but the vast majority of functions are documented this way).
###Code
help(math.log)
log(10)
log(10, 2)
###Output
_____no_output_____
###Markdown
We can also use the `help` function directly on modules: Try help(math) Some very useful modules from the Python standard library are `os`, `sys`, `math`, `shutil`, `re`, `subprocess`, `multiprocessing`, `threading`. Complete lists of standard modules for Python 2 and Python 3 are available at http://docs.python.org/2/library/ and http://docs.python.org/3/library/, respectively. Easter Egg
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
###Markdown
Variables and types Symbol names * Variable names in Python can contain alphanumerical characters `a-z`, `A-Z`, `0-9` and some special characters such as `_`. * Normal variable names must start with a letter. * By convention, variable names start with a lower-case letter, and **Class** names start with a capital letter. * In addition, there are a number of Python keywords that cannot be used as variable names: `and, as, assert, break, class, continue, def, del, elif, else, except, exec, finally, for, from, global, if, import, in, is, lambda, not, or, pass, print, raise, return, try, while, with, yield` * Note: Be aware of the keyword `lambda`, which could easily be a natural variable name in a scientific program. But being a keyword, it cannot be used as a variable name. Assignment * The assignment operator in Python is `=`.* Python is a dynamically typed language, so we do not need to specify the type of a variable when we create one.* Assigning a value to a new variable creates the variable:
###Code
# variable assignments
x = 1.0
my_variable = 12.2
###Output
_____no_output_____
###Markdown
Although not explicitly specified, a variable does have a type associated with it. The type is derived from the value that was assigned to it.
###Code
type(x)
###Output
_____no_output_____
###Markdown
If we assign a new value to a variable, its type can change.
###Code
x = 1
type(x)
###Output
_____no_output_____
###Markdown
If we try to use a variable that has not yet been defined we get an `NameError`:
###Code
print(y)
###Output
_____no_output_____
###Markdown
Fundamental types
###Code
# integers
x = 2
type(x)
# In Python 2 there was also a 'long integer', written with an 'l' suffix.
# In Python 3 this literal is a SyntaxError, so it is left commented out:
# w = 5l
# type(w)
# float
x = 2.0
type(x)
# boolean
b1 = True
b2 = False
type(b1)
# complex numbers: note the use of `j` to specify the imaginary part
x = 1.0 - 1.0j
type(x)
print(x)
print(x.real, x.imag)
###Output
1.0 -1.0
###Markdown
Type utility functions The module `types` contains a number of type name definitions that can be used to test if variables are of certain types:
###Code
import types
# print all types defined in the `types` module
print(dir(types))
x = 1.0
# check if the variable x is a float
type(x) is float
# check if the variable x is an int
type(x) is int
###Output
_____no_output_____
###Markdown
We can also use the `isinstance` method for testing types of variables:
###Code
isinstance(x, float)
###Output
_____no_output_____
###Markdown
Type casting
###Code
x = 1.5
print(x, type(x))
x = int(x)
print(x, type(x))
z = complex(x)
print(z, type(z))
x = float(z)
###Output
_____no_output_____
###Markdown
Complex variables cannot be cast to floats or integers. We need to use `z.real` or `z.imag` to extract the part of the complex number we want:
###Code
print(z)
y = float(z.real)
print(z.real, " -> ", y, type(y))
y = bool(z.imag)
print(z.imag, " -> ", y, type(y))
###Output
_____no_output_____
###Markdown
Operators and comparisons Most operators and comparisons in Python work as one would expect:* Arithmetic operators `+`, `-`, `*`, `/`, `//` (integer division), '**' power
###Code
1 + 2, 1 - 2, 1 * 2, 1 / 2
1.0 + 2.0, 1.0 - 2.0, 1.0 * 2.0, 1.0 / 2.0
# Integer division of float numbers
3.0 // 2.0
# Note! The power operators in python isn't ^, but **
2 ** 10
###Output
_____no_output_____
###Markdown
* Note: The `/` operator always performs a floating point division in Python 3.x.This is not true in Python 2.x, where the result of `/` is always an integer if the operands are integers.* To be more specific, `1/2 = 0.5` (`float`) in Python 3.x, and `1/2 = 0` (`int`) in Python 2.x (but `1.0/2 = 0.5` in Python 2.x). Note: The boolean operators are spelled out as the words `and`, `not`, `or`.
###Code
True and False
not False
True or False
###Output
_____no_output_____
###Markdown
* Comparison operators `>`, `<`, `>=` (greater or equal), `<=` (less or equal), `==` equality, `is` identical.
###Code
2 > 1, 2 < 1
2 > 2, 2 < 2
2 >= 2, 2 <= 2
# equality
[1,2] == [1,2]
# objects identical?
a1 = a2 = [1,2]
print(a1 is a2)
b1 = [1, 2]
b2 = [1, 2]
# b2 = b1
print(b1, b2)
print(b1 == b2)
print(b1 is b2)
###Output
[1, 2] [1, 2]
True
False
###Markdown
Compound types: Strings, List and dictionaries Strings Strings are the variable type that is used for storing text messages.
###Code
s = "Hello world"
type(s)
# length of the string: the number of characters
len(s)
# replace a substring in a string with something else
s2 = s.replace("world", "test")
print(s2)
print(s)
###Output
Hello test
Hello world
###Markdown
We can index a character in a string using `[]`:
###Code
s[0]
###Output
_____no_output_____
###Markdown
**Heads up MATLAB users:** Indexing starts at 0! We can extract a part of a string using the syntax `[start:stop]`, which extracts characters between index `start` and `stop` -1 (the character at index `stop` is not included):
###Code
s[0:5]
s[4:5]
###Output
_____no_output_____
###Markdown
If we omit either (or both) of `start` or `stop` from `[start:stop]`, the default is the beginning and the end of the string, respectively:
###Code
s[:5]
s[6:]
s[:]
###Output
_____no_output_____
###Markdown
We can also define the step size using the syntax `[start:end:step]` (the default value for `step` is 1, as we saw above):
###Code
s[::1]
s[::2]
###Output
_____no_output_____
###Markdown
This technique is called *slicing*. Read more about the syntax here: http://docs.python.org/release/2.7.3/library/functions.html?highlight=slice#slice Python has a very rich set of functions for text processing. See for example http://docs.python.org/2/library/string.html for more information. String formatting examples
###Code
print("str1", "str2", "str3") # The print statement concatenates
# strings with a space
print("str1", 1.0, False, -1j) # The print statements converts all
# arguments to strings
print("str1" + "str2" + "str3") # strings added with +
# are concatenated without space
print("value = %f" % 1.0) # we can use C-style string formatting
# this formatting creates a string
s2 = "value1 = %.2f. value2 = %d" % (3.1415, 1.5)
print(s2)
# alternative, more intuitive way of formatting a string
s3 = 'value1 = {0}, value2 = {1}'.format(3.1415, 1.5)
print(s3)
###Output
_____no_output_____
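###Markdown
A more recent alternative (available from Python 3.6 onward) is the f-string; a brief illustrative example:
###Code
value1, value2 = 3.1415, 1.5
s4 = f"value1 = {value1:.2f}, value2 = {value2}"
print(s4)
###Output
_____no_output_____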
###Markdown
Lists Lists are very similar to strings, except that each element can be of any type.The syntax for creating lists in Python is `[...]`:
###Code
l = [1,2,3,4]
print(type(l))
print(l)
###Output
<class 'list'>
[1, 2, 3, 4]
###Markdown
We can use the same slicing techniques to manipulate lists as we could use on strings:
###Code
print(l)
print(l[1:3])
print(l[::2])
###Output
[1, 2, 3, 4]
[2, 3]
[1, 3]
###Markdown
**Heads up MATLAB users:** Indexing starts at 0!
###Code
l[0]
###Output
_____no_output_____
###Markdown
Elements in a list do not all have to be of the same type:
###Code
l = [1, 'a', 1.0, 1-1j]
print(l)
print(type(l))
###Output
[1, 'a', 1.0, (1-1j)]
<class 'list'>
###Markdown
Python lists can be inhomogeneous and arbitrarily nested:
###Code
nested_list = [1, [2, [3, [4, [5]]]]]
nested_list
###Output
_____no_output_____
###Markdown
* Lists play a very important role in Python. * For example they are used in loops and other flow control structures (discussed below). * There are a number of convenient functions for generating lists of various types, for example the `range` function:
###Code
start = 10
stop = 30
step = 2
a = list(range(start, stop, step))
print(a)
# in python 3 range generates an interator, which can be converted to a list using 'list(...)'.
# It has no effect in python 2
list(range(start, stop, step))
list(range(-10, 10))
s = "Python aula 4"
# convert a string to a list by type casting:
s2 = list(s)
s2
# sorting lists
s2.sort()
print(s2)
###Output
[' ', ' ', '4', 'P', 'a', 'a', 'h', 'l', 'n', 'o', 't', 'u', 'y']
###Markdown
Adding, inserting, modifying, and removing elements from lists
###Code
# create a new empty list
l = []
# add an elements using `append`
l.append("A")
l.append("d")
l.append("d")
print(l)
###Output
['A', 'd', 'd']
###Markdown
We can modify lists by assigning new values to elements in the list. In technical jargon, lists are *mutable*.
###Code
l[1] = "p"
l[2] = "p"
print(l)
l[1:3] = ["d", "d"]
print(l)
###Output
['A', 'd', 'd']
###Markdown
Insert an element at a specific index using `insert`
###Code
l.insert(0, "i")
l.insert(1, "n")
l.insert(2, "s")
l.insert(3, "e")
l.insert(4, "r")
l.insert(5, "t")
print(l)
###Output
_____no_output_____
###Markdown
Remove first element with specific value using 'remove'
###Code
l.remove("A")
print(l)
###Output
_____no_output_____
###Markdown
Remove an element at a specific location using `del`:
###Code
del l[7]
del l[6]
print(l)
###Output
_____no_output_____
###Markdown
See `help(list)` for more details, or read the online documentation Tuples * Tuples are like lists, except that they cannot be modified once created, that is they are *immutable*. * In Python, tuples are created using the syntax `(..., ..., ...)`, or even `..., ...`:
###Code
point = 10, 20
print(point, type(point))
# point[0] = 3  # would raise a TypeError -- tuples are immutable (demonstrated below)
print(point, type(point))
###Output
_____no_output_____
###Markdown
We can **unpack** a tuple by assigning it to a comma-separated list of variables:
###Code
x, y = point
print("x =", x)
print("y =", y)
###Output
x = 10
y = 20
###Markdown
If we try to assign a new value to an element in a tuple we get an error:
###Code
point[0] = 20
###Output
_____no_output_____
###Markdown
Dictionaries Dictionaries are also like lists, except that each element is a key-value pair. The syntax for dictionaries is `{key1 : value1, ...}`:
###Code
params = {"parameter1" : 1.0,
"parameter2" : [2.0,5],
"parameter3" : 3.0}
print(type(params))
print(params)
params['parameter1']
params["parameter1"] = "A"
params["parameter2"] = "B"
# add a new entry
params["parameter4"] = "D"
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
print("parameter4 = " + str(params["parameter4"]))
###Output
_____no_output_____
###Markdown
Control Flow Conditional statements: if, elif, else The Python syntax for conditional execution of code uses the keywords `if`, `elif` (else if), `else`:
###Code
statement1 = False
statement2 = False
if statement1:
    print("statement 1 is True")
elif statement2:
    print("statement 2 is True")
else:
    print("statement 1 and statement 2 are False")
###Output
statement 1 and statement 2 are False
###Markdown
For the first time, here we encountered a peculiar and unusual aspect of the Python programming language: Program blocks are defined by their indentation level. Compare to the equivalent C code: if (statement1) { printf("statement1 is True\n"); } else if (statement2) { printf("statement2 is True\n"); } else { printf("statement1 and statement2 are False\n"); } In C blocks are defined by the enclosing curly brackets `{` and `}`. And the level of indentation (white space before the code statements) does not matter (completely optional). But in Python, the extent of a code block is defined by the indentation level (usually a tab or say four white spaces). This means that we have to be careful to indent our code correctly, or else we will get syntax errors. Examples:
###Code
statement1 = statement2 = True

if statement1:
    if statement2:
        print("both statement1 and statement2 are True")

# Bad indentation!
if statement1:
    if statement2:
    print("both statement1 and statement2 are True") # this line is not
                                                     # properly indented

statement1 = False
if statement1:
    print("printed if statement1 is True")
    print("still inside the if block")

if statement1:
    print("printed if statement1 is True")
print("now outside the if block")
###Output
now outside the if block
###Markdown
Loops In Python, loops can be programmed in a number of different ways. The most common is the `for` loop, which is used together with iterable objects, such as lists. The basic syntax is: **`for` loops**:
###Code
for x in [5,2,3]:
    print(x)
###Output
5
2
3
###Markdown
* The `for` loop iterates over the elements of the supplied list, and executes the containing block once for each element. * Any kind of list can be used in the `for` loop. For example:
###Code
for x in range(4): # by default range starts at 0
    print(x)
###Output
0
1
2
3
###Markdown
Note: `range(4)` does not include 4 !
###Code
for x in range(-3,3):
    print(x)

for word in ["scientific", "computing", "with", "python"]:
    print(word)
###Output
scientific
computing
with
python
###Markdown
To iterate over key-value pairs of a dictionary:
###Code
for key, value in params.items():
    print(key + " = " + str(value))
###Output
parameter1 = 1.0
parameter2 = [2.0, 5]
parameter3 = 3.0
###Markdown
Sometimes it is useful to have access to the indices of the values when iterating over a list. We can use the `enumerate` function for this:
###Code
for idx, x in enumerate(range(-3,3)):
    print(idx, x)
###Output
_____no_output_____
###Markdown
List comprehensions: Creating lists using `for` loops: A convenient and compact way to initialize lists:
###Code
l1 = [x**2 for x in range(0,5)]
print(l1)
###Output
[0, 1, 4, 9, 16]
###Markdown
Try to avoid code like:
###Code
mylist = []
for i in range(0, 5):
    mylist.append(i**2)
print(mylist)
###Output
[0, 1, 4, 9, 16]
###Markdown
`while` loops:
###Code
i = 0
while i < 5:
    print(i)
    i = i + 1

print("done")
###Output
_____no_output_____
###Markdown
Note that the `print("done")` statement is not part of the `while` loop body because of the difference in indentation. Functions * A function in Python is defined using the keyword `def`, followed by a function name, a signature within parentheses `()`, and a colon `:`. * The following code, with one additional level of indentation, is the function body.
###Code
def func0():
    print("test")

func0()
###Output
test
###Markdown
* Optionally, but highly recommended, we can define a so called "docstring", which is a description of the function's purpose and behavior. * The docstring should follow directly after the function definition, before the code in the function body.
###Code
def func1(s):
    """
    Print a string 's' and tell how many characters it has
    """
    print(s + " has " + str(len(s)) + " characters")
help(func1)
func1("test")
###Output
test has 4 characters
###Markdown
Functions that return a value use the `return` keyword:
###Code
def square(x):
    """
    Return the square of x.
    """
    return x ** 2
square(4)
###Output
_____no_output_____
###Markdown
We can return multiple values from a function using tuples (see above):
###Code
def powers(x):
    """
    Return a few powers of x.
    """
    return x ** 2, x ** 3, x ** 4
powers(3)
x2, x3, x4 = powers(3)
print(x3)
###Output
27
###Markdown
Default argument and keyword arguments In a definition of a function, we can give default values to the arguments the function takes:
###Code
def myfunc(x, p=2, debug=False):
    if debug:
        print("evaluating myfunc for x = " + str(x) + " using exponent p = " + str(p))
    return x**p
###Output
_____no_output_____
###Markdown
If we don't provide a value of the `debug` argument when calling the the function `myfunc` it defaults to the value provided in the function definition:
###Code
myfunc(5)
myfunc(5, debug=True)
###Output
evaluating myfunc for x = 5 using exponent p = 2
###Markdown
* If we explicitly list the name of the arguments in the function calls, they do not need to come in the same order as in the function definition. * This is called *keyword* arguments, and is often very useful in functions that takes a lot of optional arguments.
###Code
myfunc(p=3, debug=True, x=7)
###Output
_____no_output_____
###Markdown
Unnamed functions (lambda function) In Python we can also create unnamed functions, using the `lambda` keyword:
###Code
f1 = lambda x: x**2
# is equivalent to
def f2(x):
    return x**2
f1(2), f2(2)
###Output
_____no_output_____
###Markdown
This technique is useful for example when we want to pass a simple function as an argument to another function, like this:
###Code
# map is a built-in python function
map(lambda x: x**2, range(-3,4))
# in python 3 we can use `list(...)` to convert the iterator to an explicit list
list(map(lambda x: x**2, range(-3,4)))
###Output
_____no_output_____
###Markdown
Fun with Functions
###Code
def fun1(x):
    return x ** 1

def fun2(x):
    return x ** 2

def fun3(x):
    return x ** 3

def fun4(x):
    return x ** 4

functions = [fun1, fun2, fun3, fun4]

for f in functions:
    print(f(3))
###Output
3
9
27
81
###Markdown
Topics not Covered (but worthwhile mentioning) - Exceptions (Error Handling) - Creating `Modules` - Classes (Object Oriented Programming) - ...            Further reading * http://www.python.org - The official web page of the Python programming language.* http://www.python.org/dev/peps/pep-0008 - Style guide for Python programming. Highly recommended. * http://www.greenteapress.com/thinkpython/ - A free book on Python programming.* [Python Essential Reference](http://www.amazon.com/Python-Essential-Reference-4th-Edition/dp/0672329786) - A good reference book on Python programming. Additional Information Classes * Classes are the key features of **object-oriented programming**. * A class is a structure for representing an object and the operations that can be performed on the object. * In Python a class can contain *attributes* (variables) and *methods* (functions). A class is defined almost like a function, but using the `class` keyword, and the class definition usually contains a number of class method definitions (a function in a class).* Each class method should have an argument `self` as its first argument. This object is a self-reference.* Some class method names have special meaning, for example: * `__init__`: The name of the method that is invoked when the object is first created. * `__str__` : A method that is invoked when a simple string representation of the class is needed, as for example when printed. * There are many more, see http://docs.python.org/2/reference/datamodel.htmlspecial-method-names
###Code
class Point:
    """
    Simple class for representing a point in a Cartesian coordinate system.
    """

    def __init__(self, x, y):
        """
        Create a new Point at x, y.
        """
        self.x = x
        self.y = y

    def translate(self, dx, dy):
        """
        Translate the point by dx and dy in the x and y direction.
        """
        self.x += dx
        self.y += dy

    def __str__(self):
        return("Point at [%f, %f]" % (self.x, self.y))
###Output
_____no_output_____
###Markdown
To create a new instance of a class:
###Code
p1 = Point(0, 0) # this will invoke the __init__ method in the Point class
print(p1) # this will invoke the __str__ method
###Output
_____no_output_____
###Markdown
To invoke a class method in the class instance `p`:
###Code
p2 = Point(1, 1)
p1.translate(0.25, 1.5)
print(p1)
print(p2)
###Output
_____no_output_____
###Markdown
* Note that calling class methods can modify the state of that particular class instance, but does not affect other class instances or any global variables.* That is one of the nice things about object-oriented design: code such as functions and related variables are grouped in separate and independent entities. Modules * One of the most important concepts in good programming is to reuse code and avoid repetitions.* The idea is to write functions and classes with a well-defined purpose and scope, and reuse these instead of repeating similar code in different parts of a program (modular programming). * The result is usually that readability and maintainability of a program is greatly improved.* What this means in practice is that our programs have fewer bugs, are easier to extend and debug/troubleshoot. * Python supports modular programming at different levels. Functions and classes are examples of tools for low-level modular programming. * Python modules are a higher-level modular programming construct, where we can collect related variables, functions and classes in a module. A python module is defined in a python file (with file-ending `.py`), and it can be made accessible to other Python modules and programs using the `import` statement. * Consider the following example: the file `mymodule.py` contains simple example implementations of a variable, function and a class:
###Code
%%file mymodule.py
"""
Example of a python module. Contains a variable called my_variable,
a function called my_function, and a class called MyClass.
"""

my_variable = 0

def my_function():
    """
    Example function
    """
    return my_variable

class MyClass:
    """
    Example class.
    """

    def __init__(self):
        self.variable = my_variable

    def set_variable(self, new_value):
        """
        Set self.variable to a new value
        """
        self.variable = new_value

    def get_variable(self):
        return self.variable
###Output
_____no_output_____
###Markdown
We can import the module `mymodule` into our Python program using `import`:
###Code
import mymodule
###Output
_____no_output_____
###Markdown
Use `help(module)` to get a summary of what the module provides:
###Code
help(mymodule)
mymodule.my_variable
mymodule.my_function()
my_class = mymodule.MyClass()
my_class.set_variable(10)
my_class.get_variable()
###Output
_____no_output_____
###Markdown
If we make changes to the code in `mymodule.py`, we need to reload it using `reload`:
###Code
reload(mymodule) # works only in python 2; in Python 3 use importlib.reload(mymodule)
###Output
_____no_output_____
###Markdown
Exceptions * In Python errors are managed with a special language construct called "Exceptions". * When errors occur exceptions can be raised, which interrupts the normal program flow and fallback to somewhere else in the code where the closest try-except statement is defined. To generate an exception we can use the `raise` statement, which takes an argument that must be an instance of the class `BaseException` or a class derived from it.
###Code
raise Exception("description of the error")
###Output
_____no_output_____
###Markdown
A typical use of exceptions is to abort functions when some error condition occurs, for example: def my_function(arguments): if not verify(arguments): raise Exception("Invalid arguments") rest of the code goes here To gracefully catch errors that are generated by functions and class methods, or by the Python interpreter itself, use the `try` and `except` statements: try: normal code goes here except: code for error handling goes here this code is not executed unless the code above generated an errorFor example:
###Code
try:
    print("test")
    # generate an error: the variable test is not defined
    print(test)
except:
    print("Caught an exception")
###Output
_____no_output_____
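###Markdown
A runnable sketch of the raise-inside-a-function pattern described above (the function name and the validity check are purely illustrative):
###Code
def my_function(x):
    # abort the function when the argument is invalid
    if x < 0:
        raise Exception("Invalid arguments")
    return x ** 0.5

try:
    my_function(-1)
except Exception as e:
    print("Caught an exception:" + str(e))
###Output
_____no_output_____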
###Markdown
To get information about the error, we can access the `Exception` class instance that describes the exception by using for example: except Exception as e:
###Code
try:
    print("test")
    # generate an error: the variable test is not defined
    print(test)
except Exception as e:
    print("Caught an exception:" + str(e))
###Output
_____no_output_____ |
notebooks/tutorial_recurrenceNetwork.ipynb | ###Markdown
Turorial for creating and manipulating a recurrence network with pyunicorn Analysis of complex networksSo far, analysis of complex networks in different scientific fields, has been performed by the study of the adjacency matrix. *pyunicorn* suggests a new approach by studying complex networks through a time-series based approach: **Recurrent plot (RP)**Aim: bridging complex network theory and recurrence analysisFrom NWarman: In this letter, we demonstrate that the recurrence matrix (analogously to[31]) can be considered as the adjacency matrix of an undirected, unweighted network, allowing us to study time series using a complex network approachComplex network statistics is helpful to characterise the local and global properties of a network. WeWhat is a recurrence network?A state at time i (red dot) is recurrent at another time j (black dot) when the phase space trajectory visits its close neighborhood (gray circle). his is marked by value 1 in the recurrence matrix at (i, j). States outside of this neighborhood (small red circle) are marked with 0 in the recurrence matrix. For Literature review see : https://www.researchgate.net/figure/Basic-concepts-beyond-recurrence-plots-and-the-resulting-recurrence-networks-exemplified_fig1_47557940 .
###Code
import numpy as np
# note: the original relative imports only work from inside the pyunicorn package itself;
# the package-level imports below are assumed to match the installed pyunicorn layout
from pyunicorn.core import Network
from pyunicorn.timeseries import RecurrencePlot
###Output
_____no_output_____ |
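###Markdown
A minimal sketch of building a recurrence network from a toy time series. This is an assumption-laden example: it assumes the public pyunicorn API (`pyunicorn.timeseries.RecurrenceNetwork`) with time-delay embedding parameters `dim` and `tau` and a fixed `recurrence_rate`; argument names may differ between pyunicorn versions.
###Code
import numpy as np
# assumed public import path; adjust to the installed pyunicorn version
from pyunicorn.timeseries import RecurrenceNetwork

# toy time series (illustrative only)
t = np.linspace(0, 10 * np.pi, 500)
x = np.sin(t)

# recurrence network with a fixed recurrence rate (embedding values are placeholders)
rn = RecurrenceNetwork(x, dim=2, tau=10, recurrence_rate=0.05)
print("link density:", rn.link_density())
###Output
_____no_output_____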
ClassMaterial/09 - Oracles/09 - code/09.3_WSC_Oracle_update.ipynb | ###Markdown
A stateless oracle (3): keeping the oracle up to date 09.3 Winter School on Smart Contracts Peter Gruber ([email protected])2022-02-15* Part 3: The transactions that keep the oracle up to date* Parts 1-4 are only relevant if you want to **create** an Oracle* Only parts 5-6 are needed to **use** the oracle.**Note** that these transactions will typically run in regular intervals (every 5 min) on a Linux server, using the `cron` service of Linux. You can, however, run the manually to see what happens. SetupSee notebook 04.1, the lines below will always automatically load functions in `algo_util.py`, the five accounts and the Purestake credentials
###Code
# Loading shared code and credentials
import sys, os
codepath = '..'+os.path.sep+'..'+os.path.sep+'sharedCode'
sys.path.append(codepath)
from algo_util import *
cred = load_credentials()
# Load additional oracle accounts
cred_oracle = load_credentials('credentials_oracle')
Price = cred_oracle['Price']
Reserve = cred_oracle['Reserve']
oracle_id = cred_oracle['oracle_id']
from algosdk import account, mnemonic
from algosdk.v2client import algod
from algosdk.future import transaction
from algosdk.future.transaction import Multisig
from algosdk.future.transaction import PaymentTxn, MultisigTransaction
from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn, AssetFreezeTxn
from algosdk.future.transaction import LogicSig
import algosdk.error
import json
import base64
import hashlib
from pyteal import *
# Initialize the algod client (Testnet or Mainnet)
algod_client = algod.AlgodClient(algod_token='', algod_address=cred['algod_test'], headers=cred['purestake_token'])
import json
import requests
import pandas as pd
import numpy as np
import time
from pycoingecko import CoinGeckoAPI
cg = CoinGeckoAPI()
###Output
_____no_output_____
###Markdown
Get information about the oracle coin
###Code
print('https://testnet.algoexplorer.io/asset/{}'.format(oracle_id))
###Output
https://testnet.algoexplorer.io/asset/77534697
###Markdown
Transfer coins as a function of price and holdings* This is the code that needs to be deployed on the remote server
###Code
# get current price
price_info = cg.get_price(ids='algorand', vs_currencies='usd')
usdalgo = price_info['algorand']['usd']
print(usdalgo)
# get current holdings
holdings_Price = asset_holdings(algod_client, Price['public'])
oracle_Price = [holding['amount'] for holding in holdings_Price if holding['unit']=='USDALGO'][0]
oracle_Price = int(1e6*oracle_Price)
holdings_Reserve = asset_holdings(algod_client, Reserve['public'])
oracle_Reserve = [holding['amount'] for holding in holdings_Reserve if holding['unit']=='USDALGO'][0]
oracle_Reserve = int(1e6*oracle_Reserve)
print(usdalgo)
print(oracle_Price)
print(oracle_Reserve)
holdings_oracle = int(usdalgo*1e6) # this is how many coins Price *should* hold
# make transfers
if holdings_oracle != oracle_Price:
    # A transaction is needed
    if holdings_oracle > oracle_Price:
        # Price does not have enough coins
        # Reserve needs to transfer to Price
        amt = int(holdings_oracle-oracle_Price)
        sender = Reserve
        receiver = Price
    else:
        # Price has too many coins
        # Price needs to transfer to Reserve
        amt = int(oracle_Price-holdings_oracle)
        sender = Price
        receiver = Reserve

    # === transfer TXN (must be a multisig!!) ===
    # Step 1: prepare
    sp = algod_client.suggested_params()
    txn = AssetTransferTxn(
        sender = sender['public'],
        sp=sp,
        receiver=receiver['public'],
        amt=amt,
        index=oracle_id
    )
    # Step 2+3: Sign + send
    stxn = txn.sign(sender['private'])
    txid = algod_client.send_transaction(stxn)
    # Step 4: Wait for confirmation
    txinfo = wait_for_confirmation(algod_client, txid)
###Output
Current round is 20463830.
Waiting for round 20463830 to finish.
Waiting for round 20463831 to finish.
Transaction 4RZB4EM5LSPPJVGAB2DZIPXYZ5Z2V2HGJGMATILDRSBRFSCBYU3Q confirmed in round 20463832.
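###Markdown
As a sketch of an alternative to cron (purely illustrative and not part of the original course material), the update steps above could be wrapped in a function and run in a simple polling loop. Here `update_oracle()` is a hypothetical wrapper around the price/holdings/transfer cells above:
###Code
import time

def run_forever(interval_seconds=300):
    # illustrative only: call a hypothetical update_oracle() every 5 minutes
    while True:
        try:
            update_oracle()          # hypothetical wrapper around the cells above
        except Exception as e:
            print("Update failed:", e)
        time.sleep(interval_seconds)
###Output
_____no_output_____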
|
pumpingtest_benchmarks/slug4_dawsonville.ipynb | ###Markdown
4. Slug test for confined aquifer - Dawsonville Example**This test is taken from example of MLU.** Introduction and Conceptual ModelIn this notebook, we reproduce the work of Yang (2020) to check the TTim performance in analysing slug-test. We later compare the solution in TTim with the MLU model (Carlson & Randall, 2012).This Slug Test was reported in Cooper Jr et al. (1967), and it was performed in Dawsonville, Georgia, USA. A fully penetrated well (Ln-2) is screened in a confined aquifer, located between depths 24 and 122 (98 m thick).The volume of the slug is 10.16 litres. Head change has been recorded at the slug well. Both the well and the casing radii of the slug well is 0.076 m.The conceptual model can be seen in the figure below:
###Code
import matplotlib.pyplot as plt
import numpy as np
##Now printing the conceptual model figure:
fig = plt.figure(figsize=(14, 9))
ax = fig.add_subplot(1,1,1)
#sky
sky = plt.Rectangle((-5,0), width = 15, height = 10, fc = 'b', zorder=0, alpha=0.1)
ax.add_patch(sky)
#Aquifer:
ground = plt.Rectangle((-5,-122), width = 15, height = 98, fc = np.array([209,179,127])/255, zorder=0, alpha=0.9)
ax.add_patch(ground)
well = plt.Rectangle((-0.5,-(122)), width = 1, height = 122, fc = np.array([200,200,200])/255, zorder=1)
ax.add_patch(well)
#Confining Unit
conf = plt.Rectangle((-5,-24), width = 15, height = 24, fc = np.array([100,100,100])/255, zorder=0, alpha=0.9)
ax.add_patch(conf)
#Wellhead
wellhead = plt.Rectangle((-0.6,0),width = 1.2, height = 4, fc = np.array([200,200,200])/255, zorder=2, ec='k')
ax.add_patch(wellhead)
#Screen for the well:
screen = plt.Rectangle((-0.5,-(122)), width = 1, height = 98, fc = np.array([200,200,200])/255, alpha=1, zorder = 2, ec = "k", ls = '--')
screen.set_linewidth(2)
ax.add_patch(screen)
#pumping_arrow = plt.Arrow(x = 1,y = 1.5, dx = 0, dy = 1, color = "#00035b")
#ax.add_patch(pumping_arrow)
ax.text(x = 1, y = 2.5, s = r'$ Q = 10.16 L $', fontsize = 'large' )
#last line
line = plt.Line2D(xdata= [-200,1200], ydata = [0,0], color = "k")
ax.add_line(line)
#Water table
#wt = plt.Line2D(xdata= [-200,1200], ydata = [0,0], color = "b")
#ax.add_line(wt)
ax.text(0.6,-35, s = "Ln-2", fontsize = 'large')
#ax.text(6.9, -0.5, "Ln-3", fontsize = 'large')
ax.set_xlim([-5,10])
ax.set_ylim([-122,10])
ax.set_xlabel('Distance [m]')
ax.set_ylabel('Relative height [m]')
ax.set_title('Conceptual Model - Dawsonville Example');
###Output
_____no_output_____
###Markdown
Step 1. Load required libraries
###Code
from ttim import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Step 2. Set basic parameters
###Code
b = 98 #aquifer thickness
zt = -24
zb = zt - b
rw = 0.076 #well radius of Ln-2 Well
rc = 0.076 #casing radius of Ln-2 Well
Q = 0.01016 #slug volume in m^3
###Output
_____no_output_____
###Markdown
Step 3. Load dataData for the Dawsonville test is available in a text file, where the first column is the time data, in days and in the second column is the head displacement in meters
###Code
data = np.loadtxt('data/dawsonville_slug.txt')
t = data[:, 0]
h = data[:, 1]
###Output
_____no_output_____
###Markdown
Step 4. Create First Model - single layerWe begin with a single layer model built in ModelMaq.Details on setting up the model can be seen in: [Confined 1 - Oude Korendijk](confined1_oude_korendijk.ipynb).The slug well is set accordingly. Details on setting up the ```Well``` object can be seen in: [Slug 1 - Pratt County](slug1_pratt_county.ipynb).
###Code
ml = ModelMaq(kaq=10, z=[zt, zb], Saq=1e-4, tmin=1e-6, tmax=1e-3, topboundary='conf')
w = Well(ml, xw=0, yw=0, rw=rw, rc=rc, tsandQ=[(0, -Q)], layers=0, wbstype='slug')
ml.solve()
###Output
self.neq 1
solution complete
###Markdown
Step 5. Model calibration both simultaneous wellsThe procedures for calibration can be seen in [Unconfined 1 - Vennebulten](unconfined1_vennebulten.ipynb)We calibrate hydraulic conductivity and specific storage, as in the KGS model (Hyder et al. 1994).
###Code
#unknown parameters: kay, Saq
ca = Calibrate(ml)
ca.set_parameter(name='kaq0', initial=10, pmin=0)
ca.set_parameter(name='Saq0', initial=1e-4)
ca.series(name='obs', x=0, y=0, layer=0, t=t, h=h)
ca.fit(report=True)
display(ca.parameters)
print('rmse:', ca.rmse())
hm = ml.head(0, 0, t)
plt.figure(figsize=(8, 5))
plt.semilogx(t, h, '.', label='obs')
plt.semilogx(t, hm[0], label='ttim')
plt.xlabel('time [d]')
plt.ylabel('displacement [m]')
plt.title('Model Results - Single-layer model')
plt.legend();
###Output
_____no_output_____
###Markdown
In general, the single-layer model seems to be performing well, with a good visual fit between observations and the model. Step 6. Create Second Model - multi-layer modelTo investigate whether we need to account for the vertical flow component or not, we will create a multi-layer model. Consequently, we divide the previous aquifer into 49 layers (2 m thick each).
###Code
nlay = 49 #number of layers
zlayers = np.linspace(zt, zb, nlay + 1) #elevation of each layer
Saq = 1e-4 * np.ones(nlay)
###Output
_____no_output_____
###Markdown
Now we use the ```Model3D``` object to model multi-layer aquifer:Details on how to set it up can be seen in the notebook: [Unconfined - 1 - Vennebulten](unconfined1_vennebulten.ipynb)
###Code
ml_1 = Model3D(kaq=10, z=zlayers, Saq=Saq, tmin=1e-6, tmax=1e-3, phreatictop=False)
w_1 = Well(ml_1, xw=0, yw=0, rw=rw, rc=rc, tsandQ=[(0, -Q)], layers=range(nlay), \
wbstype='slug')
ml_1.solve()
###Output
self.neq 49
solution complete
###Markdown
Step 7. Calibration of multi-layer model
###Code
ca_1 = Calibrate(ml_1)
ca_1.set_parameter(name='kaq0_48', initial=10, pmin=0)
ca_1.set_parameter(name='Saq0_48', initial=1e-4)
ca_1.series(name='obs', x=0, y=0, layer=range(nlay), t=t, h=h)
ca_1.fit(report=True)
display(ca_1.parameters)
print('RMSE:', ca_1.rmse())
###Output
_____no_output_____
###Markdown
The multi-layer model does not improve the calibration by much.
###Code
hm_1 = ml_1.head(0, 0, t)
plt.figure(figsize=(8, 5))
plt.semilogx(t, h, '.', label='obs')
plt.semilogx(t, hm_1[0], label='ttim')
plt.xlabel('time [d]')
plt.ylabel('displacement [m]')
plt.title('Model Results - Multi-layer model')
plt.legend();
###Output
_____no_output_____
###Markdown
Step 8. Final Model calibration with well skin resistanceNow we test if the skin resistance of the well has an impact on model calibration. We thus add the ```res``` parameter in the calibration settings. We use the same multi-layer model.
###Code
ca_2 = Calibrate(ml_1)
ca_2.set_parameter(name='kaq0_48', initial=10, pmin=0)
ca_2.set_parameter(name='Saq0_48', initial=1e-4, pmin = 1e-7)
ca_2.set_parameter_by_reference(name='res', parameter=w_1.res, initial=0.1, pmin = 0)
ca_2.series(name='obs', x=0, y=0, layer=range(nlay), t=t, h=h)
ca_2.fit(report=True)
display(ca_2.parameters)
print('RMSE:', ca_2.rmse())
hm_2 = ml_1.head(0, 0, t)
plt.figure(figsize=(8, 5))
plt.semilogx(t, h, '.', label='obs')
plt.semilogx(t, hm_2[0], label='ttim')
plt.xlabel('time [d]')
plt.ylabel('displacement [m]')
plt.title('Model Results - Multi-layer with res')
plt.legend();
###Output
_____no_output_____
###Markdown
Adding resistance of the well screen does not improve the performance. Thus, res should not be applied in the conceptual model. Step 9. Analysis and comparison of simulated values. We now compare the values estimated in TTim and add the results of the modelling done in MLU by Yang (2020).
###Code
ta = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]'], \
index = ['MLU', 'ttim', 'ttim-multilayer', 'ttim-res'])
tr = np.delete(ca_2.parameters['optimal'].values, 2)
ta.loc['MLU'] = [0.4133, 1.9388E-05]
ta.loc['ttim'] = ca.parameters['optimal'].values
ta.loc['ttim-multilayer'] = ca_1.parameters['optimal'].values
ta.loc['ttim-res'] = tr
ta['RMSE'] = [0.004264, ca.rmse(), ca_1.rmse(), ca_2.rmse()]
ta.style.set_caption('Comparison of parameter values and error under different models')
###Output
_____no_output_____ |
PandasASS.ipynb | ###Markdown
Player Count Display the total number of players
###Code
# Total number of players (df is assumed to already hold the purchase data loaded in an earlier cell)
sum_of_players = len(df["SN"].unique())
print(sum_of_players)
###Output
576
###Markdown
Purchasing Analysis (Total). Run basic calculations to obtain number of unique items, average price, etc. Create a summary data frame to hold the results. Optional: give the displayed data cleaner formatting. Display the summary data frame.
###Code
# Unique number of items, average price, number of purchases, and total revenue
unique_Items = len(df["Item ID"].unique())
avg_price = df["Price"].mean()
Number_of_Purchases = len(df["Purchase ID"].unique())
Total_Revenue = df["Price"].sum()
print("Unique Item",unique_Items)
print("Average Price",avg_price)
print("Number of Purchases",Number_of_Purchases)
print("Total Revenue",Total_Revenue)
# Creating a summary DataFrame to hold the results
# (use a new name so the raw purchase data in df is not overwritten)
summary_df = pd.DataFrame({
    "Unique_Item": [unique_Items],
    "Average Price": [f"${avg_price:.2f}"],
    "Number_of_Purchase": [Number_of_Purchases],
    "Total Revenue": [f"${Total_Revenue:,.2f}"]
})
summary_df
###Output
_____no_output_____
###Markdown
Gender Demographics: Percentage and Count of Male Players, Percentage and Count of Female Players, Percentage and Count of Other / Non-Disclosed.
###Code
#MALE
#sum_of_players = len(df["SN"].unique())
Male1 = df.groupby(["Gender"]).get_group(('Male'))
Male_Count = len(Male1["SN"].unique())
MalePercentage = round((Male_Count / sum_of_players) *100,2)
# FEMALE
Female1 = df.groupby(["Gender"]).get_group('Female')
Female_Count = len(Female1["SN"].unique())
FemalePercentage = round((len(Female1["SN"].unique()) / sum_of_players) * 100,2)
#OTHER
N_Disclose = df.groupby(["Gender"]).get_group('Other / Non-Disclosed')
otherNDisclosed_Count = len(N_Disclose["SN"].unique())
ONDPercentage = round((otherNDisclosed_Count / sum_of_players) * 100,2)
# Gender Table
Gender_Table = {
'Percent of Players':[MalePercentage,FemalePercentage,ONDPercentage ],
'Gender':["Male","Female","Other"],
'Total Count':[Male_Count,Female_Count,otherNDisclosed_Count]}
Gender_Table = pd.DataFrame(Gender_Table)
Gender_Table = Gender_Table.set_index('Gender')
Gender_Table = Gender_Table [['Total Count','Percent of Players']]
Gender_Table
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Gender). Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person, etc. by gender. Create a summary data frame to hold the results. Optional: give the displayed data cleaner formatting. Display the summary data frame.
###Code
#PURCHASE COUNT
FemalePurchaseCount = len(Female1 )
MalePurchaseCount = len(Male1)
OtherPurchaseCount = len(N_Disclose)
#AVERAGE PURCHASE PRICE
FemaleAvgPrice = round((Female1["Price"].sum())/len(Female1["Price"]),2)
MaleAvgPrice = round((Male1["Price"].sum())/len(Male1["Price"]),2)
OtherAvgPrice= round((N_Disclose["Price"].sum())/len(N_Disclose["Price"]),2)
#TOTAL PURCHASE VALUE
MaleTotalPurchase = round (Male1["Price"].sum() ,2)
FemaleTotalPurchase = round(Female1["Price"].sum(),2)
OtherTotalPurchase = round (N_Disclose["Price"].sum(),2)
# Normalised totals: average total purchase per unique player
# male/female/other
NorMale = round((MaleTotalPurchase / Male_Count), 2)
NorFemale = round((FemaleTotalPurchase / Female_Count), 2)
NorOther = round((OtherTotalPurchase / otherNDisclosed_Count), 2)
# SUMMARY TABLE
PByGender = {"Purchase Count":[FemalePurchaseCount,MalePurchaseCount,OtherPurchaseCount],
"Gender":["Male","Female","Other"],
"Average Purchase Price":[MaleAvgPrice,FemaleAvgPrice,OtherAvgPrice],
"Total Purchase Value":[MaleTotalPurchase,FemaleTotalPurchase,OtherTotalPurchase],
"Avg Total Purchase Per Person":[NorMale,NorFemale,NorOther]}
PByGender = pd.DataFrame(PByGender)
PByGender = PByGender1.set_index('Gender')
PByGender= PByGender[['Purchase Count',
'Average Purchase Price',
'Total Purchase Value',
'Avg Total Purchase Per Person']]
PByGender
###Output
_____no_output_____
###Markdown
Age Demographics. Establish bins for ages. Categorize the existing players using the age bins (hint: use pd.cut()). Calculate the numbers and percentages by age group. Create a summary data frame to hold the results. Optional: round the percentage column to two decimal points. Display the Age Demographics table.
###Code
# Bin edges chosen to match the labels below
bins = [0, 9, 14, 19, 24, 29, 34, 39, 99]
Age_labels = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","Over 40"]
# Replace the numeric ages with their age-group labels
df["Age"] = pd.cut(df["Age"], bins, labels=Age_labels)
Age_group = df.groupby("Age")
Total_Age_Count = Age_group["SN"].nunique()
print(Total_Age_Count)
Percentage_Age = (Total_Age_Count / len(df["SN"].unique())) * 100
print("Percentage_Age", Percentage_Age)
# Summary data frame by age group
AgeDem = pd.DataFrame({"Total Player Count": Total_Age_Count,
                       "Percentage Of Players": Percentage_Age.round(2)})
AgeDem.index.name = "Age Summary"
AgeDem
###Output
Age
7 7
8 6
9 4
10 7
11 6
12 4
13 3
14 2
15 26
16 24
17 19
18 21
19 17
20 69
21 43
22 49
23 49
24 48
25 43
26 11
27 9
28 4
29 10
30 25
31 5
32 6
33 9
34 7
35 10
36 5
37 5
38 5
39 6
40 5
41 2
42 1
43 1
44 2
45 1
Name: SN, dtype: int64
Percentage_Age Age
7 1.215278
8 1.041667
9 0.694444
10 1.215278
11 1.041667
12 0.694444
13 0.520833
14 0.347222
15 4.513889
16 4.166667
17 3.298611
18 3.645833
19 2.951389
20 11.979167
21 7.465278
22 8.506944
23 8.506944
24 8.333333
25 7.465278
26 1.909722
27 1.562500
28 0.694444
29 1.736111
30 4.340278
31 0.868056
32 1.041667
33 1.562500
34 1.215278
35 1.736111
36 0.868056
37 0.868056
38 0.868056
39 1.041667
40 0.868056
41 0.347222
42 0.173611
43 0.173611
44 0.347222
45 0.173611
Name: SN, dtype: float64
###Markdown
Purchasing Analysis (Age). Bin the purchase_data data frame by age. Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person, etc. in the table below. Create a summary data frame to hold the results. Optional: give the displayed data cleaner formatting. Display the summary data frame.
###Code
Pur_Count = Age_group ["Purchase ID"].count()
#print("Pur_Count",Pur_Count)
Age_group = df.groupby("Age")
Avg_Pur_price = df.groupby("Age")["Price"].mean()
#print("Avg_Pur_price",Avg_Pur_price)
TOT_PurchV = Age_group["Price"].sum()
#print("TOT_PurchV",TOT_PurchV)
AvgTOT_PPerson = TOT_PurchV / Pur_Count
#print("AvgTOT_PPerson",AvgTOT_PPerson)
purchasing_analysis_pd = pd.DataFrame({"Purchase Count": Pur_Count,
"Average Purchase Price": Avg_Pur_price,
"Total Purchase Value": TOT_PurchV,
"Avg Total Purchase per Person":AvgTOT_PPerson
})
purchasing_analysis_pd
purchasing_analysis_pd["Total Purchase Value"]= purchasing_analysis_pd["Total Purchase Value"].map("${:.2f}".format)
purchasing_analysis_pd["Avg Total Purchase per Person"]= purchasing_analysis_pd["Avg Total Purchase per Person"].map("${:.2f}".format)
purchasing_analysis_pd
###Output
_____no_output_____
###Markdown
Top Spenders. Run basic calculations to obtain the results in the table below. Create a summary data frame to hold the results. Sort the total purchase value column in descending order. Optional: give the displayed data cleaner formatting. Display a preview of the summary data frame.
###Code
#Top Spenders
#SN
SN = df.groupby(df["SN"])
ScreenName = SN["SN"].unique()
#Purchase Count
SNCount = SN['Age'].count()
#Average Purchase Price
SNAverage = round(SN['Price'].mean(),2)
#Total Purchase Value
SNTotal = SN['Price'].sum()
TopSpend = {"SN":ScreenName,"Purchase Count":SNCount,
"Average Purchase Price":SNAverage,"Total Purchase Value":SNTotal}
TopSpend1= pd.DataFrame(TopSpend)
TopSpend1= TopSpend1.set_index('SN')
TopSpend1 = TopSpend1.sort_values("Total Purchase Value",ascending=False)
TopSpend1 = TopSpend1[['Purchase Count', 'Average Purchase Price', 'Total Purchase Value']]
TopSpend1.iloc[:5]
###Output
_____no_output_____
###Markdown
Most Popular Items. Retrieve the Item ID, Item Name, and Item Price columns. Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value. Create a summary data frame to hold the results. Sort the purchase count column in descending order. Optional: give the displayed data cleaner formatting. Display a preview of the summary data frame.
###Code
#Item ID
ItemId = df.groupby(df['Item ID'])
Items = ItemId['Item ID'].unique()
#Item Name
ItemName = ItemId["Item Name"].unique()
#Purchase Count
ItemPurCount = ItemId['Age'].count()
#Item Price
ItemPrice= ItemId['Price'].unique()
#Total Purchase Value
ItemTotalPurchase = ItemId['Price'].sum()
ItemTable = {'Item ID':Items,'Item Name':ItemName,'Item Price':ItemPrice,'Item Count':ItemPurCount,'Total Purchase':ItemTotalPurchase}
ItemTable1 = pd.DataFrame(ItemTable)
ItemTable1 = ItemTable1.set_index('Item ID')
ItemTable1= ItemTable1.sort_values('Item Count', ascending=False)
ItemTable1 = ItemTable1[['Item Name','Item Count','Item Price','Total Purchase']]
ItemTable1.iloc[:5]
###Output
_____no_output_____
###Markdown
Most Profitable Items. Sort the above table by total purchase value in descending order. Optional: give the displayed data cleaner formatting. Display a preview of the data frame.
###Code
#Most Profitable Items
#Item ID
#Item Name
#Purchase Count
#Item Price
#Total Purchase Value
MostProfit= ItemTable1.sort_values('Total Purchase', ascending=False)
MostProfit[:5]
###Output
_____no_output_____ |
tutorials/Activity Answers.ipynb | ###Markdown
Answers ACTIVITY 2 In the plot above, the continents 'Antarctica' and 'Seven seas (open ocean)' appear to skew the map a bit. Try to remove these two continents from the data that you call the "plot" method on. Hint: earlier we combined two filters (see "cell 7"); you can use the same approach with the '!=' operator.
###Code
# Geopandas Activity 2 Answer
countries[(countries.continent != 'Antarctica') & (countries.continent != 'Seven seas (open ocean)')].plot(figsize=(12,5), cmap='Set1', column='gdp_per_cap', legend=True)
###Output
_____no_output_____
###Markdown
--- ACTIVITY 5 Using the 'mag' column, filter the earthquakes and display only the magnitudes greater than, say, 5, or whatever number you like.
###Code
gdf_quakes[gdf_quakes.mag > 4.5].plot(ax=ax1, color='r');
###Output
_____no_output_____ |
notebooks/export_GeoJSON_counties.ipynb | ###Markdown
Generate GeoJSON files from [US Census Bureau data](https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html). License: Apache 2
###Code
!pip install --quiet kml2geojson
import io
import copy
import json
import urllib.request
import xml.dom.minidom
import zipfile
import kml2geojson
import lxml.etree
def parse_broken_kml(contents):
fixing_tree = lxml.etree.fromstring(
contents, parser=lxml.etree.XMLParser(recover=True))
tree = xml.dom.minidom.parseString(lxml.etree.tostring(fixing_tree))
return kml2geojson.build_layers(tree)
def read_2018_census_kml(filename):
url = "https://www2.census.gov/geo/tiger/GENZ2018/kml/" + filename + ".zip"
with urllib.request.urlopen(url) as infile:
buffer = io.BytesIO(infile.read())
return zipfile.ZipFile(buffer).read(filename + ".kml")
raw_states = parse_broken_kml(read_2018_census_kml("cb_2018_us_state_20m"))[0]
states = copy.deepcopy(raw_states)
for entry in states["features"]:
props = entry["properties"]
props.pop("ALAND")
props.pop("AWATER")
props["is_a_state"] = True
props["state_id"] = int(entry["properties"].pop("GEOID"))
props["name"] = props.pop("NAME")
props.pop("STATEFP")
props.pop("STATENS")
props.pop("LSAD")
props.pop("description")
props.pop("styleUrl")
raw_counties = parse_broken_kml(read_2018_census_kml("cb_2018_us_county_20m"))[0]
counties = copy.deepcopy(raw_counties)
for entry in counties["features"]:
props = entry["properties"]
props.pop("ALAND")
props.pop("AWATER")
props.pop("COUNTYFP")
props.pop("COUNTYNS")
props["fips_id"] = int(props.pop("GEOID"))
props.pop("LSAD")
props["name"] = props.pop("NAME")
props["state_id"] = int(props.pop("STATEFP"))
props.pop("description")
props.pop("styleUrl")
props["is_a_state"] = False
state_names = {entry["properties"]["state_id"]: entry["properties"]["name"]
for entry in states["features"]}
with open("counties.json", "w") as outfile:
json.dump(counties, outfile)
with open("states.json", "w") as outfile:
json.dump(states, outfile)
with open("state_names.json", "w") as outfile:
json.dump(state_names, outfile)
!tar -czvf json_us_geography.tar.gz counties.json states.json state_names.json
###Output
counties.json
states.json
state_names.json
|
01-Lesson-Plans/16-Project-3-and-R/1/Activities/06_Stu_Tibble/Resources/PyCitySchools_Solution.ipynb | ###Markdown
PyCity Schools Analysis* As a whole, schools with higher budgets did not yield better test results. By contrast, schools with higher spending per student (\$645-675) actually underperformed compared to schools with smaller budgets (<\$585 per student).* As a whole, small and medium sized schools dramatically out-performed large sized schools on passing math performance (89-91% passing vs 67%).* As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis is required to determine whether the effect is due to school practices or to the fact that charter schools tend to serve smaller student populations per school. ---
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "raw_data/schools_complete.csv"
student_data_to_load = "raw_data/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
###Output
_____no_output_____
###Markdown
District Summary
###Code
# Calculate the Totals (Schools and Students)
school_count = len(school_data_complete["school_name"].unique())
student_count = school_data_complete["Student ID"].count()
# Calculate the Total Budget
total_budget = school_data["budget"].sum()
# Calculate the Average Scores
average_math_score = school_data_complete["math_score"].mean()
average_reading_score = school_data_complete["reading_score"].mean()
overall_passing_rate = (average_math_score + average_reading_score) / 2
# Calculate the Percentage Pass Rates
passing_math_count = school_data_complete[(school_data_complete["math_score"] > 70)].count()["student_name"]
passing_math_percentage = passing_math_count / float(student_count) * 100
passing_reading_count = school_data_complete[(school_data_complete["reading_score"] > 70)].count()["student_name"]
passing_reading_percentage = passing_reading_count / float(student_count) * 100
# Minor Data Cleanup
district_summary = pd.DataFrame({"Total Schools": [school_count],
"Total Students": [student_count],
"Total Budget": [total_budget],
"Average Math Score": [average_math_score],
"Average Reading Score": [average_reading_score],
"% Passing Math": [passing_math_percentage],
"% Passing Reading": [passing_reading_percentage],
"% Overall Passing Rate": [overall_passing_rate]})
district_summary = district_summary[["Total Schools", "Total Students", "Total Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing Rate"]]
district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format)
district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,.2f}".format)
# Display the data frame
district_summary
###Output
_____no_output_____
###Markdown
School Summary
###Code
# Determine the School Type
school_types = school_data.set_index(["school_name"])["type"]
# Calculate the total student count
per_school_counts = school_data_complete["school_name"].value_counts()
# Calculate the total school budget and per capita spending
per_school_budget = school_data_complete.groupby(["school_name"]).mean()["budget"]
per_school_capita = per_school_budget / per_school_counts
# Calculate the average test scores
per_school_math = school_data_complete.groupby(["school_name"]).mean()["math_score"]
per_school_reading = school_data_complete.groupby(["school_name"]).mean()["reading_score"]
# Calculate the passing scores by creating a filtered data frame
school_passing_math = school_data_complete[(school_data_complete["math_score"] > 70)]
school_passing_reading = school_data_complete[(school_data_complete["reading_score"] > 70)]
per_school_passing_math = school_passing_math.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
overall_passing_rate = (per_school_passing_math + per_school_passing_reading) / 2
# Convert to data frame
per_school_summary = pd.DataFrame({"School Type": school_types,
"Total Students": per_school_counts,
"Total School Budget": per_school_budget,
"Per Student Budget": per_school_capita,
"Average Math Score": per_school_math,
"Average Reading Score": per_school_reading,
"% Passing Math": per_school_passing_math,
"% Passing Reading": per_school_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data wrangling
per_school_summary = per_school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
per_school_summary["Total School Budget"] = per_school_summary["Total Students"].map("${:,.2f}".format)
per_school_summary["Per Student Budget"] = per_school_summary["Per Student Budget"].map("${:,.2f}".format)
# Display the data frame
per_school_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate)
###Code
# Sort and show top five schools
top_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=False)
top_schools.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate)
###Code
# Sort and show bottom five schools
bottom_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=True)
bottom_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade
###Code
# Create data series of scores by grade levels using conditionals
nineth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
# Group each by school name
nineth_graders_scores = nineth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
# Combine series into single data frame
scores_by_grade = pd.DataFrame({"9th": nineth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# Minor data wrangling
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the data frame
scores_by_grade
###Output
_____no_output_____
###Markdown
Reading Score by Grade
###Code
# Create data series of scores by grade levels using conditionals
nineth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
# Group each by school name
nineth_graders_scores = nineth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
# Combine series into single data frame
scores_by_grade = pd.DataFrame({"9th": nineth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# Minor data wrangling
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the data frame
scores_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending
###Code
# Establish the bins
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
# Categorize the spending based on the bins
per_school_summary["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels=group_names)
spending_math_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
spending_reading_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
spending_passing_math = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
spending_passing_reading = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
overall_passing_rate = (spending_math_scores + spending_reading_scores) / 2
# Assemble into data frame
spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores,
"Average Reading Score": spending_reading_scores,
"% Passing Math": spending_passing_math,
"% Passing Reading": spending_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data wrangling
spending_summary = spending_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
spending_summary
###Output
_____no_output_____
###Markdown
Scores by School Size
###Code
# Establish the bins
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Categorize the spending based on the bins
per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=group_names)
# Calculate the scores based on bins
size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"]
size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"]
size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"]
size_passing_reading = per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"]
overall_passing_rate = (size_passing_math + size_passing_reading) / 2
# Assemble into data frame
size_summary = pd.DataFrame({"Average Math Score" : size_math_scores,
"Average Reading Score": size_reading_scores,
"% Passing Math": size_passing_math,
"% Passing Reading": size_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data wrangling
size_summary = size_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
size_summary
###Output
_____no_output_____
###Markdown
Scores by School Type
###Code
# Type | Average Math Score | Average Reading Score | % Passing Math | % Passing Reading | % Overall Passing Rate
type_math_scores = per_school_summary.groupby(["School Type"]).mean()["Average Math Score"]
type_reading_scores = per_school_summary.groupby(["School Type"]).mean()["Average Reading Score"]
type_passing_math = per_school_summary.groupby(["School Type"]).mean()["% Passing Math"]
type_passing_reading = per_school_summary.groupby(["School Type"]).mean()["% Passing Reading"]
overall_passing_rate = (type_passing_math + type_passing_reading) / 2
# Assemble into data frame
type_summary = pd.DataFrame({"Average Math Score" : type_math_scores,
"Average Reading Score": type_reading_scores,
"% Passing Math": type_passing_math,
"% Passing Reading": type_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data wrangling
type_summary = type_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing Rate"]]
# Display results
type_summary
###Output
_____no_output_____ |
Course 3 - Natural Language Processing with Sequence Models/Week 4/C3_W4_Assignment.ipynb | ###Markdown
Assignment 4: Question duplicatesWelcome to the fourth assignment of course 3. In this assignment you will explore Siamese networks applied to natural language processing. You will further explore the fundamentals of Trax and you will be able to implement a more complicated structure using it. By completing this assignment, you will learn how to implement models with different architectures. Outline- [Overview](0)- [Part 1: Importing the Data](1) - [1.1 Loading in the data](1.1) - [1.2 Converting a question to a tensor](1.2) - [1.3 Understanding the iterator](1.3) - [Exercise 01](ex01)- [Part 2: Defining the Siamese model](2) - [2.1 Understanding Siamese Network](2.1) - [Exercise 02](ex02) - [2.2 Hard Negative Mining](2.2) - [Exercise 03](ex03)- [Part 3: Training](3) - [3.1 Training the model](3.1) - [Exercise 04](ex04)- [Part 4: Evaluation](4) - [4.1 Evaluating your siamese network](4.1) - [4.2 Classify](4.2) - [Exercise 05](ex05)- [Part 5: Testing with your own questions](5) - [Exercise 06](ex06)- [On Siamese networks](6) OverviewIn this assignment, concretely you will: - Learn about Siamese networks- Understand how the triplet loss works- Understand how to evaluate accuracy- Use cosine similarity between the model's outputted vectors- Use the data generator to get batches of questions- Predict using your own modelBy now, you are familiar with trax and know how to make use of classes to define your model. We will start this homework by asking you to preprocess the data the same way you did in the previous assignments. After processing the data you will build a classifier that will allow you to identify whether to questions are the same or not. You will process the data first and then pad in a similar way you have done in the previous assignment. Your model will take in the two question embeddings, run them through an LSTM, and then compare the outputs of the two sub networks using cosine similarity. Before taking a deep dive into the model, start by importing the data set. Part 1: Importing the Data 1.1 Loading in the dataYou will be using the Quora question answer dataset to build a model that could identify similar questions. This is a useful task because you don't want to have several versions of the same question posted. Several times when teaching I end up responding to similar questions on piazza, or on other community forums. This data set has been labeled for you. Run the cell below to import some of the packages you will be using.
###Code
import os
import nltk
import trax
from trax import layers as tl
from trax.supervised import training
from trax.fastmath import numpy as fastnp
import numpy as np
import pandas as pd
import random as rnd
# set random seeds
trax.supervised.trainer_lib.init_random_number_generators(34)
rnd.seed(34)
###Output
INFO:tensorflow:tokens_length=568 inputs_length=512 targets_length=114 noise_density=0.15 mean_noise_span_length=3.0
###Markdown
**Notice that for this assignment Trax's numpy is referred to as `fastnp`, while regular numpy is referred to as `np`.** You will now load in the data set. We have done some preprocessing for you. If you have taken the Deep Learning Specialization, this is a slightly different training method than the one you have seen there. If you have not, don't worry about it; we will explain everything.
###Code
data = pd.read_csv("questions.csv")
N=len(data)
print('Number of question pairs: ', N)
data.head()
###Output
Number of question pairs: 404351
###Markdown
We first split the data into a train and test set. The test set will be used later to evaluate our model.
###Code
N_train = 300000
N_test = 10*1024
data_train = data[:N_train]
data_test = data[N_train:N_train+N_test]
print("Train set:", len(data_train), "Test set:", len(data_test))
del(data) # remove to free memory
###Output
Train set: 300000 Test set: 10240
###Markdown
As explained in the lectures, we select only the question pairs that are duplicate to train the model. We build two batches as input for the Siamese network and we assume that question $q1_i$ (question $i$ in the first batch) is a duplicate of $q2_i$ (question $i$ in the second batch), but all other questions in the second batch are not duplicates of $q1_i$. The test set uses the original pairs of questions and the status describing if the questions are duplicates.
###Code
td_index = (data_train['is_duplicate'] == 1).to_numpy()
td_index = [i for i, x in enumerate(td_index) if x]
print('number of duplicate questions: ', len(td_index))
print('indexes of first ten duplicate questions:', td_index[:10])
print(data_train['question1'][5]) # Example of question duplicates (first one in data)
print(data_train['question2'][5])
print('is_duplicate: ', data_train['is_duplicate'][5])
Q1_train_words = np.array(data_train['question1'][td_index])
Q2_train_words = np.array(data_train['question2'][td_index])
Q1_test_words = np.array(data_test['question1'])
Q2_test_words = np.array(data_test['question2'])
y_test = np.array(data_test['is_duplicate'])
###Output
_____no_output_____
###Markdown
Above, you have seen that you only took the duplicated questions to train the model. You did so on purpose, because the data generator will produce batches $([q1_1, q1_2, q1_3, ...]$, $[q2_1, q2_2,q2_3, ...])$ where $q1_i$ and $q2_k$ are duplicates if and only if $i = k$. Let's print to see what your data looks like.
###Code
print('TRAINING QUESTIONS:\n')
print('Question 1: ', Q1_train_words[0])
print('Question 2: ', Q2_train_words[0], '\n')
print('Question 1: ', Q1_train_words[5])
print('Question 2: ', Q2_train_words[5], '\n')
print('TESTING QUESTIONS:\n')
print('Question 1: ', Q1_test_words[0])
print('Question 2: ', Q2_test_words[0], '\n')
print('is_duplicate =', y_test[0], '\n')
###Output
TRAINING QUESTIONS:
Question 1: Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me?
Question 2: I'm a triple Capricorn (Sun, Moon and ascendant in Capricorn) What does this say about me?
Question 1: What would a Trump presidency mean for current international master’s students on an F1 visa?
Question 2: How will a Trump presidency affect the students presently in US or planning to study in US?
TESTING QUESTIONS:
Question 1: How do I prepare for interviews for cse?
Question 2: What is the best way to prepare for cse?
is_duplicate = 0
###Markdown
You will now encode each word of the selected duplicate pairs with an index, so that a question can be represented as a list of numbers. First you tokenize the questions using `nltk.word_tokenize`. You then need a Python default dictionary which later, during inference, assigns the value $0$ to all Out Of Vocabulary (OOV) words.
###Code
#create arrays
Q1_train = np.empty_like(Q1_train_words)
Q2_train = np.empty_like(Q2_train_words)
Q1_test = np.empty_like(Q1_test_words)
Q2_test = np.empty_like(Q2_test_words)
# Building the vocabulary with the train set (this might take a minute)
from collections import defaultdict
vocab = defaultdict(lambda: 0)
vocab['<PAD>'] = 1
for idx in range(len(Q1_train_words)):
Q1_train[idx] = nltk.word_tokenize(Q1_train_words[idx])
Q2_train[idx] = nltk.word_tokenize(Q2_train_words[idx])
q = Q1_train[idx] + Q2_train[idx]
for word in q:
if word not in vocab:
vocab[word] = len(vocab) + 1
print('The length of the vocabulary is: ', len(vocab))
print(vocab['<PAD>'])
print(vocab['Astrology'])
print(vocab['Astronomy']) #not in vocabulary, returns 0
for idx in range(len(Q1_test_words)):
Q1_test[idx] = nltk.word_tokenize(Q1_test_words[idx])
Q2_test[idx] = nltk.word_tokenize(Q2_test_words[idx])
print('Train set has reduced to: ', len(Q1_train) )
print('Test set length: ', len(Q1_test) )
###Output
Train set has reduced to: 111486
Test set length: 10240
###Markdown
1.2 Converting a question to a tensor. You will now convert every question to a tensor, or an array of numbers, using your vocabulary built above.
###Code
# Converting questions to array of integers
for i in range(len(Q1_train)):
Q1_train[i] = [vocab[word] for word in Q1_train[i]]
Q2_train[i] = [vocab[word] for word in Q2_train[i]]
for i in range(len(Q1_test)):
Q1_test[i] = [vocab[word] for word in Q1_test[i]]
Q2_test[i] = [vocab[word] for word in Q2_test[i]]
print('first question in the train set:\n')
print(Q1_train_words[0], '\n')
print('encoded version:')
print(Q1_train[0],'\n')
print('first question in the test set:\n')
print(Q1_test_words[0], '\n')
print('encoded version:')
print(Q1_test[0])
###Output
first question in the train set:
Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me?
encoded version:
[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
first question in the test set:
How do I prepare for interviews for cse?
encoded version:
[32, 38, 4, 107, 65, 1015, 65, 11509, 21]
###Markdown
You will now split your train set into a training/validation set so that you can use it to train and evaluate your Siamese model.
###Code
# Splitting the data
cut_off = int(len(Q1_train)*.8)
train_Q1, train_Q2 = Q1_train[:cut_off], Q2_train[:cut_off]
val_Q1, val_Q2 = Q1_train[cut_off: ], Q2_train[cut_off:]
print('Number of duplicate questions: ', len(Q1_train))
print("The length of the training set is: ", len(train_Q1))
print("The length of the validation set is: ", len(val_Q1))
###Output
Number of duplicate questions: 111486
The length of the training set is: 89188
The length of the validation set is: 22298
###Markdown
1.3 Understanding the iterator Most of the time in Natural Language Processing, and in AI in general, we use batches when training our data sets. If you were to use stochastic gradient descent with one example at a time, it would take forever to build a model. In this example, we show you how you can build a data generator that takes in $Q1$ and $Q2$ and returns a batch of size `batch_size` in the following format $([q1_1, q1_2, q1_3, ...]$, $[q2_1, q2_2,q2_3, ...])$. The tuple consists of two arrays and each array has `batch_size` questions. Again, $q1_i$ and $q2_i$ are duplicates, but they are not duplicates of any other elements in the batch. The command ```next(data_generator)``` returns the next batch. This iterator returns the data in a format that you could directly use in your model when computing the feed-forward pass of your algorithm: a pair of arrays of questions. Exercise 01 **Instructions:** Implement the data generator below. Here are some things you will need. - A while-true loop. - If `idx >= len_Q1`, set `idx` to $0$. - The generator should return shuffled batches of data. To achieve this without modifying the actual question lists, a list containing the indexes of the questions is created. This list can be shuffled and used to get random batches every time the index is reset. - Append elements of $Q1$ and $Q2$ to `input1` and `input2` respectively. - If `len(input1) == batch_size`, determine `max_len` as the longest question in `input1` and `input2`. Ceil `max_len` to a power of $2$ (for computation purposes) using the following command: `max_len = 2**int(np.ceil(np.log2(max_len)))` (a worked example of this padding-length rule is shown below). - Pad every question with `vocab['<PAD>']` until it reaches length `max_len`. - Use yield to return `input1, input2`. - Don't forget to reset `input1, input2` to empty arrays at the end (the data generator resumes from where it last left off).
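For instance, here is a minimal sketch of the power-of-2 padding rule on its own (plain numpy, with a made-up batch length; not part of the graded code):

```python
import numpy as np

max_len = 9                                      # longest question in a hypothetical batch
padded_len = 2 ** int(np.ceil(np.log2(max_len))) # ceil to the next power of 2
print(padded_len)                                # -> 16: every question is padded to length 16
```

So a batch whose longest question has 9 tokens is padded to 16 tokens, the next power of 2.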
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: data_generator
def data_generator(Q1, Q2, batch_size, pad=1, shuffle=True):
"""Generator function that yields batches of data
Args:
Q1 (list): List of transformed (to tensor) questions.
Q2 (list): List of transformed (to tensor) questions.
batch_size (int): Number of elements per batch.
pad (int, optional): Pad character from the vocab. Defaults to 1.
shuffle (bool, optional): If the batches should be randomnized or not. Defaults to True.
Yields:
tuple: Of the form (input1, input2) with types (numpy.ndarray, numpy.ndarray)
NOTE: input1: inputs to your model [q1a, q2a, q3a, ...] i.e. (q1a,q1b) are duplicates
input2: targets to your model [q1b, q2b,q3b, ...] i.e. (q1a,q2i) i!=a are not duplicates
"""
input1 = []
input2 = []
idx = 0
len_q = len(Q1)
question_indexes = [*range(len_q)]
if shuffle:
rnd.shuffle(question_indexes)
### START CODE HERE (Replace instances of 'None' with your code) ###
while True:
if idx >= len_q:
# if idx is greater than or equal to len_q, set idx accordingly
# (Hint: look at the instructions above)
idx = 0
# shuffle to get random batches if shuffle is set to True
if shuffle:
rnd.shuffle(question_indexes)
# get questions at the `question_indexes[idx]` position in Q1 and Q2
        q1 = Q1[question_indexes[idx]]
        q2 = Q2[question_indexes[idx]]
# increment idx by 1
idx += 1
# append q1
input1.append(q1)
# append q2
input2.append(q2)
if len(input1) == batch_size:
# determine max_len as the longest question in input1 & input 2
# Hint: use the `max` function.
# take max of input1 & input2 and then max out of the two of them.
max_len = max(max([len(ques) for ques in input1]), max([len(ques) for ques in input2]))
# pad to power-of-2 (Hint: look at the instructions above)
max_len = 2**int(np.ceil(np.log2(max_len)))
b1 = []
b2 = []
for q1, q2 in zip(input1, input2):
# add [pad] to q1 until it reaches max_len
q1 = q1 + [pad] * (max_len - len(q1))
# add [pad] to q2 until it reaches max_len
q2 = q2 + [pad] * (max_len - len(q2))
# append q1
b1.append(q1)
# append q2
b2.append(q2)
# use b1 and b2
yield np.array(b1), np.array(b2)
### END CODE HERE ###
# reset the batches
input1, input2 = [], [] # reset the batches
batch_size = 2
res1, res2 = next(data_generator(train_Q1, train_Q2, batch_size))
print("First questions : ",'\n', res1, '\n')
print("Second questions : ",'\n', res2)
###Output
First questions :
[[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1
1 1]
[ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1
1 1]]
Second questions :
[[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1
1 1]
[ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1
1 1]]
###Markdown
**Note**: The following expected output is valid only if you run the above test cell **_once_** (first time). The output will change on each execution.If you think your implementation is correct and it is not matching the output, make sure to restart the kernel and run all the cells from the top again. **Expected Output:**```CPPFirst questions : [[ 30 87 78 134 2132 1981 28 78 594 21 1 1 1 1 1 1] [ 30 55 78 3541 1460 28 56 253 21 1 1 1 1 1 1 1]] Second questions : [[ 30 156 78 134 2132 9508 21 1 1 1 1 1 1 1 1 1] [ 30 156 78 3541 1460 131 56 253 21 1 1 1 1 1 1 1]]```Now that you have your generator, you can just call it and it will return tensors which correspond to your questions in the Quora data set.Now you can go ahead and start building your neural network. Part 2: Defining the Siamese model 2.1 Understanding Siamese Network A Siamese network is a neural network which uses the same weights while working in tandem on two different input vectors to compute comparable output vectors.The Siamese network you are about to implement looks like this:You get the question embedding, run it through an LSTM layer, normalize $v_1$ and $v_2$, and finally use a triplet loss (explained below) to get the corresponding cosine similarity for each pair of questions. As usual, you will start by importing the data set. The triplet loss makes use of a baseline (anchor) input that is compared to a positive (truthy) input and a negative (falsy) input. The distance from the baseline (anchor) input to the positive (truthy) input is minimized, and the distance from the baseline (anchor) input to the negative (falsy) input is maximized. In math equations, you are trying to maximize the following.$$\mathcal{L}(A, P, N)=\max \left(\|\mathrm{f}(A)-\mathrm{f}(P)\|^{2}-\|\mathrm{f}(A)-\mathrm{f}(N)\|^{2}+\alpha, 0\right)$$$A$ is the anchor input, for example $q1_1$, $P$ the duplicate input, for example, $q2_1$, and $N$ the negative input (the non duplicate question), for example $q2_2$.$\alpha$ is a margin; you can think about it as a safety net, or by how much you want to push the duplicates from the non duplicates. Exercise 02**Instructions:** Implement the `Siamese` function below. You should be using all the objects explained below. To implement this model, you will be using `trax`. Concretely, you will be using the following functions.- `tl.Serial`: Combinator that applies layers serially (by function composition) allows you set up the overall structure of the feedforward. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.combinators.Serial) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/combinators.pyL26) - You can pass in the layers as arguments to `Serial`, separated by commas. - For example: `tl.Serial(tl.Embeddings(...), tl.Mean(...), tl.Dense(...), tl.LogSoftmax(...))` - `tl.Embedding`: Maps discrete tokens to vectors. It will have shape (vocabulary length X dimension of output vectors). The dimension of output vectors (also called d_feature) is the number of elements in the word embedding. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.core.Embedding) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.pyL113) - `tl.Embedding(vocab_size, d_feature)`. - `vocab_size` is the number of unique words in the given vocabulary. 
- `d_feature` is the number of elements in the word embedding (some choices for a word embedding size range from 150 to 300, for example).- `tl.LSTM` The LSTM layer. It leverages another Trax layer called [`LSTMCell`](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.rnn.LSTMCell). The number of units should be specified and should match the number of elements in the word embedding. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.rnn.LSTM) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/rnn.pyL87) - `tl.LSTM(n_units)` Builds an LSTM layer of n_units. - `tl.Mean`: Computes the mean across a desired axis. Mean uses one tensor axis to form groups of values and replaces each group with the mean value of that group. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.core.Mean) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.pyL276) - `tl.Mean(axis=1)` mean over columns.- `tl.Fn` Layer with no weights that applies the function f, which should be specified using a lambda syntax. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.base.Fn) / [source doce](https://github.com/google/trax/blob/70f5364dcaf6ec11aabbd918e5f5e4b0f5bfb995/trax/layers/base.pyL576) - $x$ -> This is used for cosine similarity. - `tl.Fn('Normalize', lambda x: normalize(x))` Returns a layer with no weights that applies the function `f` - `tl.parallel`: It is a combinator layer (like `Serial`) that applies a list of layers in parallel to its inputs. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmltrax.layers.combinators.Parallel) / [source code](https://github.com/google/trax/blob/37aba571a89a8ad86be76a569d0ec4a46bdd8642/trax/layers/combinators.pyL152)
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: Siamese
def Siamese(vocab_size=len(vocab), d_model=128, mode='train'):
"""Returns a Siamese model.
Args:
vocab_size (int, optional): Length of the vocabulary. Defaults to len(vocab).
d_model (int, optional): Depth of the model. Defaults to 128.
mode (str, optional): 'train', 'eval' or 'predict', predict mode is for fast inference. Defaults to 'train'.
Returns:
trax.layers.combinators.Parallel: A Siamese model.
"""
def normalize(x): # normalizes the vectors to have L2 norm 1
return x / fastnp.sqrt(fastnp.sum(x * x, axis=-1, keepdims=True))
### START CODE HERE (Replace instances of 'None' with your code) ###
q_processor = tl.Serial( # Processor will run on Q1 and Q2.
tl.Embedding(vocab_size=vocab_size, d_feature=d_model), # Embedding layer
tl.LSTM(n_units=d_model), # LSTM layer
tl.Mean(axis=1), # Mean over columns
tl.Fn('Normalize', lambda x: normalize(x)) # Apply normalize function
) # Returns one vector of shape [batch_size, d_model].
### END CODE HERE ###
# Run on Q1 and Q2 in parallel.
model = tl.Parallel(q_processor, q_processor)
return model
###Output
_____no_output_____
###Markdown
Set up the Siamese network model
###Code
# check your model
model = Siamese()
print(model)
###Output
Parallel_in2_out2[
Serial[
Embedding_41699_128
LSTM_128
Mean
Normalize
]
Serial[
Embedding_41699_128
LSTM_128
Mean
Normalize
]
]
###Markdown
**Expected output:** ```CPPParallel_in2_out2[ Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ] Serial[ Embedding_41699_128 LSTM_128 Mean Normalize ]]``` 2.2 Hard Negative MiningYou will now implement the `TripletLoss`.As explained in the lecture, loss is composed of two terms. One term utilizes the mean of all the non duplicates, the second utilizes the *closest negative*. Our loss expression is then: \begin{align} \mathcal{Loss_1(A,P,N)} &=\max \left( -cos(A,P) + mean_{neg} +\alpha, 0\right) \\ \mathcal{Loss_2(A,P,N)} &=\max \left( -cos(A,P) + closest_{neg} +\alpha, 0\right) \\\mathcal{Loss(A,P,N)} &= mean(Loss_1 + Loss_2) \\\end{align}Further, two sets of instructions are provided. The first set provides a brief description of the task. If that set proves insufficient, a more detailed set can be displayed. Exercise 03**Instructions (Brief):** Here is a list of things you should do: - As this will be run inside trax, use `fastnp.xyz` when using any `xyz` numpy function- Use `fastnp.dot` to calculate the similarity matrix $v_1v_2^T$ of dimension `batch_size` x `batch_size`- Take the score of the duplicates on the diagonal `fastnp.diagonal`- Use the `trax` functions `fastnp.eye` and `fastnp.maximum` for the identity matrix and the maximum. More Detailed Instructions We'll describe the algorithm using a detailed example. Below, V1, V2 are the output of the normalization blocks in our model. Here we will use a batch_size of 4 and a d_model of 3. As explained in lecture, the inputs, Q1, Q2 are arranged so that corresponding inputs are duplicates while non-corresponding entries are not. The outputs will have the same pattern.This testcase arranges the outputs, v1,v2, to highlight different scenarios. Here, the first two outputs V1[0], V2[0] match exactly - so the model is generating the same vector for Q1[0] and Q2[0] inputs. The second outputs differ, circled in orange, we set, V2[1] is set to match V2[**2**], simulating a model which is generating very poor results. V1[3] and V2[3] match exactly again while V1[4] and V2[4] are set to be exactly wrong - 180 degrees from each other, circled in blue. The first step is to compute the cosine similarity matrix or `score` in the code. As explained in lecture, this is $$V_1 V_2^T$$ This is generated with `fastnp.dot`.The clever arrangement of inputs creates the data needed for positive *and* negative examples without having to run all pair-wise combinations. Because Q1[n] is a duplicate of only Q2[n], other combinations are explicitly created negative examples or *Hard Negative* examples. The matrix multiplication efficiently produces the cosine similarity of all positive/negative combinations as shown above on the left side of the diagram. 'Positive' are the results of duplicate examples and 'negative' are the results of explicitly created negative examples. The results for our test case are as expected, V1[0]V2[0] match producing '1' while our other 'positive' cases (in green) don't match well, as was arranged. The V2[2] was set to match V1[3] producing a poor match at `score[2,2]` and an undesired 'negative' case of a '1' shown in grey. With the similarity matrix (`score`) we can begin to implement the loss equations. First, we can extract $$cos(A,P)$$ by utilizing `fastnp.diagonal`. The goal is to grab all the green entries in the diagram above. This is `positive` in the code.Next, we will create the *closest_negative*. This is the nonduplicate entry in V2 that is closest (has largest cosine similarity) to an entry in V1. 
Each row, n, of `score` represents all comparisons of the results of Q1[n] vs Q2[x] within a batch. A specific example in our testcase is row `score[2,:]`. It has the cosine similarity of V1[2] and V2[x]. The *closest_negative*, as was arranged, is V2[2] which has a score of 1. This is the maximum value of the 'negative' entries (blue entries in the diagram).To implement this, we need to pick the maximum entry on a row of `score`, ignoring the 'positive'/green entries. To avoid selecting the 'positive'/green entries, we can make them larger negative numbers. Multiply `fastnp.eye(batch_size)` with 2.0 and subtract it out of `scores`. The result is `negative_without_positive`. Now we can use `fastnp.max`, row by row (axis=1), to select the maximum which is `closest_negative`.Next, we'll create *mean_negative*. As the name suggests, this is the mean of all the 'negative'/blue values in `score` on a row by row basis. We can use `fastnp.eye(batch_size)` and a constant, this time to create a mask with zeros on the diagonal. Element-wise multiply this with `score` to get just the 'negative values. This is `negative_zero_on_duplicate` in the code. Compute the mean by using `fastnp.sum` on `negative_zero_on_duplicate` for `axis=1` and divide it by `(batch_size - 1)` . This is `mean_negative`.Now, we can compute loss using the two equations above and `fastnp.maximum`. This will form `triplet_loss1` and `triplet_loss2`. `triple_loss` is the `fastnp.mean` of the sum of the two individual losses.Once you have this code matching the expected results, you can clip out the section between START CODE HERE and END CODE HERE it out and insert it into TripletLoss below.
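To make the intermediate quantities described above concrete, here is a minimal numpy sketch (illustrative only: the vectors are made up, and the graded cell below uses `fastnp` rather than plain numpy):

```python
import numpy as np

# Made-up, already-normalized output vectors (batch_size=4, d_model=3)
v1 = np.array([[1.0, 0.0, 0.0],
               [0.0, 1.0, 0.0],
               [0.0, 0.0, 1.0],
               [0.6, 0.8, 0.0]])
v2 = np.array([[ 1.0,  0.0, 0.0],   # good duplicate of v1[0]
               [ 0.0,  0.0, 1.0],   # poor match for v1[1]
               [ 0.0,  0.0, 1.0],   # good duplicate of v1[2]
               [-0.6, -0.8, 0.0]])  # exactly opposite of v1[3]
batch_size = len(v1)

score = np.dot(v1, v2.T)                                   # cosine similarity matrix
positive = np.diagonal(score)                              # cos(A, P) for each pair
negative_without_positive = score - 2.0 * np.eye(batch_size)
closest_negative = negative_without_positive.max(axis=1)   # hardest negative per row
negative_zero_on_duplicate = score * (1.0 - np.eye(batch_size))
mean_negative = negative_zero_on_duplicate.sum(axis=1) / (batch_size - 1)
print(positive, closest_negative, mean_negative, sep='\n')
```

Running this prints `positive = [1, 0, 1, -1]`, `closest_negative = [0, 0, 1, 0.6]` and `mean_negative` of approximately `[-0.2, -0.27, 0.33, 0.2]`: well-matched duplicates score high on the diagonal, while the masking isolates the hardest and the average negatives per row.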
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: TripletLossFn
def TripletLossFn(v1, v2, margin=0.25):
"""Custom Loss function.
Args:
v1 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to Q1.
v2 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to Q2.
margin (float, optional): Desired margin. Defaults to 0.25.
Returns:
jax.interpreters.xla.DeviceArray: Triplet Loss.
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# use fastnp to take the dot product of the two batches (don't forget to transpose the second argument)
scores = fastnp.dot(v1, v2.T) # pairwise cosine sim
# calculate new batch size
batch_size = len(scores)
# use fastnp to grab all postive `diagonal` entries in `scores`
positive = fastnp.diagonal(scores) # the positive ones (duplicates)
# multiply `fastnp.eye(batch_size)` with 2.0 and subtract it out of `scores`
negative_without_positive = scores - 2.0 * fastnp.eye(batch_size)
# take the row by row `max` of `negative_without_positive`.
# Hint: negative_without_positive.max(axis = [?])
closest_negative = negative_without_positive.max(axis=1)
# subtract `fastnp.eye(batch_size)` out of 1.0 and do element-wise multiplication with `scores`
negative_zero_on_duplicate = scores * (1.0 - fastnp.eye(batch_size))
# use `fastnp.sum` on `negative_zero_on_duplicate` for `axis=1` and divide it by `(batch_size - 1)`
    mean_negative = fastnp.sum(negative_zero_on_duplicate, axis=1) / (batch_size - 1)
# compute `fastnp.maximum` among 0.0 and `A`
# A = subtract `positive` from `margin` and add `closest_negative`
triplet_loss1 = fastnp.maximum(0.0, margin - positive + closest_negative)
# compute `fastnp.maximum` among 0.0 and `B`
# B = subtract `positive` from `margin` and add `mean_negative`
triplet_loss2 = fastnp.maximum(0.0, margin - positive + mean_negative)
# add the two losses together and take the `fastnp.mean` of it
triplet_loss = fastnp.mean(fastnp.add(triplet_loss1, triplet_loss2))
### END CODE HERE ###
return triplet_loss
v1 = np.array([[0.26726124, 0.53452248, 0.80178373],[0.5178918 , 0.57543534, 0.63297887]])
v2 = np.array([[ 0.26726124, 0.53452248, 0.80178373],[-0.5178918 , -0.57543534, -0.63297887]])
TripletLossFn(v2,v1)
print("Triplet Loss:", TripletLossFn(v2,v1))
###Output
Triplet Loss: 0.5
###Markdown
**Expected Output:**```CPPTriplet Loss: 0.5``` To make a layer out of a function with no trainable variables, use `tl.Fn`.
###Code
from functools import partial
def TripletLoss(margin=0.25):
triplet_loss_fn = partial(TripletLossFn, margin=margin)
return tl.Fn('TripletLoss', triplet_loss_fn)
###Output
_____no_output_____
###Markdown
Part 3: Training. Now you are going to train your model. As usual, you have to define the cost function and the optimizer. You also have to feed in the built model. Before going into the training, we will use a special data setup. We will define the inputs using the data generator we built above; because it is a generator, it keeps its own state and remembers the last batch that was given. Run the cell below to get the question pairs inputs.
###Code
batch_size = 256
train_generator = data_generator(train_Q1, train_Q2, batch_size, vocab['<PAD>'])
val_generator = data_generator(val_Q1, val_Q2, batch_size, vocab['<PAD>'])
print('train_Q1.shape ', train_Q1.shape)
print('val_Q1.shape ', val_Q1.shape)
###Output
train_Q1.shape (89188,)
val_Q1.shape (22298,)
###Markdown
3.1 Training the model. You will now write a function that takes in your model and trains it. To train your model you have to decide how many times you want to iterate over the entire data set; each iteration is defined as an `epoch`. For each epoch, you have to go over all the data, using your training iterator. Exercise 04 **Instructions:** Implement `train_model` below to train the neural network above. Here is a list of things you should do, as already shown in lecture 7: - Create `TrainTask` and `EvalTask`. - Create the training loop `trax.supervised.training.Loop`. - Pass in the following depending on the context (train_task or eval_task): - `labeled_data=generator` - `metrics=[TripletLoss()]` - `loss_layer=TripletLoss()` - `optimizer=trax.optimizers.Adam` with a learning rate of 0.01 - `lr_schedule=lr_schedule` - `output_dir=output_dir` You will be using your triplet loss function with the Adam optimizer. Please read the [trax](https://trax-ml.readthedocs.io/en/latest/trax.optimizers.html?highlight=adamtrax.optimizers.adam.Adam) documentation to get a full understanding. This function should return a `training.Loop` object. To read more about this check the [docs](https://trax-ml.readthedocs.io/en/latest/trax.supervised.html?highlight=looptrax.supervised.training.Loop).
###Code
lr_schedule = trax.lr.warmup_and_rsqrt_decay(400, 0.01)
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(Siamese, TripletLoss, lr_schedule, train_generator=train_generator,
val_generator=val_generator, output_dir='model/'):
"""Training the Siamese Model
Args:
Siamese (function): Function that returns the Siamese model.
TripletLoss (function): Function that defines the TripletLoss loss function.
lr_schedule (function): Trax multifactor schedule function.
train_generator (generator, optional): Training generator. Defaults to train_generator.
val_generator (generator, optional): Validation generator. Defaults to val_generator.
output_dir (str, optional): Path to save model to. Defaults to 'model/'.
Returns:
trax.supervised.training.Loop: Training loop for the model.
"""
output_dir = os.path.expanduser(output_dir)
### START CODE HERE (Replace instances of 'None' with your code) ###
train_task = training.TrainTask(
labeled_data=train_generator, # Use generator (train)
loss_layer=TripletLoss(), # Use triplet loss. Don't forget to instantiate this object
optimizer=trax.optimizers.Adam(0.01), # Don't forget to add the learning rate parameter
lr_schedule=lr_schedule, # Use Trax multifactor schedule function
)
eval_task = training.EvalTask(
labeled_data=val_generator, # Use generator (val)
metrics=[TripletLoss()], # Use triplet loss. Don't forget to instantiate this object
)
### END CODE HERE ###
training_loop = training.Loop(Siamese(),
train_task,
eval_task=eval_task,
output_dir=output_dir)
return training_loop
train_steps = 5
training_loop = train_model(Siamese, TripletLoss, lr_schedule)
training_loop.run(train_steps)
###Output
Step 1: train TripletLoss | 0.49722433
Step 1: eval TripletLoss | 0.49733442
###Markdown
The model was only trained for 5 steps due to the constraints of this environment. For the rest of the assignment you will be using a pretrained model but now you should understand how the training can be done using Trax. Part 4: Evaluation 4.1 Evaluating your siamese networkIn this section you will learn how to evaluate a Siamese network. You will first start by loading a pretrained model and then you will use it to predict.
###Code
# Loading in the saved model
model = Siamese()
model.init_from_file('model.pkl.gz')
###Output
_____no_output_____
###Markdown
4.2 ClassifyTo determine the accuracy of the model, we will utilize the test set that was configured earlier. While in training we used only positive examples, the test data, Q1_test, Q2_test and y_test, is set up as pairs of questions, some of which are duplicates and some are not. This routine will run all the test question pairs through the model, compute the cosine similarity of each pair, threshold it and compare the result to y_test - the correct response from the data set. The results are accumulated to produce an accuracy. Exercise 05**Instructions** - Loop through the incoming data in batch_size chunks - Use the data generator to load q1, q2 a batch at a time. **Don't forget to set shuffle=False!** - copy a batch_size chunk of y into y_test - compute v1, v2 using the model - for each element of the batch - compute the cos similarity of each pair of entries, v1[j],v2[j] - determine if d > threshold - increment accuracy if that result matches the expected results (y_test[j]) - compute the final accuracy and return Due to some limitations of this environment, running classify multiple times may result in the kernel failing. If that happens *Restart Kernel & Clear Output* and then run from the top. During development, consider using a smaller set of data to reduce the number of calls to model().
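Because the model's two outputs are already L2-normalized, the dot product of a pair of output vectors is exactly their cosine similarity, so no division by the norms is needed before thresholding. A minimal, plain-NumPy sketch (not part of the graded cell) illustrating this:
```python
import numpy as np

# two unit-length vectors, like the rows produced by the model's Normalize layer
v_a = np.array([3.0, 4.0]) / 5.0
v_b = np.array([4.0, 3.0]) / 5.0

dot = np.dot(v_a, v_b)
cosine = dot / (np.linalg.norm(v_a) * np.linalg.norm(v_b))
print(dot, cosine)  # both are 0.96 -- for unit vectors the dot product is the cosine similarity
```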
###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: classify
def classify(test_Q1, test_Q2, y, threshold, model, vocab, data_generator=data_generator, batch_size=64):
"""Function to test the accuracy of the model.
Args:
test_Q1 (numpy.ndarray): Array of Q1 questions.
test_Q2 (numpy.ndarray): Array of Q2 questions.
y (numpy.ndarray): Array of actual target.
threshold (float): Desired threshold.
model (trax.layers.combinators.Parallel): The Siamese model.
vocab (collections.defaultdict): The vocabulary used.
data_generator (function): Data generator function. Defaults to data_generator.
batch_size (int, optional): Size of the batches. Defaults to 64.
Returns:
float: Accuracy of the model.
"""
accuracy = 0
### START CODE HERE (Replace instances of 'None' with your code) ###
for i in range(0, len(test_Q1), batch_size):
# Call the data generator (built in Ex 01) with shuffle=False using next()
# use batch_size chunks of questions as Q1 & Q2 arguments of the data generator, e.g. x[i:i + batch_size]
# Hint: use `vocab['<PAD>']` for the `pad` argument of the data generator
q1, q2 = next(data_generator(test_Q1[i:i + batch_size], test_Q2[i:i + batch_size], batch_size, vocab['<PAD>'],
shuffle=False))
# use batch_size chunks of actual output targets (same syntax as example above)
y_test = y[i:i + batch_size]
# Call the model
v1, v2 =model((q1, q2))
for j in range(batch_size):
# take dot product to compute cos similarity of each pair of entries, v1[j], v2[j]
# don't forget to transpose the second argument
d = np.dot(v1[j], v2[j].T)
# is d greater than the threshold?
res = d > threshold
# increment accuracy if y_test is equal to `res`
accuracy += (y_test[j] == res)
# compute accuracy using accuracy and total length of test questions
accuracy = accuracy / len(test_Q1)
### END CODE HERE ###
return accuracy
# this takes around 1 minute
accuracy = classify(Q1_test,Q2_test, y_test, 0.7, model, vocab, batch_size = 512)
print("Accuracy", accuracy)
###Output
Accuracy 0.3767578125
###Markdown
**Expected Result** Accuracy ~0.69 Part 5: Testing with your own questionsIn this section you will test the model with your own questions. You will write a function `predict` which takes two questions as input and returns $1$ or $0$ depending on whether the question pair is a duplicate or not. But first, we build a reverse vocabulary that allows us to map encoded questions back to words: Write a function `predict` that takes in two questions, the model, and the vocabulary and returns whether the questions are duplicates ($1$) or not duplicates ($0$) given a similarity threshold. Exercise 06**Instructions:** - Tokenize your question using `nltk.word_tokenize` - Create Q1,Q2 by encoding your questions as a list of numbers using vocab- pad Q1,Q2 with next(data_generator([Q1], [Q2],1,vocab['<PAD>']))- use model() to create v1, v2- compute the cosine similarity (dot product) of v1, v2- compute res by comparing d to the threshold
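The cell that builds the reverse vocabulary is not reproduced here; a minimal sketch (assuming `vocab` maps words to integer ids, as used throughout this notebook) could look like this:
```python
# index -> word, the inverse of the word -> index mapping in `vocab`
reverse_vocab = {idx: word for word, idx in vocab.items()}

# map an encoded, padded question back to words, dropping pad tokens
def decode(token_ids, pad_id=vocab['<PAD>']):
    return [reverse_vocab.get(int(t), '<UNK>') for t in token_ids if int(t) != pad_id]
```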
###Code
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: predict
def predict(question1, question2, threshold, model, vocab, data_generator=data_generator, verbose=False):
"""Function for predicting if two questions are duplicates.
Args:
question1 (str): First question.
question2 (str): Second question.
threshold (float): Desired threshold.
model (trax.layers.combinators.Parallel): The Siamese model.
vocab (collections.defaultdict): The vocabulary used.
data_generator (function): Data generator function. Defaults to data_generator.
verbose (bool, optional): If the results should be printed out. Defaults to False.
Returns:
bool: True if the questions are duplicates, False otherwise.
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# use `nltk` word tokenize function to tokenize
q1 = nltk.word_tokenize(question1) # tokenize
q2 = nltk.word_tokenize(question2) # tokenize
Q1, Q2 = [], []
for word in q1: # encode q1
# increment by checking the 'word' index in `vocab`
Q1 += [vocab[word]]
for word in q2: # encode q2
# increment by checking the 'word' index in `vocab`
Q2 += [vocab[word]]
# Call the data generator (built in Ex 01) using next()
# pass [Q1] & [Q2] as Q1 & Q2 arguments of the data generator. Set batch size as 1
# Hint: use `vocab['<PAD>']` for the `pad` argument of the data generator
Q1, Q2 = next(data_generator([Q1], [Q2], 1, vocab['<PAD>']))
# Call the model
v1, v2 = model((Q1, Q2))
# take dot product to compute cos similarity of each pair of entries, v1, v2
# don't forget to transpose the second argument
d = np.dot(v1[0], v2[0].T)
# is d greater than the threshold?
res = d > threshold
### END CODE HERE ###
if(verbose):
print("Q1 = ", Q1, "\nQ2 = ", Q2)
print("d = ", d)
print("res = ", res)
return res
# Feel free to try with your own questions
question1 = "When will I see you?"
question2 = "When can I see you again?"
# 1 means it is duplicated, 0 otherwise
predict(question1 , question2, 0.7, model, vocab, verbose = True)
###Output
Q1 = [[585 76 4 46 53 21 1 1]]
Q2 = [[585 76 4 46 53 21 1 1]]
d = 1.0
res = True
###Markdown
Expected OutputIf input is:```CPPquestion1 = "When will I see you?"question2 = "When can I see you again?"```Output is (d may vary a bit):```CPPQ1 = [[585 76 4 46 53 21 1 1]] Q2 = [[ 585 33 4 46 53 7280 21 1]]d = 0.88113236res = TrueTrue```
###Code
# Feel free to try with your own questions
question1 = "Do they enjoy eating the dessert?"
question2 = "Do they like hiking in the desert?"
# 1 means it is duplicated, 0 otherwise
predict(question1 , question2, 0.7, model, vocab, verbose=True)
###Output
Q1 = [[ 443 1145 3159 1169 78 29017 21 1]]
Q2 = [[ 443 1145 3159 1169 78 29017 21 1]]
d = 1.0000002
res = True
|
examples/kkr_plugins_test.ipynb | ###Markdown
AiiDA-KKR demo Here is a demo to run the Voronoi code with a follow-up KKR calculation with AiiDA, using pure Python code. Some comments: In order to run the KKR and Voronoi codes you have to set them up as codes in AiiDA. You might source a bashrc in the prepend text of the code for licensing issues. Also, you should symlink the ElementPotential database for the Voronoi code. If you want to test, use submit_test, which will save all files that would be created for a calculation, without running it, in a local submit_test folder.
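For reference, registering such a code can also be scripted from Python instead of the interactive `verdi code setup`; the sketch below follows the AiiDA 0.x API used in the rest of this notebook, and the computer label, executable path, database path and plugin entry point are assumptions to adapt to your installation:
```python
# illustrative only -- adapt names and paths to your setup
from aiida.orm import Code, Computer

computer = Computer.get('local_mac')                      # assumed computer label
voro_code = Code(remote_computer_exec=(computer, '/path/to/voronoi.exe'))
voro_code.label = 'voro'
voro_code.set_input_plugin_name('kkr.voro')               # assumed aiida-kkr entry point
# source your bashrc for licensing variables and symlink the ElementPotential database
voro_code.set_prepend_text('source ~/.bashrc\nln -s /path/to/ElementDataBase .')
voro_code.store()
```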
###Code
%load_ext autoreload
%autoreload 2
%matplotlib notebook
import time
import os
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.orm import Code, load_node
from aiida.orm import DataFactory, CalculationFactory
from aiida_kkr.tools.kkrcontrol import write_kkr_inputcard_template, fill_keywords_to_inputcard, create_keyword_default_values
from pprint import pprint
from scipy import array
from aiida_kkr.calculations.kkr import KkrCalculation
from aiida_kkr.calculations.voro import VoronoiCalculation
from aiida_kkr.parsers.voro import VoronoiParser
from aiida_kkr.parsers.kkr import KkrParser
ParameterData = DataFactory('parameter')
StructureData = DataFactory('structure')
# Prepare an AiiDA StructureData as input, example Cu
alat = 6.830000 # in a_Bohr
abohr = 0.52917721067
# number of atom positions in unit cell
natyp = 1
# bravais vectors
bravais = array([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])
a = 0.5*alat*abohr
Cu = StructureData(cell=[[a, a, 0.0], [a, 0.0, a], [0.0, a, a]])
Cu.append_atom(position=[0.0, 0.0, 0.0], symbols='Cu')
#Cu.store()
Cu = load_node(79546)
print(Cu)
# Now generate a ParameterData node with key values needed by voronoi and KKR
# we use a helper function for some defaults and set some values
keywords = create_keyword_default_values()
keywords['NATYP'][0] = natyp
keywords['ALATBASIS'][0] = alat
keywords['NSPIN'][0] = 1
keywords['LMAX'][0] = 2
# choose only coarse energy contour and k-mesh for test purposes
keywords['NPOL'][0] = 4
keywords['NPT1'][0] = 3
keywords['NPT2'][0] = 10
keywords['NPT3'][0] = 3
keywords['BZKX'][0] = 10
keywords['BZKY'][0] = 10
keywords['RCLUSTZ'][0] = 1.50
keywords['RCLUSTXY'][0] = 1.50
# for ASA
keywords['INS'] = [0, '%i']
keywords['KSHAPE'] = [0, '%i']
pprint(keywords)
# Store the node
keyw = ParameterData(dict=keywords)
#keyw.store()
keyw = load_node(79550)
print keyw
# Running a single Voronoi calculation
code = Code.get_from_string('voro@local_mac')
calc = VoronoiCalculation()
calc.label = 'Test voronoi'
calc.set_withmpi(False)
calc.set_resources({"num_machines" : 1})
calc.set_max_wallclock_seconds(300)
calc.set_computer('local_mac')
calc.use_code(code)
calc.use_structure(Cu)
calc.use_parameters(keyw)
submit_test = False
if submit_test:
subfolder, script_filename = calc.submit_test()
print "Test_submit for calculation (uuid='{}')".format(
calc.uuid)
print "Submit file in {}".format(os.path.join(
os.path.relpath(subfolder.abspath),
script_filename
))
else:
calc.store_all()
print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc.uuid, calc.dbnode.pk)
calc.submit()
print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc.uuid, calc.dbnode.pk)
!cat submit_test/20171110-00007/_aiidasubmit.sh
!ls submit_test/20171110-00001/
! cat submit_test/20171110-00007/inputcard
# On top of the voronoi calculation we want to run a KKR calculation
# for this we have to get some things from the voronoi calculation
# use the calculation run before or load a voronoi calculation
calc2 = load_node(79565)
# We create a new parameter node in which we store the emin extracted from the voronoi calculation
#emin = calc1.get_outputs_dict()['output_parameters'].get_dict()['EMIN']
emin = calc2.res.EMIN
remote = calc2.get_outputs_dict()['remote_folder']
keywords2 = keywords
keywords2['EMIN'][0] = emin
keyw2 = ParameterData(dict=keywords2)
#keyw2.store()
keyw2 = load_node(79570)
print keyw2
# Now we create and run the kkr Calculation
code = Code.get_from_string('kkr1@local_mac')#'kkrimp@local_mac')
calc1 = KkrCalculation()
calc1.label = 'Test kkr'
calc1.set_withmpi(False)
calc1.set_resources({"num_machines" : 1})
calc1.set_max_wallclock_seconds(300)
calc1.set_computer('local_mac')
calc1.use_code(code)
#calc1.use_structure(Cu)
calc1.use_parameters(keyw2)
calc1.use_parent_folder(remote)
submit_test = False
if submit_test:
subfolder, script_filename = calc1.submit_test()
print "Test_submit for calculation (uuid='{}')".format(
calc1.uuid)
print "Submit file in {}".format(os.path.join(
os.path.relpath(subfolder.abspath),
script_filename
))
else:
calc1.store_all()
print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc1.uuid, calc1.dbnode.pk)
calc1.submit()
print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
calc1.uuid, calc1.dbnode.pk)
! cat submit_test/20171110-00020///_aiidasubmit.sh
! ls submit_test/20171110-00020//
! cat submit_test/20171110-00020/inputcard
# Check with the verdi shell if everything with your calculations went right
# Voronoi parser test
n = load_node(79559)
retrieved_dict = {'retrieved' : n}
voro_parser = VoronoiParser(calc)
suc, nodes = voro_parser.parse_with_retrieved(retrieved_dict)
print suc
print nodes
print nodes[0][1].get_dict()['EMIN']
###Output
_____no_output_____ |
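As an alternative to the verdi shell check mentioned above, the state and parsed results of a calculation can be inspected directly from Python; a minimal sketch, reusing the pk of the Voronoi calculation loaded earlier (AiiDA 0.x API):
```python
calc_check = load_node(79565)       # the finished Voronoi calculation from above
print(calc_check.get_state())       # e.g. 'FINISHED' once retrieved and parsed
print(calc_check.res.EMIN)          # parsed output values are exposed via the .res shortcut
```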
7-pandas-connections.ipynb | ###Markdown
Data concatenation with pandas Contents - Loading the data - Concatenating data (column-wise) - Missing values - Concatenating data (row-wise)
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Read back the data saved in Chapter 5
###Code
df = pd.read_pickle("data/df_201704health.pickle")
df
df_moved = pd.read_pickle("data/df_201704moved.pickle")
df_moved
###Output
_____no_output_____
###Markdown
Concatenating data (column-wise)
###Code
df_marged = pd.concat([df, df_moved], axis=1)
df_marged
df_201705 = pd.read_csv("data/201705health.csv", encoding="utf-8", index_col='日付', parse_dates=True)
###Output
_____no_output_____
###Markdown
Load the new data for May
###Code
df_201705
###Output
_____no_output_____
###Markdown
Handling missing values
###Code
df_201705.dropna()
df_201705.fillna(0)
df_201705_fill = df_201705.fillna(method='ffill')
df_201705_fill
###Output
_____no_output_____
###Markdown
Concatenating data (row-wise)
###Code
pd.concat([df_marged, df_201705_fill], axis=0)
###Output
_____no_output_____ |