path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
---|---
2019/06-Tecnicas_para_Melhoria_de_Resultados/KFold_Cross_Validation.ipynb | ###Markdown
K-Fold Cross Validation
###Code
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
df = pd.read_csv('breast_cancer_wisconsin_processed_ok.csv')
df.head()
data = df.copy().values
r = np.random.RandomState(42)
r.shuffle(data)
df.head()
###Output
_____no_output_____
###Markdown
Manual K-Fold
###Code
# define the index ranges of each fold
k = 3
fold_size = data.shape[0]//k
curr = 0
all_idx = np.arange(0, data.shape[0])
p = [None] * k
idx_train = [None] * k
idx_val = [None] * k
for i in range(k):
prev = curr
curr += fold_size
if (curr < data.shape[0]):
curr += 1
p[i] = np.arange(prev, curr)
idx_val[i] = p[i]
diff = np.setdiff1d(all_idx, p[i])
idx_train[i] = diff
n = idx_train[i].shape[0] + idx_val[i].shape[0]
print(idx_train[i].shape, idx_val[i].shape, n)
X_train = [None] * k
y_train = [None] * k
X_val = [None] * k
y_val = [None] * k
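# Optional sanity check (added for clarity): the k validation folds built above should be
# disjoint and together cover every row exactly once.
assert np.array_equal(np.sort(np.concatenate(p)), all_idx)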
%%time
model = [None] * k
y_pred = [None] * k
acc = [None] * k
for i in range(k):
print(idx_train[i].shape, idx_val[i].shape)
X_train[i] = data[idx_train[i], :-1]
y_train[i] = data[idx_train[i], -1]
X_val[i] = data[idx_val[i], :-1]
y_val[i] = data[idx_val[i], -1]
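# note: the scaler is fit on the training fold only and then applied to the validation fold (avoids leakage)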
s = StandardScaler()
X_train[i] = s.fit_transform(X_train[i])
X_val[i] = s.transform(X_val[i])
model[i] = LogisticRegression(solver='lbfgs', max_iter=10000, random_state=42)
model[i].fit(X_train[i], y_train[i])
y_pred[i] = model[i].predict(X_val[i])
acc[i] = accuracy_score(y_val[i], y_pred[i])
acc
np.mean(acc)
###Output
_____no_output_____
###Markdown
Using Scikit-learn's KFold + cross_val_score, Without Standardization
###Code
model2 = LogisticRegression(solver='lbfgs', max_iter=10000, random_state=42)
data2 = df.values
X = data2[:, :-1]
y = data2[:, -1]
cv = KFold(n_splits=3, shuffle=True, random_state=42)
for idx_train2, idx_val2 in cv.split(X, y):
print(idx_train2.shape, idx_val2.shape)
%%time
acc2 = cross_val_score(model2, X, y, cv=cv, scoring='accuracy')
print(acc2)
np.mean(acc2)
###Output
_____no_output_____
###Markdown
With standardization: using a Pipeline with StandardScaler
###Code
s = StandardScaler()
pipeline = Pipeline([('transformer', s), ('estimator', model2)])
acc3 = cross_val_score(pipeline, X, y, cv=cv, scoring='accuracy')
acc3
np.mean(acc3)
###Output
_____no_output_____ |
part2_day3_simple_model.ipynb | ###Markdown
Data loading
###Code
cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
df = pd.read_hdf('data/car.h5')
df.shape
df.columns
###Output
_____no_output_____
###Markdown
Dummy Model
###Code
df.select_dtypes(np.number).columns
feats = ['car_id']
x = df[feats].values
y = df['price_value'].values
model = DummyRegressor()
model.fit(x,y)
y_pred = model.predict(x)
mae(y, y_pred)
[x for x in df.columns if 'price' in x]
df['price_currency'].value_counts()
df = df[ df['price_currency'] != 'EUR']
df.shape
ls
###Output
data/ LICENSE part2_day2_visualisation.ipynb README.md
###Markdown
Features
###Code
SUFFIX_CAT = '__cat'
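# factorize every column into integer codes: skip list-valued columns, overwrite columns that are
# already "__cat" encodings, and otherwise store the codes in a new "<column>__cat" column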
for feat in df.columns:
if isinstance (df[feat][0], list): continue
factorized_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorized_values
else:
df[feat + SUFFIX_CAT] = factorized_values
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)
x = df[cat_feats].values
y = df['price_value'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)
m = DecisionTreeRegressor(max_depth=5)
m.fit(x,y)
imp = PermutationImportance(m, random_state=0).fit(x,y)
eli5.show_weights(m, feature_names=cat_feats)
###Output
_____no_output_____ |
deprecated/dice_compas_dataset_test.ipynb | ###Markdown
DiCE
###Code
import dice_ml
# assumed imports from the earlier (omitted) cells of this deprecated notebook
import numpy as np
import pandas as pd
import tensorflow as tf
from time import time
d = dice_ml.Data(dataframe=scaled_df, continuous_features=numerical_cols, outcome_name=target_name)
# m = dice_ml.Model(model=models['dt'], backend="sklearn")
# exp = dice_ml.Dice(d,m)
class RecordWrapper():
def __init__(self, model, cat_to_ohe_cat, ohe_feature_names):
self.all_inputs = []
self.model = model
self.cat_to_ohe_cat = cat_to_ohe_cat
self.ohe_feature_names = ohe_feature_names
def dice_to_input(self, input_df):
x = input_df.copy(deep=True)
for k in cat_to_ohe_cat.keys():
for ohe_col in cat_to_ohe_cat[k]:
x[ohe_col] = x[k].apply(lambda v: 1 if v in ohe_col else 0)
x.drop([k], axis=1, inplace=True)
return np.array(x[ohe_feature_names])
def predict_proba(self, x):
self.all_inputs.append(x)
cf_input = self.dice_to_input(x)
return self.model.predict_proba(cf_input)
def predict(self, x):
self.all_inputs.append(x)
cf_input = self.dice_to_input(x)
return self.model.predict(cf_input)
class NNRecordWrapper():
def __init__(self, model, cat_to_ohe_cat, ohe_feature_names):
self.all_inputs = []
self.model = model
self.cat_to_ohe_cat = cat_to_ohe_cat
self.ohe_feature_names = ohe_feature_names
def dice_to_input(self, input_df):
x = input_df.copy(deep=True)
for k in cat_to_ohe_cat.keys():
for ohe_col in cat_to_ohe_cat[k]:
x[ohe_col] = x[k].apply(lambda v: 1 if v in ohe_col else 0)
x.drop([k], axis=1, inplace=True)
return np.array(x[ohe_feature_names])
def predict(self, x):
self.all_inputs.append(x)
cf_input = self.dice_to_input(x)
return self.model.predict(tf.constant(cf_input.astype(float)))
def predict_proba(self, x):
self.all_inputs.append(x)
cf_input = self.dice_to_input(x)
return self.model.predict(tf.constant(cf_input.astype(float)))
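# Illustrative note (not from the original notebook): the wrappers assume `cat_to_ohe_cat` maps each
# original categorical column to its one-hot column names, e.g.
#   {'sex': ['sex_Female', 'sex_Male'], 'c_charge_degree': ['c_charge_degree_F', 'c_charge_degree_M']}
# and `ohe_feature_names` is the ordered list of one-hot/numerical columns the models were trained on.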
all_wrapped_models = {
'dt': RecordWrapper(models['dt'], cat_to_ohe_cat, ohe_feature_names),
'rfc': RecordWrapper(models['rfc'], cat_to_ohe_cat, ohe_feature_names),
'nn': NNRecordWrapper(models['nn'], cat_to_ohe_cat, ohe_feature_names),
}
# dt_record = RecordWrapper(models['dt'], cat_to_ohe_cat, ohe_feature_names)
# m = dice_ml.Model(model=dt_record, backend="sklearn")
# exp = dice_ml.Dice(d,m)
# dice_exp = exp.generate_counterfactuals(scaled_df.iloc[1:2], total_CFs=5, desired_class="opposite")
# dice_exp.cf_examples_list[0].final_cfs_df.iloc[0][:-1]
dice_cfs = {
'dt': dice_ml.Dice(d,dice_ml.Model(model=all_wrapped_models['dt'], backend="sklearn")),
'rfc': dice_ml.Dice(d,dice_ml.Model(model=all_wrapped_models['rfc'], backend="sklearn")),
'nn': dice_ml.Dice(d,dice_ml.Model(model=all_wrapped_models['nn'], backend="sklearn"))
}
num_instances = 5
num_cf_per_instance = 1
results = {}
for k in dice_cfs.keys():
results[k] = []
print(f"Finding counterfactual for {k}")
for idx, instance in enumerate(scaled_df.iloc[test_df[0:num_instances].index].iloc):
print(f"instance {idx}")
for num_cf in range(num_cf_per_instance):
print(f"CF {num_cf}")
start_t = time()
input_query = pd.DataFrame([instance.to_dict()])
ground_truth = input_query[target_name][0]
exp = dice_cfs[k].generate_counterfactuals(input_query, total_CFs=1, sample_size=200, desired_class="opposite")
# dice_exp = dice_cfs['nn'].generate_counterfactuals(scaled_df.iloc[1:2], total_CFs=1, desired_class="opposite")
# dice_exp.cf_examples_list[0].final_cfs_df.iloc[0][:-1]
if k=='nn':
prediction = target_label_encoder.inverse_transform((all_wrapped_models[k].predict(input_query)[0]> 0.5).astype(int))[0]
else:
prediction = target_label_encoder.inverse_transform(all_wrapped_models[k].predict(input_query))[0]
end_t = time ()
running_time = end_t - start_t
results[k].append({
"input": input_query,
"cf": exp.cf_examples_list[0].final_cfs_df,
"running_time": running_time,
"ground_truth": ground_truth,
"prediction": prediction,
})
# scaled_input_df = results['dt'][0]['input'].copy(deep=True)
# origin_columns = [f"origin_input_{col}" for col in scaled_input_df.columns]
# origin_input_df = scaled_input_df.copy(deep=True)
# scaled_input_df.columns = [f"scaled_input_{col}" for col in scaled_input_df.columns]
# origin_input_df[numerical_cols] = scaler.inverse_transform(origin_input_df[numerical_cols])
# origin_input_df.columns = origin_columns
# scaled_cf_df = results['dt'][0]['cf'].copy(deep=True)
# scaled_cf_df.loc[0, target_name] = target_label_encoder.inverse_transform([scaled_cf_df.loc[0, target_name]])[0]
# origin_cf_columns = [f"origin_cf_{col}" for col in scaled_cf_df.columns]
# origin_cf_df = scaled_cf_df.copy(deep=True)
# scaled_cf_df.columns = [f"scaled_cf_{col}" for col in scaled_cf_df.columns]
# origin_cf_df[numerical_cols] = scaler.inverse_transform(origin_cf_df[numerical_cols])
# origin_cf_df.columns = origin_cf_columns
# final_df = pd.DataFrame([{}])
# final_df = final_df.join([scaled_input_df, origin_input_df, scaled_cf_df, origin_cf_df])
# final_df['running_time'] = results['dt'][0]['running_time']
# final_df['Found'] = "Y" if not results['dt'][0]['cf'] is None else "N"
all_df = {}
for k in results.keys():
all_data = []
for i in range(len(results[k])):
final_df = pd.DataFrame([{}])
scaled_input_df = results[k][i]['input'].copy(deep=True)
origin_columns = [f"origin_input_{col}" for col in scaled_input_df.columns]
origin_input_df = scaled_input_df.copy(deep=True)
scaled_input_df.columns = [f"scaled_input_{col}" for col in scaled_input_df.columns]
origin_input_df[numerical_cols] = scaler.inverse_transform(origin_input_df[numerical_cols])
origin_input_df.columns = origin_columns
final_df = final_df.join([scaled_input_df, origin_input_df])
if not results[k][i]['cf'] is None:
scaled_cf_df = results[k][i]['cf'].copy(deep=True)
scaled_cf_df.loc[0, target_name] = target_label_encoder.inverse_transform([scaled_cf_df.loc[0, target_name]])[0]
origin_cf_columns = [f"origin_cf_{col}" for col in scaled_cf_df.columns]
origin_cf_df = scaled_cf_df.copy(deep=True)
scaled_cf_df.columns = [f"scaled_cf_{col}" for col in scaled_cf_df.columns]
origin_cf_df[numerical_cols] = scaler.inverse_transform(origin_cf_df[numerical_cols])
origin_cf_df.columns = origin_cf_columns
final_df = final_df.join([scaled_cf_df, origin_cf_df])
# final_df = final_df.join([scaled_input_df, origin_input_df, scaled_cf_df, origin_cf_df])
final_df['running_time'] = results[k][i]['running_time']
final_df['Found'] = "Y" if not results[k][i]['cf'] is None else "N"
final_df['ground_truth'] = results[k][i]['ground_truth']
final_df['prediction'] = results[k][i]['prediction']
all_data.append(final_df)
all_df[k] = pd.concat(all_data)
for df_k in all_df.keys():
all_df[df_k].to_csv(f"./results/dice_compas_{df_k}_result.csv")
###Output
_____no_output_____ |
notebooks/2d_heterogeneous_media.ipynb | ###Markdown
Boundary conditions. The initial pressure distribution corresponds to an undisturbed reservoir at a pressure of 20 MPa containing a well with a prescribed bottomhole pressure of 15 MPa. The well is located in the middle of the computational domain.
###Code
P_pl = 2 * 10**7
x_init = np.linspace(0, X, Nx)
y_init = np.linspace(0, Y, Ny)
X_init, Y_init = np.meshgrid(y_init, x_init)
M = np.zeros((Nx, Ny))
for i in range(0, Nx):
for j in range(0, Ny):
M[i,j] = P_pl
M[int(Nx/2), int(Ny/2)] = 15 * 10**6
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_wireframe(X_init, Y_init, M, linewidth=.9)
ax.view_init(40, 125)
plt.show()
print(X_init.shape)
# ax =
###Output
(30, 30)
###Markdown
Now we turn to permeability. The permeability differs at every node, so we need a matrix holding a value for each cell. The permeability distribution should be described by some function, and we also place an impermeable barrier at the points x = X/3, y (the 10th node in the x-direction):
###Code
M_perm = np.zeros((Nx, Ny))
for i in range(0, Nx-1):
for j in range(0, Ny-1):
if (i == 10):
M_perm[i,j] = 10**(-20)
else:
M_perm[i,j] = (j * Nx + i + 1) * 10**(-15)
# pd.DataFrame(M_porosity)
X_por, Y_por = np.meshgrid(np.linspace(0, Y, Ny), np.linspace(0, X, Nx))
fig = plt.figure()
plt.contour(X_por, Y_por, M_perm)
plt.show()
xw = X/2
yw = Y/2
N = Nx * Ny # total number of nodes
Nw = Nx * Ny // 2 + Nx // 2 # index of the node containing the well (integer division so it matches the node numbering)
###Output
_____no_output_____
###Markdown
Our next task is to build the pentadiagonal matrix for the linear system and the vector of the initial pressure distribution on the zeroth time layer, using a flattened (through) numbering of the nodes.
###Code
A = np.zeros((N, N))
def calc_permeability(i):
k_arr = np.zeros(4)
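# interface permeabilities: harmonic average of the node's permeability with each of its four
# neighbours (order: x+1, x-1, y+1, y-1 in the flattened node numbering)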
k_arr[0] = (2*M_perm[(i % Nx) + 1, floor(i/Nx)] * M_perm[(i%Nx), floor(i/Nx)])/ (M_perm[(i % Nx) + 1, floor(i/Nx)] + M_perm[(i%Nx), floor(i/Nx)])
k_arr[1] = (2*M_perm[(i % Nx), floor(i/Nx)] * M_perm[(i%Nx)-1, floor(i/Nx)])/(M_perm[(i % Nx), floor(i/Nx)] + M_perm[(i%Nx)-1, floor(i/Nx)])
k_arr[2] = (2*M_perm[(i % Nx), floor(i/Nx)+1] * M_perm[(i%Nx), floor(i/Nx)])/(M_perm[(i % Nx), floor(i/Nx)+1] + M_perm[(i%Nx), floor(i/Nx)])
k_arr[3] = (2*M_perm[(i % Nx), floor(i/Nx)] * M_perm[(i%Nx), floor(i/Nx)-1]) / (M_perm[(i % Nx), floor(i/Nx)] + M_perm[(i%Nx), floor(i/Nx)-1])
return k_arr
for i in range(0, N):
if ( ( Nx < i < N-Nx ) and (( i % Nx != 0) and ( i % Nx != Nx-1)) and (i != Nw)):
k = calc_permeability(i)
A[i, i] = 1 + ( (dt/(mu*B_k)) * ( ((k[0]+k[1])/dx**2) + ((k[2]+k[3])/dy**2) ) )
A[i, i+1] = - (dt*k[0]) / (mu*B_k*dx**2)
A[i, i-1] = - (dt*k[1]) / (mu*B_k*dx**2)
A[i, i+Nx] = - (dt*k[2]) / (mu*B_k*dy**2)
A[i, i-Nx] = - (dt*k[3]) / (mu*B_k*dy**2)
else:
A[i,i] = 1
# print(A[464,463], A[464,464], A[464,465])
print(A[465,464], A[465,465], A[465,466])
presentation = pd.DataFrame(A[463:468,458:470])
presentation = presentation.applymap(lambda x: "{:1.3f}".format(x))
presentation
###Output
0.0 1.0 0.0
###Markdown
Having assembled the pentadiagonal matrix, we proceed to the computation using the implicit finite-difference scheme.
###Code
B = np.zeros((N, 1))
print(B.shape)
# a matrix vector that contains initial P values
k = 0
for i in range(0, Nx):
for j in range(0, Ny):
B[k, 0] = M[i,j]
k += 1
P = np.zeros((Nt, Nx, Ny))
B_temp = np.copy(B)
m = np.zeros((Nx, Ny))
for t in range(1, Nt):
B_ans = np.linalg.solve(A, B_temp)
k = 0
for i in range(0, Nx):
for j in range(0, Ny):
m[i,j] = B_ans[k]
k += 1
P[t] = m
B_temp = B_ans
fig = plt.figure()
# ax = fig.gca(projection='3d')
X, Y = np.meshgrid(np.linspace(0, Ny, Ny), np.linspace(0, Nx, Nx))
plt.contour(X, Y, P[30])
# ax.plot_wireframe(X, Y, P[3])
# ax.view_init(290, 90)
plt.show()
###Output
_____no_output_____ |
config_gen.ipynb | ###Markdown
Config Gen. A basic notebook to generate and explain Ember's entire configuration file. Change the Join Specification Parameters to operate over a custom dataset. Imports & Util Functions
###Code
import pandas as pd
import numpy as np
import json
from collections import defaultdict
def save_config(config_path):
with open(config_path, 'w') as fp:
json.dump(config, fp, indent=4)
def load_config(config_path):
with open(config_path) as fp:
config = json.load(fp)
return config
###Output
_____no_output_____
###Markdown
Set base paths for data and ember home, and name config
###Code
path_base = '.'
data_base = './data/'
config_name = "demo"
###Output
_____no_output_____
###Markdown
Init New Config
###Code
config = defaultdict(dict)
###Output
_____no_output_____
###Markdown
Join Specification Parameters (change me) These are the only parameters you should change to begin with, before considering any of the lower level ones:* Data Directory Name : must have train_table_A ("left"), train_table_B ("right"), train_supervision, and optional test_supervision over test_table_A and test_table_B. Must be rooted from path_base* Join Type : must be "INNER", "LEFT", "RIGHT", or "FULL"* Join sizes: left size and right size
###Code
config['data_dir'] = 'abt-buy'
config['join_type'] = "LEFT" # Currently only supports left/right one-to-many joins.
config['left_size'] = 1
config['right_size'] = 10
## Do not edit!
config['data_path'] = data_base + config['data_dir']
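# Expected layout under config['data_path'], per the description above
# (the .csv extension is an assumption; adjust to however your tables are stored):
#   data/abt-buy/
#     train_table_A.csv      # "left" table
#     train_table_B.csv      # "right" table
#     train_supervision.csv
#     test_table_A.csv, test_table_B.csv, test_supervision.csv   # optional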
###Output
_____no_output_____
###Markdown
Encoder Parameters
###Code
# Alternative configurations are not currently supported by the public API,
# but can be added upon request
config['num_encoders'] = 'single'
config['model_type'] = 'distilbert'
config['tokenizer_casing'] = 'uncased'
###Output
_____no_output_____
###Markdown
Pretraining Parameters Data preparing
###Code
# only available preparer; no not modify unless adding new preparers
config['preparer'] = 'sentence'
config['new_col_name'] = 'merged_all'
config['ID_left'] = "ltable_id"
config['ID_right'] = "rtable_id"
###Output
_____no_output_____
###Markdown
Optional Pretraining
###Code
config['pretrain_mlm'] = True
# currently only exposes BM25-based MLM
config['mlm_supervision'] = 'BM25'
config['from_scratch'] = False
config['mlm_train_epochs'] = 20 # Decrease to reduce pretraining time
config['mlm_batch_size'] = 8
config['mlm_probability'] = 0.15
config['mlm_num_seps'] = None
#ALL, BEFORE, AFTER for conditional masking. Please keep to 'ALL' as others are deprecated
config['mlm_masking'] = 'ALL'
config['mlm_model_name'] = f"{config['data_dir']}-{config['model_type']}-{config['tokenizer_casing']}-\
masked-{config['mlm_masking']}-{config['mlm_supervision']}"
###Output
_____no_output_____
###Markdown
Representation Learning Parameters
###Code
# combo of num_encoders and pretrain_MLM does this
# Base encoder type. Options can be:
## distilbert-base-cased
## bert-base_cased
## config['arch'] = 'pretrained'
#config['arch'] = 'pretrained'
#config['encoder_base']= path_base + f'/pretraining/models/{config['MLM_model_name']}'
config['tokenizer'] = f"{config['model_type']}-base-{config['tokenizer_casing']}"
config['pos_frac'] = 1
config['train_frac'] = 1 # Increase to generate and train with more triplets. Can improve performance
config['epochs'] = 1
config['batch_size'] = 8
config['final_size'] = 200
config['lr'] = .00001
config['loss'] = 'triplet'
config['tl_margin'] = 1.0
config['tl_p'] = 2
config['pool_type'] = "CLS"
config['tokenizer_max_length'] = 512
config['model_name'] = f"{config['data_dir']}-{config['model_type']}-{config['tokenizer_casing']}-\
{config['num_encoders']}-pretrain-{config['pretrain_mlm']}-pos-frac-{config['pos_frac']}"
###Output
_____no_output_____
###Markdown
Save and load (to verify) config
###Code
config_path = path_base + f"/configs/{config_name}.json"
print('Run the command as follows:')
print(f"python ember.py -c {config_path}")
save_config(config_path)
load_config(config_path)
###Output
Run the command as follows:
python ember.py -c ./configs/demo.json
|
capstone911-checkpoint.ipynb | ###Markdown
sns.countplot(x='Reason',data = df)
###Code
# assumed imports and DataFrame load from the earlier (omitted) cells of this checkpoint
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.countplot(x='Reason',data = df, palette='viridis')
df.info()
type(df['timeStamp'].iloc[0])
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
type(df['timeStamp'].iloc[0])
time = df['timeStamp'].iloc[0]
time.hour
time.dayofweek
df['Hour'] = df['timeStamp'].apply(lambda time: time.hour)
df['Month'] = df['timeStamp'].apply(lambda time: time.month)
df['Day of Week'] = df['timeStamp'].apply(lambda time: time.dayofweek)
df.head()
dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'}
df['Day of Week'] = df['Day of Week'].map(dmap)
df.head()
sns.countplot(x='Day of Week',data=df,hue='Reason')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
sns.countplot(x='Month',data=df,hue='Reason')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
byMonth = df.groupby('Month').count()
byMonth.head()
byMonth['twp'].plot()
sns.lmplot(x='Month', y='twp',data=byMonth.reset_index())
t = df['timeStamp'].iloc[0]
df['Date'] = df['timeStamp'].apply(lambda t:t.date())
df.head()
df.groupby('Date').count()['lat'].plot()
plt.tight_layout()
df[df['Reason']=='Traffic'].groupby('Date').count()['lat'].plot()
plt.tight_layout()
df[df['Reason']=='Fire'].groupby('Date').count()['lat'].plot()
plt.tight_layout()
df[df['Reason']=='EMS'].groupby('Date').count()['lat'].plot()
plt.tight_layout()
dayHour = df.groupby(by=['Day of Week','Hour']).count()['Reason'].unstack()
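# dayHour: rows are days of the week, columns are hours of the day, values are call counts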
sns.heatmap(dayHour,cmap='viridis')
sns.clustermap(dayHour,cmap='coolwarm')
dayMonth = df.groupby(by=['Day of Week','Month']).count()['Reason'].unstack()
sns.heatmap(dayMonth,cmap='cool')
###Output
_____no_output_____ |
Trigger_word_detection_v1a_clear_output.ipynb | ###Markdown
Trigger Word DetectionWelcome to the final programming assignment of this specialization! In this week's videos, you learned about applying deep learning to speech recognition. In this assignment, you will construct a speech dataset and implement an algorithm for trigger word detection (sometimes also called keyword detection, or wake word detection). * Trigger word detection is the technology that allows devices like Amazon Alexa, Google Home, Apple Siri, and Baidu DuerOS to wake up upon hearing a certain word. * For this exercise, our trigger word will be "Activate." Every time it hears you say "activate," it will make a "chiming" sound. * By the end of this assignment, you will be able to record a clip of yourself talking, and have the algorithm trigger a chime when it detects you saying "activate." * After completing this assignment, perhaps you can also extend it to run on your laptop so that every time you say "activate" it starts up your favorite app, or turns on a network connected lamp in your house, or triggers some other event? In this assignment you will learn to: - Structure a speech recognition project- Synthesize and process audio recordings to create train/dev datasets- Train a trigger word detection model and make predictions Updates If you were working on the notebook before this update...* The current notebook is version "1a".* You can find your original work saved in the notebook with the previous version name ("v1") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* 2.1: build the model * Added sample code to show how to use the Keras layers. * Lets student to implement the `TimeDistributed` code.* Spelling, grammar and wording corrections. Let's get started! Run the following cell to load the package you are going to use.
###Code
import numpy as np
from pydub import AudioSegment
import random
import sys
import io
import os
import glob
import IPython
from td_utils import *
%matplotlib inline
###Output
_____no_output_____
###Markdown
1 - Data synthesis: Creating a speech dataset Let's start by building a dataset for your trigger word detection algorithm. * A speech dataset should ideally be as close as possible to the application you will want to run it on. * In this case, you'd like to detect the word "activate" in working environments (library, home, offices, open-spaces ...). * Therefore, you need to create recordings with a mix of positive words ("activate") and negative words (random words other than activate) on different background sounds. Let's see how you can create such a dataset. 1.1 - Listening to the data * One of your friends is helping you out on this project, and they've gone to libraries, cafes, restaurants, homes and offices all around the region to record background noises, as well as snippets of audio of people saying positive/negative words. This dataset includes people speaking in a variety of accents. * In the raw_data directory, you can find a subset of the raw audio files of the positive words, negative words, and background noise. You will use these audio files to synthesize a dataset to train the model. * The "activate" directory contains positive examples of people saying the word "activate". * The "negatives" directory contains negative examples of people saying random words other than "activate". * There is one word per audio recording. * The "backgrounds" directory contains 10 second clips of background noise in different environments.Run the cells below to listen to some examples.
###Code
IPython.display.Audio("./raw_data/activates/1.wav")
IPython.display.Audio("./raw_data/negatives/4.wav")
IPython.display.Audio("./raw_data/backgrounds/1.wav")
###Output
_____no_output_____
###Markdown
You will use these three types of recordings (positives/negatives/backgrounds) to create a labeled dataset. 1.2 - From audio recordings to spectrogramsWhat really is an audio recording? * A microphone records little variations in air pressure over time, and it is these little variations in air pressure that your ear also perceives as sound. * You can think of an audio recording is a long list of numbers measuring the little air pressure changes detected by the microphone. * We will use audio sampled at 44100 Hz (or 44100 Hertz). * This means the microphone gives us 44,100 numbers per second. * Thus, a 10 second audio clip is represented by 441,000 numbers (= $10 \times 44,100$). Spectrogram* It is quite difficult to figure out from this "raw" representation of audio whether the word "activate" was said. * In order to help your sequence model more easily learn to detect trigger words, we will compute a *spectrogram* of the audio. * The spectrogram tells us how much different frequencies are present in an audio clip at any moment in time. * If you've ever taken an advanced class on signal processing or on Fourier transforms: * A spectrogram is computed by sliding a window over the raw audio signal, and calculating the most active frequencies in each window using a Fourier transform. * If you don't understand the previous sentence, don't worry about it.Let's look at an example.
###Code
IPython.display.Audio("audio_examples/example_train.wav")
x = graph_spectrogram("audio_examples/example_train.wav")
###Output
_____no_output_____
###Markdown
The graph above represents how active each frequency is (y axis) over a number of time-steps (x axis). **Figure 1**: Spectrogram of an audio recording * The color in the spectrogram shows the degree to which different frequencies are present (loud) in the audio at different points in time. * Green means a certain frequency is more active or more present in the audio clip (louder).* Blue squares denote less active frequencies.* The dimension of the output spectrogram depends upon the hyperparameters of the spectrogram software and the length of the input. * In this notebook, we will be working with 10 second audio clips as the "standard length" for our training examples. * The number of timesteps of the spectrogram will be 5511. * You'll see later that the spectrogram will be the input $x$ into the network, and so $T_x = 5511$.
###Code
_, data = wavfile.read("audio_examples/example_train.wav")
print("Time steps in audio recording before spectrogram", data[:,0].shape)
print("Time steps in input after spectrogram", x.shape)
###Output
_____no_output_____
###Markdown
Now, you can define:
###Code
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
###Output
_____no_output_____
###Markdown
Dividing into time-intervalsNote that we may divide a 10 second interval of time with different units (steps).* Raw audio divides 10 seconds into 441,000 units.* A spectrogram divides 10 seconds into 5,511 units. * $T_x = 5511$* You will use a Python module `pydub` to synthesize audio, and it divides 10 seconds into 10,000 units.* The output of our model will divide 10 seconds into 1,375 units. * $T_y = 1375$ * For each of the 1375 time steps, the model predicts whether someone recently finished saying the trigger word "activate." * All of these are hyperparameters and can be changed (except the 441000, which is a function of the microphone). * We have chosen values that are within the standard range used for speech systems.
###Code
Ty = 1375 # The number of time steps in the output of our model
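# Correspondence between the different discretizations of the same 10 seconds
# (these numbers simply restate the definitions above):
#   raw audio:    10 s * 44100 samples/s = 441000 samples
#   spectrogram:  Tx = 5511 input time steps
#   pydub:        10 s * 1000 ms/s = 10000 steps of 1 ms
#   model output: Ty = 1375 prediction steps
# e.g. a word ending 5 s (5000 ms) into the clip maps to output step int(5000 * Ty / 10000) = 687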
###Output
_____no_output_____
###Markdown
1.3 - Generating a single training example Benefits of synthesizing dataBecause speech data is hard to acquire and label, you will synthesize your training data using the audio clips of activates, negatives, and backgrounds. * It is quite slow to record lots of 10 second audio clips with random "activates" in it. * Instead, it is easier to record lots of positives and negative words, and record background noise separately (or download background noise from free online sources). Process for Synthesizing an audio clip* To synthesize a single training example, you will: - Pick a random 10 second background audio clip - Randomly insert 0-4 audio clips of "activate" into this 10sec clip - Randomly insert 0-2 audio clips of negative words into this 10sec clip* Because you had synthesized the word "activate" into the background clip, you know exactly when in the 10 second clip the "activate" makes its appearance. * You'll see later that this makes it easier to generate the labels $y^{\langle t \rangle}$ as well. Pydub* You will use the pydub package to manipulate audio. * Pydub converts raw audio files into lists of Pydub data structures. * Don't worry about the details of the data structures.* Pydub uses 1ms as the discretization interval (1ms is 1 millisecond = 1/1000 seconds). * This is why a 10 second clip is always represented using 10,000 steps.
###Code
# Load audio segments using pydub
activates, negatives, backgrounds = load_raw_audio()
print("background len should be 10,000, since it is a 10 sec clip\n" + str(len(backgrounds[0])),"\n")
print("activate[0] len may be around 1000, since an `activate` audio clip is usually around 1 second (but varies a lot) \n" + str(len(activates[0])),"\n")
print("activate[1] len: different `activate` clips can have different lengths\n" + str(len(activates[1])),"\n")
###Output
_____no_output_____
###Markdown
Overlaying positive/negative 'word' audio clips on top of the background audio* Given a 10 second background clip and a short audio clip containing a positive or negative word, you need to be able to "add" the word audio clip on top of the background audio.* You will be inserting multiple clips of positive/negative words into the background, and you don't want to insert an "activate" or a random word somewhere that overlaps with another clip you had previously added. * To ensure that the 'word' audio segments do not overlap when inserted, you will keep track of the times of previously inserted audio clips. * To be clear, when you insert a 1 second "activate" onto a 10 second clip of cafe noise, **you do not end up with an 11 sec clip.** * The resulting audio clip is still 10 seconds long. * You'll see later how pydub allows you to do this. Label the positive/negative words* Recall that the labels $y^{\langle t \rangle}$ represent whether or not someone has just finished saying "activate." * $y^{\langle t \rangle} = 1$ when that that clip has finished saying "activate". * Given a background clip, we can initialize $y^{\langle t \rangle}=0$ for all $t$, since the clip doesn't contain any "activates." * When you insert or overlay an "activate" clip, you will also update labels for $y^{\langle t \rangle}$. * Rather than updating the label of a single time step, we will update 50 steps of the output to have target label 1. * Recall from the lecture on trigger word detection that updating several consecutive time steps can make the training data more balanced.* You will train a GRU (Gated Recurrent Unit) to detect when someone has **finished** saying "activate". Example* Suppose the synthesized "activate" clip ends at the 5 second mark in the 10 second audio - exactly halfway into the clip. * Recall that $T_y = 1375$, so timestep $687 = $ `int(1375*0.5)` corresponds to the moment 5 seconds into the audio clip. * Set $y^{\langle 688 \rangle} = 1$. * We will allow the GRU to detect "activate" anywhere within a short time-internal **after** this moment, so we actually **set 50 consecutive values** of the label $y^{\langle t \rangle}$ to 1. * Specifically, we have $y^{\langle 688 \rangle} = y^{\langle 689 \rangle} = \cdots = y^{\langle 737 \rangle} = 1$. Synthesized data is easier to label* This is another reason for synthesizing the training data: It's relatively straightforward to generate these labels $y^{\langle t \rangle}$ as described above. * In contrast, if you have 10sec of audio recorded on a microphone, it's quite time consuming for a person to listen to it and mark manually exactly when "activate" finished. Visualizing the labels* Here's a figure illustrating the labels $y^{\langle t \rangle}$ in a clip. * We have inserted "activate", "innocent", activate", "baby." * Note that the positive labels "1" are associated only with the positive words. **Figure 2** Helper functionsTo implement the training set synthesis process, you will use the following helper functions. * All of these functions will use a 1ms discretization interval* The 10 seconds of audio is always discretized into 10,000 steps. 1. `get_random_time_segment(segment_ms)` * Retrieves a random time segment from the background audio.2. `is_overlapping(segment_time, existing_segments)` * Checks if a time segment overlaps with existing segments3. 
`insert_audio_clip(background, audio_clip, existing_times)` * Inserts an audio segment at a random time in the background audio * Uses the functions `get_random_time_segment` and `is_overlapping`4. `insert_ones(y, segment_end_ms)` * Inserts additional 1's into the label vector y after the word "activate" Get a random time segment* The function `get_random_time_segment(segment_ms)` returns a random time segment onto which we can insert an audio clip of duration `segment_ms`. * Please read through the code to make sure you understand what it is doing.
###Code
def get_random_time_segment(segment_ms):
"""
Gets a random time segment of duration segment_ms in a 10,000 ms audio clip.
Arguments:
segment_ms -- the duration of the audio clip in ms ("ms" stands for "milliseconds")
Returns:
segment_time -- a tuple of (segment_start, segment_end) in ms
"""
segment_start = np.random.randint(low=0, high=10000-segment_ms) # Make sure segment doesn't run past the 10sec background
segment_end = segment_start + segment_ms - 1
return (segment_start, segment_end)
###Output
_____no_output_____
###Markdown
Check if audio clips are overlapping* Suppose you have inserted audio clips at segments (1000,1800) and (3400,4500). * The first segment starts at step 1000 and ends at step 1800. * The second segment starts at 3400 and ends at 4500.* If we are considering whether to insert a new audio clip at (3000,3600) does this overlap with one of the previously inserted segments? * In this case, (3000,3600) and (3400,4500) overlap, so we should decide against inserting a clip here.* For the purpose of this function, define (100,200) and (200,250) to be overlapping, since they overlap at timestep 200. * (100,199) and (200,250) are non-overlapping. **Exercise**: * Implement `is_overlapping(segment_time, existing_segments)` to check if a new time segment overlaps with any of the previous segments. * You will need to carry out 2 steps:1. Create a "False" flag, that you will later set to "True" if you find that there is an overlap.2. Loop over the previous_segments' start and end times. Compare these times to the segment's start and end times. If there is an overlap, set the flag defined in (1) as True. You can use:```pythonfor ....: if ... = ...: ...```Hint: There is overlap if:* The new segment starts before the previous segment ends **and*** The new segment ends after the previous segment starts.
###Code
# GRADED FUNCTION: is_overlapping
def is_overlapping(segment_time, previous_segments):
"""
Checks if the time of a segment overlaps with the times of existing segments.
Arguments:
segment_time -- a tuple of (segment_start, segment_end) for the new segment
previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments
Returns:
True if the time segment overlaps with any of the existing segments, False otherwise
"""
segment_start, segment_end = segment_time
### START CODE HERE ### (≈ 4 lines)
# Step 1: Initialize overlap as a "False" flag. (≈ 1 line)
overlap = False
# Step 2: loop over the previous_segments start and end times.
# Compare start/end times and set the flag to True if there is an overlap (≈ 3 lines)
for previous_start, previous_end in previous_segments:
if segment_start <= previous_end and segment_end >= previous_start:
overlap = True
### END CODE HERE ###
return overlap
overlap1 = is_overlapping((950, 1430), [(2000, 2550), (260, 949)])
overlap2 = is_overlapping((2305, 2950), [(824, 1532), (1900, 2305), (3424, 3656)])
print("Overlap 1 = ", overlap1)
print("Overlap 2 = ", overlap2)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Overlap 1** False **Overlap 2** True Insert audio clip* Let's use the previous helper functions to insert a new audio clip onto the 10 second background at a random time.* We will ensure that any newly inserted segment doesn't overlap with previously inserted segments. **Exercise**:* Implement `insert_audio_clip()` to overlay an audio clip onto the background 10sec clip. * You implement 4 steps:1. Get the length of the audio clip that is to be inserted. * Get a random time segment whose duration equals the duration of the audio clip that is to be inserted.2. Make sure that the time segment does not overlap with any of the previous time segments. * If it is overlapping, then go back to step 1 and pick a new time segment.3. Append the new time segment to the list of existing time segments * This keeps track of all the segments you've inserted. 4. Overlay the audio clip over the background using pydub. We have implemented this for you.
###Code
# GRADED FUNCTION: insert_audio_clip
def insert_audio_clip(background, audio_clip, previous_segments):
"""
Insert a new audio segment over the background noise at a random time step, ensuring that the
audio segment does not overlap with existing segments.
Arguments:
background -- a 10 second background audio recording.
audio_clip -- the audio clip to be inserted/overlaid.
previous_segments -- times where audio segments have already been placed
Returns:
new_background -- the updated background audio
"""
# Get the duration of the audio clip in ms
segment_ms = len(audio_clip)
### START CODE HERE ###
# Step 1: Use one of the helper functions to pick a random time segment onto which to insert
# the new audio clip. (≈ 1 line)
segment_time = get_random_time_segment(segment_ms)
# Step 2: Check if the new segment_time overlaps with one of the previous_segments. If so, keep
# picking new segment_time at random until it doesn't overlap. (≈ 2 lines)
while is_overlapping(segment_time, previous_segments) is True:
segment_time = get_random_time_segment(segment_ms)
# Step 3: Append the new segment_time to the list of previous_segments (≈ 1 line)
previous_segments.append(segment_time)
### END CODE HERE ###
# Step 4: Superpose audio segment and background
new_background = background.overlay(audio_clip, position = segment_time[0])
return new_background, segment_time
np.random.seed(5)
audio_clip, segment_time = insert_audio_clip(backgrounds[0], activates[0], [(3790, 4400)])
audio_clip.export("insert_test.wav", format="wav")
print("Segment Time: ", segment_time)
IPython.display.Audio("insert_test.wav")
###Output
_____no_output_____
###Markdown
**Expected Output** **Segment Time** (2254, 3169)
###Code
# Expected audio
IPython.display.Audio("audio_examples/insert_reference.wav")
###Output
_____no_output_____
###Markdown
Insert ones for the labels of the positive target* Implement code to update the labels $y^{\langle t \rangle}$, assuming you just inserted an "activate" audio clip.* In the code below, `y` is a `(1,1375)` dimensional vector, since $T_y = 1375$. * If the "activate" audio clip ends at time step $t$, then set $y^{\langle t+1 \rangle} = 1$ and also set the next 49 additional consecutive values to 1. * Notice that if the target word appears near the end of the entire audio clip, there may not be 50 additional time steps to set to 1. * Make sure you don't run off the end of the array and try to update `y[0][1375]`, since the valid indices are `y[0][0]` through `y[0][1374]` because $T_y = 1375$. * So if "activate" ends at step 1370, you would get only set `y[0][1371] = y[0][1372] = y[0][1373] = y[0][1374] = 1`**Exercise**: Implement `insert_ones()`. * You can use a for loop. * If you want to use Python's array slicing operations, you can do so as well.* If a segment ends at `segment_end_ms` (using a 10000 step discretization), * To convert it to the indexing for the outputs $y$ (using a $1375$ step discretization), we will use this formula: ``` segment_end_y = int(segment_end_ms * Ty / 10000.0)```
###Code
# GRADED FUNCTION: insert_ones
def insert_ones(y, segment_end_ms):
"""
Update the label vector y. The labels of the 50 output steps strictly after the end of the segment
should be set to 1. By strictly we mean that the label of segment_end_y should be 0 while, the
50 following labels should be ones.
Arguments:
y -- numpy array of shape (1, Ty), the labels of the training example
segment_end_ms -- the end time of the segment in ms
Returns:
y -- updated labels
"""
# duration of the background (in terms of spectrogram time-steps)
segment_end_y = int(segment_end_ms * Ty / 10000.0)
# Add 1 to the correct index in the background label (y)
### START CODE HERE ### (≈ 3 lines)
for i in range(segment_end_y + 1, segment_end_y + 50 + 1):
if i < Ty:
y[0, i] = 1
### END CODE HERE ###
return y
arr1 = insert_ones(np.zeros((1, Ty)), 9700)
plt.plot(insert_ones(arr1, 4251)[0,:])
print("sanity checks:", arr1[0][1333], arr1[0][634], arr1[0][635])
###Output
_____no_output_____
###Markdown
**Expected Output** **sanity checks**: 0.0 1.0 0.0 Creating a training exampleFinally, you can use `insert_audio_clip` and `insert_ones` to create a new training example.**Exercise**: Implement `create_training_example()`. You will need to carry out the following steps:1. Initialize the label vector $y$ as a numpy array of zeros and shape $(1, T_y)$.2. Initialize the set of existing segments to an empty list.3. Randomly select 0 to 4 "activate" audio clips, and insert them onto the 10 second clip. Also insert labels at the correct position in the label vector $y$.4. Randomly select 0 to 2 negative audio clips, and insert them into the 10 second clip.
###Code
# GRADED FUNCTION: create_training_example
def create_training_example(background, activates, negatives):
"""
Creates a training example with a given background, activates, and negatives.
Arguments:
background -- a 10 second background audio recording
activates -- a list of audio segments of the word "activate"
negatives -- a list of audio segments of random words that are not "activate"
Returns:
x -- the spectrogram of the training example
y -- the label at each time step of the spectrogram
"""
# Set the random seed
np.random.seed(18)
# Make background quieter
background = background - 20
### START CODE HERE ###
# Step 1: Initialize y (label vector) of zeros (≈ 1 line)
y = np.zeros((1, Ty))
# Step 2: Initialize segment times as an empty list (≈ 1 line)
previous_segments = []
### END CODE HERE ###
# Select 0-4 random "activate" audio clips from the entire list of "activates" recordings
number_of_activates = np.random.randint(0, 5)
random_indices = np.random.randint(len(activates), size=number_of_activates)
random_activates = [activates[i] for i in random_indices]
### START CODE HERE ### (≈ 3 lines)
# Step 3: Loop over randomly selected "activate" clips and insert in background
for random_activate in random_activates:
# Insert the audio clip on the background
background, segment_time = insert_audio_clip(background, random_activate, previous_segments)
# Retrieve segment_start and segment_end from segment_time
segment_start, segment_end = segment_time
# Insert labels in "y"
y = insert_ones(y, segment_end)
### END CODE HERE ###
# Select 0-2 random negatives audio recordings from the entire list of "negatives" recordings
number_of_negatives = np.random.randint(0, 3)
random_indices = np.random.randint(len(negatives), size=number_of_negatives)
random_negatives = [negatives[i] for i in random_indices]
### START CODE HERE ### (≈ 2 lines)
# Step 4: Loop over randomly selected negative clips and insert in background
for random_negative in random_negatives:
# Insert the audio clip on the background
background, _ = insert_audio_clip(background, random_negative, previous_segments)
### END CODE HERE ###
# Standardize the volume of the audio clip
background = match_target_amplitude(background, -20.0)
# Export new training example
file_handle = background.export("train" + ".wav", format="wav")
print("File (train.wav) was saved in your directory.")
# Get and plot spectrogram of the new recording (background with superposition of positive and negatives)
x = graph_spectrogram("train.wav")
return x, y
x, y = create_training_example(backgrounds[0], activates, negatives)
###Output
_____no_output_____
###Markdown
**Expected Output** Now you can listen to the training example you created and compare it to the spectrogram generated above.
###Code
IPython.display.Audio("train.wav")
###Output
_____no_output_____
###Markdown
**Expected Output**
###Code
IPython.display.Audio("audio_examples/train_reference.wav")
###Output
_____no_output_____
###Markdown
Finally, you can plot the associated labels for the generated training example.
###Code
plt.plot(y[0])
###Output
_____no_output_____
###Markdown
**Expected Output** 1.4 - Full training set* You've now implemented the code needed to generate a single training example. * We used this process to generate a large training set. * To save time, we've already generated a set of training examples.
###Code
# Load preprocessed training examples
X = np.load("./XY_train/X.npy")
Y = np.load("./XY_train/Y.npy")
###Output
_____no_output_____
###Markdown
1.5 - Development set* To test our model, we recorded a development set of 25 examples. * While our training data is synthesized, we want to create a development set using the same distribution as the real inputs. * Thus, we recorded 25 10-second audio clips of people saying "activate" and other random words, and labeled them by hand. * This follows the principle described in Course 3 "Structuring Machine Learning Projects" that we should create the dev set to be as similar as possible to the test set distribution * This is why our **dev set uses real audio** rather than synthesized audio.
###Code
# Load preprocessed dev set examples
X_dev = np.load("./XY_dev/X_dev.npy")
Y_dev = np.load("./XY_dev/Y_dev.npy")
###Output
_____no_output_____
###Markdown
2 - Model* Now that you've built a dataset, let's write and train a trigger word detection model! * The model will use 1-D convolutional layers, GRU layers, and dense layers. * Let's load the packages that will allow you to use these layers in Keras. This might take a minute to load.
###Code
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
2.1 - Build the modelOur goal is to build a network that will ingest a spectrogram and output a signal when it detects the trigger word. This network will use 4 layers: * A convolutional layer * Two GRU layers * A dense layer. Here is the architecture we will use. **Figure 3** 1D convolutional layerOne key layer of this model is the 1D convolutional step (near the bottom of Figure 3). * It inputs the 5511 step spectrogram. Each step is a vector of 101 units.* It outputs a 1375 step output* This output is further processed by multiple layers to get the final $T_y = 1375$ step output. * This 1D convolutional layer plays a role similar to the 2D convolutions you saw in Course 4, of extracting low-level features and then possibly generating an output of a smaller dimension. * Computationally, the 1-D conv layer also helps speed up the model because now the GRU can process only 1375 timesteps rather than 5511 timesteps. GRU, dense and sigmoid* The two GRU layers read the sequence of inputs from left to right.* A dense plus sigmoid layer makes a prediction for $y^{\langle t \rangle}$. * Because $y$ is a binary value (0 or 1), we use a sigmoid output at the last layer to estimate the chance of the output being 1, corresponding to the user having just said "activate." Unidirectional RNN* Note that we use a **unidirectional RNN** rather than a bidirectional RNN. * This is really important for trigger word detection, since we want to be able to detect the trigger word almost immediately after it is said. * If we used a bidirectional RNN, we would have to wait for the whole 10sec of audio to be recorded before we could tell if "activate" was said in the first second of the audio clip. Implement the modelImplementing the model can be done in four steps: **Step 1**: CONV layer. Use `Conv1D()` to implement this, with 196 filters, a filter size of 15 (`kernel_size=15`), and stride of 4. [conv1d](https://keras.io/layers/convolutional/conv1d)```Pythonoutput_x = Conv1D(filters=...,kernel_size=...,strides=...)(input_x)```* Follow this with a ReLu activation. Note that we can pass in the name of the desired activation as a string, all in lowercase letters.```Pythonoutput_x = Activation("...")(input_x)```* Follow this with dropout, using a keep rate of 0.8 ```Pythonoutput_x = Dropout(rate=...)(input_x)```**Step 2**: First GRU layer. To generate the GRU layer, use 128 units.```Pythonoutput_x = GRU(units=..., return_sequences = ...)(input_x)```* Return sequences instead of just the last time step's prediction to ensures that all the GRU's hidden states are fed to the next layer. * Follow this with dropout, using a keep rate of 0.8.* Follow this with batch normalization. No parameters need to be set.```Pythonoutput_x = BatchNormalization()(input_x)```**Step 3**: Second GRU layer. This has the same specifications as the first GRU layer.* Follow this with a dropout, batch normalization, and then another dropout.**Step 4**: Create a time-distributed dense layer as follows: ```PythonX = TimeDistributed(Dense(1, activation = "sigmoid"))(X)```This creates a dense layer followed by a sigmoid, so that the parameters used for the dense layer are the same for every time step. Documentation:* [Keras documentation on wrappers](https://keras.io/layers/wrappers/). 
* To learn more, you can read this blog post [How to Use the TimeDistributed Layer in Keras](https://machinelearningmastery.com/timedistributed-layer-for-long-short-term-memory-networks-in-python/).**Exercise**: Implement `model()`, the architecture is presented in Figure 3.
###Code
# GRADED FUNCTION: model
def model(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape = input_shape)
### START CODE HERE ###
# Step 1: CONV layer (≈4 lines)
X = Conv1D(filters = 196, kernel_size=15, strides=4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation('relu')(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
# Step 3: Second GRU Layer (≈4 lines)
X = GRU(units=128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 4: Time-distributed dense layer (see given code in instructions) (≈1 line)
X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid)
### END CODE HERE ###
model = Model(inputs = X_input, outputs = X)
return model
model = model(input_shape = (Tx, n_freq))
###Output
_____no_output_____
###Markdown
Let's print the model summary to keep track of the shapes.
###Code
model.summary()
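# Why the Conv1D layer yields 1375 steps (sanity check, assuming the usual 'valid' convolution length):
# conv_steps = (Tx - kernel_size) // stride + 1 = (5511 - 15) // 4 + 1 = 1374 + 1 = 1375 = Ty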
###Output
_____no_output_____
###Markdown
**Expected Output**: **Total params** 522,561 **Trainable params** 521,657 **Non-trainable params** 904 The output of the network is of shape (None, 1375, 1) while the input is (None, 5511, 101). The Conv1D has reduced the number of steps from 5511 to 1375. 2.2 - Fit the model * Trigger word detection takes a long time to train. * To save time, we've already trained a model for about 3 hours on a GPU using the architecture you built above, and a large training set of about 4000 examples. * Let's load the model.
###Code
model = load_model('./models/tr_model.h5')
###Output
_____no_output_____
###Markdown
You can train the model further, using the Adam optimizer and binary cross entropy loss, as follows. This will run quickly because we are training just for one epoch and with a small training set of 26 examples.
###Code
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
model.fit(X, Y, batch_size = 5, epochs=1)
###Output
_____no_output_____
###Markdown
2.3 - Test the model Finally, let's see how your model performs on the dev set.
###Code
loss, acc = model.evaluate(X_dev, Y_dev)
print("Dev set accuracy = ", acc)
###Output
_____no_output_____
###Markdown
This looks pretty good! * However, accuracy isn't a great metric for this task * Since the labels are heavily skewed to 0's, a neural network that just outputs 0's would get slightly over 90% accuracy. * We could define more useful metrics such as F1 score or Precision/Recall. * Let's not bother with that here, and instead just empirically see how the model does with some predictions. 3 - Making PredictionsNow that you have built a working model for trigger word detection, let's use it to make predictions. This code snippet runs audio (saved in a wav file) through the network. <!--can use your model to make predictions on new audio clips.You will first need to compute the predictions for an input audio clip.**Exercise**: Implement predict_activates(). You will need to do the following:1. Compute the spectrogram for the audio file2. Use `np.swap` and `np.expand_dims` to reshape your input to size (1, Tx, n_freqs)5. Use forward propagation on your model to compute the prediction at each output step!-->
###Code
def detect_triggerword(filename):
plt.subplot(2, 1, 1)
x = graph_spectrogram(filename)
# the spectrogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model
x = x.swapaxes(0,1)
x = np.expand_dims(x, axis=0)
predictions = model.predict(x)
plt.subplot(2, 1, 2)
plt.plot(predictions[0,:,0])
plt.ylabel('probability')
plt.show()
return predictions
###Output
_____no_output_____
###Markdown
Insert a chime to acknowledge the "activate" trigger* Once you've estimated the probability of having detected the word "activate" at each output step, you can trigger a "chiming" sound to play when the probability is above a certain threshold. * $y^{\langle t \rangle}$ might be near 1 for many values in a row after "activate" is said, yet we want to chime only once. * So we will insert a chime sound at most once every 75 output steps. * This will help prevent us from inserting two chimes for a single instance of "activate". * This plays a role similar to non-max suppression from computer vision.<!-- **Exercise**: Implement chime_on_activate(). You will need to do the following:1. Loop over the predicted probabilities at each output step2. When the prediction is larger than the threshold and more than 75 consecutive time steps have passed, insert a "chime" sound onto the original audio clipUse this code to convert from the 1,375 step discretization to the 10,000 step discretization and insert a "chime" using pydub:` audio_clip = audio_clip.overlay(chime, position = ((i / Ty) * audio.duration_seconds)*1000)`!-->
###Code
chime_file = "audio_examples/chime.wav"
def chime_on_activate(filename, predictions, threshold):
audio_clip = AudioSegment.from_wav(filename)
chime = AudioSegment.from_wav(chime_file)
Ty = predictions.shape[1]
# Step 1: Initialize the number of consecutive output steps to 0
consecutive_timesteps = 0
# Step 2: Loop over the output steps in the y
for i in range(Ty):
# Step 3: Increment consecutive output steps
consecutive_timesteps += 1
# Step 4: If prediction is higher than the threshold and more than 75 consecutive output steps have passed
if predictions[0,i,0] > threshold and consecutive_timesteps > 75:
# Step 5: Superpose audio and background using pydub
audio_clip = audio_clip.overlay(chime, position = ((i / Ty) * audio_clip.duration_seconds)*1000)
# Step 6: Reset consecutive output steps to 0
consecutive_timesteps = 0
audio_clip.export("chime_output.wav", format='wav')
###Output
_____no_output_____
###Markdown
3.3 - Test on dev examples Let's explore how our model performs on two unseen audio clips from the development set. Let's first listen to the two dev set clips.
###Code
IPython.display.Audio("./raw_data/dev/1.wav")
IPython.display.Audio("./raw_data/dev/2.wav")
###Output
_____no_output_____
###Markdown
Now let's run the model on these audio clips and see if it adds a chime after "activate"!
###Code
filename = "./raw_data/dev/1.wav"
prediction = detect_triggerword(filename)
chime_on_activate(filename, prediction, 0.5)
IPython.display.Audio("./chime_output.wav")
filename = "./raw_data/dev/2.wav"
prediction = detect_triggerword(filename)
chime_on_activate(filename, prediction, 0.5)
IPython.display.Audio("./chime_output.wav")
###Output
_____no_output_____
###Markdown
Congratulations You've come to the end of this assignment! Here's what you should remember:- Data synthesis is an effective way to create a large training set for speech problems, specifically trigger word detection. - Using a spectrogram and optionally a 1D conv layer is a common pre-processing step prior to passing audio data to an RNN, GRU or LSTM.- An end-to-end deep learning approach can be used to build a very effective trigger word detection system. *Congratulations* on finishing the final assignment! Thank you for sticking with us through the end and for all the hard work you've put into learning deep learning. We hope you have enjoyed the course! 4 - Try your own example! (OPTIONAL/UNGRADED)In this optional and ungraded portion of this notebook, you can try your model on your own audio clips! * Record a 10 second audio clip of you saying the word "activate" and other random words, and upload it to the Coursera hub as `myaudio.wav`. * Be sure to upload the audio as a wav file. * If your audio is recorded in a different format (such as mp3) there is free software that you can find online for converting it to wav. * If your audio recording is not 10 seconds, the code below will either trim or pad it as needed to make it 10 seconds.
###Code
# Preprocess the audio to the correct format
def preprocess_audio(filename):
# Trim or pad audio segment to 10000ms
padding = AudioSegment.silent(duration=10000)
segment = AudioSegment.from_wav(filename)[:10000]
segment = padding.overlay(segment)
# Set frame rate to 44100
segment = segment.set_frame_rate(44100)
# Export as wav
segment.export(filename, format='wav')
###Output
_____no_output_____
###Markdown
Once you've uploaded your audio file to Coursera, put the path to your file in the variable below.
###Code
your_filename = "audio_examples/my_audio.wav"
preprocess_audio(your_filename)
IPython.display.Audio(your_filename) # listen to the audio you uploaded
###Output
_____no_output_____
###Markdown
Finally, use the model to predict when you say activate in the 10 second audio clip, and trigger a chime. If beeps are not being added appropriately, try to adjust the chime_threshold.
###Code
chime_threshold = 0.5
prediction = detect_triggerword(your_filename)
chime_on_activate(your_filename, prediction, chime_threshold)
IPython.display.Audio("./chime_output.wav")
###Output
_____no_output_____ |
Unsupervised_Learning/Feature Scaling.ipynb | ###Markdown
Feature ScalingWith any distance based machine learning model (regularized regression methods, neural networks, and now kmeans), you will want to scale your data. If you have some features that are on completely different scales, this can greatly impact the clusters you get when using K-Means. In this notebook, you will get to see this first hand. To begin, let's read in the necessary libraries.
###Code
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn import preprocessing as p
%matplotlib inline
plt.rcParams['figure.figsize'] = (16, 9)
import helpers2 as h
import tests as t
# Create the dataset for the notebook
data = h.simulate_data(200, 2, 4)
df = pd.DataFrame(data)
df.columns = ['height', 'weight']
df['height'] = np.abs(df['height']*100)
df['weight'] = df['weight'] + np.random.normal(50, 10, 200)
###Output
_____no_output_____
###Markdown
`1.` Next, take a look at the data to get familiar with it. The dataset has two columns, and it is stored in the **df** variable. It might be useful to get an idea of the spread in the current data, as well as a visual of the points.
###Code
#Take a look at the data
df.info()
#use this cell if you would like as well
df.describe()
###Output
_____no_output_____
###Markdown
Now that we've got a dataset, let's look at some options for scaling the data. As well as how the data might be scaled. There are two very common types of feature scaling that we should discuss:**I. MinMaxScaler**In some cases it is useful to think of your data in terms of the percent they are as compared to the maximum value. In these cases, you will want to use **MinMaxScaler**.**II. StandardScaler**Another very popular type of scaling is to scale data so that it has mean 0 and variance 1. In these cases, you will want to use **StandardScaler**. It is probably more appropriate with this data to use **StandardScaler**. However, to get practice with feature scaling methods in python, we will perform both.`2.` First let's fit the **StandardScaler** transformation to this dataset. I will do this one so you can see how to apply preprocessing in sklearn.
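As a quick illustrative aside before that (not part of the original exercise): the two transformations reduce to simple formulas, StandardScaler computes (x - mean) / std and MinMaxScaler computes (x - min) / (max - min). A minimal sketch on a toy column:
###Code
# Illustrative sketch only: manual scaling formulas vs. sklearn's scalers on a toy column
toy = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
manual_standard = (toy - toy.mean()) / toy.std()             # mean 0, variance 1
manual_minmax = (toy - toy.min()) / (toy.max() - toy.min())  # squeezed into [0, 1]
print(np.allclose(manual_standard, p.StandardScaler().fit_transform(toy)))
print(np.allclose(manual_minmax, p.MinMaxScaler().fit_transform(toy)))
###Output
_____no_output_____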
###Code
df_ss = p.StandardScaler().fit_transform(df) # Fit and transform the data
df_ss = pd.DataFrame(df_ss) #create a dataframe
df_ss.columns = ['height', 'weight'] #add column names again
plt.scatter(df_ss['height'], df_ss['weight']); # create a plot
###Output
_____no_output_____
###Markdown
`3.` Now it's your turn. Try fitting the **MinMaxScaler** transformation to this dataset. You should be able to use the previous example to assist.
###Code
# fit and transform
df_mm = p.MinMaxScaler().fit_transform(df)
#create a dataframe
#change the column names
#plot the data
df_mm = pd.DataFrame(df_mm)
df_mm.columns= ['height1', 'weight1']
plt.scatter(df_mm['height1'], df_mm['weight1'])
###Output
_____no_output_____
###Markdown
`4.` Now let's take a look at how kmeans divides the dataset into different groups for each of the different scalings of the data. Did you end up with different clusters when the data was scaled differently?
###Code
def fit_kmeans(data, centers):
'''
INPUT:
data = the dataset you would like to fit kmeans to (dataframe)
centers = the number of centroids (int)
OUTPUT:
labels - the labels for each datapoint to which group it belongs (nparray)
'''
kmeans = KMeans(centers)
labels = kmeans.fit_predict(data)
return labels
labels = fit_kmeans(df, 10) #fit kmeans to get the labels
# Plot the original data with clusters
plt.scatter(df['height'], df['weight'], c=labels, cmap='Set1');
#plot each of the scaled datasets
labels = fit_kmeans(df_ss, 10)
plt.scatter(df_ss['height'], df_ss['weight'],c=labels, cmap='Set1') # create a plot
#another plot of the other scaled dataset
labels = fit_kmeans(df_mm, 10)
plt.scatter(df_mm['height1'], df_mm['weight1'],c=labels, cmap='Set1') # create a plot
###Output
_____no_output_____
###Markdown
Write your response here!
###Code
# The cluster assignments change when the data is scaled differently
###Output
_____no_output_____ |
Lesson09/Exercise28.ipynb | ###Markdown
Import Libraries & Process Data
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset_training = pd.read_csv('AAPL_train.csv')
dataset_training.head()
training_data = dataset_training.iloc[:, 1:2].values
training_data
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_data_scaled = sc.fit_transform(training_data)
training_data_scaled
###Output
_____no_output_____
###Markdown
Create Data Time Stamps & Reshape the Data
###Code
X_train = []
y_train = []
for i in range(60, 1258):
X_train.append(training_data_scaled[i-60:i, 0])
y_train.append(training_data_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train
###Output
_____no_output_____
###Markdown
Create & Compile an RNN Architecture
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
model = Sequential()
model.add(LSTM(units = 100, return_sequences = True, input_shape = (X_train.shape[1], 1)))
# Adding a second LSTM layer (note: no Dropout regularisation is actually applied in this model, despite the import above)
model.add(LSTM(units = 100, return_sequences = True))
# Adding a third LSTM layer
model.add(LSTM(units = 100, return_sequences = True))
# Adding a fourth LSTM layer
model.add(LSTM(units = 100))
# Adding the output layer
model.add(Dense(units = 1))
# Compiling the RNN
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
model.fit(X_train, y_train, epochs = 100, batch_size = 32)
###Output
WARNING:tensorflow:From C:\Users\Ritesh\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/100
1198/1198 [==============================] - 8s 7ms/step - loss: 0.0215
Epoch 2/100
1198/1198 [==============================] - 7s 6ms/step - loss: 0.0022
Epoch 3/100
1198/1198 [==============================] - 6s 5ms/step - loss: 0.0019
Epoch 4/100
1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018A: 0s - loss: 0.00
Epoch 5/100
1198/1198 [==============================] - 7s 6ms/step - loss: 0.0020A: 4s - ETA: 2s -
Epoch 6/100
1198/1198 [==============================] - 7s 6ms/step - loss: 0.0016
Epoch 7/100
1198/1198 [==============================] - 7s 6ms/step - loss: 0.0014
Epoch 8/100
1198/1198 [==============================] - 6s 5ms/step - loss: 0.0012
Epoch 9/100
1198/1198 [==============================] - 6s 5ms/step - loss: 0.0014
Epoch 10/100
1198/1198 [==============================] - 7s 5ms/step - loss: 0.0010
Epoch 11/100
1198/1198 [==============================] - 7s 6ms/step - loss: 9.8153e-04
Epoch 12/100
1198/1198 [==============================] - 6s 5ms/step - loss: 0.0011
Epoch 13/100
1198/1198 [==============================] - 7s 6ms/step - loss: 0.0010
Epoch 14/100
1198/1198 [==============================] - 8s 7ms/step - loss: 9.2597e-04
Epoch 15/100
1198/1198 [==============================] - 7s 6ms/step - loss: 8.8755e-04
Epoch 16/100
1198/1198 [==============================] - 6s 5ms/step - loss: 8.6967e-04
Epoch 17/100
1198/1198 [==============================] - 6s 5ms/step - loss: 9.0737e-04
Epoch 18/100
1198/1198 [==============================] - 6s 5ms/step - loss: 8.2855e-04
Epoch 19/100
1198/1198 [==============================] - 6s 5ms/step - loss: 8.1373e-04
Epoch 20/100
1198/1198 [==============================] - 6s 5ms/step - loss: 7.8893e-04
Epoch 21/100
1198/1198 [==============================] - 5s 5ms/step - loss: 8.0026e-04
Epoch 22/100
1198/1198 [==============================] - 6s 5ms/step - loss: 6.6137e-04
Epoch 23/100
1198/1198 [==============================] - 6s 5ms/step - loss: 7.6565e-04
Epoch 24/100
1198/1198 [==============================] - 6s 5ms/step - loss: 6.9136e-04
Epoch 25/100
1198/1198 [==============================] - 6s 5ms/step - loss: 6.7790e-04A: 2s - lo
Epoch 26/100
1198/1198 [==============================] - 6s 5ms/step - loss: 5.2027e-04
Epoch 27/100
1198/1198 [==============================] - 6s 5ms/step - loss: 6.0694e-04
Epoch 28/100
1198/1198 [==============================] - 6s 5ms/step - loss: 5.0751e-04
Epoch 29/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.6917e-04
Epoch 30/100
1198/1198 [==============================] - 7s 6ms/step - loss: 4.4982e-04
Epoch 31/100
1198/1198 [==============================] - 7s 6ms/step - loss: 5.3917e-04
Epoch 32/100
1198/1198 [==============================] - 7s 6ms/step - loss: 4.0272e-04
Epoch 33/100
1198/1198 [==============================] - 7s 6ms/step - loss: 4.0767e-04
Epoch 34/100
1198/1198 [==============================] - 7s 5ms/step - loss: 6.4120e-04
Epoch 35/100
1198/1198 [==============================] - 6s 5ms/step - loss: 5.6197e-04A: 1s - loss:
Epoch 36/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.7502e-04
Epoch 37/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.4644e-04
Epoch 38/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.2442e-04
Epoch 39/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.0509e-04
Epoch 40/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.2003e-04
Epoch 41/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.3146e-04
Epoch 42/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.8925e-04
Epoch 43/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.7875e-04
Epoch 44/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.9879e-04
Epoch 45/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.8192e-04
Epoch 46/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.8591e-04
Epoch 47/100
1198/1198 [==============================] - 7s 6ms/step - loss: 3.1839e-04
Epoch 48/100
1198/1198 [==============================] - 7s 6ms/step - loss: 3.1701e-04
Epoch 49/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.8205e-04
Epoch 50/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.9261e-04
Epoch 51/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.5114e-04A: 1s - loss:
Epoch 52/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.5783e-04
Epoch 53/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.9359e-04
Epoch 54/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.8247e-04
Epoch 55/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.3204e-04
Epoch 56/100
1198/1198 [==============================] - 7s 6ms/step - loss: 3.0866e-04
Epoch 57/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.7694e-04
Epoch 58/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.4494e-04
Epoch 59/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.7662e-04
Epoch 60/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.0402e-04
Epoch 61/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.7694e-04
Epoch 62/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.9211e-04
Epoch 63/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.7332e-04
Epoch 64/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.6671e-04
Epoch 65/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.6654e-04
Epoch 66/100
1198/1198 [==============================] - 7s 5ms/step - loss: 2.3447e-04
Epoch 67/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.7044e-04
Epoch 68/100
1198/1198 [==============================] - 8s 7ms/step - loss: 3.1124e-04
Epoch 69/100
1198/1198 [==============================] - 9s 7ms/step - loss: 3.5881e-04
Epoch 70/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.6652e-04A: 1s - loss: 2.7
Epoch 71/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.3381e-04
Epoch 72/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.4420e-04
Epoch 73/100
1198/1198 [==============================] - 8s 7ms/step - loss: 2.9664e-04
Epoch 74/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.5152e-04
Epoch 75/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.8806e-04
Epoch 76/100
1198/1198 [==============================] - 7s 5ms/step - loss: 3.8401e-04
Epoch 77/100
1198/1198 [==============================] - 6s 5ms/step - loss: 4.1338e-04
Epoch 78/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.1046e-04
Epoch 79/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.4760e-04A: 3s
Epoch 80/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.4440e-04
Epoch 81/100
1198/1198 [==============================] - 7s 6ms/step - loss: 2.4429e-04
Epoch 82/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.8221e-04
Epoch 83/100
1198/1198 [==============================] - 7s 5ms/step - loss: 2.7073e-04A:
Epoch 84/100
1198/1198 [==============================] - 6s 5ms/step - loss: 3.2201e-04
Epoch 85/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.9263e-04
Epoch 86/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.4424e-04
Epoch 87/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.1479e-04
Epoch 88/100
1198/1198 [==============================] - 6s 5ms/step - loss: 2.3687e-04
Epoch 89/100
###Markdown
Prepare the Test Data, Concatenate Test & Train Datasets
###Code
dataset_testing = pd.read_csv("AAPL_test.csv")
actual_stock_price = dataset_testing.iloc[:, 1:2].values
actual_stock_price
total_data = pd.concat((dataset_training['Open'], dataset_testing['Open']), axis = 0)
inputs = total_data[len(total_data) - len(dataset_testing) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 81):
X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = model.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
###Output
_____no_output_____
###Markdown
Visualize the Results
###Code
# Visualising the results
plt.plot(actual_stock_price, color = 'green', label = 'Real Apple Stock Price',ls='--')
plt.plot(predicted_stock_price, color = 'red', label = 'Predicted Apple Stock Price',ls='-')
plt.title('Predicted Stock Price')
plt.xlabel('Time in days')
plt.ylabel('Real Stock Price')
plt.legend()
plt.show()
###Output
_____no_output_____ |
wk2_dataframes/py_wk2_Dataframes.ipynb | ###Markdown
Welcome to Week 2: Dataframes! In weeks 2 and 3, we're going to focus on two things, which are essentially the basics of all downstream bioinformatics that you'll do.First, learning to work with dataframes: we're going use the package pandas, which is one of the most commonly used packages for datascience. https://pandas.pydata.org/docs/getting_started/10min.htmlmin has a brief introduction, if you are curious. The core idea of dataframes - the primary datatype associated with pandas - is that you have a two-dimensional matrix of data (i.e., rows and columns, like an Excel spreadsheet), and can associate a *label* with each row and column. For example, with scRNA data, you have a 2D matrix of gene expression counts, where each row is a gene and each column is a cell. If you wanted to look up the expression for a particular gene in a particular cell, rather than have to know the particular XY "coordinates" of that datapoint (i.e., gene row 1827 and cell column 2937), you can just pass in the names of the gene and cell. If you wanted to sort the dataframe by the expression of a particular gene, you'd want to make sure that the pairings of gene names, cell names, and datapoints stay correct through this sorting process, and pandas dataframes help take care of this to keep everything organized and correct. Don't worry if this doesn't make too much sense now - it'll make more sense when we start playing with actual examples.In addition to pandas, we're going to use the package numpy, which is the core "math" package ("scientific computing", as they describe it - https://docs.scipy.org/doc/numpy/user/quickstart.html and https://docs.scipy.org/doc/numpy/user/basics.html). Oftentimes when working with large datasets, you want to perform a simple operation (for example, log transform or depth normalize) on many pieces of data. Numpy implements a lot of tricks under the hood to perform vectorized math operations very efficiently - doing the same operation to many pieces of data. Numpy is built around *arrays*, which are a 1D datatype: essentially a list, but with a lot of added tricks. Say you have a bunch of datapoints - gene counts, for example - and want to multiply each one by 2. Using a list, you would need to do this one-by-one for each list: iterate through the entire list with a for loop (or list comprehension) and multiply each value by two. However, using a numpy array, you can simply multiply the entire array by 2, and numpy will return the element-wise product of the array by 2 (multiplying each elementy by 2). Again, this will make a little more sense once you've played around with it a little.**I would recommend skimming through the introductions for pandas and numpy, since you'll want to become familiar with them both for this lesson and going forward. It's not as crucial that you memorize each function and every feature, but good to just have a sense of what is possible, so that you can remember that there should be a way to do something easily, then google for it later on and re-figure out how to do it.*** https://pandas.pydata.org/docs/getting_started/10min.htmlmin* https://docs.scipy.org/doc/numpy/user/quickstart.htmlThe second thing that we're going to focus on is plotting. **Matplotlib** is the core plotting package in Python. It is built around two concepts: the figure, which is the "overall" image - think about it like a piece of paper or figure panel - and axes, which are the specific XY axes where you plot things. 
The simplest example is a figure with one axis - say a simple scatter plot. This is what you'll do 90% of the time. Sometimes, though, you might want to group together multiple plots at the same time - say you have four scatter plots you want to make together. In this case, the figure might have four axes (a 2-by-2 grid of scatter plots). The important thing to remember, is that when you're plotting, you 1) create a figure, 2) create an axis, 3) plot things on that axis, [4) create & plot on any additional axes if applicable], and 5) save the figure (which contains the axis/axes you've plotting things on).Two useful matplotlib links with some tutorials and example plots:* https://matplotlib.org/tutorials/index.html* https://matplotlib.org/gallery/index.htmlThree other packages that we aren't going to use here, but you will also encounter down the road: scipy, which has a lot of more specialized functions for things like statistics (and many others - https://docs.scipy.org/doc/scipy/reference/, https://docs.scipy.org/doc/scipy/reference/tutorial/index.html), and **scikit-learn**, which is the core machine learning package (https://scikit-learn.org/stable/getting_started.html), and **seaborn**, which is another data visualization package (https://seaborn.pydata.org/introduction.html) built on matplotlib. Import Statements First, let's import the packages that we are going to use today: pandas, numpy, and matplotlib.We're going to abbreviate their names as follows: import pandas as pd import numpy as np import matplotlib as mpl Then, when we want to do things with numpy, for example, such as the log10() function, rather than say: numpy.log10(my_data), we can say np.log10(my_data). Note that if we wanted to just import numpy (and not rename it - so saying numpy.log10(my_data)), we would just say: import numpy We can also import a particular function from numpy, rather than everything: from numpy import log10 If we ran that, we would be importing just the log10() function from numpy, rather than the package as a whole. We would then access this function by saying log10(my_data), rather than np.log10(my_dat).You can also put these things together and say: from matplotlib import pyplot as plt Here, we're importing pyplot from the matplotlib package, and renaming it plt to save us some typing.
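To make the import variants above concrete, here is a tiny illustrative sketch (all three styles end up reaching the same function):
###Code
# illustrative only: the different import styles all reach the same numpy function
import numpy
import numpy as np
from numpy import log10
print(numpy.log10(100.0), np.log10(100.0), log10(100.0))  # all three print 2.0
###Output
_____no_output_____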
###Code
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
1. Lists, loops, and arrays.First, we're going to do a quick overview of lists vs. arrays, and also list comprehensions. 1.1 Lists, loops, and list comprehensions.Here, I've created a list, where each element is a string. Let's say I want to convert each element to be an integer. There are two ways to do this.In the first way, we're creating a new empty list, iterating through each element of string_list, converting it to an integer, and adding it to our new empty list.In the second way, we're using a list comprehension to do this all in one step.
###Code
string_list = ['1','2','3','4','5','6','7','8','9','10']
# first way
int_list = []
for i in string_list:
int_list += [int(i)]
print(int_list)
# second way
int_list2 = [int(i) for i in string_list]
print(int_list2)
# checking that they are equal
print(int_list == int_list2)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
True
###Markdown
List comprehensions are your friend - they can make it easier to do simple operations to an entire list. The basic syntax is: [function(variable) for variable in thing_to_iterate_over]https://treyhunner.com/2015/12/python-list-comprehensions-now-in-color/ has a good short tutorial that is worth reading through.You can also make a list comprehension include a conditional: [function(variable) for variable in thing_to_iterate_over if condition] I'm going to provide a few examples below of the same thing done either with a loop or list comprehension, and then ask you to convert a few loops to comprehensions and vice versa. 1.1 Examples
###Code
# make a list containing the integers from 0 to 10
# here, we are using the range() function, which will automatically start at 0
# and then iterate up to the number you provide
# with a loop
list_1 = []
for i in range(10):
list_1 += [i]
# list comprehension
list_2 = [i for i in range(10)]
print(list_1)
print(list_2)
# make a list containing the integers from 10 to 20, but as strings.
# if you provide two inputs to the range() function, it will start at the first one, and end at the second one
# with a loop
list_1 = []
for i in range(10, 20):
list_1 += [str(i)]
# list comprehension
list_2 = [str(i) for i in range(10, 20)]
print(list_1)
print(list_2)
# make a list of the first ten integers squared
# note that you can say either i*i or i**2 to square a number
# to cube it, you could say i*i*i or i**3, and so on
# with a loop
list_1 = []
for i in range(10):
list_1 += [i * i]
# list comprehension
list_2 = [i*i for i in range(10)]
print(list_1)
print(list_2)
# iterate through input_list
# if the integer is less than or equal to 10, then square it
# otherwise, don't include it
input_list = [10, 4, 28, 3, 1, 930, 3928, 6, 2, 8, 2038]
# with a loop
list_1 = []
for i in input_list:
if i <= 10:
list_1 += [i**2]
# list comprehension
list_2 = [i*i for i in input_list if i <= 10]
print(list_1)
print(list_2)
###Output
[100, 16, 9, 1, 36, 4, 64]
[100, 16, 9, 1, 36, 4, 64]
###Markdown
The following two examples are examples where you can write things with a list comprehension - but it starts to get a little hard to follow, and might just be better off writing with a normal list, because the list comprehension starts to become a little unreadable.
###Code
# iterate through input list
# if it is less than or equal to 10, return the integer squared
# otherwise, return the integer raised to the fourth power
# note that when you have an if...else that the location gets moved around
input_list = [10, 4, 28, 3, 1, 930, 3928, 6, 2, 8, 2038]
# with a loop
list_1 = []
for i in input_list:
if i <= 10:
list_1 += [i**2]
else:
list_1 += [i**4]
# list comprehension
list_2 = [i**2 if i <= 10 else i ** 4 for i in input_list]
print(list_1)
print(list_2)
# iterate through integers from 0 to 10
# if it is less than or equal to 5, return 'black'
# otherwise, if it is less than 8, return 'red'
# otherwise, return 'blue'
# note that when you have an if...else that the location gets moved around
# with a loop
list_1 = []
for i in range(10):
if i <= 5:
list_1 += ['black']
elif i < 8:
list_1 += ['red']
else:
list_1 += ['blue']
# list comprehension
list_2 = ['black' if i <= 5 else 'red' if i < 8 else 'blue' for i in range(10)]
print(list_1)
print(list_2)
###Output
['black', 'black', 'black', 'black', 'black', 'black', 'red', 'red', 'blue', 'blue']
['black', 'black', 'black', 'black', 'black', 'black', 'red', 'red', 'blue', 'blue']
###Markdown
1.2 ProblemsConvert the loop to a list comprehension, and the list comprehensions to loops. Check that the results are equal.
###Code
list_1 = []
for i in range(20):
list_1 += [4 * i - 2]
print(list_1)
# write answer below
input_list = ['black','black','orange','black','red','black','red','black','red','red','black','green','blue','purple']
list_1 = []
for i in input_list:
if i == 'black':
list_1 += [1]
else:
list_1 += [5]
print(list_1)
# write answer below
list_1 = [str(i / 2) for i in range(15)]
print(list_1)
# write answer below
input_list = [1,4,8,2,40,2038,233,23,1,5,3,882]
list_1 = [i for i in input_list if i % 2 == 0]
print(list_1)
# write your answer below
###Output
[4, 8, 2, 40, 2038, 882]
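###Markdown
For reference, here is one possible set of answers to the four problems above (these are just sketches -- any equivalent loop or comprehension is fine).
###Code
# possible answer 1: loop -> list comprehension
print([4 * i - 2 for i in range(20)])
# possible answer 2: loop with if/else -> list comprehension
input_list = ['black','black','orange','black','red','black','red','black','red','red','black','green','blue','purple']
print([1 if i == 'black' else 5 for i in input_list])
# possible answer 3: list comprehension -> loop
list_a = []
for i in range(15):
    list_a += [str(i / 2)]
print(list_a)
# possible answer 4: list comprehension with a condition -> loop
input_list = [1,4,8,2,40,2038,233,23,1,5,3,882]
list_b = []
for i in input_list:
    if i % 2 == 0:
        list_b += [i]
print(list_b)
###Output
_____no_output_____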
###Markdown
1.2 Numpy arraysTo create an array from a list, you say: new_array = np.array(old_list) We're going to try doing the same things to list and arrays to see what happens in each case.**Before running the cells below, try to guess that the output will be in each case (for the list versus array), and pay attention to the differences between how lists and arrays behave.**
###Code
test_list = [i for i in range(10)]
test_array = np.array(test_list)
print(test_list)
print(test_array)
print()
# what happens if we multiply by two?
print(test_list * 2)
print(test_array * 2)
# what happens if we try to add one to each one?
# note that this will only work for the arrays: it will throw an error for the list
print(test_list + 1)
print(test_array + 1)
# what happens if we try to add two lists or two arrays together?
print(test_list + test_list)
print(test_array + test_array)
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[ 0 2 4 6 8 10 12 14 16 18]
###Markdown
**This is all that we are going to go over for now - the main takeaway here is that when you're dealing with arrays, you're performing the same operation on all elements of the array.** 2. Importing a genome annotation file in pandas Here, we're going to look at a file that I've downloaded from the ENSEMBL website that contains annotation information for various genes in the genome. This file was originally downloaded with transcript-based annotations, which I convereted to be gene-based. When you're doing RNA-seq analysis, you can either perform analyses at the transcript level (meaning considering different isoforms of the same gene differently) or at the gene level (aggregating different isoforms of the same gene); we're going to focus on gene level analysis for now.First, we need to import the annotation file. I typically like to define paths and file names at the start, just to keep things organized.1. Create a variable called 'path' which contains the directory listing to wherever you downloaded the files.2. Create a variable called 'fn_anno' which is the name of the file.As a reminder, both of these should be strings, and the variable 'path' should end with a '/'.
###Code
# you will need to change this based on where you saved the files on your comptuer, as you did last week
# path = '/path/to/the/directory/containing/the/file/'
# fn = 'name_of_the_file.extension'
###Output
_____no_output_____
###Markdown
**Using pd.read_csv(), import the txt file (comma delimted) containing the annotations into a dataframe called 'anno', and set the index to be the 'gene' column. Use .head() to show the first 5 rows of the resulting dataframe.**See https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.htmland https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.htmland https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.htmlfor reference. These are part of the pandas documentation. I've provided these here just so you can get started, but in the future, I'll provide some hints/direction as to how to go about something, but it will be up to you to look up how to actually use the functions in the pandas (or other) documentation.
###Code
# you'll need to change these
path = '/Users/kevin/changlab/github/Bioinformatics-Tutorials/wk2_dataframes/data/'
fn = '/Homo_sapiens.GRCh38.gene_annotations.txt.gz'
# example code to do this to get you started
anno = pd.read_csv(path + fn, sep=',')
anno = anno.set_index('gene')
anno.head()
###Output
_____no_output_____
###Markdown
**Print the information for the gene** *'ENSG00000181449.3'* **. You should familiarize yourself with the .loc and .iloc commands.** **Save the information in the** *'start'* **column of the anno dataframe in a new variable, called** *start_column* **. Print start_column.** One of the most important things about working with genomics data is double checking that the files you are working with have the data you expect them to have.For instance: what values are present in the 'chr' column of our annotation dataframe? How many chromosome values are in this column?What chromosomes would you expect to be there? Are there any other chromosomes present, and if so, what are they?As a hint, you're looking for unique values in that column of the dataframe (and then also the length of the result). Note that some of the values in the chromosome column are numbers (e.g., 1, 2, etc.) and others are strings (e.g., 'X', 'Y'). When Python imported the dataframe (pd.read_csv()), did it import the numerical chromosomes as integers or strings? Let's say that we want to subset this annotation to get a list of only those genes that are on the 'normal' chromosomes: autosomes, sex chromosomes, in the mitochondrial genome.Make a list that looks like this:[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 'X', 'Y', 'MT']Do it without explicitly writing out the numbers 1 to 22. Feel free to use either lists (built-in to Python) or numpy arrays (np.array()). Be sure to save the numerical chromosomes as the correct data type (integer or string) to match the data type of the values in anno['chr']. Subset the anno dataframe to include only those genes whose chromosome annotations are in this list of chromosomes. Save this as a new dataframe called anno_filt.How big is this new annotation/how many things did we filter out? Print the first five columns of the dataframe with .head().You should use the .isin() function, and can also use .shape to get the size of a dataframe.(You should end up with 57106 rows remaining in anno_filt)
###Code
# print(anno.shape) to get the size of the original dataframe
print(anno.shape)
###Output
(62803, 8)
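###Markdown
For reference, a minimal sketch of one way to work through the steps above. It assumes the numeric chromosome entries were parsed as integers (matching the example list shown); check type(anno['chr'].iloc[0]) and switch to str(i) if they came in as strings.
###Code
# look up a single gene and save the 'start' column
print(anno.loc['ENSG00000181449.3'])
start_column = anno['start']
# how many unique chromosome values are there?
print(len(anno['chr'].unique()))
# build the list of 'normal' chromosomes without typing 1-22 by hand
chromosomes = [i for i in range(1, 23)] + ['X', 'Y', 'MT']
# keep only genes on those chromosomes
anno_filt = anno.loc[anno['chr'].isin(chromosomes)]
print(anno_filt.shape)
anno_filt.head()
###Output
_____no_output_____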
###Markdown
How many genes are on each chromosome? There's a few ways you could do this, but one is to use a Counter.1. from the package 'collections' import Counter. https://docs.python.org/3.6/library/collections.htmlcollections.Counter2. create a new variable called chr_count that is a Counter, and pass in the chr column of your dataframe to your counter to get the counts of how many times each chromosome is found3. print the results Part 3: ENCODE data**Import the file 'all_ENCODE_metadata.tsv.gz' into a dataframe called encode. Set the index column to be the file accession number, and print the first rows with .head(). Note that this file is tab separated rather than comma separated. You'll need to include the argument sep='\t' rather than sep=',' as you did before ('\t' denotes a tab).** How big is this dataframe? What type of information is present in the rows? Columns? Create a new dataframe called encode_filt that includes only samples that: - are from human (homo sapiens) - do not have audit errors. Specifically, only include rows where encode['Audit ERROR'].isnull() is True. For the first criteria, you may need to look at what columns are present in the dataframe to choose the appropriate ones to filter on. Your dataframe should have 223543 rows.
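Before the provided ENCODE answer below, here is a minimal sketch of the Counter approach just described, along with a hedged version of the ENCODE import that the next cell assumes already exists (the index column name 'File accession' and the file location are assumptions -- check encode.columns and adjust if they differ).
###Code
# count how many filtered genes fall on each chromosome
from collections import Counter
chr_count = Counter(anno_filt['chr'])
print(chr_count)
# hedged sketch of the ENCODE import used in the next cell; adjust the path/index column name as needed
encode = pd.read_csv(path + 'all_ENCODE_metadata.tsv.gz', sep='\t', index_col='File accession')
encode.head()
###Output
_____no_output_____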
###Code
# I'm providing the answer here, so that you can see how to do this
# first, we're creating a variable m1 which asks "is the value equal to 'Homo sapiens'
# for each value in the 'Biosample organism' column"
m1 = encode['Biosample organism'] == 'Homo sapiens'
# second, we're creating a variable m2 which asks "is there no value, i.e., no error included'
# for each value in the 'Audit ERROR' column"
m2 = encode['Audit ERROR'].isnull()
# now, we're asking if for each element (note that these correspond to rows of the dataframe)
# are both criteria True?
mask = m1 & m2
# now, we're actually filtering the dataframe
encode_filt = encode.loc[mask]
print(encode_filt.shape)
###Output
(223543, 49)
###Markdown
Breaking briefly from the ENCODE data, to try to illustrate what is going on here:Here, we've created three arrays, with three values each. We're performing an "and" operation - meaning that if everything is True, it will return True; otherwise, it will return False.
###Code
# example on how to merge multiple masks
a = np.array([True, True, False])
b = np.array([False, True, False])
c = np.array([True, True, True])
d = a & b & c
print(d)
# note that you can't do this with lists
# (try it yourself and see what happens)
# arrays make our lives easier
###Output
[False True False]
###Markdown
What types of RNA-seq data are available? Create a dataframe called rna that only has rows that satisfy all of the following criteria: - They come from RNA-seq experiments. - Their libraries are made from RNA - They are depleted in rRNA - They are fastq files You will need to look at both the column listings, as well as the unique values in these columns, to be able to know what values to filter on. You will want to look at four columns, create a boolean mask for each of them (a array/series containing either True or False for each value), and then make a final mask that contains only values where all four sub-masks were True.Your final 'rna' dataframe should have 1017 rows. Get a list of the unique biosample term names in the rna dataframe. In other words, a list of biosample term names for which there exists RNA-seq data that satisfied our above criteria. What types of ChIP-seq data are available? Create a dataframe called chip that only has rows that satisfy all of the following criteria: - They come from ChIP-seq experiments - The ChIP-seq target is H3K27ac-human - The file format is bed narrowPeak - The output type is replicated peaks - The bed files were aligned to the GRCh38 assembly. Your final dataframe should have 80 rows. Get a list of the unique biosample term names in the chip dataframe. Now, get a list of the biosample term names which are shared between the two lists. In other words, find the intersection of biosample term names with RNA and ChIP data satisfying our various criteria. How many samples are there in this list?I've provided one way to do this below using list comprehensions - there are many other ways to do this, such as converting the lists to sets, and then finding the intersection of those sets.
###Code
# example
list_1 = ['a','b','c','d','e','f','g']
list_2 = ['d','e','f','g','h','i','j']
list_3 = [i for i in list_1 if i in list_2]
print(list_3)
###Output
['d', 'e', 'f', 'g']
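###Markdown
Returning to the rna and chip exercises above, a minimal sketch of the same mask-based filtering pattern. The column names used here ('Assay', 'Library made from', 'Library depleted in', 'File format', 'Experiment target', 'Output type', 'Assembly', 'Biosample term name') are assumptions based on typical ENCODE metadata files -- check encode_filt.columns and adjust as needed.
###Code
# sketch: RNA-seq fastq files from rRNA-depleted RNA libraries
rna = encode_filt.loc[
    (encode_filt['Assay'] == 'RNA-seq')
    & (encode_filt['Library made from'] == 'RNA')
    & (encode_filt['Library depleted in'] == 'rRNA')
    & (encode_filt['File format'] == 'fastq')
]
# sketch: replicated H3K27ac ChIP-seq narrowPeak files aligned to GRCh38
chip = encode_filt.loc[
    (encode_filt['Assay'] == 'ChIP-seq')
    & (encode_filt['Experiment target'] == 'H3K27ac-human')
    & (encode_filt['File format'] == 'bed narrowPeak')
    & (encode_filt['Output type'] == 'replicated peaks')
    & (encode_filt['Assembly'] == 'GRCh38')
]
print(rna.shape, chip.shape)
# biosample term names shared between the two filtered tables
rna_names = rna['Biosample term name'].unique()
chip_names = chip['Biosample term name'].unique()
shared = [name for name in rna_names if name in chip_names]
print(len(shared))
###Output
_____no_output_____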
prove/clean_code.ipynb | ###Markdown
Train
###Code
X_train, X_test, y_train, y_test = train_test_split(corr_df_train.drop(columns='y'), corr_df_train['y'], test_size=0.25, random_state=42)
X_train.shape
sm = RandomOverSampler(random_state=42)
y_train_pos = np.digitize(y_train, y_train.sort_values().unique())
X_res, y_res = sm.fit_resample(X_train, y_train_pos)
X_train, y_train = X_res, y_train.sort_values().unique()[y_res-1]
X_train.shape
###Output
_____no_output_____
###Markdown
SVR
###Code
#params = [{'kernel' : ['poly'],
# 'C' : [1,5,10,12,15,20,25],
# 'degree' : [2,3,4],
# 'coef0' : [0.02,0.5],
# 'gamma' : ['auto','scale'],
# 'epsilon':[0.02]}]
params = [{'kernel' : ['rbf'],
'C' : [100,150,200],
'gamma' : ['auto','scale'],
'epsilon':[0.02]}]
svr_reg=SVR()
grids = GridSearchCV(svr_reg,params,cv=5,verbose=5,n_jobs=-1)
grids.fit(X_train,y_train)
grids.best_params_
y_pred=grids.predict(X_test)
mean_squared_error(y_test, y_pred,squared=False)
#retrain on all dataset
#svr_reg=SVR(C= 10, degree= 2, kernel= 'poly')
#svr_reg=SVR(C= 10, degree= 3, kernel= 'poly',gamma='scale',coef0=0.5
#svr_reg=SVR(C= 15, degree= 3, kernel= 'poly',gamma='scale',coef0=0.5,epsilon=0.02)
svr_reg=SVR(**grids.best_params_)
svr_reg.fit(corr_df_train.drop(columns='y'),corr_df_train['y'])
from sklearn.neural_network import MLPRegressor
regr = MLPRegressor(random_state=1, max_iter=1000,hidden_layer_sizes=(100,20,30,30,30,20,100),alpha=0.01)
regr.fit(X_train, y_train)
y_pred_nn=regr.predict(X_test)
mean_squared_error(y_test, y_pred_nn,squared=False)
regr.fit(corr_df_train.drop(columns='y'),corr_df_train['y'])
###Output
_____no_output_____
###Markdown
Linear models
###Code
from sklearn import linear_model
from sklearn.linear_model import Ridge,Lasso,MultiTaskLasso,Lars,BayesianRidge,RANSACRegressor,TheilSenRegressor,HuberRegressor
clf = TheilSenRegressor()
clf.fit(X_train, y_train)
y_pred_2=clf.predict(X_test)
mean_squared_error(y_test, y_pred_2,squared=False)
clf.fit(corr_df_train.drop(columns='y'),corr_df_train['y'])
###Output
_____no_output_____
###Markdown
Test
###Code
test_df=pd.read_csv('./Data/test.csv')
corr_test=test_df.progress_apply(lambda row:get_corr(row,usable_roi,test=True),axis=1)
test_final_df=pd.DataFrame(np.array(corr_test.values.tolist())).fillna(0)
#y_pred_test_svr=svr_reg.predict(test_final_df)
#y_pred_test_ten=clf.predict(test_final_df)
y_pred_test_nn=regr.predict(test_final_df)
plt.hist(y_pred_test_nn)
plt.hist(y_pred_test_ten)
y_pred_avg=0.6*y_pred_test_svr+0.4*y_pred_test_ten
y_pred_test_nn
df_leaderboard=pd.DataFrame({'id':test_df['id'],'target':y_pred_avg})
df_leaderboard.to_csv('G14_26_avg_models',index=False)
plt.hist(y_test, bins=20)
#plt.hist(y_pred, bins=20)
plt.hist(y_pred_nn, bins=20);
plt.scatter(y_test, y_pred_nn)
#plt.scatter(y_test, y_pred_2)
plt.scatter(y_test, (y_test-y_pred_nn))
#plt.scatter(y_test, (y_test-y_pred_2))
#plt.scatter(y_test, (y_test-(0.6*y_pred+0.4*y_pred_2)))
for i in np.arange(0,1,0.01):
print(i,mean_squared_error(y_test, i*y_pred+(1-i)*y_pred_2,squared=False))
ROI=np.array([f'ROI{i}' for i in range(1,117)])[avg>=np.median(avg)]
ROI
columns=[ROI[i]+'-'+ROI[j] for i in range(0,len(ROI)-1) for j in range(i+1,len(ROI))]
(corr_df_train['y'] > 100) & (corr_df_train['y'] < 120)
(y_train > 100) & (y_train < 120)
###Output
_____no_output_____ |
code/analysis/physical_footprint_agriculture.ipynb | ###Markdown
Data Analysis for 'The Physical Footprint of Agriculture'
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pywaffle import Waffle
import anthro.viz
import anthro.stats
colors = anthro.viz.plotting_style()
###Output
_____no_output_____
###Markdown
In this notebook, we explore the data associated with the vignette "The Physical Footprint of Agriculture". This notebook relis on a body of data from the FAOSTAT database on global agricultural yields and from the Alltech Global Feed Survey (GFS) which quantifies the mass of livestock feed produced annually. This notebook will change as more data is added and/or removed. Exploring the Species Breakdown of Livestock and Livestock Products
###Code
# Load the FAOSTAT data
livestock_pop = pd.read_csv('../../data/agriculture/FAOSTAT_livestock_population/processed/FAOSTAT_livestock_population.csv')
livestock_prod = pd.read_csv('../../data/agriculture/FAOSTAT_livestock_product_produced/processed/FAOSTAT_livestock_and_product.csv')
feed_prod = pd.read_csv('../../data/agriculture/alltech_global_feed_survey/processed/alltech_feed_species_breakdown.csv')
livestock_pop.loc[livestock_pop['animal'] == 'chicken', 'name'] = 'Chicken (poultry + egg)'
livestock_pop.loc[livestock_pop['animal'] == 'chicken', 'name'] = 'Chicken (poultry + egg)'
avg_pop = livestock_pop[(livestock_pop['year'] >= 2010)].groupby(['animal'])['population_Mhd'].agg(('mean', 'std')).reset_index()
avg_pop_dict = {g:d['mean'].values[0] for g, d in avg_pop.groupby(['animal'])}
livestock_pop
avg_pop_dict
###Output
_____no_output_____ |
notebooks/Dstripes/Basic/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_01ssim.ipynb | ###Markdown
Settings
###Code
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
sys.path.append('..'+sep_local+'..')
print(sep_local)
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Dataset loading
###Code
dataset_name='Dstripes'
import tensorflow as tf
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
_outputs_shape
###Output
_____no_output_____
###Markdown
Model's Layers definition
###Code
units=20
c=50
menc_lays = [
tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latents_dim)
]
venc_lays = [
tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latents_dim)
]
dec_lays = [
tf.keras.layers.Dense(units=units*c*c, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(c , c, units)),
tf.keras.layers.Conv2DTranspose(filters=units, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
tf.keras.layers.Conv2DTranspose(filters=units*3, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=(1, 1), padding="SAME")
]
###Output
_____no_output_____
###Markdown
Model definition
###Code
model_name = dataset_name+'VAE_Convolutional_reconst_1ell_01ssmi'
experiments_dir='experiments'+sep_local+model_name
from training.autoencoding_basic.autoencoders.VAE import VAE as AE
inputs_shape=image_size
variables_params = \
[
{
'name': 'inference_mean',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': menc_lays
}
,
{
'name': 'inference_logvariance',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': venc_lays
}
,
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
#to restore trained model, set filepath=_restore
ae = AE(
name=model_name,
latents_dim=latents_dim,
batch_size=batch_size,
variables_params=variables_params,
filepath=None
)
from evaluation.quantitive_metrics.structural_similarity import prepare_ssim_multiscale
from statistical.losses_utilities import similarity_to_distance
from statistical.ae_losses import expected_loglikelihood_with_lower_bound as ellwlb
ae.compile(loss={'x_logits': lambda x_true, x_logits: ellwlb(x_true, x_logits)+ 0.1*similarity_to_distance(prepare_ssim_multiscale([ae.batch_size]+ae.get_inputs_shape()))(x_true, x_logits)})
###Output
_____no_output_____
###Markdown
Callbacks
###Code
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
###Output
_____no_output_____
###Markdown
Model Training
###Code
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=int(1e4),
epochs=int(1e6),
verbose=2,
callbacks=[ es, ms, csv_log, sg],
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=int(1e4)
)
###Output
_____no_output_____
###Markdown
Model Evaluation inception_score
###Code
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
###Output
_____no_output_____
###Markdown
Frechet_inception_distance
###Code
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
###Output
_____no_output_____
###Markdown
perceptual_path_length_score
###Code
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
###Output
_____no_output_____
###Markdown
precision score
###Code
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
###Output
_____no_output_____
###Markdown
recall score
###Code
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
###Output
_____no_output_____
###Markdown
Image Generation image reconstruction Training dataset
###Code
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
with Randomness
###Code
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
Complete Randomness
###Code
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
from training.generators.image_generation_testing import interpolate_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
###Output
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
BEAVRS/models/openmc/extract-assm.ipynb | ###Markdown
Pre-requisitesThis Notebook requires the following to be installed on one's machine:* **openmc*** **beavrs****NOTE**: You can install the `beavrs` Python module by running the following in the terminal:```$ python setup.py install --user```
###Code
import openmc
import beavrs.builder
import beavrs.constants as c
from IPython.display import Image
###Output
_____no_output_____
###Markdown
Create BEAVRS Model
###Code
# Instantiate a BEAVRS object from the mit-crpg/PWR_benchmarks repository
b = beavrs.builder.BEAVRS()
###Output
_____no_output_____
###Markdown
The BEAVRS model represented by variable `b` encapsulates the fully-detailed 3D BEAVRS core geometry and materials built using the OpenMC Python API. Create "geometry.xml" We wish to extract a fuel assembly from the BEAVRS `Geometry` object for our simulation. We will first extract a Python `list` of all `Lattice` objects in the `Geometry` using the `Geometry.get_all_lattices()` routine:
###Code
# Get all OpenMC Lattices in a Python list
all_latts = b.openmc_geometry.get_all_lattices()
###Output
_____no_output_____
###Markdown
There are a number of different fuel assembly `Lattices` in the BEAVRS model, each of which has been assigned a unique string name. let's loop over all `Lattices` and report the string name for each `Lattice`:
###Code
# Print the name of each Lattice
for id, latt in all_latts.items():
print(id, latt.name)
###Output
762 Core Lattice
336 Fuel 3.1% enr no instr no BAs
354 Fuel 3.1% enr no instr 6S
339 Fuel 3.1% enr instr no BAs
357 Fuel 3.1% enr instr 6S
420 Fuel 3.1% enr no instr RCCA SA
408 Fuel 3.1% enr no instr 16
159 Fuel 1.6% enr instr RCCA B
414 Fuel 3.1% enr no instr 20
171 Fuel 1.6% enr instr RCCA C
156 Fuel 1.6% enr no instr RCCA B
402 Fuel 3.1% enr no instr 15SE
270 Fuel 2.4% enr no instr 16
192 Fuel 1.6% enr no instr RCCA SD
174 Fuel 1.6% enr no instr RCCA SB
273 Fuel 2.4% enr instr 16
162 Fuel 1.6% enr no instr RCCA SC
396 Fuel 3.1% enr no instr 15SW
423 Fuel 3.1% enr instr RCCA SA
324 Fuel 2.4% enr no instr RCCA D
60 Fuel 1.6% enr no instr no BAs
240 Fuel 2.4% enr no instr 12
183 Fuel 1.6% enr instr RCCA SE
63 Fuel 1.6% enr instr no BAs
195 Fuel 1.6% enr instr RCCA SD
363 Fuel 3.1% enr instr 6E
153 Fuel 1.6% enr instr RCCA A
168 Fuel 1.6% enr no instr RCCA C
366 Fuel 3.1% enr no instr 6W
243 Fuel 2.4% enr instr 12
177 Fuel 1.6% enr instr RCCA SB
180 Fuel 1.6% enr no instr RCCA SE
150 Fuel 1.6% enr no instr RCCA A
186 Fuel 1.6% enr no instr RCCA D
417 Fuel 3.1% enr instr 20
360 Fuel 3.1% enr no instr 6E
327 Fuel 2.4% enr instr RCCA D
393 Fuel 3.1% enr instr 15NE
165 Fuel 1.6% enr instr RCCA SC
384 Fuel 3.1% enr no instr 15NW
348 Fuel 3.1% enr no instr 6N
351 Fuel 3.1% enr instr 6N
###Markdown
Now that we know the names of all of the `Lattices` in the BEAVRS `Geometry`, let's select out the one we wish to model.
###Code
# Find the 1.6% enriched fuel lattice w/o BAs
assembly_name = 'Fuel 1.6% enr instr no BAs'
for id, latt in all_latts.items():
if latt.name == assembly_name:
assembly = latt
###Output
_____no_output_____
###Markdown
We can inspect our chosen fuel assembly using OpenMC's built-in string representation for the object:
###Code
assembly
###Output
_____no_output_____
###Markdown
We need to construct a sub-geometry encapsulating only this fuel assembly rather than the entire BEAVRS core. We will do this by first creating a "root" cell and fill it with our fuel assembly:
###Code
# Create surface objects for our "root" cell"
lattice_sides = openmc.model.get_rectangular_prism(17*c.pinPitch, 17*c.pinPitch,
boundary_type='reflective')
min_z = openmc.ZPlane(z0=c.struct_LowestExtent, boundary_type='vacuum')
max_z = openmc.ZPlane(z0=c.struct_HighestExtent, boundary_type='vacuum')
# Create a "root" cell filled by the fuel assembly
root_cell = openmc.Cell(name='root cell',
fill=assembly,
region=lattice_sides & +min_z & -max_z
)
###Output
_____no_output_____
###Markdown
We then create a "root" universe and add our "root" cell to it:
###Code
# Create a "root" universe with ID=0 and add the "root" cell to it
root_univ = openmc.Universe(name='root universe', cells=[root_cell])
###Output
_____no_output_____
###Markdown
Lastly, the "root" universe must be attached to a new OpenMC `Geometry` object representing this new sub-geometry:
###Code
# Create an OpenMC Geometry around root Universe
sub_geometry = openmc.Geometry(root_univ)
###Output
_____no_output_____
###Markdown
Finally, we are ready to create a "geometry.xml" input file for OpenMC! We simply export it to XML as follows:
###Code
# Export the OpenMC Geometry to a "geometry.xml" file
sub_geometry.export_to_xml()
###Output
_____no_output_____
###Markdown
Create "materials.xml" Now we need to create materials for our geometry. This is very easy to do with the `b.write_openmc_materials()` routine. The one disadvantage of this is that it will write *all* materials for the entire BEAVRS geometry to a "materials.xml" file, most of which are not used in our sub-geometry for a single fuel assembly. Instead, we can write out only those materials that are in our geometry as follows:
###Code
# Get a list of all OpenMC Materials
all_materials = sub_geometry.get_all_materials()
# Create a Materials collection object
materials = openmc.Materials(all_materials.values())
# Export the OpenMC Materials to a "materials.xml" file
materials.export_to_xml()
###Output
_____no_output_____
###Markdown
Create "settings.xml" Now for the easy part :-) Let's create a "settings.xml" file:
###Code
# Create a Settings object
settings = openmc.Settings()
# Set any settings of interest
settings.batches = 150
settings.inactive = 10
settings.particles = 1000
# Use a bounding box to define the starting source distribution
lower_left = [-17*c.pinPitch/2, -17*c.pinPitch/2, c.fuel_ActiveFuel_bot]
upper_right = [+17*c.pinPitch/2, +17*c.pinPitch/2, c.fuel_ActiveFuel_top]
settings.source = openmc.source.Source(
openmc.stats.Box(lower_left, upper_right, only_fissionable=True))
# Export the settings to a "settings.xml" file
settings.export_to_xml()
###Output
_____no_output_____
###Markdown
Create "plots.xml"
###Code
# Create a single plot using default parameters (basis='xy', origin=(0,0,0))
plot = openmc.Plot(plot_id=1)
plot.width = [17*c.pinPitch, 17*c.pinPitch]
# Create a PlotsFile object and add our plot to it
plot_file = openmc.Plots([plot])
# Export the plots to a "plots.xml" file
plot_file.export_to_xml()
###Output
_____no_output_____
###Markdown
With the "plots.xml" file, we can now generate and view the plot. We must first instantiate an `openmc.Executor` object and then ask it to plot the geometry(equivalent to running `openmc -p` from within the terminal).
###Code
# Run openmc in plotting mode
openmc.plot_geometry(output=False)
###Output
_____no_output_____
###Markdown
OpenMC outputs plots in .ppm format, which can be converted into a compressed format like .png with the convert utility. We can view the .png image inline using the `IPython.display.Image` class as follows:
###Code
# Convert OpenMC's funky ppm to png
!convert plot_1.ppm plot.png
# Display the plot inline
Image(filename='plot.png', width=250, height=250)
###Output
_____no_output_____
###Markdown
Create "tallies.xml" I'm not exactly sure what you need in the way of tallies, but perhaps I can get you started with a fission tally in the instrument tube. First, let's inspect the names for all of the `Cells` in the geometry using the `all_material_cells` list:
###Code
all_material_cells = sub_geometry.get_all_material_cells()
for id, cell in all_material_cells.items():
print(cell.name)
###Output
Fuel rod - 1.6% enr axial 0: Borated Water
Fuel rod - 1.6% enr axial 1: SS SPN
Fuel rod - 1.6% enr axial 2: Zircaloy 4
Fuel rod - 1.6% enr axial 5: Zircaloy 4
Fuel rod - 1.6% enr axial 6: Borated Water
Fuel rod - 1.6% enr axial 7: SS SPN
Fuel rod - 1.6% enr axial top: Borated Water
Fuel rod active region - 1.6% enr radial 0: Fuel 1.6%
Fuel rod active region - 1.6% enr radial 1: Helium
Fuel rod active region - 1.6% enr radial outer: Zircaloy 4
Fuel rod plenum radial 0: Inconel 718
Fuel rod plenum radial 1: Helium
Fuel rod plenum radial outer: Zircaloy 4
Grids axial universe axial 0: Borated Water
Grids axial universe axial 1: Water SPN
Grids axial universe axial 2: Borated Water
Grids axial universe axial 4: Borated Water
Grids axial universe axial 6: Borated Water
Grids axial universe axial 8: Borated Water
Grids axial universe axial 10: Borated Water
Grids axial universe axial 12: Borated Water
Grids axial universe axial 14: Borated Water
Grids axial universe axial 16: Borated Water
Grids axial universe axial 18: Borated Water
Grids axial universe axial 19: Water SPN
Grids axial universe axial top: Borated Water
Top/Bottom grid pincell radial 0: Borated Water
Top/Bottom grid pincell radial outer: Inconel 718
Intermediate grid pincell radial 0: Borated Water
Intermediate grid pincell radial outer: Zircaloy 4
Empty Guide Tube axial 0: Borated Water
Empty Guide Tube axial 1: Water SPN
Empty Guide Tube axial 4: Water SPN
Empty Guide Tube axial top: Borated Water
Empty GT below the dashpot radial 0: Borated Water
Empty GT below the dashpot radial 1: Zircaloy 4
Empty GT below the dashpot radial outer: Borated Water
Empty GT above the dashpot radial 0: Borated Water
Empty GT above the dashpot radial outer: Zircaloy 4
Instrument tube axial stack axial 3: Water SPN
Instrument tube axial stack axial top: Borated Water
Instrument tube thimble radial 0: Air
Instrument tube thimble radial outer: Zircaloy 4
Instrument tube thimble support plane radial 0: Air
Instrument tube thimble support plane radial outer: Zircaloy 4
Empty Guide Tube in Center Position axial 0: Borated Water
Empty Guide Tube in Center Position axial 1: Water SPN
Empty Guide Tube in Center Position axial 3: Water SPN
Empty Guide Tube in Center Position axial top: Borated Water
###Markdown
Perhaps you'd like a tally in one of these cells. First, we need to extract the cell(s) of interest:
###Code
cell_name = 'Instrument tube thimble radial 0: Air'
for id, cell in all_material_cells.items():
if cell.name == cell_name:
my_cell = cell
###Output
_____no_output_____
###Markdown
Let's tally the fast/thermal scatter and absorption rates for all nuclides in our cell. We create a `Tally` to do so as follows:
###Code
# Instantiate a really cool tally
tally = openmc.Tally(name='a really cool tally')
# Instantiate a cell filter
cell_filter = openmc.CellFilter(bins=[my_cell.id])
# Instantiate energy filter
energy_filter = openmc.EnergyFilter([0., 0.625e-6, 20.])
tally.filters = [cell_filter, energy_filter]
# Add the scores of interest to the tally
tally.scores = ['scatter', 'absorption']
# Add all nuclides to the tally for kicks
tally.nuclides = my_cell.fill.get_nuclides()
###Output
_____no_output_____
###Markdown
Next, we simply need to add our `Tally` object(s) to a `TalliesFile` object and export them to a "tallies.xml" file:
###Code
# Instantiate an empty TalliesFile
tallies_file = openmc.Tallies()
# Add our tally(ies) to the file
tallies_file.append(tally)
# Export the tallies to a "tallies.xml" file
tallies_file.export_to_xml()
###Output
_____no_output_____
###Markdown
Run OpenMC! We can run OpenMC from within the notebook if we wish. Wow, isn't this so much more powerful than ASCII!
###Code
# Run OpenMC with 2 MPI processes
openmc.run(mpi_args=['mpiexec', '-n', '2'])
###Output
%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%
############### %%%%%%%%%%%%%%%%%%%%%%%%
################## %%%%%%%%%%%%%%%%%%%%%%%
################### %%%%%%%%%%%%%%%%%%%%%%%
#################### %%%%%%%%%%%%%%%%%%%%%%
##################### %%%%%%%%%%%%%%%%%%%%%
###################### %%%%%%%%%%%%%%%%%%%%
####################### %%%%%%%%%%%%%%%%%%
####################### %%%%%%%%%%%%%%%%%
###################### %%%%%%%%%%%%%%%%%
#################### %%%%%%%%%%%%%%%%%
################# %%%%%%%%%%%%%%%%%
############### %%%%%%%%%%%%%%%%
############ %%%%%%%%%%%%%%%
######## %%%%%%%%%%%%%%
%%%%%%%%%%%
| The OpenMC Monte Carlo Code
Copyright | 2011-2018 MIT and OpenMC contributors
License | http://openmc.readthedocs.io/en/latest/license.html
Version | 0.10.0
Git SHA1 | 0cf903cc67af4267a8216535fc2fa6a61f4341a1
Date/Time | 2018-08-28 17:16:34
MPI Processes | 2
OpenMP Threads | 20
Reading settings XML file...
Reading cross sections XML file...
Reading materials XML file...
Reading geometry XML file...
Building neighboring cells lists for each surface...
Reading O16 from /home/liangjg/nucdata/nndc_hdf5/O16.h5
Reading O17 from /home/liangjg/nucdata/nndc_hdf5/O17.h5
Reading N14 from /home/liangjg/nucdata/nndc_hdf5/N14.h5
Reading N15 from /home/liangjg/nucdata/nndc_hdf5/N15.h5
Reading Ar36 from /home/liangjg/nucdata/nndc_hdf5/Ar36.h5
WARNING: Negative value(s) found on probability table for nuclide Ar36
at 294K
Reading Ar38 from /home/liangjg/nucdata/nndc_hdf5/Ar38.h5
Reading Ar40 from /home/liangjg/nucdata/nndc_hdf5/Ar40.h5
Reading C0 from /home/liangjg/nucdata/nndc_hdf5/C0.h5
Reading He3 from /home/liangjg/nucdata/nndc_hdf5/He3.h5
Reading He4 from /home/liangjg/nucdata/nndc_hdf5/He4.h5
Reading Si28 from /home/liangjg/nucdata/nndc_hdf5/Si28.h5
Reading Si29 from /home/liangjg/nucdata/nndc_hdf5/Si29.h5
Reading Si30 from /home/liangjg/nucdata/nndc_hdf5/Si30.h5
Reading Cr50 from /home/liangjg/nucdata/nndc_hdf5/Cr50.h5
Reading Cr52 from /home/liangjg/nucdata/nndc_hdf5/Cr52.h5
Reading Cr53 from /home/liangjg/nucdata/nndc_hdf5/Cr53.h5
Reading Cr54 from /home/liangjg/nucdata/nndc_hdf5/Cr54.h5
Reading Mn55 from /home/liangjg/nucdata/nndc_hdf5/Mn55.h5
Reading Fe54 from /home/liangjg/nucdata/nndc_hdf5/Fe54.h5
Reading Fe56 from /home/liangjg/nucdata/nndc_hdf5/Fe56.h5
Reading Fe57 from /home/liangjg/nucdata/nndc_hdf5/Fe57.h5
Reading Fe58 from /home/liangjg/nucdata/nndc_hdf5/Fe58.h5
Reading Ni58 from /home/liangjg/nucdata/nndc_hdf5/Ni58.h5
Reading Ni60 from /home/liangjg/nucdata/nndc_hdf5/Ni60.h5
Reading Ni61 from /home/liangjg/nucdata/nndc_hdf5/Ni61.h5
Reading Ni62 from /home/liangjg/nucdata/nndc_hdf5/Ni62.h5
Reading Ni64 from /home/liangjg/nucdata/nndc_hdf5/Ni64.h5
Reading Zr90 from /home/liangjg/nucdata/nndc_hdf5/Zr90.h5
Reading Zr91 from /home/liangjg/nucdata/nndc_hdf5/Zr91.h5
Reading Zr92 from /home/liangjg/nucdata/nndc_hdf5/Zr92.h5
Reading Zr94 from /home/liangjg/nucdata/nndc_hdf5/Zr94.h5
Reading Zr96 from /home/liangjg/nucdata/nndc_hdf5/Zr96.h5
Reading Sn112 from /home/liangjg/nucdata/nndc_hdf5/Sn112.h5
Reading Sn114 from /home/liangjg/nucdata/nndc_hdf5/Sn114.h5
Reading Sn115 from /home/liangjg/nucdata/nndc_hdf5/Sn115.h5
Reading Sn116 from /home/liangjg/nucdata/nndc_hdf5/Sn116.h5
Reading Sn117 from /home/liangjg/nucdata/nndc_hdf5/Sn117.h5
Reading Sn118 from /home/liangjg/nucdata/nndc_hdf5/Sn118.h5
Reading Sn119 from /home/liangjg/nucdata/nndc_hdf5/Sn119.h5
Reading Sn120 from /home/liangjg/nucdata/nndc_hdf5/Sn120.h5
Reading Sn122 from /home/liangjg/nucdata/nndc_hdf5/Sn122.h5
Reading Sn124 from /home/liangjg/nucdata/nndc_hdf5/Sn124.h5
Reading U234 from /home/liangjg/nucdata/nndc_hdf5/U234.h5
Reading U235 from /home/liangjg/nucdata/nndc_hdf5/U235.h5
Reading U238 from /home/liangjg/nucdata/nndc_hdf5/U238.h5
Reading U236 from /home/liangjg/nucdata/nndc_hdf5/U236.h5
Reading B10 from /home/liangjg/nucdata/nndc_hdf5/B10.h5
Reading B11 from /home/liangjg/nucdata/nndc_hdf5/B11.h5
Reading H1 from /home/liangjg/nucdata/nndc_hdf5/H1.h5
Reading H2 from /home/liangjg/nucdata/nndc_hdf5/H2.h5
Reading c_H_in_H2O from /home/liangjg/nucdata/nndc_hdf5/c_H_in_H2O.h5
Maximum neutron transport energy: 2.00000E+07 eV for O17
Reading tallies XML file...
Writing summary.h5 file...
Initializing source particles...
====================> K EIGENVALUE SIMULATION <====================
Bat./Gen. k Average k
========= ======== ====================
1/1 1.04727
2/1 1.00574
3/1 1.03701
4/1 0.96299
5/1 0.98797
6/1 1.01822
7/1 1.00424
8/1 1.01493
9/1 1.02974
10/1 1.02082
11/1 1.00522
12/1 1.01496 1.01009 +/- 0.00487
13/1 1.01189 1.01069 +/- 0.00287
14/1 0.98296 1.00376 +/- 0.00723
15/1 0.99697 1.00240 +/- 0.00576
16/1 0.98099 0.99883 +/- 0.00590
17/1 0.99588 0.99841 +/- 0.00501
18/1 1.07368 1.00782 +/- 0.01036
19/1 0.95227 1.00165 +/- 0.01103
20/1 1.05009 1.00649 +/- 0.01099
21/1 0.97005 1.00318 +/- 0.01048
22/1 0.99242 1.00228 +/- 0.00961
23/1 1.05491 1.00633 +/- 0.00972
24/1 1.00588 1.00630 +/- 0.00900
25/1 1.00227 1.00603 +/- 0.00838
26/1 0.96455 1.00344 +/- 0.00826
27/1 1.00319 1.00342 +/- 0.00776
28/1 0.98811 1.00257 +/- 0.00736
29/1 1.00272 1.00258 +/- 0.00696
30/1 0.94461 0.99968 +/- 0.00721
31/1 1.01218 1.00028 +/- 0.00689
32/1 0.96420 0.99864 +/- 0.00677
33/1 0.95413 0.99670 +/- 0.00675
34/1 0.97799 0.99592 +/- 0.00651
35/1 0.93211 0.99337 +/- 0.00675
36/1 0.97633 0.99271 +/- 0.00651
37/1 1.01195 0.99343 +/- 0.00631
38/1 0.99218 0.99338 +/- 0.00608
39/1 0.98653 0.99315 +/- 0.00587
40/1 1.01873 0.99400 +/- 0.00574
41/1 0.97922 0.99352 +/- 0.00557
42/1 1.03085 0.99469 +/- 0.00552
43/1 0.99989 0.99485 +/- 0.00535
44/1 1.02227 0.99565 +/- 0.00525
45/1 1.01514 0.99621 +/- 0.00513
46/1 1.02156 0.99691 +/- 0.00503
47/1 0.98417 0.99657 +/- 0.00491
48/1 1.01386 0.99702 +/- 0.00480
49/1 0.98720 0.99677 +/- 0.00468
50/1 1.01058 0.99712 +/- 0.00458
51/1 1.03660 0.99808 +/- 0.00457
52/1 1.03603 0.99898 +/- 0.00455
53/1 0.97500 0.99843 +/- 0.00447
54/1 0.96814 0.99774 +/- 0.00443
55/1 1.03349 0.99853 +/- 0.00440
56/1 1.02902 0.99920 +/- 0.00435
57/1 1.00023 0.99922 +/- 0.00426
58/1 1.04574 1.00019 +/- 0.00428
59/1 1.00650 1.00031 +/- 0.00419
60/1 0.95236 0.99936 +/- 0.00422
61/1 1.00056 0.99938 +/- 0.00414
62/1 1.02127 0.99980 +/- 0.00408
63/1 1.02664 1.00031 +/- 0.00403
###Markdown
Analyze Tally Data
###Code
# Instantiate a StatePoint object
from openmc.statepoint import StatePoint
filename = 'statepoint.{}.h5'.format(settings.batches)
sp = StatePoint(filename)
# Inspect the StatePoint's tallies
sp.tallies
# Get a Pandas DataFrame of the tally data
tally = sp.get_tally(name='a really cool tally')
tally.get_pandas_dataframe()
###Output
_____no_output_____ |
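###Markdown
As an illustrative next step (an addition to the original write-up), the DataFrame returned by `get_pandas_dataframe()` can be sliced with ordinary pandas operations. The column names used below ('score', 'nuclide', 'mean') are assumptions about the tally DataFrame layout and may differ between OpenMC versions.
###Code
# A minimal sketch: sum the tally means per score over all nuclides and energy bins
df = tally.get_pandas_dataframe()
print(df.groupby('score')['mean'].sum())
# ... or per nuclide for the absorption score only
print(df[df['score'] == 'absorption'].groupby('nuclide')['mean'].sum())
###Output
_____no_output_____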
SDM/R-jupyter/008-timeseries.ipynb | ###Markdown
Timeseries decomposition, forecasting and outlier detection Let's start by loading 7 days of CPU measurements for a lab InfluxDB host. Mean values of CPU usage are binned into buckets of 1 minute, resulting in a set of 1440 data points per day. Since the interval is precise and bounded exactly by midnight, there is no need to maintain timestamps. This results in a vector of the following length.
###Code
rm(list=ls())
load("/home/jovyan/data/SDM/rdata/cpu-usage-orig.rda")
print(length(timeseries))
plot(timeseries, type="l", ylab = "CPU usage", xlab = "minute")
###Output
_____no_output_____
###Markdown
There is a clear pattern, but the data is quite noisy. We can reduce this noise by binning the data even more, or by smoothing it with a moving average. The former option loses information, while the latter introduces a delay for real-time anomaly detection. To maintain our resolution of 1440 points per day, I will use the moving average smoothing for now.
###Code
ma1 <- function(x, n, sides = 1){
return(filter(x, rep(1/n,n), sides = sides))
}
timeseries <- ma1(timeseries, n = 15, sides = 1)
plot(timeseries, type="l", ylab = "CPU usage", xlab = "minute")
###Output
_____no_output_____
###Markdown
The resulting set reduces the amount of noise, while the anomaly on the last day is still clearly visible. > Note that I used the R `filter` function, which applies a convolutional filter on the timeseries, resulting in a one-sided unweighted moving average. This was done simply to avoid using a for loop. In any reasonable programming language, you can implement something like this (warning! untested out-of-context copypaste code) -
###Code
movingAverage <- function(x, items = 4){
  # pad the first incomplete windows with NA
  ma1 <- c(rep(NA, items))
  for( i in seq_along(1:(length(x)-items))){
    set <- x[i:((i-1)+items)]
    ma1[i+items] <- mean(set)
  }
  return(ma1)
}
###Output
_____no_output_____
###Markdown
Autoregressive Moving Average (ARMA) models are commonly used to forecast future timeseries points when historical data with clear seasonal patterns is available. For outlier detection, the idea is to predict past periods and compare these predictions against actual values. This creates an anomaly score for each data point, and an alarm can be raised if this score exceeds a certain threshold. Currently, we are only concerned with a single example series, but the PCA trick can be used to potentially scale this method to thousands of parallel series. Firstly, we will separate the validation data that will be forecasted from the prior training data. For this example, the last day will be separated.
###Code
total <- length(timeseries)
cycle <- 1440
train <- timeseries[c(1:(total - cycle))]
# our training data seemed to have missing values, just pad with zero for now
train[is.na(train)] <- 0
validate <- timeseries[c( (total-cycle):total )]
# drop last excessive timestamp
validate <- validate[-1441]
plot(train, type = "l")
plot(validate, type = "l")
###Output
_____no_output_____
###Markdown
Notice that the spike at the beginning of the day is actually not an anomaly when looked at from a weekly context. However, the spike at the end of the day is strange and would warrant further inspection. We have to decompose this series by removing the cyclic trend, the seasonal pattern and the random element. Additive or multiplicative models can be used depending on the nature of the data. A multiplicative model can be better when a cyclic increase or decrease can be observed, while being unsuitable for use on mean-centered normalized data. We will use the additive model for this exercise, as no exponential cyclic increase or decrease can be observed. The cyclic trend can be extracted by calculating a weighted moving average.
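For reference (this formula is an addition for clarity, not part of the original text), the additive model assumes each observation decomposes as $$ x_t = T_t + S_t + R_t $$ where $T_t$ is the trend, $S_t$ the seasonal component and $R_t$ the random remainder; a multiplicative model would instead use $x_t = T_t \cdot S_t \cdot R_t$.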
###Code
#trendpatterns <- apply(Ptrunc, 2, ma2, n = SEASONS, slide=TRUE)
ma2 <- function(x, n = 60, slide = TRUE){
  # we do a weighted moving average, as opposed to a 2-MA of an N-MA
if (slide == TRUE){
partial <- 1/(n*2)
filter <- c(partial, rep(1/n, n-1), partial)
} else {
filter <- c(rep(1/n, n))
}
return(filter(x, filter = filter, sides=2))
}
trend <- ma2(train, n = 1440, slide = TRUE)
plot(trend, type = "l")
###Output
_____no_output_____
###Markdown
Then we will need to remove the trend from the series and calculate the seasonal coefficients.
###Code
noTrend <- train - trend
plot(noTrend, type = "l")
###Output
_____no_output_____
###Markdown
Seasonal indices can simply be calculated by measuring the average value for every season over the available training cycles.
###Code
seasonalIndex <- function(x, cycles, seasons){
x <- matrix(x, nrow = seasons, ncol = cycles)
x[,1] <- ifelse(is.na(x[,1]), x[,ncol(x)], x[,1])
x <- x[,-ncol(x)]
x <- rowMeans(x)
return(x)
}
###Output
_____no_output_____
###Markdown
Consider this simplified example of quarterly data over a period of 4 years. We will derive the trend via a weighted moving average over 4 quarters per year, which will then be multiplicatively removed from the original data. This leaves us with only the seasonal component.
###Code
x <- c(4.8, 4.1, 6.0, 6.5, 5.8, 5.2, 6.8, 7.4, 6.0, 5.6, 7.5, 7.8, 6.3, 5.9, 8.0, 8.4)
x_trend <- ma2(x, 4, TRUE)
sc <- x / x_trend
plot(x, type = "l", xlab = "quarter", ylab = "measurement", ylim = c(-1.5, 9))
lines(x_trend, col = "blue")
lines(sc, col = "green")
sc <- matrix(sc, ncol=4, nrow=4)
sc
sc[,1] <- ifelse(is.na(sc[,1]), sc[,ncol(sc)], sc[,1])
sc <- sc[,-4]
sc
###Output
_____no_output_____
###Markdown
The centered moving average loses data from the edges, so we are now missing one cycle's worth of data. However, since the trend is already removed, we can simply fill the gaps in the first cycle with data from the final one. After that, the final cycle can be discarded, and we can calculate mean values for each row to get the final seasonal component.
###Code
sc <- rowMeans(sc)
sc
###Output
_____no_output_____
###Markdown
The `seasonalIndex` function does this in a nutshell. We can now do the same for our CPU usage data.
###Code
seasonal <- seasonalIndex(noTrend, 6, 1440)
plot(seasonal, type = "l")
noTrendNoSeason <- matrix(noTrend, nrow=1440, ncol = 6) - seasonal
noTrendNoSeason <- as.vector(noTrendNoSeason)
plot(noTrendNoSeason, type = "l")
###Output
_____no_output_____
###Markdown
We are now done with the simple additive decomposition of our CPU measurement data. Next up, we will use simple linear regression to fit an optimal path through the decomposed data.
###Code
regression <- function(x){
return(lm(as.vector(x)~c(1:length(x))))
}
#slr <- regression(noTrendNoSeason)
#slr
adjusted <- matrix(train, nrow = 1440, ncol = 6) - seasonal
#y <- as.vector(adjusted)
#x <- c(1:length(y))
slr <- regression(adjusted)
slr
y <- as.vector(adjusted)
x <- c(1:length(y))
slope <- cor(x, y)* (sd(y)/sd(x))
intercept <- mean(y) - (slope * mean(x))
# y = slope * x + intercept
print(c(intercept, slope))
#y = slope * x + intercept
###Output
_____no_output_____
###Markdown
The resulting regression coefficients allow us to predict future trends. We have to define the bin indexes that correspond to timestamps ourselves. In our case, we want to predict one full day, which has 1440 minutes.
###Code
predictTrends <- function(lr,t = c(1,2,3)){
return(t*lr$coefficients[2]+lr$coefficients[1])
}
future_idx <- seq(length(adjusted)+1,length(adjusted)+1440,1)
head(future_idx)
tail(future_idx)
#future <- predictTrends(slr, future_idx)
future <- slope * future_idx + intercept
future <- future + seasonal
#future
###Output
_____no_output_____
###Markdown
We are not actually concerned with forecasting the future. Instead, we would like to compare our predictions against actual data points to raise alerts on deviations.
###Code
plot(validate, type = "l", xlab = "minute", ylab = "measurement", col = "blue", ylim = c(-2, 25))
lines(future, type = "l", col = "red")
deviations <- future - validate
###Output
_____no_output_____
###Markdown
A simple method for creating unified outlier scores for each data point is to normalize the deviations via the regular data standardization method: $$ \frac{\delta_i - \mu}{\sigma} $$
###Code
scores <- (deviations - mean(deviations)) / sd(deviations)
plot(scores, type = "l")
###Output
_____no_output_____
###Markdown
Outlier scores represent the number of standard deviations (sigma) a value is away from the mean. An anomaly score of 3 is considered statistically sufficient to raise an alarm, as roughly 99.7 percent of values should fall within 3 standard deviations of the mean.* https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule In practice, though, higher values may need to be used if the timeseries displays weak seasonal patterns or is noisy.
###Code
alertLevel <- function(x, thresh = 3){
return(which(x > thresh))
}
alerts <- alertLevel(abs(scores), thresh = 3)
ymin <- min(train, future, validate)
ymax <- max(train, future, validate)
plot(train, type="l", xlim = c(1,7*1440), ylim = c(ymin, ymax))
lines(c(rep(NA, length(train)), validate), col = "blue")
outliers <- rep(NA, length(validate))
outliers[alerts] <- validate[alerts]
outliers <- c(rep(NA, length(train)), outliers)
#alerts
points(outliers, col = "red")
###Output
_____no_output_____ |
Pipelines/ETLPipelines/16_featureengineering_exercise/.ipynb_checkpoints/16_featureengineering_exercise-solution-checkpoint.ipynb | ###Markdown
Feature Engineering Practice creating new features from the GDP and population data. You'll create a new feature gdppercapita, which is GDP divided by population. You'll then write code to create new features like GDP squared and GDP cubed. Start by running the code below. It reads in the World Bank data, filters the data for the year 2016, and cleans the data.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# read in the projects data set and do basic wrangling
gdp = pd.read_csv('../data/gdp_data.csv', skiprows=4)
gdp.drop(['Unnamed: 62', 'Country Code', 'Indicator Name', 'Indicator Code'], inplace=True, axis=1)
population = pd.read_csv('../data/population_data.csv', skiprows=4)
population.drop(['Unnamed: 62', 'Country Code', 'Indicator Name', 'Indicator Code'], inplace=True, axis=1)
# Reshape the data sets so that they are in long format
gdp_melt = gdp.melt(id_vars=['Country Name'],
var_name='year',
value_name='gdp')
# Use back fill and forward fill to fill in missing gdp values
gdp_melt['gdp'] = gdp_melt.sort_values('year').groupby('Country Name')['gdp'].fillna(method='ffill').fillna(method='bfill')
population_melt = population.melt(id_vars=['Country Name'],
var_name='year',
value_name='population')
# Use back fill and forward fill to fill in missing population values
population_melt['population'] = population_melt.sort_values('year').groupby('Country Name')['population'].fillna(method='ffill').fillna(method='bfill')
# merge the population and gdp data together into one data frame
df_country = gdp_melt.merge(population_melt, on=('Country Name', 'year'))
# filter data for the year 2016
df_2016 = df_country[df_country['year'] == '2016']
# filter out values that are not countries
non_countries = ['World',
'High income',
'OECD members',
'Post-demographic dividend',
'IDA & IBRD total',
'Low & middle income',
'Middle income',
'IBRD only',
'East Asia & Pacific',
'Europe & Central Asia',
'North America',
'Upper middle income',
'Late-demographic dividend',
'European Union',
'East Asia & Pacific (excluding high income)',
'East Asia & Pacific (IDA & IBRD countries)',
'Euro area',
'Early-demographic dividend',
'Lower middle income',
'Latin America & Caribbean',
'Latin America & the Caribbean (IDA & IBRD countries)',
'Latin America & Caribbean (excluding high income)',
'Europe & Central Asia (IDA & IBRD countries)',
'Middle East & North Africa',
'Europe & Central Asia (excluding high income)',
'South Asia (IDA & IBRD)',
'South Asia',
'Arab World',
'IDA total',
'Sub-Saharan Africa',
'Sub-Saharan Africa (IDA & IBRD countries)',
'Sub-Saharan Africa (excluding high income)',
'Middle East & North Africa (excluding high income)',
'Middle East & North Africa (IDA & IBRD countries)',
'Central Europe and the Baltics',
'Pre-demographic dividend',
'IDA only',
'Least developed countries: UN classification',
'IDA blend',
'Fragile and conflict affected situations',
'Heavily indebted poor countries (HIPC)',
'Low income',
'Small states',
'Other small states',
'Not classified',
'Caribbean small states',
'Pacific island small states']
# remove non countries from the data
df_2016 = df_2016[~df_2016['Country Name'].isin(non_countries)]
df_2016.reset_index(inplace=True, drop=True)
###Output
_____no_output_____
###Markdown
Exercise 1Create a new feature called gdppercapita in a new column. This feature should be the gdp value divided by the population.
###Code
# TODO: create a new feature called gdppercapita,
# which is the gdp value divided by the population value for each country
df_2016['gdppercapita'] = df_2016['gdp'] / df_2016['population']
###Output
_____no_output_____
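###Markdown
As a quick optional sanity check (not part of the original exercise), we can peek at the new column:
###Code
# Inspect the first few rows to confirm the gdppercapita column was added
df_2016[['Country Name', 'gdp', 'population', 'gdppercapita']].head()
###Output
_____no_output_____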
###Markdown
Exercise 2 (Challenge)This next exercise is more challenging and assumes you know how to use the pandas apply() method as well as lambda functions. Write code that creates multiples of a feature. For example, if you take the 'gdp' column and an integer like 3, you want to append a new column with the square of gdp (gdp^2) and another column with the cube of gdp (gdp^3).Follow the TODOs below. These functions build on each other in the following way:create_multiples(b, k) has two inputs. The first input, b, is a floating point number. The second number, k, is an integer. The output is a list of multiples of b. For example create_multiples(3, 4) would return this list: $[3^2, 3^3, 3^4]$ or in other words $[9, 27, 81]$.Then the column_name_generator(colname, k) function outputs a list of column names. For example, column_name_generator('gdp', 4) would output a list of strings `['gdp2', 'gdp3', 'gdp4']`. And finally, concatenate_features(df, column, num_columns) uses the two previous functions to create the new columns and then append these new columns to the original data frame.
###Code
# TODO: Fill out the create_multiples function.
# The create_multiples function has two inputs. A floating point number and an integer.
# The output is a list of multiples of the input b starting from the square of b and ending at b^k.
def create_multiples(b, k):
new_features = []
# TODO: use a for loop to make a list of multiples of b: ie b^2, b^3, b^4, etc... until b^k
for i in range(2,k+1):
new_features.append(b ** i)
return new_features
# TODO: Fill out the column_name_generator function.
# The function has two inputs: a string representing a column name and an integer k.
# The 'k' variable is the same as the create_multiples function.
# The output should be a list of column names.
# For example if the inputs are ('gdp', 4) then the output is a list of strings ['gdp2', 'gdp3', 'gdp4']
def column_name_generator(colname, k):
col_names = []
for i in range(2,k+1):
col_names.append('{}{}'.format(colname, i))
return col_names
# TODO: Fill out the concatenate_features function.
# The function has three inputs. A dataframe, a column name represented by a string, and an integer representing
# the maximum power to create when engineering features.
# If the input is (df_2016, 'gdp', 3), then the output will be the df_2016 dataframe with two new columns
# One new column will be 'gdp2' ie gdp^2, and then other column will be 'gdp3' ie gdp^3.
# HINT: There may be more than one way to do this.
# The TODOs in this section point you towards one way that works
def concatenate_features(df, column, num_columns):
# TODO: Use the pandas apply() method to create the new features. Inside the apply method, you
# can use a lambda function with the create_mtuliples function
new_features = df[column].apply(lambda x: create_multiples(x, num_columns))
# TODO: Create a dataframe from the new_features variable
# Use the column_name_generator() function to create the column names
# HINT: In the pd.DataFrame() method, you can specify column names inputting a list in the columns option
# HINT: Using new_features.tolist() might be helpful
new_features_df = pd.DataFrame(new_features.tolist(), columns = column_name_generator(column, num_columns))
# TODO: concatenate the original date frame in df with the new_features_df dataframe
# return this concatenated dataframe
return pd.concat([df, new_features_df], axis=1)
###Output
_____no_output_____
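###Markdown
Before looking at the final check below, here is an optional sketch (not part of the original solution) that tests the two helper functions against the examples given in the exercise text:
###Code
# Should print [9, 27, 81] and ['gdp2', 'gdp3', 'gdp4'] respectively
print(create_multiples(3, 4))
print(column_name_generator('gdp', 4))
###Output
_____no_output_____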
###Markdown
Solution Run the code cell below. If your code is correct, you should get a dataframe with 8 columns. Here are the first two rows of what your results should look like.
| Country Name | year | gdp | population | gdppercapita | gdp2 | gdp3 | gdp4 |
|--------------|------|--------------|------------|--------------|--------------|--------------|--------------|
| Aruba | 2016 | 2.584464e+09 | 104822.0 | 24655.737223 | 6.679453e+18 | 1.726280e+28 | 4.461509e+37 |
| Afghanistan | 2016 | 1.946902e+10 | 34656032.0 | 561.778746 | 3.790428e+20 | 7.379593e+30 | 1.436735e+41 |
There is a solution in the 16_featureengineering_exercise folder if you go to File->Open.
###Code
concatenate_features(df_2016, 'gdp', 4)
###Output
_____no_output_____ |
notebook/Stress_Analysis_BERT.ipynb | ###Markdown
Stress Analysis in Social Media Leverage the newly published and labelled reddit dataset for stress analysis to develop and improve supervised learning methods for identifying stress, both neural and traditional, and analyze the complexity and diversity of the data and characteristics of each category.
###Code
import os
import re
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
print(tf.__version__)
import tensorflow as tf
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
import warnings
warnings.filterwarnings("ignore")
import logging, sys
logging.disable(sys.maxsize)
###Output
_____no_output_____
###Markdown
Data
###Code
path = '../data/'
# path = '/content/Insight_Stress_Analysis/data/'
train = pd.read_csv(path + 'dreaddit-train.csv', encoding = "ISO-8859-1")
test = pd.read_csv(path + 'dreaddit-test.csv', encoding = "ISO-8859-1")
DATA_COLUMN = 'text'
LABEL_COLUMN = 'label'
# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'
label_list = [0, 1]
###Output
_____no_output_____
###Markdown
Data Preprocessing We'll need to transform our data into a format BERT understands. This involves two steps. First, we create `InputExample`'s using the constructor provided in the BERT library.- `text_a` is the text we want to classify, which in this case, is the `text` field in our Dataframe. - `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.- `label` is the label for our example, i.e. True, False
###Code
# Use the InputExample class from BERT's run_classifier code to create examples from the data
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
###Output
_____no_output_____
###Markdown
Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):1. Lowercase our text (if we're using a BERT lowercase model)2. Tokenize it (i.e. "sally says hi" -> ["sally", "says", "hi"])3. Break words into WordPieces (i.e. "calling" -> ["call", "ing"])4. Map our words to indexes using a vocab file that BERT provides5. Add special "CLS" and "SEP" tokens (see the [readme](https://github.com/google-research/bert))6. Append "index" and "segment" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf)) Happily, we don't have to worry about most of these details.
###Code
# Load a vocabulary file and lowercasing information directly from the BERT tf hub module
# This is a path to an uncased (all lowercase) version of BERT
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
def create_tokenizer_from_hub_module():
"""Get the vocab file and casing info from the Hub module."""
with tf.Graph().as_default():
bert_module = hub.Module(BERT_MODEL_HUB)
tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
with tf.Session() as sess:
vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
return bert.tokenization.FullTokenizer(vocab_file=vocab_file,
do_lower_case=do_lower_case)
tokenizer = create_tokenizer_from_hub_module()
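# (Illustrative addition, not in the original notebook) We could sanity-check the
# tokenizer's WordPiece output on a short sentence:
print(tokenizer.tokenize("This here's an example of using the BERT tokenizer"))
# Expect lowercased WordPieces, with rarer words split into sub-tokens such as 'token', '##izer'.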
# Set the maximum sequence length.
def get_max_len(text):
max_len = 0
for i in range(len(train)):
if len(text.iloc[i]) > max_len:
max_len = len(text.iloc[i])
return max_len
temp = train.text.str.split(' ')
max_len = get_max_len(temp)
MAX_SEQ_LENGTH = max_len
# Convert our train and test features to InputFeatures that BERT understands.
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples,
label_list,
MAX_SEQ_LENGTH,
tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples,
label_list,
MAX_SEQ_LENGTH,
tokenizer)
###Output
_____no_output_____
###Markdown
Classification Model Now that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our classification task (i.e. classifying whether a post indicates stress or not). This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning).
###Code
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
num_labels):
"""Creates a classification model."""
bert_module = hub.Module(BERT_MODEL_HUB, trainable=True)
bert_inputs = dict(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
bert_outputs = bert_module(inputs=bert_inputs, signature="tokens", as_dict=True)
# Use "pooled_output" for classification tasks on an entire sentence.
# Use "sequence_outputs" for token-level output.
output_layer = bert_outputs["pooled_output"]
hidden_size = output_layer.shape[-1].value
# Create our own layer to tune for politeness data.
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable("output_bias",
[num_labels],
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
# Dropout helps prevent overfitting
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
# Convert labels into one-hot encoding
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
# If we're predicting, we want predicted labels and the probabiltiies.
if is_predicting:
return (predicted_labels, log_probs)
# If we're train/eval, compute loss between predicted and actual label
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, predicted_labels, log_probs)
###Output
_____no_output_____
###Markdown
Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction.
###Code
# model_fn_builder actually creates our model function
# using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)
# TRAIN and EVAL
if not is_predicting:
(loss, predicted_labels, log_probs) = create_model(is_predicting,
input_ids,
input_mask,
segment_ids,
label_ids,
num_labels)
train_op = bert.optimization.create_optimizer(loss,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu=False)
# Calculate evaluation metrics.
def metric_fn(label_ids, predicted_labels):
accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
f1_score = tf.contrib.metrics.f1_score(label_ids, predicted_labels)
auc = tf.metrics.auc(label_ids, predicted_labels)
recall = tf.metrics.recall(label_ids, predicted_labels)
precision = tf.metrics.precision(label_ids, predicted_labels)
true_pos = tf.metrics.true_positives(label_ids, predicted_labels)
true_neg = tf.metrics.true_negatives(label_ids, predicted_labels)
false_pos = tf.metrics.false_positives(label_ids, predicted_labels)
false_neg = tf.metrics.false_negatives(label_ids, predicted_labels)
return {
"eval_accuracy": accuracy,
"f1_score": f1_score,
"auc": auc,
"precision": precision,
"recall": recall,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg
}
eval_metrics = metric_fn(label_ids, predicted_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)
else:
(predicted_labels, log_probs) = create_model(is_predicting,
input_ids,
input_mask,
segment_ids,
label_ids,
num_labels)
predictions = {'probabilities': log_probs, 'labels': predicted_labels}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Return the actual model function in the closure
return model_fn
# Compute train and warmup steps from batch size
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 500
SAVE_SUMMARY_STEPS = 100
# Compute # train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
# Specify outpit directory and number of checkpoint steps to save
OUTPUT_DIR = 'output'
run_config = tf.estimator.RunConfig(model_dir=OUTPUT_DIR,
save_summary_steps=SAVE_SUMMARY_STEPS,
save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)
model_fn = model_fn_builder(num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps)
estimator = tf.estimator.Estimator(model_fn=model_fn,
config=run_config,
params={"batch_size": BATCH_SIZE})
###Output
_____no_output_____
###Markdown
Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with Tensorflow [Estimators](https://www.tensorflow.org/guide/estimators).
###Code
# Create an input function for training. drop_remainder = True for using TPUs.
train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,
seq_length=MAX_SEQ_LENGTH,
is_training=True,
drop_remainder=False)
###Output
_____no_output_____
###Markdown
Now we train our model! For me, using a Colab notebook running on Google's GPUs, my training time was about 14 minutes.
###Code
print(f'Beginning Training!')
current_time = datetime.now()
# train the model
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)
# check the test result
test_input_fn = run_classifier.input_fn_builder(features=test_features,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=False)
estimator.evaluate(input_fn=test_input_fn, steps=None)
def predict(in_sentences):
labels = ["non-stress", "stress"]
labels_idx = [0, 1]
input_examples = [run_classifier.InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, "" is just a dummy label
input_features = run_classifier.convert_examples_to_features(input_examples,
labels_idx,
MAX_SEQ_LENGTH,
tokenizer)
predict_input_fn = run_classifier.input_fn_builder(features=input_features,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=False)
predictions = estimator.predict(predict_input_fn)
return [{"text": sentence, "confidence": list(prediction['probabilities']), "labels": labels[prediction['labels']]}
for sentence, prediction in zip(in_sentences, predictions)]
pred_sentences = ["It's Friday! We wish you a nice start into the weekend!",
"Deep breathing exercises are very relaxing. It can also relieve the symptoms of stress and anxiety.",
"Do you like fruits? I like so much! Be Happy, Keep Smiling!"
]
predictions = predict(pred_sentences)
predictions
###Output
_____no_output_____
###Markdown
Save Ckpts to PB file
###Code
import os
import tensorflow as tf
trained_checkpoint_prefix = '/home/gillianchiang/Insight_Stress_Analysis/framework/output/model.ckpt-266'
export_dir = './bert_output/'
graph = tf.Graph()
with tf.compat.v1.Session(graph=graph) as sess:
# Restore from checkpoint
loader = tf.compat.v1.train.import_meta_graph(trained_checkpoint_prefix + '.meta')
loader.restore(sess, trained_checkpoint_prefix)
# Export checkpoint to SavedModel
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(sess,
[tf.saved_model.TRAINING, tf.saved_model.SERVING],
strip_default_attrs=True)
builder.save()
###Output
_____no_output_____
###Markdown
BERT fine-tuned model for Tensorflow serving Reference: https://medium.com/delvify/bert-rest-inference-from-the-fine-tuned-model-499997b32851
###Code
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
input_ids = tf.placeholder(tf.int32, [None, MAX_SEQ_LENGTH], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, MAX_SEQ_LENGTH], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, MAX_SEQ_LENGTH], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'label_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
estimator._export_to_tpu = False
estimator.export_savedmodel(OUTPUT_DIR, serving_input_fn)
# !saved_model_cli show --dir /home/gillianchiang/Insight_Stress_Analysis/framework/output/1581039388/ --all
# !tensorflow_model_server --port=8500 --rest_api_port=8501 --model_name=bert_model --model_base_path=/home/gillianchiang/Insight_Stress_Analysis/framework/output/
###Output
_____no_output_____ |
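###Markdown
As an optional illustration (not part of the original notebook), once TensorFlow Serving is running with the commented command above, the exported signature can be queried over REST. The host, port and model name below simply mirror that command and are assumptions about the local setup.
###Code
import requests
# Build one request row matching the serving_input_fn signature defined earlier
feature = test_features[0]
payload = {"instances": [{
    "input_ids": feature.input_ids,
    "input_mask": feature.input_mask,
    "segment_ids": feature.segment_ids,
    "label_ids": feature.label_id,
}]}
resp = requests.post("http://localhost:8501/v1/models/bert_model:predict", json=payload)
print(resp.json())
###Output
_____no_output_____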
chapters/Chapter 15 - Pandas Indexing and Selecting.ipynb | ###Markdown
This material is copied (possibly with some modifications) from [Joris Van den Bossche's Pandas Tutorial](https://github.com/jorisvandenbossche/pandas-tutorial). Chapter 15 - Indexing and selecting data
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except ImportError:
pass
# redefining the example objects
# series
population = pd.Series({'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3,
'United Kingdom': 64.9, 'Netherlands': 16.9})
# dataframe
data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
'population': [11.3, 64.3, 81.3, 16.9, 64.9],
'area': [30510, 671308, 357050, 41526, 244820],
'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']}
countries = pd.DataFrame(data)
countries
###Output
_____no_output_____
###Markdown
Setting the index to the country names:
###Code
countries = countries.set_index('country')
countries
###Output
_____no_output_____
###Markdown
Some notes on selecting data One of pandas' basic features is the labeling of rows and columns, but this makes indexing also a bit more complex compared to numpy. We now have to distinguish between:- selection by label- selection by position. `data[]` provides some convenience shortcuts For a DataFrame, basic indexing selects the columns. Selecting a single column:
###Code
countries['area']
###Output
_____no_output_____
###Markdown
or multiple columns:
###Code
countries[['area', 'population']]
###Output
_____no_output_____
###Markdown
But, slicing accesses the rows:
###Code
countries['France':'Netherlands']
###Output
_____no_output_____
###Markdown
NOTE: Unlike slicing in numpy, the end label is **included**. So as a summary, `[]` provides the following convenience shortcuts:- Series: selecting a label: `s[label]`- DataFrame: selecting a single or multiple columns: `df['col']` or `df[['col1', 'col2']]`- DataFrame: slicing the rows: `df['row_label1':'row_label2']` or `df[mask]` Systematic indexing with `loc` and `iloc` When using `[]` like above, you can only select from one axis at once (rows or columns, not both). For more advanced indexing, you have some extra attributes: * `loc`: selection by label* `iloc`: selection by positionThese methods index the different dimensions of the frame:* `df.loc[row_indexer, column_indexer]`* `df.iloc[row_indexer, column_indexer]` Selecting a single element:
###Code
countries.loc['Germany', 'area']
###Output
_____no_output_____
###Markdown
But the row or column indexer can also be a list, slice, boolean array, ..
###Code
countries.loc['France':'Germany', ['area', 'population']]
###Output
_____no_output_____
###Markdown
---Selecting by position with `iloc` works similarly to indexing numpy arrays:
###Code
countries.iloc[0:2,1:3]
###Output
_____no_output_____
###Markdown
The different indexing methods can also be used to assign data:
###Code
countries2 = countries.copy()
countries2.loc['Belgium':'Germany', 'population'] = 10
countries2
###Output
_____no_output_____
###Markdown
Boolean indexing (filtering) Often, you want to select rows based on a certain condition. This can be done with 'boolean indexing' (like a where clause in SQL). The indexer (or boolean mask) should be 1-dimensional and the same length as the thing being indexed.
###Code
countries['area'] > 100000
countries[countries['area'] > 100000]
###Output
_____no_output_____
###Markdown
--- EXERCISE: Add a column `density` with the population density (note: population column is expressed in millions) EXERCISE: Select the capital and the population column of those countries where the density is larger than 300 EXERCISE: Add a column 'density_ratio' with the ratio of the density to the mean density EXERCISE: Change the capital of the UK to Cambridge EXERCISE: Select all countries whose population density is between 100 and 300 people/km² Some other useful methods: `isin` and string methods The `isin` method of Series is very useful to select rows that may contain certain values:
###Code
s = countries['capital']
s.isin?
s.isin(['Berlin', 'London'])
###Output
_____no_output_____
###Markdown
This can then be used to filter the dataframe with boolean indexing:
###Code
countries[countries['capital'].isin(['Berlin', 'London'])]
###Output
_____no_output_____
###Markdown
Let's say we want to select all data for which the capital starts with a 'B'. In Python, when having a string, we could use the `startswith` method:
###Code
'Berlin'.startswith('B')
###Output
_____no_output_____
###Markdown
In pandas, these are available on a Series through the `str` namespace:
###Code
countries['capital'].str.startswith('B')
###Output
_____no_output_____
###Markdown
For an overview of all string methods, see: http://pandas.pydata.org/pandas-docs/stable/api.html#string-handling EXERCISE: Select all countries that have capital names with more than 7 characters EXERCISE: Select all countries that have capital names that contain the character sequence 'am' Pitfall: chained indexing (and the 'SettingWithCopyWarning')
###Code
countries.loc['Belgium', 'capital'] = 'Ghent'
countries
countries['capital']['Belgium'] = 'Antwerp'
countries
countries[countries['capital'] == 'Antwerp']['capital'] = 'Brussels'
countries
###Output
_____no_output_____
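###Markdown
As an illustrative aside (not from the original tutorial), the warning-free way to perform the same assignment is a single `.loc` call, optionally on an explicit copy:
###Code
countries_fixed = countries.copy()  # explicit copy, so the original frame is left untouched
countries_fixed.loc[countries_fixed['capital'] == 'Antwerp', 'capital'] = 'Brussels'
countries_fixed
###Output
_____no_output_____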
###Markdown
How to avoid this?* Use `loc` instead of chained indexing if possible!* Or `copy` explicitly if you don't want to change the original data. More exercises! For the quick ones among you, here are some more exercises with some larger dataframe with film data. These exercises are based on the [PyCon tutorial of Brandon Rhodes](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. You can download these data from here: [`titles.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKajNMa1pfSzN6Q3M) and [`cast.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKal9UYTJSR2ZhSW8) and put them in the `/data` folder.
###Code
cast = pd.read_csv('../data/cast.csv')
cast.head()
cast.shape # big dataset!
titles = pd.read_csv('../data/titles.csv')
titles.head()
###Output
_____no_output_____
###Markdown
This material is copied (possibily with some modifications) from [Joris Van den Bossche's Pandas Tutorial](https://github.com/jorisvandenbossche/pandas-tutorial). Indexing and selecting data
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except ImportError:
pass
# redefining the example objects
# series
population = pd.Series({'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3,
'United Kingdom': 64.9, 'Netherlands': 16.9})
# dataframe
data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
'population': [11.3, 64.3, 81.3, 16.9, 64.9],
'area': [30510, 671308, 357050, 41526, 244820],
'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']}
countries = pd.DataFrame(data)
countries
###Output
_____no_output_____
###Markdown
Setting the index to the country names:
###Code
countries = countries.set_index('country')
countries
###Output
_____no_output_____
###Markdown
Some notes on selecting dataOne of pandas' basic features is the labeling of rows and columns, but this makes indexing also a bit more complex compared to numpy. We now have to distuinguish between:- selection by label- selection by position. `data[]` provides some convenience shortcuts For a DataFrame, basic indexing selects the columns.Selecting a single column:
###Code
countries['area']
###Output
_____no_output_____
###Markdown
or multiple columns:
###Code
countries[['area', 'population']]
###Output
_____no_output_____
###Markdown
But, slicing accesses the rows:
###Code
countries['France':'Netherlands']
###Output
_____no_output_____
###Markdown
NOTE: Unlike slicing in numpy, the end label is **included**. So as a summary, `[]` provides the following convenience shortcuts:- Series: selecting a label: `s[label]`- DataFrame: selecting a single or multiple columns: `df['col']` or `df[['col1', 'col2']]`- DataFrame: slicing the rows: `df['row_label1':'row_label2']` or `df[mask]` Systematic indexing with `loc` and `iloc` When using `[]` like above, you can only select from one axis at once (rows or columns, not both). For more advanced indexing, you have some extra attributes: * `loc`: selection by label* `iloc`: selection by positionThese methods index the different dimensions of the frame:* `df.loc[row_indexer, column_indexer]`* `df.iloc[row_indexer, column_indexer]` Selecting a single element:
###Code
countries.loc['Germany', 'area']
###Output
_____no_output_____
###Markdown
But the row or column indexer can also be a list, slice, boolean array, ..
###Code
countries.loc['France':'Germany', ['area', 'population']]
###Output
_____no_output_____
###Markdown
---Selecting by position with `iloc` works similar as indexing numpy arrays:
###Code
countries.iloc[0:2,1:3]
###Output
_____no_output_____
###Markdown
The different indexing methods can also be used to assign data:
###Code
countries2 = countries.copy()
countries2.loc['Belgium':'Germany', 'population'] = 10
countries2
###Output
_____no_output_____
###Markdown
Boolean indexing (filtering) Often, you want to select rows based on a certain condition. This can be done with 'boolean indexing' (like a where clause in SQL). The indexer (or boolean mask) should be 1-dimensional and the same length as the thing being indexed.
###Code
countries['area'] > 100000
countries[countries['area'] > 100000]
###Output
_____no_output_____
###Markdown
--- EXERCISE: Add a column `density` with the population density (note: population column is expressed in millions) EXERCISE: Select the capital and the population column of those countries where the density is larger than 300 EXERCISE: Add a column 'density_ratio' with the ratio of the density to the mean density EXERCISE: Change the capital of the UK to Cambridge EXERCISE: Select all countries whose population density is between 100 and 300 people/km² Some other useful methods: `isin` and string methods The `isin` method of Series is very useful to select rows that may contain certain values:
###Code
s = countries['capital']
s.isin?
s.isin(['Berlin', 'London'])
###Output
_____no_output_____
###Markdown
This can then be used to filter the dataframe with boolean indexing:
###Code
countries[countries['capital'].isin(['Berlin', 'London'])]
###Output
_____no_output_____
###Markdown
Let's say we want to select all data for which the capital starts with a 'B'. In Python, when having a string, we could use the `startswith` method:
###Code
'Berlin'.startswith('B')
###Output
_____no_output_____
###Markdown
In pandas, these are available on a Series through the `str` namespace:
###Code
countries['capital'].str.startswith('B')
###Output
_____no_output_____
###Markdown
For an overview of all string methods, see: http://pandas.pydata.org/pandas-docs/stable/api.htmlstring-handling EXERCISE: Select all countries that have capital names with more than 7 characters EXERCISE: Select all countries that have capital names that contain the character sequence 'am' Pitfall: chained indexing (and the 'SettingWithCopyWarning')
###Code
countries.loc['Belgium', 'capital'] = 'Ghent'
countries
countries['capital']['Belgium'] = 'Antwerp'
countries
countries[countries['capital'] == 'Antwerp']['capital'] = 'Brussels'
countries
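# A minimal sketch of the safer alternatives discussed below: either do the
# assignment in a single .loc call, or work on an explicit copy first.
countries.loc[countries['capital'] == 'Antwerp', 'capital'] = 'Brussels'
subset = countries[countries['area'] > 100000].copy()
subset['capital'] = 'unknown'   # modifies only the copy, so no warning is raised
countries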
###Output
_____no_output_____
###Markdown
How to avoid this?* Use `loc` instead of chained indexing if possible!* Or `copy` explicitly if you don't want to change the original data. More exercises! For the quick ones among you, here are some more exercises with some larger dataframe with film data. These exercises are based on the [PyCon tutorial of Brandon Rhodes](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. You can download these data from here: [`titles.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKajNMa1pfSzN6Q3M) and [`cast.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKal9UYTJSR2ZhSW8) and put them in the `/data` folder.
###Code
cast = pd.read_csv('../data/cast.csv')
cast.head()
cast.shape # big dataset!
titles = pd.read_csv('../data/titles.csv')
titles.head()
###Output
_____no_output_____ |
notebooks/get_most_frequent_word_full_database.ipynb | ###Markdown
Getting the English vocabulary of the EP full-text data for text analytics 1. Libraries
###Code
# librairies
import os
import re
import glob
import pandas as pd
from collections import Counter
# library to parse the xml content of the EP full text database
# library doc: https://docs.python.org/3/library/xml.etree.elementtree.html
import xml.etree.ElementTree as ET
# disable warnings
import warnings
warnings.filterwarnings("ignore")
# language processing
import nltk
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
nltk.download('stopwords')
###Output
[nltk_data] Downloading package stopwords to
[nltk_data] /home/antoine/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
2. Config
###Code
# location the the files - EP full text data 2020 edition
fpattern = r'../data/ep_full_text_database/2020_edition/EP{}.txt'
files = glob.glob(fpattern.format('*','*'))
# config
sep = '\t'
text_type = 'CLAIM'
lang = 'en'
new_col_names = ['publication_authority', # will always have the value "EP"
'publication_number', # a seven-digit number
'publication_kind', # see https://www.epo.org/searching-for-patents/helpful-resources/first-time-here/definitions.html for help.
'publication_date', # in format YYYY-MM-DD
'language_text_component', # de, en, fr; xx means unknown
'text_type', # TITLE, ABSTR, DESCR, CLAIM, AMEND, ACSTM, SREPT, PDFEP
'text' # it contains, where appropriate, XML tags for better structure. You will find the DTD applicable to all parts of the publication at: http://docs.epoline.org/ebd/doc/ep-patent-document-v1-5.dtd
]
###Output
_____no_output_____
###Markdown
3. Fonction to process text data
###Code
def get_claim_text(f):
"""Open the file f to get all the claims in English"""
# reading the file
print('Reading the file: {}'.format(f))
df = pd.read_csv(f, sep = sep)
# changing the column names
df.columns = new_col_names
# filtering to keep only claims in English (and only once)
condition1 = df['text_type'] == text_type
condition2 = df['language_text_component'] == lang
df.drop_duplicates(subset = ['text'], inplace = True)
df = df[condition1 & condition2]['text'].to_frame()
return df
def parsing(text_xml):
"""Process the xml to get the raw text"""
# removing the tags for bold text
text_xml_modified = text_xml.replace('<b>', '')
text_xml_modified = text_xml_modified.replace('</b>', '')
# modifying the claim to be processed as a real xml
text_xml_modified = "<data>" + text_xml_modified + '</data>'
# we parse it with the ElementTree XML API¶
root = ET.fromstring(text_xml_modified)
# and this is how we access the text of the claims
claims = root.findall("./claim/claim-text")
# we store the claims in a list
claims_text = [claim.text for claim in claims]
return claims_text
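# A quick sanity check of parsing() on a tiny hand-made claim (the XML snippet
# below is just an illustrative example mimicking the claim/claim-text layout).
sample_claim = '<claim><claim-text>A device comprising a sensor.</claim-text></claim>'
print(parsing(sample_claim))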
###Output
_____no_output_____
###Markdown
4. Load data
###Code
%%time
# test with a single file
df = get_claim_text(files[0])
def fetch_all_vocabulary(files):
    """Read every file and concatenate the English claim texts."""
    l = []
    for f in files:
        text = get_claim_text(f)
        l.append(text)  # append this file's claims (parsing is applied later, in section 5)
    return pd.concat(l)
%%time
df = fetch_all_vocabulary(files)
###Output
Reading the file: ../data/ep_full_text_database/2020_edition/EP2400000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2700000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3000000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1700000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0100000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2300000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1800000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1500000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2500000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3300000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0200000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1100000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2800000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2900000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3500000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2000000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1300000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0400000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1200000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3100000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0900000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0800000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3200000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0600000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0300000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0500000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1000000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1900000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0700000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2200000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3400000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1600000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP1400000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2600000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP0000000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP3600000.txt
Reading the file: ../data/ep_full_text_database/2020_edition/EP2100000.txt
CPU times: user 47min 25s, sys: 1min 49s, total: 49min 14s
Wall time: 1h 8min 20s
###Markdown
5. Text preprocessing
###Code
# reshape
documents = df['text'].apply(parsing)
documents = documents.apply(lambda x: x[0] if len(x) > 0 else None)  # keep the first claim; guard against empty claim lists
documents.dropna(inplace=True)
# tokenize
tokenizer = RegexpTokenizer("(?u)\\b[\\w-]+\\b")
documents = documents.apply(tokenizer.tokenize)
def remove_capitalisation(x):
liste = [y.lower() for y in x]
return liste
def remove_stop_words(x):
stopset = set(stopwords.words('english'))
liste = [y for y in x if not y in stopset]
return liste
def remove_numbers(x):
liste = [y for y in x if not any(char.isdigit() for char in y)]
return liste
documents = documents.apply(remove_capitalisation)
documents = documents.apply(remove_stop_words)
documents = documents.apply(remove_numbers)
documents = documents.apply(lambda x:' '.join(x))
# display the most frequent words in the dataset
Counter(" ".join(documents).split()).most_common(100)
###Output
_____no_output_____ |
learning-py/README.ipynb | ###Markdown
Learning pythonA combination of code snippets and syntax from MOOCs and other online resourcesTable of Contents================= * [Print statement](print-statement) * [Printing a List](printing-a-list) * [Comma after print](comma-after-print) * [Reading input](reading-input) * [Boolean operators](boolean-operators) * [Bitwise Operator](bitwise-operator) * [Shift Operatons](shift-operatons) * [NOT Operator](not-operator) * [Flip Bit](flip-bit) * [Conditional statements](conditional-statements) * [type()](type) * [String methods](string-methods) * [Concatenation](concatenation) * [Sub-String](sub-string) * [String Looping](string-looping) * [Reverse](reverse) * [Loops](loops) * [While](while) * [While / else](while--else) * [Example One](example-one) * [Example Two](example-two) * [Example Three](example-three) * [For Loops](for-loops) * [For / else](for--else) * [Functions](functions) * [Function Imports](function-imports) * [List all functions in a module](list-all-functions-in-a-module) * [Universal Import](universal-import) * [Don't use Universal Imports](dont-use-universal-imports) * [Passing multiple arguments](passing-multiple-arguments) * [Example](example) * [Anonymous Functions / lambda Operator](anonymous-functions--lambda-operator) * [filter](filter) * [Example One](example-one-1) * [Example Two](example-two-1) * [Example Three](example-three-1) * [Example Four](example-four) * [Example Five](example-five) * [Example Six](example-six) * [Example Seven](example-seven) * [Lists](lists) * [Building Lists / List Comprehension](building-lists--list-comprehension) * [for/in & if](forin--if) * [List Append](list-append) * [List remove](list-remove) * [List pop](list-pop) * [List remove](list-remove-1) * [List delete](list-delete) * [List concatenate](list-concatenate) * [List Reverse](list-reverse) * [Example One](example-one-2) * [Example Two](example-two-2) * [List Slicing](list-slicing) * [Example One](example-one-3) * [Example Two](example-two-3) * [Example Three](example-three-2) * [Example Four](example-four-1) * [Example Five](example-five-1) * [Omitting Indices](omitting-indices) * [List Insertion & Indexing](list-insertion--indexing) * [Looping in lists](looping-in-lists) * [Example](example-1) * [Method 1 - for item in list](method-1---for-item-in-list) * [Method 2 - iterate through indexes](method-2---iterate-through-indexes) * [List of Lists](list-of-lists) * [Example One](example-one-4) * [Example Two](example-two-4) * [Printing Pretty](printing-pretty) * [Example](example-2) * [Sorting the List](sorting-the-list) * [Range in lists](range-in-lists) * [Passing a range into a function](passing-a-range-into-a-function) * [Example One](example-one-5) * [Example Two](example-two-5) * [Enumerate](enumerate) * [Iterating Multiple Lists](iterating-multiple-lists) * [Dictionary](dictionary) * [Assigning a dictionary with three key-value pairs to residents](assigning-a-dictionary-with-three-key-value-pairs-to-residents) * [Adding New entries](adding-new-entries) * [Add & Delete](add--delete) * [Retrieve key and value](retrieve-key-and-value) * [Method One](method-one) * [Method Two](method-two) * [Example](example-3) * [Lists and Dictionaries](lists-and-dictionaries) * [Looping in dictionaries](looping-in-dictionaries) * [More Looping techniques](more-looping-techniques) * [List of Dictionaries](list-of-dictionaries) * [Classes](classes) * [Example One](example-one-6) * [Errors](errors) Print statement
###Code
print "Turn", turn + 1
print '%s/%s/%s' % (now.year, now.month, now.day)
print "Let's not go to %s. 'Tis a silly %s." % (string_1, string_2)
print 'This isn\'t flying, this is falling with style!'
print("{} is {} and {} is {}.".format(philo.name, philo.age, mikey.name, mikey.age))
print('The {q} {b} {f}'.format(f='fox', b='brown', q='quick'))
print('The {q} {f} {f}'.format(f='fox', b='brown', q='quick'))
###Output
_____no_output_____
###Markdown
Printing a List
###Code
name = ["brave", "ice", "berg"]
id = [1, 2, 3]
for i in id:
print "My name is {0}. ID {1}".format(name[i-1], id[i-1])
###Output
_____no_output_____
###Markdown
Comma after printString manipulation is useful in for loops if you want to modifysome content in a string.
###Code
word = "Marble"
for char in word:
print char,
'''
The example above iterates through each character in word and,
in the end, prints out M a r b l e.
The , character after our print statement means that our next print
statement keeps printing on the same line.
'''
#Example
phrase = "A bird in the hand..."
# Add your for loop
for i in phrase:
if i == 'A' or i == 'a':
print 'X',
else:
print i,
#Don't delete this print statement!
print
###Output
_____no_output_____
###Markdown
Reading input
###Code
name = raw_input("What is your name?")
var = input("Enter a number")
var = int(input("Enter a number"))
###Output
_____no_output_____
###Markdown
There were two functions to get user input: `input` and `raw_input`. The difference between them is that `raw_input` does not evaluate the data and returns it as-is, in string form, whereas `input` evaluates whatever you entered and returns the result of that evaluation.
###Code
name = raw_input("What is your name?")
quest = raw_input("What is your quest?")
color = raw_input("What is your favorite color?")
print "Ah, so your name is %s, your quest is %s, " \
"and your favorite color is %s." % (name, quest, color)
# Example
number = raw_input("Enter a number: ")
if int(number) == 0:
print "You entered 0"
'''
raw_input asks the user for input and returns it as a string. But we're going to
want to use integers for our guesses! To do this, we'll wrap the raw_inputs with int()
to convert the string to an integer.
'''
guess = int(raw_input("Your guess: "))
'''
Remember, raw_input turns user input into a string, so we use int()
to make it a number again.
'''
###Output
_____no_output_____
###Markdown
Boolean operators
###Code
True or False
(3 < 4) and (5 >= 5)
this() and not that()
###Output
_____no_output_____
###Markdown
Bitwise Operator
###Code
print 5 >> 4 # Right Shift
print 5 << 1 # Left Shift
print 8 & 5 # Bitwise AND
print 9 | 4 # Bitwise OR
print 12 ^ 42 # Bitwise XOR
print ~88 # Bitwise NOT
print bin(4)
# => 0b100
print int("0b11001001", 2)
# => 201
print int("111",2)
# => 7
print int(bin(5),2)
# => 5
###Output
_____no_output_____
###Markdown
Shift Operatons
###Code
shift_right = 0b1100
shift_left = 0b1
# Your code here!
shift_right >>= 2
shift_left <<= 2
print bin(shift_right)
print bin(shift_left)
# => 0b11
# => 0b100
###Output
_____no_output_____
###Markdown
NOT Operator
###Code
print ~2
# => -3
###Output
_____no_output_____
###Markdown
Flip Bit
###Code
def flip_bit(number, n):
result = 0b0
result = number ^ (1 << (n-1))
return bin(result)
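# A small usage sketch (added for illustration): flip the 2nd bit of 0b0111.
print flip_bit(0b0111, 2)   # => 0b101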
###Output
_____no_output_____
###Markdown
Conditional statements
###Code
if this_might_be_true():
print "This really is true."
elif that_might_be_true():
print "That is true."
else:
print "None of the above."
###Output
_____no_output_____
###Markdown
Don't forget to include a : after your if statements!
###Code
if guess_row not in range(5) or guess_col not in range(5):
print "Oops, that's not even in the ocean."
###Output
_____no_output_____
###Markdown
type()
###Code
'''
Finally, the type() function returns the type of the data it receives as an
argument.
'''
print type(42)
print type(4.2)
print type('spam')
#Python will output:
<type 'int'>
<type 'float'>
<type 'str'>
###Output
_____no_output_____
###Markdown
String methods
###Code
fifth_letter = "MONTY"[4]
ministry = "The ministry of Defence"
print len(ministry)      # len() returns the length of a string
print "Ryan".lower()     # lower() converts a string to lowercase
print ministry.upper()   # upper() converts a string to uppercase
print str(2)             # str() turns the number 2 into the string "2"
x = "J123"
print x.isalpha()        # False
###Output
_____no_output_____
###Markdown
In the first line, we create a string with letters and numbers.The second line then runs the function isalpha() which returnsFalse since the string contains non-letter characters. ConcatenationRemember how to concatenate (i.e. add) strings together?
###Code
greeting = "Hello "
name = "D. Y."
welcome = greeting + name
print "Spam" + " and" + " eggs"
# This will print: I have 2 coconuts!
print "I have " + str(2) + " coconuts!"
###Output
_____no_output_____
###Markdown
The `str()` method converts non-strings into strings. In the above example,you convert the number 2 into a string and then you concatenate the stringstogether just like in the previous exercise. Sub-String
###Code
s = "Charlie"
print s[0]
# will print "C"
print s[1:4]
# will print "har"
###Output
_____no_output_____
###Markdown
String LoopingStrings are like lists with characters as elements.You can loop through strings the same way you loop through lists
###Code
for letter in "Codecademy":
print letter
S = "lumberjack"
for x in S: print(x, end=' ')   # Iterate over a string (Python 3 print syntax)
# => l u m b e r j a c k
# Empty lines to make the output pretty
print
print
word = "Programming is fun!"
for letter in word:
# Only print out the letter i
if letter == "i":
print letter
###Output
_____no_output_____
###Markdown
Reverse
###Code
reverse("abcd") should return "dcba".
or
[::-1] to help you with this.
S = "hello"
S[::-1] will return "olleh"
###Output
_____no_output_____
###Markdown
Loops While
###Code
loop_condition = True
while loop_condition:
print "I am a loop"
loop_condition = False
###Output
_____no_output_____
###Markdown
While / else`while/else` is similar to `if/else`, but there is a difference: the `else`block will execute anytime the loop condition is evaluated to `False`.This means that it will execute if the loop is never entered or if theloop exits normally. If the loop exits as the result of a `break`, the`else` will not be executed. Example One
###Code
'''
In this example, the loop will break if a 5 is generated, and the else
will not execute. Otherwise, after 3 numbers are generated, the loop
condition will become false and the else will execute.
'''
import random
print "Lucky Numbers! 3 numbers will be generated."
print "If one of them is a '5', you lose!"
count = 0
while count < 3:
num = random.randint(1, 6)
print num
if num == 5:
print "Sorry, you lose!"
break
count += 1
else:
print "You win!"
###Output
_____no_output_____
###Markdown
Example Two
###Code
from random import randint
# Generates a number from 1 through 10 inclusive
random_number = randint(1, 10)
guesses_left = 3
# Start your game!
while guesses_left > 0:
guess = int(raw_input("Your Guess : " ))
if guess == random_number:
print "You Win"
break
else:
guesses_left -= 1
else:
print "You lose."
###Output
_____no_output_____
###Markdown
Example Three
###Code
# A nice use of while/else
def is_prime(x):
if x < 2:
return False
# elif x == 2:
# return True
else:
n = 2
while n <= x-1:
if x % n == 0:
return False
n += 1
else:
return True
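# A few quick spot checks of is_prime (added for illustration):
print is_prime(2), is_prime(11), is_prime(12)   # => True True False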
###Output
_____no_output_____
###Markdown
For Loops
###Code
print "Counting..."
for i in range(20):
print i
###Output
_____no_output_____
###Markdown
For / elseJust like with while, for loops may have an else associated with them.In this case, the else statement is executed after the for, but onlyif the for ends normally—that is, not with a break. This code will breakwhen it hits 'tomato', so the else block won't be executed.
###Code
fruits = ['banana', 'apple', 'orange', 'tomato', 'pear', 'grape']
print 'You have...'
for f in fruits:
if f == 'tomato':
print 'A tomato is not a fruit!' # (It actually is.)
break
print 'A', f
else:
print 'A fine selection of fruits!'
###Output
_____no_output_____
###Markdown
Functions
###Code
def shout(phrase):
if phrase == phrase.upper():
return "YOU'RE SHOUTING!"
else:
return "Can you speak up?"
shout("I'M INTERESTED IN SHOUTING")
def count_small(numbers):
total = 0
for n in numbers:
if n < 10:
total = total + 1
return total
lost = [4, 8, 15, 16, 23, 42]
small = count_small(lost)
print small
###Output
_____no_output_____
###Markdown
Function Imports
###Code
import math
print math.sqrt(25)
###Output
_____no_output_____
###Markdown
Nice work! Now Python knows how to take the square root of a number.However, we only really needed the sqrt function, and it can be frustratingto have to keep typing `math.sqrt()`.It's possible to import only certain variables or functions from a given module.Pulling in just a single function from a module is called a function import, andit's done with the from keyword:
###Code
from module import function
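# A concrete instance of the pattern above (the line above is just the general syntax):
from math import sqrt
print sqrt(25)   # no math. prefix needed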
###Output
_____no_output_____
###Markdown
Now you can just type `sqrt()` to get the square root of a number—no more `math.sqrt()`! List all functions in a module
###Code
import math # Imports the math module
everything = dir(math) # Sets everything to a list of things from math
print everything # Prints 'em all!
###Output
_____no_output_____
###Markdown
Universal ImportWhat if we still want all of the variables and functions in a module but don't want tohave to constantly `type math.`?Universal import can handle this for you. The syntax for this is: Don't use Universal ImportsHere Be DragonsUniversal imports may look great on the surface, but they're not agood idea for one very important reason:they fill your program with a ton of variable and function nameswithout the safety of those names still being associated with themodule(s) they came from.If you have a function of your very own named sqrt and you import math,your function is safe: there is your `sqrt` and there is `math.sqrt`.If you do from `math import *`, however, you have a problem: namely,two different functions with the exact same name.Even if your own definitions don't directly conflict with names fromimported modules, if you `import *` from several modules at once,you won't be able to figure out which variable or function camefrom where.For these reasons, it's best to stick with either `import module` andtype `module.name` or just `import` specific variables and functionsfrom various modules as needed.
###Code
from math import *
print(sqrt(25))
## Passing multiple arguments
def biggest_number(*args):
print max(args)
return max(args)
def smallest_number(*args):
print min(args)
return min(args)
def distance_from_zero(arg):
print abs(arg)
return abs(arg)
biggest_number(-10, -5, 5, 10)
smallest_number(-10, -5, 5, 10)
distance_from_zero(-10)
###Output
_____no_output_____
###Markdown
Example
###Code
def shut_down(s):
if s.lower() == "yes":
return "Shutting Down"
elif s.lower() == "no":
return "Shutdown aborted"
else:
return "Sorry"
print(shut_down("yEs"))
print(shut_down("nO"))
print(shut_down("bleh"))
###Output
Shutting Down
Shutdown aborted
Sorry
###Markdown
Anonymous Functions / lambda OperatorOne of the more powerful aspects of Python is that it allows for a styleof programming called functional programming, which means that you'reallowed to pass functions around just as if they were variables or values.The lambda operator or lambda function is a way to create small anonymousfunctions, i.e. functions without a name. These functions are throw-awayfunctions, i.e. they are just needed where they have been created.Lambda functions are mainly used in combination with the functionsfilter(), map() and reduce().
###Code
# lambda
# lambda argument_list: expression
lambda x: x % 3 == 0
# Is the same as
def by_three(x):
return x % 3 == 0
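# The text above also mentions map() and reduce(); a quick sketch of both with a
# lambda (note: in Python 3, reduce lives in functools and map() is lazy).
print map(lambda x: x * 2, [1, 2, 3])        # => [2, 4, 6]
print reduce(lambda a, b: a + b, [1, 2, 3])  # => 6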
###Output
_____no_output_____
###Markdown
Only we don't need to actually give the function a name; it does its workand returns a value without one. That's why the function the lambdacreates is an anonymous function. filterThe function `filter(function, list)` offers an elegant way to filter outall the elements of a list, for which the function function returns True.The function `filter(f,l)` needs a function `f` as its first argument.`f` returns a Boolean value, i.e. either `True` or `False`.This function will be applied to every element of the list `l`.Only if `f` returns `True` will the element of the list be included inthe result list Example One
###Code
fib = [0,1,1,2,3,5,8,13,21,34,55]
result = filter(lambda x: x % 2, fib)
print (result)
# => [1, 1, 3, 5, 13, 21, 55] in Python 2.
# In Python 3, filter() is lazy (hence the filter object in the output below);
# wrap it as list(result) to see the values.
###Output
<filter object at 0x00000256C6E49C10>
###Markdown
Example Two
###Code
my_list = range(16)
print filter(lambda x: x % 3 == 0, my_list)
# => [0, 3, 6, 9, 12, 15]
###Output
_____no_output_____
###Markdown
Example Three
###Code
languages = ["HTML", "JavaScript", "Python", "Ruby"]
print filter(lambda x: 'Python' in x, languages)
# => ['Python']
###Output
_____no_output_____
###Markdown
Example Four
###Code
lst = ['a', 'ab', 'abc', 'bac']
filter(lambda k: 'ab' in k, lst)
# => ['ab', 'abc']
###Output
_____no_output_____
###Markdown
Example Five
###Code
cubes = [x**3 for x in range(1, 11)] # List Comprehension
filter(lambda x: x % 3 == 0, cubes)
# => [27, 216, 729]
###Output
_____no_output_____
###Markdown
Example Six
###Code
squares = [x**2 for x in range(1,11)]
print filter(lambda x: x > 30 and x < 70, squares)
# => [36, 49, 64]
###Output
_____no_output_____
###Markdown
Example Seven
###Code
garbled = "IXXX aXXmX aXXXnXoXXXXXtXhXeXXXXrX sXXXXeXcXXXrXeXt mXXeXsXXXsXaXXXXXXgXeX!XX"
message = filter(lambda x: 'X' not in x, garbled)
print message
# => "I am another secret message!"
###Output
_____no_output_____
###Markdown
ListsLists are a datatype you can use to store a collection of different pieces of information as asequence under a single variable name. (Datatypes you've already learned about include strings,numbers, and booleans.)
###Code
zoo_animals = ["pangolin", "cassowary", "sloth", "Optimus"];
if len(zoo_animals) > 3:
print "The first animal at the zoo is the " + zoo_animals[0]
print "The second animal at the zoo is the " + zoo_animals[1]
print "The third animal at the zoo is the " + zoo_animals[2]
print "The fourth animal at the zoo is the " + zoo_animals[3]
###Output
_____no_output_____
###Markdown
Building Lists / List ComprehensionLet's say you wanted to build a list of the numbers from 0 to 50(inclusive). We could do this pretty easily:
###Code
my_list = range(51)
###Output
_____no_output_____
###Markdown
But what if we wanted to generate a list according to some logic—forexample, a list of all the even numbers from 0 to 50?Python's answer to this is the list comprehension. List comprehensionsare a powerful way to generate lists using the keywords for/in & if
###Code
evens_to_50 = [i for i in range(51) if i % 2 == 0]
print evens_to_50
# This will create a new_list populated by the numbers one to five.
new_list = [x for x in range(1,6)]
# => [1, 2, 3, 4, 5]
# If you want those numbers doubled, you could use:
doubles = [x*2 for x in range(1,6)]
# => [2, 4, 6, 8, 10]
# And if you only wanted the doubled numbers that are evenly divisible by three:
doubles_by_3 = [x*2 for x in range(1,6) if (x*2)%3 == 0]
# => [6]
# Even Squares
even_squares = [i*i for i in range(1,12) if i*i % 2 == 0]
c = ['C' for x in range(5) if x < 3]
print c
# => ['C', 'C', 'C'].
cubes_by_four = [x ** 3 for x in range(1, 11) if x ** 3 % 4 == 0]
print cubes_by_four
# => [8, 64, 216, 512, 1000]
threes_and_fives = [x for x in range(1,16) if x % 3 == 0 or x % 5 == 0]
print threes_and_fives
# => [3, 5, 6, 9, 10, 12, 15]
###Output
_____no_output_____
###Markdown
List Append
###Code
# A list doesn't have to have a fixed length. You can add items to the end of a
# list any time you like!
letters = ['a', 'b', 'c']
letters.append('d')
print len(letters)
print letters
###Output
_____no_output_____
###Markdown
List removeWe can remove an item from the list.
###Code
letters.remove('a')
###Output
_____no_output_____
###Markdown
List pop`n.pop(index)` will remove the item at index from the list and return it to you:
###Code
n = [1, 3, 5]
n.pop(1)
# Returns 3 (the item at index 1)
print n
# prints [1, 5]
###Output
_____no_output_____
###Markdown
List remove`n.remove(item)` will remove the actual item if it finds it:
###Code
n.remove(1)
# Removes 1 from the list,
# NOT the item at index 1
print n
# prints [3, 5]
###Output
_____no_output_____
###Markdown
List delete`del(n[1])` is like `.pop` in that it will remove the item at the givenindex, but it won't return it:
###Code
del(n[1])
# Doesn't return anything
print n
# prints [1, 5]
###Output
_____no_output_____
###Markdown
List concatenate
###Code
a = [1, 2, 3]
b = [4, 5, 6]
print a + b
# prints [1, 2, 3, 4, 5, 6]
###Output
_____no_output_____
###Markdown
The example above is just a reminder of how to concatenate two lists.
###Code
m = [1, 2, 3]
n = [4, 5, 6]
# Add your code here!
def join_lists(x, y):
return x + y
print join_lists(m, n)
# You want this to print [1, 2, 3, 4, 5, 6]
###Output
_____no_output_____
###Markdown
List ReverseA negative stride progresses through the list from right to left. Example One
###Code
letters = ['A', 'B', 'C', 'D', 'E']
print letters[::-1]
# => ['E', 'D', 'C', 'B', 'A']
###Output
_____no_output_____
###Markdown
Example Two
###Code
my_list = range(1, 11)
backwards = my_list[::-1]
print backwards
# => [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
###Output
_____no_output_____
###Markdown
List SlicingAccess a portion of a list.List slicing allows us to access elements of a list in a concise manner.The syntax looks like this:
###Code
[start:end:stride]
###Output
_____no_output_____
###Markdown
Where start describes where the slice starts (inclusive), end is whereit ends (exclusive), and stride describes the space between items inthe sliced list. For example, a stride of 2 would select every otheritem from the original list to place in the sliced list.Stride LengthA positive stride length traverses the list from left to right,and a negative one traverses the list from right to left.Further, a stride length of 1 traverses the list "by ones," a stridelength of 2 traverses the list "by twos," and so on. Example One
###Code
l = [i ** 2 for i in range(1, 11)]
# Should be [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print l[2:9:2]
# => [9, 25, 49, 81]
###Output
_____no_output_____
###Markdown
Example Two
###Code
letters = ['a', 'b', 'c', 'd', 'e']
slice = letters[1:3]
print slice
print letters
###Output
_____no_output_____
###Markdown
Example Three
###Code
to_21 = range(1,22)
odds = to_21[::2]
middle_third = to_21[7:14:1]
print odds
print middle_third
# => [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
# => [8, 9, 10, 11, 12, 13, 14]
###Output
_____no_output_____
###Markdown
Example Four
###Code
suitcase = ["sunglasses", "hat", "passport", "laptop", "suit", "shoes"]
first = suitcase[0:2] # The first and second items (index zero and one)
middle = suitcase[2:4] # Third and fourth items (index two and three)
last = suitcase[4:6] # The last two items (index four and five)
###Output
_____no_output_____
###Markdown
Example Five
###Code
animals = "catdogfrog"
cat = animals[:3] # The first three characters of animals
dog = animals[3:6] # The fourth through sixth characters
frog = animals[6:] # From the seventh character to the end
###Output
_____no_output_____
###Markdown
Omitting IndicesIf you don't pass a particular index to the list slice,Python will pick a default.
###Code
to_five = ['A', 'B', 'C', 'D', 'E']
print to_five[3:]
# prints ['D', 'E']
print to_five[:2]
# prints ['A', 'B']
print to_five[::2]
# print ['A', 'C', 'E']
to_one_hundred = range(101)
backwards_by_tens = to_one_hundred[::-10]
print backwards_by_tens
# => [100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
garbled = "!XeXgXaXsXsXeXmX XtXeXrXcXeXsX XeXhXtX XmXaX XI"
message = garbled[::-2]
print message
# => 'I am the secret message!'
###Output
_____no_output_____
###Markdown
List Insertion & IndexingSearch for an item in a list
###Code
animals = ["ant", "bat", "cat"]
print animals.index("bat")
###Output
_____no_output_____
###Markdown
We can also insert items into a list.
###Code
animals.insert(1, "dog")
print animals
###Output
_____no_output_____
###Markdown
We insert `dog` at `index 1`, which moves everything down by `1`animals will be `["ant", "dog", "bat", "cat"]` Looping in lists
###Code
for variable in list_name:
# Do stuff!
###Output
_____no_output_____
###Markdown
A variable name follows the for keyword; it will be assigned the valueof each list item in turn.Then in list_name designates list_name as the list the loop will work on. Example
###Code
my_list = [1,9,3,8,5,7]
for number in my_list:
# Your code here
print 2 * number
names = ["Adam","Alex","Mariah","Martine","Columbus"]
for i in names:
print i
###Output
_____no_output_____
###Markdown
Method 1 - for item in list
###Code
for item in list:
print item
###Output
_____no_output_____
###Markdown
Method 2 - iterate through indexes
###Code
for i in range(len(list)):
print list[i]
###Output
_____no_output_____
###Markdown
Method 1 is useful to loop through the list, but it's not possibleto modify the list this way.Method 2 uses indexes to loop through the list, making it possibleto also modify the list if needed. List of ListsSingle list that contains multiple lists and how to use them in a function Example One
###Code
list_of_lists = [[1,2,3], [4,5,6]]
for lst in list_of_lists:
for item in lst:
print item
###Output
_____no_output_____
###Markdown
Example Two
###Code
n = [[1, 2, 3], [4, 5, 6, 7, 8, 9]]
# Add your function here
def flatten(lists):
results = []
for numbers in lists:
for i in numbers:
results.append(i)
return results
print flatten(n)
# Creates a list containing 5 lists, each of 8 items, all set to 0
w, h = 8, 5
Matrix = [[0 for x in range(w)] for y in range(h)]
###Output
_____no_output_____
###Markdown
Printing PrettyWe're getting pretty close to a playable board, but wouldn't it be nice to getrid of those quote marks and commas?
###Code
letters = ['a', 'b', 'c', 'd']
print " ".join(letters)
print "---".join(letters)
###Output
_____no_output_____
###Markdown
In the example above, we create a list called letters.Then, we print `a b c d`.The `.join` method uses the string to combine the items in the list.Finally, we print `a---b---c---d`.We are calling the `.join` function on the `---` string. Example
###Code
board = []
# Creates a list containing 5 lists, each of 5 items, all set to 0
w, h = 5, 5
board = [['O' for x in range(w)] for y in range(h)]
def print_board(board):
for lst in board:
print " ".join(lst)
print_board(board)
###Output
_____no_output_____
###Markdown
Sorting the List Sorting works out of the box on numbers and strings; for other kinds of objects you can pass a `key` function to `sort()` (a short extra example is included at the end of the next cell).
###Code
start_list = [5, 3, 1, 2, 4]
square_list = []
# Your code here!
for i in start_list:
square_list.append(i ** 2)
square_list.sort()
print square_list
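# Sorting other kinds of objects: pass a key function (an extra example, not part
# of the original exercise).
words = ["pear", "fig", "banana"]
words.sort(key=len)
print words   # => ['fig', 'pear', 'banana']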
###Output
_____no_output_____
###Markdown
Range in lists
###Code
n = [3, 5, 7]
def double_list(x):
for i in range(0, len(x)):
x[i] = x[i] * 2
# Don't forget to return your new list!
return x
print double_list(n)
###Output
_____no_output_____
###Markdown
Passing a range into a functionOkay! Range time. The Python `range()` function is just a shortcut for generating alist, so you can use ranges in all the same places you can use lists.
###Code
range(6) # => [0,1,2,3,4,5]
range(1,6) # => [1,2,3,4,5]
range(1,6,3) # => [1,4]
###Output
_____no_output_____
###Markdown
The range function has three different versions:
###Code
range(stop)
range(start, stop)
range(start, stop, step)
###Output
_____no_output_____
###Markdown
In all cases, the `range()` function returns a list of numbers from start up to(but not including) stop. Each item increases by step.If omitted, start defaults to zero and step defaults to one. Example One
###Code
def my_function(x):
for i in range(0, len(x)):
x[i] = x[i] * 2
return x
print my_function(range(2)) # Add your range between the parentheses!
###Output
_____no_output_____
###Markdown
Example Two
###Code
n = ["Michael", "Lieberman"]
# Add your function here
def join_strings(words):
result = ""
for i in range(0, len(words)):
result = result + words[i]
return result
print join_strings(n)
###Output
_____no_output_____
###Markdown
Enumerate `enumerate` works by supplying a corresponding index to each element in the list that you pass it. Each time you go through the loop, index will be one greater, and item will be the next item in the sequence. It's very similar to using a normal for loop with a list, except this gives us an easy way to count how many items we've seen so far. `enumerate` is a built-in function of Python. Its usefulness cannot be summarized in a single line, yet most newcomers and even some advanced programmers are unaware of it. It allows us to loop over something and have an automatic counter.
###Code
choices = ['pizza', 'pasta', 'salad', 'nachos']
print 'Your choices are:'
for index, item in enumerate(choices):
print index, item
for counter, value in enumerate(some_list):
print(counter, value)
my_list = ['apple', 'banana', 'grapes', 'pear']
for c, value in enumerate(my_list, 1):
print(c, value)
# Output:
# 1 apple
# 2 banana
# 3 grapes
# 4 pear
###Output
_____no_output_____
###Markdown
Iterating Multiple ListsMultiple listsIt's also common to need to iterate over two lists at once. This is wherethe built-in `zip` function comes in handy.`zip` will create pairs of elements when passed two lists, and will stop atthe end of the shorter list.`zip` can handle three or more lists as well!
###Code
list_a = [3, 9, 17, 15, 19]
list_b = [2, 4, 8, 10, 30, 40, 50, 60, 70, 80, 90]
for a, b in zip(list_a, list_b):
# Add your code here!
if a > b:
print a
else:
print b
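# zip also handles three (or more) lists, stopping at the shortest one; a quick sketch:
list_c = [1, 2, 3]
for a, b, c in zip(list_a, list_b, list_c):
    print a + b + c   # only three triples are produced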
###Output
_____no_output_____
###Markdown
DictionaryA `dictionary` is similar to a `list`, but you access values by looking up a key insteadof an index. A key can be any string or number. Dictionaries are enclosed in*curly braces*, like so:
###Code
d = {'key1' : 1, 'key2' : 2, 'key3' : 3}
###Output
_____no_output_____
###Markdown
This is a dictionary called d with three key-value pairs. The key `key1` points to the`value 1`, `key2` to `2`, and so on.Dictionaries are great for things like phone books (pairing a name with a phone number),login pages (pairing an e-mail address with a username), and more! Assigning a dictionary with three key-value pairs to residents
###Code
residents = {'Puffin' : 104, 'Sloth' : 105, 'Burmese Python' : 106}
print residents['Puffin'] # Prints Puffin's room number
# Your code here!
print residents['Sloth']
print residents['Burmese Python']
###Output
_____no_output_____
###Markdown
Adding New entriesAn empty pair of curly braces `{}` is an empty dictionary, just like an empty pair of `[]`is an empty list.
###Code
menu = {} # Empty dictionary
menu['Chicken Alfredo'] = 14.50 # Adding new key-value pair
print menu['Chicken Alfredo']
# Your code here: Add some dish-price pairs to menu!
menu['Lettuce Sandwich'] = 3.50
menu['Mango Juice'] = 12.70
menu['Sweet Corn + Chat Masala'] = 1.80
print "There are " + str(len(menu)) + " items on the menu."
print menu
###Output
_____no_output_____
###Markdown
Add & DeleteBecause dictionaries are mutable, they can be changed in many ways. Items can be removedfrom a dictionary with the del command:
###Code
del dict_name[key_name]
###Output
_____no_output_____
###Markdown
will remove the key `key_name` and its associated value from the dictionary.A new value can be associated with a key by assigning a value to the key, like so:
###Code
dict_name[key] = new_value
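# A concrete sketch of both operations (the dictionary here is just for illustration):
rooms = {'Puffin': 104, 'Sloth': 105}
del rooms['Puffin']     # removes the key 'Puffin' and its value
rooms['Sloth'] = 110    # assigns a new value to an existing key
print rooms             # => {'Sloth': 110}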
###Output
_____no_output_____
###Markdown
Retrieve key and value Method One
###Code
d = {'a': 'apple', 'b': 'berry', 'c': 'cherry'}
for key in d:
# Your code here!
print key, d[key]
###Output
_____no_output_____
###Markdown
Method Two
###Code
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for k, v in knights.iteritems():
print k, v
# Prints each key and its value:
# gallahad the pure
# robin the brave
###Output
_____no_output_____
###Markdown
Example
###Code
>>> D = {'a': 1, 'b': 2, 'c': 3}
>>> for key in D:
print(key, '=>', D[key]) # Use dict keys iterator and index
a => 1
c => 3
b => 2
>>> list(D.items())
[('a', 1), ('c', 3), ('b', 2)]
>>> for (key, value) in D.items():
print(key, '=>', value) # Iterate over both keys and values
a => 1
c => 3
b => 2
###Output
_____no_output_____
###Markdown
Lists and Dictionaries
###Code
inventory = {
'gold' : 500,
'pouch' : ['flint', 'twine', 'gemstone'], # Assigned a new list to 'pouch' key
'backpack' : ['xylophone','dagger', 'bedroll','bread loaf']
}
# Adding a key 'burlap bag' and assigning a list to it
inventory['burlap bag'] = ['apple', 'small ruby', 'three-toed sloth']
# Sorting the list found under the key 'pouch'
inventory['pouch'].sort()
# Your code here
inventory['pocket'] = ['seashell', 'strange berry', 'lint']
inventory['backpack'].sort()
inventory['backpack'].remove('dagger')
inventory['gold'] = inventory['gold'] + 50
print inventory['gold']
print inventory['pocket']
print inventory['pouch']
print inventory['backpack']
###Output
_____no_output_____
###Markdown
Looping in dictionaries
###Code
prices = {
"banana" : 4,
"apple" : 2,
"orange" : 1.5,
"pear" : 3,
}
stock = {
"banana" : 6,
"apple" : 0,
"orange" : 32,
"pear" : 15,
}
for key in prices:
print key
print "price: %s" % prices[key]
print "stock: %s" % stock[key]
total = 0
for i in prices:
total = total + prices[i] * stock[i]
print total
###Output
_____no_output_____
###Markdown
More Looping techniquesTo loop over two or more sequences at the same time, theentries can be paired with the `zip()` function.
###Code
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print 'What is your {0}? It is {1}.'.format(q, a)
###Output
_____no_output_____
###Markdown
List of DictionariesIterating over a list of dictionaries
###Code
lloyd = {
"name": "Lloyd",
"homework": [90.0,97.0, 75.0, 92.0],
"quizzes": [88.0, 40.0, 94.0],
"tests": [75.0, 90.0]
}
alice = {
"name": "Alice",
"homework": [100.0, 92.0, 98.0, 100.0],
"quizzes": [82.0, 83.0, 91.0],
"tests": [89.0, 97.0]
}
tyler = {
"name": "Tyler",
"homework": [0.0, 87.0, 75.0, 22.0],
"quizzes": [0.0, 75.0, 78.0],
"tests": [100.0, 100.0]
}
students = [lloyd, alice, tyler]
for n in [0,1,2]:
for k, v in students[n].iteritems():
print k, v
#OR
for n in range(len(students)):
for k, v in students[n].iteritems():
print k, v
###Output
_____no_output_____
###Markdown
ClassesYou can think of an object as a single data structure that contains data aswell as functions; functions of objects are called methodsPython is an object-oriented programming language, which means itmanipulates programming constructs called objects.
###Code
len("Eric")
###Output
_____no_output_____
###Markdown
Python is checking to see whether the string object you passed it has alength, and if it does, it returns the value associated with that attribute
###Code
my_dict.items()
###Output
_____no_output_____
###Markdown
Python checks to see if `my_dict` has an `items()` method (which all dictionarieshave) and executes that method if it finds it.But what makes `Eric` a string and `my_dict` a dictionary? The fact thatthey're instances of the `str` and `dict` classes, respectively. A `class` isjust a way of organizing and producing objects with similar attributesand methods Example OneCheck out the code in the editor to the right. We've defined our ownclass, Fruit, and created a lemon instance
###Code
class Fruit(object):
"""A class that makes various tasty fruits."""
def __init__(self, name, color, flavor, poisonous):
self.name = name
self.color = color
self.flavor = flavor
self.poisonous = poisonous
def description(self):
print "I'm a %s %s and I taste %s." % (self.color, self.name, self.flavor)
def is_edible(self):
if not self.poisonous:
print "Yep! I'm edible."
else:
print "Don't eat me! I am super poisonous."
lemon = Fruit("lemon", "yellow", "sour", False)
lemon.description()
lemon.is_edible()
###Output
_____no_output_____
###Markdown
Errors
###Code
# local variable 's' referenced before assignment
def digit_sum(n):
#s = 0
while(n != 0):
d = n % 10
n = n / 10
s = s + d
return s
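# For comparison, a fixed version that initializes s before the loop (this is what
# the commented-out line above was hinting at):
def digit_sum_fixed(n):
    s = 0
    while n != 0:
        s = s + n % 10
        n = n / 10
    return s
print digit_sum_fixed(1234)   # => 10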
###Output
_____no_output_____ |
sql-grading.ipynb | ###Markdown
Concept Proof===Evaluation of a jupyter notebook in Travis CI using `gradetool`. **Step 1.--** Load the sql extension
###Code
%load_ext sql
###Output
_____no_output_____
###Markdown
**Step 2.--** Connect to mysql server.
###Code
%sql mysql+pymysql://root:password@proof-mysql
###Output
_____no_output_____
###Markdown
**Step 3.--** Data preparation.
###Code
%%sql
DROP DATABASE IF EXISTS conceptproof;
CREATE DATABASE conceptproof;
USE conceptproof;
CREATE TABLE data (
letter VARCHAR(3),
number INT
);
INSERT INTO data VALUES
("a", 1),
("a", 2),
("b", 3),
("b", 4);
###Output
_____no_output_____
###Markdown
**Step 4.-- Point 1.** Write a query returning the columns `letter`, in upper case and, and `number`.
###Code
%%sql
-- >>> Write your answer here <<<
###Output
_____no_output_____
###Markdown
**Step 5.-- Point 2.** Write a query computing the sum of column `number` group by `letter`.
###Code
%%sql
-- >>> Write your answer here <<<
###Output
_____no_output_____ |
JData-0330.ipynb | ###Markdown
Loading the data
###Code
User = pd.read_csv('./Data/JData_User.csv',encoding='GBK',na_values=-1,parse_dates=['user_reg_dt'])
User.head()
Action_201602 = pd.read_csv('./Data/JData_Action_201602.csv',\
encoding='GBK',\
na_values=-1,\
parse_dates=['time'],dtype={'user_id':'int','sku_id':'int','type':'int','model_id':'float64'})
Action_201602 = Action_201602.sort_values(['user_id','sku_id','time'])\
.drop_duplicates(['user_id','sku_id','time'])
Action_201603 = pd.read_csv('./Data/JData_Action_201603/JData_Action_201603.csv',\
encoding='GBK',\
na_values=-1,\
parse_dates=['time'],dtype={'user_id':'int','sku_id':'int','type':'int','model_id':'float64'})
Action_201603 = Action_201603.sort_values(['user_id','sku_id','time'])\
.drop_duplicates(['user_id','sku_id','time'])
Action_201603_extra = pd.read_csv('./Data/JData_Action_201603/JData_Action_201603_extra.csv',\
encoding='GBK',\
na_values=-1,\
parse_dates=['time'],dtype={'user_id':'int','sku_id':'int','type':'int','model_id':'float64'})
Action_201603_extra = Action_201603_extra.sort_values(['user_id','sku_id','time'])\
.drop_duplicates(['user_id','sku_id','time'])
Action_201603 = Action_201603.append(Action_201603_extra)
Action_201604 = pd.read_csv('./Data/JData_Action_201604.csv',\
encoding='GBK',\
na_values=-1,\
parse_dates=['time'],dtype={'user_id':'int','sku_id':'int','type':'int','model_id':'float64'})
Action_201604 = Action_201602.append(Action_201603).append(Action_201604)
Action_201604.head()
len(Action_201604)
Action_201604 = Action_201604.sort_values(['user_id','sku_id','time'])\
.drop_duplicates(['user_id','sku_id','time'])
len(Action_201604)
Action_201604 = Action_201604.merge(pd.get_dummies(Action_201604.type,prefix='type'),\
left_index=True,
right_index=True,
how='left').drop(['cate','brand'],axis=1)
Action_201604.head()
Action_201604 = Action_201604.groupby(['user_id','sku_id'])\
.agg({'type_1':sum,'type_2':sum,'type_3':sum,\
'type_4':sum,'type_5':sum,'type_6':sum,\
'time':min,'model_id':len
}).reset_index()
Action_201604.head()
Comments = pd.read_csv('./Data/JData_Comment.csv',encoding='GBK',na_values=-1,\
parse_dates=['dt'],dtype={'sku_id':'int'})
Comments.head()
Product = pd.read_csv('./Data/JData_Product.csv',encoding='GBK',na_values=-1,\
dtype={'sku_id':'int'})
Product.head()
###Output
_____no_output_____
###Markdown
Splitting the data by date
###Code
date0 = '2016-02-01'
date1 = '2016-04-06'
date2 = '2016-04-15'
date3 = '2016-04-16'
###Output
_____no_output_____
###Markdown
Merging the datasets
###Code
def create_data(date_a,date_b):
Action_section = Action_201604[(Action_201604['time'] > date_a) & (Action_201604['time'] < date_b)]
Comments_section = Comments[Comments['dt'] < date_b]
User_section = User[User['user_reg_dt'] < date_b]
User_section = User_section.sort_values('user_id')
# Action_section = Action_section.sort_values(['user_id','sku_id','time']).drop_duplicates(['user_id','sku_id','time'])
UA_section = Action_section.merge(User_section,on='user_id',how='left').sort_values(['user_id','sku_id','time'])
Comments_section = Comments_section.sort_values(['sku_id','dt'],ascending=False).drop_duplicates(['sku_id'])
PC_section = Product.merge(Comments_section,how='left',on='sku_id').sort_values('bad_comment_rate',ascending=False)
PC_section = PC_section
UAPC_section = PC_section.merge(UA_section,on='sku_id',how='left')
UAPC_section = UAPC_section.dropna(axis=0,subset=['user_id'])
UAPC_section[['user_id','sku_id']] = UAPC_section[['user_id','sku_id']].apply(lambda x:x.astype(int))
return UAPC_section
# Revised version (this overrides the create_data defined above): comments are also
# restricted to the [date_a, date_b) window and products are inner-joined with the actions.
def create_data(date_a,date_b):
Action_section = Action_201604[(Action_201604['time'] > date_a) & (Action_201604['time'] < date_b)]
Comments_section = Comments[(Comments['dt'] > date_a) & (Comments['dt'] < date_b)]
User_section = User[User['user_reg_dt'] < date_b]
User_section = User_section.sort_values('user_id')
# Action_section = Action_section.sort_values(['user_id','sku_id','time']).drop_duplicates(['user_id','sku_id','time'])
UA_section = Action_section.merge(User_section,on='user_id',how='left').sort_values(['user_id','sku_id','time'])
Comments_section = Comments_section.sort_values(['sku_id','dt'],ascending=False).drop_duplicates(['sku_id'])
PC_section = Product.merge(Comments_section,how='left',on='sku_id').sort_values('bad_comment_rate',ascending=False)
PC_section = PC_section
UAPC_section = UA_section.merge(PC_section,on='sku_id',how='inner')
# UAPC_section = UAPC_section.dropna(axis=0,subset=['user_id'])
UAPC_section[['user_id','sku_id']] = UAPC_section[['user_id','sku_id']].apply(lambda x:x.astype(int))
return UAPC_section
def label_create(x):
return x[['user_id','sku_id','type_4']]\
.sort_values(['user_id','sku_id'])\
.rename(columns= {'type_4':'label'})
train_X = create_data(date0,date2)
train_X['type_4'].value_counts()
train_y = create_data(date2,date3)
trial = train_y[train_y['type_4']>=1][['user_id','sku_id']].merge(Action_201604,how='left').sort_values(['user_id','sku_id'])
trial.head()
# The original line `trial[type]` was broken (`type` here is the Python builtin);
# inspect the action-type counts from the earlier groupby instead:
trial[['type_1', 'type_2', 'type_4', 'type_6']]
Comments[Comments['sku_id']==5504]
train_y.sort_values(['sku_id','time']).head()
train_y['type_4'].value_counts()
train_y = train_y[['user_id','sku_id','type_4']]\
.sort_values(['user_id','sku_id'])\
.rename(columns= {'type_4':'label'})
train_y.head()
train_test = train_X.merge(train_y,on=['user_id','sku_id'],how='left')
train_test.head()
train_test['label']= train_test['label'].fillna(0)
train_test['label'].value_counts()
###Output
_____no_output_____ |
torch_basics/torch_random_tests.ipynb | ###Markdown
torch.cuda.manual_seed(SEED)
###Code
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Linear Layer
###Code
linear1 = nn.Linear(30, 20) # 30 dim in, 20 dim out.
w1 = linear1.weight.view((-1,1)).detach().numpy()
plt.hist(w1, bins='auto') # arguments are passed to np.histogram
torch.manual_seed(SEED)
linear2 = nn.Linear(30, 20) # 30 dim in, 20 dim out.
w2 = linear2.weight.view((-1,1)).detach().numpy()
plt.hist(w2, bins='auto') # arguments are passed to np.histogram
diff = np.abs(np.sum(w2-w1))
print(diff)
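# The same check expressed as a boolean: identical seeds give identical initial weights.
print(np.allclose(w1, w2))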
###Output
0.0
|
Reinforced Concrete 3D Frame (FGU).ipynb | ###Markdown
Reinforced Concrete 3D Frame January 2022, By Amir Hossein Namadchi In this notebook, a 3-dimensional, one-story reinforced concrete moment-resisting frame is modeled. This is an OpenSeesPy simulation of the TCL version of the model presented by [*Fernando Gutiérrez Urzúa*](https://www.ucl.ac.uk/epicentre/fernando-gutierrez-urzua) on his [YouTube channel](https://www.youtube.com/user/lfgurzua). Some minor modifications were made in the Python version. According to him, the modeling assumptions are:- Columns & beams are modeled as distributed plasticity elements. To keep the code clean, all of the necessary functions are written in a single *.py* file named `RC3DF.py`. The file contains:- `build_RC_rect_section`: *Builds a fiber rectangular RC section*- `build_model`: *Builds the 3D RC frame model*- `run_gravity`: *Runs gravity analysis*- `run_modal`: *Runs modal analysis*- `run_pushover`: *Runs pushover analysis*- `run_time_history`: *Runs time-history analysis*- `reset_analysis`: *Resets the analysis by setting time to 0, removing the recorders and wiping the analysis.* Some additional function arguments are provided so that model parameters can be tweaked. Please note that some functions use data obtained by running other functions. For example, in order to run a time-history analysis, some of the system's natural frequencies are required, which can be obtained by running the `run_modal` function. The model has been idealized as follows:  Beam and column sections are defined as:  Dependencies
###Code
import time
import sys
import os
sys.path.append('FGU_RC3DF_files')
import openseespy.opensees as ops
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import RC3DF as RC
###Output
_____no_output_____
###Markdown
Model Data Units (*Silvia's Style*)
###Code
m = 1.0 # Meters
KN = 1.0 # KiloNewtons
sec = 1.0 # Seconds
mm = 0.001*m # Milimeters
cm = 0.01*m # Centimeters
ton = KN*(sec**2)/m # mass unit (derived)
g = 9.81*(m/sec**2) # gravitational constant (derived)
MPa = 1e3*(KN/m**2) # Mega Pascal
GPa = 1e6*(KN/m**2) # Giga Pascal
###Output
_____no_output_____
###Markdown
Geometric Dimensions
###Code
L_X = 6.0*m # Span in X-direction
L_Y = 7.0*m # Span in Y-direction
L_Z = 3.5*m # Story height
###Output
_____no_output_____
###Markdown
Material Definition
###Code
f_c_1 = -25*MPa # f'c in compression for unconfined concrete
f_c_2 = -28*MPa # f'c in compression for confined concrete
eps_c = -0.002 # strain at maximum stress in compression
eps_u = -0.02 # strain at ultimate stress in compression
f_y = 420.0*MPa # fy for reinforcing steel
E_s = 210.0*GPa # E for reinforcing steel
###Output
_____no_output_____
###Markdown
Section Definition
###Code
rebar = 0.25*np.pi*(25*mm)**2
# uniaxial Kent-Scott-Park concrete material with degraded linear unloading/reloading
mat_KSP_unconf = {'ID':'Concrete02',
'matTag': 1,
'fpc': f_c_1,
'epsc0': eps_c,
'fpcu': 0.2*f_c_1,
'epsU': eps_u,
'lamda': 0.1,
'ft': -0.1*f_c_1,
'Ets': (-0.1*f_c_1)/0.002}
# uniaxial Kent-Scott-Park concrete material with degraded linear unloading/reloading
mat_KSP_conf = {'ID':'Concrete02',
'matTag': 2,
'fpc': f_c_2,
'epsc0': eps_c,
'fpcu': 0.2*f_c_2,
'epsU': eps_u,
'lamda': 0.1,
'ft': -0.1*f_c_2,
'Ets': (-0.1*f_c_2)/0.002}
# uniaxial Giuffre-Menegotto-Pinto steel with isotropic strain hardening
mat_GMP = {'ID':'Steel02',
'matTag':3,
'Fy': f_y,
'E0': E_s,
'b':0.005,
'R0': 20.0,
'cR1': 0.925,
'cR2': 0.15}
sections = {'Beam':{'B':300*mm,
'H':600*mm,
'cover':40*mm,
'n_bars_top':3,
'n_bars_bot':3,
'n_bars_int_tot':4,
'bar_area_top':rebar,
'bar_area_bot':rebar,
'bar_area_int':rebar},
'Column':{'B':300*mm,
'H':400*mm,
'cover':40*mm,
'n_bars_top':3,
'n_bars_bot':3,
'n_bars_int_tot':4,
'bar_area_top':rebar,
'bar_area_bot':rebar,
'bar_area_int':rebar}
}
###Output
_____no_output_____
###Markdown
Loading
###Code
C_L = 80.0*(KN) # Concentrated load
m_1 = 200.0*ton # lumped mass 1
###Output
_____no_output_____
###Markdown
Analysis Gravity analysis
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_gravity()
###Output
Model Built Successfully!
Gravity analysis Done!
###Markdown
Modal analysis
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_modal()
###Output
Model Built Successfully!
Modal analysis Done!
###Markdown
Pushover analysis in X directions
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_gravity()
RC.reset_analysis()
RC.run_pushover(m_1, direction='X')
###Output
Model Built Successfully!
Gravity analysis Done!
Pushover Analysis in X Done in 30.21 seconds
###Markdown
Pushover analysis in Y directions
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_gravity()
RC.reset_analysis()
RC.run_pushover(m_1, direction='Y')
###Output
Model Built Successfully!
Gravity analysis Done!
Pushover Analysis in Y Done in 27.81 seconds
###Markdown
Time history analysis in X directions
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_gravity()
RC.reset_analysis()
RC.run_time_history(direction='X')
###Output
Model Built Successfully!
Gravity analysis Done!
Running Time-History analysis with lambda= 1
Time-History Analysis in X Done in 31.41 seconds
###Markdown
Time history analysis in Y directions
###Code
RC.build_model(L_X, L_Y, L_Z, mat_KSP_unconf, mat_KSP_conf, mat_GMP, sections, C_L, m_1)
RC.run_gravity()
RC.reset_analysis()
RC.run_time_history(direction='Y')
###Output
Model Built Successfully!
Gravity analysis Done!
Running Time-History analysis with lambda= 1
Time-History Analysis in Y Done in 30.17 seconds
###Markdown
Visualization Pushover Curve
###Code
df_R_X = pd.read_table('FGU_RC3DF_files/Pushover_Horizontal_ReactionsX.out', sep = " ", header = None,
names=["Pseudo-Time","R1_X","R2_X","R3_X","R4_X"])
df_R_Y = pd.read_table('FGU_RC3DF_files/Pushover_Horizontal_ReactionsY.out', sep = " ", header = None,
names=["Pseudo-Time","R1_Y","R2_Y","R3_Y","R4_Y"])
df_R_X['sum_R'] = df_R_X.values[:,1:5].sum(axis =1)
df_R_Y['sum_R'] = df_R_Y.values[:,1:5].sum(axis =1)
df_D_X = pd.read_table('FGU_RC3DF_files/Pushover_Story_DisplacementX.out', sep = " ", header = None,
names=["Pseudo-Time","D1_X","D2_X","D3_X","D4_X"])
df_D_Y = pd.read_table('FGU_RC3DF_files/Pushover_Story_DisplacementY.out', sep = " ", header = None,
names=["Pseudo-Time","D1_Y","D2_Y","D3_Y","D4_Y"])
df_D_X['avg_D'] = df_D_X.values[:,1:5].mean(axis = 1)
df_D_Y['avg_D'] = df_D_Y.values[:,1:5].mean(axis = 1)
plt.figure(figsize=(10,5))
plt.plot(df_D_X['avg_D'], -df_R_X['sum_R'], color = '#C0392B', linewidth=1.5)
plt.plot(df_D_Y['avg_D'], -df_R_Y['sum_R'], color = '#27AE60', linewidth=1.5)
plt.ylabel('Base Shear (KN)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.xlabel('Average of Roof Displacement (m)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.grid(which='both')
plt.title('Pushover Curve',{'fontname':'Cambria', 'fontstyle':'normal','size':16})
plt.yticks(fontname = 'Cambria', fontsize = 14)
plt.xticks(fontname = 'Cambria', fontsize = 14);
plt.legend(['X-Direction', 'Y-Direction'],prop={'family':'Cambria','size':14});
###Output
_____no_output_____
###Markdown
Ground Motion history
###Code
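# The time axis below assumes acc_1.txt is sampled at a 0.02 s time step.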
G_M =np.loadtxt('FGU_RC3DF_files/acc_1.txt')
times = np.arange(0,0.02*len(G_M),0.02)
plt.figure(figsize=(12,4))
plt.plot(times,G_M, color = '#6495ED', linewidth=1.2)
plt.ylabel('Acceleration (m/s2)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.xlabel('Time (sec)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.grid(which='both')
plt.title('Time history of Ground Motion record',{'fontname':'Cambria', 'fontstyle':'normal','size':16})
plt.yticks(fontname = 'Cambria', fontsize = 14);
###Output
_____no_output_____
###Markdown
Time history of displacement and acceleration
###Code
story_disp_X = np.loadtxt('FGU_RC3DF_files/TimeHistory_Story_DisplacementX1.1.out')
story_disp_Y = np.loadtxt('FGU_RC3DF_files/TimeHistory_Story_DisplacementY1.1.out')
plt.figure(figsize=(12,5))
plt.plot(story_disp_X[:,0], story_disp_X[:,1], color = '#DE3163', linewidth=1.2)
plt.plot(story_disp_Y[:,0], story_disp_Y[:,2], color = '#FFBF00', linewidth=1.2)
plt.ylabel('Horizontal Displacement (m)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.xlabel('Time (sec)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.grid(which='both')
plt.title('Time history of horizontal displacement',{'fontname':'Cambria', 'fontstyle':'normal','size':16})
plt.yticks(fontname = 'Cambria', fontsize = 14);
plt.xticks(fontname = 'Cambria', fontsize = 14);
plt.legend(['X-Direction', 'Y-Direction'], prop={'family':'Cambria','size':14});
story_accel_X = np.loadtxt('FGU_RC3DF_files/TimeHistory_Story_AccelerationX1.1.out')
story_accel_Y = np.loadtxt('FGU_RC3DF_files/TimeHistory_Story_AccelerationY1.1.out')
plt.figure(figsize=(12,5))
plt.plot(story_accel_X[:,0], story_accel_X[:,1], color = '#DE3163', linewidth=1.2)
plt.plot(story_accel_Y[:,0], story_accel_Y[:,2], color = '#FFBF00', linewidth=1.2)
plt.ylabel('Horizontal Acceleration (m/s2)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.xlabel('Time (sec)', {'fontname':'Cambria', 'fontstyle':'italic','size':14})
plt.grid(which='both')
plt.title('Time history of horizontal acceleration',{'fontname':'Cambria', 'fontstyle':'normal','size':16})
plt.yticks(fontname = 'Cambria', fontsize = 14);
plt.xticks(fontname = 'Cambria', fontsize = 14);
plt.legend(['X-Direction', 'Y-Direction'], prop={'family':'Cambria','size':14});
###Output
_____no_output_____ |
src/Standard Modules Practice/pickle/pickle_example.ipynb | ###Markdown
Pickle Module pickling (dump)
###Code
import pickle
dogs_dict = {
'Ozzy': 3,
'Filou': 8,
'Luna': 5,
'Skippy': 10,
'Barco': 12,
'Balou': 9,
'Laika': 16
}
filename = 'dogs.txt'
outfile = open(filename, 'wb')
pickle.dump(obj=dogs_dict, file=outfile)
outfile.close()
###Output
_____no_output_____
###Markdown
unpickling (load)
###Code
import pickle
infile = open(filename, 'rb')
new_dict = pickle.load(file=infile, encoding='ASCII', errors='strict')
infile.close()
print(new_dict)
print(new_dict == dogs_dict)
print(type(new_dict))
###Output
{'Ozzy': 3, 'Filou': 8, 'Luna': 5, 'Skippy': 10, 'Barco': 12, 'Balou': 9, 'Laika': 16}
True
<class 'dict'>
###Markdown
unpickling objects from Python2 in Python3
###Code
import pickle
infile = open(file=filename, mode='rb')
new_dict = pickle.load(file=infile, encoding='latin1', errors='strict')
infile.close()
print(new_dict)
###Output
{'Ozzy': 3, 'Filou': 8, 'Luna': 5, 'Skippy': 10, 'Barco': 12, 'Balou': 9, 'Laika': 16}
###Markdown
unpickling numpy arrays
###Code
import numpy as np
import pickle
ones = np.ones(shape=(3, 3), dtype='i8')
print(ones)
filename = 'ones.txt'
outfile = open(filename, mode='wb')
pickle.dump(obj=ones, file=outfile)
outfile.close()
infile = open(file=filename, mode='rb')
new_ones = pickle.load(file=infile, encoding='bytes', errors='strict')
infile.close()
print(new_ones)
###Output
[[1 1 1]
[1 1 1]
[1 1 1]]
[[1 1 1]
[1 1 1]
[1 1 1]]
###Markdown
Compressing Pickle Files
###Code
import bz2
import pickle
sfile = bz2.BZ2File(filename='smallerfile', mode='w', compresslevel=9)  # note: BZ2File no longer accepts a 'buffering' argument in Python 3.9+
pickle.dump(obj=dogs_dict, file=sfile)
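# Added example: close the compressed file so the data is flushed, then read the pickle
# back through bz2 in the same way an uncompressed pickle file would be read.
sfile.close()
sfile = bz2.BZ2File(filename='smallerfile', mode='r')
new_dict = pickle.load(sfile)
sfile.close()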
###Output
_____no_output_____ |
2.0.1 - Reinforcement Learning - Keras DQN.ipynb | ###Markdown
Deep Reinforcement Learning for Keras What is it?`keras-rl` implements some state-of-the art deep reinforcement learning algorithms in Python and seamlessly integrates with the deep learning library [Keras](http://keras.io).Furthermore, `keras-rl` works with [OpenAI Gym](https://gym.openai.com/) out of the box. This means that evaluating and playing around with different algorithms is easy.Of course you can extend `keras-rl` according to your own needs. You can use built-in Keras callbacks and metrics or define your own.Even more so, it is easy to implement your own environments and even algorithms by simply extending some simple abstract classes. Documentation is available [online](http://keras-rl.readthedocs.org). TL;DR--- DQNAgent```pythonrl.agents.dqn.DQNAgent(model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False, dueling_type='avg')``` ---- [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL11) Agent```pythonrl.core.Agent(processor=None)```Abstract base class for all implemented agents.Each agent interacts with the environment (as defined by the `Env` class) by first observing thestate of the environment. Based on this observation the agent changes the environment by performingan action.Do not use this abstract base class directly but instead use one of the concrete agents implemented.Each agent realizes a reinforcement learning algorithm. Since all agents conform to the sameinterface, you can use them interchangeably.All agents share a common API. This allows you to easily switch between different agents.That being said, keep in mind that some agents make assumptions regarding the action space, i.e. assume discreteor continuous actions.[[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL44) fit```pythonfit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1, visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000, nb_max_episode_steps=None)```Trains the agent on the given environment.__Arguments__- __env:__ (`Env` instance): Environment that the agent interacts with. See [Env](env) for details.- __nb_steps__ (integer): Number of training steps to be performed.- __action_repetition__ (integer): Number of times the agent repeats the same action without observing the environment again. Setting this to a value > 1 can be useful if a single action only has a very small effect on the environment.- __callbacks__ (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances): List of callbacks to apply during training. See [callbacks](/callbacks) for details.- __verbose__ (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging- __visualize__ (boolean): If `True`, the environment is visualized during training. However, this is likely going to slow down training significantly and is thus intended to be a debugging instrument.- __nb_max_start_steps__ (integer): Number of maximum steps that the agent performs at the beginning of each episode using `start_step_policy`. Notice that this is an upper limit since the exact number of steps to be performed is sampled uniformly from [0, max_start_steps] at the beginning of each episode.- __start_step_policy__ (`lambda observation: action`): The policy to follow if `nb_max_start_steps` > 0. 
If set to `None`, a random action is performed.- __log_interval__ (integer): If `verbose` = 1, the number of steps that are considered to be an interval.- __nb_max_episode_steps__ (integer): Number of steps per episode that the agent performs before automatically resetting the environment. Set to `None` if each episode should run (potentially indefinitely) until the environment signals a terminal state.__Returns__A `keras.callbacks.History` instance that recorded the entire training process.----[[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL231) test```pythontest(self, env, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True, nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1)```- __processor__ (`Processor` instance): See [Processor](processor) for details.----[[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL454) Processor```pythonrl.core.Processor()```Abstract base class for implementing processors.A processor acts as a coupling mechanism between an `Agent` and its `Env`. This canbe necessary if your agent has different requirements with respect to the form of theobservations, actions, and rewards of the environment. By implementing a custom processor,you can effectively translate between the two without having to change the underlayingimplementation of the agent or environment.Do not use this abstract base class directly but instead use one of the concrete implementationsor write your own.----[[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL533) Env```pythonrl.core.Env()```The abstract environment class that is used by all agents. This class has the exactsame API that OpenAI Gym uses so that integrating with it is trivial. In contrast to theOpenAI Gym implementation, this class only defines the abstract methods without any actualimplementation.----[[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.pyL609) Space```pythonrl.core.Space()```Abstract model for a space that is used for the state and action spaces. This class has theexact same API that OpenAI Gym uses so that integrating with it is trivial. Installations
###Code
import keras
keras.__version__
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
print("Running! \nPlease don't interrupt this cell. It might cause serious issues..")
!pip install gym keras-rl pyglet==1.2.4
# !apt-get install -y cmake zlib1g-dev libjpeg-dev xvfb ffmpeg xorg-dev python-opengl libboost-all-dev libsdl2-dev swig
!pip install 'gym[atari]'
print('Done!')
###Output
_____no_output_____
###Markdown
Q-Learning with Neural NetworksNow, you may be thinking: tables are great, but they don’t really scale, do they? While it is easy to have a 16x4 table for a simple grid world, the number of possible states in any modern game or real-world environment is nearly infinitely larger. For most interesting problems, tables simply don’t work. We instead need some way to take a description of our state, and produce $Q$-values for actions without a table: that is where neural networks come in. By acting as a function approximator, we can take any number of possible states that can be represented as a vector and learn to map them to $Q$-values. In the case of the FrozenLake example, we will be using a one-layer network which takes the state encoded in a one-hot vector (1x16), and produces a vector of 4 $Q$-values, one for each action. Such a simple network acts kind of like a glorified table, with the network weights serving as the old cells. The key difference is that we can easily expand the Tensorflow network with added layers, activation functions, and different input types, whereas all that is impossible with a regular table. The method of updating is a little different as well. Instead of directly updating our table, with a network we will be using backpropagation and a loss function. Our loss function will be sum-of-squares loss, where the difference between the current predicted $Q$-values, and the “target” value is computed and the gradients passed through the network. In this case, our $Q_{target}$ for the chosen action is the equivalent to the $Q$-value computed in equation above ($Q(s,a) + \alpha [r + \gamma max_{a'} Q(s',a') - Q(s,a) ]$).$$Loss = \sum (Q_{target} - Q_{predicted})^2$$
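As a minimal sketch (added here for illustration, not part of the original notebook or keras-rl), the one-layer network and sum-of-squares update described above could look like this for the 16-state FrozenLake case; the optimizer and the `gamma` value are illustrative assumptions:

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

n_states, n_actions, gamma = 16, 4, 0.99
# A single dense layer acts as the "glorified table": one-hot state (1x16) -> 4 Q-values.
q_net = Sequential([Dense(n_actions, input_shape=(n_states,), use_bias=False)])
q_net.compile(optimizer='sgd', loss='mse')   # sum-of-squares loss from the equation above

def one_hot(s):
    v = np.zeros((1, n_states))
    v[0, s] = 1.0
    return v

def q_update(s, a, r, s_next):
    # Q_target for the chosen action: r + gamma * max_a' Q(s', a')
    target = q_net.predict(one_hot(s))
    target[0, a] = r + gamma * np.max(q_net.predict(one_hot(s_next)))
    q_net.fit(one_hot(s), target, verbose=0)  # gradient step on (Q_target - Q_predicted)^2
```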
###Code
import matplotlib.pyplot as plt
import numpy as np
import gym
from IPython import display
from PIL import Image
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation, Flatten, Convolution2D, Permute
from keras.optimizers import Adam
from rl.callbacks import Callback
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from rl.core import Processor
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
###Output
_____no_output_____
###Markdown
Get the environment and extract the number of actions.
###Code
ENV_NAME = 'CartPole-v0'
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
###Output
_____no_output_____
###Markdown
Deep Q-networksWhile our ordinary Q-network was able to barely perform as well as the Q-Table in a simple game environment, Deep $Q$-Networks are much more capable. In order to transform an ordinary Q-Network into a DQN we will be making the following improvements:+ Going from a single-layer network to a multi-layer convolutional network.+ Implementing Experience Replay, which will allow our network to train itself using stored memories from its experience.+ Utilizing a second "target" network, which we will use to compute target $Q$-values during our updates.See https://jaromiru.com/2016/09/27/lets-make-a-dqn-theory/ Convolutional LayersSince our agent is going to be learning to play video games, it has to be able to make sense of the game's screen output in a way that is at least similar to how humans or other intelligent animals are able to. Instead of considering each pixel independently, convolutional layers allow us to consider regions of an image, and maintain spatial relationships between the objects on the screen as we send information up to higher levels of the network. Experience ReplayThe second major addition to make DQNs work is Experience Replay. The problem with online learning is that the *samples arrive in the order* they are experienced and as such are highly correlated. Because of this, our network will most likely overfit and fail to generalize properly. The key idea of **experience replay** is that we store these transitions in our memory and, during each learning step, sample a random batch and perform a gradient descent step on it. The Experience Replay buffer stores a fixed number of recent memories, and as new ones come in, old ones are removed. When the time comes to train, we simply draw a uniform batch of random memories from the buffer, and train our network with them. Separate Target NetworkThis second network is used to generate the $Q$-target values that will be used to compute the loss for every action during training. The issue is that at every step of training, the $Q$-network's values shift, and if we are using a constantly shifting set of values to adjust our network values, then the value estimations can easily spiral out of control. The network can become destabilized by falling into feedback loops between the target and estimated $Q$-values. In order to mitigate that risk, the target network's weights are fixed, and only periodically or slowly updated to the primary $Q$-network's values. In this way training can proceed in a more stable manner. Instead of updating the target network periodically and all at once, we will be updating it frequently, but slowly. While the DQN we have described above could learn ATARI games with enough training, getting the network to perform well on those games takes at least a day of training on a powerful machine. Next, we build a very simple model.
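The sketch below (added for illustration; this is not keras-rl's internal code) shows the two mechanisms in their simplest form. The buffer capacity and `tau` mirror the `SequentialMemory(limit=50000)` and `target_model_update=1e-2` settings used later in this notebook:

```python
import random
from collections import deque
import numpy as np

class ReplayBuffer:
    """Fixed-size experience replay: old transitions fall out as new ones arrive."""
    def __init__(self, capacity=50000):
        self.buffer = deque(maxlen=capacity)

    def add(self, s, a, r, s_next, done):
        self.buffer.append((s, a, r, s_next, done))

    def sample(self, batch_size=32):
        batch = random.sample(self.buffer, batch_size)      # uniform random batch
        return [np.array(x) for x in zip(*batch)]

def soft_update(target_model, online_model, tau=1e-2):
    """Slowly track the online network: target <- tau * online + (1 - tau) * target."""
    new_weights = [tau * o + (1.0 - tau) * t
                   for o, t in zip(online_model.get_weights(), target_model.get_weights())]
    target_model.set_weights(new_weights)
```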
###Code
inp = Input(shape=(1,) + env.observation_space.shape )
x = Flatten()(inp)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(nb_actions)(x)
x = Activation('linear')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 1, 4) 0
_________________________________________________________________
flatten_2 (Flatten) (None, 4) 0
_________________________________________________________________
dense_7 (Dense) (None, 16) 80
_________________________________________________________________
activation_5 (Activation) (None, 16) 0
_________________________________________________________________
dense_8 (Dense) (None, 16) 272
_________________________________________________________________
activation_6 (Activation) (None, 16) 0
_________________________________________________________________
dense_9 (Dense) (None, 16) 272
_________________________________________________________________
activation_7 (Activation) (None, 16) 0
_________________________________________________________________
dense_10 (Dense) (None, 2) 34
_________________________________________________________________
activation_8 (Activation) (None, 2) 0
=================================================================
Total params: 658
Trainable params: 658
Non-trainable params: 0
_________________________________________________________________
###Markdown
Finally, we configure and compile our agent. You can use every built-in Keras optimizer and even the metrics!
###Code
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
#single
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
#dual
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
###Output
_____no_output_____
###Markdown
Renderer for live animation in Jupyter while running these commands. If you are running on your local device, you can instead use the commented-out cells below and run the full local environment! (It looks nicer :) )
###Code
class Render(Callback):
def on_step_end(self, step, logs={}):
plt.clf()
plt.imshow(env.render(mode='rgb_array'))
display.display(plt.gcf())
display.clear_output(wait=True)
###Output
_____no_output_____
###Markdown
Okay, now it's time to learn something! We visualize the training here for show, but this slows down training quite a lot. You can always safely abort the training prematurely using Ctrl + C.
###Code
# nb_steps represents the number of steps, you can try and change it
dqn.fit(env, callbacks=[Render()], nb_steps=100, visualize=False, log_interval=10000)
###Output
72/10000 [..............................] - ETA: 26:53 - reward: 1.0000done, took 11.757 seconds
###Markdown
If you are running locally, uncomment this out!
###Code
# #nb_steps represents the number of steps, you can try and change it
# dqn.fit(env, nb_steps=100, visualize=True, verbose=2)
###Output
_____no_output_____
###Markdown
After training is done, we save the final weights.
###Code
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
###Output
_____no_output_____
###Markdown
Finally, evaluate our algorithm for 5 episodes.
###Code
dqn.test(env, nb_episodes=5, callbacks=[Render()], visualize=False)
###Output
_____no_output_____
###Markdown
If you are running locally, uncomment this out! SARSA & Expected SARSALet's say that:$$X_t \in \{ v_t, \hat{v_t} \}$$where in expected SARSA:$$ v_t = r_t + \gamma \sum_a \pi_t (s_{t+1}, a) Q_t (s_{t+1}, a)$$and in SARSA:$$ \hat{v_t} = r_t + \gamma Q_t (s_{t+1}, a_{t+1})$$Bias is represented by:$$Bias(s,a) = Q^{\pi} (s,a) - E\{X_t\}$$Variance is denoted by:$$Var(s,a) = E\{(X_t)^2\} - (E\{X_t\})^2$$
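A small illustrative sketch (added; not taken from keras-rl) of how the two targets above differ, given the Q-values of the next state and, for Expected SARSA, the policy's action probabilities:

```python
import numpy as np

def sarsa_target(r, q_next, a_next, gamma=0.99):
    # v_hat_t = r_t + gamma * Q(s_{t+1}, a_{t+1})
    return r + gamma * q_next[a_next]

def expected_sarsa_target(r, q_next, pi_next, gamma=0.99):
    # v_t = r_t + gamma * sum_a pi(s_{t+1}, a) * Q(s_{t+1}, a)
    return r + gamma * float(np.dot(pi_next, q_next))
```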
###Code
import gym
import numpy as np
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.layers import Input, Dense, Activation, Flatten
from rl.agents import SARSAAgent
from rl.policy import BoltzmannQPolicy
###Output
_____no_output_____
###Markdown
Get the environment and extract the number of actions.
###Code
ENV_NAME = 'CartPole-v0'
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
###Output
_____no_output_____
###Markdown
Next, we build a very simple model.
###Code
inp = Input(shape=(1,) + env.observation_space.shape)
x = Flatten()(inp)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(16)(x)
x = Activation('relu')(x)
x = Dense(nb_actions)(x)
x = Activation('linear')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) (None, 1, 4) 0
_________________________________________________________________
flatten_3 (Flatten) (None, 4) 0
_________________________________________________________________
dense_12 (Dense) (None, 16) 80
_________________________________________________________________
activation_9 (Activation) (None, 16) 0
_________________________________________________________________
dense_13 (Dense) (None, 16) 272
_________________________________________________________________
activation_10 (Activation) (None, 16) 0
_________________________________________________________________
dense_14 (Dense) (None, 16) 272
_________________________________________________________________
activation_11 (Activation) (None, 16) 0
_________________________________________________________________
dense_15 (Dense) (None, 2) 34
_________________________________________________________________
activation_12 (Activation) (None, 2) 0
=================================================================
Total params: 658
Trainable params: 658
Non-trainable params: 0
_________________________________________________________________
###Markdown
SARSA does not require a memory.
###Code
policy = BoltzmannQPolicy()
sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10, policy=policy)
sarsa.compile(Adam(lr=1e-3), metrics=['mae'])
###Output
_____no_output_____
###Markdown
Okay, now it's time to learn something! We visualize the training here for show, but this slows down training quite a lot. You can always safely abort the training prematurely using Ctrl + C.
###Code
# nb_steps represents the number of steps, you can try and change it
sarsa.fit(env, callbacks=[Render()], nb_steps=1000, log_interval=10000)
###Output
18/10000 [..............................] - ETA: 26:49 - reward: 1.0000done, took 3.035 seconds
###Markdown
If you are running locally, uncomment this out!
###Code
# #nb_steps represents the number of steps, you can try and change it
# sarsa.fit(env, nb_steps=10000, visualize=True, verbose=2)
###Output
_____no_output_____
###Markdown
After training is done, we save the final weights.
###Code
sarsa.save_weights('sarsa_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
###Output
_____no_output_____
###Markdown
Finally, evaluate our algorithm for 5 episodes.
###Code
sarsa.test(env, nb_episodes=5, callbacks=[Render()], visualize=False)
###Output
_____no_output_____
###Markdown
If you are running locally, uncomment this out!
###Code
# sarsa.test(env, nb_episodes=5, visualize=True)
###Output
_____no_output_____ |
MinerDetector/PDG/KNN.ipynb | ###Markdown
Import the pure, treated data
###Code
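# Imports assumed by this notebook (added here for completeness). The helpers
# curva_aprendizaje, curva_validacion and ROC used further below are assumed to be
# project-specific utilities defined elsewhere in the repository.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from termcolor import colored
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.evaluate import bias_variance_decomp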
data = pd.read_csv('./dataA.csv')
data.head(4)
###Output
_____no_output_____
###Markdown
Holdout Model
###Code
best_feature_pure = ['Avg_bps','Avg_bpp' ,'Avg_pps','p3_ip','p3_ib']
y = data['Type_mine']
X = data[best_feature_pure]
data = pd.read_csv('./dataA.csv')
X_train, X_test, y_train, y_test = train_test_split(X,y,train_size = 0.7, test_size=0.3, random_state = 1234)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train,y_train)
y_pred = knn.predict(X_test)
churm = metrics.confusion_matrix(y_test, y_pred)
fig,ax = plt.subplots(nrows = 1, ncols = 1,figsize = (8,6))
sns.heatmap(churm, annot=True, ax = ax,cmap="Blues",fmt='g',annot_kws={"size": 15});
#plt.imshow(churm, cmap=plt.cm.Blues,annot=True)
ax.set_title(f"Matriz de confusión para K={k_best}",size=15)
ax.xaxis.set_ticklabels(['Not_mine','Mine'])
ax.yaxis.set_ticklabels(['Not_mine','Mine'])
ax.tick_params(labelsize = 15)
###Output
_____no_output_____
###Markdown
KNN
###Code
k_vec = np.arange(1,40,2)
vec_train = []
vec_test =[]
for k in k_vec:
knn = KNeighborsClassifier(n_neighbors = k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_train)
vec_train.append(metrics.accuracy_score(y_train, y_pred))
y_pred = knn.predict(X_test)
vec_test.append(metrics.accuracy_score(y_test, y_pred))
v = pd.DataFrame()
v['K'] = k_vec
v['vec_train'] = vec_train
v['vec_test'] = vec_test
display(v)
plt.figure(figsize=(10,5))
ax = plt.gca()
plt.plot(k_vec, vec_train)
plt.plot(k_vec, vec_test)
ax.set_xlim(ax.get_xlim()[::-1])
plt.axis('tight')
plt.xlabel('k')
plt.ylabel('accuracy')
plt.title('Evolution of accuracy vs. k-NN model complexity (smaller values of k)')
plt.legend(['train', 'test'])
k_best = 5
knn = KNeighborsClassifier(n_neighbors=k_best)
knn.fit(X_train,y_train)
y_pred = knn.predict(X_test)
y_pred
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
y_test.shape, y_pred.shape
churm = metrics.confusion_matrix(y_test, y_pred)
fig,ax = plt.subplots(nrows = 1, ncols = 1,figsize = (8,6))
sns.heatmap(churm, annot=True, ax = ax,cmap="Blues",fmt='g',annot_kws={"size": 15});
#plt.imshow(churm, cmap=plt.cm.Blues,annot=True)
ax.set_title(f"Matriz de confusión para K={k_best}",size=15)
ax.xaxis.set_ticklabels(['Not_mine','Mine'])
ax.yaxis.set_ticklabels(['Not_mine','Mine'])
ax.tick_params(labelsize = 15)
pd.DataFrame(churm)
print("El modelo de KNN se equivocó en %d de los %d registros que componen el dataset original"
% ((y_test != y_pred).sum(), data.shape[0]))
print("Exactitud: ", metrics.accuracy_score(y_test, y_pred))
print("Kappa: ", metrics.cohen_kappa_score(y_test, y_pred))
print(colored('Metricas de los registros mineros', attrs=['bold']))
print("Precisión : ", metrics.precision_score(y_test, y_pred, labels=[1], average='macro'))
print("Recall : ", metrics.recall_score(y_test, y_pred, labels=[1], average='macro'))
VN = churm[0,0]
FP = churm[0,1]
specificity = VN/(VN+FP)
print("Especificidad : ", specificity)
print("F1-score : ", metrics.f1_score(y_test, y_pred, labels=[1], average='macro'))
print(colored('Metricas de los registros no mineros', attrs=['bold']))
print("Precisión : ", metrics.precision_score(y_test, y_pred, labels=[0], average='macro'))
print("Recall : ", metrics.recall_score(y_test, y_pred, labels=[0], average='macro'))
VN = churm[1,1]
FP = churm[1,0]
specificity = VN/(VN+FP)
print("Especificidad : ", specificity)
print("F1-score : ", metrics.f1_score(y_test, y_pred, labels=[0], average='macro'))
mse, bias, var = bias_variance_decomp(knn, np.array(X_train), np.array(y_train), np.array(X_test), np.array(y_test), loss='mse', num_rounds=200)
print('MSE: %.3f' % mse)
print('Bias: %.3f' % bias)
print('Variance: %.3f' % var)
curva_aprendizaje(knn, X, y, 'accuracy')
vecinos = list(range(2, 40))
curva_validacion(knn, X_train,y_train,'n_neighbors', vecinos)
###Output
_____no_output_____
###Markdown
ROC Curve
###Code
ROC(X_test,y_test,knn)
###Output
_____no_output_____ |
apex_gun/impact/apex_gun_impact.ipynb | ###Markdown
APEX gun: Impact-T
###Code
# Useful for debugging
%load_ext autoreload
%autoreload 2
# Nicer plotting
%pylab --no-import-all inline
%config InlineBackend.figure_format = 'retina'
from impact import Impact
from impact.autophase import autophase
?autophase
from pmd_beamphysics import single_particle
I1 = Impact('ImpactT-1D.in')
I2 = Impact('ImpactT-2D.in')
###Output
_____no_output_____
###Markdown
Check autophase
###Code
P0 = single_particle(pz=1e-9)
autophase(I1, 'APEX_gun', initial_particles=P0 )
autophase(I2, 'APEX_gun', initial_particles=P0 )
I1.track1(x0=10e-3)
I2.track1(x0=10e-3)
plt.plot(I1.stat('mean_z'), I1.stat('mean_x'))
plt.plot(I2.stat('mean_z'), I2.stat('mean_x'), marker='.')
###Output
_____no_output_____ |
MSc_QG_QA.ipynb | ###Markdown
Requirements
###Code
!pip install transformers==3.0.0
!pip install nltk
!python -m nltk.downloader punkt
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
import End-to-End pipelines
###Code
import os
os.chdir("drive/MyDrive/End-to-End/question_generation-master")
%ls
from pipelines import pipeline
###Output
_____no_output_____
###Markdown
import data
###Code
import pandas as pd
os.chdir("../../")
%ls
###Output
'35r4f-73rd5 (1).gif' covid19_news_drop.csv
35r4f-73rd5.gif covid19_news_with_covid.csv
[0m[01;34mBDG-main[0m/ covid19_news.xlsx
'Child TFCE correction.rar' covid_data_not_clean.csv
[01;34m'Colab Notebooks'[0m/ covid_data_QG.csv
covid19_articles.csv covid_news_splited.csv
covid19_articles_test.csv covid_news_without_n.csv
covid19_articles_v1.csv [01;34mDistractor-Generation-RACE-master[0m/
covid19_data_to_process.csv [01;34mEnd-to-End[0m/
covid19_news1.csv [01;34mMed_Child_GM[0m/
covid19_news1.xlsx [01;34mProphetNet-master[0m/
covid19_news.csv [01;34mVBM[0m/
###Markdown
covid19_data_to_process.csv contains 3 sentences in each paragraph
###Code
df=pd.read_csv('covid_data_not_clean.csv')
df.head()
df['context'][1]
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2636 entries, 0 to 2635
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 index 2636 non-null int64
1 context 2636 non-null object
dtypes: int64(1), object(1)
memory usage: 41.3+ KB
###Markdown
Question Generation Multitask-QG-QA Generate answer corresponding to the question
###Code
nlp = pipeline("multitask-qa-qg")
df=pd.read_csv('covid_data_not_clean.csv',header=0,usecols=['context'])
df
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2476 entries, 0 to 2475
Data columns (total 1 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 context 2476 non-null object
dtypes: object(1)
memory usage: 19.5+ KB
###Markdown
Processing and cleaning the data
###Code
for i in range(2472,2475):
nlp(df['context'][i])
print('i:',i)
df=df.drop(705) #i+1
df=df.reset_index()
df=df.drop('index',axis=1)
df.info()
df['context'][455]
df.to_csv('covid_data_not_clean.csv')
%ls
questions={}
question=nlp(df['context'][0])
#questions
count=0
end=count+len(question)
df_q=pd.DataFrame(question)
#df_q.loc[count:end,'number']=0
count=end
df_q
question
questions=[]
question=nlp(df['context'][0])
list=[]
list.append(question[0])
#questions
df_q=pd.DataFrame(question)
for i in range(1,400): #2638
questions=nlp(df['context'][i])
print('iter:',i)
list.append(questions[0])
list
li=pd.DataFrame(list)
li.tail()
df['context'][456]
nlp(df['context'][456])
list_1=[]
for i in range(400,800): #2638
questions=nlp(df['context'][i])
print('iter:',i)
list_1.append(questions[0])
list_1
list.append(list_1)
li_1=pd.DataFrame(list_1)
df_con=pd.concat([li,li_1],ignore_index=True)
df_con.to_csv('covid_data_QG.csv') #800
%ls
list_2=[]
for i in range(800,1200): #2638
questions=nlp(df['context'][i])
print('iter:',i)
list_2.append(questions[0])
list_2
li_2=pd.DataFrame(list_2)
df_con1=pd.concat([df_con,li_2],ignore_index=True)
df_con1
df=df.drop(870) #i+1
df=df.reset_index()
df=df.drop('index',axis=1)
# df.info()
df.to_csv('covid_data_not_clean.csv')
# %ls
df=pd.read_csv('covid_data_not_clean.csv',header=0,usecols=['context'])
df.to_csv('covid_data_not_clean.csv')
%ls
df=pd.read_csv('covid_data_not_clean.csv',header=0,usecols=['context'])
df
list_4=[]
for i in range(1600,2000): #2638
questions=nlp(df['context'][i])
print('iter:',i)
list_4.append(questions[0])
list_4
df=df.drop(1958) #i
df=df.reset_index()
df=df.drop('index',axis=1)
# df.info()
df.to_csv('covid_data_not_clean.csv')
# %ls
df=pd.read_csv('covid_data_not_clean.csv',header=0,usecols=['context'])
df_con2
li_4=pd.DataFrame(list_4)
df_con3=pd.concat([df_con2,li_4],ignore_index=True)
df_con3
df_con3.to_csv('covid_data_QG.csv') #2000
%ls
df.info()
list_6=[]
for i in range(2400,2460): #2638
questions=nlp(df['context'][i])
print('iter:',i)
list_6.append(questions[0])
list_6
df=df.drop(2455) #i
df=df.reset_index()
df=df.drop('index',axis=1)
# df.info()
df.to_csv('covid_data_not_clean.csv')
# %ls
df=pd.read_csv('covid_data_not_clean.csv',header=0,usecols=['context'])
%ls
li_5=pd.DataFrame(list_5)
df_con4=pd.concat([df_con3,li_5],ignore_index=True)
df_con4
df_con4.to_csv('covid_data_QG.csv') #200
%ls
li_6=pd.DataFrame(list_6)
df_con5=pd.concat([df_con4,li_6],ignore_index=True)
df_con5
df_con5.to_csv('covid_data_QG.csv') #full context
#######work from here#######
df_result=pd.concat([df,df_con5],axis=1)
df_result
df_result.to_csv('covid_QG_QA_with_context.csv')
%ls
df_result
count=1
for i in range(1,4): #2638
questions=nlp(df['context'][i])
print('questions:',questions)
for j in range(0,len(questions)):
sentence=questions.pop()
df_q.append(sentence,ignore_index=True)
print('sentence:',sentence)
#df_q.loc[count:end,'number']=i
#questions=nlp(df['context'][2])
end=count+len(questions)
#print('i:',i)
#print('count:',count)
print('end:',end)
count=end
df_q.loc[0:1,'number']=1
df_q
questions=[]
for i in range(0,5):
questions.append(nlp(df['context'][i]))
print('question:',questions)
df_qu=pd.DataFrame(questions)
df_qu
df.head(20)
df.head(20)
questions=nlp(df['context'][12])
len(questions)
questions.pop()
questions.pop()
df_q
df_q.append(questions)
df_q.loc[0:1,'number']=1
df_q
type(questions)
#questions = [{'id':1010,'name':"Administrator",'type':1},{'id':1011,'name':"Administrator2",'type':1}]
newquestions = dict()
for question in questions:
newquestions = question
print(newquestions)
#{1010: {'type': 1, 'name': 'Administrator'}, 1011: {'type': 1, 'name': 'Administrator2'}}
df_q=pd.DataFrame(newquestions)
df_q
nlp(df['context'][10])
###Output
_____no_output_____ |
Code/Assignment_2 - Spatial Representation Comparison.ipynb | ###Markdown
Assignment 2 - Spatial Representation Comparison In this assignment we will be experimenting with spatial representations of data. The shapes we will be working with are: *Triangle* and *Circle*. Each of the shapes will be used to visualize data that differs in magnitude in the following ways: *the numbers are identical*, *the numbers differ by a factor of a dozen*, and *the numbers differ by a factor of a hundred*. For each of the shapes, I have identified **4** visual styles, viz. `Filled`, `Blurred`, `Nofill` and `Dashed`. Each of these styles will be tested by placing the objects *side-by-side* or *inside* each other. Furthermore, the effect of their position, scaling and orientation will also be examined. We will first begin with Triangles. 1. Triangles with identical area In this step the areas of the triangles are nearly identical.
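For reference, the four styles map onto the following `matplotlib.patches.Polygon` settings in the cells below (an added summary sketch; `col` stands for any of the colormaps used there):

```python
import matplotlib.patches as pt

def styled_polygon(vertices, style, col, shade):
    # Filled: solid patch; Blurred: translucent patch; Nofill: outline only; Dashed: dash-dot outline.
    if style == 'Filled':
        return pt.Polygon(vertices, True, color=col(shade))
    if style == 'Blurred':
        return pt.Polygon(vertices, True, color=col(shade), alpha=0.4)
    if style == 'Nofill':
        return pt.Polygon(vertices, True, color=col(shade), fill=None)
    if style == 'Dashed':
        return pt.Polygon(vertices, True, color=col(shade), fill=None, linestyle='-.')
```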
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as pt
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
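# Right-multiplying the (x, y) vertices by X below rotates each shape by 90 degrees: [x, y] @ X = [y, -x]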
X = np.array([[0, -1], [1, 0]])
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axa.set_facecolor(col(0))
axx.set_visible(False)
axy.set_visible(False)
small_triangle = triangle*0.8 - 0.2
polygon = pt.Polygon(triangle, True, color = col(100))
small_polygon = pt.Polygon(small_triangle, True, color = col(200), zorder = 2)
axa.add_patch(polygon)
axa.add_patch(small_polygon)
axa.set_xlim(0, 1)
axa.set_ylim(0, 1)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axa1.set_facecolor(col(100))
axx1.set_visible(False)
axy1.set_visible(False)
small_triangle = triangle*0.8 + 0.1
polygon = pt.Polygon(triangle, True, color = col(200))
small_polygon = pt.Polygon(small_triangle, True, color = col(300), zorder = 2)
axa1.add_patch(polygon)
axa1.add_patch(small_polygon)
axa1.set_xlim(0.2, 1)
axa1.set_ylim(0.2, 1)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axa2.set_facecolor(col(200))
axx2.set_visible(False)
axy2.set_visible(False)
small_triangle = triangle*0.8 - 0.2
polygon = pt.Polygon(triangle, True, color = col(300), zorder = 2)
small_polygon = pt.Polygon(small_triangle, True, color = col(100))
axa2.add_patch(polygon)
axa2.add_patch(small_polygon)
axa2.set_xlim(-0.5, 1)
axa2.set_ylim(-0.5, 1)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axa3.set_facecolor(col(300))
axx3.set_visible(False)
axy3.set_visible(False)
small_triangle = triangle*0.8 + 0.1
polygon = pt.Polygon(triangle, True, color = col(100))
small_polygon = pt.Polygon(small_triangle, True, color = col(0), zorder = 2)
axa3.add_patch(polygon)
axa3.add_patch(small_polygon)
axa3.set_xlim(-0.5, 1)
axa3.set_ylim(-0.5, 1)
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axa4.set_facecolor(col1(0))
axx4.set_visible(False)
axy4.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_small_triangle = rotate90_triangle*0.8 - 0.2
rotate90_polygon = pt.Polygon(rotate90_triangle, True, color = col1(200), alpha = 0.4)
rotate90_small_polygon = pt.Polygon(rotate90_small_triangle, True, color = col1(100), alpha = 0.4)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_small_polygon)
axa4.set_xlim(0, 0.8)
axa4.set_ylim(-0.2, -1)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axa5.set_facecolor(col1(100))
axx5.set_visible(False)
axy5.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_small_triangle = rotate90_triangle*0.8 + np.array([0.1, -0.1])
rotate90_polygon = pt.Polygon(rotate90_triangle, True, color = col1(300), alpha = 0.4)
rotate90_small_polygon = pt.Polygon(rotate90_small_triangle, True, color = col1(200), alpha = 0.4)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_small_polygon)
axa5.set_xlim(0, 0.8)
axa5.set_ylim(-0.2, -1)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axa6.set_facecolor(col1(200))
axx6.set_visible(False)
axy6.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_small_triangle = rotate90_triangle*0.8 - 0.2
rotate90_polygon = pt.Polygon(rotate90_triangle, True, color = col1(100), alpha = 0.4)
rotate90_small_polygon = pt.Polygon(rotate90_small_triangle, True, color = col1(300), alpha = 0.4)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_small_polygon)
axa6.set_xlim(-0.4, 0.8)
axa6.set_ylim(0.1, -1.1)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axa7.set_facecolor(col1(300))
axx7.set_visible(False)
axy7.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_small_triangle = rotate90_triangle*0.8 + np.array([0.1, -0.1])
rotate90_polygon = pt.Polygon(rotate90_triangle, True, color = col1(0), alpha = 0.4)
rotate90_small_polygon = pt.Polygon(rotate90_small_triangle, True, color = col1(100), alpha = 0.4)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_small_polygon)
axa7.set_xlim(-0.4, 0.8)
axa7.set_ylim(0.1, -1.1)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axa8.set_facecolor(col2(0))
axx8.set_visible(False)
axy8.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_small_triangle = rotate180_triangle*0.8 - np.array([0.1, -0.2])
rotate180_polygon = pt.Polygon(rotate180_triangle - np.array([0.05, -0.05]), True, color = col2(200), fill = None)
rotate180_small_polygon = pt.Polygon(rotate180_small_triangle, True, color = col1(100), fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_small_polygon)
axa8.set_xlim(-1, -0.3)
axa8.set_ylim(-0.7, 0)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axa9.set_facecolor(col2(100))
axx9.set_visible(False)
axy9.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_small_triangle = rotate180_triangle*0.8 - np.array([0.15, 0.05])
rotate180_polygon = pt.Polygon(rotate180_triangle - np.array([0.05, -0.05]), True, color = col2(300), fill = None)
rotate180_small_polygon = pt.Polygon(rotate180_small_triangle, True, color = col2(200), fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_small_polygon)
axa9.set_xlim(-1, -0.3)
axa9.set_ylim(-0.7, 0)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axa10.set_facecolor(col2(200))
axx10.set_visible(False)
axy10.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_small_triangle = rotate180_triangle*0.8 - np.array([0.1, -0.2])
rotate180_polygon = pt.Polygon(rotate180_triangle - np.array([0.05, -0.05]), True, color = col2(100), fill = None)
rotate180_small_polygon = pt.Polygon(rotate180_small_triangle, True, color = col2(300), fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_small_polygon)
axa10.set_xlim(-1.2, -0.2)
axa10.set_ylim(-0.8, 0.2)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axa11.set_facecolor(col2(300))
axx11.set_visible(False)
axy11.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_small_triangle = rotate180_triangle*0.8 - np.array([0.15, 0.05])
rotate180_polygon = pt.Polygon(rotate180_triangle - np.array([0.05, -0.05]), True, color = col2(0), fill = None)
rotate180_small_polygon = pt.Polygon(rotate180_small_triangle, True, color = col2(100), fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_small_polygon)
axa11.set_xlim(-1.2, -0.2)
axa11.set_ylim(-0.8, 0.2)
plt.subplot(4,4,13)
plt.title("Dashed : Scaled and Outside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axa12.set_facecolor(col3(300))
axx12.set_visible(False)
axy12.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_small_triangle = rotate270_triangle*0.8 + 0.2
rotate270_polygon = pt.Polygon(rotate270_triangle, True, color = col3(100), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Polygon(rotate270_small_triangle, True, color = col3(0), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_small_polygon)
axa12.set_xlim(0, -0.8)
axa12.set_ylim(1, 0.2)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axa13.set_facecolor(col3(200))
axx13.set_visible(False)
axy13.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_small_triangle = rotate270_triangle*0.8 + np.array([-0.1, 0.1])
rotate270_polygon = pt.Polygon(rotate270_triangle, True, color = col3(0), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Polygon(rotate270_small_triangle, True, color = col3(300), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_small_polygon)
axa13.set_xlim(0, -0.8)
axa13.set_ylim(1, 0.2)
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axa14.set_facecolor(col3(100))
axx14.set_visible(False)
axy14.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_small_triangle = rotate270_triangle*0.8 + 0.2
rotate270_polygon = pt.Polygon(rotate270_triangle, True, color = col3(200), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Polygon(rotate270_small_triangle, True, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_small_polygon)
axa14.set_xlim(0.4, -0.8)
axa14.set_ylim(1.4, 0.2)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axa15.set_facecolor(col3(0))
axx15.set_visible(False)
axy15.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_small_triangle = rotate270_triangle*0.8 + np.array([-0.1, 0.1])
rotate270_polygon = pt.Polygon(rotate270_triangle, True, color = col3(100), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Polygon(rotate270_small_triangle, True, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_small_polygon)
axa15.set_xlim(0.4, -0.8)
axa15.set_ylim(1.4, 0.2)
plt.show()
###Output
_____no_output_____
###Markdown
2. Triangles with areas different by a factor of a dozen In this step the areas of the triangles differ by a factor of a dozen.
###Code
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axx.set_visible(False)
axy.set_visible(False)
axa.set_facecolor(col(0))
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*10
polygon = pt.Polygon(triangle + np.array([5, 3]), True, color = col(100))
big_polygon = pt.Polygon(big_triangle, True, color = col(200))
axa.add_patch(polygon)
axa.add_patch(big_polygon)
axa.set_xlim(2, 10)
axa.set_ylim(2, 8)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axx1.set_visible(False)
axy1.set_visible(False)
axa1.set_facecolor(col(100))
big_triangle = triangle*10
polygon = pt.Polygon(triangle + np.array([5, 4]), True, color = col(200), zorder = 2)
big_polygon = pt.Polygon(big_triangle, True, color = col(300))
axa1.add_patch(polygon)
axa1.add_patch(big_polygon)
axa1.set_xlim(2, 10)
axa1.set_ylim(2, 8)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axx2.set_visible(False)
axy2.set_visible(False)
axa2.set_facecolor(col(200))
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*10
polygon = pt.Polygon(triangle + np.array([5, 3]), True, color = col(300))
big_polygon = pt.Polygon(big_triangle, True, color = col(100))
axa2.add_patch(polygon)
axa2.add_patch(big_polygon)
axa2.set_xlim(-2, 10)
axa2.set_ylim(-2, 8)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axx3.set_visible(False)
axy3.set_visible(False)
axa3.set_facecolor(col(300))
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*10
polygon = pt.Polygon(triangle + np.array([5, 4]), True, color = col(100), zorder = 2)
big_polygon = pt.Polygon(big_triangle, True, color = col(0))
axa3.add_patch(polygon)
axa3.add_patch(big_polygon)
axa3.set_xlim(-2, 10)
axa3.set_ylim(-2, 8)
X = np.array([[0, -1], [1, 0]])
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axx4.set_visible(False)
axy4.set_visible(False)
axa4.set_facecolor(col1(0))
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*10
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([3, -4]), True, color = col1(100), alpha = 0.2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(200), alpha = 0.2)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_big_polygon)
axa4.set_xlim(2, 7)
axa4.set_ylim(-10, -2)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axx5.set_visible(False)
axy5.set_visible(False)
axa5.set_facecolor(col1(100))
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*10
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([4, -4]), True, color = col1(200), alpha = 0.2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(300), alpha = 0.2)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_big_polygon)
axa5.set_xlim(2, 7)
axa5.set_ylim(-10, -2)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axx6.set_visible(False)
axy6.set_visible(False)
axa6.set_facecolor(col1(200))
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*10
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([3, -4]), True, color = col1(300), alpha = 0.2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(0), alpha = 0.2)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_big_polygon)
axa6.set_xlim(0, 7)
axa6.set_ylim(-12, -2)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axx7.set_visible(False)
axy7.set_visible(False)
axa7.set_facecolor(col1(300))
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*10
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([4, -4]), True, color = col1(0), alpha = 0.2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(100), alpha = 0.2)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_big_polygon)
axa7.set_xlim(0, 7)
axa7.set_ylim(-12, -2)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axx8.set_visible(False)
axy8.set_visible(False)
axa8.set_facecolor(col2(0))
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*10
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-4, -3]), True, color = col2(100), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(200), alpha = 0.4, fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_big_polygon)
axa8.set_xlim(-10, -2)
axa8.set_ylim(-7, -2)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axx9.set_visible(False)
axy9.set_visible(False)
axa9.set_facecolor(col2(100))
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*10
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-4, -4]), True, color = col2(200), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(300), alpha = 0.4, fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_big_polygon)
axa9.set_xlim(-10, -2)
axa9.set_ylim(-7, -2)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axx10.set_visible(False)
axy10.set_visible(False)
axa10.set_facecolor(col2(200))
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*10
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-4, -3]), True, color = col2(300), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(0), alpha = 0.4, fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_big_polygon)
axa10.set_xlim(-14, -2)
axa10.set_ylim(-9, -2)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axx11.set_visible(False)
axy11.set_visible(False)
axa11.set_facecolor(col2(300))
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*10
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-4, -4]), True, color = col2(0), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(100), alpha = 0.4, fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_big_polygon)
axa11.set_xlim(-14, -2)
axa11.set_ylim(-9, -2)
plt.subplot(4,4,13)
plt.title("Dashed : Scaled and Outside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axx12.set_visible(False)
axy12.set_visible(False)
axa12.set_facecolor(col3(300))
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*10
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-3, 4]), True, color = col3(0), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(100), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_big_polygon)
axa12.set_xlim(-8, -2)
axa12.set_ylim(2, 10)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axx13.set_visible(False)
axy13.set_visible(False)
axa13.set_facecolor(col3(200))
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*10
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-4, 4]), True, color = col3(300), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(0), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_big_polygon)
axa13.set_xlim(-8, -2)
axa13.set_ylim(2, 10)
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axx14.set_visible(False)
axy14.set_visible(False)
axa14.set_facecolor(col3(100))
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*10
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-3, 4]), True, color = col3(200), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_big_polygon)
axa14.set_xlim(-11, -2)
axa14.set_ylim(-2, 10)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axx15.set_visible(False)
axy15.set_visible(False)
axa15.set_facecolor(col3(0))
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*10
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-4, 4]), True, color = col3(100), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_big_polygon)
axa15.set_xlim(-11, -2)
axa15.set_ylim(-2, 10)
plt.show()
###Output
_____no_output_____
###Markdown
3. Triangles with areas different by a factor of a hundred In this step the areas of the triangles differ by a factor of a hundred.
###Code
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
X = np.array([[0, -1], [1, 0]])
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axa.set_facecolor(col(0))
axx.set_visible(False)
axy.set_visible(False)
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*100
polygon = pt.Polygon(triangle + np.array([30,40]), True, color = col(100))
big_polygon = pt.Polygon(big_triangle, True, color = col(200))
axa.add_patch(polygon)
axa.add_patch(big_polygon)
axa.set_xlim(100, 20)
axa.set_ylim(25, 60)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axa1.set_facecolor(col(100))
axx1.set_visible(False)
axy1.set_visible(False)
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*100
polygon = pt.Polygon(triangle + np.array([55,45]), True, color = col(200), zorder = 2)
big_polygon = pt.Polygon(big_triangle, True, color = col(300))
axa1.add_patch(polygon)
axa1.add_patch(big_polygon)
axa1.set_xlim(100, 20)
axa1.set_ylim(25, 60)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axa2.set_facecolor(col(200))
axx2.set_visible(False)
axy2.set_visible(False)
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*100
polygon = pt.Polygon(triangle + np.array([30,40]), True, color = col(300))
big_polygon = pt.Polygon(big_triangle, True, color = col(0))
axa2.add_patch(polygon)
axa2.add_patch(big_polygon)
axa2.set_xlim(140, 20)
axa2.set_ylim(20, 80)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axa3.set_facecolor(col(300))
axx3.set_visible(False)
axy3.set_visible(False)
triangle = np.array([[0.48566868, 0.59316155],
[0.30185218, 0.45096474],
[0.9362745 , 0.30407635]])
big_triangle = triangle*100
polygon = pt.Polygon(triangle + np.array([55,45]), True, color = col(0), zorder = 2)
big_polygon = pt.Polygon(big_triangle, True, color = col(100))
axa3.add_patch(polygon)
axa3.add_patch(big_polygon)
axa3.set_xlim(140, 20)
axa3.set_ylim(20, 80)
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axa4.set_facecolor(col1(0))
axx4.set_visible(False)
axy4.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*100
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([30,-40]), True, color = col1(100), alpha = 0.5)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(200), alpha = 0.5)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_big_polygon)
axa4.set_xlim(20, 60)
axa4.set_ylim(-25, -100)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axa5.set_facecolor(col1(100))
axx5.set_visible(False)
axy5.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*100
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([45,-55]), True, color = col1(200), alpha = 0.5, zorder = 2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(300), alpha = 0.5)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_big_polygon)
axa5.set_xlim(20, 60)
axa5.set_ylim(-25, -100)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axa6.set_facecolor(col1(200))
axx6.set_visible(False)
axy6.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*100
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([30,-40]), True, color = col1(300), alpha = 0.5)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(0), alpha = 0.5)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_big_polygon)
axa6.set_xlim(0, 80)
axa6.set_ylim(0, -100)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axa7.set_facecolor(col1(300))
axx7.set_visible(False)
axy7.set_visible(False)
rotate90_triangle = np.matmul(triangle,X)
rotate90_big_triangle = rotate90_triangle*100
rotate90_polygon = pt.Polygon(rotate90_triangle + np.array([45,-55]), True, color = col1(0), alpha = 0.5, zorder = 2)
rotate90_big_polygon = pt.Polygon(rotate90_big_triangle, True, color = col1(100), alpha = 0.5)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_big_polygon)
axa7.set_xlim(0, 80)
axa7.set_ylim(0, -100)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axa8.set_facecolor(col2(0))
axx8.set_visible(False)
axy8.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*100
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-30,-40]), True, color = col2(100), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(200), fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_big_polygon)
axa8.set_xlim(-100, -20)
axa8.set_ylim(-60, -25)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axa9.set_facecolor(col2(100))
axx9.set_visible(False)
axy9.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*100
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-55,-45]), True, color = col2(200), fill = None, zorder = 2)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(300), fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_big_polygon)
axa9.set_xlim(-100, -20)
axa9.set_ylim(-60, -25)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axa10.set_facecolor(col2(200))
axx10.set_visible(False)
axy10.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*100
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-30,-40]), True, color = col2(300), fill = None)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(0), fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_big_polygon)
axa10.set_xlim(-140, -20)
axa10.set_ylim(-80, -20)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axa11.set_facecolor(col2(300))
axx11.set_visible(False)
axy11.set_visible(False)
rotate180_triangle = np.matmul(rotate90_triangle,X)
rotate180_big_triangle = rotate180_triangle*100
rotate180_polygon = pt.Polygon(rotate180_triangle + np.array([-55,-45]), True, color = col2(0), fill = None, zorder = 2)
rotate180_big_polygon = pt.Polygon(rotate180_big_triangle, True, color = col2(100), fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_big_polygon)
axa11.set_xlim(-140, -20)
axa11.set_ylim(-80, -20)
plt.subplot(4,4,13)
plt.title("Dashed : Scaled and Outside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axa12.set_facecolor(col3(300))
axx12.set_visible(False)
axy12.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*100
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-30,40]), True, color = col3(0), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(100), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_big_polygon)
axa12.set_xlim(-20, -60)
axa12.set_ylim(100, 25)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axa13.set_facecolor(col3(200))
axx13.set_visible(False)
axy13.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*100
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-45,55]), True, color = col3(300), zorder = 2, fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(0), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_big_polygon)
axa13.set_xlim(-20, -60)
axa13.set_ylim(100, 25)
X = np.array([[0, -1], [1, 0]])
X2 = np.array([[0, 1], [-1, 0]])
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axa14.set_facecolor(col3(100))
axx14.set_visible(False)
axy14.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*100
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-30,40]), True, color = col3(200), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_big_polygon)
axa14.set_xlim(0, -80)
axa14.set_ylim(100, 0)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axa15.set_facecolor(col3(0))
axx15.set_visible(False)
axy15.set_visible(False)
rotate270_triangle = np.matmul(rotate180_triangle,X)
rotate270_big_triangle = rotate270_triangle*100
rotate270_polygon = pt.Polygon(rotate270_triangle + np.array([-45,55]), True, color = col3(100), zorder = 2, fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Polygon(rotate270_big_triangle, True, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_big_polygon)
axa15.set_xlim(0, -80)
axa15.set_ylim(100, 0)
plt.show()
###Output
_____no_output_____
###Markdown
4. Circles with nearly identical areas. In this step the areas of the circles are nearly identical.
###Code
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axa.set_facecolor(col(0))
axx.set_visible(False)
axy.set_visible(False)
circle = np.array([0.5, 0.5])
X = np.array([[0, -1], [1, 0]])
small_circle = circle - 0.2
polygon = pt.Circle(circle, 0.12, color = col(100))
small_polygon = pt.Circle(small_circle, 0.1, color = col(200))
axa.add_patch(polygon)
axa.add_patch(small_polygon)
axa.set_xlim(0, 1)
axa.set_ylim(0, 1)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axa1.set_facecolor(col(100))
axx1.set_visible(False)
axy1.set_visible(False)
small_circle = circle
polygon = pt.Circle(circle, 0.12, color = col(200))
small_polygon = pt.Circle(small_circle, 0.1, color = col(300))
axa1.add_patch(polygon)
axa1.add_patch(small_polygon)
axa1.set_xlim(0, 1)
axa1.set_ylim(0, 1)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axa2.set_facecolor(col(200))
axx2.set_visible(False)
axy2.set_visible(False)
small_circle = circle - 0.2
polygon = pt.Circle(circle, 0.12, color = col(300))
small_polygon = pt.Circle(small_circle, 0.1, color = col(0))
axa2.add_patch(polygon)
axa2.add_patch(small_polygon)
axa2.set_xlim(-1, 1)
axa2.set_ylim(-1, 1)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axa3.set_facecolor(col(300))
axx3.set_visible(False)
axy3.set_visible(False)
small_circle = circle
polygon = pt.Circle(circle, 0.12, color = col(0))
small_polygon = pt.Circle(small_circle, 0.1, color = col(100))
axa3.add_patch(polygon)
axa3.add_patch(small_polygon)
axa3.set_xlim(-1, 1)
axa3.set_ylim(-1, 1)
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axa4.set_facecolor(col1(0))
axx4.set_visible(False)
axy4.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_small_circle = rotate90_circle*0.8 - 0.2
rotate90_polygon = pt.Circle(rotate90_circle, 0.12, color = col1(100), alpha = 0.4)
rotate90_small_polygon = pt.Circle(rotate90_small_circle, 0.1, color = col1(200), alpha = 0.4)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_small_polygon)
axa4.set_xlim(0, 1)
axa4.set_ylim(0, -1)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axa5.set_facecolor(col1(100))
axx5.set_visible(False)
axy5.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_small_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle, 0.12, color = col1(200), alpha = 0.4)
rotate90_small_polygon = pt.Circle(rotate90_small_circle, 0.1, color = col1(300), alpha = 0.4)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_small_polygon)
axa5.set_xlim(0, 1)
axa5.set_ylim(0, -1)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axa6.set_facecolor(col1(200))
axx6.set_visible(False)
axy6.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_small_circle = rotate90_circle - 0.2
rotate90_polygon = pt.Circle(rotate90_circle, 0.12, color = col1(300), alpha = 0.4)
rotate90_small_polygon = pt.Circle(rotate90_small_circle, 0.1, color = col1(0), alpha = 0.4)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_small_polygon)
axa6.set_xlim(-1, 1)
axa6.set_ylim(-1, 1)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axa7.set_facecolor(col1(300))
axx7.set_visible(False)
axy7.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_small_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle, 0.12, color = col1(0), alpha = 0.4)
rotate90_small_polygon = pt.Circle(rotate90_small_circle, 0.1, color = col1(100), alpha = 0.4)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_small_polygon)
axa7.set_xlim(-1, 1)
axa7.set_ylim(-1, 1)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axa8.set_facecolor(col2(0))
axx8.set_visible(False)
axy8.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_small_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle, 0.12, color = col2(100), fill = None)
rotate180_small_polygon = pt.Circle(rotate180_small_circle, 0.1, color = col2(200), fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_small_polygon)
axa8.set_xlim(-1, 0)
axa8.set_ylim(-1, 0)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axa9.set_facecolor(col2(100))
axx9.set_visible(False)
axy9.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_small_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle, 0.12, color = col2(200), fill = None)
rotate180_small_polygon = pt.Circle(rotate180_small_circle, 0.1, color = col2(300), fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_small_polygon)
axa9.set_xlim(-1, 0)
axa9.set_ylim(-1, 0)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axa10.set_facecolor(col2(200))
axx10.set_visible(False)
axy10.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_small_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle, 0.12, color = col2(300), fill = None)
rotate180_small_polygon = pt.Circle(rotate180_small_circle, 0.1, color = col2(0), fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_small_polygon)
axa10.set_xlim(-1, 1)
axa10.set_ylim(-1, 1)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axa11.set_facecolor(col2(300))
axx11.set_visible(False)
axy11.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_small_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle, 0.12, color = col2(0), fill = None)
rotate180_small_polygon = pt.Circle(rotate180_small_circle, 0.1, color = col2(100), fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_small_polygon)
axa11.set_xlim(-1, 1)
axa11.set_ylim(-1, 1)
plt.subplot(4,4,13)
plt.title("NoFill : Zoomed and Inside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axa12.set_facecolor(col3(300))
axx12.set_visible(False)
axy12.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_small_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle, 0.12, color = col3(0), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Circle(rotate270_small_circle, 0.1, color = col3(100), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_small_polygon)
axa12.set_xlim(0, -1)
axa12.set_ylim(1, 0)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axa13.set_facecolor(col3(200))
axx13.set_visible(False)
axy13.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_small_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle, 0.12, color = col3(300), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Circle(rotate270_small_circle, 0.1, color = col3(0), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_small_polygon)
axa13.set_xlim(0, -1)
axa13.set_ylim(1, 0)
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axa14.set_facecolor(col3(100))
axx14.set_visible(False)
axy14.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_small_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle, 0.12, color = col3(200), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Circle(rotate270_small_circle, 0.1, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_small_polygon)
axa14.set_xlim(-1, 1)
axa14.set_ylim(-1, 1)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axa15.set_facecolor(col3(0))
axx15.set_visible(False)
axy15.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_small_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle, 0.12, color = col3(100), fill = None, linestyle = "-.")
rotate270_small_polygon = pt.Circle(rotate270_small_circle, 0.1, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_small_polygon)
axa15.set_xlim(-1, 1)
axa15.set_ylim(-1, 1)
plt.show()
###Output
_____no_output_____
###Markdown
5. Circles with areas different by a factor of a dozen. In this step the areas of the circles differ by roughly a dozen times.
###Code
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axa.set_facecolor(col(0))
axx.set_visible(False)
axy.set_visible(False)
circle = np.array([2.5, 2.5])
X = np.array([[0, -1], [1, 0]])
big_circle = circle + 0.2
polygon = pt.Circle(circle + np.array([-1, -1]), 0.3, color = col(100), zorder = 2)
big_polygon = pt.Circle(big_circle, 1, color = col(200))
axa.add_patch(polygon)
axa.add_patch(big_polygon)
axa.set_xlim(0, 5)
axa.set_ylim(0, 5)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axa1.set_facecolor(col(100))
axx1.set_visible(False)
axy1.set_visible(False)
big_circle = circle - 0.2
polygon = pt.Circle(circle, 0.3, color = col(200), zorder = 2)
big_polygon = pt.Circle(big_circle, 1, color = col(300))
axa1.add_patch(polygon)
axa1.add_patch(big_polygon)
axa1.set_xlim(0, 5)
axa1.set_ylim(0, 5)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axa2.set_facecolor(col(200))
axx2.set_visible(False)
axy2.set_visible(False)
big_circle = circle + 0.2
polygon = pt.Circle(circle + np.array([-1, -1]), 0.3, color = col(300), zorder = 2)
big_polygon = pt.Circle(big_circle, 1, color = col(0))
axa2.add_patch(polygon)
axa2.add_patch(big_polygon)
axa2.set_xlim(-5, 5)
axa2.set_ylim(-5, 5)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axa3.set_facecolor(col(300))
axx3.set_visible(False)
axy3.set_visible(False)
big_circle = circle - 0.2
polygon = pt.Circle(circle, 0.3, color = col(0), zorder = 2)
big_polygon = pt.Circle(big_circle, 1, color = col(100))
axa3.add_patch(polygon)
axa3.add_patch(big_polygon)
axa3.set_xlim(-5, 5)
axa3.set_ylim(-5, 5)
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axa4.set_facecolor(col1(0))
axx4.set_visible(False)
axy4.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle + 0.2
rotate90_polygon = pt.Circle(rotate90_circle + np.array([-1, -1]), 0.3, color = col1(100), alpha = 0.4)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 1, color = col1(200), alpha = 0.4)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_big_polygon)
axa4.set_xlim(5, 0)
axa4.set_ylim(-5, 0)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axa5.set_facecolor(col1(100))
axx5.set_visible(False)
axy5.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle - 0.2
rotate90_polygon = pt.Circle(rotate90_circle, 0.3, color = col1(200), alpha = 0.4)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 1, color = col1(300), alpha = 0.4)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_big_polygon)
axa5.set_xlim(5, 0)
axa5.set_ylim(-5, 0)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axa6.set_facecolor(col1(200))
axx6.set_visible(False)
axy6.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle + 0.2
rotate90_polygon = pt.Circle(rotate90_circle + np.array([-1, -1]), 0.3, color = col1(300), alpha = 0.4)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 1, color = col1(0), alpha = 0.4)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_big_polygon)
axa6.set_xlim(5, -5)
axa6.set_ylim(-5, 5)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axa7.set_facecolor(col1(300))
axx7.set_visible(False)
axy7.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle - 0.2
rotate90_polygon = pt.Circle(rotate90_circle, 0.3, color = col1(0), alpha = 0.4)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 1, color = col1(100), alpha = 0.4)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_big_polygon)
axa7.set_xlim(5, -5)
axa7.set_ylim(-5, 5)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axa8.set_facecolor(col2(0))
axx8.set_visible(False)
axy8.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle + np.array([-1, -1]), 0.3, color = col2(100), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 1, color = col2(200), fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_big_polygon)
axa8.set_xlim(-5, 0)
axa8.set_ylim(-5, 0)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axa9.set_facecolor(col2(100))
axx9.set_visible(False)
axy9.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle, 0.3, color = col2(200), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 1, color = col2(300), fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_big_polygon)
axa9.set_xlim(-5, 0)
axa9.set_ylim(-5, 0)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axa10.set_facecolor(col2(200))
axx10.set_visible(False)
axy10.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle + np.array([-1, -1]), 0.3, color = col2(300), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 1, color = col2(0), fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_big_polygon)
axa10.set_xlim(5, -5)
axa10.set_ylim(-5, 5)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axa11.set_facecolor(col2(300))
axx11.set_visible(False)
axy11.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle + 0.2
rotate180_polygon = pt.Circle(rotate180_circle, 0.3, color = col2(0), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 1, color = col2(100), fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_big_polygon)
axa11.set_xlim(5, -5)
axa11.set_ylim(-5, 5)
plt.subplot(4,4,13)
plt.title("Dashed : Scaled and Outside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axa12.set_facecolor(col3(300))
axx12.set_visible(False)
axy12.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle + np.array([-1, -1]), 0.3, color = col3(0), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 1, color = col3(100), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_big_polygon)
axa12.set_xlim(-5, 0)
axa12.set_ylim(5, 0)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axa13.set_facecolor(col3(200))
axx13.set_visible(False)
axy13.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle, 0.3, color = col3(300), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 1, color = col3(0), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_big_polygon)
axa13.set_xlim(-5, 0)
axa13.set_ylim(5, 0)
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axa14.set_facecolor(col3(100))
axx14.set_visible(False)
axy14.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle + np.array([-1, -1]), 0.3, color = col3(200), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 1, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_big_polygon)
axa14.set_xlim(-5, 5)
axa14.set_ylim(5, -5)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axa15.set_facecolor(col3(0))
axx15.set_visible(False)
axy15.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle + 0.2
rotate270_polygon = pt.Circle(rotate270_circle, 0.3, color = col3(100), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 1, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_big_polygon)
axa15.set_xlim(-5, 5)
axa15.set_ylim(5, -5)
plt.show()
###Output
_____no_output_____
###Markdown
6. Circles with areas different by a factor of a thousand. In this step the areas of the circles have a very large difference.
###Code
fig = plt.figure(figsize=(16,16))
col = plt.get_cmap("YlOrRd")
col1 = plt.get_cmap("RdYlBu")
col2 = plt.get_cmap("PRGn")
col3 = plt.get_cmap("PiYG")
plt.subplot(4,4,1)
plt.title("Filled : Scaled and Outside")
axa = plt.gca()
axx = axa.xaxis
axy = axa.yaxis
axa.set_facecolor(col(0))
axx.set_visible(False)
axy.set_visible(False)
circle = np.array([25, 25])
X = np.array([[0, -1], [1, 0]])
big_circle = circle
polygon = pt.Circle(circle + np.array([-10, -10]) , 0.1, color = col(100), zorder = 2)
big_polygon = pt.Circle(big_circle, 10, color = col(200))
axa.add_patch(polygon)
axa.add_patch(big_polygon)
axa.set_xlim(0, 50)
axa.set_ylim(0, 50)
plt.subplot(4,4,2)
plt.title("Filled : Scaled and Inside")
axa1 = plt.gca()
axx1 = axa1.xaxis
axy1 = axa1.yaxis
axa1.set_facecolor(col(100))
axx1.set_visible(False)
axy1.set_visible(False)
big_circle = circle
polygon = pt.Circle(circle, 0.1, color = col(200), zorder = 2)
big_polygon = pt.Circle(big_circle, 10, color = col(300))
axa1.add_patch(polygon)
axa1.add_patch(big_polygon)
axa1.set_xlim(0, 50)
axa1.set_ylim(0, 50)
plt.subplot(4,4,3)
plt.title("Filled : Zoomed and Outside")
axa2 = plt.gca()
axx2 = axa2.xaxis
axy2 = axa2.yaxis
axa2.set_facecolor(col(200))
axx2.set_visible(False)
axy2.set_visible(False)
big_circle = circle
polygon = pt.Circle(circle + np.array([-10, -10]), 0.1, color = col(300), zorder = 2)
big_polygon = pt.Circle(big_circle, 10, color = col(0))
axa2.add_patch(polygon)
axa2.add_patch(big_polygon)
axa2.set_xlim(-50, 50)
axa2.set_ylim(-50, 50)
plt.subplot(4,4,4)
plt.title("Filled : Zoomed and Inside")
axa3 = plt.gca()
axx3 = axa3.xaxis
axy3 = axa3.yaxis
axa3.set_facecolor(col(300))
axx3.set_visible(False)
axy3.set_visible(False)
big_circle = circle
polygon = pt.Circle(circle, 0.1, color = col(0), zorder = 2)
big_polygon = pt.Circle(big_circle, 10, color = col(100))
axa3.add_patch(polygon)
axa3.add_patch(big_polygon)
axa3.set_xlim(-50, 50)
axa3.set_ylim(-50, 50)
plt.subplot(4,4,5)
plt.title("Blurred : Scaled and Outside")
axa4 = plt.gca()
axx4 = axa4.xaxis
axy4 = axa4.yaxis
axa4.set_facecolor(col1(0))
axx4.set_visible(False)
axy4.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle + np.array([-10, -10]), 0.1, color = col1(100), alpha = 0.7)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 10, color = col1(200), alpha = 0.7)
axa4.add_patch(rotate90_polygon)
axa4.add_patch(rotate90_big_polygon)
axa4.set_xlim(0, 50)
axa4.set_ylim(-50, 0)
plt.subplot(4,4,6)
plt.title("Blurred : Scaled and Inside")
axa5 = plt.gca()
axx5 = axa5.xaxis
axy5 = axa5.yaxis
axa5.set_facecolor(col1(100))
axx5.set_visible(False)
axy5.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle, 0.1, color = col1(200), alpha = 0.7)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 10, color = col1(300), alpha = 0.7)
axa5.add_patch(rotate90_polygon)
axa5.add_patch(rotate90_big_polygon)
axa5.set_xlim(0, 50)
axa5.set_ylim(-50, 0)
plt.subplot(4,4,7)
plt.title("Blurred : Zoomed and Outside")
axa6 = plt.gca()
axx6 = axa6.xaxis
axy6 = axa6.yaxis
axa6.set_facecolor(col1(200))
axx6.set_visible(False)
axy6.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle + np.array([-10, -10]), 0.1, color = col1(300), alpha = 0.7)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 10, color = col1(0), alpha = 0.7)
axa6.add_patch(rotate90_polygon)
axa6.add_patch(rotate90_big_polygon)
axa6.set_xlim(50, -50)
axa6.set_ylim(-50, 50)
plt.subplot(4,4,8)
plt.title("Blurred : Zoomed and Inside")
axa7 = plt.gca()
axx7 = axa7.xaxis
axy7 = axa7.yaxis
axa7.set_facecolor(col1(300))
axx7.set_visible(False)
axy7.set_visible(False)
rotate90_circle = np.matmul(circle,X)
rotate90_big_circle = rotate90_circle
rotate90_polygon = pt.Circle(rotate90_circle, 0.1, color = col1(0), alpha = 0.7)
rotate90_big_polygon = pt.Circle(rotate90_big_circle, 10, color = col1(100), alpha = 0.7)
axa7.add_patch(rotate90_polygon)
axa7.add_patch(rotate90_big_polygon)
axa7.set_xlim(50, -50)
axa7.set_ylim(-50, 50)
plt.subplot(4,4,9)
plt.title("NoFill : Scaled and Outside")
axa8 = plt.gca()
axx8 = axa8.xaxis
axy8 = axa8.yaxis
axa8.set_facecolor(col2(0))
axx8.set_visible(False)
axy8.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle + np.array([-10, -10]), 0.1, color = col2(100), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 10, color = col2(200), fill = None)
axa8.add_patch(rotate180_polygon)
axa8.add_patch(rotate180_big_polygon)
axa8.set_xlim(-50, 0)
axa8.set_ylim(-50, 0)
plt.subplot(4,4,10)
plt.title("NoFill : Scaled and Inside")
axa9 = plt.gca()
axx9 = axa9.xaxis
axy9 = axa9.yaxis
axa9.set_facecolor(col2(100))
axx9.set_visible(False)
axy9.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle, 0.1, color = col2(200), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 10, color = col2(300), fill = None)
axa9.add_patch(rotate180_polygon)
axa9.add_patch(rotate180_big_polygon)
axa9.set_xlim(-50, 0)
axa9.set_ylim(-50, 0)
plt.subplot(4,4,11)
plt.title("NoFill : Zoomed and Outside")
axa10 = plt.gca()
axx10 = axa10.xaxis
axy10 = axa10.yaxis
axa10.set_facecolor(col2(200))
axx10.set_visible(False)
axy10.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle + np.array([-10, -10]), 0.1, color = col2(300), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 10, color = col2(0), fill = None)
axa10.add_patch(rotate180_polygon)
axa10.add_patch(rotate180_big_polygon)
axa10.set_xlim(50, -50)
axa10.set_ylim(-50, 50)
plt.subplot(4,4,12)
plt.title("NoFill : Zoomed and Inside")
axa11 = plt.gca()
axx11 = axa11.xaxis
axy11 = axa11.yaxis
axa11.set_facecolor(col2(300))
axx11.set_visible(False)
axy11.set_visible(False)
rotate180_circle = np.matmul(rotate90_circle,X)
rotate180_big_circle = rotate180_circle
rotate180_polygon = pt.Circle(rotate180_circle, 0.1, color = col2(0), fill = None)
rotate180_big_polygon = pt.Circle(rotate180_big_circle, 10, color = col2(100), fill = None)
axa11.add_patch(rotate180_polygon)
axa11.add_patch(rotate180_big_polygon)
axa11.set_xlim(50, -50)
axa11.set_ylim(-50, 50)
plt.subplot(4,4,13)
plt.title("Dashed : Scaled and Outside")
axa12 = plt.gca()
axx12 = axa12.xaxis
axy12 = axa12.yaxis
axa12.set_facecolor(col3(300))
axx12.set_visible(False)
axy12.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle + np.array([-10, -10]), 0.1, color = col3(0), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 10, color = col3(100), fill = None, linestyle = "-.")
axa12.add_patch(rotate270_polygon)
axa12.add_patch(rotate270_big_polygon)
axa12.set_xlim(-50, 0)
axa12.set_ylim(0, 50)
plt.subplot(4,4,14)
plt.title("Dashed : Scaled and Inside")
axa13 = plt.gca()
axx13 = axa13.xaxis
axy13 = axa13.yaxis
axa13.set_facecolor(col3(200))
axx13.set_visible(False)
axy13.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle, 0.1, color = col3(300), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 10, color = col3(0), fill = None, linestyle = "-.")
axa13.add_patch(rotate270_polygon)
axa13.add_patch(rotate270_big_polygon)
axa13.set_xlim(-50, 0)
axa13.set_ylim(0, 50)
plt.subplot(4,4,15)
plt.title("Dashed : Zoomed and Outside")
axa14 = plt.gca()
axx14 = axa14.xaxis
axy14 = axa14.yaxis
axa14.set_facecolor(col3(100))
axx14.set_visible(False)
axy14.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle + np.array([-10, -10]), 0.1, color = col3(200), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 10, color = col3(300), fill = None, linestyle = "-.")
axa14.add_patch(rotate270_polygon)
axa14.add_patch(rotate270_big_polygon)
axa14.set_xlim(-50, 50)
axa14.set_ylim(50, -50)
plt.subplot(4,4,16)
plt.title("Dashed : Zoomed and Inside")
axa15 = plt.gca()
axx15 = axa15.xaxis
axy15 = axa15.yaxis
axa15.set_facecolor(col3(0))
axx15.set_visible(False)
axy15.set_visible(False)
rotate270_circle = np.matmul(rotate180_circle,X)
rotate270_big_circle = rotate270_circle
rotate270_polygon = pt.Circle(rotate270_circle, 0.1, color = col3(100), fill = None, linestyle = "-.")
rotate270_big_polygon = pt.Circle(rotate270_big_circle, 10, color = col3(200), fill = None, linestyle = "-.")
axa15.add_patch(rotate270_polygon)
axa15.add_patch(rotate270_big_polygon)
axa15.set_xlim(-50, 50)
axa15.set_ylim(50, -50)
plt.show()
###Output
_____no_output_____ |
01 python/lect 3 materials/2020_DPO_3_0_strings_methods_problems.ipynb | ###Markdown
Center for Continuing Education, programme «Python for Automation and Data Analysis». String methods (practice problems), Week 2-2. *Author: Татьяна Рогович, НИУ ВШЭ* *Additions: Ян Пиле, Яндекс.Маркет* Problem: item price. The price of an item is given in rubles with kopeck precision, that is, as a real number with two digits after the decimal point. Store the cost of the item in two integer variables (the whole number of rubles and the whole number of kopecks) and print them. Conditional statements and loops may not be used when solving this problem. **Input format** A non-negative real number is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: 10.35 Program output: 10 35 **Test 2** Input: 1.99 Program output: 1 99 **Test 3** Input: 3.50 Program output: 3 50
###Code
price = input()
print(' '.join(price.split('.')))
###Output
10.33
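The solution above prints the two parts but keeps them as strings. A small variant that actually stores the rubles and kopecks in two integer variables, as the statement asks, could look like this (a sketch, not part of the original notebook):

```python
price = input()              # e.g. "10.35"
rub, kop = price.split('.')  # split on the decimal point
rubles = int(rub)            # whole rubles as an integer
kopecks = int(kop)           # whole kopecks as an integer
print(rubles, kopecks)       # -> 10 35
```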
###Markdown
Problem: swap the words. A string consisting of exactly two words separated by a space is given. Swap these words, write the result to a string and print the resulting string. Loops and the if statement may not be used when solving this problem. **Input format** A string is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: Hello, world! Program output: world! Hello, **Test 2** Input: A B Program output: B A **Test 3** Input: Q WERRTYUIOP Program output: WERRTYUIOP Q
###Code
words = input()
(a, b) = words.split(' ')
print(b, a)
###Output
WERRTYUIOP Q
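The same swap can also be written with split, slicing and join alone, still without loops or if (a sketch):

```python
words = input()                          # e.g. "Hello, world!"
print(' '.join(words.split(' ')[::-1]))  # -> world! Hello,
```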
###Markdown
Problem: compound interest. The interest rate on a deposit is P percent per year, and the interest is added to the deposit after each year. The deposit is X rubles Y kopecks. Determine the size of the deposit after K years. **Input format** The program receives the integers P, X, Y, K. **Output format** The program must print two numbers: the value of the deposit after K years in rubles and in kopecks. The fractional part of a kopeck at the end of a year is discarded. The recalculation of the deposit (with truncation of fractional kopecks) happens every year. **Examples** **Test 1** Input: 12 179 0 5 Program output: 315 43 **Test 2** Input: 13 179 0 100 Program output: 36360285 50 **Test 3** Input: 1 1 0 1000 Program output: 11881 92
###Code
deposit = input()
(P, X, Y, K) = deposit.strip().split()
pv = int(X) + int(Y)/100
K = int(K)
P = int(P)
r = P/100
i = 1
while i <=K:
pv = pv*100 + r*pv*100
pv = int(pv)/100
i += 1
print(int(pv//1), int(pv*100%100))
# r = 1+P/100
# i = 1
# pv = int(X) + int(Y)/100
# while i <=K:
# pv = r*pv*100
# pv = int(pv)/100
# i += 1
# print(pv)
###Output
1 1 0 1000
11881 92
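The float arithmetic above happens to reproduce the expected answers, but the whole calculation can also be done in integer kopecks, which avoids rounding issues entirely. A sketch with the Test 1 values hard-coded instead of read from input:

```python
P, X, Y, K = 12, 179, 0, 5    # example input from Test 1
kop = X * 100 + Y             # deposit expressed in whole kopecks
for _ in range(K):
    kop += kop * P // 100     # add the yearly interest, dropping fractional kopecks
print(kop // 100, kop % 100)  # -> 315 43
```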
###Markdown
The second solution, commented out in the cell above, does not give an exact answer because of floating-point rounding:
###Code
from decimal import *
a = int(5000)
fv = a * 1.13
fv2 = a + a * 0.13
print(fv == fv2)
print(fv)
print(fv2)
print('разница расчета процентных ставок от 1,13 и 0,13 в float \n')
getcontext().prec = 6
fv3 = Decimal(a) * Decimal(1.13)
fv4 = Decimal(a) + Decimal(a) * Decimal(0.13)
print(fv3 == fv4)
print(fv3)
print(fv4)
print('будет работать если считать в Decimal')
###Output
False
5649.999999999999
5650.0
разница расчета процентных ставок от 1,13 и 0,13 в float
True
5650.00
5650.00
будет работать если считать в Decimal
###Markdown
Problem: palindrome. A word is entered. Check whether the word is a palindrome; if it is, the program prints the corresponding message. **Test 1** **Input** шалаш **Output** Палиндром (palindrome) **Test 2** **Input** собака **Output** Не палиндром (not a palindrome)
###Code
text = input()
if text == text[::-1]:
print('Палиндром')
else:
print('Не палиндром')
###Output
собака
Не палиндром
###Markdown
Problem: remove a fragment. A string is given in which the letter h occurs at least twice. Remove from the string the first and the last occurrence of the letter h, together with all characters between them. **Input format** A string is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: In the hole in the ground there lived a hobbit Program output: In tobbit **Test 2** Input: qwertyhasdfghzxcvb Program output: qwertyzxcvb **Test 3** Input: asdfghhzxcvb Program output: asdfgzxcvb
###Code
s = input('string')
print(s[:s.find('h')] + s[s.rfind('h') + 1:])
###Output
stringqwertyhasdfghzxcvb
qwertyzxcvb
###Markdown
Problem: second occurrence. A string is given. Find the second occurrence of the letter f in the string and print its index. If the letter f occurs only once, print -1; if it does not occur at all, print -2. The count method may not be used when solving this problem. **Input format** A string is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: comfort Program output: -1 **Test 2** Input: coffee Program output: 3 **Test 3** Input: qwerty Program output: -2
###Code
import re
s = input()
def f_counter(st):
    # Scan the string once, tracking the current index and how many 'f' have been seen
    pos = 0
    n_f = 0
    for i in st:
        if i == 'f':
            n_f += 1
            if n_f == 2:
                return pos
        pos += 1
    if n_f == 1:
        return -1
    else:
        return -2
print(f_counter(s))
###Output
coffee
3
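str.find with a start position gives the same answer without counting characters by hand (a sketch):

```python
s = input()                      # e.g. "coffee"
first = s.find('f')              # index of the first 'f', or -1 if absent
second = s.find('f', first + 1)  # search again just after the first hit
if first == -1:
    print(-2)                    # no 'f' at all
elif second == -1:
    print(-1)                    # exactly one 'f'
else:
    print(second)                # index of the second 'f' -> 3
```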
###Markdown
Problem: insert characters. A string is given. Build a new string by inserting the character ! between every two characters of the original string, and print the resulting string. **Input format** A string is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: Python Program output: P!y!t!h!o!n **Test 2** Input: Hello Program output: H!e!l!l!o **Test 3** Input: A Program output: A
###Code
s = input()
res = [s[0]]
for rems in s[1:]:
res.append('!')
res.append(rems)
print(''.join(res))
###Output
Python
P!y!t!h!o!n
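The same result fits in one line with str.join, which inserts its separator between every pair of characters of the string (a sketch):

```python
s = input()         # e.g. "Python"
print('!'.join(s))  # -> P!y!t!h!o!n
```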
###Markdown
Problem: find the length of the longest word. A string consisting of words separated by spaces is given. Find and print the length of the longest word. A word is a contiguous sequence of non-space characters (^%^4fhjkdslgds is a word). **Input format** A string of words separated by spaces is entered. **Output format** Print the answer to the problem. **Examples** **Test 1** Input: Python C++ Perl Ruby Brainfuck Program output: 9 **Test 2** Input: Hello Program output: 5 **Test 3** Input: A Program output: 1
###Code
s = input()
max_l = 1
for i in s.split():
if max_l < len(i):
max_l = len(i)
print(max_l)
###Output
hello
5
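max over the lengths of the split words is a more compact way to get the same number (a sketch):

```python
s = input()                                  # e.g. "Python C++ Perl Ruby Brainfuck"
print(max(len(word) for word in s.split()))  # -> 9
```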
###Markdown
Problem: count the occurrences of a substring in a string, ignoring case. Two strings are given. Find the number of occurrences of the second string in the first, regardless of case. **Input format** Two strings are entered. **Output format** The number of occurrences is printed. **Examples** **Test 1** Input: String 1: Python C++ Perl Ruby Brainfuck, String 2: P Program output: 2 **Test 2** Input: String 1: Hello, String 2: H Program output: 1 **Test 3** Input: String 1: A, String 2: B Program output: 0
###Code
import re
s1 = "Python C++ Perl Ruby Brainfuck"
s2 = "P"
s1 = s1.lower()
s2 = s2.lower()
print(len(re.findall(s2, s1)))
###Output
2
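Note that re.findall treats the needle as a regular expression, so a second string such as "C++" would raise an error. str.count on lower-cased copies follows the problem statement more directly (a sketch):

```python
s1 = "Python C++ Perl Ruby Brainfuck"
s2 = "P"
print(s1.lower().count(s2.lower()))  # -> 2
```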
|
module01-introduction.to.python.programming.ipynb | ###Markdown
DCL-160: Python Programming, Module 01, December 08, 2020. Binnur Kurt
###Code
import this
name = "Jack"
x = None
type(x)
type(name)
y = "\u20ba"
y
z = 3615.73
type(z)
u = 42
type(u)
v = True
type(v)
type("Jack Shephard")
type(None)
type(u * u + 2.45)
v = 2**16
v
type(v)
type( (0.1+0.2)+0.3 == 0.1+(0.2+0.3))
w = (0.1+0.2)+0.3 == 0.1+(0.2+0.3)
w
w = 0.1 + 0.2
w
w = 0.1 + ( 0.2 + 0.3 )
w
w = ( 0.1 + 0.2 ) + 0.3
w == 0.6
100 * 4.35
2.0 - 1.1
# CPU (8-10), GPU (1024-2048-4096), TPU(Tensor Processing Unit) -> IEEE-756 v5
i = 2
j = i ** 100000
j
3615 * 4629
3615.0 * 4629.0
#TensorRT -> Model -> Integer Model x100
3 / 2
4 / 2
3 // 2
3 % 2
2 ** 3
m = 2 / 0
m = float('-inf')
m
n = float('nan')
n
n * 2
m = float('nan')
m
n
m == n
x = 42
x == x
x = float('nan')
x == x
u = "123.45"
type(u)
f = float(u)
f
g = int(f)
g
bool(g)
bool(0)
bool(-1)
bool(+1)
h = None
h is None
h = 42
h is None
h is not None
def add_and_multiple(a,b,c=None):
result = a + b
if c is not None:
result = result * c
return result
x=10
y=20
add_and_multiple(x,y)
add_and_multiple(x,y,2)
c1 = complex(2,3) # 2 + 3i
type(c1)
c2 = complex(3,2) # 3 + 2i
c1
c2
c1 * c2
c1.conjugate()
c1 * c1.conjugate()
c1.conjugate().real
c1.conjugate().imag
fullname = "jack bauer"
fullname
fullname = 'jack bauer'
fullname
fulltext = """this is first line.
this is the second line.
this is the third line.
"""
fulltext= "this is first line.\nthis is second line.\nthis is the third line."
print(fulltext)
print("""This is the firstline.
This is the second line.
This is the third line.
""")
print("This" "is" "the" "line")
print("This","is","the","line")
print("first name: \"%s\", last name: \"%s\"\nsalary: %8.2f"
% ("Jack","Bauer", 123456.789))
numbers = [4,8,15,16,23,42]
print("Lottery numbers:\t\\%s\\" % numbers)
from math import pow, sqrt, exp, expm1
expm1(1e-5)
pow(10,2.3)
sqrt(3615.77)
import math
math.dist((2,8,1),(-1,-7,8))
math.degrees(3.1415)
math.radians(180)
math.acosh(3.14)
math.erf(0.9)
math.e
math.pi
math.inf
math.nan
4 * math.atan(1)
math.factorial(20)
math.gcd(3,5)
math.fsum([1.1,2.2,3.3,4.4,5.5,6.6,7.7,8.8,9.9])
math.fsum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1])
sum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1])
max([0.9,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1])
min([0.9,0.1,0.05,0.1,0.1,0.1,0.1,0.1,0.1,0.1])
from mpmath import *
x = 0
y = 0
z = x / y
z = math.nan
z = float('-inf')
z
z = z + 1
z
z = math.nan
z += 1
z = z + 1
import this
z
!pip3 install mpmath > requirements.txt
from mpmath import *
mp.dps = 10
mp.pretty = True
4*atan(1)
mp.dps = 100
mp.pretty = True
4*atan(1)
limit(lambda n: (1+1/n)**n, inf)
###Output
_____no_output_____ |
titanic-disaster-prediction/Titanic.ipynb | ###Markdown
Bootstrap environment
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import Imputer
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
###Output
_____no_output_____
###Markdown
Training data Read training data
###Code
df = pd.read_csv('./data/train.csv')
df_copy = df.copy()
###Output
_____no_output_____
###Markdown
Visualization
###Code
df_plot = df.copy()
df.head()
sns.factorplot('Sex', data = df_plot, kind = 'count', hue='Survived')
sns.factorplot('Pclass', data = df_plot, kind = 'count', hue='Survived')
sns.factorplot('SibSp', data = df_plot, kind = 'count', hue='Survived')
sns.factorplot('Parch', data = df_plot, kind = 'count', hue='Survived')
sns.factorplot('Age', data = df_plot, kind = 'count', hue='Survived')
###Output
_____no_output_____
###Markdown
Data cleaning
###Code
df.head(0)
del df['PassengerId']
del df['Name']
del df['Ticket']
del df['Cabin']
del df['Fare']
df.head(0)
df[['SibSp', 'Parch']] = df[['SibSp', 'Parch']].fillna(value=0)
df['Age'] = df['Age'].fillna(df['Age'].mean())
df = df.dropna()
df.isnull().values.any()
df['Sex'] = df['Sex'].map({'male': 1, 'female': 2})
df['Embarked'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
df['Embarked'] = df['Embarked'].astype(int)
###Output
_____no_output_____
###Markdown
Feature engineering SibSp and Parch
###Code
df['SibSp'].unique()
df['SibSp'] = pd.cut(df['SibSp'], [-1,0,200], labels=[0, 1])
df['Parch'].unique()
df['Parch'] = pd.cut(df['Parch'], [-1,0,200], labels=[0, 1])
###Output
_____no_output_____
###Markdown
Age
###Code
df['Age'].min()
df['Age'].max()
df['Age'] = pd.cut(df['Age'], [-1,20,200], labels=[0, 1])
###Output
_____no_output_____
###Markdown
Test data Read test data
###Code
df_t = pd.read_csv('./data/test.csv')
df_t_copy = df_t.copy()
###Output
_____no_output_____
###Markdown
Data cleaning
###Code
del df_t['PassengerId']
del df_t['Name']
del df_t['Ticket']
del df_t['Cabin']
del df_t['Fare']
df_t['SibSp'] = pd.cut(df_t['SibSp'], [-1,0,200], labels=[0, 1])
df_t['Parch'] = pd.cut(df_t['Parch'], [-1,0,200], labels=[0, 1])
df_t['Sex'] = df_t['Sex'].map({'male': 1, 'female': 2})
df_t['Age'] = df_t['Age'].fillna(df_t['Age'].mean())
###Output
_____no_output_____
###Markdown
Feature engineering
###Code
df_t['Age'] = pd.cut(df_t['Age'], [-1,20,200], labels=[0, 1])
df_t['Embarked'] = df_t['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
###Output
_____no_output_____
###Markdown
Prediction
###Code
x = df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Embarked']].values
y = df['Survived'].values
x_t = df_t.values
###Output
_____no_output_____
###Markdown
Support Vector Machine
###Code
clf = SVC(kernel='linear')
clf.fit(x, y)
df_svm = df_t_copy[['PassengerId']].copy()
df_svm['Survived'] = clf.predict(x_t)
df_svm.to_csv('./results/result-SVM.csv', index=False)
###Output
_____no_output_____
###Markdown
Random forest
###Code
clf = RandomForestClassifier(n_jobs=2, random_state=0)
clf.fit(x, y)
df_rf = df_t_copy[['PassengerId']].copy()
df_rf['Survived'] = clf.predict(x_t)
df_rf.to_csv('./results/result-Random-Forest.csv', index=False)
###Output
_____no_output_____
###Markdown
Neural network (MLP)
###Code
clf = MLPClassifier(hidden_layer_sizes=(13,13,13), max_iter=500)
clf.fit(x, y)
df_mlp = df_t_copy[['PassengerId']].copy()
df_mlp['Survived'] = clf.predict(x_t)
df_mlp.to_csv('./results/result-MLP.csv', index=False)
###Output
_____no_output_____ |
Python_Stock/Time_Series_Forecasting/Stock_Forecasting_Pastas_Basic_Model.ipynb | ###Markdown
Stock Forecasting using Pastas (Basic Models) https://pastas.readthedocs.io/en/latest/index.html https://github.com/pastas/pastas
###Code
# Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pastas as ps
import yfinance as yf
yf.pdr_override()
ps.show_versions()
stock = 'AMD' # input
start = '2021-01-01' # input
end = '2021-11-23' # input
df = yf.download(stock, start, end)
df.head()
df.tail()
plt.figure(figsize=(16,8))
plt.plot(df['Adj Close'])
plt.title('Stock Price')
plt.ylabel('Price')
plt.show()
df['Middle'] = df['High'] - df['Low']
df['Returns'] = df['Adj Close'].pct_change()
df = df.rename(columns={'Adj Close':'AdjClose'})
df = df.dropna()
df.head()
plt.figure(figsize=(16,8))
plt.plot(df['Returns'])
plt.title('Stock Returns')
plt.ylabel('Price')
plt.show()
df.columns.values
dfr = df['Returns'].resample('D').mean().interpolate('linear')
dfv = df['Volume'].resample('D').mean().interpolate('linear')
dfh = df['High'].resample('D').mean().interpolate('linear')
dfl = df['Low'].resample('D').mean().interpolate('linear')
df2 = df['AdjClose'].resample('D').mean().interpolate('linear')
ml = ps.Model(df2)
# Add the recharge data as explanatory variable
sm = ps.StressModel(dfr, ps.Gamma, name='Returns', settings="evap")
ml.add_stressmodel(sm)
ml.solve()
ml.plot()
ml.plots.results(figsize=(10, 6))
ml.stats.summary()
# Create a model object by passing it the observed series
ml2 = ps.Model(df2)
# Add the recharge data as explanatory variable
ts1 = ps.RechargeModel(dfh, dfl, ps.Gamma, name='AdjClose',
recharge=ps.rch.Linear(), settings=("prec", "evap"))
ml2.add_stressmodel(ts1)
# Solve the model
ml2.solve()
# Plot the results
ml2.plot()
# Statistics
ml2.stats.summary()
###Output
INFO: Inferred frequency for time series AdjClose: freq=D
INFO: Inferred frequency for time series High: freq=D
INFO: Inferred frequency for time series Low: freq=D
INFO: Time Series High was extended to 2011-01-08 00:00:00 with the mean value of the time series.
INFO: Time Series Low was extended to 2011-01-08 00:00:00 with the mean value of the time series.
C:\Users\Tin Hang\Anaconda3\lib\site-packages\pastas\model.py:1658: RuntimeWarning: invalid value encountered in log10
atol = np.min([1e-8, 10**(np.round(np.log10(pmin)) - 1)])
|
.ipynb_checkpoints/Landuse_classification-Copy14-checkpoint.ipynb | ###Markdown
Define working environment The following cells are used to: - Import needed libraries - Set the environment variables for Python, Anaconda, GRASS GIS and R statistical computing - Define the ["GRASSDATA" folder](https://grass.osgeo.org/grass73/manuals/helptext.html), the name of the "location" and the "mapset" where you will work. **Import libraries**
###Code
## Import libraries needed for setting parameters of operating system
import os
import sys
###Output
_____no_output_____
###Markdown
Environment variables when working on Linux Mint **Set 'Python' and 'GRASS GIS' environment variables** Here, we set [the environment variables that allow the use of GRASS GIS](https://grass.osgeo.org/grass64/manuals/variables.html) inside this Jupyter notebook. Please change the directory paths according to your own system configuration.
###Code
### Define GRASS GIS environment variables for LINUX UBUNTU Mint 18.1 (Serena)
# Check whether the environment variables exist and create them (empty) if they do not.
if not 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH']=''
if not 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH']=''
# Set environmental variables
os.environ['GISBASE'] = '/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu'
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'bin')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'script')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
#os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass','script')
os.environ['PYTHONLIB'] = '/usr/lib/python2.7'
os.environ['LD_LIBRARY_PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
os.environ['GIS_LOCK'] = '$$'
os.environ['GISRC'] = os.path.join(os.environ['HOME'],'.grass7','rc')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','bin')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','scripts')
## Define GRASS-Python environment
sys.path.append(os.path.join(os.environ['GISBASE'],'etc','python'))
###Output
_____no_output_____
###Markdown
**Import GRASS Python packages**
###Code
## Import libraries needed to launch GRASS GIS in the jupyter notebook
import grass.script.setup as gsetup
## Import libraries needed to call GRASS using Python
import grass.script as gscript
from grass.script import core as grass
###Output
_____no_output_____
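The introduction above also announces a GRASSDATA folder, a location and a mapset, but the corresponding cell is not included in this excerpt. A minimal session start with grass.script.setup could look like the sketch below; the folder and the names are placeholders, not the author's actual values:

```python
## Hypothetical GRASSDATA folder, location and mapset (adapt to your own setup)
gisdb = os.path.join(os.environ['HOME'], 'GRASSDATA')
location = 'my_location'
mapset = 'my_mapset'
## Launch the GRASS GIS session inside the notebook and check it
gsetup.init(os.environ['GISBASE'], gisdb, location, mapset)
print(gscript.gisenv())
```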
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** **Display current environment variables of your computer**
###Code
## Display the current defined environment variables
for key in os.environ.keys():
print "%s = %s \t" % (key,os.environ[key])
###Output
MDMSESSION = mate
MANDATORY_PATH = /usr/share/gconf/mate.mandatory.path
MATE_DESKTOP_SESSION_ID = this-is-deprecated
LESSOPEN = | /usr/bin/lesspipe %s
MDM_LANG = fr_BE.UTF-8
LOGNAME = tais
USER = tais
HOME = /home/tais
XDG_VTNR = 9
PATH = /usr/local/bin:/home/tais/BIN:/home/tais/bin:/home/tais/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/bin:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/script:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/lib:/home/tais/.grass7/addons:/home/tais/.grass7/addons/bin:/home/tais/.grass7/addons:/home/tais/.grass7/addons/scripts
CLICOLOR = 1
DISPLAY = :0.0
SSH_AGENT_PID = 5974
LANG = fr_BE.UTF-8
TERM = xterm-color
SHELL = /bin/bash
GIS_LOCK = $$
XAUTHORITY = /home/tais/.Xauthority
SESSION_MANAGER = local/tais-HP-Z620-Workstation:@/tmp/.ICE-unix/5837,unix/tais-HP-Z620-Workstation:/tmp/.ICE-unix/5837
SHLVL = 1
QT_LINUX_ACCESSIBILITY_ALWAYS_ON = 1
INSIDE_CAJA_PYTHON =
QT_ACCESSIBILITY = 1
LD_LIBRARY_PATH = :/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/lib
COMPIZ_CONFIG_PROFILE = mate
WINDOWPATH = 9
GTK_OVERLAY_SCROLLING = 0
PYTHONPATH = :/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python/grass:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python/grass/script
GISBASE = /home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu
CLUTTER_BACKEND = x11
USERNAME = tais
XDG_SESSION_DESKTOP = mate
GDM_XSERVER_LOCATION = local
XDG_RUNTIME_DIR = /run/user/1000
JPY_PARENT_PID = 28049
QT_STYLE_OVERRIDE = gtk
SSH_AUTH_SOCK = /run/user/1000/keyring/ssh
VTE_VERSION = 4205
GDMSESSION = mate
GISRC = /home/tais/.grass7/rc
GIT_PAGER = cat
XDG_CONFIG_DIRS = /etc/xdg/xdg-mate:/etc/xdg
XDG_CURRENT_DESKTOP = MATE
XDG_SESSION_ID = c21
DBUS_SESSION_BUS_ADDRESS = unix:abstract=/tmp/dbus-oiw1S789SI,guid=e626cdc47bce079de737e4fe5a3fcda7
_ = /usr/local/bin/jupyter
XDG_SESSION_COOKIE = 8441891e86e24d76b9616edf516d5734-1514130855.90561-444848216
DESKTOP_SESSION = mate
WINDOWID = 88080563
LESSCLOSE = /usr/bin/lesspipe %s %s
DEFAULTS_PATH = /usr/share/gconf/mate.default.path
MPLBACKEND = module://ipykernel.pylab.backend_inline
MDM_XSERVER_LOCATION = local
GTK_MODULES = gail:atk-bridge
XDG_DATA_DIRS = /usr/share/mate:/usr/local/share/:/usr/share/:/usr/share/mdm/
PWD = /media/tais/data/Dropbox/ULB/MAUPP/Traitements/Landscape_metrics/r.li
COLORTERM = mate-terminal
PYTHONLIB = /usr/lib/python2.7
LS_COLORS = rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:
PAGER = cat
XDG_SEAT = seat0
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** Define functions This section of the notebook is dedicated to defining functions which will then be called later in the script. If you want to create your own functions, define them here. Function for computing processing time The "print_processing_time" is used to calculate and display the processing time for various stages of the processing chain. At the beginning of each major step, the current time is stored in a new variable, using [time.time() function](https://docs.python.org/2/library/time.html). At the end of the stage in question, the "print_processing_time" function is called and takes as argument the name of this new variable containing the recorded time at the beginning of the stage, and an output message.
###Code
## Import library for managing time in python
import time
## Function "print_processing_time()" computes the processing time and returns a printable message.
# The argument "begintime" expects a variable containing the begin time (result of time.time()) of the process for which to compute the processing time.
# The argument "printmessage" expects a string with information about the process.
def print_processing_time(begintime, printmessage):
endtime=time.time()
processtime=endtime-begintime
remainingtime=processtime
days=int((remainingtime)/86400)
remainingtime-=(days*86400)
hours=int((remainingtime)/3600)
remainingtime-=(hours*3600)
minutes=int((remainingtime)/60)
remainingtime-=(minutes*60)
seconds=round((remainingtime)%60,1)
if processtime<60:
finalprintmessage=str(printmessage)+str(seconds)+" seconds"
elif processtime<3600:
finalprintmessage=str(printmessage)+str(minutes)+" minutes and "+str(seconds)+" seconds"
elif processtime<86400:
finalprintmessage=str(printmessage)+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds"
elif processtime>=86400:
finalprintmessage=str(printmessage)+str(days)+" days, "+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds"
return finalprintmessage
###Output
_____no_output_____
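###Markdown
As a quick illustration of the pattern described above, the sketch below times a dummy step (the variable name and message text are purely illustrative):
###Code
## Minimal usage sketch of print_processing_time() (illustrative only)
begintime_example=time.time()
dummy=[i**2 for i in range(1000000)] # some processing step to be timed
print(print_processing_time(begintime_example,"Dummy step achieved in "))
###Output
_____no_output_____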
###Markdown
Function for creation of configuration file for r.li (landscape units provided as polygons) (multiprocessed)
###Code
##### Function that creates the r.li configuration file for a list of landcover rasters.
### It creates, in a single call, as many configuration files as there are rasters provided in 'listoflandcoverraster'.
### It should be used only in case studies with several landcover rasters and a single landscape unit layer:
### the landscape unit layer is fixed while the landcover rasters change.
# 'listoflandcoverraster' expects a list with the names (strings) of the landcover rasters.
# 'landscape_polygons' expects the name (string) of the vector layer containing the polygons to be used as landscape units.
# 'masklayerhardcopy' expects a boolean value (True/False) depending on whether the user wants to create hard copies of the landscape unit mask layers or not.
# 'returnlistpath' expects a boolean value (True/False) indicating whether a list containing the paths to the configuration files should be returned.
# 'ncores' expects an integer corresponding to the number of cores to be used for parallelization.
# Import libraries for multiprocessing
import multiprocessing
from multiprocessing import Pool
from functools import partial
# Function that copies the landscape unit raster masks to a new layer whose name corresponds to the current 'landcover_raster'
def copy_landscapeunitmasks(current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox,cat):
### Copy the landscape units mask for the current 'cat'
# Define the name of the current "current_landscapeunit_rast" layer
current_landscapeunit_rast=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat)
base_landscapeunit_rast=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat)
# Copy the landscape unit created for the first landcover map in order to match the name of the current landcover map
gscript.run_command('g.copy', overwrite=True, quiet=True, raster=(base_landscapeunit_rast,current_landscapeunit_rast))
# Add the line to the text variable
text="MASKEDOVERLAYAREA "+current_landscapeunit_rast+"|"+landscapeunit_bbox[cat]
return text
# Function that create the r.li configuration file for the base landcover raster and then for all the binary rasters
def create_rli_configfile(listoflandcoverraster,landscape_polygons,
masklayerhardcopy=False,returnlistpath=True,ncores=2):
# Check if 'listoflandcoverraster' is not empty
if len(listoflandcoverraster)==0:
sys.exit("The list of landcover raster is empty and should contain at least one raster name")
# Check if the rasters provided in 'listoflandcoverraster' exist, to avoid errors in multiprocessing
for cur_rast in listoflandcoverraster:
try:
mpset=cur_rast.split("@")[1]
except:
mpset=""
if cur_rast.split("@")[0] not in [x[0] for x in gscript.list_pairs(type='raster',mapset=mpset)]:
sys.exit('Raster <%s> not found' %cur_rast)
# Check if the rasters provided in 'listoflandcoverraster' have the same extent and spatial resolution
raster={}
for x, rast in enumerate(listoflandcoverraster):
raster[x]=gscript.raster_info(rast)
key_list=raster.keys()
for x in key_list[1:]:
for info in ('north','south','east','west','ewres','nsres'):
if not raster[0][info]==raster[x][info]:
sys.exit("Some rasters provided in the list have a different spatial resolution or extent, please check")
# Get the version of GRASS GIS
version=grass.version()['version'].split('.')[0]
# Define the folder to save the r.li configuration files
if sys.platform=="win32":
rli_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li")
else:
rli_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li")
if not os.path.exists(rli_dir):
os.makedirs(rli_dir)
## Create an ordered list with the 'cat' value of landscape units to be processed.
list_cat=[int(x) for x in gscript.parse_command('v.db.select', quiet=True,
map=landscape_polygons, column='cat', flags='c')]
list_cat.sort()
# Declare an empty dictionary which will contain the north, south, east, west values for each landscape unit
landscapeunit_bbox={}
# Declare an empty list which will contain the paths of the configuration files created
listpath=[]
# Declare an empty string variable which will contain the core part of the r.li configuration file
maskedoverlayarea_1=""
# Duplicate 'listoflandcoverraster' in a new variable called 'tmp_list'
tmp_list=list(listoflandcoverraster)
# Set the current landcover raster as the first of the list
base_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time
# Loop trough the landscape units
for cat in list_cat:
# Extract the current landscape unit polygon as temporary vector
tmp_vect="tmp_"+base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat)
gscript.run_command('v.extract', overwrite=True, quiet=True,
input=landscape_polygons, cats=cat, output=tmp_vect)
# Set region to match the extent of the current landscape polygon, with resolution and alignement matching the landcover raster
gscript.run_command('g.region', vector=tmp_vect, align=base_landcover_raster)
# Rasterize the landscape unit polygon
landscapeunit_rast=tmp_vect[4:]
gscript.run_command('v.to.rast', overwrite=True, quiet=True, input=tmp_vect, output=landscapeunit_rast, use='cat', memory='3000')
# Remove temporary vector
gscript.run_command('g.remove', quiet=True, flags="f", type='vector', name=tmp_vect)
# Set the region to match the raster landscape unit extent and save the region info in a dictionary
region_info=gscript.parse_command('g.region', raster=landscapeunit_rast, flags='g')
n=str(round(float(region_info['n']),5)) #the config file need 5 decimal for north and south
s=str(round(float(region_info['s']),5))
e=str(round(float(region_info['e']),6)) #the config file need 6 decimal for east and west
w=str(round(float(region_info['w']),6))
# Save the coordinates of the bbox in the dictionary (n,s,e,w)
landscapeunit_bbox[cat]=n+"|"+s+"|"+e+"|"+w
# Add the line to the maskedoverlayarea_1 variable
maskedoverlayarea_1+="MASKEDOVERLAYAREA "+landscapeunit_rast+"|"+landscapeunit_bbox[cat]+"\n"
# Compile the content of the r.li configuration file
config_file_content="SAMPLINGFRAME 0|0|1|1\n"
config_file_content+=maskedoverlayarea_1
config_file_content+="RASTERMAP "+base_landcover_raster+"\n"
config_file_content+="VECTORMAP "+landscape_polygons+"\n"
# Create a new file and save the content
configfilename=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]
path=os.path.join(rli_dir,configfilename)
listpath.append(path)
f=open(path, 'w')
f.write(config_file_content)
f.close()
# Continue creating r.li configuration files (and landscape unit rasters) for the rest of the landcover rasters provided
while len(tmp_list)>0:
# Initialize 'maskedoverlayarea_2' variable as an empty string
maskedoverlayarea_2=""
# Set the current landcover raster as the first of the list
current_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time
if masklayerhardcopy: # If the user asked for hard copy of the landscape units mask layers
# Copy all the landscape units masks for the current landcover raster
p=Pool(ncores) #Create a pool of processes and launch them using 'map' function
func=partial(copy_landscapeunitmasks,current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox) # Set fixed argument of the function
maskedoverlayarea_2=p.map(func,list_cat) # Launch the processes for as many items in the list and get the ordered results using map function
p.close()
p.join()
# Compile the content of the r.li configuration file
config_file_content="SAMPLINGFRAME 0|0|1|1\n"
config_file_content+="\n".join(maskedoverlayarea_2)+"\n"
config_file_content+="RASTERMAP "+current_landcover_raster+"\n"
config_file_content+="VECTORMAP "+landscape_polygons+"\n"
else: # If the user not asked for hard copy
# Compile the content of the r.li configuration file
config_file_content="SAMPLINGFRAME 0|0|1|1\n"
config_file_content+=maskedoverlayarea_1 # If user do not asked for hard copy, the mask layers are the same than for the first configuration file
config_file_content+="RASTERMAP "+current_landcover_raster+"\n" # But the name of the RASTERMAP should be the one of the current landcover raster
config_file_content+="VECTORMAP "+landscape_polygons+"\n"
# Create a new file and save the content
configfilename=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]
path=os.path.join(rli_dir,configfilename)
listpath.append(path)
f=open(path, 'w')
f.write(config_file_content)
f.close()
# Return the list of paths of the configuration files created, if the option is activated
if returnlistpath:
return list_cat,listpath
else:
return list_cat
###Output
_____no_output_____
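###Markdown
To make the structure of the generated files explicit, here is an illustrative example of what one configuration file produced by the function above looks like (the raster and vector names match those used later in this notebook, while the bounding-box coordinates are placeholders; one MASKEDOVERLAYAREA line is written per landscape unit):
###Code
## Illustrative content of one r.li configuration file built by create_rli_configfile()
## (the bounding-box coordinates below are placeholders, not values from an actual run)
example_config_content="SAMPLINGFRAME 0|0|1|1\n"
example_config_content+="MASKEDOVERLAYAREA classif_streetblocks_1|1370000.00000|1369500.00000|660000.000000|659500.000000\n"
example_config_content+="MASKEDOVERLAYAREA classif_streetblocks_2|1371000.00000|1370500.00000|661000.000000|660500.000000\n"
example_config_content+="RASTERMAP classif@test_rli\n"
example_config_content+="VECTORMAP streetblocks\n"
print(example_config_content)
###Output
_____no_output_____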
###Markdown
Function for creation of binary raster from a categorical raster (multiprocessed)
###Code
###### Function creating a binary raster for each category of a base raster.
### The function runs within the current region. If a category does not exist in the current region, no binary map will be produced.
# 'categorical_raster' expects the name of the base raster to be used. It is the one from which one binary raster will be produced for each category value.
# 'prefix' expects a string corresponding to the prefix of the names of the binary rasters which will be produced.
# 'setnull' expects a boolean value (True/False) indicating whether the output binary should be 1/0 or 1/null.
# 'returnlistraster' expects a boolean value (True/False) indicating whether a list containing the names of the binary rasters should be returned.
# 'category_list' expects a list of integers corresponding to specific categories of the base raster to be used.
# 'ncores' expects an integer corresponding to the number of cores to be used for parallelization.
# Import libraries for multiprocessing
import multiprocessing
from multiprocessing import Pool
from functools import partial
def create_binary_raster(categorical_raster,prefix="binary",setnull=False,returnlistraster=True,category_list=None,ncores=2):
# Check if the raster exists, to avoid errors in multiprocessing
try:
mpset=categorical_raster.split("@")[1]
except:
mpset=""
if categorical_raster not in gscript.list_strings(type='raster',mapset=mpset):
sys.exit('Raster <%s> not found' %categorical_raster)
# Check that the number of cores does not exceed the number available
nbcpu=multiprocessing.cpu_count()
if ncores>=nbcpu:
ncores=nbcpu-1
returnlist=[] #Declare empty list for return
#gscript.run_command('g.region', raster=categorical_raster, quiet=True) #Set the region
null='null()' if setnull else '0' #Set the value for r.mapcalc
minclass=1 if setnull else 2 #Set the value to check if the binary raster is empty
if category_list == None: #If no category_list provided
category_list=[cl for cl in gscript.parse_command('r.category',map=categorical_raster,quiet=True)]
for i,x in enumerate(category_list): #Make sure the format is UTF8 and not Unicode
category_list[i]=x.encode('UTF8')
category_list.sort(key=float) #Sort the raster categories in ascending.
p=Pool(ncores) #Create a pool of processes and launch them using 'map' function
func=partial(get_binary,categorical_raster,prefix,null,minclass) # Set the two fixed argument of the function
returnlist=p.map(func,category_list) # Launch one process per item of 'category_list' and get the ordered results using the map function
p.close()
p.join()
if returnlistraster:
return returnlist
#### Function that extract binary raster for a specified class (called in 'create_binary_raster' function)
def get_binary(categorical_raster,prefix,null,minclass,cl):
binary_class=prefix+"_"+cl
gscript.run_command('r.mapcalc', expression=binary_class+'=if('+categorical_raster+'=='+str(cl)+',1,'+null+')',overwrite=True, quiet=True)
if len(gscript.parse_command('r.category',map=binary_class,quiet=True))>=minclass: #Check if created binary is not empty
return binary_class
else:
gscript.run_command('g.remove', quiet=True, flags="f", type='raster', name=binary_class)
###Output
_____no_output_____
###Markdown
Function for computation of spatial metrics at landscape level (multiprocessed)
###Code
##### Function that computes different landscape metrics (spatial metrics) at the landscape level.
### The metrics computed are "dominance","pielou","renyi","richness","shannon","simpson".
### It is important to set the computation region before running this script so that it matches the extent of the 'raster' layer.
# 'configfile' expects the path (string) to the configuration file corresponding to the 'raster' layer.
# 'raster' expects the name (string) of the landcover map on which the landscape metrics will be computed.
# 'returnlistresult' expects a boolean value (True/False) indicating whether a list containing the paths to the result files should be returned.
# 'ncores' expects an integer corresponding to the number of cores to be used for parallelization.
# Import libraries for multiprocessing
import multiprocessing
from multiprocessing import Pool
from functools import partial
def compute_landscapelevel_metrics(configfile, raster, spatial_metric):
filename=raster.split("@")[0]+"_%s" %spatial_metric
outputfile=os.path.join(os.path.split(configfile)[0],"output",filename)
if spatial_metric=='renyi': # The alpha parameter was set to 2 as in https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,
input=raster,config=configfile,alpha='2', output=filename)
else:
gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,
input=raster,config=configfile, output=filename)
return outputfile
def get_landscapelevel_metrics(configfile, raster, returnlistresult=True, ncores=2):
# Check if the raster exists, to avoid errors in multiprocessing
try:
mpset=raster.split("@")[1]
except:
mpset=""
if raster not in gscript.list_strings(type='raster',mapset=mpset):
sys.exit('Raster <%s> not found' %raster)
# Check if the configfile exists, to avoid errors in multiprocessing
if not os.path.exists(configfile):
sys.exit('Configuration file <%s> not found' %configfile)
# List of metrics to be computed
spatial_metric_list=["dominance","pielou","renyi","richness","shannon","simpson"]
# Check that the number of cores does not exceed the number available
nbcpu=multiprocessing.cpu_count()
if ncores>=nbcpu:
ncores=nbcpu-1
if ncores>len(spatial_metric_list):
ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute
#Declare empty list for return
returnlist=[]
# Create a new pool
p=Pool(ncores)
# Set the two fixed argument of the 'compute_landscapelevel_metrics' function
func=partial(compute_landscapelevel_metrics,configfile, raster)
# Launch one process per item of 'spatial_metric_list' and get the ordered results using the map function
returnlist=p.map(func,spatial_metric_list)
p.close()
p.join()
# Return list of paths to result files
if returnlistresult:
return returnlist
###Output
_____no_output_____
###Markdown
Function for computation of spatial metrics at class level (multiprocessed)
###Code
##### Function that computes different landscape metrics (spatial metrics) at the class level.
### The metrics computed are "patch number (patchnum)","patch density (patchdensity)","mean patch size (mps)",
### "coefficient of variation of patch area (padcv)","range of patch area size (padrange)",
### "standard deviation of patch area (padsd)", "shape index (shape)", "edge density (edgedensity)".
### It is important to set the computation region before running this script so that it matches the extent of the 'raster' layer.
# 'configfile' expects the path (string) to the configuration file corresponding to the 'raster' layer.
# 'raster' expects the name (string) of the landcover map on which the landscape metrics will be computed.
# 'returnlistresult' expects a boolean value (True/False) indicating whether a list containing the paths to the result files should be returned.
# 'ncores' expects an integer corresponding to the number of cores to be used for parallelization.
# Import libraries for multiprocessing
import multiprocessing
from multiprocessing import Pool
from functools import partial
def compute_classlevel_metrics(configfile, raster, spatial_metric):
filename=raster.split("@")[0]+"_%s" %spatial_metric
gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,
input=raster,config=configfile,output=filename)
outputfile=os.path.join(os.path.split(configfile)[0],"output",filename)
return outputfile
def get_classlevel_metrics(configfile, raster, returnlistresult=True, ncores=2):
# Check if the raster exists, to avoid errors in multiprocessing
try:
mpset=raster.split("@")[1]
except:
mpset=""
if raster not in [x.split("@")[0] for x in gscript.list_strings(type='raster',mapset=mpset)]:
sys.exit('Raster <%s> not found' %raster)
# Check if the configfile exists, to avoid errors in multiprocessing
if not os.path.exists(configfile):
sys.exit('Configuration file <%s> not found' %configfile)
# List of metrics to be computed
spatial_metric_list=["patchnum","patchdensity","mps","padcv","padrange","padsd","shape","edgedensity"]
# Check that the number of cores does not exceed the number available
nbcpu=multiprocessing.cpu_count()
if ncores>=nbcpu:
ncores=nbcpu-1
if ncores>len(spatial_metric_list):
ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute
# Declare empty list for return
returnlist=[]
# Create a new pool
p=Pool(ncores)
# Set the two fixed argument of the 'compute_classlevel_metrics' function
func=partial(compute_classlevel_metrics,configfile, raster)
# Launch one process per item of 'spatial_metric_list' and get the ordered results using the map function
returnlist=p.map(func,spatial_metric_list)
p.close()
p.join()
# Return list of paths to result files
if returnlistresult:
return returnlist
###Output
_____no_output_____
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** User inputs
###Code
## Define an empty dictionary for saving user inputs
user={}
## Enter the path to GRASSDATA folder
user["gisdb"] = "/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga"
## Enter the name of the location (existing or for a new one)
user["location"] = "SPIE_subset"
## Enter the EPSG code for this location
user["locationepsg"] = "32630"
## Enter the name of the mapset to use for segmentation
user["mapsetname"] = "test_rli"
###Output
_____no_output_____
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** Compute spatial metrics for deriving land use in street blocs **Launch GRASS GIS working session**
###Code
## Set the name of the mapset in which to work
mapsetname=user["mapsetname"]
## Launch GRASS GIS working session in the mapset
if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)):
gsetup.init(os.environ['GISBASE'], user["gisdb"], user["location"], mapsetname)
print "You are now working in mapset '"+mapsetname+"'"
else:
print "'"+mapsetname+"' mapset doesn't exists in "+user["gisdb"]
###Output
You are now working in mapset 'test_rli'
###Markdown
**Set the path to the r.li folder for configuration file and for results**
###Code
# Define path of the outputfile (in r.li folder)
version=grass.version()['version'].split('.')[0] # Get the version of GRASS GIS
if sys.platform=="win32":
rli_config_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li")
rli_output_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li","output")
else:
rli_config_dir=os.path.join(os.environ['HOME'],"GRASS"+version,"r.li")
rli_output_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li","output")
if not os.path.exists(rli_config_dir):
os.makedirs(rli_config_dir)
if not os.path.exists(rli_output_dir):
os.makedirs(rli_output_dir)
# Print
print "GRASS GIS add-on's r.li configuration files will be saved under <%s>."%(rli_config_dir,)
print "GRASS GIS add-on's r.li outputs will be saved under <%s>."%(rli_output_dir,)
###Output
GRASS GIS add-on's r.li configuration files will be saved under </home/tais/GRASS7/r.li>.
GRASS GIS add-on's r.li outputs will be saved under </home/tais/.grass7/r.li/output>.
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** Define the name of the base landcover map and landscape units polygons
###Code
# Set the name of the 'base' landcover map
baselandcoverraster="classif@test_rli"
# Set the name of the vector polygon layer containing the landscape units
landscape_polygons="streetblocks"
###Output
_____no_output_____
###Markdown
Import shapefile containing street blocks polygons
###Code
# Set the path to the shapefile containing streetblocks polygons
pathtoshp="/media/tais/data/Dropbox/ULB/MAUPP/Landuse_mapping/Test_spatial_metrics_computation/Data/streetblocks_subset.shp"
# Import shapefile
gscript.run_command('v.in.ogr', quiet=True, overwrite=True, input=pathtoshp, output=landscape_polygons)
###Output
_____no_output_____
###Markdown
Create binary rasters from the base landcover map
###Code
# Save time for computing processing time
begintime=time.time()
# Create as many binary raster layers as there are categorical values in the base landcover map
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
pref=baselandcoverraster.split("@")[0]+"_cl" #Set the prefix
raster_list=[] # Initialize a empty list for results
raster_list=create_binary_raster(baselandcoverraster,
prefix=pref,setnull=True,returnlistraster=True,
category_list=None,ncores=15) #Extract binary raster
# Compute and print processing time
print_processing_time(begintime,"Extraction of binary rasters achieved in ")
# Insert the name of the base landcover map at first position in the list
raster_list.insert(0,baselandcoverraster)
# Display the raster to be used for landscape analysis
raster_list
###Output
_____no_output_____
###Markdown
Create r.li configuration file for a list of landcover rasters
###Code
# Save time for computing processing time
begintime=time.time()
# Run the creation of the r.li configuration files and associated raster layers
list_cats,list_configfile=create_rli_configfile(raster_list,landscape_polygons,masklayerhardcopy=False,returnlistpath=True,ncores=20)
# Compute and print processing time
print_processing_time(begintime,"Creation of r.li configuration files achieved in ")
# Display the path to the configuration files created
list_configfile
###Output
_____no_output_____
###Markdown
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** Compute spatial metrics at landscape level
###Code
# Initialize an empty list which will contains the resultfiles
resultfiles=[]
# Save time for computing processing time
begintime=time.time()
# Get the path to the configuration file for the base landcover raster
currentconfigfile=list_configfile[0]
# Get the name of the base landcover raster
currentraster=raster_list[0]
# Set the region to match the extent of the base raster
gscript.run_command('g.region', raster=currentraster, quiet=True)
# Compute all landscape-level metrics for the base landcover raster and collect the result file paths
resultfiles.append(get_landscapelevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=15))
# Compute and print processing time
print_processing_time(begintime,"Computation of spatial metric achieved in ")
resultfiles
###Output
_____no_output_____
###Markdown
Compute spatial metrics at class level
###Code
# Save time for computing processing time
begintime=time.time()
# Get a list with paths to the configuration file for class level metrics
classlevelconfigfiles=list_configfile[1:]
# Get a list with name of binary landcover raster for class level metrics
classlevelrasters=raster_list[1:]
for x,currentraster in enumerate(classlevelrasters[:]):
# Get the path to the configuration file for the base landcover raster
currentconfigfile=classlevelconfigfiles[x]
# Set the region to match the extent of the base raster
gscript.run_command('g.region', raster=currentraster, quiet=True)
# Compute all class-level metrics for the current binary raster and collect the result file paths
resultfiles.append(get_classlevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=10))
# Compute and print processing time
print_processing_time(begintime,"Computation of spatial metric achieved in ")
resultfiles
# Flat the 'resultfiles' list which contains several lists
resultfiles=[item for sublist in resultfiles for item in sublist]
resultfiles
###Output
_____no_output_____
###Markdown
Edit the r.li result files so that each row carries the correct 'cat' value of its landscape unit
###Code
import csv, shutil
from itertools import izip
for f in resultfiles:
f_in=open(f)
f_tmp=open(f+'_tmp',"w")
f_in_reader=csv.reader(f_in,delimiter='|')
f_tmp_writer=csv.writer(f_tmp,delimiter='|')
f_tmp_writer.writerow(['cat',"_".join(os.path.split(f)[-1].split("_")[1:])])
for i,row in enumerate(f_in_reader):
newrow=[]
newrow.append(list_cats[i])
newrow.append(row[1])
f_tmp_writer.writerow(newrow)
f_in.close()
f_tmp.close()
os.remove(f)
shutil.copy2(f+'_tmp',f)
os.remove(f+'_tmp')
###Output
_____no_output_____
###Markdown
Compute some special metrics
###Code
# Set the name of the nDSM layer
ndsm="ndsm"
# Set the name of the NDVI layer
ndvi="ndvi"
# Set the name of the NDWI layer
ndwi="ndwi"
# Set the prefix of SAR textures layer
SAR_prefix="SAR_w"
###Output
_____no_output_____
###Markdown
Mean and standard deviation of SAR textures, NDVI, NDWI
###Code
# Set up a list with name of raster layer to be used
ancillarylayers=[]
ancillarylayers.append(ndvi)
ancillarylayers.append(ndwi)
[ancillarylayers.append(x) for x in gscript.list_strings("rast", pattern=SAR_prefix, flag='r')] #Append SAR textures
print "Layer to be used :\n\n"+'\n'.join(ancillarylayers)
# Set the path to the file for i.segment.stats results for metrics_ndvi_ndwi_sar
metrics_ndvi_ndwi_sar=os.path.join(rli_output_dir,"metrics_ndvi_ndwi_sar")
# Create a raster corresponding to the landscape units (for computing statistics using i.segment.stats)
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
raster_landscapeunits="temp_%s"%landscape_polygons.split("@")[0]
gscript.run_command('v.to.rast', overwrite=True, input=landscape_polygons, output=raster_landscapeunits, use='cat')
# Save time for computing processing time
begintime=time.time()
###### Compute shape metrics as well as mean and stddev of ancillary layers for each landscape unit
## Set number of cores to be used
ncores=len(ancillarylayers)
nbcpu=multiprocessing.cpu_count()
if ncores>=nbcpu:
ncores=nbcpu-1
if ncores>len(ancillarylayers):
ncores=len(ancillarylayers) #Adapt number of cores to number of metrics to compute
# Run i.segment.stats
gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits,
raster_statistics='stddev,median',
area_measures='area,perimeter,compact_circle,compact_square,fd',
rasters=','.join(ancillarylayers),
csvfile=metrics_ndvi_ndwi_sar,
processes=ncores)
# Compute and print processing time
print_processing_time(begintime,"Metrics computed in ")
resultfiles.append(metrics_ndvi_ndwi_sar)
resultfiles
###Output
_____no_output_____
###Markdown
Mean and standard deviation of building's height Create raster with nDSM value of 'buildings' pixels
###Code
# Set pixel value of 'buildings' on the 'baselandcoverraster'
buildpixel=11
# Set the name of the new layer containing height of buildings
buildings_height='buildings_height'
# Set the path to the file for i.segment.stats results for metrics_buildings_height
metrics_buildings_height=os.path.join(rli_output_dir,"metrics_buildings_height")
# Create temporary files which will contain intermediate results
TMP_sumheights=grass.tempfile()+'_sumheights.csv'
TMP_nbrbuildpixels=grass.tempfile()+'_nbrbuildpixels.csv'
# Save time for computing processing time
begintime=time.time()
# Create a raster layer with height of pixels classified as 'buildings'
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
formula="%s=if(%s==%s, %s, 0)"%(buildings_height,baselandcoverraster,buildpixel,ndsm)
gscript.mapcalc(formula, overwrite=True)
# Compute and print processing time
print_processing_time(begintime,"Creation of layer in ")
# Save time for computing processing time
begintime=time.time()
# Compute the sum of the building pixels' heights using i.segment.stats
gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits,
raster_statistics='sum', flags='s', rasters='buildings_height',
csvfile=TMP_sumheights,processes=ncores)
# Compute number of built pixels using i.segment.stats
binary_builtup_raster="%s_cl_%s"%(baselandcoverraster.split("@")[0],buildpixel)
gscript.run_command('g.copy', overwrite=True, raster='%s,tmp'%binary_builtup_raster)
gscript.run_command('r.null', map='tmp', null=0)
gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits,
raster_statistics='sum', flags='s', rasters='tmp',
csvfile=TMP_nbrbuildpixels,processes=ncores)
# Compute and print processing time
print_processing_time(begintime,"i.segment.stats run in ")
# Save time for computing processing time
begintime=time.time()
# Import library to be able to iterate over two files in the same loop
from itertools import izip
# Declare an empty dictionary
tmp_dic={}
for i, (line_from_file_1, line_from_file_2) in enumerate(izip(open(TMP_sumheights), open(TMP_nbrbuildpixels))):
if i==0:
continue
f1_items=line_from_file_1.split("\n")[0].split("|")
f2_items=line_from_file_2.split("\n")[0].split("|")
key=f1_items[0]
sumheight=f1_items[1]
nbpixel=f2_items[1]
try:
mean_height=float(sumheight)/float(nbpixel)
except ZeroDivisionError:
mean_height=0
tmp_dic[key]=mean_height
# Get the name of the first column
with open(TMP_sumheights) as f:
column_a=f.next().split("\n")[0].split("|")[0]
# Build the content of the file
content=[]
content.append((column_a,'mean_build_height'))
for key in tmp_dic.keys():
content.append((key,tmp_dic[key]))
# Create a new file
fout=open(metrics_buildings_height,"w")
writer=csv.writer(fout, delimiter='|')
writer.writerows(content)
fout.close()
# Compute and print processing time
print_processing_time(begintime,"Mean build pixels's height computed in ")
# Remove temporary layers
gscript.run_command('g.remove', flags='ef', type='raster', name='tmp')
# Remove temporary files
os.remove(TMP_sumheights)
os.remove(TMP_nbrbuildpixels)
resultfiles.append(metrics_buildings_height)
resultfiles
###Output
_____no_output_____
###Markdown
Proportion of each individual class in the landcover map
###Code
# TODO: compute the proportion of each landcover class per landscape unit and remove the temporary 'temp_streetblocks' raster layer
###Output
_____no_output_____
###Markdown
Combine all .csv files together
###Code
## Function which executes a left join using individual .csv files.
## The first column of each file (here the 'cat' of each landscape unit) is used as the join key.
# The argument "fileList" expects a list of paths to the individual .csv files to be joined.
# The argument "outfile" expects a string containing the path to the output file to create.
# The argument "overwrite" expects a True/False value allowing or not to overwrite an existing outfile.
# The argument "pattern" expects a string containing the filename pattern to use; wildcards are possible (*.csv for all .csv files). It is currently unused.
import os,sys,csv
import glob
def leftjoin_csv(fileList,outfile,separator_in=";",separator_out=";",overwrite=False,pattern=None):
# Stop execution if the outputfile exists and cannot be overwritten
if os.path.isfile(outfile) and overwrite==False:
print "File '"+str(outfile)+"' already exists and overwrite option is not enabled."
else:
if os.path.isfile(outfile) and overwrite==True: # If outputfile exitst and can be overwriten
os.remove(outfile)
print "File '"+str(outfile)+"' has been overwritten."
if len(fileList)<=1: #Check if there are at least 2 files in the list
sys.exit("This function requires at least two .csv files to be joined together.")
# Save all the values in a dictionary whose key corresponds to the first column
headerdict={}
outputdict={}
for f in [open(f) for f in fileList]:
fin=csv.reader(f, delimiter='|')
for i,row in enumerate(f):
row_items=row.split("\r")[0].split("\n")[0]
key=row_items.split("|")[0]
value=row_items.split("|")[1:]
for v in value:
if i==0: # If first line
try:
headerdict[key].append(v)
except:
headerdict[key]=[v,]
else:
try:
outputdict[key].append(v)
except:
outputdict[key]=[v,]
# Write the dictionary with its header to the output csv file
outputcsv=open(outfile,"w")
for key in headerdict.keys():
outputcsv.write(key+separator_out)
outputcsv.write(separator_out.join(headerdict[key]))
outputcsv.write("\n")
for key in outputdict.keys():
outputcsv.write(key+separator_out)
outputcsv.write(separator_out.join(outputdict[key]))
outputcsv.write("\n")
outputcsv.close()
# Create a .csvt file with type of each column
csvt=open(outfile+"t","w")
results=open(outfile,"r")
header=results.next()
typecolumn=[]
typecolumn.append("Integer")
for column in header.rstrip("\n").split(separator_out)[1:]:
typecolumn.append("Real")
csvt.write(separator_out.join(typecolumn))
csvt.close()
# Print what happened
print str(len(fileList))+" individual .csv files were joined together."
# return headerdict,outputdict
# Define the path for the .csv with final results
outfile=os.path.join(rli_output_dir,"land_use_metrics.csv")
# Join all result files together in a new .csv file
leftjoin_csv(resultfiles, outfile, separator_in="|", separator_out=";", overwrite=True)
###Output
72 individual .csv files were joined together.
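###Markdown
The toy example below illustrates how leftjoin_csv() merges columns on the first ('cat') column, using two hypothetical '|'-separated result files (the file names, paths and values are made up for illustration):
###Code
## Toy illustration of leftjoin_csv() on two hypothetical '|'-separated result files
import tempfile
tmp_dir=tempfile.mkdtemp()
file_a=os.path.join(tmp_dir,"classif_shannon")
file_b=os.path.join(tmp_dir,"classif_dominance")
fa=open(file_a,"w"); fa.write("cat|shannon\n1|0.42\n2|0.58\n"); fa.close()
fb=open(file_b,"w"); fb.write("cat|dominance\n1|1.10\n2|0.95\n"); fb.close()
joined_file=os.path.join(tmp_dir,"toy_joined.csv")
leftjoin_csv([file_a,file_b],joined_file,separator_in="|",separator_out=";",overwrite=True)
print(open(joined_file).read())
## Expected content (row order of the landscape units may vary):
## cat;shannon;dominance
## 1;0.42;1.10
## 2;0.58;0.95
###Output
_____no_output_____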
###Markdown
Display the .csv using pandas
###Code
import pandas as pd
# Load the .csv file in a pandas dataframe
df=pd.read_csv(outfile, sep=';',header=0)
# Display the dataframe
df
###Output
_____no_output_____
###Markdown
Move files to dedicated folder **Configuration files**
###Code
# Set the folder where to move the configuration files
finalfolder='/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga/Results_spatial_metrics/rli_config'
## Create the folder if does not exists
if not os.path.exists(finalfolder):
os.makedirs(finalfolder)
print "Folder '"+finalfolder+"' created"
## Copy the files to the final folder and remove them from the original folder
for configfile in list_configfile:
shutil.copy2(configfile,finalfolder)
os.remove(configfile)
###Output
_____no_output_____
###Markdown
**Result files**
###Code
# Set the folder where to move the result files
finalfolder='/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga/Results_spatial_metrics/rli_results'
## Create the folder if does not exists
if not os.path.exists(finalfolder):
os.makedirs(finalfolder)
print "Folder '"+finalfolder+"' created"
## Copy the files to the final folder and remove them from the original folder
for res_file in resultfiles:
shutil.copy2(res_file,finalfolder)
os.remove(res_file)
# Copy the final csv file with all the results
shutil.copy2(outfile,finalfolder)
os.remove(outfile)
shutil.copy2(outfile+'t',finalfolder)
os.remove(outfile+'t')
###Output
_____no_output_____
###Markdown
Make a copy of the .csv file with results, where 'null' values are empty cells **For .csvt**
###Code
# Define path to the .csv files
afile=os.path.join(finalfolder,os.path.split(outfile+'t')[-1])
pathtofile,extension=os.path.splitext(afile)
bfile=pathtofile+"_blanknull"+extension
# Make copy of the file
shutil.copy2(afile,bfile)
###Output
_____no_output_____
###Markdown
**For .csv**
###Code
# Define path to the .csv files
afile=os.path.join(finalfolder,os.path.split(outfile)[-1])
pathtofile,extension=os.path.splitext(afile)
bfile=pathtofile+"_blanknull"+extension
# Create a copy by removing the 'NULL' values
a=open(afile,'r')
b=open(bfile,'w')
for row in a:
newline="".join(row.split('NULL'))
b.write(newline)
###Output
_____no_output_____
###Markdown
Export the landscape polygons (with 'cat' column) as shapefile
###Code
# Define the name of the shapefile with landscape units and the 'cat' column
outputshp='/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga/Results_spatial_metrics/shapefile/Ouaga_subset_streetblocks.shp'
# Export vector layer as shapefile
gscript.run_command('v.out.ogr', flags='cem', overwrite=True,
input=landscape_polygons, output=outputshp, format='ESRI_Shapefile')
###Output
_____no_output_____
###Markdown
Join the .csv file to the landscape unit polygon layer
###Code
# Import .csv as new table in GRASS
csvfile=os.path.join(finalfolder,os.path.split(outfile)[-1])
gscript.run_command('db.in.ogr', overwrite=True, quiet=True, input=csvfile, output='spatial_metrics_table')
# Join the vector layer with the new table
gscript.run_command('v.db.join', quiet=True, map=landscape_polygons, column='cat',
other_table='spatial_metrics_table', other_column='cat_')
###Output
_____no_output_____ |
MachineLearning/supervised_machine_learning/decision_tree.ipynb | ###Markdown
Data Creation.
###Code
# X = np.array([["Green", 3], ["yello", 3], ["orange_color", 2], ["orange_color", 2], ["red", 1]])
# y = np.array(["apply", "apply", "orange", "orange", "cherry"])
# X = pd.DataFrame(X)
# y = pd.DataFrame(y)
# y.head
# Define the training data.
X, y = make_classification(n_samples=1000, n_classes=2, n_features=5)
# Reshape the target into a column vector.
y = y[:, np.newaxis]
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))
print("Shape of the target value ----------> {}".format(y.shape))
# display the data.
data = pd.DataFrame(X)
data.head()
# display the data.
data_y = pd.DataFrame(y)
data_y.head()
#define the parameters
sys.setrecursionlimit(2000)
param = {
"n_neighbours" : 5 # leftover parameter (not used by the decision tree below)
}
print("="*100)
decision_tree_cla = DecisionTreeClassifier(min_sample_split=2, max_depth=45)
# Train the model.
decision_tree_cla.train(X, y)
# Print the decision tree.
print("Printing the tree :).....")
decision_tree_cla.draw_tree()
# Predict the values.
y_pred = decision_tree_cla.predict(X)
#calculate accuracy.
acc = np.sum(y==y_pred)/X.shape[0]
print("="*100)
print("Accuracy of the prediction is {}".format(acc))
###Output
====================================================================================================
Printing the tree :).....
Is 3>=0.2158312001199094?
----- True branch :)
Is 3>=1.603491216736123?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
Is 3>=1.6027001267926013?
----- True branch :)
The predicted value --> 0.0
----- False branch :)
Is 1>=1.1102558720057867?
----- True branch :)
Is 1>=1.121124484926718?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
Is 0>=-1.4505020514700175?
----- True branch :)
Is 0>=-1.2409267655158718?
----- True branch :)
Is 4>=-0.9855838987198552?
----- True branch :)
The predicted value --> 0.0
----- False branch :)
Is 4>=-0.9919443440822534?
----- True branch :)
Is 0>=-0.03912201395755674?
----- True branch :)
Is 1>=-0.7629868478972113?
----- True branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
Is 1>=0.975256990218654?
----- True branch :)
Is 1>=0.9835102389333937?
----- True branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
Is 0>=-1.2426439239452205?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
Is 1>=1.302431936290737?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
Is 0>=-2.0658636847690226?
----- True branch :)
Is 4>=-0.2571977439807178?
----- True branch :)
Is 3>=0.0711753309615336?
----- True branch :)
Is 4>=0.9100016691443558?
----- True branch :)
Is 0>=-1.8617027460798927?
----- True branch :)
Is 3>=0.08340637959843854?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
Is 3>=-1.6620566415668998?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
Is 3>=-1.7031487817906366?
----- True branch :)
Is 0>=-1.578529703567106?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
Is 4>=-0.4606035472381632?
----- True branch :)
Is 1>=0.026478963572701995?
----- True branch :)
Is 0>=-1.846866131035971?
----- True branch :)
Is 0>=-1.6089996358204093?
----- True branch :)
Is 0>=-1.5466521825344157?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
Is 0>=-1.563689378497609?
----- True branch :)
The predicted value --> 0.0
----- False branch :)
Is 0>=-1.8220125446617492?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
Is 4>=-0.6567896585276278?
----- True branch :)
The predicted value --> 1.0
----- False branch :)
The predicted value --> 0.0
----- False branch :)
The predicted value --> 0.0
====================================================================================================
Accuracy of the prediction is 1.0
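###Markdown
The accuracy above is measured on the very data the tree was trained on, which a deep unpruned tree can fit perfectly. As an optional sanity check, the sketch below evaluates the classifier on a held-out split instead (not part of the original run; it assumes the same train()/predict() API, and the variable names are made up):
###Code
# Optional sanity-check sketch: evaluate on a held-out split instead of the training data.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
tree_holdout = DecisionTreeClassifier(min_sample_split=2, max_depth=45)
tree_holdout.train(X_train, y_train)
holdout_acc = np.sum(y_test == tree_holdout.predict(X_test))/X_test.shape[0]
print("Hold-out accuracy: {}".format(holdout_acc))
###Output
_____no_output_____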
###Markdown
Decision tree classifier using scikit-learn for comparison.
###Code
from sklearn.tree import DecisionTreeClassifier as DecisionTreeClassifier_sklearn
# Data is already defined; we use the same data for comparison.
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))
decision_tree_sklearn = DecisionTreeClassifier_sklearn()
decision_tree_sklearn.fit(X, y)
# predict the value
y_pred_sklearn = decision_tree_sklearn.predict(X)
acc = accuracy_score(y, y_pred_sklearn)
print("="*100)
print("Accuracy of the prediction is {}".format(acc))
###Output
====================================================================================================
Accuracy of the prediction is 1.0
###Markdown
Decision Tree Regression.
###Code
class DecisionTreeRegression(DecisionTree):
""" Decision Tree for the classification problem."""
def __init__(self, min_sample_split=3, min_impurity=1e-7, max_depth=float('inf'),
):
"""
:param min_sample_split: min value a leaf node must have.
:param min_impurity: minimum impurity.
:param max_depth: maximum depth of the tree.
"""
self._impurity_function = self._calculate_variance_reduction
self._leaf_value_calculation = self._calculate_column_mean
super(DecisionTreeRegression, self).__init__(min_sample_split=min_sample_split, min_impurity=min_impurity, max_depth=max_depth,
impurity_function=self._impurity_function, leaf_node_calculation=self._leaf_value_calculation)
def _calculate_variance_reduction(self, y, y1, y2):
"""
Calculate the Variance reduction.
:param y: target value.
:param y1: target value for dataset in the true split/right branch.
:param y2: target value for dataset in the false split/left branch.
"""
# Variance of the parent node and of each branch.
variance = np.var(y)
variance_y1 = np.var(y1)
variance_y2 = np.var(y2)
y_len = len(y)
fraction_1 = len(y1) / y_len
fraction_2 = len(y2) / y_len
variance_reduction = variance - (fraction_1 * variance_y1 + fraction_2 * variance_y2)
return variance_reduction
def _calculate_column_mean(self, y):
"""
calculate the prediction value for that leaf node using mean.
:param y: leaf node target array.
"""
mean = np.mean(y, axis=0)
return mean
def train(self, X, y):
"""
Build the tree.
:param X: feature array (independent variables).
:param y: target array (dependent variable).
"""
# train the model.
super(DecisionTreeRegression, self).train(X, y)
###Output
_____no_output_____
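###Markdown
To make the split criterion concrete, the tiny worked example below applies the same variance-reduction formula to made-up target values (the numbers are illustrative, not taken from the generated dataset):
###Code
# Tiny worked example of the variance-reduction criterion used by DecisionTreeRegression (toy values).
y_parent = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0]) # targets reaching the node
y_true_branch = np.array([1.0, 2.0, 3.0]) # targets sent to the "true" branch by a candidate split
y_false_branch = np.array([10.0, 11.0, 12.0]) # targets sent to the "false" branch
w_true = len(y_true_branch)/len(y_parent)
w_false = len(y_false_branch)/len(y_parent)
reduction = np.var(y_parent) - (w_true*np.var(y_true_branch) + w_false*np.var(y_false_branch))
print(reduction) # ~20.25: a large reduction, so this candidate split separates the targets well
###Output
_____no_output_____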
###Markdown
create the data.
###Code
# Define the training data.
X, y = make_regression(n_samples=1000, n_features=8)
# Reshape the target into a column vector.
y = y[:, np.newaxis]
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))
print("Shape of the target value ----------> {}".format(y.shape))
# display the data.
data_y = pd.DataFrame(y)
data_y.head()
#define the parameters
sys.setrecursionlimit(2000)
print("="*100)
decision_tree_reg = DecisionTreeRegression(min_sample_split=2, max_depth=45)
# Train the model.
decision_tree_reg.train(X, y)
print("Printing the model :)...")
decision_tree_reg.draw_tree()
# Predict the values.
y_pred = decision_tree_reg.predict(X)
# R-squared score.
score = r2_score(y, y_pred)
print("The r2_score of the trained model", score)
###Output
====================================================================================================
Printing the model :)...
Is 0>=-0.21438011135558163?
----- True branch :)
Is 1>=0.7907654865740426?
----- True branch :)
Is 4>=-0.5439254515196377?
----- True branch :)
Is 0>=1.1736031114466987?
----- True branch :)
Is 4>=1.2250522851779126?
----- True branch :)
Is 6>=0.559415815161355?
----- True branch :)
Is 0>=2.039182631837753?
----- True branch :)
The predicted value --> [498.04718411]
----- False branch :)
Is 0>=1.6624438066775729?
----- True branch :)
The predicted value --> [428.02792105]
----- False branch :)
The predicted value --> [442.20262889]
----- False branch :)
Is 0>=2.021069263910125?
----- True branch :)
Is 0>=2.0240234468149447?
----- True branch :)
The predicted value --> [318.1378057]
----- False branch :)
The predicted value --> [322.94670729]
----- False branch :)
The predicted value --> [357.80541801]
----- False branch :)
Is 6>=0.27552203354986454?
----- True branch :)
Is 0>=1.7930838959631736?
----- True branch :)
Is 0>=2.520331022341444?
----- True branch :)
The predicted value --> [359.18791686]
----- False branch :)
The predicted value --> [354.66575921]
----- False branch :)
Is 2>=2.0630950974669497?
----- True branch :)
The predicted value --> [235.33013387]
----- False branch :)
Is 0>=1.4618041240396549?
----- True branch :)
Is 1>=2.2287240175162166?
----- True branch :)
The predicted value --> [283.23311375]
----- False branch :)
Is 0>=1.506259266858819?
----- True branch :)
The predicted value --> [285.33464895]
----- False branch :)
The predicted value --> [284.42233952]
----- False branch :)
The predicted value --> [315.47748049]
----- False branch :)
Is 3>=2.926437707787562?
----- True branch :)
The predicted value --> [341.78168229]
----- False branch :)
Is 0>=1.3349929960870925?
----- True branch :)
Is 2>=0.06463229029913269?
----- True branch :)
Is 4>=0.7080941152050524?
----- True branch :)
The predicted value --> [211.55431374]
----- False branch :)
Is 4>=-0.07128342057021647?
----- True branch :)
Is 0>=1.5663112793100533?
----- True branch :)
Is 0>=1.973237831421619?
----- True branch :)
The predicted value --> [227.57236899]
----- False branch :)
The predicted value --> [227.48232104]
----- False branch :)
The predicted value --> [230.61104147]
----- False branch :)
The predicted value --> [217.17117993]
----- False branch :)
Is 3>=-0.07119393842047049?
----- True branch :)
The predicted value --> [225.35525205]
----- False branch :)
Is 0>=1.8138859015958377?
----- True branch :)
The predicted value --> [183.33805674]
----- False branch :)
The predicted value --> [181.6645605]
----- False branch :)
Is 0>=1.2504503762604693?
----- True branch :)
Is 0>=1.27323264377677?
----- True branch :)
Is 0>=1.3064528360718615?
----- True branch :)
The predicted value --> [122.86553583]
----- False branch :)
The predicted value --> [122.96103204]
----- False branch :)
The predicted value --> [128.8720552]
----- False branch :)
The predicted value --> [247.9700879]
[decision-tree printout truncated]
The remainder of this cell's output (several thousand lines) is the recursive dump of the fitted regression tree: each internal node is printed as a question of the form "Is <feature_index>>=<threshold>?", followed by a "----- True branch :)" subtree and a "----- False branch :)" subtree, and each leaf is printed as "The predicted value --> [<value>]".
----- False branch :)
The predicted value --> [100.17956912]
----- False branch :)
The predicted value --> [90.9124768]
----- False branch :)
Is 5>=-0.8291948683220198?
----- True branch :)
Is 0>=-0.8363433625783825?
----- True branch :)
Is 0>=-0.6580686984662988?
----- True branch :)
The predicted value --> [107.46998486]
----- False branch :)
The predicted value --> [108.06357462]
----- False branch :)
Is 0>=-0.8922021527215723?
----- True branch :)
The predicted value --> [118.26205787]
----- False branch :)
Is 0>=-0.8976983170583885?
----- True branch :)
The predicted value --> [121.77255038]
----- False branch :)
The predicted value --> [120.03610713]
----- False branch :)
The predicted value --> [147.08325213]
----- False branch :)
Is 0>=-0.4213474900739423?
----- True branch :)
The predicted value --> [179.76833225]
----- False branch :)
The predicted value --> [138.3624467]
----- False branch :)
Is 5>=0.042759398644926436?
----- True branch :)
Is 0>=-1.290389402038673?
----- True branch :)
Is 0>=-0.9951240848694105?
----- True branch :)
The predicted value --> [58.94000698]
----- False branch :)
The predicted value --> [64.27965875]
----- False branch :)
The predicted value --> [33.52654738]
----- False branch :)
Is 0>=-0.24353551131252907?
----- True branch :)
The predicted value --> [107.5022944]
----- False branch :)
Is 0>=-0.36339961971673596?
----- True branch :)
The predicted value --> [85.23772378]
----- False branch :)
The predicted value --> [94.80514106]
----- False branch :)
Is 0>=-0.6174407266768602?
----- True branch :)
The predicted value --> [28.98819907]
----- False branch :)
The predicted value --> [31.87073219]
----- False branch :)
Is 7>=1.0499276599227159?
----- True branch :)
Is 0>=-0.8114039874197325?
----- True branch :)
The predicted value --> [103.98958805]
----- False branch :)
The predicted value --> [64.71474357]
----- False branch :)
Is 0>=-0.3739431618414562?
----- True branch :)
The predicted value --> [30.56653674]
----- False branch :)
Is 0>=-0.5942452647593522?
----- True branch :)
The predicted value --> [5.10663124]
----- False branch :)
The predicted value --> [0.46014263]
----- False branch :)
Is 1>=0.6178072678528453?
----- True branch :)
Is 0>=-1.5878351700840185?
----- True branch :)
The predicted value --> [-63.51494517]
----- False branch :)
The predicted value --> [-75.99314952]
----- False branch :)
The predicted value --> [4.82234659]
----- False branch :)
Is 3>=-0.34518008653717286?
----- True branch :)
Is 6>=-0.6807755417660539?
----- True branch :)
Is 0>=-1.4986997125795267?
----- True branch :)
Is 6>=0.2379288971459442?
----- True branch :)
Is 4>=0.2494395802840642?
----- True branch :)
Is 3>=0.14659271680821812?
----- True branch :)
Is 3>=1.3351691688556537?
----- True branch :)
Is 0>=-0.453866237736346?
----- True branch :)
The predicted value --> [120.78660414]
----- False branch :)
The predicted value --> [128.60566992]
----- False branch :)
Is 0>=-0.3079545286526979?
----- True branch :)
The predicted value --> [83.25354258]
----- False branch :)
The predicted value --> [98.69488387]
----- False branch :)
The predicted value --> [176.46058172]
----- False branch :)
Is 2>=0.5181686549434467?
----- True branch :)
Is 1>=0.8498295381757492?
----- True branch :)
Is 0>=-1.3629267413467423?
----- True branch :)
The predicted value --> [40.66498701]
----- False branch :)
The predicted value --> [31.10475645]
----- False branch :)
The predicted value --> [8.99547901]
----- False branch :)
Is 4>=-0.18973897521038038?
----- True branch :)
Is 0>=-0.8722927385271899?
----- True branch :)
Is 0>=-0.5801885591290122?
----- True branch :)
Is 0>=-0.47350615908848637?
----- True branch :)
The predicted value --> [84.39250456]
----- False branch :)
The predicted value --> [84.90339062]
----- False branch :)
The predicted value --> [76.69951925]
----- False branch :)
Is 0>=-0.9235527426528303?
----- True branch :)
The predicted value --> [110.15149519]
----- False branch :)
The predicted value --> [108.57609134]
----- False branch :)
Is 0>=-0.28970695063629853?
----- True branch :)
The predicted value --> [31.22558544]
----- False branch :)
The predicted value --> [48.10829024]
----- False branch :)
Is 1>=1.7741631499891675?
----- True branch :)
Is 0>=-1.1597355068874524?
----- True branch :)
The predicted value --> [53.82500465]
----- False branch :)
The predicted value --> [75.0766226]
----- False branch :)
Is 4>=0.35899665764186955?
----- True branch :)
Is 1>=1.4588530972166538?
----- True branch :)
The predicted value --> [30.30554652]
----- False branch :)
Is 0>=-0.8213330016919788?
----- True branch :)
The predicted value --> [39.40249203]
----- False branch :)
The predicted value --> [40.46141163]
----- False branch :)
Is 0>=-0.7276118824659409?
----- True branch :)
Is 0>=-0.4949805047031221?
----- True branch :)
The predicted value --> [29.91907557]
----- False branch :)
Is 0>=-0.5076320257195344?
----- True branch :)
The predicted value --> [17.47889103]
----- False branch :)
The predicted value --> [18.57704652]
----- False branch :)
Is 0>=-1.0631686626815577?
----- True branch :)
The predicted value --> [-10.80649715]
----- False branch :)
The predicted value --> [4.00645121]
----- False branch :)
Is 0>=-1.5495928304432298?
----- True branch :)
The predicted value --> [-26.60691559]
----- False branch :)
The predicted value --> [-43.37365486]
----- False branch :)
Is 1>=1.4837205799571878?
----- True branch :)
Is 0>=-1.116749800618701?
----- True branch :)
Is 0>=-0.43754248204970486?
----- True branch :)
The predicted value --> [17.07034211]
----- False branch :)
The predicted value --> [5.51358199]
----- False branch :)
The predicted value --> [-15.23923149]
----- False branch :)
Is 2>=0.1294528825260325?
----- True branch :)
Is 0>=-0.8832269664164478?
----- True branch :)
The predicted value --> [-36.6523246]
----- False branch :)
The predicted value --> [-24.63091631]
----- False branch :)
Is 0>=-0.5767860668663893?
----- True branch :)
The predicted value --> [-57.40708113]
----- False branch :)
The predicted value --> [-75.38728864]
----- False branch :)
Is 6>=-0.3465728103726554?
----- True branch :)
Is 1>=1.7091884467173522?
----- True branch :)
Is 0>=-0.29297302609069165?
----- True branch :)
The predicted value --> [77.27723797]
----- False branch :)
Is 0>=-0.7401601824427289?
----- True branch :)
The predicted value --> [18.9606344]
----- False branch :)
The predicted value --> [25.17821464]
----- False branch :)
Is 5>=0.4829411613284528?
----- True branch :)
Is 0>=-0.2986639959451574?
----- True branch :)
The predicted value --> [30.17030801]
----- False branch :)
Is 2>=0.7034944442297962?
----- True branch :)
The predicted value --> [-27.86951149]
----- False branch :)
Is 0>=-0.4245706588861691?
----- True branch :)
Is 0>=-0.31450812289543073?
----- True branch :)
The predicted value --> [-4.25271678]
----- False branch :)
The predicted value --> [-3.050793]
----- False branch :)
The predicted value --> [3.36598005]
----- False branch :)
Is 2>=0.8605685651771598?
----- True branch :)
Is 0>=-0.5205855391215166?
----- True branch :)
The predicted value --> [-34.74221186]
----- False branch :)
Is 0>=-0.5287971304970759?
----- True branch :)
The predicted value --> [12.34336018]
----- False branch :)
The predicted value --> [-8.71683664]
----- False branch :)
Is 0>=-0.39705404103249436?
----- True branch :)
The predicted value --> [-17.9668919]
----- False branch :)
Is 3>=-1.0918353821742666?
----- True branch :)
Is 3>=-1.0736711040486617?
----- True branch :)
Is 0>=-0.5078755129877948?
----- True branch :)
The predicted value --> [-60.89150623]
----- False branch :)
The predicted value --> [-65.81681848]
----- False branch :)
The predicted value --> [-54.91824109]
----- False branch :)
Is 0>=-0.4005345832102535?
----- True branch :)
The predicted value --> [-73.29537663]
----- False branch :)
The predicted value --> [-68.04548204]
----- False branch :)
Is 3>=-1.0249702940469931?
----- True branch :)
Is 0>=-0.34037222012850665?
----- True branch :)
The predicted value --> [-106.10028364]
----- False branch :)
Is 0>=-1.3067160697134748?
----- True branch :)
The predicted value --> [-118.86493681]
----- False branch :)
The predicted value --> [-128.6541253]
----- False branch :)
The predicted value --> [-218.66794506]
----- False branch :)
Is 3>=0.94685573367967?
----- True branch :)
Is 0>=-1.5940131010898237?
----- True branch :)
Is 4>=0.6920695840925918?
----- True branch :)
Is 6>=0.674857033741643?
----- True branch :)
Is 2>=1.656554646010719?
----- True branch :)
Is 0>=-0.23546488942506322?
----- True branch :)
The predicted value --> [120.72761598]
----- False branch :)
The predicted value --> [144.52072803]
----- False branch :)
Is 0>=-0.3346495228181063?
----- True branch :)
The predicted value --> [232.11390829]
----- False branch :)
The predicted value --> [184.59117514]
----- False branch :)
Is 1>=0.29523692305250293?
----- True branch :)
The predicted value --> [-1.30192001]
----- False branch :)
Is 0>=-0.3760020689020309?
----- True branch :)
The predicted value --> [74.97485878]
----- False branch :)
The predicted value --> [54.8316768]
----- False branch :)
Is 1>=-0.6471986705250511?
----- True branch :)
Is 3>=2.0044498302569744?
----- True branch :)
Is 0>=-0.4087550660373395?
----- True branch :)
The predicted value --> [120.42232912]
----- False branch :)
The predicted value --> [81.84555757]
----- False branch :)
Is 0>=-0.4813064595349107?
----- True branch :)
Is 1>=-0.4714790668541296?
----- True branch :)
Is 0>=-0.4052650570310181?
----- True branch :)
Is 0>=-0.2287108235190566?
----- True branch :)
The predicted value --> [58.04326246]
----- False branch :)
The predicted value --> [53.84168233]
----- False branch :)
The predicted value --> [67.83385128]
----- False branch :)
The predicted value --> [7.27089163]
----- False branch :)
Is 0>=-1.5494983139962692?
----- True branch :)
Is 5>=-0.6477014688641378?
----- True branch :)
Is 5>=1.068931619866385?
----- True branch :)
Is 0>=-1.1087493158021084?
----- True branch :)
The predicted value --> [11.01231627]
----- False branch :)
The predicted value --> [16.77734046]
----- False branch :)
Is 1>=0.04430054528652213?
----- True branch :)
Is 1>=0.10815067286235402?
----- True branch :)
The predicted value --> [8.27719425]
----- False branch :)
Is 0>=-0.6648439186256027?
----- True branch :)
The predicted value --> [-3.60553663]
----- False branch :)
The predicted value --> [-1.30141834]
----- False branch :)
Is 1>=-0.32326013371553913?
----- True branch :)
Is 0>=-0.6375314820426292?
----- True branch :)
The predicted value --> [-13.27764218]
----- False branch :)
The predicted value --> [-16.36829566]
----- False branch :)
The predicted value --> [-6.06899763]
----- False branch :)
Is 0>=-0.773629040246999?
----- True branch :)
The predicted value --> [-32.91804112]
----- False branch :)
The predicted value --> [-17.16560068]
----- False branch :)
The predicted value --> [41.05264608]
----- False branch :)
Is 0>=-0.6690527197264902?
----- True branch :)
Is 6>=0.4955330109431136?
----- True branch :)
Is 0>=-0.3168629704110022?
----- True branch :)
The predicted value --> [-1.6812779]
----- False branch :)
The predicted value --> [-19.78980047]
----- False branch :)
Is 0>=-0.5214164203958006?
----- True branch :)
The predicted value --> [-53.15988094]
----- False branch :)
Is 0>=-0.5998328702457424?
----- True branch :)
The predicted value --> [-36.14522685]
----- False branch :)
Is 0>=-0.6529890213334423?
----- True branch :)
The predicted value --> [-32.16007977]
----- False branch :)
The predicted value --> [-29.42858202]
----- False branch :)
Is 0>=-0.9251938813638639?
----- True branch :)
The predicted value --> [-58.79043527]
----- False branch :)
The predicted value --> [-95.24215142]
----- False branch :)
Is 3>=1.6717257972353616?
----- True branch :)
The predicted value --> [-16.16998165]
----- False branch :)
Is 1>=0.05778188109180294?
----- True branch :)
The predicted value --> [-218.04653633]
----- False branch :)
Is 0>=-1.6678670716374844?
----- True branch :)
The predicted value --> [-162.74088396]
----- False branch :)
The predicted value --> [-183.57510907]
----- False branch :)
Is 6>=0.7985868133188906?
----- True branch :)
Is 4>=0.23850659615036338?
----- True branch :)
Is 0>=-1.409100262578563?
----- True branch :)
Is 3>=0.7078899196182028?
----- True branch :)
The predicted value --> [173.10358502]
----- False branch :)
Is 2>=0.8357171049627868?
----- True branch :)
Is 2>=1.646172899708308?
----- True branch :)
The predicted value --> [68.43449727]
----- False branch :)
Is 0>=-0.33556525450384056?
----- True branch :)
The predicted value --> [99.04603786]
----- False branch :)
The predicted value --> [89.05290514]
----- False branch :)
Is 3>=-0.1845152786728836?
----- True branch :)
Is 1>=0.2887333866562933?
----- True branch :)
The predicted value --> [70.58383318]
----- False branch :)
Is 2>=0.4150415422965292?
----- True branch :)
Is 0>=-0.27560941583167414?
----- True branch :)
The predicted value --> [43.08229104]
----- False branch :)
The predicted value --> [47.61407027]
----- False branch :)
The predicted value --> [54.44305126]
----- False branch :)
Is 1>=0.3316421839789071?
----- True branch :)
Is 0>=-0.24907220448537667?
----- True branch :)
The predicted value --> [12.86705132]
----- False branch :)
The predicted value --> [10.68729194]
----- False branch :)
The predicted value --> [25.75840325]
----- False branch :)
Is 6>=1.3518249335425268?
----- True branch :)
Is 0>=-1.4602759378414576?
----- True branch :)
The predicted value --> [-26.33038271]
----- False branch :)
The predicted value --> [8.1998899]
----- False branch :)
Is 7>=-0.10901113437158703?
----- True branch :)
Is 0>=-1.4862227127858183?
----- True branch :)
The predicted value --> [-86.09299491]
----- False branch :)
The predicted value --> [-113.43143176]
----- False branch :)
Is 0>=-1.428097530783235?
----- True branch :)
The predicted value --> [-50.02937822]
----- False branch :)
The predicted value --> [-57.67651437]
----- False branch :)
Is 7>=1.1415008927094215?
----- True branch :)
Is 0>=-1.789789884754111?
----- True branch :)
Is 1>=-0.3883197956322519?
----- True branch :)
Is 0>=-1.0062263515521375?
----- True branch :)
The predicted value --> [-160.00881075]
----- False branch :)
The predicted value --> [-163.8219031]
----- False branch :)
Is 0>=-0.8729643095310075?
----- True branch :)
The predicted value --> [-173.67061806]
----- False branch :)
The predicted value --> [-178.84214563]
----- False branch :)
The predicted value --> [-102.64210415]
----- False branch :)
Is 0>=-1.2936280585156164?
----- True branch :)
Is 1>=-0.47767376773079995?
----- True branch :)
Is 3>=-0.46607597391586025?
----- True branch :)
Is 1>=-0.19939866596214118?
----- True branch :)
Is 2>=-0.738566015383587?
----- True branch :)
Is 3>=-0.1095692522579201?
----- True branch :)
Is 0>=-0.23187336267295786?
----- True branch :)
The predicted value --> [-6.17837416]
----- False branch :)
The predicted value --> [-5.52649169]
----- False branch :)
The predicted value --> [-15.53588889]
----- False branch :)
Is 0>=-0.36463971536873907?
----- True branch :)
The predicted value --> [-34.67675384]
----- False branch :)
The predicted value --> [-21.49220836]
----- False branch :)
Is 0>=-0.39104396786261003?
----- True branch :)
The predicted value --> [30.75051751]
----- False branch :)
The predicted value --> [11.15976654]
----- False branch :)
Is 0>=-0.6255234126627661?
----- True branch :)
Is 0>=-0.40197950514042075?
----- True branch :)
Is 0>=-0.27107966254419047?
----- True branch :)
The predicted value --> [-52.3297194]
----- False branch :)
The predicted value --> [-50.95849554]
----- False branch :)
The predicted value --> [-64.5999093]
----- False branch :)
The predicted value --> [-31.99193174]
----- False branch :)
Is 0>=-0.3699755068408466?
----- True branch :)
Is 0>=-0.2992629547354531?
----- True branch :)
The predicted value --> [-47.65505121]
----- False branch :)
The predicted value --> [-56.18189634]
----- False branch :)
Is 0>=-0.5704020419934498?
----- True branch :)
The predicted value --> [-79.54074348]
----- False branch :)
The predicted value --> [-83.76854839]
----- False branch :)
Is 0>=-1.4417419024018097?
----- True branch :)
The predicted value --> [-125.87297657]
----- False branch :)
The predicted value --> [-184.55503865]
----- False branch :)
Is 0>=-0.9514232884228382?
----- True branch :)
Is 1>=-0.1680925025857029?
----- True branch :)
Is 3>=0.13632674191837274?
----- True branch :)
Is 4>=0.42477616955365893?
----- True branch :)
Is 1>=0.15106525282846298?
----- True branch :)
Is 0>=-0.5921007717355757?
----- True branch :)
Is 0>=-0.5451462428669384?
----- True branch :)
The predicted value --> [33.68995579]
----- False branch :)
The predicted value --> [31.50443946]
----- False branch :)
The predicted value --> [21.57760377]
----- False branch :)
Is 0>=-0.27025720013863497?
----- True branch :)
The predicted value --> [-0.81317902]
----- False branch :)
The predicted value --> [2.22498111]
----- False branch :)
Is 0>=-0.6216776626097374?
----- True branch :)
Is 0>=-0.26436608901007824?
----- True branch :)
The predicted value --> [-41.07489278]
----- False branch :)
Is 2>=1.0760207020249233?
----- True branch :)
Is 0>=-0.45258738527462655?
----- True branch :)
The predicted value --> [-22.94787972]
----- False branch :)
The predicted value --> [-22.88157312]
----- False branch :)
The predicted value --> [-9.68301031]
----- False branch :)
The predicted value --> [-87.97140237]
----- False branch :)
Is 1>=-0.10585652852423873?
----- True branch :)
Is 4>=0.6474264970929217?
----- True branch :)
Is 6>=-0.3978924826103155?
----- True branch :)
Is 0>=-0.3962558628262739?
----- True branch :)
Is 0>=-0.2978245818646815?
----- True branch :)
The predicted value --> [-43.99657334]
----- False branch :)
The predicted value --> [-41.43248608]
----- False branch :)
Is 0>=-0.4778702225756999?
----- True branch :)
The predicted value --> [-34.30804221]
----- False branch :)
The predicted value --> [-25.38825109]
----- False branch :)
Is 1>=-0.02571411621716516?
----- True branch :)
Is 1>=0.35026502354608124?
----- True branch :)
Is 0>=-0.29067129591319035?
----- True branch :)
The predicted value --> [-76.58386611]
----- False branch :)
The predicted value --> [-77.32340026]
----- False branch :)
Is 0>=-0.4882493166045005?
----- True branch :)
The predicted value --> [-98.885446]
----- False branch :)
The predicted value --> [-92.07002975]
----- False branch :)
The predicted value --> [-51.71100414]
----- False branch :)
Is 1>=0.29193216914286796?
----- True branch :)
The predicted value --> [-115.05653682]
----- False branch :)
Is 0>=-0.2287583126227391?
----- True branch :)
The predicted value --> [-89.45879949]
----- False branch :)
Is 0>=-0.5020999932039946?
----- True branch :)
The predicted value --> [-95.57832189]
----- False branch :)
The predicted value --> [-96.86197224]
----- False branch :)
The predicted value --> [19.63411492]
----- False branch :)
Is 4>=1.1567268035906628?
----- True branch :)
Is 1>=-0.4189480050697541?
----- True branch :)
The predicted value --> [-70.007292]
----- False branch :)
Is 1>=-1.419149446573486?
----- True branch :)
Is 1>=-1.4075983420531388?
----- True branch :)
Is 5>=-0.11149565833247664?
----- True branch :)
Is 4>=1.430253517348522?
----- True branch :)
Is 0>=-0.3195210300892237?
----- True branch :)
The predicted value --> [-32.72326897]
----- False branch :)
The predicted value --> [-33.64761455]
----- False branch :)
The predicted value --> [-31.18802117]
----- False branch :)
Is 0>=-0.6546941167117288?
----- True branch :)
The predicted value --> [-24.64456892]
----- False branch :)
The predicted value --> [-26.13369145]
----- False branch :)
The predicted value --> [-42.01224881]
----- False branch :)
The predicted value --> [2.50991163]
----- False branch :)
Is 3>=-1.1864773123435581?
----- True branch :)
Is 6>=-1.5422272405416801?
----- True branch :)
Is 1>=-0.8194904006252756?
----- True branch :)
Is 4>=-0.12028324026341837?
----- True branch :)
Is 3>=-0.6342888390138943?
----- True branch :)
Is 3>=0.4083446404565754?
----- True branch :)
Is 0>=-0.285668538925528?
----- True branch :)
The predicted value --> [-82.10583144]
----- False branch :)
The predicted value --> [-94.95068959]
----- False branch :)
Is 5>=-0.2061330565274583?
----- True branch :)
Is 0>=-0.7848215238650925?
----- True branch :)
Is 0>=-0.5353708935706063?
----- True branch :)
The predicted value --> [-33.73984483]
----- False branch :)
The predicted value --> [-37.84632374]
----- False branch :)
The predicted value --> [-54.99939973]
----- False branch :)
Is 0>=-0.41754920154519065?
----- True branch :)
Is 4>=-0.07497995048625274?
----- True branch :)
Is 0>=-0.3191962073684125?
----- True branch :)
The predicted value --> [-53.63413264]
----- False branch :)
Is 0>=-0.3647053413575996?
----- True branch :)
The predicted value --> [-60.61101923]
----- False branch :)
The predicted value --> [-57.9425283]
----- False branch :)
The predicted value --> [-68.44591823]
----- False branch :)
The predicted value --> [-79.6030848]
----- False branch :)
Is 0>=-0.603519223764784?
----- True branch :)
The predicted value --> [-100.10605485]
----- False branch :)
The predicted value --> [-83.99910587]
----- False branch :)
Is 3>=0.40420935008954756?
----- True branch :)
Is 0>=-0.3664900402800671?
----- True branch :)
The predicted value --> [-69.74715481]
----- False branch :)
The predicted value --> [-90.8998995]
----- False branch :)
Is 0>=-0.2676196106436?
----- True branch :)
The predicted value --> [-94.53522973]
----- False branch :)
Is 2>=0.616339604486072?
----- True branch :)
The predicted value --> [-110.53863132]
----- False branch :)
Is 0>=-0.6069169636946325?
----- True branch :)
The predicted value --> [-148.89913882]
----- False branch :)
The predicted value --> [-146.39506826]
----- False branch :)
Is 3>=0.295772018888521?
----- True branch :)
Is 5>=0.752555790752537?
----- True branch :)
Is 0>=-0.2620562727982959?
----- True branch :)
The predicted value --> [-89.56528093]
----- False branch :)
The predicted value --> [-78.45133084]
----- False branch :)
Is 0>=-0.23237455888512876?
----- True branch :)
The predicted value --> [-128.02115391]
----- False branch :)
Is 0>=-0.24461173193690772?
----- True branch :)
The predicted value --> [-106.31875946]
----- False branch :)
The predicted value --> [-102.06215984]
----- False branch :)
Is 7>=-1.2090762467593694?
----- True branch :)
Is 1>=-2.3229222377876737?
----- True branch :)
Is 7>=1.8800133099468737?
----- True branch :)
The predicted value --> [-113.73257727]
----- False branch :)
Is 4>=-0.07466491080355263?
----- True branch :)
Is 2>=-0.2763907930530059?
----- True branch :)
Is 0>=-0.3489386728176108?
----- True branch :)
Is 0>=-0.2836296599470513?
----- True branch :)
The predicted value --> [-142.48167357]
----- False branch :)
The predicted value --> [-146.23652065]
----- False branch :)
Is 0>=-0.5209635331942202?
----- True branch :)
The predicted value --> [-155.0556916]
----- False branch :)
The predicted value --> [-155.42811714]
----- False branch :)
Is 0>=-0.23690664049288557?
----- True branch :)
The predicted value --> [-142.44860333]
----- False branch :)
The predicted value --> [-139.69885061]
----- False branch :)
Is 0>=-0.4223686402559092?
----- True branch :)
The predicted value --> [-117.15235476]
----- False branch :)
The predicted value --> [-138.79556359]
----- False branch :)
The predicted value --> [-172.99170762]
----- False branch :)
Is 0>=-0.4305079858248341?
----- True branch :)
The predicted value --> [-118.95747631]
----- False branch :)
The predicted value --> [-89.05088237]
----- False branch :)
Is 0>=-0.4189702566397838?
----- True branch :)
The predicted value --> [-209.65350138]
----- False branch :)
Is 1>=-0.34468639374902404?
----- True branch :)
The predicted value --> [-132.22819807]
----- False branch :)
Is 2>=-0.6547430056874366?
----- True branch :)
Is 0>=-0.4898579951897588?
----- True branch :)
The predicted value --> [-157.02455295]
----- False branch :)
The predicted value --> [-156.33316033]
----- False branch :)
The predicted value --> [-149.57019329]
----- False branch :)
Is 0>=-0.3341066967092276?
----- True branch :)
Is 0>=-0.22803387957873217?
----- True branch :)
The predicted value --> [-159.94910629]
----- False branch :)
The predicted value --> [-154.37571949]
----- False branch :)
Is 0>=-0.44041159380618367?
----- True branch :)
The predicted value --> [-248.06548661]
----- False branch :)
Is 0>=-0.44640561978700477?
----- True branch :)
The predicted value --> [-208.73231574]
----- False branch :)
The predicted value --> [-219.45287385]
----- False branch :)
Is 4>=0.3776379470775291?
----- True branch :)
Is 1>=-0.5119292220862601?
----- True branch :)
Is 4>=2.6975917410026846?
----- True branch :)
The predicted value --> [18.02481871]
----- False branch :)
Is 3>=0.8216849348945788?
----- True branch :)
Is 0>=-1.15360846759211?
----- True branch :)
The predicted value --> [-90.28434578]
----- False branch :)
The predicted value --> [-5.01605251]
----- False branch :)
Is 5>=-0.13772085174832344?
----- True branch :)
Is 0>=-1.0528468699278823?
----- True branch :)
Is 0>=-0.9842203517128043?
----- True branch :)
The predicted value --> [-136.08416588]
----- False branch :)
The predicted value --> [-152.57608965]
----- False branch :)
Is 2>=1.7262909794425367?
----- True branch :)
The predicted value --> [-87.31954351]
----- False branch :)
Is 1>=0.26124052656942903?
----- True branch :)
Is 0>=-1.1917616971582303?
----- True branch :)
The predicted value --> [-107.36025914]
----- False branch :)
The predicted value --> [-99.3336731]
----- False branch :)
Is 0>=-1.1321944289815817?
----- True branch :)
Is 0>=-1.0916824512771168?
----- True branch :)
The predicted value --> [-115.10003949]
----- False branch :)
The predicted value --> [-117.59352173]
----- False branch :)
Is 0>=-1.2409240377699804?
----- True branch :)
The predicted value --> [-123.64863955]
----- False branch :)
The predicted value --> [-125.02038945]
----- False branch :)
Is 0>=-1.0107655989847257?
----- True branch :)
The predicted value --> [-10.53833861]
----- False branch :)
Is 1>=-0.346901287564469?
----- True branch :)
The predicted value --> [-128.14504019]
----- False branch :)
Is 0>=-1.1401033186990435?
----- True branch :)
The predicted value --> [-91.30381648]
----- False branch :)
The predicted value --> [-93.03445076]
----- False branch :)
Is 0>=-1.9529522210242782?
----- True branch :)
Is 1>=-1.2962755042644278?
----- True branch :)
Is 7>=-0.18547365623938922?
----- True branch :)
Is 0>=-1.2273802208150315?
----- True branch :)
The predicted value --> [-144.22058059]
----- False branch :)
The predicted value --> [-161.24131943]
----- False branch :)
Is 0>=-1.306450162583735?
----- True branch :)
Is 0>=-0.9954489518565861?
----- True branch :)
The predicted value --> [-129.80079487]
----- False branch :)
The predicted value --> [-130.70544696]
----- False branch :)
The predicted value --> [-124.9016598]
----- False branch :)
The predicted value --> [-215.53746878]
----- False branch :)
Is 2>=0.2559688684079793?
----- True branch :)
The predicted value --> [-292.62667684]
----- False branch :)
Is 0>=-2.032988871911482?
----- True branch :)
The predicted value --> [-241.63354137]
----- False branch :)
The predicted value --> [-197.25862927]
----- False branch :)
Is 1>=-1.0723830273670993?
----- True branch :)
Is 2>=0.8099287397639071?
----- True branch :)
Is 1>=0.41930904841055594?
----- True branch :)
Is 0>=-1.2431788456469437?
----- True branch :)
The predicted value --> [-122.76054616]
----- False branch :)
The predicted value --> [-108.14823813]
----- False branch :)
The predicted value --> [-89.0652726]
----- False branch :)
Is 6>=-0.49148293517382646?
----- True branch :)
Is 2>=0.4100946016702642?
----- True branch :)
Is 0>=-1.6259040106060378?
----- True branch :)
The predicted value --> [-232.95588251]
----- False branch :)
The predicted value --> [-215.12774716]
----- False branch :)
Is 6>=0.6267828682415933?
----- True branch :)
Is 0>=-0.9605286894028833?
----- True branch :)
The predicted value --> [-192.71627521]
----- False branch :)
The predicted value --> [-191.63111532]
----- False branch :)
Is 3>=0.900499240377611?
----- True branch :)
The predicted value --> [-125.59731103]
----- False branch :)
Is 1>=0.21666060424526723?
----- True branch :)
The predicted value --> [-156.35274991]
----- False branch :)
Is 0>=-1.157766141968969?
----- True branch :)
Is 0>=-0.9910366345131126?
----- True branch :)
The predicted value --> [-161.10409965]
----- False branch :)
The predicted value --> [-161.7436427]
----- False branch :)
Is 0>=-1.242260984747123?
----- True branch :)
The predicted value --> [-163.70081183]
----- False branch :)
The predicted value --> [-163.36133517]
----- False branch :)
Is 0>=-1.702883375675596?
----- True branch :)
Is 4>=-0.27048456580119834?
----- True branch :)
Is 0>=-1.081473050841189?
----- True branch :)
The predicted value --> [-203.56749023]
----- False branch :)
The predicted value --> [-201.39854039]
----- False branch :)
Is 0>=-0.9575537691881162?
----- True branch :)
The predicted value --> [-227.40376638]
----- False branch :)
Is 0>=-1.2616854738231504?
----- True branch :)
The predicted value --> [-227.4698702]
----- False branch :)
The predicted value --> [-227.51022314]
----- False branch :)
The predicted value --> [-265.3618161]
----- False branch :)
Is 1>=-2.384053576967297?
----- True branch :)
Is 0>=-1.051159072287428?
----- True branch :)
The predicted value --> [-302.78937995]
----- False branch :)
Is 2>=-0.03582602846755511?
----- True branch :)
Is 1>=-2.05796407060694?
----- True branch :)
Is 0>=-1.6240454590884863?
----- True branch :)
Is 0>=-1.3246777166661254?
----- True branch :)
The predicted value --> [-237.75335742]
----- False branch :)
The predicted value --> [-245.11252513]
----- False branch :)
The predicted value --> [-254.57206632]
----- False branch :)
Is 0>=-1.2128949622588034?
----- True branch :)
The predicted value --> [-223.73325069]
----- False branch :)
The predicted value --> [-227.33834753]
----- False branch :)
The predicted value --> [-180.57913491]
----- False branch :)
The predicted value --> [-331.84137975]
----- False branch :)
Is 3>=-0.22537718270984775?
----- True branch :)
Is 1>=0.8091794749550286?
----- True branch :)
Is 2>=2.4675820657479934?
----- True branch :)
The predicted value --> [115.24163406]
----- False branch :)
Is 6>=-0.09389637865112474?
----- True branch :)
Is 4>=-1.3969557779501627?
----- True branch :)
Is 0>=-0.9279528610581109?
----- True branch :)
Is 0>=-0.37406116345907875?
----- True branch :)
The predicted value --> [33.14837316]
----- False branch :)
Is 1>=1.013807481979984?
----- True branch :)
Is 0>=-0.672167171807949?
----- True branch :)
The predicted value --> [6.72210136]
----- False branch :)
The predicted value --> [6.74818377]
----- False branch :)
The predicted value --> [-6.11664312]
----- False branch :)
The predicted value --> [-52.60149586]
----- False branch :)
The predicted value --> [-73.9895336]
----- False branch :)
Is 7>=0.41458090737352105?
----- True branch :)
Is 0>=-0.8601087782730383?
----- True branch :)
Is 0>=-0.37397856324250883?
----- True branch :)
The predicted value --> [-120.59612098]
----- False branch :)
The predicted value --> [-112.21556075]
----- False branch :)
The predicted value --> [-103.21562785]
----- False branch :)
Is 0>=-0.6004115255050564?
----- True branch :)
Is 0>=-0.5993947489573823?
----- True branch :)
Is 0>=-0.22849582977032085?
----- True branch :)
The predicted value --> [-23.50611068]
----- False branch :)
The predicted value --> [-22.03601592]
----- False branch :)
The predicted value --> [-54.98248273]
----- False branch :)
The predicted value --> [-103.61927394]
----- False branch :)
Is 6>=-1.2645572350163736?
----- True branch :)
Is 6>=0.9713576823421783?
----- True branch :)
Is 1>=-0.2549611007414916?
----- True branch :)
Is 2>=1.489563483757286?
----- True branch :)
The predicted value --> [27.55885419]
----- False branch :)
Is 1>=0.6932266366526922?
----- True branch :)
The predicted value --> [-40.26708367]
----- False branch :)
Is 0>=-0.5226008360949598?
----- True branch :)
The predicted value --> [-37.07355719]
----- False branch :)
The predicted value --> [-35.13234809]
----- False branch :)
Is 0>=-1.3304715410095938?
----- True branch :)
The predicted value --> [-126.98324268]
----- False branch :)
The predicted value --> [-96.23321084]
----- False branch :)
Is 1>=-0.7958984037299116?
----- True branch :)
Is 7>=-0.08497295040004142?
----- True branch :)
Is 6>=0.2592738236830108?
----- True branch :)
Is 0>=-0.6955237203337958?
----- True branch :)
Is 0>=-0.4868001460782402?
----- True branch :)
The predicted value --> [-46.27325558]
----- False branch :)
The predicted value --> [-32.39588649]
----- False branch :)
Is 2>=-0.618359505473842?
----- True branch :)
Is 0>=-0.7121324707727335?
----- True branch :)
The predicted value --> [-97.67457837]
----- False branch :)
The predicted value --> [-94.08623592]
----- False branch :)
The predicted value --> [-109.66758039]
----- False branch :)
Is 1>=0.11432731792606633?
----- True branch :)
Is 0>=-0.8519281026083186?
----- True branch :)
Is 1>=0.16967415756195472?
----- True branch :)
Is 1>=0.37674939575506206?
----- True branch :)
Is 0>=-0.5359511837287401?
----- True branch :)
The predicted value --> [-91.04136727]
----- False branch :)
The predicted value --> [-95.3349525]
----- False branch :)
The predicted value --> [-104.40877588]
----- False branch :)
The predicted value --> [-77.14643694]
----- False branch :)
The predicted value --> [-138.33867244]
----- False branch :)
Is 1>=-0.5529450592885307?
----- True branch :)
Is 2>=0.9633645954469557?
----- True branch :)
The predicted value --> [-148.58717575]
----- False branch :)
Is 0>=-0.3174512045387065?
----- True branch :)
The predicted value --> [-161.30832771]
----- False branch :)
The predicted value --> [-155.45159395]
----- False branch :)
The predicted value --> [-124.90734365]
----- False branch :)
Is 5>=-0.9525579953422296?
----- True branch :)
Is 2>=0.49275425901198566?
----- True branch :)
Is 0>=-0.47965820591741287?
----- True branch :)
The predicted value --> [-121.15292755]
----- False branch :)
Is 1>=0.25606168813939?
----- True branch :)
Is 0>=-0.8001759994753674?
----- True branch :)
The predicted value --> [-138.73918748]
----- False branch :)
The predicted value --> [-137.5389563]
----- False branch :)
The predicted value --> [-151.98880428]
----- False branch :)
Is 2>=-0.4125784147431431?
----- True branch :)
Is 2>=0.3659278940861191?
----- True branch :)
Is 0>=-0.38475409207602423?
----- True branch :)
The predicted value --> [-163.72233866]
----- False branch :)
The predicted value --> [-159.99816064]
----- False branch :)
The predicted value --> [-154.51524968]
----- False branch :)
The predicted value --> [-175.6085998]
----- False branch :)
The predicted value --> [-233.24807017]
----- False branch :)
Is 0>=-0.5951326668886685?
----- True branch :)
The predicted value --> [-133.9436642]
----- False branch :)
Is 2>=-0.4117671498461235?
----- True branch :)
Is 0>=-1.0371016725849036?
----- True branch :)
The predicted value --> [-218.13928139]
----- False branch :)
The predicted value --> [-237.78712123]
----- False branch :)
Is 0>=-0.7498475479684276?
----- True branch :)
The predicted value --> [-243.43762317]
----- False branch :)
The predicted value --> [-256.18071171]
----- False branch :)
Is 0>=-1.5520201421075486?
----- True branch :)
Is 0>=-0.9262514299805298?
----- True branch :)
The predicted value --> [-310.47186105]
----- False branch :)
The predicted value --> [-306.18986394]
----- False branch :)
The predicted value --> [-279.71291186]
----- False branch :)
Is 1>=-0.2495498941987684?
----- True branch :)
Is 0>=-1.0004301171610233?
----- True branch :)
Is 2>=-0.9241705499010372?
----- True branch :)
Is 1>=0.6401396359846435?
----- True branch :)
Is 2>=-0.1559912034077648?
----- True branch :)
Is 0>=-0.217153584226263?
----- True branch :)
The predicted value --> [-114.25816822]
----- False branch :)
Is 0>=-0.4985032496148889?
----- True branch :)
The predicted value --> [-97.98301307]
----- False branch :)
The predicted value --> [-100.40002735]
----- False branch :)
Is 0>=-0.25914017078353396?
----- True branch :)
The predicted value --> [-142.69810916]
----- False branch :)
The predicted value --> [-127.53693961]
----- False branch :)
Is 2>=-0.20095553747241174?
----- True branch :)
Is 5>=-0.4014182702145055?
----- True branch :)
Is 3>=-0.9042350022606166?
----- True branch :)
Is 0>=-0.3008993981898353?
----- True branch :)
The predicted value --> [-160.77911128]
----- False branch :)
The predicted value --> [-163.25749675]
----- False branch :)
Is 0>=-0.5673736223523902?
----- True branch :)
The predicted value --> [-148.67247105]
----- False branch :)
The predicted value --> [-147.34764856]
----- False branch :)
The predicted value --> [-186.63244323]
----- False branch :)
The predicted value --> [-100.30423934]
----- False branch :)
Is 0>=-0.362385871078916?
----- True branch :)
The predicted value --> [-219.91080539]
----- False branch :)
The predicted value --> [-198.94052481]
----- False branch :)
Is 3>=-1.2175545120070501?
----- True branch :)
Is 0>=-1.1773431622043034?
----- True branch :)
Is 0>=-1.0438592662507413?
----- True branch :)
The predicted value --> [-188.04756785]
----- False branch :)
The predicted value --> [-147.2437487]
----- False branch :)
Is 3>=-1.1148702668897557?
----- True branch :)
Is 0>=-1.2646933800266689?
----- True branch :)
The predicted value --> [-219.3265475]
----- False branch :)
Is 0>=-2.018990686669124?
----- True branch :)
The predicted value --> [-203.22774679]
----- False branch :)
The predicted value --> [-211.25533479]
----- False branch :)
Is 0>=-2.1485250075886118?
----- True branch :)
The predicted value --> [-239.60867747]
----- False branch :)
The predicted value --> [-258.22492641]
----- False branch :)
Is 0>=-1.0293696473547274?
----- True branch :)
The predicted value --> [-310.01806447]
----- False branch :)
The predicted value --> [-369.69156922]
----- False branch :)
Is 6>=-0.7946361196726142?
----- True branch :)
Is 0>=-0.7492200662722783?
----- True branch :)
Is 1>=-0.6282655955050974?
----- True branch :)
Is 0>=-0.22602751577331637?
----- True branch :)
The predicted value --> [-163.06393122]
----- False branch :)
The predicted value --> [-180.13878479]
----- False branch :)
Is 3>=-0.6495711918600559?
----- True branch :)
Is 0>=-0.3741359519757259?
----- True branch :)
The predicted value --> [-214.40439666]
----- False branch :)
The predicted value --> [-216.78288094]
----- False branch :)
Is 0>=-0.5061024109617096?
----- True branch :)
Is 0>=-0.3692695409086472?
----- True branch :)
The predicted value --> [-229.08609018]
----- False branch :)
The predicted value --> [-228.77846431]
----- False branch :)
The predicted value --> [-242.19425071]
----- False branch :)
Is 3>=-0.7298291231609603?
----- True branch :)
Is 0>=-1.1522664976659545?
----- True branch :)
Is 0>=-1.1053855609979097?
----- True branch :)
The predicted value --> [-214.69633828]
----- False branch :)
The predicted value --> [-220.00730161]
----- False branch :)
Is 0>=-2.1253591681929884?
----- True branch :)
Is 3>=-0.6512845840235802?
----- True branch :)
Is 0>=-1.1580359797936601?
----- True branch :)
The predicted value --> [-282.40396187]
----- False branch :)
The predicted value --> [-286.26182523]
----- False branch :)
Is 0>=-1.4659196251276936?
----- True branch :)
The predicted value --> [-299.10425103]
----- False branch :)
The predicted value --> [-297.42058634]
----- False branch :)
The predicted value --> [-311.38254493]
----- False branch :)
Is 2>=-0.010422471652352156?
----- True branch :)
The predicted value --> [-324.42954961]
----- False branch :)
Is 0>=-1.0560326183938673?
----- True branch :)
The predicted value --> [-399.08382411]
----- False branch :)
The predicted value --> [-379.47332467]
----- False branch :)
Is 4>=-1.2659722916644216?
----- True branch :)
Is 1>=-0.3416418890911741?
----- True branch :)
Is 0>=-0.8225491060671047?
----- True branch :)
The predicted value --> [-272.7640145]
----- False branch :)
The predicted value --> [-316.62740351]
----- False branch :)
Is 5>=0.3465840418677546?
----- True branch :)
Is 0>=-0.4372294142866808?
----- True branch :)
The predicted value --> [-375.48407413]
----- False branch :)
Is 0>=-1.2970085462367826?
----- True branch :)
The predicted value --> [-365.39165501]
----- False branch :)
The predicted value --> [-358.02501782]
----- False branch :)
Is 0>=-0.71181060690205?
----- True branch :)
The predicted value --> [-332.0265984]
----- False branch :)
The predicted value --> [-356.24545875]
----- False branch :)
Is 0>=-1.8081266577547588?
----- True branch :)
The predicted value --> [-411.60335519]
----- False branch :)
The predicted value --> [-469.29544003]
The r2_score of the trained model 1.0
###Markdown
Decision tree regression using scikit-learn for comparison.
###Code
from sklearn.tree import DecisionTreeRegressor as DecisionTreeRegressor_sklearn
from sklearn.metrics import r2_score  # likely already imported earlier in the notebook

# The data (X, y) is already defined above; reuse the same data for the comparison.
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))

decision_tree_reg_sklearn = DecisionTreeRegressor_sklearn()
decision_tree_reg_sklearn.fit(X, y)

# predict on the same (training) data
y_pred_sklearn = decision_tree_reg_sklearn.predict(X)
score = r2_score(y, y_pred_sklearn)
print("="*100)
print("r2_score of the prediction is {}".format(score))
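
# Note: the perfect score above is computed on the training data itself, which an
# unconstrained decision tree can always fit exactly. As an optional sketch
# (assuming X and y are the arrays defined earlier), a held-out split gives a
# fairer estimate of generalisation:
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
tree_holdout = DecisionTreeRegressor_sklearn().fit(X_tr, y_tr)
print("r2_score on held-out data is {}".format(r2_score(y_te, tree_holdout.predict(X_te))))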
###Output
====================================================================================================
r2_score of the prediction is 1.0
|
course_2.0/05_CNN_practical.ipynb | ###Markdown
CNN Practical: Inelastic Neutron ScatteringInelastic neutron scattering (INS) can be used to infer information about the forces present in a material. Neutrons scatter off a sample, exchanging energy with certain fundamental vibrational modes of the sample. These vibrational modes include phonons (interatomic bonding forces) and magnons (spin coupling between magnetic nuclei). [Johnstone et al. (2012)](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.109.237202) have simulated magnon spectra from a double perovskite system, where INS was used to distinguish between two possible magnetic Hamiltonians of the system. For this practical, we have simulated datasets for each of the possible Hamiltonians. We are going to train a CNN to classify the system correctly.
###Code
# tensorflow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout, BatchNormalization
# check version
print('Using TensorFlow v%s' % tf.__version__)
acc_str = 'accuracy' if tf.__version__[:2] == '2.' else 'acc'
# helpers
import h5py
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
Google Cloud Storage BoilerplateThe following two cells have some boilerplate to mount the Google Cloud Storage bucket containing the data used for this notebook to your Google Colab file system. To access the data, you need to:1. Run the first cell;2. Follow the link when prompted (you may be asked to log in with your Google account);3. Copy the Google SDK token back into the prompt and press `Enter`;4. Run the second cell and wait until the data folder appears.If everything works correctly, a new folder called `sciml-workshop-data` should appear in the file browser on the left. Depending on the network speed, this may take one or two minutes. Ignore the warning "You do not appear to have access to project ...". If you are running the notebook locally or you have already connected to the bucket, these cells will have no effect.
###Code
# variables passed to bash; do not change
project_id = 'sciml-workshop'
bucket_name = 'sciml-workshop'
colab_data_path = '/content/sciml-workshop-data/'
try:
from google.colab import auth
auth.authenticate_user()
google_colab_env = 'true'
data_path = colab_data_path
except:
google_colab_env = 'false'
###################################################
######## specify your local data path here ########
###################################################
data_path = './sciml-workshop-data/'
%%bash -s {google_colab_env} {colab_data_path} {project_id} {bucket_name}
# running locally
if ! $1; then
echo "Running notebook locally."
exit
fi
# already mounted
if [ -d $2 ]; then
echo "Data already mounted."
exit
fi
# mount the bucket
echo "deb http://packages.cloud.google.com/apt gcsfuse-bionic main" > /etc/apt/sources.list.d/gcsfuse.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt -qq update
apt -qq install gcsfuse
gcloud config set project $3
mkdir $2
gcsfuse --implicit-dirs --limit-bytes-per-sec -1 --limit-ops-per-sec -1 $4 $2
###Output
_____no_output_____
###Markdown
--- The datasetWe have already split the data into training and validation sets and saved them into two HDF5 files, `ins-data/train.h5` and `ins-data/test.h5`, containing respectively 20,000 and 6,676 INS images and their one-hot encoded labels identifying an image as either being of the *Dimer* or *Goodenough* Hamiltonian. The `tf.data.Dataset` classThe number of images is so large that we may not be able to simultaneously load the whole dataset into memory on a small machine. To solve this issue, we will use [tensorflow.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) to create an interface pointing to the files, which can load the data from disk on the fly when they are actually required.
###Code
# define image size
IMG_HEIGHT = 20
IMG_WIDTH = 200
N_CHANNELS = 1
N_CLASSES = 2
# generator
def hdf5_generator(path, buffer_size=32):
""" Load data INS data from disk
Args:
path: path of the HDF5 file on disk
buffer_size: number of images to read from disk
"""
with h5py.File(path, 'r') as handle:
n_samples, h, w, c = handle['images'].shape
for i in range(0, n_samples, buffer_size):
images = handle['images'][i:i+buffer_size, ..., :1]
labels = handle['labels'][i:i+buffer_size]
yield images, labels
# training data
train_dataset = tf.data.Dataset.from_generator(lambda: hdf5_generator(path=data_path + 'ins-data/train.h5'),
output_types=(tf.float32, tf.float32),
output_shapes=((None, IMG_HEIGHT, IMG_WIDTH, N_CHANNELS),
(None, N_CLASSES,)))
# test data
test_dataset = tf.data.Dataset.from_generator(lambda: hdf5_generator(path=data_path + 'ins-data/test.h5'),
output_types=(tf.float32, tf.float32),
output_shapes=((None, IMG_HEIGHT, IMG_WIDTH, N_CHANNELS),
(None, N_CLASSES,)))
# print
print(train_dataset)
print(test_dataset)
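
# Optional (a sketch, not needed for the rest of the notebook): tf.data can overlap
# the on-the-fly disk reads above with downstream computation by prefetching batches
# in the background; uncomment to enable.
# train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
# test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE)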
###Output
_____no_output_____
###Markdown
Load and use dataIn the following cell, we will load the first buffer (with 32 samples by default) into memory and plot some images and labels from it:
###Code
# load the first buffer (with 32 samples by default)
images, labels = list(test_dataset.take(1))[0]
# plot some images and labels from it
nplot = 10
fig, axes = plt.subplots(nplot // 2, 2, figsize=(16, nplot / 1.5), dpi=100)
for ax, image, label in zip(axes.flatten(), images, labels):
ax.matshow(np.squeeze(image))
ax.set_xlabel('0: Dimer' if label[0] < .5 else '1: Goodenough', c='k')
ax.set_xticks([])
ax.set_yticks([])
###Output
_____no_output_____ |
Hacker_rank/Cracking_the_Coding_Interview/Stacks_Balanced_Brackets_Solution.ipynb | ###Markdown
Stacks: Balanced BracketsA bracket is considered to be any one of the following characters: `(`, `)`, `{`, `}`, `[`, or `]`.Two brackets are considered to be a matched pair if an opening bracket (i.e., `(`, `[`, or `{`) occurs to the left of a closing bracket (i.e., `)`, `]`, or `}`) of the exact same type. There are three types of matched pairs of brackets: `[]`, `{}`, and `()`.A matching pair of brackets is not balanced if the set of brackets it encloses is not matched. For example, `{[(])}` is not balanced because the contents in between `{` and `}` are not balanced. The pair of square brackets encloses a single, unbalanced opening bracket, `(`, and the pair of parentheses encloses a single, unbalanced closing square bracket, `]`.Some examples of balanced brackets are `[]{}()`, `[({})]{}()` and `({(){}[]})[]`.By this logic, we say a sequence of brackets is considered to be balanced if the following conditions are met:- It contains no unmatched brackets.- The subset of brackets enclosed within the confines of a matched pair of brackets is also a matched pair of brackets.Given strings of brackets, determine whether each sequence of brackets is balanced. If a string is balanced, print `YES` on a new line; otherwise, print `NO` on a new line. Input FormatThe first line contains a single integer, $n$, denoting the number of strings. Each line $i$ of the $n$ subsequent lines consists of a single string, $s$, denoting a sequence of brackets. Constraints- $1 \le n \le 10^3$- $1 \le length(s) \le 10^3$, where $length(s)$ is the length of the sequence.- Each character in the sequence will be a bracket (i.e., `{`, `}`, `(`, `)`, `[`, and `]`). Output FormatFor each string, print whether or not the string of brackets is balanced on a new line. If the brackets are balanced, print `YES`; otherwise, print `NO`. Sample Input```3{[()]}{[(])}{{[[(())]]}}``` Sample Output```YESNOYES``` Explanation1. The string `{[()]}` meets both criteria for being a balanced string, so we print `YES` on a new line.2. The string `{[(])}` is not balanced, because the brackets enclosed by the matched pairs `[(]` and `(])` are not balanced. Thus, we print `NO` on a new line.3. The string `{{[[(())]]}}` meets both criteria for being a balanced string, so we print `YES` on a new line.
###Code
def is_matched(expression):
"""
    1. expression: a string of brackets
    2. Build a dict d that maps each opening bracket in '(){}[]' to its closing bracket
    3. Define remove_matched, which removes one adjacent matched pair of brackets from expression at a time
    4. Since expression has at most 1000 characters, call remove_matched at most 500 times - break if it returns "error"
    5. Return True if the remaining string is empty (balanced), otherwise return False
"""
iparens = iter('(){}[]')
d = dict(zip(iparens, iparens))
def remove_matched(expression_str):
"""
        1. expression_str: the (partially reduced) expression string
        2. If the character is not a key of the matching dict d (i.e. it is a closing bracket), or idx is the last index of expression_str, return "error"
        3. If the character's matching closing bracket equals the next character, remove those two characters from expression_str and return it
"""
for idx,e in enumerate(expression_str):
if d.get(e) == None or idx == (len(expression_str)-1):
expression_str = "error"
break
if d[e] == expression_str[idx+1]:
expression_str = expression_str[:idx] + expression_str[idx+2:]
break
return expression_str
for _ in range(500):
expression = remove_matched(expression)
if expression == "error":
break
if len(expression) == 0:
return True
return False
t = int(input().strip())
for a0 in range(t):
expression = input().strip()
if is_matched(expression) == True:
print("YES")
else:
print("NO")
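
# For reference, a sketch of the canonical stack-based approach that the problem
# title hints at (not part of the submitted solution above, which instead
# repeatedly removes adjacent matched pairs):
def is_matched_stack(expression):
    closers = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in expression:
        if ch in '([{':
            stack.append(ch)
        # per the constraints every other character is a closing bracket
        elif not stack or stack.pop() != closers[ch]:
            return False
    return len(stack) == 0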
###Output
3
{[()]}
YES
{[(])}
NO
{{[[(())]]}}
YES
|
Lec03_DataFrames.ipynb | ###Markdown
DataframesAnother important technique in programming languages is being able to create "composite" variables which have several different parts to them. These are used to group things together. In the most general case these are called "structures" or "classes" in Python (as well as in many other programming languages). Before we look at how to create general classes and structures, we will look at a specific example, the Dataframe. Dataframes are a particular example of a class/structure in Python. They are a very useful tool and show what can be done with the classes, structures and functions that we will learn about. Here we use Dataframes together with Numpy arrays to make some plots of sea level versus time for different locations.Dataframes are very much like spreadsheets, but geared toward use in a program.
###Code
import pandas
import matplotlib.pyplot as plt
# Let's create a really simple Dataframe
mydataset = { 'course number': [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ,
'course name': ["CEE", "Mech E", "DMSE", "Arch", "Chem", "EECS", "Bio", "Phys", "BCS", "Chem E"]}
myDF=pandas.DataFrame(mydataset)
# Now we have a simple table
myDF
# We can look up a course number!
myDF[myDF['course number']==10]
# Note we also used a Dictionary to initialize the DataFrame, e.g.
print("Type of variable mydataset =", type(mydataset))
# The dictionary keys are
print("Dictionary keys ", mydataset.keys())
# As a "Class", the Dataframe package includes all sorts of nice features.
#
# For example it knows how to find and read HTML tables from a simple page.
#
# Here is an example scraping some tide-gauge data measuring sea-surface height.
# The site https://www.psmsl.org maintains tide gauge data for sea level at locations all around the world,
# including Woods Hole.
#
# The site page "obtaining/" has a table that pandas can identify and read.
#
# This table lists tide gauges all over the planet that measure the sea-surface height.
#
sl_list = pandas.read_html("https://www.psmsl.org/data/obtaining/")[0]
sl_list
# As a "Class" Pandas includes useful functions for its sorts of data.
# For example it includes code for searching its tables, which we can use to find
# data from Woods Hole.
swh=sl_list[sl_list['Station Name'].str.contains('WOODS')==True]
swh
# We can then read the Woods Hole data and extract it into a Numpy array
# and make a plot
# 1. construct station URL using ID field (per PSMSL documentation)
# 2. read data
# 3. convert table to an array of rows and columns
# 4. plot height v time
whurl="http://www.psmsl.org/data/obtaining/rlr.monthly.data/%d.rlrdata"%(swh['ID'])
print(whurl)
df = pandas.read_csv(whurl,delimiter=';')
npdat =df.to_numpy()
t=npdat[:,0];h=npdat[:,1]
plt.rcParams['figure.figsize'] = [20, 10]
plt.plot(t,(h-h[0])/10);
# Uh-oh, that looks weird!
# Check the minimum - it has a "bad-data" flag by the looks of it.
print("Min h =",h.min())
# Fix and try again
df = pandas.read_csv(whurl,delimiter=';')
df=df[df.iloc[:,1]>=-1000]
npdat =df.to_numpy()
t=npdat[:,0];h=npdat[:,1]
plt.rcParams['figure.figsize'] = [20, 10]
plt.plot(t,(h-h[0])/10);
plt.xlabel("Year",fontsize=20)
plt.ylabel("Tide Height (cm)",fontsize=20)
# Sea-level looks to have risen by about 30cm at the Woods Hole location over the last 100 years.
# Now lets look at another location - this time near Finland
sfi=sl_list[sl_list['Station Name'].str.contains('FOGLO')==True]
sfi
fiurl="http://www.psmsl.org/data/obtaining/rlr.monthly.data/%d.rlrdata"%(sfi['ID'])
print(fiurl)
df = pandas.read_csv(fiurl,delimiter=';')
df=df[df.iloc[:,1]>=-1000]
npdat =df.to_numpy()
t=npdat[:,0];h=npdat[:,1]
plt.rcParams['figure.figsize'] = [20, 10]
plt.plot(t,(h-h[0])/10)
plt.xlabel("Year",fontsize=20)
plt.ylabel("Tide Height (cm)",fontsize=20)
# Any thoughts why the series near Finland appears to go down by ~20cm over 100 years!
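# (Optional sketch, not part of the original lecture) A rough linear trend for the
# Foglo series currently held in t and h, estimated with numpy's polyfit.
# Dividing by 10 converts mm to cm as in the plot above, so the slope is in cm/year.
import numpy as np
t_arr = np.asarray(t, dtype=float)
h_arr = np.asarray(h, dtype=float)
slope = np.polyfit(t_arr, (h_arr - h_arr[0]) / 10.0, 1)[0]
print("Approximate linear trend near Finland: %.2f cm/year" % slope)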
###Output
_____no_output_____ |
Lesson 5: Introduction to PyTorch/My Version/2_Defining_Neural_Networks_in_PyTorch_(plus_Forward_Pass).ipynb | ###Markdown
Previously, we did a binary classification using weight matrices and matrix multiplications. In this notebook, we will:- learn the preliminaries such as downloading existing datasets, loading data and creating training and test datasets- learn how to work on a multi-class classification problem using MNIST images as our dataset- learn how to solve it using two ways: weight matrices and matrix multiplications, and Pytorch's `nn` module- learn how to build a simple network for solving that problem 'til forward pass PreliminariesHere we discuss:- MNIST dataset- how to: - download existing datasets - load data - create training and test datasets MNIST Dataset- consists of greyscale handwritten digits - each image is 28x28 pixels Downloading and Loading Data
###Code
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
!wget https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/3bd7dea850e936d8cb44adda8200e4e2b5d627e3/intro-to-pytorch/helper.py
import helper
import matplotlib.pyplot as plt
### Run this cell
# package for downloading and loading datasets
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
# Load training data
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# batch_size = 64
# -the number of images we get in one iteration
# from the data loader and pass through our network,
# often called a batch
# shuffle = True
# - tells it to shuffle the dataset every time we start
# going through the data loader again
###Output
_____no_output_____
###Markdown
Now that's set, let's get those images and labels. There are (2) Two Ways to go through the batches:

1. for loop - we usually use this one as we want it to go over all batches in our dataset.

```python
for image, label in trainloader:
    # do things with images and labels
    pass
```

2. iter() - this one is much more convenient when you want to somewhat manually load the batches. The snippet below loads only the first batch.

```python
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)
```
###Code
# we will use the iter() for now
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)
# looking to one of the images
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
###Output
_____no_output_____
###Markdown
Building a Simple Network (plus Forward Pass) for Multi-class Classification Before anything else, what we're seeing so far are **fully-connected networks**.**Fully-connected networks (a.k.a. dense networks)**- Each unit in one layer is connected to each unit in the next layer. - *requirement:* the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples) 1. Using Weight Matrices and Matrix MultiplicationThis is the same as the previous notebook, the differences here are:- we have a different dataset- different activation functions - sigmoid function for the hidden layer - softmax function for the output layer recalling the formula: $$\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_k^K{e^{x_k}}}$$ Also, given the requirement above, we have to do *flattening*.**Flattening**- conversion of two or many dimensional tensors to 1D vectors In our case, since our dataset has a shape of 28x28, that is `(64, 1, 28, 28)` where - 64 is the batch size- 1 is the channel (here only greyscale)- 28,28 is the 28x28 image, we want to turn that 28x28 into a 1D vector, which means it will only be 784, that is `(64, 1, 784)`
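As a quick numeric sanity check of the softmax formula above (illustrative values only, not part of the original lesson):
```python
import torch
x = torch.tensor([[1.0, 2.0, 3.0]])
p = torch.exp(x) / torch.sum(torch.exp(x), dim=1).view(-1, 1)
print(p)        # roughly tensor([[0.0900, 0.2447, 0.6652]])
print(p.sum())  # the probabilities sum to 1.0
```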
###Code
def activation(x):
return 1/(1+torch.exp(-x))
# Flatten the input images
# .view is for reshaping
# images.shape[0] is the batch size
# -1 is for flattening (can be 784; -1 is a short way of doing it)
inputs = images.view(images.shape[0], -1)
# Create parameters
w1 = torch.randn(784, 256)
b1 = torch.randn(256)
w2 = torch.randn(256, 10)
b2 = torch.randn(10)
h = activation(torch.mm(inputs, w1) + b1)
out = torch.mm(h, w2) + b2
# we now have 10 outputs for our network
#We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to.
def softmax(x):
return torch.exp(x)/torch.sum(torch.exp(x), dim=1).view(-1, 1)
# dim=1 sums across the columns within each row (one sum per row),
# while dim=0 would sum down the rows (one sum per column)
probabilities = softmax(out)
# Shape should be (64, 10)
# 64 images
# 10 classes
print(probabilities.shape)
# Summation of the probabilities should be 1
print(probabilities.sum(dim=1))
###Output
torch.Size([64, 10])
tensor([1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000])
###Markdown
The `out` values above should look close to uniform: since we have an untrained network which hasn't seen any data yet, it just returns a roughly uniform distribution with equal probabilities for each class. 2. Using the `nn` PyTorch module Python `nn` module - makes building networks much simpler- we can do what we did in 1 using this one
###Code
from torch import nn
###Output
_____no_output_____
###Markdown
(2) Two Ways to Build Neural Nets using the `nn` module 1. Without using `nn.Sequential` Defining Neural Nets
###Code
#inheriting from nn.Module
# * The name of the class itself can be anything.
class Network(nn.Module):
#together with the nn.Module
# creates a class that tracks the architecture and
# provides a lot of useful methods and attributes.
# * It is mandatory to inherit from nn.Module when
# you're creating a class for your network.
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
# - creates a module for a linear transformation,
# xW+b , with 784 inputs and 256 outputs and
# assigns it to self.hidden.
# - module automatically creates the weight
# and bias tensors which we'll use in the forward method.
# * access the weight and bias tensors once the network (net)
# is created with net.hidden.weight and net.hidden.bias.
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
# this creates another linear transformation with 256 inputs and 10 outputs.
self.output = nn.Linear(256, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
# nn.Softmax(dim=1) calculates softmax across the columns
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
return x
# Create the network and look at it's text representation
model = Network()
model
###Output
_____no_output_____
###Markdown
***A More Concise Way of Defining Networks***- here we use the `torch.nn.functional` module- this is the most common way you'll see networks defined as many operations are simple element-wise functions.- convention: we normally import this module as `F`, `import torch.nn.functional as F`.
###Code
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
def forward(self, x):
# Hidden layer with sigmoid activation
x = F.sigmoid(self.hidden(x))
# Output layer with softmax activation
x = F.softmax(self.output(x), dim=1)
return x
###Output
_____no_output_____
###Markdown
**ASIDE: Activation Functions**So far we've only been looking at the sigmoid activation function, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent), and ReLU (rectified linear unit).In practice, the ReLU function is used almost exclusively as the activation function for hidden layers.The example is as below:
###Code
class Network(nn.Module):
def __init__(self):
super().__init__()
# Defining the layers, 128, 64, 10 units each
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
# Output layer, 10 units - one for each digit
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
''' Forward pass through the network, returns the output logits '''
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.softmax(x, dim=1)
return x
model = Network()
model
###Output
_____no_output_____
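###Markdown
For a quick numeric feel for the three activation functions mentioned above, this small sketch (not part of the original lesson) evaluates them on a few values using torch's built-in functions:
```python
import torch
x = torch.linspace(-3, 3, 7)
print(torch.sigmoid(x))  # squashes values into (0, 1)
print(torch.tanh(x))     # squashes values into (-1, 1)
print(torch.relu(x))     # zeroes out negatives, keeps positives unchanged
```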
###Markdown
***Remark:***It's good practice to name your layers by their type of network, for instance 'fc' to represent a fully-connected layer as we did above. **ASIDE: Initializing weights and biases*****Remark:*** - Weights and such are automatically initialized for you, but it's possible to customize how they are initialized.- The weights and biases are tensors attached to the layer you defined, you can get them with model.fc1.weight for instance.For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with *model.fc1.weight.data*. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
###Code
# Set biases to all zeros
model.fc1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)
###Output
_____no_output_____
###Markdown
Forward PassWe try passing an image to our defined neural network.
###Code
# Grab some data
dataiter = iter(trainloader)
images, labels = dataiter.next()
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
***Remark:***As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random! 2. Using `nn.Sequential` **`nn.Sequential`**- a convenient way to build networks like this where a tensor is passed sequentially through operations (2) Two Ways to use `nn.Sequential` 1. Without Dictionary
###Code
# the equivalent network is
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
###Output
Sequential(
(0): Linear(in_features=784, out_features=128, bias=True)
(1): ReLU()
(2): Linear(in_features=128, out_features=64, bias=True)
(3): ReLU()
(4): Linear(in_features=64, out_features=10, bias=True)
(5): Softmax(dim=1)
)
###Markdown
***Remark: On Accessing the Model Operations***- The operations are available by passing in the appropriate index. For example, if you want to get the first Linear operation and look at the weights, you'd use model[0]. 2. With Dictionary
###Code
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
###Output
_____no_output_____
###Markdown
***Remark: On Accessing the Model Operations***- instead of using indices **only**, we **can also** use a dictionary to name the individual layers and operations *Note: dictionary keys must be unique, so *each operation must have a different name.*
###Code
print(model[0])
print(model.fc1)
###Output
Linear(in_features=784, out_features=128, bias=True)
Linear(in_features=784, out_features=128, bias=True)
|
main/.ipynb_checkpoints/run-checkpoint.ipynb | ###Markdown
Setup
###Code
%load_ext autoreload
%autoreload 2
import os
import sys
import cv2
import math
import numpy as np
import argparse
import warnings
sys.path.append('../pyslam/')
from config import Config
from visual_odometry import VisualOdometry
from camera import PinholeCamera
from ground_truth import groundtruth_factory
from dataset import dataset_factory
import matplotlib.pyplot as plt
from glob import glob
from feature_tracker import feature_tracker_factory, FeatureTrackerTypes
from feature_manager import feature_manager_factory
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from feature_matcher import feature_matcher_factory, FeatureMatcherTypes
from tqdm import tqdm
from feature_tracker_configs import FeatureTrackerConfigs
warnings.filterwarnings("ignore")
model_config = FeatureTrackerConfigs.test_configs
folders = os.listdir('../data/dataset/sequences/')
folders.sort()
folders
exp_name = 'T1_SIFT'
print('Experiment: ', exp_name)
for f in folders:
print('Folder: ',f)
config = Config(f)
dataset = dataset_factory(config.dataset_settings)
groundtruth = groundtruth_factory(config.dataset_settings)
cam = PinholeCamera(config.cam_settings['Camera.width'], config.cam_settings['Camera.height'],
config.cam_settings['Camera.fx'], config.cam_settings['Camera.fy'],
config.cam_settings['Camera.cx'], config.cam_settings['Camera.cy'],
config.DistCoef, config.cam_settings['Camera.fps'])
num_features=2000
tracker_config = model_config[exp_name]
tracker_config['num_features'] = num_features
feature_tracker = feature_tracker_factory(**tracker_config)
vo = VisualOdometry(cam, groundtruth, feature_tracker)
traj_img_size = 800
traj_img = np.zeros((traj_img_size, traj_img_size, 3), dtype=np.uint8)
half_traj_img_size = int(0.5*traj_img_size)
draw_scale = 1
result = []
for img_id in tqdm(range(dataset.max_frame_id)):
img = dataset.getImage(img_id)
if img is not None:
vo.track(img, img_id)
tmp = np.reshape(np.hstack((vo.cur_R, vo.cur_t)), 12)
result.append(' '.join([str(i) for i in tmp]))
res_base_path = os.path.join('../data/results/', exp_name)
res_folder_path = os.path.join(res_base_path, f+'.txt')
os.makedirs(res_base_path, exist_ok=True)
txt_file=open(res_folder_path, 'a')
txt_file.writelines("%s\n" % i for i in result)
txt_file.close()
print('Finished till:', exp_name)
###Output
_____no_output_____ |
python/moved-from-mxnet/composite_symbol.ipynb | ###Markdown
Composite symbols into componentIn this example we will show how to make an Inception network by composing single symbols into components.Inception is currently the best model. Compared to other models, it has far fewer parameters, with the best performance. However, it is much more complex than a sequential feedforward network.The Inception network in this example refers to ```Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." arXiv preprint arXiv:1502.03167 (2015).```
###Code
import mxnet as mx
###Output
_____no_output_____
###Markdown
For a complex network such as the Inception network, building it from single symbols is painful, so we can make a simple ```ComponentFactory``` to simplify the procedure.Apart from differences in the number of filters, we find 2 major differences in each Inception module, so we can build two factories plus one basic ```Convolution + BatchNorm + ReLU``` factory to simplify the problem.
###Code
# Basic Conv + BN + ReLU factory
def ConvFactory(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, suffix=''):
conv = mx.symbol.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='conv_%s%s' %(name, suffix))
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, eps=1e-5 + 1e-10, momentum=0.9, name='bn_%s%s' %(name, suffix))
act = mx.symbol.Activation(data=bn, act_type='relu', name='relu_%s%s' %(name, suffix))
return act
###Output
_____no_output_____
###Markdown
We can visualize our basic component
###Code
prev = mx.symbol.Variable(name="Previos Output")
conv_comp = ConvFactory(data=prev, num_filter=64, kernel=(7,7), stride=(2, 2))
mx.viz.plot_network(symbol=conv_comp)
###Output
_____no_output_____
###Markdown
The next step is making a component factory with all ```stride=(1, 1)```
###Code
# param mapping to paper:
# num_1x1 >>> #1x1
# num_3x3red >>> #3x3 reduce
# num_3x3 >>> #3x3
# num_d3x3red >>> double #3x3 reduce
# num_d3x3 >>> double #3x3
# pool >>> Pool
# proj >>> proj
def InceptionFactoryA(data, num_1x1, num_3x3red, num_3x3, num_d3x3red, num_d3x3, pool, proj, name):
# 1x1
c1x1 = ConvFactory(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = ConvFactory(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_proj' % name))
# concat
concat = mx.symbol.Concat(*[c1x1, c3x3, cd3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
# We can also visualize network with feature map shape information
# In this case, we must provide all necessary input shape info as a dict
prev = mx.symbol.Variable(name="Previos Output")
in3a = InceptionFactoryA(prev, 64, 64, 64, 64, 96, "avg", 32, name="in3a")
# shape info
# Note shape info must contain batch size although we ignore batch size in graph to save space
batch_size = 128
shape = {"Previos Output" : (batch_size, 3, 28, 28)}
# plot
mx.viz.plot_network(symbol=in3a, shape=shape)
###Output
_____no_output_____
###Markdown
We will make the other factory with ```stride=(2, 2)```
###Code
# param mapping to paper:
# num_1x1 >>> #1x1 (not exist!)
# num_3x3red >>> #3x3 reduce
# num_3x3 >>> #3x3
# num_d3x3red >>> double #3x3 reduce
# num_d3x3 >>> double #3x3
# pool >>> Pool (not needed, all are max pooling)
# proj >>> proj (not exist!)
def InceptionFactoryB(data, num_3x3red, num_3x3, num_d3x3red, num_d3x3, name):
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type="max", name=('max_pool_%s_pool' % name))
# concat
concat = mx.symbol.Concat(*[c3x3, cd3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
prev = mx.symbol.Variable(name="Previos Output")
in3c = InceptionFactoryB(prev, 128, 160, 64, 96, name='in3c')
mx.viz.plot_network(symbol=in3c)
###Output
_____no_output_____
###Markdown
Now we can use these factories to build the whole network
###Code
# data
data = mx.symbol.Variable(name="data")
# stage 1
conv1 = ConvFactory(data=data, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3), name='1')
pool1 = mx.symbol.Pooling(data=conv1, kernel=(3, 3), stride=(2, 2), name='pool_1', pool_type='max')
# stage 2
conv2red = ConvFactory(data=pool1, num_filter=64, kernel=(1, 1), stride=(1, 1), name='2_red')
conv2 = ConvFactory(data=conv2red, num_filter=192, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='2')
pool2 = mx.symbol.Pooling(data=conv2, kernel=(3, 3), stride=(2, 2), name='pool_2', pool_type='max')
# stage 2
in3a = InceptionFactoryA(pool2, 64, 64, 64, 64, 96, "avg", 32, '3a')
in3b = InceptionFactoryA(in3a, 64, 64, 96, 64, 96, "avg", 64, '3b')
in3c = InceptionFactoryB(in3b, 128, 160, 64, 96, '3c')
# stage 3
in4a = InceptionFactoryA(in3c, 224, 64, 96, 96, 128, "avg", 128, '4a')
in4b = InceptionFactoryA(in4a, 192, 96, 128, 96, 128, "avg", 128, '4b')
in4c = InceptionFactoryA(in4b, 160, 128, 160, 128, 160, "avg", 128, '4c')
in4d = InceptionFactoryA(in4c, 96, 128, 192, 160, 192, "avg", 128, '4d')
in4e = InceptionFactoryB(in4d, 128, 192, 192, 256, '4e')
# stage 4
in5a = InceptionFactoryA(in4e, 352, 192, 320, 160, 224, "avg", 128, '5a')
in5b = InceptionFactoryA(in5a, 352, 192, 320, 192, 224, "max", 128, '5b')
# global avg pooling
avg = mx.symbol.Pooling(data=in5b, kernel=(7, 7), stride=(1, 1), name="global_pool", pool_type='avg')
# linear classifier
flatten = mx.symbol.Flatten(data=avg, name='flatten')
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=1000, name='fc1')
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
# if you like, you can visualize full network structure
mx.viz.plot_network(symbol=softmax, shape={"data" : (128, 3, 224, 224)})
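# (Optional sketch, not part of the original example) The composed symbol can also be
# inspected programmatically: list_arguments() returns the names of the learnable
# parameters, and infer_shape() propagates a given input shape through the graph.
print(softmax.list_arguments()[:6])
arg_shapes, out_shapes, aux_shapes = softmax.infer_shape(data=(128, 3, 224, 224))
print(out_shapes)  # expected: [(128, 1000)]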
###Output
_____no_output_____ |
nlu/colab/Component Examples/Named_Entity_Recognition_(NER)/NLU_Named_Entity_Recognition_CONLL_2003_5class_example.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/collab/Named_Entity_Recognition_(NER)/NLU_Named_Entity_Recognition_CONLL_2003_5class_example.ipynb)Named entities are phrases that contain the names of persons, organizations, locations, times and quantities. Example:[ORG **U.N.** ] official [PER **Ekeus** ] heads for [LOC **Baghdad** ] . https://www.aclweb.org/anthology/W03-0419.pdf CoNLL-2003 is a NER dataset that is available in English and German. NLU provides pretrained models for both of these languages.It features **5 classes** of tags: **LOC (location)**, **ORG (organisation)**, **PER (persons)** and a fourth tag which describes all the named entities that do not belong to any of the three previously mentioned tags, **(MISC)**. The fifth class **(O)** is used for tokens which belong to no named entity.

|Tag | Description |
|------|--------------|
|PER | A person like **Jim** or **Joe** |
|ORG | An organisation like **Microsoft** or **PETA**|
|LOC | A location like **Germany**|
|MISC | Anything else like **Playstation** |
|O| Everything that is not an entity. |

The shared task of [CoNLL-2003 concerns](https://www.clips.uantwerpen.be/conll2003/) language-independent named entity recognition. We will concentrate on four types of named entities: persons, locations, organizations and names of miscellaneous entities that do not belong to the previous three groups. The participants of the shared task will be offered training and test data for two languages. They will use the data for developing a named-entity recognition system that includes a machine learning component. For each language, additional information (lists of names and non-annotated data) will be supplied as well. The challenge for the participants is to find ways of incorporating this information in their system.
###Code
import os
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install nlu > /dev/null
###Output
_____no_output_____
###Markdown
NLU makes NER easy. You just need to load the NER model via nlu.load('ner') and predict on some dataset. It could be a pandas dataframe with a column named text or just an array of strings.
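For example, passing a pandas DataFrame instead of a list works the same way (a small illustrative sketch; the column must be named `text`):
```python
import pandas as pd
df_example = pd.DataFrame({'text': ["Angela Merkel met Tim Cook at Apple in California"]})
nlu.load('ner').predict(df_example)
```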
###Code
import nlu
example_text = ["A person like Jim or Joe",
"An organisation like Microsoft or PETA",
"A location like Germany",
"Anything else like Playstation",
"Person consisting of multiple tokens like Angela Merkel or Donald Trump",
"Organisations consisting of multiple tokens like JP Morgan",
"Locations consiting of multiple tokens like Los Angeles",
"Anything else made up of multiple tokens like Super Nintendo",]
nlu.load('ner').predict(example_text)
text = ["Barclays misled shareholders and the public about one of the biggest investments in the bank's history, a BBC Panorama investigation has found.",
"The bank announced in 2008 that Manchester City owner Sheikh Mansour had agreed to invest more than £3bn.",
"But the BBC found that the money, which helped Barclays avoid a bailout by British taxpayers, actually came from the Abu Dhabi government.",
"Barclays said the mistake in its accounts was 'a drafting error'.",
"Unlike RBS and Lloyds TSB, Barclays narrowly avoided having to request a government bailout late in 2008 after it was rescued by £7bn worth of new investment, most of which came from the Gulf states of Qatar and Abu Dhabi.",
"The S&P 500's price to earnings multiple is 71% higher than Apple's, and if Apple were simply valued at the same multiple, its share price would be $840, which is 52% higher than its current price.",
"Alice has a cat named Alice and also a dog named Alice and also a parrot named Alice, it is her favorite name!"
] + example_text
ner_df = nlu.load('ner').predict(text, output_level= 'chunk')
ner_df
###Output
_____no_output_____
###Markdown
Let's explore the predicted NER tags in our data and visualize them! We specify [1:] so we don't see the count for the O-tag, which is the most common, since most words in a sentence are not named entities and thus not part of a chunk
###Code
ner_df['entities'].value_counts()[1:].plot.bar(title='Occurence of Named Entity tokens in dataset')
ner_type_to_viz = 'LOC'
ner_df[ner_df.entities_confidence == ner_type_to_viz]['entities'].value_counts().plot.bar(title='Most often occuring LOC labeled tokens in the dataset')
ner_type_to_viz = 'ORG'
ner_df[ner_df.entities_confidence == ner_type_to_viz]['entities'].value_counts().plot.bar(title='Most often occuring ORG labeled tokens in the dataset')
###Output
_____no_output_____ |
Lab4/.ipynb_checkpoints/lab4-checkpoint.ipynb | ###Markdown
Lab 4: Linear Regression
###Code
# Import necessary python libraries
import numpy as np # For handling array operations
import pandas as pd # For data manipulation and analysis
import matplotlib.pyplot as plt # For figures and graphs
from tabulate import tabulate # For displaying information in tabular form
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
PART A : Prerequisites for linear regression implementation 1. Create an array x = \[1, 1, 2, 3, 4, 3, 4, 6, 4] using numpy. Calculate a function h(x)=t0+t1*x, where t0=1.2 and t1=0.5, for all values of x and plot a graph with x on one axis and h(x)on another axis.
###Code
def h(x):
t0 = 1.2
t1 = 0.5
return (t0 + t1*x)
x = np.array([1, 1, 2, 3, 4, 3, 4, 6, 4])
hx = h(x)
print(tabulate([x, hx]))
plt.plot(x, hx)
plt.xlabel('values in x')
plt.ylabel('h(x)')
plt.show()
###Output
--- --- --- --- --- --- --- --- ---
1 1 2 3 4 3 4 6 4
1.7 1.7 2.2 2.7 3.2 2.7 3.2 4.2 3.2
--- --- --- --- --- --- --- --- ---
###Markdown
2. Create two arrays A and B with the following values using numpy array. Let (Ai,Bi) represent a data point with i th element of A and B. A = \[1, 1, 2, 3, 4, 3, 4, 6, 4] B = [2, 1, 0.5, 1, 3, 3, 2, 5, 4] Find out the dot product of the vectors. [Hint use numpy np.dot(a,b)]
###Code
A = np.array([1, 1, 2, 3, 4, 3, 4, 6, 4])
B = np.array([2, 1, 0.5, 1, 3, 3, 2, 5, 4])
np.dot(A, B)
###Output
_____no_output_____
###Markdown
3. Plot a graph marking the data points (Ai,Bi) with A on the X-axis and B on the Y-axis.
###Code
plt.scatter(A, B)
plt.xlabel('A')
plt.ylabel('B')
plt.show()
###Output
_____no_output_____
###Markdown
4. Calculate Mean Square Error (MSE) of A and B.
###Code
mse = np.sum((A-B)**2) / len(A)
print('Mean Square Error =', mse)
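# (Sketch, added for comparison) Cross-check against scikit-learn's implementation
# of the same mean squared error formula.
from sklearn.metrics import mean_squared_error
print('sklearn MSE =', mean_squared_error(A, B))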
###Output
Mean Square Error = 1.4722222222222223
###Markdown
5. Modify the above equation with the following cost function. Implement as a function with prototype **def compute_cost_function(n,t1,A,B)\:**. Take h(x) =t1*x and t1= 0.5 Modify the above code iterating for different values of t1 and calculate J(t1).Try with t1 =0.1,0.3,0.5,0.7,0.8. Plot a graph with t1 on X-axis and J(t1) on Y-axis. \[hint sum_squared_error = np.square(np.dot(features, theta) -values).sum() cost = sum_squared_error / (2*m)]
###Code
def compute_cost_function(n, t1, A, B):
sum_squared_error = np.sum(np.square(np.dot(t1, A) - B))
cost = sum_squared_error/(2*n)
return cost
t1 = [0.1, 0.3, 0.5, 0.7, 0.8]
cost = []
# Finding the cost for different values of t1 (theta)
for t1_value in t1:
cost.append(compute_cost_function(len(A), t1_value, A, B))
print(tabulate([t1, cost]))
# Plotting the cost for the corresponding value of theta
plt.plot(t1, cost)
plt.xlabel('t1')
plt.ylabel('J(t1)')
plt.show()
###Output
------- ------- -------- -------- --------
0.1 0.3 0.5 0.7 0.8
2.99611 1.65389 0.791667 0.409444 0.398333
------- ------- -------- -------- --------
###Markdown
PART B : Linear Regression Implementation (with one variable) 1. Generate a new data set from student scores with one feature studytime and output variable average grade = (G1+G2+G3)/3
###Code
student_data = pd.read_csv('datasets_student_mat.csv')
df = pd.DataFrame(list(zip(student_data['studytime'], (student_data['G1']+student_data['G2']+student_data['G3'])/3)), columns =['studytime', 'avg_grade'])
df.to_csv("./student_average_grade.csv", sep=',',index=False)
###Output
_____no_output_____
###Markdown
2. Load the new data set
###Code
data = pd.read_csv('student_average_grade.csv')
data.head()
###Output
_____no_output_____
###Markdown
3. Plot data
###Code
plt.scatter(data['studytime'], data['avg_grade'])
plt.xlabel('Study Time')
plt.ylabel('Average Grade')
plt.show()
###Output
_____no_output_____
###Markdown
4. Implement linear regression using inbuilt package python Scikit
###Code
X = data[['studytime']]
y = data[['avg_grade']]
regressor = LinearRegression()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
###Output
_____no_output_____
###Markdown
5. Implement gradient descent algorithm with the function prototype **def gradient_descent(alpha, x, y, max_iter=1500):** where alpha is the learning rate, x is the input feature vector. y is the target. Subject the feature vector to normalisation step if needed. Convergence criteria: when no: of iterations exceed max_iter. \[hint sum_squared_error = np.square(np.dot(features, theta) -values).sum() cost = sum_squared_error / (2*m)]
###Code
learned_params = []
def gradient_descent(alpha, x, y, max_iter=1500):
theta = 0
m = float(len(y))
for i in range(max_iter):
theta = theta - (alpha / m) * np.dot(x.T,(np.dot(x, theta) - y))
learned_params.append(theta)
###Output
_____no_output_____
###Markdown
6. Vary learning rate from 0.1 to 0.9 and observe the learned parameter.
###Code
alpha_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for alpha in alpha_values:
gradient_descent(alpha, X_train, y_train, 100)
def gradientDescent(x, y, alpha, num_iter):
theta = 0
m = np.size(x,axis=0)
for i in range(num_iter):
theta = theta - (alpha / m) * np.dot(x.T,(np.dot(x, theta) - y))
return theta
theta = gradientDescent(X_train, y_train, 0.01, 1500)
theta
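# (Sketch, not part of the original lab) Compare the gradient-descent estimate with
# sklearn's fitted parameters. The gradient-descent model above has no intercept term,
# while LinearRegression fits one by default, so the slopes need not match exactly.
print('Gradient descent theta:', np.ravel(theta))
print('sklearn coef/intercept:', regressor.coef_, regressor.intercept_)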
###Output
_____no_output_____ |
DataScienceLibrariesAssignments/Copy of A_08_GroupingAndAggregationOperationsOnDataFrames_en_SerhanOner.ipynb | ###Markdown
For Pandas assignments, you are going to use [Titanic](https://www.kaggle.com/c/titanic/download/GQf0y8ebHO0C4JXscPPp%2Fversions%2FXkNkvXwqPPVG0Qt3MtQT%2Ffiles%2Ftrain.csv) (train.csv) dataset. Download the dataset and load to a data frame.**(1)** Calculate the average age of the passengers for each gender and passenger class by using `groupby()` method.
###Code
import pandas as pd
import numpy as np
from pandas import read_csv
data = read_csv('train.csv')
df = pd.DataFrame(data)
df.groupby("Sex")["Age"].mean()
df.groupby("Pclass")["Age"].mean()
"""
girls = df.loc[df["Sex"] == 'female', 'Age'].sum()
boys = df.loc[df["Sex"] == 'male', 'Age'].sum()
print("Avg age of girls is: " , girls/261)
print("Avg age of boys is: ", boys/453)
"""
#this is another method that I've found previously
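# (Sketch) The question asks for the average age per gender AND passenger class;
# grouping by both columns at once gives one mean per (Sex, Pclass) combination.
df.groupby(["Sex", "Pclass"])["Age"].mean()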
###Output
_____no_output_____
###Markdown
**(2)** Group by embarkation port and print values. (Notice that you get unique values with this.) ---
###Code
abc = df.groupby("Embarked")
print(df["Embarked"].unique())
###Output
_____no_output_____ |
Analysis/InVivo/Combined/Clinical_Parameters.ipynb | ###Markdown
! gsutil -m cp gs://seqwell/data/IRF_SerialSac/Metadata/IRF-049E_DataPackage_v3_McIlwain_ClinicalScores_table_withTatoos.tsv ../../../../data/InVivo/! mv ../../../../data/InVivo/IRF-049E_DataPackage_v3_McIlwain_ClinicalScores_table_withTatoos.tsv ../../../../data/InVivo/clinical_scores.tsv! gsutil -m cp gs://seqwell/data/IRF_SerialSac/Metadata/IRF_SerialSac_ViralLoads_withAnimalTatID.tsv ../../../../data/InVivo/! mv ../../../../data/InVivo/IRF_SerialSac_ViralLoads_withAnimalTatID.tsv ../../../../data/InVivo/viral_loads.tsv
###Code
clinical_data = pd.read_csv('../../../../data/InVivo/clinical_scores_fixid.tsv', sep='\t', header=[0,1])
challenge_group = pd.Series(clinical_data.iloc[:, 0].values, index=clinical_data.iloc[:, 2].values)
clinical_data.index = clinical_data.iloc[:,2].values
clinical_data = clinical_data.iloc[:,3:]
clinical_data = clinical_data.replace('>9', 10).astype(float)
clinical_data.head()
viralload = pd.read_csv('../../../../data/InVivo/viral_loads_fixid.tsv', sep='\t', index_col=0)
viralload = viralload.rename(columns={'BL':0})
viralload.head()
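# Log10-transform the viral loads; undetectable samples ('UND') are replaced with 1
# copy/mL so that log10 evaluates to 0 and they sit at the bottom of the axis.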
viralload_log = viralload.replace('UND', 1).apply(np.log10)
viralload_log.head()
box_height=1.7
box_width=1.7
title_height=.15
left_space = .15
right_space = .85
bottom_space = .13
top_space = .98
viralload_log_unstack = viralload_log.unstack().reset_index()
viralload_log_unstack.columns = ['Day', 'Animal', 'log_viral_load']
viralload_log_unstack = viralload_log_unstack.dropna()
viralload_log_unstack['Day']=viralload_log_unstack['Day'].astype(int)
#vl_perday_mean = viralload_log_unstack.groupby('Day')['log_viral_load'].mean()
#vl_perday_95 = viralload_log_unstack.groupby('Day')['log_viral_load'].quantile(.95)
#vl_perday_5 = viralload_log_unstack.groupby('Day')['log_viral_load'].quantile(.05)
vl_perday_mean = viralload_log_unstack.groupby('Day')['log_viral_load'].median()
vl_perday_95 = viralload_log_unstack.groupby('Day')['log_viral_load'].min()
vl_perday_5 = viralload_log_unstack.groupby('Day')['log_viral_load'].max()
vl_day_sumary_stats = pd.concat([vl_perday_mean, vl_perday_5, vl_perday_95], axis=1)
vl_day_sumary_stats.columns = ['median', 'min', 'max']
vl_day_sumary_stats
## Full data with all intermediate timepoints within a day
jitter = .1
time_shift = .3
clinical_data_unstack = clinical_data.unstack().reset_index().dropna()
clinical_data_unstack.columns = ['Day', 'Time', 'Animal', 'Clinical_Score']
clinical_data_unstack['Day'] = clinical_data_unstack['Day'].astype(float)
clinical_data_unstack['Day_Shift'] = clinical_data_unstack['Time'].replace({'Early':0, 'Middle':time_shift, 'Late':(time_shift*2)})
clinical_data_unstack['Pos'] = clinical_data_unstack['Day'] + clinical_data_unstack['Day_Shift']
## Per-day summary statistics
#perday_mean = clinical_data_unstack.groupby('Day')['Clinical_Score'].mean()
#perday_95 = clinical_data_unstack.groupby('Day')['Clinical_Score'].quantile(.95)
#perday_5 = clinical_data_unstack.groupby('Day')['Clinical_Score'].quantile(.05)
perday_mean = clinical_data_unstack.groupby('Day')['Clinical_Score'].median()
perday_95 = clinical_data_unstack.groupby('Day')['Clinical_Score'].min()
perday_5 = clinical_data_unstack.groupby('Day')['Clinical_Score'].max()
clinical_day_sumary_stats = pd.concat([perday_mean, perday_5, perday_95], axis=1)
clinical_day_sumary_stats.columns = ['median', 'min', 'max']
clinical_day_sumary_stats
clinical_data_unstack.head()
#a_thresh = 1
ax_width = box_width
fig_width = ax_width/(right_space-left_space)
ax_height = title_height+box_height
fig_height = ax_height/(top_space-bottom_space)
fig_width, fig_height
fig = plt.figure(figsize=(fig_width, fig_height), dpi=200)
gs = gridspec.GridSpec(2, 1, fig,
left_space, bottom_space, right_space,top_space,
hspace=0.0, wspace=0.0,
height_ratios=(title_height, box_height))
ax = fig.add_subplot(gs[1],
xscale='linear', yscale='linear',
frameon=True)
title_ax = fig.add_subplot(gs[0],
xscale='linear', yscale='linear',
frameon=False, xticks=[],yticks=[],
xlim=[0,1],
ylim=[0,1])
title_ax.text(0.50, 1.0, 'Clinical course', va='top', ha='center',
fontsize=11, fontdict=dict(weight='normal'),
clip_on=False)
color = 'tab:red'
ax.set_ylabel('Log RNA copies / mL', color=color, labelpad=.1)
ax.set_xlabel('DPI', labelpad=0.1)
error = [vl_day_sumary_stats['median'] - vl_day_sumary_stats['min'],
vl_day_sumary_stats['max'] - vl_day_sumary_stats['median']]
ax.errorbar(x=vl_day_sumary_stats.index, y=vl_day_sumary_stats['median'],
yerr=error,
fmt='o', ecolor=color, elinewidth=1, markersize=3, color=color, zorder=1)
ax.plot(vl_day_sumary_stats.index, vl_day_sumary_stats['median'], linewidth=1, color=color, zorder=1)
#ax.scatter(viralload_log_unstack.loc[ind, 'Day_jitter'],
# viralload_log_unstack.loc[ind, 'log_viral_load'],
# s=3, color='#901900', zorder=2, edgecolor='None',
# alpha=.7)
ax.axhline(y=3, color=color, linestyle='--')
ax.annotate('LOD', color=color, xy=(0.1, 3.2), size=8)
ax.tick_params(axis='y', labelcolor=color, colors=color)
color = 'tab:blue'
ax2 = ax.twinx()
error = [clinical_day_sumary_stats['median'] - clinical_day_sumary_stats['min'],
clinical_day_sumary_stats['max'] - clinical_day_sumary_stats['median']]
ax2.errorbar(x=clinical_day_sumary_stats.index+.05, y=clinical_day_sumary_stats['median'],
yerr=error,
fmt='o', ecolor=color, elinewidth=1, markersize=3, color=color)
ax2.plot(clinical_day_sumary_stats.index, clinical_day_sumary_stats['median'], linewidth=1, color=color)
#ax2.scatter(df_avg['Day_Jitter'],
# df_avg.loc[:, 'Clinical_Score'],
# s=3, color='#1F4693', zorder=2, edgecolor='None', alpha=.7)
#ax2.scatter(clinical_data_unstack['Day_Jitter'],
# clinical_data_unstack.loc[:, 'Clinical_Score'],
# s=3, color='#1F4693', zorder=2, edgecolor='None', alpha=.7)
ax2.tick_params(axis='y', labelcolor=color, colors=color)
ax2.set_ylabel('Clinical score', color=color, labelpad=.1)
ax2.set_ylim([0,22])
ax.set_ylim([-.05*10.9,10.9])
ax2.set_ylim([-.05*22, 22])
ax.set_xlim([-.5, 8.5])
ax.set_xticks(np.arange(0,9))
x_width=1
ylim = [-.05*10.,10.9]
for i in range(9):
if (i % 2)==0:
ax.axvspan(i-(x_width/2), i+(x_width/2), ymin=ylim[0], ymax=ylim[1], facecolor='gray', alpha=0.1)
figure_file = os.path.join(results_dir, "Clinical.Timecourse.ViralSymptomsSummary.pdf")
fig.savefig(figure_file)
fig = plt.figure(figsize=(5, 6), dpi=200)
full_gs = gridspec.GridSpec(6, 3, fig, left=.01, bottom=.05, right=.95, top=.95,
hspace=0.5, wspace=0, width_ratios=[.2,.75, .05])
group_lab_axes = []
for i in range(6):
ax = fig.add_subplot(full_gs[i, 0], xscale='linear', yscale='linear', frameon=False,
xticks=[], yticks=[])
group_lab_axes.append(ax)
plot_grid = full_gs[:, 1].subgridspec(6, 3,
hspace=0.5, wspace=0.5)
axes = []
for i in range(6):
row_axes = []
for j in range(3):
ax = fig.add_subplot(plot_grid[i, j], xscale='linear', yscale='linear', frameon=True)
row_axes.append(ax)
axes.append(row_axes)
axes = np.array(axes).ravel()
ind = ~challenge_group.isin(['Terminal (no manipulations)'])
challenge_group_subset = challenge_group.loc[ind]
challenge_group_subset = challenge_group_subset.sort_values(ascending=False)
animals = challenge_group_subset.index
clinical_color = 'tab:blue'
load_color = 'tab:red'
for (i,a) in enumerate(animals):
ax2 = axes[i].twinx()
ind = clinical_data_unstack['Animal']==a
vdat = clinical_data_unstack.loc[ind,:]
for j in range(vdat.shape[0]):
# Plot the last point normally
# if j != (vdat.shape[0] - 1):
ax2.scatter(vdat.iloc[j, 5], vdat.iloc[j,3],
color=clinical_color, s=8, edgecolor=clinical_color, zorder=1)
ax2.plot(vdat.iloc[:,5], vdat.iloc[:,3], color=clinical_color, zorder=0)
ax2.tick_params(axis='y', labelcolor=clinical_color, colors=clinical_color)
ind = viralload_log_unstack['Animal']==a
vdat = viralload_log_unstack.loc[ind,:]
axes[i].plot(vdat.iloc[:, 0], vdat.iloc[:,2],
color=load_color, zorder=1)
for j in range(vdat.shape[0]):
# Plot the last point normally
# if j != (vdat.shape[0] - 1):
axes[i].scatter(vdat.iloc[j, 0], vdat.iloc[j,2],
color=load_color, s=8, edgecolor=load_color, zorder=2)
axes[i].axhline(y=3, color=load_color, linestyle='--')
axes[i].tick_params(axis='y', labelcolor=load_color, colors=load_color)
#axes[i].set_ylabel('Log RNA copies / mL', color=color)
axes[i].set_xlim([-.8,8.8])
ax2.set_xlim([-.8,8.8])
axes[i].set_xticks(np.arange(9))
ax2.set_ylim([0,22])
ax2.set_yticks([0, 10, 20])
axes[i].set_ylim([0,10.9])
axes[i].set_yticks([0, 5, 10])
ax2.set_title(a, size=6, pad=-5) # Include the group in the title, or can move this around in Illustrator
# Label only the first LOD
axes[0].annotate('LOD', color=load_color, xy=(2.8, 3.5), size=8)
#challenge_group label
for (i,lab) in enumerate(['Terminal\nroutine\nmanipulation', 'Terminal\nroutine\nmanipulation', 'Scheduled\nD6\nnecropsy', 'Scheduled\nD5\nnecropsy',
'Scheduled\nD4\nnecropsy', 'Scheduled\nD3\nnecropsy',]):
group_lab_axes[i].text(0.03, 0.5,lab, va='center', ha='left', fontsize=6)
left_lab_ax = fig.add_subplot(full_gs[1:5, 0], xscale='linear', yscale='linear', frameon=False,
xticks=[], yticks=[])
left_lab_ax.text(.8, .5, 'Log10 viral load', rotation=90, color=load_color, fontsize=10, va='center', ha='right')
right_lab_ax = fig.add_subplot(full_gs[1:5, 2], xscale='linear', yscale='linear', frameon=False,
xticks=[], yticks=[])
right_lab_ax.text(1., .5, 'Clinical score', rotation=90, color=clinical_color, fontsize=10, va='center', ha='left')
figure_file = os.path.join(results_dir, "Clinical.Timecourse.ViralSymptomsPerAnimal.pdf")
fig.savefig(figure_file)
###Output
_____no_output_____ |
natural_language_processing/spam_classifier.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
df = pd.read_csv('https://raw.githubusercontent.com/ShubhamPy/Spam-Classifier/master/spam.tsv',delimiter='\t',names=['label', 'text'])
df.head(3)
df['label'] = df['label'].apply(lambda x : 1 if x=='ham' else 0)# mapping ham to 1 and spam to 0
df.head(3)
df.shape
df.label.value_counts()
# Cleaning the texts
corpus = []
for i in range(0, df.shape[0]):
review = re.sub('[^a-zA-Z]', ' ', df['text'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
len(corpus)
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
y = df.iloc[:, 0].values
X.shape, y.shape
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
from collections import Counter
c = Counter(y_test)
c #here ham==1 and spam ==0
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
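# (Sketch, added for context) A few summary metrics to read alongside the confusion matrix.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('Accuracy :', accuracy_score(y_test, y_pred))
print('Precision:', precision_score(y_test, y_pred))
print('Recall   :', recall_score(y_test, y_pred))
print('F1 score :', f1_score(y_test, y_pred))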
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
import matplotlib.pyplot as plt
import numpy as np
import itertools
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
plt.show()
plot_confusion_matrix(cm,
normalize = False,
target_names = ['spam','ham'],
title = "Confusion Matrix")
###Output
_____no_output_____ |
jupyter_notebooks/06_roc_sensitivity_specificity.ipynb | ###Markdown
Kaggle Titanic survival - Receiver Operator Characteristic (ROC) curveFrequently in machine learning we wish to go beyond measuring raw accuracy. This is especially true when classes are unbalanced, where errors are often greater in one class than the other. The Receiver Operator Characteristic (ROC) curve allows us to better understand the trade-off between sensitivity (the ability to detect positives of a certain class) and specificity (the ability to detect negatives of a certain class). The area under the ROC curve is also often used to compare different models: a higher Area Under Curve (AUC) is frequently the sign of a better model.The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings. The true-positive rate is also known as sensitivity or recall. The false-positive rate can be calculated as (1 − specificity).In this notebook we repeat our basic logistic regression model as previously described:https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/02_logistic_regression.ipynbWe will extend the model to report a range of accuracy measures, as described:https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/05_accuracy_standalone.ipynbWe use false positive rate and true positive rate to construct our ROC curve. Below we build a method ourselves, by adjusting the cut-off probability used. We will go through the following steps:* Download and save pre-processed data* Split data into features (X) and label (y)* Split data into training and test sets (we will test on data that has not been used to fit the model)* Standardise data* Fit a logistic regression model (from sklearn)* Predict survival probabilities of the test set* ROC version 1: manually construct and plot a ROC curve using a range of cut-off probabilities* Measure the area under the ROC curve* ROC version 2: use sklearn's built-in ROC method Note: To keep this example simple we have used a single random split between training and test data. A more thorough analysis would use repeated measurement using stratified k-fold validation (see https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/03_k_fold.ipynb). Load modulesA standard Anaconda install of Python (https://www.anaconda.com/distribution/) contains all the necessary modules.
###Code
import numpy as np
import pandas as pd
# Import machine learning methods
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Ignore warnigns to keep notebook tidy
import warnings
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Load dataThe section below downloads pre-processed data, and saves it to a subfolder (from where this code is run).If data has already been downloaded that cell may be skipped.Code that was used to pre-process the data ready for machine learning may be found at:https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/01_preprocessing.ipynb
###Code
download_required = True
if download_required:
# Download processed data:
address = 'https://raw.githubusercontent.com/MichaelAllen1966/' + \
'1804_python_healthcare/master/titanic/data/processed_data.csv'
data = pd.read_csv(address)
# Create a data subfolder if one does not already exist
import os
data_directory ='./data/'
if not os.path.exists(data_directory):
os.makedirs(data_directory)
# Save data
data.to_csv(data_directory + 'processed_data.csv', index=False)
data = pd.read_csv('data/processed_data.csv')
# Make all data 'float' type
data = data.astype(float)
###Output
_____no_output_____
###Markdown
The first column is a passenger index number. We will remove this, as this is not part of the original Titanic passenger data.
###Code
# Drop Passengerid (axis=1 indicates we are removing a column rather than a row)
# We drop passenger ID as it is not original data
data.drop('PassengerId', inplace=True, axis=1)
###Output
_____no_output_____
###Markdown
Define function to measure accuracyThe following is a function for multiple accuracy measures.
###Code
def calculate_accuracy(observed, predicted):
"""
Calculates a range of accuracy scores from observed and predicted classes.
Takes two list or NumPy arrays (observed class values, and predicted class
values), and returns a dictionary of results.
1) observed positive rate: proportion of observed cases that are +ve
2) Predicted positive rate: proportion of predicted cases that are +ve
3) observed negative rate: proportion of observed cases that are -ve
4) Predicted negative rate: proportion of predicted cases that are -ve
5) accuracy: proportion of predicted results that are correct
6) precision: proportion of predicted +ve that are correct
7) recall: proportion of true +ve correctly identified
8) f1: harmonic mean of precision and recall
9) sensitivity: Same as recall
10) specificity: Proportion of true -ve identified:
11) positive likelihood: increased probability of true +ve if test +ve
12) negative likelihood: reduced probability of true +ve if test -ve
13) false positive rate: proportion of false +ves in true -ve patients
14) false negative rate: proportion of false -ves in true +ve patients
15) true positive rate: Same as recall
16) true negative rate
17) positive predictive value: chance of true +ve if test +ve
18) negative predictive value: chance of true -ve if test -ve
"""
# Converts list to NumPy arrays
if type(observed) == list:
observed = np.array(observed)
if type(predicted) == list:
predicted = np.array(predicted)
# Calculate accuracy scores
observed_positives = observed == 1
observed_negatives = observed == 0
predicted_positives = predicted == 1
predicted_negatives = predicted == 0
true_positives = (predicted_positives == 1) & (observed_positives == 1)
false_positives = (predicted_positives == 1) & (observed_positives == 0)
true_negatives = (predicted_negatives == 1) & (observed_negatives == 1)
accuracy = np.mean(predicted == observed)
precision = (np.sum(true_positives) /
(np.sum(true_positives) + np.sum(false_positives)))
recall = np.sum(true_positives) / np.sum(observed_positives)
sensitivity = recall
f1 = 2 * ((precision * recall) / (precision + recall))
specificity = np.sum(true_negatives) / np.sum(observed_negatives)
positive_likelihood = sensitivity / (1 - specificity)
negative_likelihood = (1 - sensitivity) / specificity
false_positive_rate = 1 - specificity
false_negative_rate = 1 - sensitivity
true_positive_rate = sensitivity
true_negative_rate = specificity
    positive_predictive_value = (np.sum(true_positives) /
                                 np.sum(predicted_positives))
    negative_predictive_value = (np.sum(true_negatives) /
                                 np.sum(predicted_negatives))
# Create dictionary for results, and add results
results = dict()
results['observed_positive_rate'] = np.mean(observed_positives)
results['observed_negative_rate'] = np.mean(observed_negatives)
results['predicted_positive_rate'] = np.mean(predicted_positives)
results['predicted_negative_rate'] = np.mean(predicted_negatives)
results['accuracy'] = accuracy
results['precision'] = precision
results['recall'] = recall
results['f1'] = f1
results['sensitivity'] = sensitivity
results['specificity'] = specificity
results['positive_likelihood'] = positive_likelihood
results['negative_likelihood'] = negative_likelihood
results['false_positive_rate'] = false_positive_rate
results['false_negative_rate'] = false_negative_rate
results['true_positive_rate'] = true_positive_rate
results['true_negative_rate'] = true_negative_rate
results['positive_predictive_value'] = positive_predictive_value
results['negative_predictive_value'] = negative_predictive_value
return results
###Output
_____no_output_____
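###Markdown
As a quick sanity check (our own addition, not part of the original walk-through), we can call the function on a small hand-made example. The two toy lists below are invented purely for illustration.
###Code
# Hypothetical toy example: observed and predicted classes for eight cases
toy_observed = [1, 0, 1, 1, 0, 0, 1, 0]
toy_predicted = [1, 0, 0, 1, 0, 1, 1, 0]
toy_results = calculate_accuracy(toy_observed, toy_predicted)
for measure, value in toy_results.items():
    print(measure, value)
###Output
_____no_output_____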
###Markdown
Divide into X (features) and y (labels). We will separate out our features (the data we use to make a prediction) from our label (what we are trying to predict). By convention our features are called `X` (usually upper case to denote multiple features), and the label (survive or not) `y`.
###Code
X = data.drop('Survived',axis=1) # X = all 'data' except the 'survived' column
y = data['Survived'] # y = 'survived' column from 'data'
###Output
_____no_output_____
###Markdown
Divide into training and test sets. When we test a machine learning model we should always test it on data that has not been used to train the model. We will use sklearn's `train_test_split` method to randomly split the data: 75% for training, and 25% for testing.
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25)
###Output
_____no_output_____
###Markdown
Standardise data. We want all of our features to be on roughly the same scale. This generally leads to a better model, and also allows us to more easily compare the importance of different features. A common method used in many machine learning methods is standardisation, where we use the mean and standard deviation of the training set of data to normalise the data. We subtract the mean of the training set values, and divide by the standard deviation of the training data. Note that the mean and standard deviation of the training data are used to standardise the test set data as well. Here we will use sklearn's `StandardScaler` method. This method also copes with problems we might otherwise have (such as if one feature has zero standard deviation in the training set).
###Code
def standardise_data(X_train, X_test):
# Initialise a new scaling object for normalising input data
sc = StandardScaler()
# Set up the scaler just on the training set
sc.fit(X_train)
# Apply the scaler to the training and test sets
train_std=sc.transform(X_train)
test_std=sc.transform(X_test)
return train_std, test_std
X_train_std, X_test_std = standardise_data(X_train, X_test)
###Output
_____no_output_____
###Markdown
Fit logistic regression model. Now we will fit a logistic regression model, using sklearn's `LogisticRegression` method. Our machine learning model fitting is only two lines of code! By using the name `model` for our logistic regression model we will make our model more interchangeable later on.
###Code
model = LogisticRegression(solver='lbfgs')
model.fit(X_train_std,y_train)
###Output
_____no_output_____
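###Markdown
Because the features have been standardised, the fitted coefficients are on comparable scales, so we can (optionally) inspect them to get a rough feel for feature influence. This cell is an illustrative addition and is not part of the original notebook.
###Code
# Optional: pair each feature name with its fitted coefficient
# (larger absolute values suggest greater influence on the standardised scale)
coeff_df = pd.DataFrame({'feature': X.columns,
                         'coefficient': model.coef_[0]})
print(coeff_df.sort_values('coefficient', ascending=False))
###Output
_____no_output_____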
###Markdown
Receiver operating characteristic (ROC) CurveWe calculate the ROC curve by adjusting the probability cut-off (the probability cut-off of someone surviving in order to classify them as a survivor). Here we create a loop with cut-off probabilities in the range 0-1 in steps of 0.01. We do not need to refit the model – we take the same prediction probabilities output and apply varying cut-off thresholds to that output.
###Code
# Get probability of non-survive and survive
probabilities = model.predict_proba(X_test_std)
# Take just the survival probabilities (column 1)
probability_survival = probabilities[:,1]
# Set up list for accuracy measures
curve_fpr = [] # false positive rate
curve_tpr = [] # true positive rate
# Loop through increments in probability of survival
thresholds = np.arange(0, 1.01, 0.01)
for cutoff in thresholds: # loop 0 --> 1 in steps of 0.01
# Get whether passengers survive using cutoff
predicted_survived = probability_survival >= cutoff
# Call accuracy measures function
accuracy = calculate_accuracy(y_test, predicted_survived)
# Add accuracy scores to lists
curve_fpr.append(accuracy['false_positive_rate'])
curve_tpr.append(accuracy['true_positive_rate'])
# Note: some warning may appear, which may be ignored
###Output
_____no_output_____
###Markdown
And using MatPlotLib to plot the curve:
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Define a function for plotting the ROC curve
def plot_roc_curve(fpr, tpr):
plt.plot(fpr, tpr, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('Receiver Operator Characteristic (ROC) Curve')
plt.grid(True)
plt.legend()
plt.show()
plot_roc_curve(curve_fpr, curve_tpr)
###Output
_____no_output_____
###Markdown
Calculating area of ROC. A common measurement from the ROC curve is to measure the area under the curve. A model that performs no better than guessing by random chance will have an area of 0.5. A perfect model will have an area of 1.0. sklearn has a method for calculating the ROC area-under-curve, which we will use.
###Code
from sklearn.metrics import auc
roc_auc = auc(curve_fpr, curve_tpr)
print ('ROC AUC: {0:0.3}'.format(roc_auc))
###Output
ROC AUC: 0.819
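###Markdown
As an aside (not part of the original notebook), sklearn can also compute the area under the ROC curve directly from the predicted probabilities via `roc_auc_score`, which should agree closely with the value above.
###Code
# Alternative route: compute AUC straight from the predicted probabilities
from sklearn.metrics import roc_auc_score
print('ROC AUC (roc_auc_score): {0:0.3}'.format(
    roc_auc_score(y_test, probability_survival)))
###Output
_____no_output_____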
###Markdown
Choosing threshold. Once we have decided what balance of false positive and false negative rate we want, we can look up what probability threshold to apply in order to achieve that balance. For example, if we have decided we will accept a 0.2 false positive rate, we can plot the relationship between threshold and false positive rate and read off the required threshold. We will transfer our data to a pandas DataFrame, and then use a mask to filter data so we only see false positive rates around 0.2.
###Code
# Set up empty DataFrame
ROC_df = pd.DataFrame()
# Add data to DataFrame
ROC_df['threshold'] = thresholds
ROC_df['fpr'] = curve_fpr
ROC_df['tpr'] = curve_tpr
# Filter data to show false positive rates around 0.2.
mask = (ROC_df['fpr'] > 0.17) & (ROC_df['fpr'] < 0.23)
ROC_df[mask]
###Output
_____no_output_____
###Markdown
From the table above you can pick the threshold to apply to classify survivors. The default threshold in `model.predict` is 0.5, but we can now use `model.predict_proba` and classify by using the desired threshold (remembering that `model.predict_proba` outputs probabilities for non-survivors in column 0 and survivors in column 1). Our new code might look something like:
###Code
threshold = 0.39
survived = model.predict_proba(X_test_std)[:, 1] > threshold
# Show first 5 cases
survived[0:5]
###Output
_____no_output_____
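###Markdown
To see what this threshold buys us, we can (optionally) pass the new classification back through the `calculate_accuracy` function defined earlier. This check is our own addition rather than part of the original notebook.
###Code
# Optional check: accuracy measures at the chosen threshold
threshold_results = calculate_accuracy(y_test, survived)
print('Sensitivity:', threshold_results['sensitivity'])
print('Specificity:', threshold_results['specificity'])
###Output
_____no_output_____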
###Markdown
Using sklearn ROC methodThe method above was a manual implementation of ROC. It is useful to learn how to do this, as the same method type can be used to explore other relationships, and can be used to explore the direct effect of changing the cut-off threshold. But we can also use the sklearn `roc_curve` method which we import from `sklearn.metrics`. This, like our method above, runs the model once to predict the survival probabilities, and constructs the curve from those.
###Code
# Import roc_curve method
from sklearn.metrics import roc_curve
# Get probabilties from model
probabilities = model.predict_proba(X_test_std)
probabilities = probabilities[:, 1] # Probability of 'survived' class
# Get ROC curve using sklearn method
fpr, tpr, thresholds = roc_curve(y_test, probabilities)
# Plot ROC curve
plot_roc_curve(fpr, tpr)
###Output
_____no_output_____
###Markdown
Note, from the above model we can see that if we pick the appropriate threshold, we can achieve a 20% false positive rate (80% specificity) and an 80% true positive rate (80% sensitivity) simultaneously. And again we can calculate the Area Under Curve (it should be the same!)
###Code
roc_auc = auc(fpr, tpr)
print ('ROC AUC: {0:0.3}'.format(roc_auc))
###Output
ROC AUC: 0.819
###Markdown
Sensitivity-Specificity curve
###Code
# Get probability of non-survive and survive
probabilities = model.predict_proba(X_test_std)
# Take just the survival probabilities (column 1)
probability_survival = probabilities[:,1]
# Set up list for accuracy measures
sensitivity = []
specificity = [] # true negative rate
# Loop through increments in probability of survival
thresholds = np.arange(0, 1.01, 0.01)
for cutoff in thresholds: # loop 0 --> 1 in steps of 0.01
# Get whether passengers survive using cutoff
predicted_survived = probability_survival >= cutoff
# Call accuracy measures function
accuracy = calculate_accuracy(y_test, predicted_survived)
# Add accuracy scores to lists
sensitivity.append(accuracy['sensitivity'])
specificity.append(accuracy['specificity'])
plt.plot(sensitivity, specificity)
plt.xlabel('Sensitivity')
plt.ylabel('Specificity')
plt.title('Sensitivity-Specificity Curve')
plt.grid(True)
plt.show()
###Output
_____no_output_____ |
Limpeza_e_Tratamento_de_Dados_com_R.ipynb | ###Markdown
Studying a Kaggle dataset on customer Churn
###Code
#import the data, treat empty strings as NA, read strings as factors
dados = read.csv("Churn.csv", sep=";", na.strings="", stringsAsFactors=T)
head(dados)
summary(dados)
###Output
_____no_output_____
###Markdown
Note: factors, unlike strings, are categorical data; they carry a value and are NOT just letters or words.* Note that the table does NOT have proper column names.
###Code
#renaming the columns
colnames(dados) = c('Id', 'Score', 'Estado', 'Genero', 'Idade', 'Patrimonio', 'Saldo', 'Produtos', 'TemCartaoCredito', 'Ativo', 'Salario', 'Saiu')
head(dados)
#States
counts = table(dados$Estado)
barplot(counts, main="Estados", xlab = "Estados")
#Gender
counts = table(dados$Genero)
barplot(counts, main = "Gêneros", xlab = "Gêneros")
###Output
_____no_output_____
###Markdown
Exploring numerical data. The Score column
###Code
summary(dados$Score)
boxplot(dados$Score, main='Boxplot da coluna Score')
hist(dados$Score, main='Histograma da coluna Score', xlab = 'Score')
###Output
_____no_output_____
###Markdown
The Idade (age) column
###Code
summary(dados$Idade)
boxplot(dados$Idade, main='Boxplot da coluna Idade')
hist(dados$Idade, main='Histograma da coluna Idade', xlab = 'Idade')
###Output
_____no_output_____
###Markdown
The Saldo (balance) column
###Code
summary(dados$Saldo)
boxplot(dados$Saldo, main='Boxplot da coluna Saldo')
hist(dados$Saldo, main='Histograma da coluna Saldo', xlab = 'Saldo')
###Output
_____no_output_____
###Markdown
The Salario (salary) column
###Code
summary(dados$Salario)
boxplot(dados$Salario, main='Boxplot da coluna Salario')
hist(dados$Salario, main='Histograma da coluna Salario', xlab = 'Salário')
#outline=F to hide the outliers in the plot
###Output
_____no_output_____
###Markdown
Missing values, NA's and other modifications
###Code
dados[!complete.cases(dados$Salario),]
###Output
_____no_output_____
###Markdown
Note that we have 7 rows with NA values in the Salario column.
###Code
summary(dados$Salario)
mediana_salario = median(dados$Salario, na.rm=T)
#assigning the median to the NA values
dados[is.na(dados$Salario), ]$Salario = mediana_salario
#check whether the NA values were replaced
dados[!complete.cases(dados$Salario),]
###Output
_____no_output_____
###Markdown
Standardizing the Genero variable
###Code
#view the values
unique(dados['Genero'])
summary(dados['Genero'])
###Output
_____no_output_____
###Markdown
Above we can see that the gender values are not standardized and need to be updated so that we can analyze the data properly.
###Code
#converting F and Fem to Feminino and M to Masculino
dados[dados$Genero == 'F' | dados$Genero == "Fem",]$Genero = "Feminino"
dados[is.na(dados$Genero) | dados$Genero == "M",]$Genero = "Masculino"
summary(dados['Genero'])
#Dropping the now-unused factor levels F, Fem and M from our data
dados$Genero = factor(dados$Genero)
summary(dados['Genero'])
###Output
_____no_output_____
###Markdown
Making the necessary changes to the Idade column
###Code
#ages outside the valid range (outliers and incorrect data)
dados[dados$Idade < 0 | dados$Idade > 110, ]$Idade
dados[is.na(dados$Idade),]
###Output
_____no_output_____
###Markdown
We do NOT have NA values in the Idade column.
###Code
#median of the Idade column
median(dados$Idade)
#Testing whether the median changes if the abnormal Idade values are excluded
idade_correta <- dados[dados$Idade > 0 & dados$Idade < 110, ]$Idade
median(idade_correta)
summary(idade_correta)
#assigning the median age to the incorrect ages (outliers)
dados[dados$Idade < 0 | dados$Idade > 110, ]$Idade = median(dados$Idade)
#checking that the changes actually took place
summary(dados$Idade)
###Output
_____no_output_____
###Markdown
Duplicated rows (using the Id column as the key)
###Code
#look for rows duplicated by Id
x = dados[duplicated(dados$Id), ]
x
###Output
_____no_output_____
###Markdown
Notice that row 82 duplicates row 81.
###Code
#removing the duplicated row found above (row index 82)
dados = dados[-c(82),]
#looking up the duplicated Id to confirm the row was really removed
dados[dados$Id == x$Id,]
#look for rows duplicated by Id again
x = dados[duplicated(dados$Id), ]
x
###Output
_____no_output_____
###Markdown
Estado values outside the expected domain
###Code
#states that may fall outside the categorical domain
summary(dados['Estado'])
###Output
_____no_output_____ |
old/Phosphorylation Sequence Tests -Bagging -dbptm+ELM-VectorAvr..ipynb | ###Markdown
Template for test
###Code
from pred import Predictor
from pred import sequence_vector
from pred import chemical_vector
###Output
/Users/mark/anaconda3/lib/python3.6/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
###Markdown
Controlling for Random Negative vs Sans Random in Imbalanced Techniques using S, T, and Y Phosphorylation. N Phosphorylation is also included; however, no benchmarks are available yet. Training data is from phospho.elm and benchmarks are from dbptm. Note: SMOTEENN seems to perform best
###Code
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_s_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="S", imbalance_function=i, random_data=0)
y.supervised_training("bagging")
y.benchmark("Data/Benchmarks/phos.csv", "S")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_s_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="S", imbalance_function=i, random_data=1)
x.supervised_training("bagging")
x.benchmark("Data/Benchmarks/phos.csv", "S")
del x
###Output
y pass
Loading Data
Loaded Data
Working on Data
Sample Vector [1, 2, 16, 10, 13, 20, 10, 11, 1, 1, 18, 18, 12, -0.3692307692307692, 32.93076923076923, 0.0]
Finished working with Data
Training Data Points: 200363
Test Data Points: 50091
Starting Training
Done training
Test Results
Sensitivity: 0.15450690442535608
Specificity : 0.9454687729251234
Accuracy: 0.8002435567267573
ROC 0.549987838675
TP 1421 FP 2230 TN 38664 FN 7776
None
Cross: Validation: [ 0.79996806 0.79940508 0.7992853 0.79918147 0.79778399]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.23771462403789223
Specificity : 0.9309787737735336
Accuracy: 0.895439714697625
ROC 0.584346698906
TP 803 FP 4315 TN 58202 FN 2575
None
x pass
Loading Data
Loaded Data
Working on Data
Sample Vector [10, 1, 10, 10, 18, 12, 10, 10, 11, 3, 11, 14, 5, 0.16923076923076927, 81.67692307692309, 0.07692307692307693]
Finished working with Data
Training Data Points: 200363
Test Data Points: 50091
Starting Training
Done training
Test Results
Sensitivity: 0.1441500381637771
Specificity : 0.9467741935483871
Accuracy: 0.7998243197380767
ROC 0.545462115856
TP 1322 FP 2178 TN 38742 FN 7849
None
Cross: Validation: [ 0.80012776 0.80080254 0.79940508 0.79994011 0.79846277]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.25636471284783896
Specificity : 0.9372170769550682
Accuracy: 0.9023142878822369
ROC 0.596790894901
TP 866 FP 3925 TN 58592 FN 2512
None
y ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [18, 11, 3, 1, 15, 3, 10, 5, 3, 2, 7, 19, 2, 0.21538461538461537, 0.48461538461538456, 0.15384615384615385]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 331728
Test Data Points: 82932
Starting Training
Done training
Test Results
Sensitivity: 0.8049394443125149
Specificity : 0.9312870510998971
Accuracy: 0.8671321082332514
ROC 0.868113247706
TP 33896 FP 2805 TN 38017 FN 8214
None
Cross: Validation: [ 0.52008248 0.90747842 0.93661072 0.93544108 0.93811723]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.19330965068087627
Specificity : 0.9196378585024874
Accuracy: 0.8824038242658775
ROC 0.556473754592
TP 653 FP 5024 TN 57493 FN 2725
None
x ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [3, 2, 17, 3, 19, 2, 10, 17, 16, 9, 10, 13, 8, -0.6615384615384616, 53.67692307692308, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 331646
Test Data Points: 82912
Starting Training
Done training
Test Results
Sensitivity: 0.8055343875023814
Specificity : 0.9273949169110459
Accuracy: 0.8656768622153609
ROC 0.866464652207
TP 33826 FP 2971 TN 37949 FN 8166
None
Cross: Validation: [ 0.51964107 0.90494741 0.93722184 0.93751131 0.93792139]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.2847838957963292
Specificity : 0.9203736583649247
Accuracy: 0.8877911829425601
ROC 0.602578777081
TP 962 FP 4978 TN 57539 FN 2416
None
y SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [2, 11, 3, 20, 18, 5, 10, 19, 10, 19, 19, 20, 12, -0.46153846153846145, 4.176923076923081, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 305876
Test Data Points: 76470
Starting Training
Done training
Test Results
Sensitivity: 0.8097830639082
Specificity : 0.9536073598504415
Accuracy: 0.8862429710997777
ROC 0.881695211879
TP 29004 FP 1886 TN 38767 FN 6813
None
Cross: Validation: [ 0.56215509 0.94004106 0.95558985 0.95514522 0.95540677]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.21551213735938426
Specificity : 0.9368811683222164
Accuracy: 0.8999013582214128
ROC 0.576196652841
TP 728 FP 3946 TN 58571 FN 2650
None
x SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [13, 18, 3, 19, 7, 11, 10, 18, 11, 10, 10, 16, 9, -1.4153846153846155, 39.13076923076924, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 305822
Test Data Points: 76456
Starting Training
Done training
Test Results
Sensitivity: 0.8111776110595971
Specificity : 0.9568600582376979
Accuracy: 0.8890472951763105
ROC 0.884018834649
TP 28869 FP 1763 TN 39104 FN 6720
None
Cross: Validation: [ 0.56117818 0.94031861 0.95564711 0.95613106 0.95505853]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.1950858496151569
Specificity : 0.9318425388294383
Accuracy: 0.8940739054556491
ROC 0.563464194222
TP 659 FP 4261 TN 58256 FN 2719
None
y random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [7, 12, 3, 9, 19, 11, 10, 10, 18, 12, 5, 11, 7, -0.6230769230769231, 61.961538461538474, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 73804
Test Data Points: 18452
Starting Training
Done training
Test Results
Sensitivity: 0.5995934959349594
Specificity : 0.7205623901581723
Accuracy: 0.6592781270323
ROC 0.660077943047
TP 5605 FP 2544 TN 6560 FN 3743
None
Cross: Validation: [ 0.65770648 0.65759809 0.64627141 0.65859079 0.65376694]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.6551213735938425
Specificity : 0.6128253115152679
Accuracy: 0.6149935503452463
ROC 0.633973342555
TP 2213 FP 24205 TN 38312 FN 1165
None
x random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [10, 11, 8, 8, 8, 17, 10, 11, 1, 10, 0, 0, 0, -2.1, 198.60000000000002, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 73804
Test Data Points: 18452
Starting Training
Done training
Test Results
Sensitivity: 0.600556268720582
Specificity : 0.7126537785588752
Accuracy: 0.6558638629958812
ROC 0.65660502364
TP 5614 FP 2616 TN 6488 FN 3734
None
Cross: Validation: [ 0.65673098 0.65304574 0.65743551 0.65756098 0.65913279]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.5361160449970397
Specificity : 0.6255898395636387
Accuracy: 0.6210031110099401
ROC 0.58085294228
TP 1811 FP 23407 TN 39110 FN 1567
None
y ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [10, 10, 4, 11, 1, 1, 10, 20, 11, 12, 10, 10, 2, -0.06153846153846161, 96.96923076923079, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 143742
Test Data Points: 35936
Starting Training
Done training
Test Results
Sensitivity: 0.30112212659331083
Specificity : 0.9248047239974586
Accuracy: 0.7654997773820125
ROC 0.612963425295
TP 2764 FP 2012 TN 24745 FN 6415
None
Cross: Validation: [ 0.76819902 0.7660285 0.76769813 0.76449144 0.76766384]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.30076968620485495
Specificity : 0.8593662523793528
Accuracy: 0.8307307079444571
ROC 0.580067969292
TP 1016 FP 8792 TN 53725 FN 2362
None
x ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [20, 7, 3, 3, 20, 7, 10, 11, 10, 10, 3, 8, 13, -0.06153846153846153, 91.20769230769231, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 143737
Test Data Points: 35935
Starting Training
Done training
Test Results
Sensitivity: 0.3179738562091503
Specificity : 0.9247991029714072
Accuracy: 0.7697787672185891
ROC 0.62138647959
TP 2919 FP 2012 TN 24743 FN 6261
None
Cross: Validation: [ 0.77047447 0.76744121 0.7666342 0.76935493 0.7723541 ]
Number of data points in benchmark 65895
Benchmark Results
Sensitivity: 0.3359976317347543
Specificity : 0.8684357854663531
Accuracy: 0.8411412094999621
ROC 0.602216708601
TP 1135 FP 8225 TN 54292 FN 2243
None
y near_miss
Loading Data
Loaded Data
Working on Data
Sample Vector [12, 5, 18, 9, 13, 18, 10, 17, 9, 13, 9, 7, 13, -0.4769230769230769, 80.94615384615385, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 73804
Test Data Points: 18452
Starting Training
Done training
Test Results
Sensitivity: 0.5902866923406076
Specificity : 0.6827768014059754
Accuracy: 0.6359202254498157
ROC 0.636531746873
TP 5518 FP 2888 TN 6216 FN 3830
None
Cross: Validation: [ 0.52904834 0.66962931 0.65689356 0.63804878 0.62493225]
Number of data points in benchmark 65895
###Markdown
Y Phosphorylation
###Code
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_Y_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i, random_data=0)
y.supervised_training("bagging")
y.benchmark("Data/Benchmarks/phos.csv", "Y")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_Y_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i, random_data=1)
x.supervised_training("bagging")
x.benchmark("Data/Benchmarks/phos.csv", "Y")
del x
###Output
y pass
Loading Data
Loaded Data
Working on Data
Sample Vector [12, 12, 1, 10, 8, 9, 15, 10, 2, 10, 1, 10, 11, -0.2846153846153846, 66.13076923076923, 0.07692307692307693]
Finished working with Data
Training Data Points: 10099
Test Data Points: 2525
Starting Training
Done training
Test Results
Sensitivity: 0.028846153846153848
Specificity : 0.9991738950846758
Accuracy: 0.9592079207920792
ROC 0.514010024465
TP 3 FP 2 TN 2419 FN 101
None
Cross: Validation: [ 0.95486936 0.95326733 0.95524752 0.95562599 0.95721078]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9980437186357064
Accuracy: 0.9980046699214604
ROC 0.986826737367
TP 40 FP 46 TN 23468 FN 1
None
x pass
Loading Data
Loaded Data
Working on Data
Sample Vector [8, 9, 14, 11, 9, 1, 15, 17, 12, 20, 15, 20, 11, -1.2230769230769232, 53.43076923076925, 0.15384615384615385]
Finished working with Data
Training Data Points: 10099
Test Data Points: 2525
Starting Training
Done training
Test Results
Sensitivity: 0.08270676691729323
Specificity : 0.9987458193979933
Accuracy: 0.9504950495049505
ROC 0.540726293158
TP 11 FP 3 TN 2389 FN 122
None
Cross: Validation: [ 0.95486936 0.95524752 0.95485149 0.95364501 0.9548336 ]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9965127158288679
Accuracy: 0.996476331988962
ROC 0.986061235963
TP 40 FP 82 TN 23432 FN 1
None
y ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [5, 4, 17, 7, 3, 6, 15, 18, 5, 12, 2, 1, 2, 0.35384615384615375, -2.976923076923078, 0.3076923076923077]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 19352
Test Data Points: 4838
Starting Training
Done training
Test Results
Sensitivity: 0.9494780793319415
Specificity : 0.9950880065493246
Accuracy: 0.9725093013642001
ROC 0.972283042941
TP 2274 FP 12 TN 2431 FN 121
None
Cross: Validation: [ 0.8545154 0.98016119 0.98222406 0.97374406 0.97808559]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9965977715403589
Accuracy: 0.9965612396518786
ROC 0.986103763819
TP 40 FP 80 TN 23434 FN 1
None
x ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [3, 17, 9, 15, 17, 2, 15, 15, 6, 17, 9, 10, 10, -1.6384615384615389, 42.18461538461539, 0.3076923076923077]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 19352
Test Data Points: 4838
Starting Training
Done training
Test Results
Sensitivity: 0.943609022556391
Specificity : 0.9950900163666121
Accuracy: 0.9696155436130632
ROC 0.969349519462
TP 2259 FP 12 TN 2432 FN 135
None
Cross: Validation: [ 0.86567473 0.97974788 0.97850351 0.97705189 0.97891255]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9964276601173769
Accuracy: 0.9963914243260454
ROC 0.986018708107
TP 40 FP 84 TN 23430 FN 1
None
y SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [9, 18, 20, 2, 14, 2, 15, 3, 2, 18, 3, 15, 13, 0.4769230769230769, 36.5, 0.15384615384615385]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 19226
Test Data Points: 4807
Starting Training
Done training
Test Results
Sensitivity: 0.9408033826638478
Specificity : 0.9905814905814906
Accuracy: 0.9660911171208654
ROC 0.965692436623
TP 2225 FP 23 TN 2419 FN 140
None
Cross: Validation: [ 0.89244851 0.9854379 0.98647805 0.98605909 0.98605909]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9944713787530832
Accuracy: 0.9944385480789641
ROC 0.985040567425
TP 40 FP 130 TN 23384 FN 1
None
x SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [9, 7, 8, 17, 3, 13, 15, 8, 1, 17, 12, 3, 8, -0.9461538461538465, 52.492307692307705, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 19228
Test Data Points: 4808
Starting Training
Done training
Test Results
Sensitivity: 0.9444681759931653
Specificity : 0.9914876368058371
Accuracy: 0.9685940099833611
ROC 0.9679779064
TP 2211 FP 21 TN 2446 FN 130
None
Cross: Validation: [ 0.89663062 0.98585691 0.98793677 0.98231377 0.98730753]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9946414901760653
Accuracy: 0.9946083634047973
ROC 0.985125623137
TP 40 FP 126 TN 23388 FN 1
None
y random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [10, 13, 5, 15, 12, 12, 15, 5, 12, 12, 5, 11, 5, 2.1153846153846154, 58.13923076923078, 0.46153846153846156]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 945
Test Data Points: 237
Starting Training
Done training
Test Results
Sensitivity: 0.4406779661016949
Specificity : 0.7478991596638656
Accuracy: 0.5949367088607594
ROC 0.594288562883
TP 52 FP 30 TN 89 FN 66
None
Cross: Validation: [ 0.53781513 0.55508475 0.53389831 0.56355932 0.56779661]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 1.0
Specificity : 0.6756825720847155
Accuracy: 0.6762470812990873
ROC 0.837841286042
TP 41 FP 7626 TN 15888 FN 0
None
x random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [17, 17, 9, 6, 2, 7, 15, 4, 9, 12, 16, 9, 7, -1.9076923076923076, 63.96923076923077, 0.15384615384615385]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 945
Test Data Points: 237
Starting Training
Done training
Test Results
Sensitivity: 0.4745762711864407
Specificity : 0.6722689075630253
Accuracy: 0.5738396624472574
ROC 0.573422589375
TP 56 FP 39 TN 80 FN 62
None
Cross: Validation: [ 0.6092437 0.61864407 0.55932203 0.61864407 0.56355932]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.664625329590882
Accuracy: 0.6651666312884738
ROC 0.820117542844
TP 40 FP 7886 TN 15628 FN 1
None
y ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [12, 20, 3, 10, 12, 17, 15, 11, 11, 9, 12, 10, 13, 0.46923076923076923, 34.57692307692308, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 9226
Test Data Points: 2307
Starting Training
Done training
Test Results
Sensitivity: 0.05217391304347826
Specificity : 0.9995437956204379
Accuracy: 0.9523190290420459
ROC 0.525858854332
TP 6 FP 1 TN 2191 FN 109
None
Cross: Validation: [ 0.95103986 0.95015171 0.952732 0.9518647 0.94882914]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9982988857701794
Accuracy: 0.9982593929102102
ROC 0.986954320934
TP 40 FP 40 TN 23474 FN 1
None
x ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [5, 9, 2, 4, 5, 1, 15, 7, 2, 7, 5, 1, 3, 0.33076923076923076, 2.7, 0.3076923076923077]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 9232
Test Data Points: 2309
Starting Training
Done training
Test Results
Sensitivity: 0.08695652173913043
Specificity : 0.9990884229717412
Accuracy: 0.9536595928973581
ROC 0.543022472355
TP 10 FP 2 TN 2192 FN 105
None
Cross: Validation: [ 0.95062798 0.94887348 0.94930676 0.95060659 0.95147314]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 0.975609756097561
Specificity : 0.9964276601173769
Accuracy: 0.9963914243260454
ROC 0.986018708107
TP 40 FP 84 TN 23430 FN 1
None
y near_miss
Loading Data
Loaded Data
Working on Data
Sample Vector [13, 10, 9, 3, 2, 17, 15, 10, 18, 10, 9, 12, 12, -0.015384615384615467, 33.06153846153847, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 945
Test Data Points: 237
Starting Training
Done training
Test Results
Sensitivity: 0.5677966101694916
Specificity : 0.7058823529411765
Accuracy: 0.6371308016877637
ROC 0.636839481555
TP 67 FP 35 TN 84 FN 51
None
Cross: Validation: [ 0.66386555 0.69491525 0.59745763 0.59322034 0.63559322]
Number of data points in benchmark 23555
Benchmark Results
Sensitivity: 1.0
Specificity : 0.38895976864846477
Accuracy: 0.39002334960730206
ROC 0.694479884324
TP 41 FP 14368 TN 9146 FN 0
None
x near_miss
Loading Data
###Markdown
T Phosphorylation
###Code
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_t_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i, random_data=0)
y.supervised_training("bagging")
y.benchmark("Data/Benchmarks/phos.csv", "T")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_t_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i, random_data=1)
x.supervised_training("bagging")
x.benchmark("Data/Benchmarks/phos.csv", "T")
del x
###Output
y pass
Loading Data
Loaded Data
Working on Data
Sample Vector [12, 9, 19, 12, 20, 1, 13, 8, 16, 3, 20, 0, 0, 0.10909090909090913, 3.445454545454546, 0.0]
Finished working with Data
Training Data Points: 66323
Test Data Points: 16581
Starting Training
Done training
Test Results
Sensitivity: 0.10776699029126213
Specificity : 0.9600474390334297
Accuracy: 0.8012182618659912
ROC 0.533907214662
TP 333 FP 539 TN 12952 FN 2757
None
Cross: Validation: [ 0.80400434 0.80430587 0.80723764 0.80735826 0.80772014]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.259529602595296
Specificity : 0.9512871798180528
Accuracy: 0.9334171380683008
ROC 0.605408391207
TP 320 FP 2265 TN 44232 FN 913
None
x pass
Loading Data
Loaded Data
Working on Data
Sample Vector [1, 10, 20, 20, 20, 9, 20, 20, 20, 1, 19, 10, 18, -1.3153846153846156, 32.330769230769235, 0.0]
Finished working with Data
Training Data Points: 66323
Test Data Points: 16581
Starting Training
Done training
Test Results
Sensitivity: 0.11469780219780219
Specificity : 0.960421391469749
Accuracy: 0.8118931306917556
ROC 0.537559596834
TP 334 FP 541 TN 13128 FN 2578
None
Cross: Validation: [ 0.80316005 0.80141117 0.80259349 0.80464415 0.80548854]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.22141119221411193
Specificity : 0.959932898896703
Accuracy: 0.9408548082966688
ROC 0.590672045555
TP 273 FP 1863 TN 44634 FN 960
None
y ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [20, 4, 7, 13, 8, 4, 10, 20, 17, 20, 8, 17, 14, -0.9230769230769231, 30.7, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 109785
Test Data Points: 27447
Starting Training
Done training
Test Results
Sensitivity: 0.7862885857860732
Specificity : 0.947399570910705
Accuracy: 0.8656319452034831
ROC 0.866844078348
TP 10953 FP 711 TN 12806 FN 2977
None
Cross: Validation: [ 0.52074908 0.91427114 0.94494644 0.94695038 0.94662246]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.1678832116788321
Specificity : 0.9488999290276792
Accuracy: 0.9287240729101194
ROC 0.558391570353
TP 207 FP 2376 TN 44121 FN 1026
None
x ADASYN
Loading Data
Loaded Data
Working on Data
Sample Vector [3, 17, 13, 18, 18, 12, 20, 10, 11, 9, 5, 20, 10, -0.33076923076923076, 64.68461538461538, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 109778
Test Data Points: 27445
Starting Training
Done training
Test Results
Sensitivity: 0.7966882649388048
Specificity : 0.949760236075249
Accuracy: 0.8722900346146839
ROC 0.873224250507
TP 11066 FP 681 TN 12874 FN 2824
None
Cross: Validation: [ 0.52014866 0.91860084 0.94545256 0.94126221 0.94425011]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.28791565287915655
Specificity : 0.9440394003914231
Accuracy: 0.9270898805782527
ROC 0.615977526635
TP 355 FP 2602 TN 43895 FN 878
None
y SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [14, 1, 11, 17, 20, 11, 20, 12, 8, 16, 18, 20, 4, -0.9076923076923076, 15.115384615384615, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 102928
Test Data Points: 25732
Starting Training
Done training
Test Results
Sensitivity: 0.8027764087399376
Specificity : 0.9590647588139843
Accuracy: 0.8851235815327219
ROC 0.880920583777
TP 9773 FP 555 TN 13003 FN 2401
None
Cross: Validation: [ 0.55691136 0.94882058 0.95449246 0.95495706 0.95192569]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.19302514193025141
Specificity : 0.9552659311353421
Accuracy: 0.9355751099937146
ROC 0.574145536533
TP 238 FP 2080 TN 44417 FN 995
None
x SMOTEENN
Loading Data
Loaded Data
Working on Data
Sample Vector [2, 11, 2, 3, 12, 13, 20, 20, 8, 14, 9, 13, 14, 1.2000000000000002, 42.07692307692308, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 102917
Test Data Points: 25730
Starting Training
Done training
Test Results
Sensitivity: 0.8066076405968191
Specificity : 0.9601684895063554
Accuracy: 0.8873688301593471
ROC 0.883388065052
TP 9839 FP 539 TN 12993 FN 2359
None
Cross: Validation: [ 0.55825884 0.94869802 0.95565315 0.95557542 0.95444829]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.19302514193025141
Specificity : 0.9537174441361809
Accuracy: 0.9340666247642991
ROC 0.573371293033
TP 238 FP 2152 TN 44345 FN 995
None
y random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [9, 9, 9, 5, 9, 18, 20, 17, 7, 18, 16, 2, 17, -2.5538461538461537, 52.85384615384616, 0.07692307692307693]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 24059
Test Data Points: 6015
Starting Training
Done training
Test Results
Sensitivity: 0.5328947368421053
Specificity : 0.7015126050420168
Accuracy: 0.6162926018287614
ROC 0.617203670942
TP 1620 FP 888 TN 2087 FN 1420
None
Cross: Validation: [ 0.61635638 0.62101064 0.63801131 0.61988693 0.61822414]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.6853203568532036
Specificity : 0.6500204314256833
Accuracy: 0.6509323276765138
ROC 0.667670394139
TP 845 FP 16273 TN 30224 FN 388
None
x random_under_sample
Loading Data
Loaded Data
Working on Data
Sample Vector [13, 3, 3, 7, 19, 7, 20, 12, 3, 1, 10, 13, 18, 0.6076923076923079, 10.984615384615386, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 24059
Test Data Points: 6015
Starting Training
Done training
Test Results
Sensitivity: 0.5342105263157895
Specificity : 0.7210084033613445
Accuracy: 0.6266001662510391
ROC 0.627609464839
TP 1624 FP 830 TN 2145 FN 1416
None
Cross: Validation: [ 0.625 0.61768617 0.63318923 0.61739275 0.6130695 ]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.6536901865369019
Specificity : 0.6277609308127406
Accuracy: 0.628430756337733
ROC 0.640725558675
TP 806 FP 17308 TN 29189 FN 427
None
y ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [11, 1, 2, 9, 9, 8, 20, 2, 12, 2, 13, 20, 10, -0.04615384615384599, 42.5923076923077, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 47213
Test Data Points: 11804
Starting Training
Done training
Test Results
Sensitivity: 0.25221540558963873
Specificity : 0.936076662908681
Accuracy: 0.7660962385631989
ROC 0.594146034249
TP 740 FP 567 TN 8303 FN 2194
None
Cross: Validation: [ 0.75559132 0.75770925 0.75734983 0.75599424 0.75768872]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.23763179237631793
Specificity : 0.9154138976708175
Accuracy: 0.8979048816258118
ROC 0.576522845024
TP 293 FP 3933 TN 42564 FN 940
None
x ncl
Loading Data
Loaded Data
Working on Data
Sample Vector [7, 9, 2, 13, 12, 7, 20, 2, 7, 2, 3, 12, 9, 0.2076923076923077, 0.48461538461538456, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 47274
Test Data Points: 11819
Starting Training
Done training
Test Results
Sensitivity: 0.23231292517006802
Specificity : 0.9321995720238766
Accuracy: 0.7581013622133852
ROC 0.582256248597
TP 683 FP 602 TN 8277 FN 2257
None
Cross: Validation: [ 0.7571912 0.75894746 0.75241158 0.75638856 0.75672703]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.2733171127331711
Specificity : 0.9084887197023463
Accuracy: 0.8920804525455688
ROC 0.590902916218
TP 337 FP 4255 TN 42242 FN 896
None
y near_miss
Loading Data
Loaded Data
Working on Data
Sample Vector [3, 18, 10, 19, 10, 2, 20, 19, 9, 20, 13, 11, 19, -0.9230769230769229, 16.030769230769234, 0.0]
Balancing Data
Balanced Data
Finished working with Data
Training Data Points: 24059
Test Data Points: 6015
Starting Training
Done training
Test Results
Sensitivity: 0.5394736842105263
Specificity : 0.6769747899159664
Accuracy: 0.6074812967581047
ROC 0.608224237063
TP 1640 FP 961 TN 2014 FN 1400
None
Cross: Validation: [ 0.54521277 0.6512633 0.62071832 0.60508813 0.59161955]
Number of data points in benchmark 47730
Benchmark Results
Sensitivity: 0.7112733171127331
Specificity : 0.4847624577929759
Accuracy: 0.4906138696836371
ROC 0.598017887453
TP 877 FP 23957 TN 22540 FN 356
None
x near_miss
Loading Data
|
distributions/exercises/Central_Limit_Theorem_and_Continuous_Distributions.ipynb | ###Markdown
###Code
#Setup - run this first
install.packages('cowplot')
library(ggplot2)
library(tidyverse)
library(cowplot)
###Output
_____no_output_____
###Markdown
Exercises for Understanding Continuous Distributions and Central Limit TheoremHere, we will go through some of the R commands you can use to simulate and explore distributions. First up are the commands you need to simulate data from each of the distribution types. In `R`, these are typically called `rxxx`, where `xxx` is the type of distribution for example:* `runif` simulates data from a uniform distribution* `rexp` simulates data from an exponential distribution* `rgamma` simulates data from a gamma distribution* `rnorm` simulates data from a normal distributionEach method takes a parameter to state how many observations we wish to simulate and then a number of other parameters depending on the parameters of the probability density function for that distribution. For example, `rnorm` takes three parameters:1. The number of observations to simulate2. The mean of the observations3. The standard deviation of the observationsLet's try it out. Suppose we want to simulate ten observations from a population with a normal distribution, with a mean of 4 and a standard deviation of 2. So that our 'random' numbers are the same each time you run this, we can set a seed value for random number generation using the `set.seed` method. The seed can be any number, but setting it before simulating populations is critical if you want your work to be reproducible by others!
###Code
set.seed(14)
values = rnorm(10, mean=4, sd=2)
print(values)
###Output
_____no_output_____
###Markdown
You can see that it has randomly generated 10 real numbers (because the normal distribution is continuous). We can double-check the mean and standard deviation:
###Code
print(paste('The mean of values is:', mean(values)))
print(paste('The standard deviation of values is:', sd(values)))
###Output
_____no_output_____
###Markdown
Well, it looks like our mean of 5.51 is a bit higher than the value of 4 we used to simulate the data. **Why do you think this is?** _(hint, try simulating 100 values below and see what you get)_
###Code
# Simulate a normal distribution of 100 values, with a mean of 4
# a standard deviation of 2 here, then print the mean and standard deviation
# of the simulated data
###Output
_____no_output_____
###Markdown
Testing the Central Limit Theorem. Here, we are going to simulate a population of 1 million with a uniform distribution going from 1 to 10. We are then going to randomly sample ten observations from that population and calculate the mean:
###Code
#simulate the observations
population = runif(1e6, min=1, max=10)
#subsample 10 observations
subsample = sample(population, size=10, replace=TRUE)
print(subsample)
print(paste('The mean of this subsample is:', mean(subsample)))
###Output
_____no_output_____
###Markdown
Notice here that we selected `replace=TRUE`. This is known as **sampling with replacement**, i.e. the number is selected from the population, and then placed back in the population so that it might be selected randomly again. The opposite, where `replace=FALSE` means that the number can _only be selected once_. Thus, the first randomly selected observation will be chosen from one million numbers, the second will be chosen from 999,999 numbers etc. When subsampling your data, particularly if you are doing so to infer properties of the whole population, it is important to sample _with replacement_.Now we have one subsample, we can follow a similar pattern to repeat this lots of times with a `for` loop and store these values in a vector. Let's take 100 subsamples, calculate their means and then store the mean in a vector called `subsampled_means`.
###Code
#set up the storage vector
subsampled_means = rep(0, times=100)
#Loop through and set the values
for (i in 1:length(subsampled_means)){
subsample = sample(population, size=10, replace=TRUE)
subsampled_means[i] = mean(subsample)
}
print('The first five records of our subsample are:')
print(subsampled_means[1:5])
###Output
_____no_output_____
###Markdown
Now let's plot these values
###Code
ggplot(tibble(x=subsampled_means), aes(x)) +
geom_density(fill='#0072B2') +
theme_minimal_hgrid(12) +
scale_x_continuous(name="Subsample Mean") +
scale_y_continuous(name='Probability Density')
###Output
_____no_output_____
###Markdown
Well, it looks a bit wonky for a normal distribution, and the ends are cut off. We can fix the latter problem by using a histogram instead of a density plot:
###Code
ggplot(tibble(x=subsampled_means), aes(x)) +
geom_histogram(fill='#0072B2', bins=20, color='black') +
theme_minimal_hgrid(12) +
scale_x_continuous(name="Subsample Mean") +
scale_y_continuous(name='Probability Density')
###Output
_____no_output_____
###Markdown
Still wonky though... **How might you fix this to make it look more like a normal distribution?** Do so below:
###Code
###Output
_____no_output_____
###Markdown
With your new and improved `subsampled_means`, we can work out the 95% confidence intervals by using the `quantile` function. This function takes a vector of values and a vector of the quantiles you wish to calculate. For 95% confidence, we are interested in all the values that sit between a quantile of 2.5% and 97.5%.
###Code
ci95 = quantile(subsampled_means, c(0.025, 0.975))
print(ci95)
sprintf('We can be 95%% confident that the mean of our population can be found between the values %1.2f and %1.2f', ci95[1], ci95[2])
###Output
_____no_output_____
###Markdown
**How might we improve that confidence (i.e. make it a narrower distribution)?**. Try your answer below:
###Code
###Output
_____no_output_____
###Markdown
**Repeat the exercise with sampling from a population with an exponential distribution (where $\lambda=4$) and a gamma distribution (where $\lambda=100$ and $\alpha=5$) to evaluate the Central Limit Theorem**
###Code
# Test with Exponential distribution (hint use `rexp`)
# Test with Gamma distribution (hint use `rgamma`)
###Output
_____no_output_____ |
export_job_api.ipynb | ###Markdown
Import Statements
###Code
import requests
import pandas as pd
import json
import time
import os
###Output
_____no_output_____
###Markdown
Retrieve API Key
###Code
# to use this notebook, fill in values for your api user and api key
#####User Input#####
api_user = "INSERT_API_USER"
api_pass = "INSERT_API_KEY|1"
#####User Input#####
headers = {"content-type": "application/json"}
###Output
_____no_output_____
###Markdown
Get API Key Profiles
###Code
intro_url = "https://api.securevan.com/v4/apiKeyProfiles/"
response = (
requests.get(
intro_url,
headers=headers,
auth=requests.auth.HTTPBasicAuth(api_user, api_pass)
).json()
)
response
###Output
_____no_output_____
###Markdown
Get Export Job Types
###Code
job_types_url = "https://api.securevan.com/v4/exportJobTypes"
job_types = (
requests.get(job_types_url,
headers=headers,
auth=requests.auth.HTTPBasicAuth(api_user, api_pass)
).json()
)
job_types
###Output
_____no_output_____
###Markdown
Get Saved Lists
###Code
saved_lists_url = "https://api.securevan.com/v4/savedLists"
query_string = {"maxDoorCount":"1000","maxPeopleCount":"2000"}
saved_lists = (
requests.request(
"GET",
saved_lists_url,
headers=headers,
auth=requests.auth.HTTPBasicAuth(api_user, api_pass),
params=query_string
).json()
)
saved_lists
###Output
_____no_output_____
###Markdown
Check SavedList is in ID
###Code
#####User Input#####
saved_list_id = "INSERT_LIST_ID_AS_INT"
#####User Input#####
if (saved_list_id in set([item['savedListId'] for item in saved_lists['items']])):
print('Valid Saved List')
else:
print('Invalid Saved List')
###Output
_____no_output_____
###Markdown
Create Export Job. After creating the export job, there are two options for receiving the download URL. The first is to repeatedly call the API, at set intervals, to check whether the job is complete; once it is, the download link will be printed. The second is to use a webhook, which will post the job details once the job is complete, and which our function will then query to retrieve the download link. Only one of the following implementations is needed.
###Code
#####User Input#####
webhook_url = "INSERT_WEBHOOK_URL"
job_type = "INSERT_JOB_TYPE_AS_INT"
#####User Input#####
url = "https://api.securevan.com/v4/exportJobs"
payload = {
"savedListId": saved_list_id,
"type": job_type,
"webhookUrl": webhook_url
}
response = (requests.request(
"POST",
url,
auth=requests.auth.HTTPBasicAuth(api_user, api_pass),
json=payload,
headers=headers)
).json()
response
###Output
_____no_output_____
###Markdown
Pulling Method 1 - Check export status and retrieve download URL
###Code
export_job_id = response["exportJobId"]
url = f"https://api.securevan.com/v4/exportJobs/{export_job_id}"
while True:
response = requests.request(
"GET",
url,
headers=headers,
auth=requests.auth.HTTPBasicAuth(api_user, api_pass)
).json()
if response["status"] != "Completed":
print(f"Job Status: {response['status']}")
time.sleep(5)
else:
print(f"Job Status: {response['status']}")
print(f"File at {response['downloadUrl']}")
break
###Output
_____no_output_____
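###Markdown
The loop above polls indefinitely. As a defensive variant (our own addition, not part of the original walkthrough), you could cap the number of polling attempts, for example:
###Code
# Optional variant: poll with a maximum number of attempts instead of looping forever
# (the cap and interval below are arbitrary choices for illustration)
max_attempts = 60
poll_interval_seconds = 5
for attempt in range(max_attempts):
    response = requests.request(
        "GET",
        url,
        headers=headers,
        auth=requests.auth.HTTPBasicAuth(api_user, api_pass)
    ).json()
    if response["status"] == "Completed":
        print(f"File at {response['downloadUrl']}")
        break
    print(f"Attempt {attempt + 1}: job status is {response['status']}")
    time.sleep(poll_interval_seconds)
else:
    print("Export job did not complete within the polling window")
###Output
_____no_output_____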
###Markdown
Pulling Method 2 (Webhook) - Check export status and retrieve download URL
###Code
headers = {"content-type": "application/json"}
response = requests.get(
webhook_url,
headers=headers
).json()
print(f"Job Status: {response['status']}")
print(f"File at {response['downloadUrl']}")
###Output
_____no_output_____
###Markdown
Download CSV and load into panda frame
###Code
file_name = "response_file.csv"
resp = requests.get(response['downloadUrl'])
with open(file_name, "wb") as f:
f.write(resp.content)
frame = pd.read_csv(file_name)
frame
###Output
_____no_output_____ |
ApplicationLab6/ATOC5860_applicationlab6_cluster_mesa_data.ipynb | ###Markdown
Here, we will use [K-means clustering](https://en.wikipedia.org/wiki/K-means_clustering) to classify weather data into different clusters. Each cluster might look like a season. Why would we cluster weather observations? We already know which observations are in which season by looking at the date. But we all know that a day in February sometimes feels like summer and a day in September can feel like winter. We often have multiple seasons in a single week... So this could be quite fun to see how the algorithm decides how to cluster our data and assign each day to a "season". :) **STEP 1: Import packages and functions we will use**
###Code
import pandas as pd
import numpy as np
import datetime
from scipy.cluster.vq import vq, kmeans, whiten
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
You will be working with weather data from the [NCAR Mesa Laboratory in Boulder, Colorado](https://en.wikipedia.org/wiki/Mesa_Laboratory). We'll call this dataset the "Mesa dataset". The data go from 2016-2021. Prof. Kay's munging code is up [on github](https://github.com/jenkayco/ATOC4500_Spring2022/tree/main/mesadata_process). Information on the site and the instruments is [here](https://www.eol.ucar.edu/content/ncar-foothills-lab-weather-station-information). Real-time data from the site is [here](https://archive.eol.ucar.edu/cgi-bin/weather.cgi?site=ml&period=5-minute&fields=tdry&fields=rh&fields=cpres0&fields=wspd&fields=wdir&fields=raina&units=english). An example of what some of the munging might look like for this dataset is [here](https://ncar.github.io/esds/posts/2021/weather-station-data-preprocess/). *Note: Each year in this dataset has 365 days. Leap year data (i.e., Feb. 29 data for 2016 and 2020 have been excluded.)* **STEP 2: Read in the Data and Look At It**
###Code
### Read in your data
df = pd.read_csv("NCAR_mesa_data_2016-2021_withmissing.csv")
### Preview the values in the data frame by printing it
print(df)
### Print the Shape of the data frame
print(df.shape)
### Check out the available days in this dataset
print(f'How many days are in this dataset?: {df.day.nunique()}')
print(f'nyrs*ndays={6*365}')
## check that the day format is a readable date
print(df['day'])
###Output
How many days are in this dataset?: 2190
nyrs*ndays=2190
0 2016-01-01
1 2016-01-01
2 2016-01-01
3 2016-01-01
4 2016-01-01
...
52555 2021-12-31
52556 2021-12-31
52557 2021-12-31
52558 2021-12-31
52559 2021-12-31
Name: day, Length: 52560, dtype: object
###Markdown
**STEP 3: Add the month-based season as a variable to your data frame**
###Code
## add a new variable called "season" to the dataframe df
## this variable contains the season defined traditionally based on the month:
## DJF=winter, MAM=spring, JJA=summer, SON=fall
## use a dictionary to define seasons from months
### define the seasons as DJF=winter=1, MAM=spring=2, JJA=summer=3, SON=fall=4
seasons = {(1, 12, 2): 1, (3, 4, 5): 2, (6, 7, 8): 3, (9, 10, 11): 4}
## define a function to assign seasons to months using the dictionary
def season(ser):
for k in seasons.keys():
if ser in k:
return seasons[k]
## apply the function season to one time in my dataset as a sanity check
#print(df['day'][5000].month)
#print(season(df['day'][5000].month))
## apply it to all times in my dataset - check
#for month in pd.DatetimeIndex(df['day']).month[-5000:-4890]:
# print(season(month))
## apply the function season to all times in my dataset
df['season']=[season(month) for month in pd.DatetimeIndex(df['day']).month]
#check random value values
#df['season'][5000]
## check the data frame to see if there is a new variable called "season"
df
###Output
_____no_output_____
###Markdown
**STEP 4: Select Data To Use, Convert data into a Numpy Array, Look at data again!**
###Code
#### select the time
##Only select a certain time
##Note: time is in UTC. Noon in Colorado depends on daylight savings.
## For simplicity just starting with 4 UTC (9/10 pm in Denver). could also use 17 UTC (Noon/1 pm in Denver)
selectedhourUTC=17 ## added so that there is a variable that sets the selected hour for the rest of the notebook.
selectdf = df[df.hour_UTC==selectedhourUTC] ##
print(len(selectdf))
### tinker later (code that you may want to "borrow" later in this notebook)
#selectdf = df # try selecting all data instead of just noon data
selectdf
## Select variables of interest. Aside variables are called "features" by some in the ML world.
included_cols = ['pres_mb','tdry_degC','rh_percent','wdir','wspd_m_per_s','wspdmax_m_per_s','raina_event_mm']
## convert to a numpy array for model input and leave out the time dimensions day & hour, and wind directions.
data = selectdf.loc[:, selectdf.columns.isin(list(included_cols))].to_numpy()
## check the shape of your data -- should be (6*365,7) (nyrs*ndays, variable)
print(f'Your data are now 2190 observations over 7 variables: {np.shape(data)}')
print(data.shape)
### option to check out quick plots of your data...
### say yes first time through - double check your input, ALWAYS.
quickplot = 'no'
if quickplot == 'yes':
## Quick plots of your data
for i in np.arange(0,np.shape(data)[1]):
plt.plot(data[:,i]);
plt.title(included_cols[i])
plt.xticks(ticks=np.arange(0,len(data)+365,365)) ###
plt.xlabel('Days since Jan 1, 2016')
plt.show()
###Output
_____no_output_____
###Markdown
**STEP 5: Replace missing data.** There are missing values in the Mesa dataset. Before you use the data in the k-means clustering algorithm - you need to replace missing data with a real value. There are a couple of options below for how to do this. If you would like to code it up (add to the code base!!), you are also welcome to experiment with other options (e.g., interpolate, randomly select values).
###Code
## copy the data array into a new array without missing values called (data_nomissing)
data_nomissing=data.copy();
print(data_nomissing.shape)
option_missing='option1'
##### OPTION #1 - replace the NaN data with the mean value for that season. Simple!
if option_missing=='option1':
season=selectdf['season'];
#print(len(season))
## this is the actual season from the definition in the season dictionary above
    season_idx=df['season'][df.hour_UTC==selectedhourUTC].to_numpy()
#print(season_idx.shape)
## find all of the days in winter, i.e., where season_idx=1
winteridx=np.nonzero(np.where(season_idx==1,1,0))
## loop over variables
for i in np.arange(0,len(included_cols)):
data_nomissing[winteridx,i]=np.where(np.isnan(data[winteridx,i])==True,np.nanmean(data[winteridx,i]),data[winteridx,i])
## find all of the days in spring, i.e., where season_idx=2
springidx=np.nonzero(np.where(season_idx==2,1,0))
## loop over variables
for i in np.arange(0,len(included_cols)):
data_nomissing[springidx,i]=np.where(np.isnan(data[springidx,i])==True,np.nanmean(data[springidx,i]),data[springidx,i])
## find all of the days in summer, i.e., where season_idx=3
summeridx=np.nonzero(np.where(season_idx==3,1,0))
## loop over variables
for i in np.arange(0,len(included_cols)):
data_nomissing[summeridx,i]=np.where(np.isnan(data[summeridx,i])==True,np.nanmean(data[summeridx,i]),data[summeridx,i])
## find all of the days in fall, i.e., where season_idx=4
fallidx=np.nonzero(np.where(season_idx==4,1,0))
## loop over variables
for i in np.arange(0,len(included_cols)):
data_nomissing[fallidx,i]=np.where(np.isnan(data[fallidx,i])==True,np.nanmean(data[fallidx,i]),data[fallidx,i])
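##### OPTION #2 (a minimal sketch, not used by default) - fill gaps by linear interpolation in time
##### instead of seasonal means. Assumes pandas (pd) is already imported earlier in this notebook.
if option_missing=='option2':
    ## loop over variables and interpolate each one, filling leading/trailing gaps from both directions
    for i in np.arange(0,len(included_cols)):
        data_nomissing[:,i]=pd.Series(data[:,i]).interpolate(limit_direction='both').to_numpy()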
##### plot your data to see what they look like after you have dealt with nan (i.e., missing data)
quickplot = 'no'
if quickplot == 'yes':
## Quick plots of your data
for i in np.arange(0,np.shape(data_nomissing)[1]):
plt.plot(data_nomissing[:,i]);
plt.title(included_cols[i])
plt.show()
### if np.mean for data_nomissing is a real value - we have removed all the missing values!
print(f'take the mean of the variable data exluding missing data: {np.nanmean(data)}')
print(f'take the mean of the variable data: {np.mean(data)}')
print(f'take the mean of the variable data_nomissing: {np.mean(data_nomissing)}')
###Output
take the mean of the variable data exluding missing data: 146.90404589424074
take the mean of the variable data: nan
take the mean of the variable data_nomissing: 146.85374602814528
###Markdown
**STEP 6: Rescale your data to have unit variance**. *Why? Since clustering among features depends on distance, we need to scale all of our features so that all features have equal variance. We don't want the clustering to be dominated by the feature with the largest variance...*
###Code
## https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.whiten.html
#"Before running k-means, it is beneficial to rescale each feature dimension of the observation
#set by its standard deviation (i.e. “whiten” it - as in “white noise” where each frequency has equal power).
#Each feature is divided by its standard deviation across all observations to give it unit variance."
scaled_data = whiten(data_nomissing) # scipy's whiten function (scipy.cluster.vq) to standardize each feature for clustering
#scaled_data = data.copy() # uncomment to NOT whiten, but still enable this notebook to run without modifications
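## Optional sanity check (a minimal sketch): whiten divides each column by its standard deviation,
## so doing that by hand should give (essentially) the same array as scaled_data above.
scaled_manual = data_nomissing / np.std(data_nomissing, axis=0)
print(f'max |whiten - manual scaling| = {np.max(np.abs(scaled_data - scaled_manual))}')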
## Check that you end up with the same shape as the original data...
print(f'LOOK AT YOUR DATA: shape should be (days, variable), more specifically (2190,7): {np.shape(scaled_data)}')
print('mean of each variable')
print(np.mean(scaled_data,axis=0))
print('standard deviation of each variable')
print(np.std(scaled_data,axis=0))
### print the type of scaled_data
print(type(scaled_data))
## check out quick plots of scaled_data... always take this option.
## note: we did not remove the mean...
quickplot = 'yes'
if quickplot == 'yes':
## Quick plots of your data
for i in np.arange(0,np.shape(scaled_data)[1]):
plt.plot(scaled_data[:,i],label=included_cols[i]);
## check what happens when you uncomment the two lines below
#plt.title(included_cols[i])
#plt.show()
plt.legend()
###Output
_____no_output_____
###Markdown
**STEP 7: Use K-means clustering to look for patterns in the data**
###Code
## select the number of clusters (i.e., here the number of seasons you want to try to identify using the weather data)
NO_CLUSTERS = 4 # use 4 clusters - see if you get out 4 seasons of winter, fall, spring, summer
NO_CLUSTERS = 3 # JSHAW
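## Optional (a minimal sketch): kmeans also returns a mean distortion, which can be compared across
## candidate cluster counts to help choose NO_CLUSTERS (lower distortion = tighter clusters).
for k in range(2, 7):
    _, distortion = kmeans(scaled_data, k, iter=20)
    print(f'k = {k}: mean distortion = {distortion:.3f}')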
## do the clustering, return centroids (i.e., Coordinates of cluster centers.)
## https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
## https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.kmeans.html
centroids, _ = kmeans(scaled_data,NO_CLUSTERS,iter=20)
## find an index (idx) that is the label for each value in your dataset (i.e, each day at noon)
## https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.vq.html
idx, _ = vq(scaled_data,centroids) # vq returns cluster idx for each observation
## add 1 so that idx is also 1-based like df['season']
idx=idx+1
print('Here is the cluster idx for each day of the year:')
print(idx.shape) # prints K-mean's season label for each day
###Output
Here is the cluster idx for each day of the year:
(2190,)
###Markdown
**STEP 8: Visualize your results**.
###Code
### Make a quick plot to show the index results
### plot each year
plt.plot(idx[0:365],'.',label='2016');
plt.plot(idx[365:365*2]+0.1,'.',label='2017');
plt.plot(idx[365*2:365*3]+0.2,'.',label='2018');
plt.plot(idx[365*3:365*4]+0.3,'.',label='2019');
plt.plot(idx[365*4:365*5]+0.4,'.',label='2020');
plt.plot(idx[365*5:365*6]+0.5,'.',label='2021');
#plt.legend(loc='best');
plt.legend(bbox_to_anchor=(1, 0.75), loc='upper left', ncol=1);
plt.yticks(np.arange(1,NO_CLUSTERS+1),labels=['cluster'+str(i) for i in np.arange(1,NO_CLUSTERS+1)]);  ## labels sized to NO_CLUSTERS so tick and label counts always match
plt.xlabel('Day of the year')
plt.title('Seasonal cycle of cluster assignment by Year');
#### OH! Here's a prettier plot contributed by Rachel Mooers (ATOC Undergrad)
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_xlabel('Day of the Year', fontsize=14)
ax.set_ylabel('Cluster Index', fontsize=14)
ax.set_xlim(0,365)
ax.set_ylim(0.5,5.3)
ax.fill_between(range(len(idx))[0:79], [0]*len(range(len(idx))[0:79]), [6]*len(range(len(idx))[0:79]), color='lightcyan')
ax.fill_between(range(len(idx))[79:172], [0]*len(range(len(idx))[79:172]), [6]*len(range(len(idx))[79:172]), color='honeydew')
ax.fill_between(range(len(idx))[172:263], [0]*len(range(len(idx))[172:263]), [6]*len(range(len(idx))[172:263]), color='seashell')
ax.fill_between(range(len(idx))[263:355], [0]*len(range(len(idx))[263:355]), [6]*len(range(len(idx))[263:355]), color='lightgoldenrodyellow')
ax.fill_between(range(len(idx))[355:366], [0]*len(range(len(idx))[355:366]), [6]*len(range(len(idx))[355:366]), color='lightcyan')
ax.plot(idx[0:365],'.',label='2016');
ax.plot(idx[365:365*2]+0.1,'.',label='2017');
ax.plot(idx[365*2:365*3]+0.2,'.',label='2018');
ax.plot(idx[365*3:365*4]+0.3,'.',label='2019');
ax.plot(idx[365*4:365*5]+0.4,'.',label='2020');
ax.plot(idx[365*5:365*6]+0.5,'.',label='2021');
#plt.legend(loc='best');
plt.legend(bbox_to_anchor=(1, 0.75), loc='upper left', ncol=1);
plt.yticks(np.arange(1,NO_CLUSTERS+1),labels=['cluster'+str(i) for i in np.arange(1,NO_CLUSTERS+1)]);  ## labels sized to NO_CLUSTERS so tick and label counts always match
ax.set_title('Seasonal cycle of cluster assignment by Year', fontsize=17);
ax.text(22, 4.8, 'Winter', fontsize=15, color='cornflowerblue');
ax.text(108, 4.8, 'Spring', fontsize=15, color='limegreen');
ax.text(195, 4.8, 'Summer', fontsize=15, color='tomato');
ax.text(300, 4.8, 'Fall', fontsize=15, color='Goldenrod');
###Output
_____no_output_____
###Markdown
What is the plot in the cell above this question showing? What information is it providing to you about the results? What have you learned about the seasonal occurrence of the different clusters by looking at it?
###Code
# pres_mb tdry_degC rh_percent wdir wspd_m_per_s wspdmax_m_per_s raina_event_mm
## Create a few xy scatter plots, where points are colored by "season" (from clustering algorithm).
### Pick any two variables that are a part of included_cols
### ['pres_mb','tdry_degC','rh_percent','wdir','wspd_m_per_s','wspdmax_m_per_s','raina_event_mm']
##print(included_cols)
##### PLOT #1
### select two variables and put them in vars2plot - # format (x, y)
vars2plot = ['wspdmax_m_per_s','wspd_m_per_s']
print(f'plotting these variables: {vars2plot}')
data2plot = [data[:,included_cols.index(var)] for var in vars2plot]
## find the integer index of the variable to plot
varidx2plot=np.zeros(2,dtype="int")
for i in np.arange(0,2):
#print(vars2plot[i])
varidx2plot[i]=included_cols.index(vars2plot[i])
#print(varidx2plot)
### Next plot these variables as the original values with colors to identify the associated cluster
# (red=1, blue=2, grey=3, orange=4)
cols = ['','red','blue','grey','orange']
plt.figure(figsize=(8,5))
plt.title('K-means classification with ' + str(NO_CLUSTERS) + ' Clusters',fontsize=22)
for (ind,val) in enumerate(np.transpose(data2plot)):
plt.plot(val[0],val[1],".", color=cols[idx[ind]], markersize=10, markerfacecolor = 'none')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel(vars2plot[0],fontsize=18);
plt.ylabel(vars2plot[1],fontsize=18);
# plt.xlim(0,1)
plt.show()
### You can also plot these variables as scaled values with the cluster centers
#plt.figure(figsize=(8,5))
#plt.title('K-means classification with ' + str(NO_CLUSTERS) + ' Clusters',fontsize=22)
##plt.scatter(scaled_data[:, varidx2plot[0]], scaled_data[:, varidx2plot[1]])
#plt.scatter(centroids[:, varidx2plot[0]], centroids[:, varidx2plot[1]],color='red',marker='*',s=1000)
#plt.xticks(fontsize=18)
#plt.yticks(fontsize=18)
#plt.xlabel('scaled '+vars2plot[0],fontsize=18);
#plt.ylabel('scaled '+vars2plot[1],fontsize=18);
#plt.show()
##### PLOT #2
### select two variables and put them in vars2plot - # format (x, y)
vars2plot = ['wdir','rh_percent']
#print(f'plotting these variables: {vars2plot}')
data2plot = [data[:,included_cols.index(var)] for var in vars2plot]
## find the integer index of the variable to plot
varidx2plot=np.zeros(2,dtype="int")
for i in np.arange(0,2):
varidx2plot[i]=included_cols.index(vars2plot[i])
### Now plot these variables as the original values with colors to identify the associated cluster
# (red=1, blue=2, grey=3, orange=4)
cols = ['','red','blue','grey','orange']
plt.figure(figsize=(8,5))
plt.title('K-means classification with ' + str(NO_CLUSTERS) + ' Clusters',fontsize=22)
for (ind,val) in enumerate(np.transpose(data2plot)):
plt.plot(val[0],val[1],".", color=cols[idx[ind]], markersize=10, markerfacecolor = 'none')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel(vars2plot[0],fontsize=18);
plt.ylabel(vars2plot[1],fontsize=18);
### You can also plot these variables as scaled values with the cluster centers
#plt.figure(figsize=(8,5))
#plt.title('K-means classification with ' + str(NO_CLUSTERS) + ' Clusters',fontsize=22)
##plt.scatter(scaled_data[:, varidx2plot[0]], scaled_data[:, varidx2plot[1]])
#plt.scatter(centroids[:, varidx2plot[0]], centroids[:, varidx2plot[1]],color='red',marker='*',s=1000)
#plt.xticks(fontsize=18)
#plt.yticks(fontsize=18)
#plt.xlabel('scaled '+vars2plot[0],fontsize=18);
#plt.ylabel('scaled '+vars2plot[1],fontsize=18);
#plt.show()
### select THREE variables and put them in vars2plot - # format (x, y, z)
vars2plot = ['wdir','tdry_degC','rh_percent']
print(f'plotting these variables: {vars2plot}')
data2plot = [data[:,included_cols.index(var)] for var in vars2plot]
print(len(data2plot))
## find the integer index of the variable to plot
varidx2plot=np.zeros(3,dtype="int")
for i in np.arange(0,3):
print(vars2plot[i])
varidx2plot[i]=included_cols.index(vars2plot[i])
print(varidx2plot)
### Next plot these variables as the original values with colors to identify the associated cluster
# (red=1, blue=2, grey=3, orange=4)
cols = ['','red','blue','grey','orange']
fig = plt.figure(figsize=(14, 10))
ax = fig.add_subplot(projection='3d')
ax.set_title('K-means classification with ' + str(NO_CLUSTERS) + ' Clusters',fontsize=22)
for (ind,val) in enumerate(np.transpose(data2plot)):
ax.scatter(val[0],val[1],val[2],".", color=cols[idx[ind]])
ax.set_xlabel(vars2plot[0],fontsize=15);
ax.set_ylabel(vars2plot[1],fontsize=15);
ax.set_zlabel(vars2plot[2],fontsize=15);
plt.show()
###Output
plotting these variables: ['wdir', 'tdry_degC', 'rh_percent']
3
wdir
tdry_degC
rh_percent
[3 1 2]
###Markdown
Based on your scatter plots above - When do the variables help (or NOT help) define the clusters? **STEP 9: Plot histograms of the assigned clusters in the known seasons**.
###Code
#### prepare data to plot in the histograms
## this is the actual season from the definition in the season dictionary above
season_idx=df['season'][df.hour_UTC==selectedhourUTC].to_numpy()
print(season_idx.shape)
## this is the season from the k-means clustering algorithm
print(idx.shape)
### find what the k-means clustering assigned in each season
## find all of the days in winter, i.e., where season_idx=1
winteridx=np.nonzero(np.where(season_idx==1,1,0))
#print(winteridx) ### uncomment me to check the code
## find what values the k-means classified for this season
winter=idx[winteridx]
#print(winter) ### uncomment me to check the code
## find all of the days in spring, i.e., where season_idx=2
springidx=np.nonzero(np.where(season_idx==2,1,0))
## find what values the k-means classified for this season
spring=idx[springidx]
## find all of the days in summer, i.e., where season_idx=3
summeridx=np.nonzero(np.where(season_idx==3,1,0))
## find what values the k-means classified for this season
summer=idx[summeridx]
## find all of the days in fall, i.e., where season_idx=4
fallidx=np.nonzero(np.where(season_idx==4,1,0))
## find what values the k-means classified for this season
fall=idx[fallidx]
### Histogram Type #1 - Compare seasons based on month and based on k-means clustering over the entire year
## define bins for the histogram
delta= 1 ## bin size
minbin= 0.5 ## minimum bin value
maxbin= 5.5 ## maximum bin value
hist_bins = np.arange(minbin,maxbin,delta) ## bins
#print(hist_bins) ## print your histograms to check them
#### all data - plot the histogram
h, mids = np.histogram(idx,bins=hist_bins);
plt.hist(idx,bins=mids,facecolor='white',edgecolor="grey",label='k-means',lw=2);
plt.hist(season_idx,bins=mids,facecolor='r',edgecolor="r",label='actual seasons',alpha=0.5,lw=2);
plt.ylabel('count')
#plt.ylim(0,1000)
plt.xticks(np.arange(1,4+1),labels=['cluster1','cluster2','cluster3','cluster4']);
plt.legend();
plt.title('Month-based seasons (red) vs. Clustering seasons (black)');
### Histogram Type #2 - Histograms of seasons based on k-means clustering for each season
## define bins for the histogram
delta= 1 ## bin size
minbin= 0.5 ## minimum bin value
maxbin= 5.5 ## maximum bin value
hist_bins = np.arange(minbin,maxbin,delta) ## bins
#print(hist_bins) ## print your histograms to check them
## sanity check = plot the histogram of assigned clusters for winter
#h, mids = np.histogram(winter,bins=hist_bins);
#labels4xaxis=['cluster1','cluster2','cluster3','cluster4']
#plt.figure(figsize=(4,4))
#plt.hist(winter,bins=mids,facecolor='w',edgecolor="k");
#plt.xticks(np.arange(1,4+1),labels=labels4xaxis);
#plt.title('Winter');
#plt.show();
### Show all seasons using subplots
### Help with subplots: https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/subplots_demo.html
labels4xaxis=['','cluster1','cluster2','cluster3','cluster4']
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
fig.suptitle('k-means clustering results - all seasons',fontsize=20)
fig.set_figheight(6)
fig.set_figwidth(8)
ymax=500
h, mids = np.histogram(winter,bins=hist_bins);
ax1.hist(winter,bins=mids,facecolor='w',edgecolor="k");
ax1.set_title("winter");
ax1.set_ylabel("count");
ax1.set_ylim(0,ymax)
h, mids = np.histogram(spring,bins=hist_bins);
ax2.hist(spring,bins=mids,facecolor='w',edgecolor="k");
ax2.set_title("spring");
ax2.set_ylim(0,ymax)
h, mids = np.histogram(summer,bins=hist_bins);
ax3.hist(summer,bins=mids,facecolor='w',edgecolor="k");
ax3.set_title("summer");
ax3.set_xticklabels(labels4xaxis);
ax3.set_ylabel("count");
ax3.set_ylim(0,ymax)
h, mids = np.histogram(fall,bins=hist_bins);
ax4.hist(fall,bins=mids,facecolor='w',edgecolor="k");
ax4.set_title("fall");
ax4.set_xticklabels(labels4xaxis);
ax4.set_ylim(0,ymax)
for ax in fig.get_axes():
ax.label_outer()
###Output
/opt/anaconda3/envs/intro2ml_environment/lib/python3.7/site-packages/ipykernel_launcher.py:42: UserWarning: FixedFormatter should only be used together with FixedLocator
/opt/anaconda3/envs/intro2ml_environment/lib/python3.7/site-packages/ipykernel_launcher.py:48: UserWarning: FixedFormatter should only be used together with FixedLocator
###Markdown
Based on your plots histograms plotted by season - What clusters are happening in what seasons? When you use 4 clusters - do they align with our definitions of "winter", "summer", "spring", "fall" based on month of the year? Why or why not?
###Code
### Add cluster to the data frame and print the values for a given date range
### for example you might want to print the days around Labor Day in 2020
### these are the days we talked about in the introduction to the application lab...
startdate='2020-09-04'
enddate='2020-09-16'
### or the end of 2021..
#startdate='2021-12-01'
#enddate='2021-12-31'
foo=selectdf.copy()
foo['cluster']=idx
#print(foo[(foo['day'] > startdate) & (foo['day'] < '2020-09-10')][enddate]);
foo[(foo['day'] > startdate) & (foo['day'] < enddate)]
###Output
_____no_output_____ |
_math/MIT_OCW_18_06_Linear_algebra/III_02_Complex_matrices_FFT.ipynb | ###Markdown
+ This notebook is part of lecture 26 *Complex matrices and the fast Fourier transform* in the OCW MIT course 18.06 by Prof Gilbert Strang [1]+ Created by me, Dr Juan H Klopper + Head of Acute Care Surgery + Groote Schuur Hospital + University Cape Town + Email me with your thoughts, comments, suggestions and corrections Linear Algebra OCW MIT18.06 IPython notebook [2] study notes by Dr Juan H Klopper is licensed under a Creative Commons Attribution-NonCommercial 4.0 International License.+ [1] OCW MIT 18.06+ [2] Fernando Pérez, Brian E. Granger, IPython: A System for Interactive Scientific Computing, Computing in Science and Engineering, vol. 9, no. 3, pp. 21-29, May/June 2007, doi:10.1109/MCSE.2007.53. URL: http://ipython.org
###Code
from IPython.core.display import HTML, Image
css_file = 'style.css'
HTML(open(css_file, 'r').read())
from sympy import init_printing, Matrix, symbols, I, sqrt, Rational
from IPython.display import Image
from warnings import filterwarnings
init_printing(use_latex = 'mathjax')
filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Complex vectors, matrices Fast Fourier transform Complex vectors * Consider the following vector with complex entries (from this point on I will not use the underscore to indicate a vector, so as not to create confusion with the bar, noting complex conjugate, instead, inferring from context)$$ {z} = \begin{bmatrix} {z}_{1} \\ {z}_{2} \\ \vdots \\ {z}_{n} \end{bmatrix} $$ * The length (actually length squared) of this vector is *no good*, since it should be positive$$ {z}^{T}{z} $$* Instead we consider the following$$ z\bar { z } ={ \left| { z } \right| }^{ 2 }\\ \therefore \quad \bar { z } ^{ T }z\\ \left[ { \bar { z } }_{ 1 },{ \bar { z } }_{ 2 },\dots ,{ \bar { z } }_{ n } \right] \begin{bmatrix} { z }_{ 1 } \\ { z }_{ 2 } \\ \vdots \\ { z }_{ n } \end{bmatrix} $$
###Code
z = Matrix([1, I]) # I is the sympy symbol for the imaginary number i
z
###Output
_____no_output_____
###Markdown
* Let's calculate this manually
###Code
z.norm() # The length of a vector
z_cc = Matrix([1, -I])
z_cc
sqrt(z_cc.transpose() * z)
###Output
_____no_output_____
###Markdown
* Taking the transpose of the complex conjugate is called the Hermitian$$ {z}^{H}{z} $$ * We can use the Hermitian for non-complex (or mixed complex) vectors **u** and **v** too$$ \bar{y}^{T}{x} \\ {y}^{H}{x} $$
###Code
from sympy.physics.quantum.dagger import Dagger # A fun way to quickly get the Hermitian
Dagger(z)
sqrt(Dagger(z) * z)
###Output
_____no_output_____
###Markdown
Complex symmetric matrices The transpose * If the symmetric matrix has complex entries then AT=A is *no good*
###Code
A = Matrix([[2, 3 + I], [3 - I, 5]])
A # A Hermitian matrix
A.transpose() == A
Dagger(A)
Dagger(A) == A
###Output
_____no_output_____
###Markdown
* This will work for real-values symmetric matrices as well
###Code
A = Matrix([[3, 4], [4, 2]])
A
A.transpose() == A
Dagger(A) == A
###Output
_____no_output_____
###Markdown
The eigenvalues and eigenvectors * Back to the complex matrix A
###Code
A = Matrix([[2, 3 + I], [3 - I, 5]])
A
A.eigenvals()
###Output
_____no_output_____
###Markdown
$$ A=\begin{bmatrix} 2 & 3+i \\ 3-i & 5 \end{bmatrix}\\ A-\lambda I=\underline { 0 } \\ \left| \begin{bmatrix} 2 & 3+i \\ 3-i & 5 \end{bmatrix}-\begin{bmatrix} \lambda & 0 \\ 0 & \lambda \end{bmatrix} \right| =0\\ \begin{vmatrix} 2-\lambda & 3+i \\ 3-i & 5-\lambda \end{vmatrix}=0\\ \left( 2-\lambda \right) \left( 5-\lambda \right) -\left( 3+i \right) \left( 3-i \right) =0\\ 10-7\lambda +{ \lambda }^{ 2 }-\left( 9+1 \right) =0\\ { \lambda }^{ 2 }-7\lambda =0\\ { \lambda }_{ 1 }=0\\ { \lambda }_{ 2 }=7 $$
###Code
A.eigenvects()
S, D = A.diagonalize()
S
D
###Output
_____no_output_____
###Markdown
* What about S now?* We have to use its transpose, but it is complex, so we have to take the Hermitian
###Code
Dagger(S)
S == Dagger(S) # Don't get confused here, S is not symmetric
###Output
_____no_output_____
###Markdown
* Remember that for a symmetric matrix the column vectors in S (usually called Q, the matrix of eigenvectors) are orthogonal, with QTQ=I* With complex entries we have to consider the Hermitian here, not just the simple transpose* Here we call Q *unitary* The fast Fourier transform * Look at this special matrix (where we start counting rows and columns at zero) $$ { F }_{ n }=\begin{bmatrix} W^{ \left( 0 \right) \left( 0 \right) } & { W }^{ \left( 0 \right) \left( 1 \right) } & { W }^{ \left( 0 \right) \left( 2 \right) } & \dots & { W }^{ \left( 0 \right) \left( n-1 \right) } \\ W^{ \left( 1 \right) \left( 0 \right) } & { W }^{ \left( 1 \right) \left( 1 \right) } & { W }^{ \left( 1 \right) \left( 2 \right) } & \dots & { W }^{ \left( 1 \right) \left( n-1 \right) } \\ { W }^{ \left( 2 \right) \left( 0 \right) } & { W }^{ \left( 2 \right) \left( 1 \right) } & { W }^{ \left( 2 \right) \left( 2 \right) } & \dots & { W }^{ \left( 2 \right) \left( n-1 \right) } \\ \vdots & \vdots & \vdots & \dots & \vdots \\ { W }^{ \left( n-1 \right) \left( 0 \right) } & { W }^{ \left( n-1 \right) \left( 1 \right) } & { W }^{ \left( n-1 \right) \left( 2 \right) } & \dots & { W }^{ \left( n-1 \right) \left( n-1 \right) } \end{bmatrix} \\ \left({F}_{n}\right)_{ij}={W}^{ij}; i,j=0,1,2,\dots,n-1 $$ * W is a special number whose *n*th power equals 1$$ {W}^{n}=1 \\ W={ e }^{ \frac { i2\pi }{ n } }=\cos { \frac { 2\pi }{ n } +i\sin { \frac { 2\pi }{ n } } } $$* It is in the complex plane of course (as written in *sin* and *cos* above) * Remember than *n* here refers to the size the matrix* Here it also refers to the *n*th *n* roots (if that makes any sense, else look at the image below)
###Code
Image(filename = 'W.png')
###Output
_____no_output_____
###Markdown
* So for *n*=4 we will have the following$$ { F }_{ 4 }=\begin{bmatrix} 1 & 1 & 1 & 1 \\ 1 & \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 1 } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 2 } } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 3 } } \\ 1 & \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 2 } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 4 } } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 6 } } \\ 1 & \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 3 } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 6 } } & { \left( { e }^{ \frac { 2\pi i }{ 4 } } \right) ^{ 9 } } \end{bmatrix} $$ * We note that a quarter of the way around is *i*$$ {e}^{\frac{2\pi{i}}{4}}={i} $$* We thus have the following$$ { F }_{ 4 }=\begin{bmatrix} 1 & 1 & 1 & 1 \\ 1 & i & { i }^{ 2 } & { i }^{ 3 } \\ 1 & { i }^{ 2 } & { i }^{ 4 } & { i }^{ 6 } \\ 1 & { i }^{ 3 } & { i }^{ 6 } & { i }^{ 9 } \end{bmatrix}\\ { F }_{ 4 }=\begin{bmatrix} 1 & 1 & 1 & 1 \\ 1 & i & -1 & -i \\ 1 & -1 & 1 & -1 \\ 1 & -i & -1 & i \end{bmatrix} $$ * Note how the columns are orthogonal
###Code
F = Matrix([[1, 1, 1, 1], [1, I, -1, -I], [1, -1, 1, -1], [1, -I, -1, I]])
F
F.col(0) # Calling only the selected column (counting starts at 0)
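# A quick check (a minimal sketch; assumes exp, pi and simplify can be imported from sympy here):
# build F_4 directly from W = exp(2*pi*I/4) via (F_n)_ij = W**(i*j) and compare with the matrix above
from sympy import exp, pi, simplify
F_built = Matrix(4, 4, lambda i, j: exp(2 * pi * I / 4)**(i * j))
F_built.applyfunc(simplify) == F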
###Output
_____no_output_____
###Markdown
* The columns are supposed to be orthogonal, i.e. inner (dot) product should be zero* Clearly below it is not
###Code
F.col(1).dot(F.col(3))
###Output
_____no_output_____
###Markdown
* Remember, though, that this is a complex matrix and we have to use the Hermitian
###Code
col1 = F.col(1)
col3 = F.col(3)
col1, col3
Dagger(col3), col1
Dagger(col3) * col1 # Another way to do the dot product
###Output
_____no_output_____
###Markdown
* So, these columns are all orthogonal, but they are not orthonormal* Note, though that the are all of length 2, so we can normalize each
###Code
Rational(1, 2) * F
###Output
_____no_output_____
###Markdown
* We also note the following$$ {F}_{n}^{H}{F}_{n}={I} $$* Just remember to normalize them
###Code
Dagger(Rational(1, 2) * F)
Dagger(Rational(1, 2) * F) * ((Rational(1, 2) * F))
###Output
_____no_output_____ |
Python-For-Data-Analysis/Chapter 7 Data Cleaning and Preparation/7.1 Handling Missing Data.ipynb | ###Markdown
7.1 Handling Missing Data All of the descriptive statistics on pandas objects exclude missing data by default. For numeric data, pandas uses the floating-point value NaN (Not a Number) to represent missing data. We call this a sentinel value. The built-in Python None value is also treated as NA in object arrays. When cleaning up data for analysis, it is often important to do analysis on the missing data itself to identify data collection problems or potential biases in the data caused by missing data. Filtering Out Missing Data
###Code
import numpy as np
from numpy import nan as NA
import pandas as pd
data = pd.Series([1, NA, 3.5, NA, 7])
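# Quick illustration of the points above (a minimal sketch): the Python None value is treated as NA
# in object arrays, and descriptive statistics such as sum() simply skip missing values
pd.Series(['aardvark', None, 'avocado']).isnull()
data.sum() # NaNs are excluded, so this returns 11.5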
###Output
_____no_output_____
###Markdown
Series
###Code
data.dropna()
data[~data.isnull()] #boolean indexing
###Output
_____no_output_____
###Markdown
Dataframe
###Code
data = pd.DataFrame([[1., 6.5, 3.], [1., NA, NA],[NA, NA, NA], [NA, 6.5, 3.]])
data
###Output
_____no_output_____
###Markdown
dropna by default dropsany row containing a missing value
###Code
data.dropna()
###Output
_____no_output_____
###Markdown
Passing how='all' will only drop columns/rows that are all NA. Drop columns by passing axis=1
###Code
data.dropna(axis=1, how='all')
###Output
_____no_output_____
###Markdown
Time Series
###Code
df = pd.DataFrame(np.random.randn(7, 3))
df.iloc[:4, 1] = NA
df.iloc[1:4, 0] = NA
df
df.dropna()
###Output
_____no_output_____
###Markdown
Suppose you want to keep only rows containing a certain number of observations. You can indicate this with the thresh argument
###Code
df.dropna(thresh=2)
###Output
_____no_output_____
###Markdown
Filling In Missing Data Rather than filtering out missing data (and potentially discarding other data along with it), you may want to fill in the “holes” in any number of ways. For most purposes, the fillna method is the workhorse function to use. Calling fillna with a constant replaces missing values with that value
###Code
df.fillna(0)
###Output
_____no_output_____
###Markdown
Calling fillna with a dict, you can use a different fill value for each column
###Code
df.fillna({0: 0.5, 1: 0})
###Output
_____no_output_____
###Markdown
fillna returns a new object, but you can modify the existing object in-place by passing inplace=True. The same interpolation methods available for reindexing can be used with fillna
###Code
df
df.fillna(method='bfill',limit=2)
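# A small follow-up sketch for the notes above: forward-fill interpolation works the same way,
# and inplace=True modifies the object directly (done on a copy here so df itself is untouched)
df.fillna(method='ffill', limit=2)
df_filled = df.copy()
df_filled.fillna(0, inplace=True)
df_filled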
###Output
_____no_output_____ |
notebooks/01-oscars_processing.ipynb | ###Markdown
The Oscars Dalton Hahn (2762306) Shakespearean Play Data https://www.kaggle.com/kingburrito666/shakespeare-plays/download Data Visualization and Storytelling I want to examine Shakespeare's playset and see if there are trends that I believe may be present in the data. Specifically, I will try to examine the following: 1. What is the ratio/trend in "airtime" that Shakespeare gives to men vs. women 2. Does Shakespeare become more verbose with his later plays than his earlier plays 3. What is the proportion of "airtime" that Shakespeare grants to his main characters vs. his auxiliary characters 4. Word cloud of entire datasets most used words
###Code
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import datetime as dt
import seaborn as sns
import matplotlib.pyplot as plt
import math
from statistics import mean, stdev
df = pd.read_csv("../data/external/Shakespeare_data.csv")
df.head()
# Remove NaN rows from the dataset (these represent stage directions/non-dialogue)
print("With NaNs = ", df.shape)
df = df.dropna()
print("Without NaNs = ", df.shape)
df = df.reset_index(drop=True)
df.head()
###Output
With NaNs = (111396, 6)
Without NaNs = (105152, 6)
###Markdown
Augmenting DataNeed to augment our data with some additional information in order to create the visualizations described previously in the notebook. Specifically, we need to add:- Year the play was written/initially performed- Gender to characters- Primary vs. auxiliary to characters Adding year to play
###Code
# Get the unique play set
print(df["Play"].unique())
# Create dictionary with year corresponding to play and add as new column
# NOTE: Years of plays were taken from Wikipedia articles regarding the history of the plays (All Henry VI Part X written 1591)
year_dict = {
"Henry IV": 1597, "Henry VI Part 1": 1591, "Henry VI Part 2": 1591,"Henry VI Part 3": 1591,
"Alls well that ends well": 1598,"As you like it": 1599,"Antony and Cleopatra": 1607,"A Comedy of Errors": 1594,
"Coriolanus": 1605,"Cymbeline": 1611,'Hamlet': 1599,'Henry V': 1599,'Henry VIII': 1613,'King John': 1595,
'Julius Caesar': 1599,'King Lear': 1605,'Loves Labours Lost': 1598,'macbeth': 1606,'Measure for measure': 1603,
'Merchant of Venice': 1596,'Merry Wives of Windsor': 1602,'A Midsummer nights dream': 1595,
'Much Ado about nothing': 1598,'Othello': 1603,'Pericles': 1607,'Richard II': 1595,'Richard III': 1593,
'Romeo and Juliet': 1591,'Taming of the Shrew': 1590,'The Tempest': 1610,'Timon of Athens': 1605,
'Titus Andronicus': 1588,'Troilus and Cressida': 1602,'Twelfth Night': 1601,'Two Gentlemen of Verona': 1589,
'A Winters Tale': 1610
}
df_years = df
df_years["Year"] = ""
for index,row in df_years.iterrows():
row = row.copy()
year = year_dict[row["Play"]]
df_years.loc[index, "Year"] = year
df_years.head()
df_years.to_csv("../data/processed/play_year.csv")
###Output
_____no_output_____
###Markdown
Adding gender to characters
###Code
# Create dictionary with gender of character corresponding to play they are in
# NOTE: Utilized work by Douglas Duhaime https://github.com/duhaime/mining_the_bard who previously
# wrote a script to populate an XML file with the gender information of 842 of Shakespeare's characters, will
# match these in our dataset where possible
gender = pd.read_csv("../data/external/shakespeare_gender.txt", sep='\t', lineterminator='\n',
names=["File", "Character", "Num_Lines", "Play_Type", "Year", "Play", "Gender"])
gender = gender.drop(columns=["File", "Num_Lines", "Play_Type", "Year"])
gender.head()
print(gender["Play"].unique())
# TODO:
# 1. Remove years from all titles "Macbeth (1606). . ."
# 2. Rename 1 Henry VI -> Henry VI Part 1, etc.
# 3. Make all titles and characters uppercase in both dataframes
# 4. Remove unnecessary apostrophes
# 5. Encode Female as '1' and Male as '0'
for index,row in gender.iterrows():
row = row.copy()
row["Play"] = row["Play"].split('(')[0][:-1].upper()
row["Play"] = row["Play"].replace("'", "")
if "1 HENRY VI" == row["Play"]:
row["Play"] = "HENRY VI PART 1"
elif "2 HENRY VI" == row["Play"]:
row["Play"] = "HENRY VI PART 2"
elif "3 HENRY VI" == row["Play"]:
row["Play"] = "HENRY VI PART 3"
elif "1 HENRY IV" == row["Play"] or "2 HENRY IV" == row["Play"]:
row["Play"] = "HENRY IV"
if row["Gender"] == "male":
row["Gender"] = 0
elif row["Gender"] == "female":
row["Gender"] = 1
gender.loc[index, "Play"] = row["Play"]
gender.loc[index, "Character"] = row["Character"].upper()
gender.loc[index, "Gender"] = row["Gender"]
gender.head()
df_gender = df
df_gender["Player"] = df_gender["Player"].str.upper()
merged = pd.merge(df_gender,gender, left_on='Player', right_on="Character")
merged = merged.drop_duplicates(subset=["Dataline"])
merged = merged.reset_index(drop=True)
merged = merged.drop(columns=["Character", "Play_y"])
merged.columns = ['Dataline', 'Play', 'PlayerLinenumber', 'ActSceneLine', 'Player',
'PlayerLine', 'Year', 'Gender']
merged.head()
print(df_gender.shape)
print(merged.shape)
print("Able to match: ", len(list(set(gender["Character"]).intersection(merged["Player"]))), " characters with gender")
merged.to_csv("../data/processed/genders.csv")
###Output
_____no_output_____
###Markdown
Adding role to characters
###Code
# Create dictionary with role (primary vs secondary) of character corresponding to play they are in
# NOTE: Utilized work by MARTIN GRANDJEAN http://www.martingrandjean.ch/network-visualization-shakespeare/
# who previously did network visualization work on Shakespeare's tradgedies to describe the "true" main character
# of the work. Will use their findings as a means to populate the character roles of the matching works in our
#original dataset.
# IF THE CHARACTER IS THE MAIN CHARACTER, THEN VALUE FOR MAIN COL WILL BE 1, ELSE 0
roles = {
"Titus and Andronicus": "Lavinia",
"Romeo and Juliet": "Romeo",
"Julius Caesar": "Brutus",
"Hamlet": "Hamlet",
"Troilus and Cressida": "Troilus",
"Othello": "Othello",
"King Lear": "King Lear",
"Macbeth": "Rosse",
"Timon of Athens": "Timon",
"Antony and Cleopatra": "Mark Antony",
"Coriolanus": "Coriolanus"
}
df_role = df
df_role["Play"] = df_role["Play"].str.upper()
roles = {k.upper(): v for k, v in roles.items()}
roles_df = pd.DataFrame(list(roles.items()), columns=["Play", "Main"])
mer_role = pd.merge(df_role,roles_df, left_on='Play', right_on="Play")
mer_role = mer_role.drop_duplicates(subset=["Dataline"])
mer_role = mer_role.reset_index(drop=True)
for index, row in mer_role.iterrows():
row = row.copy()
if row["Player"] == row["Main"].replace("'", "").upper():
main = 1
else:
main = 0
mer_role.loc[index, "Main"] = main
mer_role.head(10)
mer_role.to_csv("../data/processed/roles.csv")
###Output
_____no_output_____ |
Assignment3/HMM_ALPHABETA&EM.ipynb | ###Markdown
Heuristic approach
###Code
def pred_heur(roll_seq, range, th):
pred_points = np.zeros(len(roll_seq))
for i in np.arange(len(roll_seq)):
piece_of_seq = np.array(roll_seq[i:i+range])
if np.sum(piece_of_seq==6)/range > th/range:
pred_points[i:i+range] += np.sum(piece_of_seq==6)/range
mean_points = np.mean(pred_points)
return (pred_points > mean_points).astype(int)
roll_seq, label_seq = genseq(10000)
plot_seq(roll_seq, label_seq)
target_seq = pred_heur(roll_seq, 15, 6)
print("Accuracy: {}".format(np.sum(target_seq == label_seq)/len(label_seq)))
plot_seq(roll_seq, target_seq)
###Output
Accuracy: 0.8247
###Markdown
**Accuracy varies between 60%-82%** Alpha_Beta algorithm
###Code
def alphabeta(roll_seq, initial_prob, trans_prob, obs_prob, return_seq=True):
alpha_seq = np.zeros((2, len(roll_seq)), dtype=np.float128)
alpha_seq[: , 0] = initial_prob
for roll in np.arange(1, len(roll_seq)):
for j in np.arange(len(init_prob)):
alpha_seq[j, roll] = np.sum([alpha_seq[i, roll-1] * trans_prob[i, j] * obs_prob[j, roll_seq[roll-1]-1] * 6
for i in np.arange(len(init_prob))])
beta_seq = np.zeros((2, len(roll_seq)), dtype=np.float128)
beta_seq[:, len(roll_seq)-1] = [1, 1]
for roll in np.arange(len(roll_seq) - 2, -1, -1):
for j in np.arange(len(init_prob)):
beta_seq[j, roll] = np.sum([beta_seq[i, roll+1] * trans_prob[i, j] * obs_prob[j, roll_seq[roll]-1] * 6
for i in np.arange(len(init_prob))])
gamma_seq = (alpha_seq * beta_seq)/np.sum((alpha_seq*beta_seq))
if return_seq:
return np.argmax(gamma_seq, 0), np.argmax(alpha_seq, 0), np.argmax(beta_seq, 0)
else:
return gamma_seq, alpha_seq, beta_seq
trans_prob = np.array([[1-0.04, 0.04], [0.05, 1-0.05]])
die_1_prob = np.array([1/6, 1/6, 1/6, 1/6, 1/6, 1/6])
die_2_prob = np.array([1/10, 1/10, 1/10, 1/10, 1/10, 1/2])
die_prob = np.array([die_1_prob, die_2_prob])
init_prob = np.array([1, 0])
roll_seq, label_seq = genseq(10000)
plot_seq(roll_seq, label_seq)
target_seq = alphabeta(roll_seq, init_prob, trans_prob, die_prob)[0]
print("Accuracy: {}".format(np.sum(target_seq == label_seq)/len(label_seq)))
plot_seq(roll_seq, target_seq)
###Output
_____no_output_____
###Markdown
Predicting Parameters
###Code
def EM(seq, initial_prob, trans_prob, obs_prob, iters):
def KSI(alpha_seq, beta_seq, trans_prob, obs_prob):
ksi = np.zeros((len(seq) - 1, len(initial_prob), len(initial_prob)), dtype=np.float128)
for roll in range(len(seq) - 1):
for i in range(len(initial_prob)):
for j in range(len(initial_prob)):
ksi[roll, j, i] = alpha_seq[i, roll] * trans_prob[i, j] * obs_prob[i, (seq[roll+1] - 1)] * beta_seq[j, roll+1]
return ksi
for _ in tqdm(range(iters)):
gamma_seq, alpha_seq, beta_seq = alphabeta(roll_seq, initial_prob, trans_prob, obs_prob, return_seq=False)
ksi = KSI(alpha_seq, beta_seq, trans_prob, obs_prob)
### trans_prob_HAT
for i in range(len(initial_prob)):
for j in range(len(initial_prob)):
trans_prob[i, j] = np.sum(ksi[:, i, j])/np.sum(ksi[:, i, :])
### trans_obs_prob_hat
for i in range(len(initial_prob)):
for j in range(len(obs_prob[i])):
obs_prob[i, j] = np.sum(gamma_seq[i, (seq - 1) == j]) / np.sum(gamma_seq[i, :])
return trans_prob, obs_prob
prob_die_1 = np.ones(6)
prob_die_2 = np.ones(6)
die_prob = np.array([prob_die_1/np.sum(prob_die_1) , prob_die_2/np.sum(prob_die_2)])
trans_prob = np.array([[1 - 0.5, 0.5],[0.5, 1 - 0.5]])
init_prob = np.array([0.5, 0.5])
EM(roll_seq, init_prob, trans_prob, die_prob, 1000)
trans_1 = np.random.rand(1)[0]
trans_2 = np.random.rand(1)[0]
prob_die_1 = np.random.rand(6)
prob_die_2 = np.random.rand(6)
die_prob = np.array([prob_die_1/np.sum(prob_die_1) , prob_die_2/np.sum(prob_die_2)])
trans_prob = np.array([[1 - trans_1, trans_1],[trans_2, 1 - trans_2]])
init_prob = np.array([0.5, 0.5])
EM(roll_seq, init_prob, trans_prob, die_prob, 1000)
casino = []
f = open('test.txt', 'r')
for line in f:
for number in line:
if number != '\n':
casino.append(int(number))
casino = np.array(casino)
trans_1 = np.random.rand(1)[0]
trans_2 = np.random.rand(1)[0]
prob_die_1 = np.random.rand(6)
prob_die_2 = np.random.rand(6)
die_prob = np.array([prob_die_1/np.sum(prob_die_1) , prob_die_2/np.sum(prob_die_2)])
trans_prob = np.array([[1 - trans_1, trans_1],[trans_2, 1 - trans_2]])
init_prob = np.array([0.5, 0.5])
EM(casino, init_prob, trans_prob, die_prob, 1000)
###Output
100%|██████████| 1000/1000 [12:34<00:00, 1.33it/s]
|
awszarr/Fetch_SNOTEL_CSO.ipynb | ###Markdown
Fetch and export SNOTEL sites and daily time series data11/5, 6/23, 2/29/2020. https://github.com/emiliom/
###Code
from collections import OrderedDict
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely import geometry as sgeom
import ulmo
import matplotlib.pyplot as plt
%matplotlib inline
##USER INPUTS## - most of this can be read directly from the .ctl file or the .par file
#-> need to read in text file
#start date
st = "2014-10-01"
#end date
ed = "2019-09-29"
#Snotel bounding box
Bbox = {
'latmax' : 44.582480,
'latmin' : 42.363116,
'lonmax': -109.477849,
'lonmin': -111.155208,}
# Snotel projection
stn_proj = 'epsg:4326' #WGS84
#model projection
mod_proj = 'epsg:32612' #UTM zone 12N
###Output
_____no_output_____
###Markdown
Use ulmo to get SNOTEL station metadata within bounding box Get SNOTEL metadata over modeling domain
###Code
#function to extract data
def sites_asgdf(ulmo_getsites, crs=stn_proj):
""" Convert ulmo.cuahsi.wof.get_sites response into a point GeoDataframe
"""
# Note: Found one SNOTEL site that was missing the location key
sites_df = pd.DataFrame.from_records([
OrderedDict(code=s['code'],
longitude=float(s['location']['longitude']),
latitude=float(s['location']['latitude']),
name=s['name'],
elevation_m=s['elevation_m'])
for _,s in ulmo_getsites.items()
if 'location' in s
])
sites_gdf = gpd.GeoDataFrame(
sites_df,
geometry=gpd.points_from_xy(sites_df['longitude'], sites_df['latitude']),
crs=crs
)
return sites_gdf
# Convert the bounding box dictionary to a shapely Polygon geometry using sgeom.box
box_sgeom = sgeom.box(Bbox['lonmin'], Bbox['latmin'], Bbox['lonmax'], Bbox['latmax'])
box_gdf = gpd.GeoDataFrame(geometry=[box_sgeom], crs=stn_proj)
# WaterML/WOF WSDL endpoint url
wsdlurl = "http://hydroportal.cuahsi.org/Snotel/cuahsi_1_1.asmx?WSDL"
# get dictionary of snotel sites
sites = ulmo.cuahsi.wof.get_sites(wsdlurl)
#turn sites as geodataframe
snotel_gdf = sites_asgdf(sites)
CSO_gdf = gpd.sjoin(snotel_gdf, box_gdf, how="inner")
CSO_gdf.drop(columns='index_right', inplace=True)
CSO_gdf.reset_index(drop=True, inplace=True)
#CSO_gdf.index = CSO_gdf['code']
#add columns with projected coordinates
CSO_proj = CSO_gdf.to_crs(mod_proj)
CSO_gdf['easting'] = CSO_proj.geometry.x
CSO_gdf['northing'] = CSO_proj.geometry.y
CSO_gdf.head()
len(CSO_gdf)
CSO_gdf.plot();
###Output
_____no_output_____
###Markdown
Get SNOTEL SWE data in domain over time period of interest
###Code
#Emilio code
def fetch(sitecode, variablecode, start_date, end_date):
print(sitecode, variablecode, start_date, end_date)
values_df = None
try:
#Request data from the server
site_values = ulmo.cuahsi.wof.get_values(
wsdlurl, 'SNOTEL:'+sitecode, variablecode, start=start_date, end=end_date
)
#Convert to a Pandas DataFrame
values_df = pd.DataFrame.from_dict(site_values['values'])
#Parse the datetime values to Pandas Timestamp objects
values_df['datetime'] = pd.to_datetime(values_df['datetime'])
#Set the DataFrame index to the Timestamps
values_df.set_index('datetime', inplace=True)
#Convert values to float and replace -9999 nodata values with NaN
values_df['value'] = pd.to_numeric(values_df['value']).replace(-9999, np.nan)
#Remove any records flagged with lower quality
values_df = values_df[values_df['quality_control_level_code'] == '1']
except:
print("Unable to fetch %s" % variablecode)
return values_df
stn_swe = pd.DataFrame(index=pd.date_range(start=st, end=ed))
for sitecode in CSO_gdf.code:
try:
swe = fetch(sitecode, variablecode='SNOTEL:WTEQ_D', start_date=st, end_date=ed)
stn_swe[sitecode] = swe.value
except:
print(sitecode, 'has no data')
stn_swe[sitecode] = np.nan
stn_swe.head()
#convert SNOTEL units[in] to SnowModel units [m]
for sitecode in CSO_gdf.code:
# overwrite the original values (no use for the original values in inches)
stn_swe[sitecode] = 0.0254 * stn_swe[sitecode]
stn_swe.head()
stn_swe.plot(y='314_WY_SNTL', title='Daily SWE [m]');
###Output
_____no_output_____
###Markdown
Export the sites and daily time series to files, for reuse- CSO_gdf to GeoJSON- stn_swe to csv or parquet **Note:** Sometimes the statement below produces this warning:```CSO_SNOTEL_sites.geojson: No such file or directory driver GeoJSON does not support creation option ENCODING```Ignore it. The GeoJSON file is still written out without issues.
###Code
CSO_gdf.to_file('CSO_SNOTEL_sites.geojson', driver='GeoJSON')
len(stn_swe)
# The file that's written is tiny, only 0.5 MB
stn_swe.to_csv('CSO_SNOTEL_data_SWEDmeters.csv')
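# Parquet alternative mentioned above (a sketch; assumes a parquet engine such as pyarrow or
# fastparquet is installed in this environment)
stn_swe.to_parquet('CSO_SNOTEL_data_SWEDmeters.parquet')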
###Output
_____no_output_____ |
nbs/auto_grad.ipynb | ###Markdown
###Code
import torch
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import math
a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True)
b = torch.sin(a)
a.shape
b.shape
plt.plot(a.detach(), b.detach())
c = 2*b
d = c+1
out = d.sum()
out.backward()
a.grad
plt.plot(a.detach(), a.grad.detach())
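# Sanity check (a minimal sketch): out = sum(2*sin(a) + 1), so d(out)/da = 2*cos(a);
# the analytic curve below should match the a.grad curve plotted above.
plt.plot(a.detach(), (2 * torch.cos(a)).detach())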
## Basics
a = torch.tensor(4.0,requires_grad=True)
a
out = a**2+ 10.0
out
out.backward()
a
a.grad.numpy()
def myfunc(a):
#a**2
out = a**2
scal_out = out.sum()
scal_out.backward()
return out
a = torch.linspace(0,10,100,requires_grad=True)
a
out = myfunc(a)
out
a.grad.numpy()
import numpy as np
2*np.linspace(0,10,100) - a.grad.numpy()  # analytic gradient 2a minus the autograd result (should be ~0)
###Output
_____no_output_____ |
gbif/gbif_name_match/gbif_species_name_extraction.ipynb | ###Markdown
GBIF API request for species naming explanation
###Code
import json
import requests
import pandas as pd
###Output
_____no_output_____
###Markdown
Introduction: example request for info The requirements are discussed in the following comment:https://github.com/LifeWatchINBO/invasive-t0-occurrences/issues/6 and an example is included. Let us test the example by making a request:
###Code
temp = requests.get("http://api.gbif.org/v1/species/match?verbose=false&kingdom=Plantae&name=Heracleum%20mantegazziaum&strict=false")
temp.json()
###Output
_____no_output_____
###Markdown
Development of the building blocks of our small application Two important functions are available to extract the information: * A function that can do a request with the given species/kingdom combination: `extract_gbif_species_names_info` * A function that can iteratively do this for every species in the provided list on https://github.com/LifeWatchINBO/invasive-t0-occurrences/blob/master/species-list/species-list.tsv `extract_species_information` Importing them makes them available:
###Code
from gbif_species_name_match import (extract_gbif_species_names_info,
extract_species_information,
extract_gbif_accepted_key)
###Output
_____no_output_____
###Markdown
Extract info from GBIF about provided species `extract_gbif_species_names_info` provides the functionality to request species info from the GBIF API. It can be used as a Python function or from the command line: Python function:
###Code
!cat opiliones.csv
updated_tsv = extract_species_information("opiliones.csv",
output=None,
api_terms=["usageKey",
"scientificName",
"canonicalName",
"status",
"rank",
"matchType",
"confidence"],
)
updated_tsv.head()
###Output
_____no_output_____
###Markdown
The available options are: * output : if None, nothing is written to output and a pandas DataFrame is returned; if a string, the output is written to a tsv file * api_terms : Either a list of existing terms or just provide 'all' if interested in all the information
###Code
extract_species_information("opiliones.csv", output=None, api_terms="all")
###Output
Using only scientificName as name column for API request.
###Markdown
Command line When working in the command line, the function will take the first argument as input file and the last argument as file to write it to, both combined with the relative path:```bashpython gbif_species_name_extraction.py sample.csv sample_dump.csv```The terms added to the tsv file are the default list as follows:```python["usageKey", "scientificName", "canonicalName", "status", "rank", "matchType", "confidence"]``` Extract info of individual species The `extract_gbif_species_names_info` function is useful for a request of a single species/kingdom combination:
###Code
extract_gbif_species_names_info("Dinebra panicea (Retz.) P.M. Peterson & N. Snow var. brachiata (Steud.) P.M. Peterson & N. Snow", kingdom="Plantae")
###Output
_____no_output_____
###Markdown
The `extract_gbif_accepted_key` function is useful to get the acceptedKey corresponding to any usage key of GBIF:
###Code
extract_gbif_accepted_key(3025758)
###Output
_____no_output_____ |
Project/Code/getFeaturesFromReview.ipynb | ###Markdown
import statements
###Code
import spacy
lang='en'
nlp = spacy.load('en')
###Output
_____no_output_____
###Markdown
restaurant review class
###Code
class RestaurantReviewFeatures(object):
"""Stores reviews data and rating data for one restaurant
Attributes:
id (str): id of the restaurant, this will be used as Primary Key
index (int): index of last added review
rating (dict): rating of this restaurant
reviews (dict): reviews of this restaurant
"""
def __init__(self, key):
"""Initialize class
"""
self.id = key
self.index = 0
self.rating = {}
self.reviews = {}
def __getitem__(self, idx):
"""returns features of a specific review
Args:
idx (int): index of the review
Returns:
dict: The review dictionary at index=idx.
"""
return self.reviews[idx]
def __len_(self):
"""returns total reviews for this restaurant
Returns:
int: total reviews for this restaurant
"""
return self.index
def addReview(self, rating,unigrams=[], bigrams1=[], bigrams2=[], trigrams=[], dataset="", nouns=[], raw=[]):
"""Adds a review
Args:
rating (float): rating of the review
'nouns' (list): list of noun_chunks in review
raw (list): list of tokens in review (raw features)
unigrams (list): list of unigrams in review
bigrams1 (list): list of bigrams 'ADJ + next word'
"bigrams2" (list): list of bigrams 'prev word + ADJ'
"trigrams" (list): list of trigrams
"rating" (list): rating of that review
"dataset" (str): original dataset
"""
if len(unigrams)>0 or len(bigrams1)>0 or len(trigrams) or len(raw)>0:
self.reviews[self.index] = {'nouns':nouns, 'raw':raw, 'unigrams':unigrams, 'bigrams':bigrams1, 'bigrams2':bigrams2, 'trigrams':trigrams, 'rating':rating,'dataset':dataset}
self.index += 1
def addRating(self, rating, dataset):
"""Adds rating for the restaurant
Args:
rating (float): rating value
dataset (string): original dataset
"""
if rating > 0:
self.rating[dataset] = rating
def getRating(self, dataset="avg"):
"""returns rating for the restaurant
Args:
dataset (string): original dataset name.
possible values for dataset = ["avg","google","yelp"]
Returns:
float: rating value
"""
if dataset == "avg":
return float(sum(self.rating.values()))/len(self.rating)
else:
return self.rating[dataset]
###Output
_____no_output_____
###Markdown
review cleanup
###Code
def cleanup_text(doc):
"""Initial cleanup of the review
Args:
doc (str): review string
Note:
removes all stopwords and punctuations from the string
Returns:
str: cleaned string
"""
doc = nlp(doc, disable=['parser', 'ner'])
tokens = [tok.lemma_.lower().strip() for tok in doc if (tok.lemma_ != '-PRON-' and tok.is_stop==False and tok.is_punct==False)]
tokens = ' '.join(tokens)
return tokens
###Output
_____no_output_____
###Markdown
process one restaurant
###Code
def processOneRestaurant(restaurant, log=True):
"""Process reviews of one restaurnat
Note:
returns RestaurantReviewFeatures object containing all reivews and rating information
Args:
restaurant (dict): dictionary containing restaurant info
Returns:
Object: RestaurantReviewFeatures class object
"""
if "google" in restaurant:
restaurant_id = restaurant["google"]["place_id"]
else:
restaurant_id = restaurant["yelp"]["id"]
r = RestaurantReviewFeatures(restaurant_id)
for dataset in restaurant:
#print("DataSet:" + dataset)
if "rating" in restaurant[dataset]:
r.addRating(restaurant[dataset]["rating"], dataset.replace("ui",""))
if "reviews" not in restaurant[dataset].keys():
if log:
print("{} : NO reviews found for {} dataset".format(restaurant_id,dataset))
else:
##print(len(restaurant[dataset]["reviews"]))
for review in restaurant[dataset]["reviews"]:
review_text = review["text"].strip()
review_text = cleanup_text(review_text)
#print(review_text)
doc = nlp(review_text, disable=['ner'])
uni_feature_list = []
bi_feature_list = []
bi_feature_list2 = []
tri_feature_list = []
raw_feature_list = review_text.split(' ')
for i in range(len(doc)):
token = doc[i]
if token.tag_.startswith('J'):
uni_feature_list.append(token.lemma_)
if i<len(doc)-1:
#print(token.text,":" ,doc[i-1].lemma_, token.lemma_, doc[i+1].lemma_)
bi_feature_list.append(token.lemma_+" "+doc[i+1].lemma_)
if i>0:
tri_feature_list.append(doc[i-1].lemma_+" "+token.lemma_+" "+doc[i+1].lemma_)
if i>0:
bi_feature_list2.append(doc[i-1].lemma_+" "+token.lemma_)
review_stars = None
if "stars" in review:
review_stars = review["stars"]
if "rating" in review:
review_stars = review["rating"]
if review_stars != None:
r.addReview(review_stars, unigrams=uni_feature_list, bigrams1=bi_feature_list,
bigrams2= bi_feature_list2, trigrams=tri_feature_list, raw=raw_feature_list,
nouns=doc.noun_chunks, dataset=dataset)
return r
###Output
_____no_output_____
###Markdown
Sample code to process one restaurant from combined dataset
###Code
import json
import time
t1 = time.time()
review_dict = {}
noun_chunks = set()
with open("C:\\Users\\utkar\\Downloads\\large_data.json","r") as f:
## use parallel processing (Pool.map()) here to expedite processing
while(True):
line = f.readline()
if not line:
break
restaurant = json.loads(line)
r = processOneRestaurant(restaurant)
#assert(r.id not in review_dict)
#review_dict[r.id] = r
for k in r.reviews.keys():
nouns = r.reviews[k]["nouns"]
for n in list(nouns):
noun_chunks.add(n.text)
del r
print("Number of restaurants: " + len(review_dict))
#f.close()
t2 = time.time()
print("{}s to process {} restaurants".format(t2-t1, len(review_dict)))
f = open("C:\\Users\\utkar\\Downloads\\nouns.txt","w")
data = str(list(noun_chunks)).replace(",","\n").replace("'","")
f.write(data)
f.close()
print(review_dict.keys())
review_dict['ChIJQapGk_y9woARKIZHrNZEiwE'].rating
ambience = []
service = []
food = []
###Output
_____no_output_____ |
first_form.ipynb | ###Markdown
Lesson 1 Basic Problems ex1 Write a function that returns the difference from 17 when given a number smaller than 17, but returns the square of the difference when given a number larger than 17
###Code
def func1(n):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func1(0) == 17
assert func1(15) == 2
assert func1(17) == 0
assert func1(19) == 4
assert func1(100) == 6889
###Output
_____no_output_____
###Markdown
ex2 Write a function that determines whether a number is within 100 of either 1000 or 2000
###Code
def func2(n):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func2(899) == False
assert func2(900) == True
assert func2(1100) == True
assert func2(1101) == False
assert func2(1899) == False
assert func2(1900) == True
assert func2(2100) == True
assert func2(2101) == False
###Output
_____no_output_____
###Markdown
ex3 Write a function that, given a single letter of the alphabet, determines whether that letter is a consonant or a vowel
###Code
def func3(s):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func3("a") == "vowel"
assert func3("b") == "consonant"
assert func3("c") == "consonant"
assert func3("d") == "consonant"
assert func3("e") == "vowel"
###Output
_____no_output_____
###Markdown
ex4 Write a function that, given a string and a number, returns the first two characters of the string repeated that many times. However, if the given string is a single character, return that character repeated that many times
###Code
def func4(s, n):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func4("ab", 8) == "abababababababab"
assert func4("abc", 10) == "abababababababababab"
assert func4("abcdefg", 20) == "abababababababababababababababababababab"
assert func4("a", 2) == "aa"
###Output
_____no_output_____
###Markdown
ex5 Write a function that, given a list of three numbers, returns their sum, but returns three times the sum if all three numbers are the same
###Code
def func5(a):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func5([1, 2, 3]) == 6
assert func5([2, 2, 2]) == 18
assert func5([2, 4, 6]) == 12
assert func5([5, 5, 5]) == 45
###Output
_____no_output_____
###Markdown
Lesson 2 Applied Problems ex6 BMI calculation Write a function that calculates BMI>$BMI = weight[kg] ÷ (height[m])^2$Give the result to two decimal places (truncate anything below that)
###Code
def func6(h, w):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func6(1.60, 60) == 23.43
assert func6(1.65, 65) == 23.87
assert func6(1.70, 70) == 24.22
assert func6(1.75, 75) == 24.48
###Output
_____no_output_____
###Markdown
ex7 BMI assessment Write a function that checks the degree of obesity and the health status from BMI. Use the following BMI criteria:* under 18.5 is 「痩せすぎ」 (underweight)* 18.5 or more and under 25 is 「標準」 (normal)* 25 or more is 「肥満」 (obese)* people within 22 $±$ 10% are 「健康」 (healthy)* people at 23 or more and under 24 are 「長生き」 (long-lived)Incidentally, the following is known about BMI:* people with no abnormal findings in their health checkups are most common around a BMI of 22* people with a BMI of 23-24 live the longest
###Code
def func7(h, w):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func7(1.70, 50) == "あなたは「痩せすぎ」です。"
assert func7(1.70, 60) == "あなたは「標準」です。"
assert func7(1.70, 64) == "あなたは「健康」です。"
assert func7(1.70, 68) == "あなたは「長生き」です。"
assert func7(1.70, 80) == "あなたは「肥満」です。"
###Output
_____no_output_____
###Markdown
ex8 Temperature conversion Write a function that converts between Fahrenheit and Celsius. Fahrenheit is represented as a string with "F" appended after the number, and Celsius with "C". The conversion between Fahrenheit and Celsius is>$(F - 32) × 5 ÷ 9 = C$Give the converted value to one decimal place (rounded). ※If possible, return 「無効」 (invalid) for unrecognized input that cannot be converted
###Code
def func8(s):
assert func8("32F") == "0.0C"
assert func8("4C") == "39.2F"
assert func8("211f") == "99.4C"
assert func8("-4c") == "24.8F"
assert func8("5X") == "無効"
assert func8("!5F") == "無効"
###Output
_____no_output_____
###Markdown
Lesson 3 RSA Encryption (1): Preparation ex9 Exponentials with huge numbers of digits Write a function that computes the ones digit of $n^{n^n}$
###Code
def func9(n):
###Output
_____no_output_____
###Markdown
Test code ※If the computation can no longer be stopped, disconnecting the session once from the menu at the top right will stop it
###Code
assert func9(3) == 7
assert func9(7) == 3
assert func9(13) == 3
assert func9(432) == 6
###Output
_____no_output_____
###Markdown
ex10 Hex dump Write a function that produces a hex dump of a string, using "UTF-8" as the character encoding
###Code
def func10(s):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func10("Caeleb Remel Dressel") == "4361656C65622052656D656C204472657373656C"
assert func10("Lamont Marcell Jacobs") == "4C616D6F6E74204D617263656C6C204A61636F6273"
assert func10("西矢椛") == "E8A5BFE79FA2E6A49B"
assert func10("堀米雄斗") == "E5A080E7B1B3E99B84E69697"
###Output
_____no_output_____
###Markdown
ex11 Hex → decimal conversion A function that converts a hexadecimal number to a decimal number
###Code
def func11(s):
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func11("4361656C65622052656D656C204472657373656C") == 384674389155590982046234764697110782161450132844
assert func11("4C616D6F6E74204D617263656C6C204A61636F6273") == 111630337030194331012420294446637731487233908630131
assert func11("E8A5BFE79FA2E6A49B") == 4291588144510244660379
assert func11("E5A080E7B1B3E99B84E69697") == 71066104117138884767124985495
###Output
_____no_output_____
###Markdown
ex12 Decimal → string conversion A function that converts a decimal number to a string, using "UTF-8" as the character encoding
###Code
def func12(n):
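    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # Turn the integer into big-endian bytes, then decode those bytes as UTF-8.
    return n.to_bytes((n.bit_length() + 7) // 8, "big").decode("utf-8")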
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func12(384674389155590982046234764697110782161450132844) == "Caeleb Remel Dressel"
assert func12(111630337030194331012420294446637731487233908630131) =="Lamont Marcell Jacobs"
assert func12(4291588144510244660379) == "西矢椛"
assert func12(71066104117138884767124985495) == "堀米雄斗"
###Output
_____no_output_____
###Markdown
Lesson4 RSA encryption ② ~ Encryption and decryption ~ ex13 RSA encryption (encrypt) Create a function that RSA-encrypts a number using the following parameters: p = 1223608816783989526977697142729, q = 1181419059721907087891844374533, N = 1445594777792376190871161299886642462542562458520846233720557, e = 65537 ※$N = p × q$ The relationship between the ciphertext C and the plaintext M is>$C = M^e ~ mod ~ N$
###Code
p = 1223608816783989526977697142729
q = 1181419059721907087891844374533
N = 1445594777792376190871161299886642462542562458520846233720557
e = 65537
def func13(m):
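    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # RSA encryption C = M^e mod N; Python's three-argument pow does the
    # modular exponentiation efficiently.
    return pow(m, e, N)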
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func13(384674389155590982046234764697110782161450132844) == 933724396887774009673956822287392949344951453324424927225843
assert func13(111630337030194331012420294446637731487233908630131) == 96166222195780378773068570820947104808959039543354452361988
assert func13(4291588144510244660379) == 259428661535051310743580900142038188754161976801949527257666
assert func13(71066104117138884767124985495) == 251132345489958486648793004527006641296516405032422636490685
###Output
_____no_output_____
###Markdown
We now want a function that decrypts the ciphertext we created, but for that we need to compute the decryption key $d$. The RSA private key $d$ satisfies the following relation with the public key $e$:>$e×d ≡ 1 ~ mod ~ \phi \\ \phi = (p-1)×(q-1)$Here 「$≡$」 and 「$mod ~ \phi$」 denote a congruence (the left-hand and right-hand sides leave the same remainder when divided by $\phi$).※For convenience, from here on we write 「$≡$」 as 「$=$」. ex14 Computing a modular inverse (brute force) Given two primes $p,q$, create a function that searches **by brute force** for a $d$ satisfying>$e×d ≡ 1 ~ mod ~ \phi \\\phi = (p-1)×(q-1)$where $e = 65537, \quad d < (p-1)×(q-1)$
###Code
e = 65537
def func14(p, q):
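    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # Brute-force search, exactly as the exercise asks: try every candidate d.
    # (This is intentionally slow; the later tests take minutes.)
    phi = (p - 1) * (q - 1)
    for d in range(1, phi):
        if (e * d) % phi == 1:
            return d
    return None  # no inverse exists (only happens if gcd(e, phi) != 1)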
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func14(659, 643) == 232937
assert func14(821, 769) == 197633
assert func14(691, 941) == 274673
###Output
_____no_output_____
###Markdown
Test code 2 ※This takes a little while to finish (about 30 seconds)
###Code
assert func14(17519, 24611) == 108290953
###Output
_____no_output_____
###Markdown
Test code 3 ※This takes even longer to finish (2 to 3 minutes)
###Code
assert func14(36373, 64901) == 841967873
###Output
_____no_output_____
###Markdown
The results above show that computing $d$ by brute force becomes too slow as $p,q$ grow, so we consider a different approach. It is known that ---for two integers $(a, b)$, a solution $(x,y)$ of the Diophantine equation>$ax+by = gcd(a,b)$can be computed quickly---※$gcd(a,b)$ is the greatest common divisor of $a$ and $b$. For example, taking>$(a,b) = (e, \phi) \\ gcd(e, \phi) = 1$gives>$ex+\phi y = 1$and taking both sides modulo $\phi$ yields>$ex = 1 ~ mod ~ \phi$so $d$ $(=x)$ is obtained. ex15 Euclidean algorithm To confirm that $gcd(e, \phi)=1$, create a function that computes $gcd(e, \phi)$ from the two primes $(p,q)$, where>$e = 65537 \\ \phi=(p-1)\times(q-1)$※Implementing the 「Euclidean algorithm」 yourself is preferred, but you may use the math.gcd(a, b) function from the math module
###Code
e = 65537
def func15(p, q):
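    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # Euclid's algorithm for gcd(e, (p-1)*(q-1)); math.gcd would also work,
    # as the exercise notes.
    a, b = e, (p - 1) * (q - 1)
    while b:
        a, b = b, a % b
    return a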
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func15(36373, 64901) == 1
assert func15(915349060633981, 879702975012439) == 1
assert func15(1223608816783989526977697142729, 1181419059721907087891844374533) == 1
###Output
_____no_output_____
###Markdown
※If $gcd(e, (p-1)×(q-1)) ≠ 1$, then $d$ is not determined and decryption is impossible. ex16 Extended Euclidean algorithm Given two numbers (a, b), create a function that computes an $x$ satisfying>$ax = 1 ~ mod ~ b$※Solving the Diophantine equation with the 「extended Euclidean algorithm」 is preferred, but you may use the gcdex function from the sympy module.※Since $x$ can be negative, it is a good idea to take the remainder modulo $b$ at the end
###Code
def func16(a, b):
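    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # Extended Euclidean algorithm: finds x with a*x + b*y = gcd(a, b).
    # When gcd(a, b) == 1, x is the modular inverse of a mod b; taking % b
    # at the end makes the result non-negative.
    old_r, r = a, b
    old_x, x = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
    return old_x % b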
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func16(73571, 1072894853) == 192905535
assert func16(64901, 1207471893509091040314362866849) == 920734295894846096271513596362
assert func16(81119, 1375780239273875130104284840151075796433493450132783529632293) == 237152024627603840582948691673128278967067381417506528616580
###Output
_____no_output_____
###Markdown
This time $d$ can be computed quickly even for large numbers. ex17 RSA encryption (decrypt) Based on the above, create a decryption function. The ciphertext C is decrypted to the plaintext M as follows:>$M = C^d ~ mod ~ N$ The private key $d$ can be computed using the result of ex16:
###Code
p = 1223608816783989526977697142729
q = 1181419059721907087891844374533
e = 65537
func16(e, (p-1)*(q-1))
###Output
_____no_output_____
###Markdown
So, using that value of $d$:
###Code
N = 1445594777792376190871161299886642462542562458520846233720557
d = 904012334605531925578889401627410272401142288068641359129345
def func17(c):
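    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # RSA decryption M = C^d mod N, using the N and d defined above.
    return pow(c, d, N)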
###Output
_____no_output_____
###Markdown
Test code
###Code
assert func17(933724396887774009673956822287392949344951453324424927225843) == 384674389155590982046234764697110782161450132844
assert func17(96166222195780378773068570820947104808959039543354452361988) == 111630337030194331012420294446637731487233908630131
assert func17(259428661535051310743580900142038188754161976801949527257666) == 4291588144510244660379
assert func17(251132345489958486648793004527006641296516405032422636490685) == 71066104117138884767124985495
###Output
_____no_output_____
###Markdown
Lesson5 RSA encryption ③ ~ Practice problems ~ ex18 Practice problem 1 Some strings were encrypted as follows```print(func13(func11(func10("★★★★★★"))))print(func13(func11(func10("■■■■■■"))))print(func13(func11(func10("▶︎▶︎▶︎▶︎▶︎▶︎"))))```The results were```1003877171348949160374299553769977330726090328065100116050778225647961907964819754026097296433612017266008485331217477479159418251333432300444122281769765489025932051607611723427661```Decrypt the original strings
###Code
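# One possible approach (sketch, added as a hint): each ciphertext printed
# above can be decrypted with func17 and turned back into text with func12,
# e.g. print(func12(func17(c))) for each value c.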
###Output
_____no_output_____
###Markdown
ex19 Practice problem 2 Create a function that decrypts ciphertext RSA-encrypted with the following parameters ```p = 99280930794300920071382774999216354524467420720528123759974588765034074663876045012987508905100128519759656290036198160476599881414260590989088460633320720729553296458254250825593914337729366809693487561815522430135404840009054317649002737463174582770508303823068955868689982831714452911982071223716649802469q = 100219261295560742240797249262241836429593803690430204413099950961916296187735281900162200626563247056460037491059010759261716848232442550563715171608821366962038603101580563641890422729098242769815241344092373705491169032776678296707049935771071818115217897562283195638162708850284681971687415194614231563837e = 65537```
###Code
p = 99280930794300920071382774999216354524467420720528123759974588765034074663876045012987508905100128519759656290036198160476599881414260590989088460633320720729553296458254250825593914337729366809693487561815522430135404840009054317649002737463174582770508303823068955868689982831714452911982071223716649802469
q = 100219261295560742240797249262241836429593803690430204413099950961916296187735281900162200626563247056460037491059010759261716848232442550563715171608821366962038603101580563641890422729098242769815241344092373705491169032776678296707049935771071818115217897562283195638162708850284681971687415194614231563837
e = 65537
###Output
_____no_output_____
###Markdown
暗号文```c1 = 5993282892294268012337210646422460224942003682339814479302884022181417999114855972670712186545118220125662871519229929846646836121084304816649015386737099513475747449874780402119038224661422110585167929611841407240680162076463742265931238648499543704282525494309999633856049015070714887185020390388127522863038787024024971681295320738326637792964656596726338299837333044702246888328862024915305745845222013436536335920158802049683304981193846955698062770587725601229602815288132137856123708800724991975252889579775780660852492871832071628983023169299866211503370369088572285358877954624525621213978189430281298032646c2 = 2336409511515301298547613713805669698248801677012087786057830501685650092538851986158769315867383717531880893251879509173724747927988791995516999608829600380697658493739580397309777848549361780394898290309951442037981802516497561444266938575272668862506762747187662132887820853363257749315976620660964024077325598852336723615518659537997457586921076734858772380546261905233601169670405081331396013237534887055495226043808789346604646225739916037582803663028904004003447242618912882161431435149901381963797730750982328072738593117026750663183875159808706807269624132799819971700339717155999455769309288344415057241725c3 = 7600801810617642582567941203777532865788859636265801290463357246340886777897267579251637555777501840347042749808043518617765984847892272013790181195503979654412547622527341949081816441318181628937923304654746983239808392198887768652839410205513907360928586204587597343609040675266749862522460124733481516513231755377734833191373671270070488620526463738692447142845303160936163560592238301973610095029089056932271401012158701771413934357230110456848626470814363473199265058838669921844384882825396553078297447253066680649265731945526822596942367105937910408865091393058285705680803783025716020959286434428891496923594```ただし、平文の文字符号化形式は「UTF-8」とする
###Code
def func19(c):
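    # Sample solution (sketch, added here; the original notebook leaves the body blank).
    # Rebuild the private key from p, q, e, decrypt, then decode the plaintext
    # integer as a UTF-8 string. Assumes func16 from ex16 has been defined and
    # run (it supplies the modular inverse of e mod phi).
    N = p * q
    d = func16(e, (p - 1) * (q - 1))
    m = pow(c, d, N)
    return m.to_bytes((m.bit_length() + 7) // 8, "big").decode("utf-8")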
###Output
_____no_output_____
###Markdown
Test code
###Code
print(func19(5993282892294268012337210646422460224942003682339814479302884022181417999114855972670712186545118220125662871519229929846646836121084304816649015386737099513475747449874780402119038224661422110585167929611841407240680162076463742265931238648499543704282525494309999633856049015070714887185020390388127522863038787024024971681295320738326637792964656596726338299837333044702246888328862024915305745845222013436536335920158802049683304981193846955698062770587725601229602815288132137856123708800724991975252889579775780660852492871832071628983023169299866211503370369088572285358877954624525621213978189430281298032646))
print(func19(2336409511515301298547613713805669698248801677012087786057830501685650092538851986158769315867383717531880893251879509173724747927988791995516999608829600380697658493739580397309777848549361780394898290309951442037981802516497561444266938575272668862506762747187662132887820853363257749315976620660964024077325598852336723615518659537997457586921076734858772380546261905233601169670405081331396013237534887055495226043808789346604646225739916037582803663028904004003447242618912882161431435149901381963797730750982328072738593117026750663183875159808706807269624132799819971700339717155999455769309288344415057241725))
print(func19(7600801810617642582567941203777532865788859636265801290463357246340886777897267579251637555777501840347042749808043518617765984847892272013790181195503979654412547622527341949081816441318181628937923304654746983239808392198887768652839410205513907360928586204587597343609040675266749862522460124733481516513231755377734833191373671270070488620526463738692447142845303160936163560592238301973610095029089056932271401012158701771413934357230110456848626470814363473199265058838669921844384882825396553078297447253066680649265731945526822596942367105937910408865091393058285705680803783025716020959286434428891496923594))
###Output
_____no_output_____ |
JUPYTER_NOTEBOOK_NOTES/5. Lists in python.ipynb | ###Markdown
Lists In python
###Code
my_list = [1,2,3,4,5]
my_list2 = ['STRING', 100, 23.2569]
#Length of the list
len(my_list)
#indexing
my_list[0]
#slicing
my_list[1:]
#concatinate
my_list + my_list2
# the concatenated result is not stored unless it is assigned to a variable
#make Changes
my_list[0] = 'ONE ALL CAPS'
my_list
# Add an element to the very end of the list
my_list.append('six')
my_list
# Removing Items from list
my_list.pop() #done popping several times
my_list
my_list.pop(0)
my_list
my_list = [1,2,3,4,5]
popped_item = my_list.pop(2)
popped_item
# Sort and reverse lists
new_list_1 = ['a', 'e', 'f', 'z', 'g']
new_no_list_2 = [4, 1, 8, 3]
new_list_1.sort()
new_list_1
# sort() only sorts the list in place and returns None
my_sorted_list = new_list_1.sort()
my_sorted_list
type(my_sorted_list)
new_list_1.sort()
my_sorted_list = new_list_1
my_sorted_list
new_no_list_2.sort()
new_no_list_2
new_no_list_2.reverse()
new_no_list_2
#Duplicate
list_23 = new_no_list_2 * 2
list_23
# Nesting Lists
list_1 = [1, 2, 3]
list_2 = [4, 5, 6]
list_3 = [7, 8, 9]
#to nest
matrix = [list_1, list_2, list_3]
matrix[0]
matrix[0][0]
# List comprehension: - uses for loop
first_column = [row[0] for row in matrix] # 0th element of every row in matrix
first_column
###Output
_____no_output_____ |
_notebooks/tools.ipynb | ###Markdown
"create a new blog!" - hide: true
###Code
!pip uninstall translation
from googletrans import Translator
from datetime import datetime
from functools import lru_cache
title='Pytorch一小时入门'
description='抄写一遍Pytorch入门代码'
toc='true'
tags=','.join(['jupyter','pytorch'])
branch='master'
badges='true'
comments='true'
use_math='true'
date=datetime.today().strftime('%Y-%m-%d')
@lru_cache(maxsize=10)
def translate(text):
return '-'.join(Translator().translate(text).text.split())
if len(title) < 1:
print('title not set!')
exit()
format_title = translate(title)
print(format_title)
import nbformat as nbf
nb = nbf.v4.new_notebook()
text = """
# {title}
- toc: {toc}
- branch: {branch}
- badges: {badges}
- use_math: {use_math}
- comments: {comments}
- categories: [{tags}]
- description: {description}
---
""".format(title=title, toc=toc, branch=branch, badges=badges, use_math=use_math, comments=comments, tags=tags, description=description)
print(text)
nb['cells'] = [nbf.v4.new_markdown_cell(text)]
nbf.write(nb, '{}-{}.ipynb'.format(date,format_title))
###Output
Getting-Pytorch-one-hour
# Pytorch一小时入门
- toc: true
- branch: master
- badges: true
- comments: true
- categories: [jupyter,pytorch]
- description: 抄写一遍Pytorch入门代码
---
|
qiskit-textbook/content/ch-prerequisites/python-and-jupyter-notebooks.ipynb | ###Markdown
Introduction to Python and Jupyter notebooks Python is a programming language where you don't need to compile. You can just run it line by line (which is how we can use it in a notebook). So if you are quite new to programming, Python is a great place to start. The current version is Python 3, which is what we'll be using here.One way to code in Python is to use a Jupyter notebook. This is probably the best way to combine programming, text and images. In a notebook, everything is laid out in cells. Text cells and code cells are the most common. If you are viewing this section as a Jupyter notebook, the text you are now reading is in a text cell. A code cell can be found just below.To run the contents of a code cell, you can click on it and press Shift + Enter. Or if there is a little arrow thing on the left, you can click on that.
###Code
1 + 1
###Output
_____no_output_____
###Markdown
If you are viewing this section as a Jupyter notebook, execute each of the code cells as you read through.
###Code
a = 1
b = 0.5
a + b
###Output
_____no_output_____
###Markdown
Above we created two variables, which we called `a` and `b`, and gave them values. Then we added them. Simple arithmetic like this is pretty straightforward in Python. Variables in Python come in many forms. Below are some examples.
###Code
an_integer = 42 # Just an integer
a_float = 0.1 # A non-integer number, up to a fixed precision
a_boolean = True # A value that can be True or False
a_string = '''just enclose text between two 's, or two "s, or do what we did for this string''' # Text
none_of_the_above = None # The absence of any actual value or variable type
###Output
_____no_output_____
###Markdown
As well as numbers, another data structure we can use is the *list*.
###Code
a_list = [0,1,2,3]
###Output
_____no_output_____
###Markdown
Lists in Python can contain any mixture of variable types.
###Code
a_list = [ 42, 0.5, True, [0,1], None, 'Banana' ]
###Output
_____no_output_____
###Markdown
Lists are indexed from `0` in Python (unlike languages such as Fortran). So here's how you access the `42` at the beginning of the above list.
###Code
a_list[0]
###Output
_____no_output_____
###Markdown
A similar data structure is the *tuple*.
###Code
a_tuple = ( 42, 0.5, True, [0,1], None, 'Banana' )
a_tuple[0]
###Output
_____no_output_____
###Markdown
A major difference between the list and the tuple is that list elements can be changed
###Code
a_list[5] = 'apple'
print(a_list)
###Output
_____no_output_____
###Markdown
whereas tuple elements cannot
###Code
a_tuple[5] = 'apple'
###Output
_____no_output_____
###Markdown
Also we can add an element to the end of a list, which we cannot do with tuples.
###Code
a_list.append( 3.14 )
print(a_list)
###Output
_____no_output_____
###Markdown
Another useful data structure is the *dictionary*. This stores a set of *values*, each labeled by a unique *key*.Values can be any data type. Keys can be anything sufficiently simple (integer, float, Boolean, string). It cannot be a list, but it _can_ be a tuple.
###Code
a_dict = { 1:'This is the value, for the key 1', 'This is the key for a value 1':1, False:':)', (0,1):256 }
###Output
_____no_output_____
###Markdown
The values are accessed using the keys
###Code
a_dict['This is the key for a value 1']
###Output
_____no_output_____
###Markdown
New key/value pairs can be added by just supplying the new value for the new key
###Code
a_dict['new key'] = 'new value'
print(a_dict['new key'])
###Output
_____no_output_____
###Markdown
To loop over a range of numbers, the syntax is
###Code
for j in range(5):
print(j)
###Output
_____no_output_____
###Markdown
Note that it starts at 0 (by default), and ends at n-1 for `range(n)`. You can also loop over any 'iterable' object, such as lists
###Code
for j in a_list:
print(j)
###Output
_____no_output_____
###Markdown
or dictionaries
###Code
for key in a_dict:
value = a_dict[key]
print('key =',key)
print('value =',value)
print()
###Output
_____no_output_____
###Markdown
Conditional statements are done with `if`, `elif` and `else` with the following syntax.
###Code
if 'strawberry' in a_list:
print('We have a strawberry!')
elif a_list[5]=='apple':
print('We have an apple!')
else:
print('Not much fruit here!')
###Output
_____no_output_____
###Markdown
Importing packages is done with a line such as
###Code
import numpy
###Output
_____no_output_____
###Markdown
The `numpy` package is important for doing maths
###Code
numpy.sin( numpy.pi/2 )
###Output
_____no_output_____
###Markdown
We have to write `numpy.` in front of every numpy command so that it knows to find that command defined in `numpy`. To save writing, it is common to use
###Code
import numpy as np
np.sin( np.pi/2 )
###Output
_____no_output_____
###Markdown
Then you only need the shortened name. Most people use `np`, but you can choose what you like.You can also pull everything straight out of `numpy` with
###Code
from numpy import *
###Output
_____no_output_____
###Markdown
Then you can use the commands directly. But this can cause packages to mess with each other, so use with caution.
###Code
sin( pi/2 )
###Output
_____no_output_____
###Markdown
If you want to do trigonometry, linear algebra, etc, you can use `numpy`. For plotting, use `matplotlib`. For graph theory, use `networkx`. For quantum computing, use `qiskit`. For whatever you want, there will probably be a package to help you do it. A good thing to know about in any language is how to make a function.Here's a function, whose name was chosen to be `do_some_maths`, whose inputs are named `Input1` and `Input2` and whose output is named `the_answer`.
###Code
def do_some_maths ( Input1, Input2 ):
the_answer = Input1 + Input2
return the_answer
###Output
_____no_output_____
###Markdown
It's used as follows
###Code
x = do_some_maths(1,72)
print(x)
###Output
_____no_output_____
###Markdown
If you give a function an object, and the function calls a method of that object to alter its state, the effect will persist. So if that's all you want to do, you don't need to `return` anything. For example, let's do it with the `append` method of a list.
###Code
def add_sausages ( input_list ):
if 'sausages' not in input_list:
input_list.append('sausages')
print('List before the function')
print(a_list)
add_sausages(a_list) # function called without an output
print('\nList after the function')
print(a_list)
###Output
_____no_output_____
###Markdown
Randomness can be generated using the `random` package.
###Code
import random
for j in range(5):
print('* Results from sample',j+1)
print('\n Random number from 0 to 1:', random.random() )
print("\n Random choice from our list:", random.choice( a_list ) )
print('\n')
###Output
_____no_output_____ |
layer-projection/plot-layers.ipynb | ###Markdown
Analyze the Activations In this notebook, we analyze the activations of the neural network to determine how it might be functioning. To determine the activations we provide a set of inputs to the network and measure the inputs to each hidden layer. As these hidden layers can have >$10^3$ inputs, we compute the first two principal components of the input features and only plot those.
###Code
%matplotlib inline
from matplotlib import pyplot as plt
from matplotlib import patheffects
from pymatgen import Element, Composition
from adjustText import adjust_text
import pandas as pd
import numpy as np
import os
###Output
_____no_output_____
###Markdown
Plotting details
###Code
font_size = 12
figure_width = 89 / 25.4
figure_height = figure_width
###Output
_____no_output_____
###Markdown
Load in the DatasetsWe have two different datasets: one of the activations of the elements, and the other of all possible AB compounds.
###Code
elem_dataset = pd.read_csv(os.path.join('activations', 'elem_act.csv'), index_col=0)
ab_dataset = pd.read_csv(os.path.join('activations', 'AB-set_act.csv'), index_col=0)
###Output
_____no_output_____
###Markdown
Make a utility function to get the activations for a certain layer
###Code
def get_activations(data, layer, add_columns=[]):
return data[list(data.columns[:1]) + ['act_layer_{}_{}'.format(layer, x) for x in [0,1]] + add_columns]
to_plot = get_activations(elem_dataset, 1)
###Output
_____no_output_____
###Markdown
Study the Element ActivationsThe main thing we are curious about is whether the network detects similarity between elements
###Code
elem_dataset['label'] = elem_dataset['group_id'].apply(lambda x: 'Group {}'.format(x))
###Output
_____no_output_____
###Markdown
Plot the first hidden layer
###Code
fig, ax = plt.subplots()
def make_layer_plot(ax, data, layer, color_by=['group_id'], label='label', autoarrange=True, lim=250, font_scale=0.8):
# Get the data to plot
to_plot = get_activations(data, layer, color_by + [label]).copy()
# Normalize the activations
to_plot.loc[:,1:3] = (to_plot.values[:,1:3] - to_plot.values[:,1:3].min(axis=0)) / \
(to_plot.values[:,1:3].max(axis=0) - to_plot.values[:,1:3].min(axis=0))
labels = []
for label, group in to_plot.groupby(color_by):
ax.scatter(group.values[:,1], group.values[:,2], label=group['label'].iloc[0], alpha=0.8)
for l,x,y in group.values[:,:3]:
labels.append(ax.text(x, y, l, ha='center', va='center', fontweight='bold', fontsize=font_size*font_scale,
bbox={'facecolor':'w', 'edgecolor':'none', 'alpha':0.2, 'boxstyle':"square,pad=0."},
path_effects=[patheffects.SimpleLineShadow(), patheffects.Normal()]))
# Format the axis
ax.set_xlim([-0.05,1.05])
ax.set_ylim([-0.05,1.05])
ax.set_xticks([])
ax.set_yticks([])
#ax.set_xlabel('PC1')
#ax.set_ylabel('PC2')
if autoarrange:
adjust_text(labels, ax=ax, arrowprops=dict(arrowstyle="->", color='r', lw=0.5), lim=lim)
make_layer_plot(ax, elem_dataset, 1)
fig.set_size_inches(figure_width, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Finding*: This is a little bit messy. Let's just plot the main group elements
###Code
acceptable_groups = [1, 2, 13, 14, 15, 16, 17]
fig, ax = plt.subplots()
make_layer_plot(ax, elem_dataset[[e in acceptable_groups for e in elem_dataset['group_id']]], 1)
ax.legend(fontsize=font_size*0.8, loc='upper left')
fig.set_size_inches(figure_width, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Findings*: - Element groups are only similar arcs, and ordered by the row- Metalloids/nonmetals are separate from metals in the same group (see Pb) Plot some later layers
###Code
fig, axs = plt.subplots(1, 4)
for i,ax in enumerate(axs):
make_layer_plot(ax, elem_dataset[[e in acceptable_groups for e in elem_dataset['group_id']]], i+1, autoarrange=False)
fig.set_size_inches(figure_width * 4, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Finding*: As we increase the number of layers, the "periodic-table-ness" starts to disappear Analyze the Binary CompoundsOur hypothesis is that the later layers encode the interactions between elements Load in more compound dataJoin the AB projections with some other data about each compound
###Code
ab_metadata = pd.read_csv(os.path.join('datasets', 'AB-set.csv'))
ab_dataset = ab_dataset.merge(ab_metadata, on='pretty_comp')
ab_dataset = ab_dataset[~ab_dataset['pretty_comp'].isnull()]
###Output
_____no_output_____
###Markdown
Plot based on formation enthalpyThe network should at least group AB compounds with similar formation enthalpies
###Code
fig, axs = plt.subplots(1, 4)
def make_value_layer_plot(ax, data, layer, value='delta_e', ascending=False):
# Get the data to plot
to_plot = get_activations(data, layer, [value]).copy()
# Normalize the activations
to_plot.loc[:,1:3] = (to_plot.values[:,1:3] - to_plot.values[:,1:3].min(axis=0)) / \
(to_plot.values[:,1:3].max(axis=0) - to_plot.values[:,1:3].min(axis=0))
to_plot.sort_values(value, ascending=ascending, inplace=True)
cb_data = ax.scatter(to_plot.values[:,1], to_plot.values[:,2], c=to_plot[value], alpha=0.9)
# Format the axis
ax.set_xlim([-0.05,1.05])
ax.set_ylim([-0.05,1.05])
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
for i,ax in enumerate(axs):
make_value_layer_plot(ax, ab_dataset, i+1, 'delta_e')
ax.text(0.95, 0.95, 'Layer {}'.format(i+1), ha='right',
fontweight='bold')
fig.set_size_inches(figure_width * 4, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Finding*: As we increase the layers, the large formation enthalpy etries are clustered together. This is expected behavior for NNs Understand the element clusteringThe above plots of formation enthalpy being cluster do not tell us much about what kind of features it is learning.
###Code
fig, axs = plt.subplots(1, 4)
for i,ax in enumerate(axs):
make_value_layer_plot(ax, ab_dataset, i+1, 'range X', ascending=True)
ax.text(0.95, 0.95, 'Layer {}'.format(i+1), ha='right',
fontweight='bold')
fig.set_size_inches(figure_width * 4, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Finding*: We also see compounds with larger differencs in electronegativity being grouped together, but that is because this is related to formation enthalpy. To understand the grouping of elements better, let us consider a subset of AB compounds. Those with a group 1 or 2 cation and either P, S, or Cl as the anion.
###Code
ab_dataset['comp_obj'] = ab_dataset['pretty_comp'].apply(lambda x: Composition(x))
def has_elements(c, elems):
return any([Element(e) in c.keys() for e in elems])
has_anions = ab_dataset['comp_obj'].apply(lambda x: has_elements(x, ['S', 'Cl']))
has_cations = ab_dataset['comp_obj'].apply(lambda x: has_elements(x, ['Li', 'Na', 'K', 'Rb', 'Cs'] +
['Mg', 'Ca', 'Sr', 'Ba']))
ab_subset = ab_dataset[np.logical_and(has_anions, has_cations)].copy()
ab_subset['anion_group'] = ab_subset['comp_obj'].apply(lambda x: max([e.group for e in x]))
ab_subset['cation_group'] = ab_subset['comp_obj'].apply(lambda x: min([e.group for e in x]))
ab_subset['label'] = ab_subset.apply(lambda x: '{1}-{0}'.format('VII' if x['anion_group'] == 17 else 'VI',
'I' if x['cation_group'] == 1 else 'II'), axis=1)
fig, ax = plt.subplots()
make_layer_plot(ax, ab_subset, 1, color_by=['label'], label='anion_group', autoarrange=True)
fig.set_size_inches(figure_width, figure_height)
ax.legend()
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Finding*: Somewhat unsurprising result. Different combinations of elements are clustered together. I-V is closer to I-VI than II-VI
###Code
fig, axs = plt.subplots(1, 4)
for i,ax in enumerate(axs):
make_layer_plot(ax, ab_subset, 1+i, color_by=['label'], label='anion_group', autoarrange=True)
axs[0].legend()
fig.set_size_inches(figure_width * 4, figure_height)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*Findings*:- In the first layer, element groups are closest based on similarity between the groups: - I-V is closer to I-VI than II-VI - II-V is closer to II-VI than I-VI - Ca-Cl is closer to Cs-S than the Alkali Metal Chlorides- In the second layer, charge-balanced compounds are closer together than elements that simply have similar elements Make a FigureCombine these figures together into a plot for the paper
###Code
fig, axs = plt.subplots(2, 3)
layers=[1,2,8]
elem_subset = elem_dataset[[e in acceptable_groups for e in elem_dataset['group_id']]]
for i,ax in zip(layers, axs[0,:]):
if i != 8:
make_layer_plot(ax, elem_subset, i, autoarrange=False, font_scale=0.85)
else:
to_plot = elem_subset.query('element != "F"')
make_layer_plot(ax, to_plot, i, autoarrange=False)
ax.annotate('F', xy=(0.1, 1), xytext=(0.1, 0.8), transform=ax.transAxes,
ha='center', arrowprops=dict(arrowstyle="->", color='darkblue', lw=1.5),
fontweight='bold')
for i,ax in zip(layers, axs[1,:]):
make_layer_plot(ax, ab_subset, i, color_by=['label'], label='anion_group', autoarrange=True if i != 8 else False,
lim=2000, font_scale=0.85)
for ax, l in zip(axs[0, :], layers):
ax.text(0, 1, 'Layer {}'.format(l), ha='left', va='bottom',
fontweight='bold', fontsize=font_size, transform=ax.transAxes)
for ax in axs[1, :]:
ax.set_xlabel('PC1', fontsize=0.8*font_size)
for ax in axs[:, 0]:
ax.set_ylabel('PC2', fontsize=0.8*font_size)
axs[0, 2].legend(ncol=1, loc='lower right', fontsize=font_size*0.8)
axs[1, 0].legend()
axs[0, 0].text(0, 1.05, 'Elements', fontsize=font_size, fontweight='bold',
va='top', ha='right', rotation=90, transform=axs[0,0].transAxes)
axs[1, 0].text(0, 1.05, 'AB Compounds', fontsize=font_size, fontweight='bold',
va='top', ha='right', rotation=90, transform=axs[1,0].transAxes)
fig.set_size_inches(figure_width * 3, figure_height * 2)
fig.tight_layout()
fig.savefig(os.path.join('figures', 'layer-projection.png'), dpi=320)
fig.savefig(os.path.join('figures', 'layer-projection.pdf'))
###Output
_____no_output_____ |
Lesson2/Pandas.ipynb | ###Markdown
IntroductionIf you've had any experience with the python scientific stack, you've probably come into contact with, or at least heard of, the [pandas][1] data analysis library. Before the introduction of pandas, if you were to ask anyone what language to learn as a budding data scientist, most would've likely said the [R statistical programming language][2]. With its [data frame][3] data structure, it was the obvious winner when it came to filtering, slicing, aggregating, or analyzing your data. However, with the introduction of pandas to python's growing set of data analysis libraries, the gap between the two languages has effectively closed, and as a result, pandas has become a vital tool for data scientists using python.While we won't be covering the pandas library itself, since that's a topic fit for a course of its own, in this lesson we will be discussing the simple interface pandas provides for interacting with the matplotlib library. In addition, we'll also take a look at the recent changes the matplotlib team has made to make it possible for the two libraries to work together more harmoniously.That said, let's get set up and see what pandas has to offer.[1]: http://pandas.pydata.org/[2]: https://www.r-project.org/[3]: https://cran.r-project.org/doc/manuals/r-release/R-intro.htmlData-frames
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
###Output
_____no_output_____
###Markdown
What is pandas?Pandas is a library created by [Wes McKinney][1] that provides several data structures that make working with data fast, efficient, and easy. Chief among them is the `DataFrame`, which takes on R's `data.frame` data type, and in many scenarios, bests it. It also provides a simple wrapper around the `pyplot` interface, allowing you to plot the data in your `DataFrame` objects without any context switching in many cases. But, enough talk, let's see it in action.[1]: https://twitter.com/wesmckinn Import the LibraryThe following bit of code imports the pandas library using the widely accepted `pd` naming convention. You'll likely see pandas imported like this just about everywhere it's used, and it is recommended that you always use the same naming convention in your code as well.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Load in Some DataIn the next cell, we'll use the `read_csv` function to load in the [Census Income][1] dataset from the [UCI Machine Learning Repository][2]. Incidentally, this is the exact same dataset that we used in our Exploratory Data Analysis (EDA) example in chapter 2, so we'll get to see some examples of how we could perform some of the same steps using the plotting commands on our `DataFrame` object. [1]: http://archive.ics.uci.edu/ml/datasets/Adult[2]: http://archive.ics.uci.edu/ml/index.html
###Code
import pandas as pd
# Download and read in the data from the UCI Machine Learning Repository
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
header=None,
names=('age',
'workclass',
'fnlwgt',
'education',
'education_num',
'marital_status',
'occupation',
'relationship',
'race',
'sex',
'capital_gain',
'capital_loss',
'hours_per_week',
'native_country',
'target'))
###Output
_____no_output_____
###Markdown
Plotting With pandasJust like we did in our EDA example from chapter 2, we can once again create a simple histogram from our data. This time though, notice that we simply call the `hist` command on the column that contains the education level to plot our data.
###Code
df.education_num.hist(bins=16);
###Output
_____no_output_____
###Markdown
And, remember, pandas isn't doing anything magical here, it's just providing a very simple wrapper around the `pyplot` module. At the end of the day, the code above is simply calling the `pyplot.hist` function to create the histogram. So, we can interact with the plot that it produces the same way we would any other plot. As an example, let's create our histogram again, but this time let's get rid of that empty bar to the left by setting the plot's x-axis limits using the `pyplot.xlim` function.
###Code
df.education_num.hist(bins=16)
# Remove the empty bar from the histogram that's below the
# education_num's minimum value.
plt.xlim(df.education_num.min(), df.education_num.max());
###Output
_____no_output_____
###Markdown
Well, that looks better, but we're still stuck with many of the same problems that we had in the original EDA lesson. You'll notice that most of the x-ticks don't actually line up with their bars, and there's a good reason for that. Remember, in that lesson, we discussed how a histogram was meant to be used with continuous data, and in our case we're dealing with discrete values. So, a bar chart is actually what we want to use here.Luckily, pandas makes the task of creating the bar chart even easier. In our EDA lesson, we had to do the frequency count ourselves, and take care of lining the x-axis labels up properly, and several other small issues. With pandas, it's just a single line of code. First, we call the `value_counts` function on the `education` column to get a set of frequency counts, ordered largest to smallest, for each education level. Then, we call the `plot` function on the `Series` object returned from `value_counts`, and pass in the type of plot with the `kind` parameter, and while we're at it, we'll set our width to 1, like we did in the chapter 2 example, to make it look more histogram-ish.
###Code
df.education.value_counts().plot(kind='bar', width=1);
###Output
_____no_output_____
###Markdown
Now, rather than passing in the plot type with the `kind` parameter, we could've also just called the `bar` function from the `plot` object, like we do in the next cell.
###Code
df.education.value_counts().plot.bar(width=1);
###Output
_____no_output_____
###Markdown
Ok, so that's a pretty good introduction to the simple interface that pandas provides to the matplotlib library, but it doesn't stop there. Pandas also provides a handful of more complex plotting functions in the `pandas.tools.plotting` module. So, let's import another dataset and take a look at an example of what's available. In the cell below, we pull in the Iris dataset that we used in our scatterplot matrix example from chapter 3. Incidentally, if you don't want to mess with network connections, or if you happen to be in a situation where network access just isn't an option, I've copied the data file to the local data folder. The file can be found at `./data/iris_data.csv`
###Code
df = pd.read_csv('https://raw.githubusercontent.com/pydata/pandas/master/pandas/tests/data/iris.csv')
###Output
_____no_output_____
###Markdown
We'll need a color map, essentially just a dictionary mapping each species to a unique color, so we'll put one together in the next cell. Fortunately, pandas makes it easy to get the species names by simply calling the `unique` function on the `Name` column.
###Code
names = df.Name.unique()
colors = ['red', 'green', 'blue']
cmap = dict(zip(names, colors))
###Output
_____no_output_____
###Markdown
Now, before we take a look at one of the functions from the `plotting` module, let's quickly take a look at one of the [changes that was made to matplotlib in version 1.5][1] to accommodate labeled data, like a pandas `DataFrame` for example. The code in the next cell, creates a scatter plot using the `pyplot.scatter` function, like we've done in the past, but notice how we specify the columns that contain our `x` and `y` values. In our example below, we are simply passing in the names of the columns alongside the `DataFrame` object itself. Now, it's arguable just how much more readable this light layer of abstraction is over just passing in the data directly, but it's nice to have the option, nonetheless.[1]: http://matplotlib.org/users/whats_new.htmlworking-with-labeled-data-like-pandas-dataframes
###Code
plt.scatter(x='PetalLength', y='PetalWidth', data=df, c=df.Name.apply(lambda name: cmap[name]));
###Output
_____no_output_____
###Markdown
Now, we're ready to take a look at one of the functions that pandas provides us, and for comparison sake, let's take a look at our old friend, the scatterplot matrix. In the next cell, we'll import the `scatter_matrix` function from the `pandas.tools.plotting` module and run it on the Iris dataset.
###Code
from pandas.tools.plotting import scatter_matrix
scatter_matrix(df, figsize=(10,8), c=df.Name.apply(lambda name: cmap[name]), s=40);
###Output
_____no_output_____ |
Lecture04_TopicModels.ipynb | ###Markdown
Lecture 4 - Topic ModelsIn this notebook we will learn how to cluster text into topics using different embeddings and the K-means clustering algorithm. Below is the overview of this notebook.0. Install required packages (only need to do this the first time we run the notebook)1. Load corpus of tweets2. Make word clouds of the tweets3. Create tf and tf-idf embeddings of the tweets4. Create LDA topic model embeddings of the tweets5. Create low dimensional embeddings of the tweets using UMAP6. Cluster the tweets using K-means clustering7. Analyze clusters using word clouds and screen name histogramsThis notebook can be opened in Colab [](https://colab.research.google.com/github/zlisto/social_media_analytics/blob/main/Lecture04_TopicModels.ipynb)Before starting, select "Runtime->Factory reset runtime" to start with your directories and environment in the base state.If you want to save changes to the notebook, select "File->Save a copy in Drive" from the top menu in Colab. This will save the notebook in your Google Drive. Clone GitHub RepositoryThis will clone the repository to your machine. This includes the code and data files. Then change into the directory of the repository.
###Code
!git clone https://github.com/zlisto/social_media_analytics
import os
os.chdir("social_media_analytics")
###Output
_____no_output_____
###Markdown
Install Requirements
###Code
!pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Import packagesWe import the packages we are going to use. A package contains several useful functions that make our life easier.
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import umap
import gensim.downloader as api
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
import sklearn.cluster as cluster
from sklearn import metrics
from scipy import stats
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import pyLDAvis
import pyLDAvis.sklearn
pyLDAvis.enable_notebook()
import scripts.TextAnalysis as ta
from scripts.api import *
###Output
_____no_output_____
###Markdown
Data Cleaning Load dataWe will load csv files containing tweets from several users into a dataframe **df**.
###Code
fname_db = "data/lecture_04"
df = DB.fetch(table_name = 'user_tweets', path = fname_db)
n = len(df)
print(f"{n} tweets in dataframe")
df.sample(5)
###Output
_____no_output_____
###Markdown
Remove Superfluous Columns We don't need all the columns. We can remove them from this dataframe using the column selection operation. We just choose which columns we want to keep and put them in a list.
###Code
df = df[ ['screen_name', 'text', 'retweet_count']]
df.sample(5)
###Output
_____no_output_____
###Markdown
Plot Tweets per UserA count plot shows us how many tweets each user has in the dataset. If we choose `y` to be `"screen_name"` the plot will be vertical.We can choose the `palette` for the plot from this list here: https://seaborn.pydata.org/tutorial/color_palettes.html
###Code
plt.figure(figsize=(8,8))
sns.countplot(data=df,y='screen_name', palette = "Set2")
plt.ylabel("Screen name", fontsize = 14)
plt.xlabel("Tweet count", fontsize = 14)
plt.show()
###Output
_____no_output_____
###Markdown
Cleaning Text DataNext we will clean the tweet text. We use the `clean_tweet` function in the TextAnalytics module. This function removes punctuation and hyperlinks, and also makes all the text lower case. We remove any cleaned tweets which have zero length, as these won't be useful for clustering. We add a column to `df` called `"text_clean"` with the cleaned tweets.
###Code
df['text_clean'] = df.text.apply(ta.clean_tweet) #clean the tweets
df = df[df.text_clean.str.len() >0] #remove cleaned tweets of lenght 0
nclean = len(df)
print(f"{n} tweets, {nclean} clean tweets")
df.sample(n=5)
###Output
_____no_output_____
###Markdown
Copy of DataframeSometimes you want to work on a slice of a dataframe. For example, maybe you want to work with a slice that contains tweets from a single screen name. If you want to add a column to the slice, you will get a warning, because the slice is tied to the original dataframe. To avoid this, use the `copy` function when creating the slice. This makes the slice an independent copy and now you can add colummns without any error.
###Code
print("Adding a column to a slice of a dataframe (not a copy) gives a warning")
df_aoc = df[df.screen_name=='AOC']
df_aoc['test'] = df.retweet_count
print("Adding a column to a copy of a slice of a dataframe is ok")
df_aoc = df[df.screen_name=='AOC'].copy()
df_aoc['test'] = df.retweet_count
###Output
_____no_output_____
###Markdown
Word CloudWe can make a word cloud of the tweets using the `WordCloud` function which takes as input a list of stopwords and many other parameters. The list of stopwords is `STOPWORDS`. We apply the `set` function so we can remove duplicates and easily add new words. To add a new stopword to the set, we use the `add` function.We convert the `text` column of our dataframe into a single giant string called `text` using the `tolist` and `join` functions. We then apply the `generate` function to `text` to make the word cloud. The `imshow` function allows us to visualize the word cloud.
###Code
stopwords = set(STOPWORDS)
stopwords.add("de")
stopwords.add("que")
text=' '.join(df.text_clean.tolist()).lower()
wordcloud = WordCloud(stopwords=stopwords,max_font_size=150,
max_words=100,
background_color="black",
width=1000,
height=600)
wordcloud.generate(text)
#visualize word cloud
fig = plt.figure(figsize = (10,8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Create Text EmbeddingsTo cluster the tweets, we need to create vector embeddings for them. We can do this using vectorizers. We have two simple options here. One is as a term frequency (tf) vectorizer called *CountVectorizer*. The other is a term-frequency inverse document-frequency (tf-idf) vectorizer called *TfidfVectorizer*. Term Frequency (TF) EmbeddingWe initialize the `CountVectorizer` and tell it to remove English stopwords with the `stop_words` parameter set to `"english"`. We also tell it to remove any word that occur in less than 5 documents with the `min_df` parameter. Then we use the `fit_transform` method applied to the `text_clean` column of `df` to create the document vectors, which we call `tf_embedding`. We store the words for each element of the vector in `tf_feature_names`.
###Code
tf_vectorizer = CountVectorizer(min_df=5, stop_words='english')
tf_embedding = tf_vectorizer.fit_transform(df.text_clean)
tf_feature_names = tf_vectorizer.get_feature_names_out()
nvocab = len(tf_feature_names)
ntweets = len(df.text_clean)
print(f"{ntweets} tweets, {nvocab} words in vocabulary")
print(f"TF embedding shape is {tf_embedding.shape}")
###Output
_____no_output_____
###Markdown
Term Frequency-Inverse Document Frequency (TF-IDF) EmbeddingWe initialize the `TfidfVectorizer` as we did the `CountVectorizer`. Then we use the `fit_transform` method applied to the `text_clean` column of `df` to create the document vectors, which we call `tfidf_embedding`. We store the words for each element of the vector in `tfidf_feature_names`.
###Code
tfidf_vectorizer = TfidfVectorizer(min_df=5, stop_words='english')
tfidf_embedding = tfidf_vectorizer.fit_transform(df.text_clean)
tfidf_feature_names = tfidf_vectorizer.get_feature_names_out()
nvocab = len(tfidf_feature_names)
print(f"{ntweets} tweets, {nvocab} words in vocabulary")
print(f"TF-IDF embedding shape is {tfidf_embedding.shape}")
###Output
_____no_output_____
###Markdown
Latent Dirichlet Allocation (LDA) EmbeddingWe will fit an LDA topic model on the tf embedding of the tweets. Much of this section pulls code from this blog:https://medium.com/mlreview/topic-modeling-with-scikit-learn-e80d33668730 Fitting LDA ModelTo fit an LDA model we need to specify the number of topics. There are sophisticated ways to do this, but because it takes some time to fit the model, we will cheat here. We set `num_topics` equal to the number of unique users in the dataset. Hopefully we find one topic for each user. To fit the model we use the `LatentDirichletAllocation` function. We first initialize this object with the number of topics, and then use the `fit` function to fit the model to `tf_embedding` (we can't use `tfidf_embedding` because LDA data must be word counts (integers)). The fit model object is called `lda`.
###Code
%%time
num_topics = len(df.screen_name.unique())
lda = LatentDirichletAllocation(n_components=num_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0).fit(tf_embedding)
###Output
_____no_output_____
###Markdown
Convert Tweets into Topic Embedding Vectors Using LDA ModelNext we convert each tweet into a topic embedding vector. This vector length is the number of topics in the LDA model. The value of each element tells us the probability the tweet contains this topic. The conversion is done using the `transform` function of `lda`. The resulting topic vectors are called `lda_embedding`.
###Code
lda_embedding = lda.transform(tf_embedding)
print(f"{ntweets} tweets, {num_topics} topics in LDA model")
print(f"shape of lda embedding is {lda_embedding.shape}")
###Output
_____no_output_____
###Markdown
Visualizing LDA Topics with pyLDAvisA cool way to visualize the topics in an LDA model is using the pyLDAvis package. To do this we use the `prepare` function in `pyLDAvis.sklearn` to create an object called `viz`. The inputs are the model (`lda`), the tf embedding (`tf_embedding`), and the CountVectorizer (`tf_vectorizer`). Then we create an interactive visualization of the model by passing `viz` to the `display` function. Here's how to use the pyLDAvis webpage. Each circle is a topic. Hover over it and the bar graph lights up with the highest probability words in the topic. You can slide the value of the relevance metric (lambda) to adjust how the relevance of each word is measured. lambda = 0 means the red bar just shows the probability of the word in the topic. lambda = 1 means the red bar shows the probability of the word in the topic divided by the probability of the word in the entire corpus of tweets. For our purposes, lambda = 0 is fine.
###Code
viz = pyLDAvis.sklearn.prepare(lda, tf_embedding, tf_vectorizer)
pyLDAvis.display(viz)
###Output
_____no_output_____
###Markdown
UMAP EmbeddingWe can use UMAP to create low-dimensional embeddings of the tweets. This allows us to plot the tweets in two dimensions. Also, sometimes the lower dimensional embedding makes better text clusters.
###Code
%%time
umap_tf_embedding = umap.UMAP(n_components=2, metric='hellinger').fit_transform(tf_embedding)
umap_tfidf_embedding = umap.UMAP(n_components=2, metric='hellinger').fit_transform(tfidf_embedding)
#zscoring centers the vectors at zero
umap_tf_embedding = stats.zscore(umap_tf_embedding,nan_policy='omit')
umap_tfidf_embedding = stats.zscore(umap_tfidf_embedding,nan_policy='omit')
###Output
_____no_output_____
###Markdown
Add UMAP Embeddings to DataFrameAdd UMAP embeddings x and y coordinates for each tweet to `df`.
###Code
df['tf_umap_x'] = umap_tf_embedding[:,0]
df['tf_umap_y'] = umap_tf_embedding[:,1]
df['tfidf_umap_x'] = umap_tfidf_embedding[:,0]
df['tfidf_umap_y'] = umap_tfidf_embedding[:,1]
###Output
_____no_output_____
###Markdown
Visualize EmbeddingsWe can use `scatterplot` to plot the embeddings using the UMAP x-y coordinates. We will color the data points, which are tweets, by the screen name of their creator using the `hue` parameter.
###Code
xmax = 3 #range for x-axis
ymax = 3 #range for y-axis
s = 5 #marker size
fig = plt.figure(figsize = (16,8))
ax1 = plt.subplot(1,2,1)
sns.scatterplot(data=df, x="tf_umap_x",
y="tf_umap_y", hue="screen_name", s=s)
plt.title("TF Embedding")
plt.xlim([-xmax, xmax])
plt.ylim([-ymax,ymax])
ax2 = plt.subplot(1,2,2)
sns.scatterplot(data=df, x="tfidf_umap_x",
y="tfidf_umap_y", hue="screen_name", s=s)
plt.title("TF-IDF Embedding");
plt.xlim([-xmax, xmax])
plt.ylim([-ymax,ymax])
plt.show()
###Output
_____no_output_____
###Markdown
Cluster Tweets Using K-Means on EmbeddingsWe will cluster the tf, tf-idf, and word2vec embedding vectors using the k-means algorithm. We choose the number of clusters we want with the variable `n_clusters`. To get the cluster label of each tweet we initiailize a `KMeans` object with the number of clusters, and then call the `fit_predict` function on the embedding array. We create a column in `df` for each k-means cluster label.
###Code
#n_clusters = len(df.screen_name.unique())
n_clusters = 6
kmeans_label = cluster.KMeans(n_clusters=n_clusters).fit_predict(tf_embedding)
df['kmeans_label_tf'] = [str(x) for x in kmeans_label]
kmeans_label = cluster.KMeans(n_clusters=n_clusters).fit_predict(tfidf_embedding)
df['kmeans_label_tfidf'] = [str(x) for x in kmeans_label]
kmeans_label = cluster.KMeans(n_clusters=n_clusters).fit_predict(lda_embedding)
df['kmeans_label_lda'] = [str(x) for x in kmeans_label]
kmeans_label = cluster.KMeans(n_clusters=n_clusters).fit_predict(np.nan_to_num(umap_tf_embedding))
df['kmeans_label_tf_umap'] = [str(x) for x in kmeans_label]
kmeans_label = cluster.KMeans(n_clusters=n_clusters).fit_predict(np.nan_to_num(umap_tfidf_embedding))
df['kmeans_label_tfidf_umap'] = [str(x) for x in kmeans_label]
###Output
_____no_output_____
###Markdown
Plot Embeddings with Cluster LabelsWe can make a scatterplot of the tweet embeddings, but this time color the data points using the cluster label.
###Code
embedding_types = ['tf_umap','tfidf_umap','lda']
s = 5
xmax,ymax = 3,3
for embedding_type in embedding_types:
fig = plt.figure(figsize = (16,8))
ax1 = plt.subplot(1,2,1)
kmeans_label = f"kmeans_label_{embedding_type}"
sns.scatterplot(data=df, x=f"tfidf_umap_x",
y=f"tfidf_umap_y",
hue="screen_name", s=s)
plt.title("True Clusters")
plt.xlim([-xmax, xmax])
plt.ylim([-ymax,ymax])
ax2 = plt.subplot(1,2,2)
sns.scatterplot(data=df, x=f"tfidf_umap_x",
y=f"tfidf_umap_y",
hue=kmeans_label, s=s)
plt.title(f"{kmeans_label} Clusters");
plt.xlim([-xmax, xmax])
plt.ylim([-ymax,ymax])
plt.show()
###Output
_____no_output_____
###Markdown
Histograms of Users and Word Clouds of Tweets in the ClustersWe will take the tweets in each cluster, make a word cloud for them, and a histogram of the screen names of the users who posted the tweets. If we have good clusters, we expect one user to dominate each cluster, or a group of users who tweet about similar topics.We will be creating word clouds and histograms again later on, so let's write a function to do it. The function is called `kmeans_wordcloud_userhist`. Its inputs are the dataframe with the tweets and cluster labels, `df`, the name of the column with the cluster labels `cluster_label_column`, and a set of stopwords called `stopwords`.
###Code
def kmeans_wordcloud_userhist(df, cluster_label_column,stopwords):
print(cluster_label_column)
for k in np.sort(df[cluster_label_column].unique()):
s=df[df[cluster_label_column]==k]
text=' '.join(s.text_clean.tolist()).lower()
wordcloud = WordCloud(stopwords=stopwords,max_font_size=150, max_words=100, background_color="white",width=1000, height=600)
wordcloud.generate(text)
print(f"\n\tCluster {k} {cluster_label_column} has {len(s)} tweets")
plt.figure(figsize = (16,4))
plt.subplot(1,2,1)
ax = sns.countplot(data = s, x = 'screen_name')
plt.xticks(rotation=45)
plt.ylabel("Number of tweets")
plt.xlabel("Screen name")
plt.subplot(1,2,2)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
return 1
###Output
_____no_output_____
###Markdown
Wordcloud of ClustersWe can plot a word cloud for each cluster found, along with a histogram of the screen names in the cluster. Wordcloud for TF Embedding
###Code
stopwords = set(STOPWORDS)
cluster_label_column= 'kmeans_label_tf'
kmeans_wordcloud_userhist(df,cluster_label_column,stopwords )
###Output
_____no_output_____
###Markdown
Wordcloud for TF-IDF Embedding
###Code
stopwords = set(STOPWORDS)
cluster_label_column= 'kmeans_label_tfidf'
kmeans_wordcloud_userhist(df,cluster_label_column,stopwords )
###Output
_____no_output_____
###Markdown
Wordcloud for LDA UMAP Embedding
###Code
stopwords = set(STOPWORDS)
cluster_label_column= 'kmeans_label_lda'
kmeans_wordcloud_userhist(df,cluster_label_column,stopwords )
###Output
_____no_output_____ |
econometria-basica/econ_basic_ch_03.ipynb | ###Markdown
Chapter 03 - Basic Econometrics - Gujarati and Porter***
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#plt.style.use(['science', 'notebook', 'grid', 'dark_background'])
plt.style.use(['science', 'notebook', 'grid'])
###Output
_____no_output_____
###Markdown
*** Summary- The assumptions of the classical linear regression model are: - $I.$ The model is linear in the parameters. - $II.$ X values are fixed (in repeated samples) or independent of the error term $u_i$ ($cov(X_i, u_i) = 0$). - $III.$ $E(u_i | X_i ) = 0$ or $E(u_i) = 0$. - $IV.$ Homoscedasticity, that is, $var(u_i) = \sigma^2$, regardless of the value of $X_i$. - $V.$ There is no autocorrelation between the error terms: $cov(u_i, u_j) = 0, i \ne j$. - $VI.$ The number of observations must be greater than the number of parameters to be estimated. - $VII.$ The X values in a sample must not all be the same ($var(X) > 0$).- ***BLUE*** estimator (best linear unbiased estimator): - It is linear. - It is unbiased, that is, $E(\hat{\beta}) = \beta$. - It is an efficient estimator, that is, it has minimum variance among unbiased linear estimators. - Gauss-Markov theorem: *Given the assumptions of the classical linear regression model, the least-squares (OLS) estimators, within the class of linear unbiased estimators, have minimum variance, that is, they are the best linear unbiased estimator (BLUE).*- The coefficient of determination $R^2$ is a summary measure that tells how well the sample regression line fits the data.*** Exercise 3.18
###Code
from scipy.stats import spearmanr
# Computing the Spearman rank correlation coefficient with SciPy
p1 = np.array([1, 3, 7, 10, 9, 5, 4, 8, 2, 6])
p2 = np.array([3, 2, 8, 7, 9, 6, 5, 10, 1, 4])
print(spearmanr(p1, p2))
# Computing the Spearman rank correlation coefficient with pandas
cols = ['P1', 'P2']
idx = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
df = pd.DataFrame(data=np.column_stack([p1, p2]), columns=cols, index=idx)
print(df.corr(method='spearman'))
###Output
SpearmanrResult(correlation=0.8424242424242423, pvalue=0.0022200312259168407)
P1 P2
P1 1.000000 0.842424
P2 0.842424 1.000000
###Markdown
- There is a high degree of correlation between the grades on the first exam and the grades on the final exam; in other words, the higher the grade on the first exam, the higher the grade on the final exam.

Exercise 3.19
- Y = exchange rate of the Canadian dollar against the US dollar (C$/USD).
- X = ratio of the US CPI to the Canadian CPI, i.e. the relative prices of the two countries.
- Regression results:
  - $\hat{Y_t} = 0.912 + 2.250 X_t$
  - $r^2 = 0.440$
  - $\sigma = 0.096$

The regression above tells us that the exchange rate (C\\$/USD) depreciated by about 2.25 units for each one-unit increase in the relative price level of the two countries ($\frac{\text{IPC}_{USA}}{\text{IPC}_{CND}}$). According to purchasing power parity (PPP) theory, the positive value of the coefficient does not make economic sense. *"The PPP theory therefore predicts that a fall in a currency's domestic purchasing power (as indicated by an increase in the domestic price level) will be associated with a proportional depreciation of the currency in the foreign exchange market"* (International Economics; Krugman, Obstfeld and Melitz). Hence, the fact that an increase in the Canadian CPI leads to a depreciation of the exchange rate ($ \downarrow \frac{\text{CND}}{\text{USD}} = \frac{\text{IPC}_{USA}}{\uparrow \text{IPC}_{CND}} $) contradicts this theory.

Exercise 3.20
###Code
# Lendo e tratando os dados
# TABLE B–49.—Productivity and related data, business sector, 1959–2006
df = pd.read_excel("dados/ERP-2007/ERP-2007-table49.xls", index_col=0, header=[2,3], nrows=52)
df.columns = pd.MultiIndex.from_tuples([(x.upper(), y.upper()) for x, y in df.columns])
# df.columns = ['__'.join(col).strip() for col in df.columns.values]
display(df.head())
x1 = df['OUTPUT PER HOUR OF ALL PERSONS', 'BUSINESS SECTOR']
x2 = df['OUTPUT PER HOUR OF ALL PERSONS', 'NONFARM BUSINESS SECTOR']
y1 = df['REAL COMPENSATION PER HOUR', 'BUSINESS SECTOR']
y2 = df['REAL COMPENSATION PER HOUR', 'NONFARM BUSINESS SECTOR']
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
axes[0].scatter(x1, y1)
axes[0].set_title('BUSINESS SECTOR')
axes[0].set_xlabel('OUTPUT PER HOUR OF ALL PERSONS')
axes[0].set_ylabel('REAL COMPENSATION PER HOUR')
axes[1].scatter(x2, y2)
axes[1].set_title('NONFARM BUSINESS SECTOR')
axes[1].set_xlabel('OUTPUT PER HOUR OF ALL PERSONS')
plt.show()
import statsmodels.api as sm
x1 = sm.add_constant(x1)
model = sm.OLS(y1, x1)
results = model.fit()
print(results.summary())
###Output
OLS Regression Results
=============================================================================================================
Dep. Variable: ('REAL COMPENSATION PER HOUR', 'BUSINESS SECTOR') R-squared: 0.980
Model: OLS Adj. R-squared: 0.980
Method: Least Squares F-statistic: 2475.
Date: Wed, 25 May 2022 Prob (F-statistic): 2.96e-44
Time: 18:37:58 Log-Likelihood: -119.89
No. Observations: 52 AIC: 243.8
Df Residuals: 50 BIC: 247.7
Df Model: 1
Covariance Type: nonrobust
=========================================================================================================================
coef std err t P>|t| [0.025 0.975]
-------------------------------------------------------------------------------------------------------------------------
const 33.1184 1.244 26.627 0.000 30.620 35.617
('OUTPUT PER HOUR OF ALL PERSONS', 'BUSINESS SECTOR') 0.6627 0.013 49.752 0.000 0.636 0.689
==============================================================================
Omnibus: 5.052 Durbin-Watson: 0.193
Prob(Omnibus): 0.080 Jarque-Bera (JB): 2.913
Skew: -0.366 Prob(JB): 0.233
Kurtosis: 2.101 Cond. No. 338.
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Exercise 3.22
###Code
df = pd.read_excel('dados/ouro-nyse.xlsx')
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[0].scatter(df['IPC'], df['Preço do Ouro'])
axes[0].set_xlabel('IPC')
axes[0].set_ylabel('Preço do Ouro')
axes[1].scatter(df['IPC'], df['Índice NYSE'])
axes[1].set_xlabel('IPC')
axes[1].set_ylabel('Índice NYSE')
plt.show()
import statsmodels.api as sm
ipc = sm.add_constant(df['IPC'])
model = sm.OLS(df['Preço do Ouro'], ipc)
results = model.fit()
print(results.summary())
ipc = sm.add_constant(df['IPC'])
model = sm.OLS(df['Índice NYSE'], ipc)
results = model.fit()
print(results.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: Índice NYSE R-squared: 0.839
Model: OLS Adj. R-squared: 0.834
Method: Least Squares F-statistic: 161.5
Date: Wed, 25 May 2022 Prob (F-statistic): 7.89e-14
Time: 18:37:59 Log-Likelihood: -274.09
No. Observations: 33 AIC: 552.2
Df Residuals: 31 BIC: 555.2
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const -3444.9920 533.966 -6.452 0.000 -4534.023 -2355.961
IPC 50.2972 3.958 12.707 0.000 42.224 58.370
==============================================================================
Omnibus: 12.685 Durbin-Watson: 0.189
Prob(Omnibus): 0.002 Jarque-Bera (JB): 3.084
Skew: 0.292 Prob(JB): 0.214
Kurtosis: 1.621 Cond. No. 410.
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
|
numpy/Numpy Review .ipynb | ###Markdown
NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
zeros = np.zeros(10)
zeros
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
ones = np.ones(10)
ones
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
fives = ones * 5
fives
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
integers = np.arange(10,51)
integers
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
evens = np.arange(10,51,2)
evens
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
matrix = np.arange(0,9).reshape(3,3)
matrix
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
eye = np.eye(3)
eye
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
rdn = np.random.rand(1)
rdn
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
rdn_snd = np.random.randn(25)
rdn_snd
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
mtx = np.arange(1,101).reshape(10,10) / 100
mtx
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
lin = np.linspace(0,1,20)
lin
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[2:,2:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3,4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[:3,1]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[-1,:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[-2:,:]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
mat.sum()
###Output
_____no_output_____
###Markdown
Get the standard deviation of the values in mat
###Code
mat.std()
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
mat.sum(axis=0)
###Output
_____no_output_____ |
tests/wad_data_inference.ipynb | ###Markdown
Configuration && Dataset
###Code
from .context.project import wad_data
# WAD Configuration
class WADInferenceConfig(wad_data.WADConfig):
BATCH_SIZE = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
wad_config = WADInferenceConfig()
# Dataset
dataset = wad_data.WADDataset()
dataset.load_data_from_file(os.path.join(ROOT_DIR, 'project/last_run_validation.pkl'))
dataset.root_dir = os.path.join(DATA_DIR, 'train')
dataset.prepare()
print('Number of Images in Dataset: {}'.format(dataset.num_images))
###Output
_____no_output_____
###Markdown
Load Model
###Code
MODEL_WEIGHTS_PATH = os.path.join(LOGS_DIR, 'wad20180621T1404/mask_rcnn_wad_0001.h5')
# Create model in inference mode
model = modellib.MaskRCNN(mode="inference", config=wad_config, model_dir=LOGS_DIR)
model.load_weights(MODEL_WEIGHTS_PATH, by_name=True)
###Output
_____no_output_____
###Markdown
Inference
###Code
# Load a random image from the dataset
image_id = np.random.randint(0, dataset.num_images)
image = dataset.load_image(image_id)
gt_masks = dataset.load_mask(image_id)
print('Running detection on image {} (filename: {})'.format(image_id, dataset.image_info[image_id]['path']))
results = model.detect([image], verbose=1)[0]
visualize.display_instances(image, results['rois'], results['masks'], results['class_ids'],
wad_data.index_to_classes, results['scores'])
# Calculate Average Precision (AP) for each image
APs = []
for image_id in range(dataset.num_images):
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, wad_config,
image_id, use_mini_mask=False)
molded_images = np.expand_dims(modellib.mold_image(image, wad_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps =\
utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print('Image: {0:3d}\tAP: {1:1.4f}'.format(image_id, AP))
# Calculate mAP score for dataset
print("mAP: ", np.mean(APs))
###Output
_____no_output_____ |
Python_lib/pinpong/pinpong-ipynb/第二部分 物联网应用/2.3 数据绘图.ipynb | ###Markdown
Data Plotting with vvBoard IoT (虚谷物联)

1. Example description

Data is at the core of the Internet of Things. Whatever IoT technology is applied to, data analysis is always an important step. IoT data can be divided into composite data and single data. Composite data means mutually related values, such as the temperature and humidity in environmental data, which need to be stored under a single message topic (topicID). What we normally use is single data, i.e. one topicID recording one series of values. Although SIoT's web management page can already display such single data, and it is fairly easy to plot charts with an mPython board (掌控板) or Mind+, this example demonstrates how to draw dynamic plots with Python code; with the matplotlib library many different chart styles can be produced.

1) Data type: single data. 2) Resources involved: SIoT server, siot library, matplotlib library. 3) Written by: Xie Zuoru (谢作如). 4) Reference site: https://github.com/vvlink/SIoT 5) Other notes: this example can be ported to other platforms. Because the vvBoard (虚谷号) ships with the siot library and the SIoT server pre-installed, vvBoard users can skip that step.

2. Writing the code

2.1 Data collection side

The data collection side uses an mPython board, Arduino, or vvBoard to read a temperature/humidity sensor and send the readings to the SIoT server, under the topicID xzr/100. There are many ways (and many hardware options) to collect the sensor data, so that code is omitted; see https://github.com/vvlink/siot for details.

2.2 Data display side

Step 1: import the library. The siot library is a thin wrapper around the mqtt library, which keeps the code simpler.
###Code
import siot
###Output
_____no_output_____
###Markdown
Step 2: configure the SIoT server
###Code
SERVER = "127.0.0.1"        # MQTT server IP
CLIENT_ID = ""              # on SIoT, CLIENT_ID can be left empty
IOT_pubTopic = 'xzr/100'    # the topic is "project name/device name"
IOT_UserName ='scope'       # username
IOT_PassWord ='scope'       # password
###Output
_____no_output_____
###Markdown
Note: on the vvBoard, 127.0.0.1 can be used to refer to the board itself; the username and the password are both "scope"; the topicID is defined by you. Here "xzr/100" is used, meaning the project name is "xzr" and the device name is "100".

Step 3: connect to the SIoT server
###Code
siot.init(CLIENT_ID, SERVER, user=IOT_UserName, password=IOT_PassWord)
siot.connect()
###Output
_____no_output_____
###Markdown
Step 4: define the plotting function

Because this runs in Jupyter, "display.clear_output(wait=True)" was added so that the chart can refresh dynamically. If you run it as a plain .py file instead, remove the following lines: %matplotlib inline / from IPython import display / display.clear_output(wait=True)

Changing the 'r--' argument of "plt.plot" gives lines of different colours and types. The first letter is the colour, e.g. "r" for red and "b" for blue; the following characters give the line style. For example:
- 'r--': red dashed line
- 'bs': blue squares
- 'g^': green triangles
- ……
###Code
import matplotlib.pyplot as plt
%matplotlib inline
from IPython import display
x,p1=[],[]
i=0
w=20 # length of the data window shown in the chart
def draw(v1):
global x,i,p1
i=i+1
x.append(i)
p1.append(v1)
    # once there are too many points, drop the oldest so the chart does not keep compressing
if len(x)>w:
x.pop(0)
p1.pop(0)
fig = plt.figure()
plt.plot(x,p1,'r--')
display.clear_output(wait=True)
plt.show()
###Output
_____no_output_____
###Markdown
Step 5: subscribe to messages

In "siot.subscribe(IOT_pubTopic, sub_cb)", "sub_cb" is the name of the callback function. Each time the "siot" object receives a message, the callback is executed once, and inside the callback we call the plotting function. **Note that if there is a coding error inside the callback, Python will not report it, which makes debugging harder.**
###Code
def sub_cb(client, userdata, msg):
print("\nTopic:" + str(msg.topic) + " Message:" + str(msg.payload))
  # msg.payload is of type bytes and must be converted
s=float(msg.payload)
draw(s)
siot.subscribe(IOT_pubTopic, sub_cb)
siot.loop()
###Output
_____no_output_____ |
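###Markdown
Added debugging tip (not part of the original tutorial): because exceptions raised inside the MQTT callback are swallowed silently, wrapping the callback body in try/except makes errors visible while testing.
###Code
import traceback

def sub_cb_debug(client, userdata, msg):
    try:
        print("\nTopic:" + str(msg.topic) + " Message:" + str(msg.payload))
        draw(float(msg.payload))
    except Exception:
        traceback.print_exc()   # print the error instead of failing silently

# while debugging, subscribe with the wrapped callback instead:
# siot.subscribe(IOT_pubTopic, sub_cb_debug)
###Output
_____no_output_____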
Models/model_v1/Model_v1_0.ipynb | ###Markdown
Include libraries and load pretrained weak models
###Code
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
import pickle
import numpy as np
import sys
import sklearn.svm.classes
import pandas as pd
import os
#path constants
train_path = '../../data/train'
test_path = '../../data/test'
#type constants
vehicle_types = ['ZVe44', 'ZV573', 'ZV63d', 'ZVfd4', 'ZVa9c', 'ZVa78', 'ZV252']
#two label dataframes
train_label_df = pd.read_csv(train_path + '/train_label.csv', delimiter = ',', encoding = 'utf-8')
test_label_df = pd.read_csv(test_path + '/test_label.csv', delimiter = ',', encoding = 'utf-8')
sys.path.append(r'D:/ProgramData/Anaconda3/Lib/site-packages/sklearn/svm/')
cluster_n = 50
saved_model_path = '../SVM/ZVe44/'
svm_list = list()
for i in range(cluster_n):
print(saved_model_path +'trained'+str(i)+'.pkl')
with open(saved_model_path +'trained'+str(i)+'.pkl', 'rb') as file:
pickle_model = pickle.load(file)
svm_list.append(pickle_model)
model = svm_list[0]
print(model)
print(model.support_vectors_)
###Output
SVC(C=0.9, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='scale', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
[[0.09101846 0.1039147 0.11444814 ... 0. 0. 1. ]
[0.04438014 0.07589779 0.07438593 ... 0. 0. 1. ]
[0.04607944 0.08558117 0.10634989 ... 0. 0. 1. ]
...
[0.03700318 0.08280045 0.08988523 ... 0. 0. 1. ]
[0.03496786 0.08425569 0.07320605 ... 0. 0. 1. ]
[0.04344518 0.07684005 0.0637134 ... 0. 0. 1. ]]
###Markdown
load train data and labels and generate pooling score tensors
###Code
def getLabel(filename, label_df):
idx = label_df.loc[label_df['sample_file_name'] == filename]
return idx.iloc[0]['label']
def feature_tensor_gen(path, label_df, model_list):
#path: train_path or test_path
#vehicle_type: one string element under vehicle_types = ['ZVe44', 'ZV573', 'ZV63d', 'ZVfd4', 'ZVa9c', 'ZVa78', 'ZV252']
#these are variables to calculate traversing progress (DO NOT CHANGE)
counts_per_percent = int(len(os.listdir(path)) / 100)
percentage_completion = 0
counter = 0
#pooling result from 50 weak learners then concatenated with the label
feature_tensor = np.empty((0, cluster_n+1))
for file in os.listdir(path):
if file == 'clustered':
continue
sample_df = pd.read_csv(path + '/' + file, delimiter = ',', encoding = 'utf-8')
# NO NEED TO CHANGE ANYTHING ABOVE
# --------------------------------------------------------------------------
label = getLabel(file, label_df)
np_arr = sample_df.to_numpy()
feature_vector = list()
if np_arr.shape[0] == 0:
continue
for i in range(cluster_n):
input_vector = np_arr[np_arr[:,-1] == i]
if input_vector.shape[0] == 0:
feature_vector.append(0.5)
continue
model = model_list[i]
pooling_score = np.average(model.predict(input_vector[:,:-1]))
feature_vector.append(pooling_score)
feature_vector.append(label)
feature_vector = np.array(feature_vector)
feature_tensor = np.append(feature_tensor, [feature_vector], axis=0)
# --------------------------------------------------------------------------
# NO NEED TO CHANGE ANYTHING BELOW
#belows are to show traversing progress (DO NOT CHANGE)
counter += 1
if counter == counts_per_percent:
counter = 0
percentage_completion += 1
print('traversing files under', path, ':', percentage_completion, "%", end="\r", flush=True)
return feature_tensor
csv_path = '../../cleaned_data/train/ZVe44/clustered/'
feature_tensor = feature_tensor_gen(csv_path, train_label_df, svm_list)
feature_tensor.shape
x = feature_tensor[:, 0:50]
y = feature_tensor[:,50]
print(feature_tensor.shape)
print(x.shape)
print(y.shape)
np.savetxt("feature_tensor.csv", feature_tensor, delimiter=",")
###Output
(9335, 51)
(9335, 50)
(9335,)
###Markdown
Logistic Regression Kernel to convert pooling scores into label
###Code
lrm = LogisticRegression(max_iter = 1000000, penalty = 'none', tol = 1e-10)
lrm.fit(x, y)
###Output
_____no_output_____
###Markdown
Performance analysis
###Code
y_hat = lrm.predict(x)
from sklearn.metrics import accuracy_score
accuracy_score(y, y_hat)
from sklearn import metrics
fpr, tpr, thresholds = metrics.roc_curve(y, y_hat, pos_label=1)
metrics.auc(fpr, tpr)
label = y
label.shape
label = label.reshape((-1, 1))
print(label.shape)
###Output
(9335, 1)
|
docs/level1/dsdot.ipynb | ###Markdown
`dsdot(N, SX, INCX, SY, INCY)`Computes the dot product of a vector $\mathbf{x}$ and a vector $\mathbf{y}$.Operates on single-precision real valued arrays.Input vector $\mathbf{x}$ is represented as a [strided array](../strided_arrays.ipynb) `SX`, spaced by `INCX`.Input vector $\mathbf{y}$ is represented as a [strided array](../strided_arrays.ipynb) `SY`, spaced by `INCY`.Both $\mathbf{x}$ and $\mathbf{y}$ are of size `N`. Example usage
###Code
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(''), "..", "..")))
import numpy as np
from pyblas.level1 import dsdot
x = np.array([1, 2, 3], dtype=np.single)
y = np.array([6, 7, 8], dtype=np.single)
N = len(x)
incx = 1
incy = 1
dsdot(N, x, incx, y, incy)
###Output
_____no_output_____
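###Markdown
As an added illustration (not part of the original page), the increment arguments stride through the arrays: with `INCX=2` only every other element of `SX` is used. The values below are arbitrary.
###Code
x_strided = np.array([1, 2, 3, 4, 5, 6], dtype=np.single)  # elements 1, 3, 5 are used
y2 = np.array([6, 7, 8], dtype=np.single)
dsdot(3, x_strided, 2, y2, 1)  # expected 1*6 + 3*7 + 5*8 = 67.0
###Output
_____no_output_____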
###Markdown
Docstring
###Code
help(dsdot)
###Output
Help on function dsdot in module pyblas.level1.dsdot:
dsdot(N, SX, INCX, SY, INCY)
Computes the dot-product of a vector x and a vector y.
Parameters
----------
N : int
Number of elements in input vectors
SX : numpy.ndarray
A single precision real array, dimension (1 + (`N` - 1)*abs(`INCX`))
INCX : int
Storage spacing between elements of `SX`
SY : numpy.ndarray
A single precision real array, dimension (1 + (`N` - 1)*abs(`INCY`))
INCY : int
Storage spacing between elements of `SY`
Returns
-------
numpy.double
See Also
--------
sdot : Single-precision real dot product
sdsdot : Single-precision real dot product (computed in double precision, returned as single precision)
ddot : Double-precision real dot product
cdotu : Single-precision complex dot product
cdotc : Single-precision complex conjugate dot product
zdotu : Double-precision complex dot product
zdotc : Double-precision complex conjugate dot product
Notes
-----
Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/dsdot.ipynb
Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/dsdot.f
Examples
--------
>>> x = np.array([1, 2, 3], dtype=np.single)
>>> y = np.array([6, 7, 8], dtype=np.single)
>>> N = len(x)
>>> incx = 1
>>> incy = 1
>>> dsdot(N, x, incx, y, incy)
44.0
###Markdown
Source code
###Code
dsdot??
###Output
_____no_output_____ |
3_backprop-example.ipynb | ###Markdown
Backpropagation, by example

Full explanation available (link removed due to anonymization).

Let's work this out for our small network:
###Code
import numpy as np

# helper used throughout this walkthrough (defined here so the cell runs on its own)
def sigmoid(z):
    return 1. / (1. + np.exp(-z))

x1, x2 = 3., 1.
w11, w21 = 6., -2.
w12, w22 = -3., 5.
h1_in = w11*x1 + w21*x2
h2_in = w12*x1 + w22*x2
print(h1_in, h2_in)
h1_out, h2_out = sigmoid(h1_in), sigmoid(h2_in)
print(h1_out, h2_out)
# next layer
print("------")
v11, v21 = 1., 0.25
v12, v22 = -2., 2
y1_in = v11*h1_out + v21*h2_out
y2_in = v12*h1_out + v22*h2_out
print(y1_in, y2_in)
y1_out, y2_out = sigmoid(y1_in), sigmoid(y2_in)
print(y1_out, y2_out)
###Output
16.0 -4.0
0.9999998874648379 0.01798620996209156
------
1.0044964399553609 -1.9640273550054927
0.7319417133694889 0.12303185591001443
###Markdown
The network reached `(0.73, 0.12)` whereas it should have produced `(1, 0)`. Let's zoom in on the last layer first.
###Code
t1, t2 = 1., 0.
e1, e2 = (y1_out-t1), (y2_out-t2)
print(e1, e2)
grad_y1_out = 2*e1
grad_y2_out = 2*e2
print(grad_y1_out, grad_y2_out)
# backprop through sigmoid, simply multiply by sigmoid(z) * (1-sigmoid(z))
grad_y1_in = (y1_out * (1-y1_out)) * grad_y1_out
grad_y2_in = (y2_out * (1-y2_out)) * grad_y2_out
print(grad_y1_in, grad_y2_in)
###Output
-0.10518770232556676 0.026549048699963138
###Markdown
That concludes the output units. Next, we go for the weights (here weight `v21` is highlighted).
###Code
grad_v21 = grad_y1_in * h2_out
grad_v22 = grad_y2_in * h2_out
print(grad_v21, grad_v22)
grad_v11 = grad_y1_in * h1_out
grad_v12 = grad_y2_in * h1_out
print(grad_v11, grad_v12)
###Output
-0.0018919280994576303 0.0004775167642113309
-0.10518769048825163 0.02654904571226164
###Markdown
And now to the hidden outputs
###Code
grad_h1_out = grad_y1_in*v11 + grad_y2_in*v12
grad_h2_out = grad_y1_in*v21 + grad_y2_in*v22
print(grad_h1_out, grad_h2_out)
###Output
-0.15828579972549303 0.026801171818534586
###Markdown
We are now done with the last layer and can proceed with the first layer.
###Code
# backprop through sigmoid, simply multiply by sigmoid(z) * (1-sigmoid(z))
grad_h1_in = (h1_out * (1-h1_out)) * grad_h1_out
grad_h2_in = (h2_out * (1-h2_out)) * grad_h2_out
print(grad_h1_in, grad_h2_in)
print("----")
# get the gradients for the weights
grad_w21 = grad_h1_in * x2
grad_w22 = grad_h2_in * x2
print(grad_w21, grad_w22)
grad_w11 = grad_h1_in * x1
grad_w12 = grad_h2_in * x1
print(grad_w11, grad_w12)
# get the gradients for the inputs (could be ignored in this case)
grad_x1 = grad_h1_in*w11 + grad_h2_in*w12
grad_x2 = grad_h1_in*w21 + grad_h2_in*w22
print(grad_x1, grad_x2)
###Output
-1.7812716122177433e-08 0.0004733812240027136
----
-1.7812716122177433e-08 0.0004733812240027136
-5.34381483665323e-08 0.0014201436720081408
-0.0014202505483048738 0.0023669417454458123
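###Markdown
As a quick sanity check (added; not part of the original walkthrough), we can compare the analytic gradient for $w_{11}$ against a central finite-difference estimate of the loss:
###Code
# numerical gradient check for w11 (all other weights held at their values above)
def loss_wrt_w11(w11_val):
    h1 = sigmoid(w11_val * 3. + (-2.) * 1.)
    h2 = sigmoid((-3.) * 3. + 5. * 1.)
    y1 = sigmoid(1. * h1 + 0.25 * h2)
    y2 = sigmoid((-2.) * h1 + 2. * h2)
    return (y1 - 1.) ** 2 + (y2 - 0.) ** 2

eps = 1e-4
numeric_grad_w11 = (loss_wrt_w11(6. + eps) - loss_wrt_w11(6. - eps)) / (2 * eps)
print(numeric_grad_w11)  # should be close to the analytic grad_w11 computed above
print(grad_w11)
###Output
_____no_output_____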
###Markdown
Now with an autodiff framework

Fortunately, we can hide away almost all of the gradient calculation behind differentiable modules (i.e., `classes`). That way, we only need to define the forward pass and a framework such as PyTorch or TensorFlow can work out the backward pass automatically.

For now, we'll work with the framework described in [this post](https://alexander-schiendorfer.github.io/2020/02/16/automatic-differentiation.html) since we can inspect every line of code if need be.
###Code
class CompNode:
def __init__(self, tape):
# make sure that the gradient tape knows us
tape.add(self)
self.output = 0
# perform the intended operation
# and store the result in self.output
def forward(self):
pass
# assume that self.gradient has all the information
# from outgoing nodes prior to calling backward
# -> perform the local gradient step with respect to inputs
def backward(self):
pass
# needed to be initialized to 0
def set_gradient(self, gradient):
self.gradient = gradient
# receive gradients from downstream nodes
def add_gradient(self, gradient):
self.gradient += gradient
class ConstantNode(CompNode):
def __init__(self, value, tape):
self.value = value
super().__init__(tape)
def forward(self):
self.output = self.value
def backward(self):
# nothing to do here
pass
class Multiply(CompNode):
def __init__(self, left : CompNode, right : CompNode, tape):
self.left = left
self.right = right
super().__init__(tape)
def forward(self):
self.output = self.left.output * self.right.output
# has to know how to locally differentiate multiplication
def backward(self):
self.left.add_gradient(self.right.output * self.gradient)
self.right.add_gradient(self.left.output * self.gradient)
class Tape:
def __init__(self):
self.computations = []
def add(self, compNode : CompNode):
self.computations.append(compNode)
def forward(self):
for compNode in self.computations:
compNode.forward()
def backward(self):
# first initialize all gradients to zero
for compNode in self.computations:
compNode.set_gradient(0)
# we need to invert the order
self.computations.reverse()
# last node gets a default value of one for the gradient
self.computations[0].set_gradient(1)
for compNode in self.computations:
compNode.backward()
class Square(CompNode):
def __init__(self, x : CompNode, tape : Tape):
self.x = x
super().__init__(tape)
def forward(self):
self.output = self.x.output**2
# has to know how to locally differentiate x^2
def backward(self):
self.x.add_gradient( (2*self.x.output) * self.gradient)
# first, we need new functions for inverting a node's output, the sigmoid, and an Add operation
class Invert(CompNode):
def __init__(self, x : CompNode, tape : Tape):
self.x = x
super().__init__(tape)
def forward(self):
self.output = (-1)*self.x.output
# has to know how to locally differentiate x * (-1)
def backward(self):
self.x.add_gradient( (-1) * self.gradient)
class Add(CompNode):
def __init__(self, left : CompNode, right : CompNode, tape):
self.left = left
self.right = right
super().__init__(tape)
def forward(self):
self.output = self.left.output + self.right.output
# has to know how to locally differentiate addition (SPOILER: it just distributes its incoming gradient)
# d (l + r) / d l = 1
# d (l + r) / d r = 1
def backward(self):
self.left.add_gradient(self.gradient)
self.right.add_gradient(self.gradient)
class Sigmoid(CompNode):
def __init__(self, x : CompNode, tape : Tape):
self.x = x
super().__init__(tape)
def forward(self):
self.output = 1. / (1. + np.exp(-self.x.output))
# has to know how to locally differentiate sigmoid (which is easy, given the output)
# d sigmoid(x) / d x = sigmoid(x)*(1-sigmoid(x))
def backward(self):
local_gradient = self.output * (1. - self.output)
self.x.add_gradient( local_gradient * self.gradient)
###Output
_____no_output_____
###Markdown
Now we just need to build the forward calculations using this framework
###Code
gt = Tape()
# inputs, targets, and weights are our starting
# points
x1 = ConstantNode(3.,gt)
x2 = ConstantNode(1.,gt)
w11, w21 = ConstantNode(6.,gt), ConstantNode(-2.,gt)
w12, w22 = ConstantNode(-3.,gt), ConstantNode(5.,gt)
v11, v21 = ConstantNode(1.,gt), ConstantNode(0.25,gt)
v12, v22 = ConstantNode(-2.,gt), ConstantNode(2.,gt)
t1 = ConstantNode(1.,gt)
t2 = ConstantNode(0.,gt)
# calculating the hidden layer
h1_in = Add(Multiply(x1, w11, gt), Multiply(x2, w21, gt), gt)
h2_in = Add(Multiply(x1, w12, gt), Multiply(x2, w22, gt), gt)
h1, h2 = Sigmoid(h1_in, gt), Sigmoid(h2_in, gt)
# calculating the output layer
y1_in = Add(Multiply(h1, v11, gt), Multiply(h2, v21, gt), gt)
y2_in = Add(Multiply(h1, v12, gt), Multiply(h2, v22, gt), gt)
y1, y2 = Sigmoid(y1_in, gt), Sigmoid(y2_in, gt)
t1_inv = Invert(t1, gt)
t2_inv = Invert(t2, gt)
e1 = Add(y1, t1_inv, gt)
e2 = Add(y2, t2_inv, gt)
l = Add(Square(e1, gt), Square(e2,gt), gt)
gt.forward()
# now we can just play it backwards and inspect the results
gt.backward()
print("First layer gradients by framework")
print(w11.gradient, w12.gradient)
print(w21.gradient, w22.gradient)
print("--")
print("First layer gradients manually")
print(grad_w11, grad_w12)
print(grad_w21, grad_w22)
###Output
First layer gradients by framework
-5.34381483665323e-08 0.0014201436720081408
-1.7812716122177433e-08 0.0004733812240027136
--
First layer gradients manually
-5.34381483665323e-08 0.0014201436720081408
-1.7812716122177433e-08 0.0004733812240027136
###Markdown
Now, let's work out that example using PyTorch
###Code
import torch
x1 = torch.tensor(3., requires_grad=False)
x2 = torch.tensor(1., requires_grad=False)
w11 = torch.tensor(6., requires_grad=True)
w21 = torch.tensor(-2., requires_grad=True)
w12 = torch.tensor(-3., requires_grad=True)
w22 = torch.tensor(5., requires_grad=True)
v11 = torch.tensor(1., requires_grad=True)
v21 = torch.tensor(0.25, requires_grad=True)
v12 = torch.tensor(-2., requires_grad=True)
v22 = torch.tensor(2., requires_grad=True)
t1 = torch.tensor(1., requires_grad=False)
t2 = torch.tensor(0., requires_grad=False)
# calculating the hidden layer
h1 = torch.sigmoid(w11*x1 + w21*x2)
h2 = torch.sigmoid(w12*x1 + w22*x2)
# calculating the output layer
y1 = torch.sigmoid(v11*h1 + v21*h2)
y2 = torch.sigmoid(v12*h1 + v22*h2)
e1 = y1 - t1
e2 = y2 - t2
# the loss function
l = e1**2 + e2**2
l.backward()
print("First layer gradients by framework")
print(w11.grad, w12.grad)
print(w21.grad, w22.grad)
print("--")
print("First layer gradients manually")
print(grad_w11, grad_w12)
print(grad_w21, grad_w22)
# with higher precision
import torch
x1 = torch.tensor(3., requires_grad=False,dtype=torch.float64)
x2 = torch.tensor(1., requires_grad=False,dtype=torch.float64)
w11 = torch.tensor(6., requires_grad=True,dtype=torch.float64)
w21 = torch.tensor(-2., requires_grad=True,dtype=torch.float64)
w12 = torch.tensor(-3., requires_grad=True,dtype=torch.float64)
w22 = torch.tensor(5., requires_grad=True,dtype=torch.float64)
v11 = torch.tensor(1., requires_grad=True,dtype=torch.float64)
v21 = torch.tensor(0.25, requires_grad=True,dtype=torch.float64)
v12 = torch.tensor(-2., requires_grad=True,dtype=torch.float64)
v22 = torch.tensor(2., requires_grad=True,dtype=torch.float64)
t1 = torch.tensor(1., requires_grad=False,dtype=torch.float64)
t2 = torch.tensor(0., requires_grad=False,dtype=torch.float64)
# calculating the hidden layer
h1 = torch.sigmoid(w11*x1 + w21*x2)
h2 = torch.sigmoid(w12*x1 + w22*x2)
# calculating the output layer
y1 = torch.sigmoid(v11*h1 + v21*h2)
y2 = torch.sigmoid(v12*h1 + v22*h2)
e1 = y1 - t1
e2 = y2 - t2
# the loss function
l = e1**2 + e2**2
l.backward()
print("First layer gradients by framework")
print(w11.grad, w12.grad)
print(w21.grad, w22.grad)
print("--")
print("First layer gradients manually")
print(grad_w11, grad_w12)
print(grad_w21, grad_w22)
###Output
First layer gradients by framework
tensor(-5.3438e-08, dtype=torch.float64) tensor(0.0014, dtype=torch.float64)
tensor(-1.7813e-08, dtype=torch.float64) tensor(0.0005, dtype=torch.float64)
--
First layer gradients manually
-5.34381483665323e-08 0.0014201436720081408
-1.7812716122177433e-08 0.0004733812240027136
###Markdown
A vectorized implementation

Here $X$ contains a row for every training instance, $W$ is a weight matrix, and activations are applied componentwise.
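In matrix form (formulas added here for clarity, matching the code below): $$H = \sigma(XW), \qquad Y = \sigma(HV), \qquad L_i = \sum_j \left(Y_{ij} - T_{ij}\right)^2,$$ with the sigmoid $\sigma$ applied elementwise and one row per training instance.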
###Code
# first, the training data
X = np.array( [[3., 1. ]])
T = np.array( [[1., 0. ]])
# then the weights
W = np.array([[6., -3.], [-2., 5.]])
V = np.array([[1., -2.], [0.25, 2.]])
# now the forward pass
H_in = np.dot(X,W)
H = sigmoid(H_in)
# ----
Y_in = np.dot(H,V)
Y = sigmoid(Y_in)
print(Y)
# now for the loss function per training instance
# simply apply componentwise subtraction
E = Y - T
# square each term
E_sq = E**2
# sum up per row (keep dimensions as they are)
L = np.sum(E_sq, axis=1, keepdims=True)
print(L)
grad_Y = 2*E
grad_Y_in = (Y) * (1-Y) * grad_Y
print(grad_Y_in)
###Output
[[-0.1051877 0.02654905]]
###Markdown
now for the weights: Let's generalize this to get an outer product:$$\frac{\partial L}{ \partial v_{i,j}} = \frac{\partial L}{\partial y_j^{(\mathit{in})}} \cdot \underbrace{ \frac{\partial y_j^{(\mathit{in})}}{\partial v_{i,j}} }_{h_i}$$
###Code
grad_V = np.dot(H.T, grad_Y_in)
print(grad_V)
###Output
[[-0.10518769 0.02654905]
[-0.00189193 0.00047752]]
###Markdown
And now again back to the hidden outputs in vector notation
###Code
grad_H = np.dot(grad_Y_in, V.T)
print(grad_H)
# now on to the hidden layer
grad_H_in = (H * (1.-H))*grad_H # sigmoid
grad_W = np.dot(X.T, grad_H_in) # outer product
grad_X = np.dot(grad_H_in, W.T) # not really necessary
print(grad_W)
###Output
[[-5.34381484e-08 1.42014367e-03]
[-1.78127161e-08 4.73381224e-04]]
###Markdown
A Neural Network class

Time to wrap this all up into a nice class that performs the forward and backward pass and can be used for training.
###Code
import numpy as np
def sigmoid(z):
return 1./(1. + np.exp(-z))
class NeuralNetwork:
def __init__(self, input_dim=2, hidden_dim=2, output_dim=2):
self.W = 0.1 * np.random.rand(input_dim, hidden_dim)
self.V = 0.1 * np.random.rand(hidden_dim, output_dim)
# expects X to be a (n X input_dim) matrix
def forward(self, X):
self.X = X # keep for backward pass
self.H_in = np.dot(X, self.W)
self.H = sigmoid(self.H_in)
# ----
self.Y_in = np.dot(self.H, self.V)
self.Y = sigmoid(self.Y_in)
return self.Y
# expects T to be a (n X output_dim) matrix
def backward(self, T):
E = self.Y - T
E_sq = E**2
self.L = np.sum(E_sq, axis=1, keepdims=True)
grad_Y = 2*E
# -----
grad_Y_in = (self.Y) * (1-self.Y) * grad_Y # sigmoid
grad_V = np.dot(self.H.T, grad_Y_in) # outer product
grad_H = np.dot(grad_Y_in, self.V.T)
# -----
grad_H_in = (self.H * (1.-self.H))*grad_H # sigmoid
grad_W = np.dot(self.X.T, grad_H_in) # outer product
return grad_W, grad_V
net = NeuralNetwork()
net.W = W
net.V = V
net.forward(X)
g_W, g_V = net.backward(T)
print(g_W)
print(g_V)
###Output
[[-5.34381484e-08 1.42014367e-03]
[-1.78127161e-08 4.73381224e-04]]
[[-0.10518769 0.02654905]
[-0.00189193 0.00047752]]
###Markdown
Applying gradients for weight updates Let's now change some weights to improve the network's performance! Recall the training data
###Code
net = NeuralNetwork()
net.W = W.copy()
net.V = V.copy()
# first training instance
X = np.array( [[3., 1. ]])
T = np.array( [[1., 0. ]])
net.forward(X)
g_W_1, g_V_1 = net.backward(T)
# initial loss
init_loss_1 = net.L
# second training instance
X = np.array( [[-1., 4. ]])
T = np.array( [[0., 1. ]])
net.forward(X)
g_W_2, g_V_2 = net.backward(T)
# initial loss
init_loss_2 = net.L
g_W, g_V = g_W_1 + g_W_2, g_V_1 + g_V_2
# update weights
alpha = 0.5
net.W -= alpha * g_W
net.V -= alpha * g_V
print(net.W)
print(net.V)
X = np.array( [[3., 1. ]])
T = np.array( [[1., 0. ]])
y = net.forward(X)
print(y)
net.backward(T)
print("Old loss for instance #1:", init_loss_1)
print("New loss for instance #1:", net.L)
new_loss_1 = net.L
X = np.array( [[-1., 4. ]])
T = np.array( [[0., 1. ]])
y = net.forward(X)
print(y)
net.backward(T)
print("Old loss for instance #2:", init_loss_2)
print("New loss for instance #2:", net.L)
new_loss_2 = net.L
# vanishing gradients
print(grad_W)
print(init_loss_1+init_loss_2)
print(new_loss_1+new_loss_2)
# iterate for 200 epochs
train_X = np.array( [[3., 1. ], [-1., 4.]])
train_T = np.array( [[1., 0. ], [0., 1.]])
n_epochs = 200
alpha = 0.5
for n in range(n_epochs):
# grad_W
grad_W = np.zeros_like(net.W)
grad_V = np.zeros_like(net.V)
for i in range(train_X.shape[0]):
X = train_X[i, :].reshape(1,-1)
T = train_T[i, :].reshape(1,-1)
net.forward(X)
grad_W_i, grad_V_i = net.backward(T)
grad_W += grad_W_i
grad_V += grad_V_i
# apply gradient
net.W -= alpha * grad_W
net.V -= alpha * grad_V
# inspect the trained net's outputs
print(net.forward(np.array([3.,1.])))
print(net.forward(np.array([-1.,4.])))
print(net.W)
print(net.V)
###Output
_____no_output_____ |
Preprocessing/Pre-processing_byChapter.ipynb | ###Markdown
Pre-processing Texts

This notebook is used to clean the text and put it into chunks based on the model requirements.

Current process:
* Clean off the top and bottom unnecessary words, e.g. table of contents
* Put into rows with the author name as a CSV
###Code
import nltk
import numpy as np
import random
import pandas as pd
import re
from collections import OrderedDict, defaultdict
nltk.download('punkt')
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
###Output
_____no_output_____
###Markdown
Support Functions
###Code
def split_by_chapter(text):
thisdict = defaultdict()
# ALERT - manual check if there is 3 digit chapters
# split by chapter with one or two digits number with newline
split_text = re.split("chapter (\d{1,2})+", text, flags=re.IGNORECASE)
i = 0
for s in split_text:
if i%2 ==0 or i==0:
Chapter = "chapter_" + str(int(i/2+1))
thisdict[Chapter] = s
i+=1
return thisdict
###Output
_____no_output_____
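###Markdown
A quick check of the splitter on a tiny synthetic string (added; not from the original books). Note the text is expected to start after the "Chapter 1" heading, which is what the cleaning step further below produces.
###Code
sample = "first chapter text Chapter 2 second chapter text Chapter 3 third chapter text"
for name, body in split_by_chapter(sample).items():
    print(name, '->', repr(body))
# expected keys: chapter_1, chapter_2, chapter_3
###Output
_____no_output_____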
###Markdown
Read Data
###Code
# ALERT - manual check your file location
f = open("../Dataset/original/Jane/Pride_and_Prejudice_Jane_Austen.txt", "r", encoding="utf8")
book = f.read().replace('\n', ' ').replace('_', '')
###Output
_____no_output_____
###Markdown
Clean top and botten unnecessary words
###Code
# ALERT - manual check where you want to start
#book = book.split('chapter 1 ')[1]
book = book.split('Chapter 1 ')[1]
###Output
_____no_output_____
###Markdown
Clean words, space, newline
###Code
book = re.sub('([.,!?()""])', r' \1 ', book)
book_dict = split_by_chapter(book)
for key,item in book_dict.items():
## remove newline and space at beginning and end
book_dict[key] = re.sub(' +', ' ', item)
book_dict.keys()
# ALERT - manual check quickly to scan through
book_dict['chapter_18']
import csv
Author = "Jane_Austen"
Book = "Pride_and_Prejudice"
csvname = '../Dataset/cleaned/bychapter_temp/Jane/'+Author+'_'+Book+'_chapter.csv'
with open(csvname, 'w') as csv_file:
fieldnames = ['chapter','text']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
writer = csv.writer(csv_file)
for key, value in book_dict.items():
writer.writerow([key, value])
###Output
_____no_output_____ |
Part5Improve/04-kNN/07-Feature-Scaling/07-Feature-Scaling.ipynb | ###Markdown
Normalization

Why do we need to normalize values?

> Below (in the original, a table of tumour-prediction data) the detection time and the tumour size are not on the same order of magnitude, so the distance between samples in kNN would be dominated by the detection time, which is clearly biased.

The idea of data normalization

> Map all the data onto the same scale.

The most common scheme: min-max normalization

> Min-max normalization: map all the data into the range 0-1. (The original illustration images are omitted here.)

Min-max normalization suits data with clear boundaries and is strongly affected by outliers. For example, in an income survey of 100 people, if one of them is a billionaire and the rest are broke, then after min-max scaling everyone else becomes negligible, which is unreasonable: the result is driven by the outlier. Hence a more reasonable scheme, mean-variance normalization (standardization).

Mean-variance normalization (standardization)

> Map all the data onto a distribution with mean 0 and variance 1. Suitable when the data has no clear boundaries and may contain extreme values.
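The next cells implement min-max normalization first. For reference (the original formula image is not included here), it maps each value via $$x_{scale} = \frac{x - x_{min}}{x_{max} - x_{min}},$$ which lands every feature in $[0, 1]$.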
###Code
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Below, normalization of a one-dimensional array - the simple case.
###Code
x = np.random.randint(0, 100, size=100) # 100 random integers in the range 0~100
x
(x - np.min(x)) / (np.max(x) - np.min(x)) # min-max normalization applied to every element of x
###Output
_____no_output_____
###Markdown
Below, normalization of a two-dimensional array: simply normalize each column.
###Code
X = np.random.randint(0, 100, (50, 2))
X[:10] # look at the first 10 rows
X[:10, :] # the first 10 rows of X
X = np.array(X, dtype=float) # values will be scaled into 0~1, so convert to float
###Output
_____no_output_____
###Markdown
Below, min-max normalization is applied to each column.
###Code
X[:, 0] = (X[:, 0] - np.min(X[:, 0])) / (np.max(X[:, 0]) - np.min(X[:, 0])) # scale the first column
X[:, 1] = (X[:, 1] - np.min(X[:, 1])) / (np.max(X[:, 1]) - np.min(X[:, 1])) # scale the second column; with n columns you would loop
X[:10]
plt.scatter(X[:, 0], X[:, 1])
plt.show()
# the plot below shows that every point has been mapped into the 0~1 range
np.mean(X[:, 0]) # close to 0.5
np.std(X[:, 0])
np.mean(X[:, 1]) # close to 0.5
np.std(X[:, 1])
np.max(X[:, 0])
np.min(X[:, 0])
np.max(X[:, 1])
np.min(X[:, 1])
###Output
_____no_output_____
###Markdown
Mean-variance normalization (Standardization)
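For reference (the original formula image is not included here), mean-variance standardization computes $$x_{scale} = \frac{x - \bar{x}}{S},$$ where $\bar{x}$ is the column mean and $S$ its standard deviation, which is exactly what the next cell does column by column.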
###Code
X2 = np.random.randint(0, 100, (50, 2))
X2 = np.array(X2, dtype=float)
X2[:, 0] = (X2[:, 0] - np.mean(X2[:, 0])) / np.std(X2[:, 0]) # standardize the first column
X2[:, 1] = (X2[:, 1] - np.mean(X2[:, 1])) / np.std(X2[:, 1]) # standardize the second column
plt.scatter(X2[:, 0], X2[:, 1])
plt.show()
# the plot below shows that standardization does not confine the values to the 0~1 range
np.mean(X2[:, 0]) # approaches 0
np.std(X2[:, 0]) # approaches 1
np.mean(X2[:, 1]) # approaches 0
np.std(X2[:, 1]) # approaches (or equals) 1, i.e. mean 0 and variance 1 as required
###Output
_____no_output_____ |
qt_lab_mms.ipynb | ###Markdown
M/M/s/$\infty$ queues

**In this lab you will learn:**
* How to convert M/M/s queuing equations into Python functions
* How to analyse M/M/s queuing systems to inform health system improvement and design
###Code
from scipy import math
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
An **M/M/s** system is a queuing process having a Poisson arrival pattern and $s$ servers with $s$ i.i.d. exponential service times. Service times do not depend on the state of the system. The system (i.e. queue + service) has infinite capacity and a FIFO queue discipline.

Traffic Intensity
\begin{equation*}\rho = \frac{\lambda}{s\mu}\label{eq:rho} \tag{1}\end{equation*}

Inference about the number of patients in the system
\begin{equation*}P_0 = \left[ \sum_{n=0}^{s-1} \frac{\left(\lambda/ \mu \right)^n}{n!} + \frac{\left( \lambda / \mu \right)^s}{s!\left(1-\rho\right)} \right]^{-1}\label{eq:p0} \tag{2}\end{equation*}
\begin{equation} P_n = \left\{ \begin{array}{l} \dfrac{\left( \lambda / \mu \right)^n}{n!}p_0, \>\>\>\> n \leq s\\ \\ \dfrac{\left( \lambda / \mu \right)^n}{s!s^{n-s}}p_0, \>\>\>\> n > s \end{array} \right.\label{eq:pn} \tag{3} \end{equation}

Expected number of customers in the queue for service
\begin{equation}L_q = \frac{p_0\left(\lambda / \mu \right)^s \rho}{s!\left(1 - \rho\right)^2}\tag{4}\end{equation}

Little's Formula
\begin{equation}L_s = \lambda W_s \\ L_q = \lambda W_q\tag{5a, 5b}\end{equation}
\begin{equation*} W_s = W_q + \dfrac{1}{\mu} \tag{6}\end{equation*}
\begin{equation*} L_s = L_q + \dfrac{\lambda}{\mu} \tag{7}\end{equation*}

Hospital Pharmacy example

During the afternoon, a pharmacy based in a large hospital has 3 trained pharmacists on duty to check and fulfill patient prescriptions for drugs to take home with them at discharge. Each pharmacist is able to handle 15 transactions per hour. The service times are exponentially distributed. During this busy period, prescriptions arrive at the pharmacy according to a Poisson process, at a mean rate of 40 per hour.

**Questions**
1. What is the probability that there are more than 3 prescriptions in the pharmacy at any one time?
2. Calculate the expected number of drug prescriptions waiting to be fulfilled.
3. Calculate the expected number of drug prescriptions in the system.
4. Calculate the expected prescription turnaround time.

Example Solution:

This is an M/M/3 system with $\lambda=40$ and $\mu = 15$.

Is the system in control?

Let's first check that steady state conditions hold by calculating the traffic intensity $\rho$.
\begin{equation}\rho = \frac{\lambda}{s\mu}\label{eq:rho} \tag{1}\end{equation}
Steady state conditions hold if $\rho < 1$
###Code
def traffic_intensity(_lambda, mu, s):
'''
calculate the traffic intensity (server utilisation)
of an M/M/s queue
'''
return _lambda / (s * mu)
#calculate traffic intensity
LAMBDA = 40
MU = 15
S = 3
rho = traffic_intensity(LAMBDA, MU, S)  # note the argument order: (_lambda, mu, s)
rho
###Output
_____no_output_____
###Markdown
**Conclusion**: $\rho < 1$, so steady state conditions will hold.

1. Calculate the probability that there are more than 3 prescriptions in the pharmacy at any one time

Steady state probabilities are given by
\begin{equation*}P_0 = \left[ \sum_{n=0}^{s-1} \frac{\left(\lambda/ \mu \right)^n}{n!} + \frac{\left( \lambda / \mu \right)^s}{s!\left(1-\rho\right)} \right]^{-1}\label{eq:p0} \tag{2}\end{equation*}
\begin{equation} P_n = \left\{ \begin{array}{l} \dfrac{\left( \lambda / \mu \right)^n}{n!}p_0, \>\>\>\> n \leq s\\ \\ \dfrac{\left( \lambda / \mu \right)^n}{s!s^{n-s}}p_0, \>\>\>\> n > s \end{array} \right.\label{eq:pn} \tag{3} \end{equation}
###Code
def prob_system_empty(_lambda, mu, s):
'''
The probability that a M/M/s/infinity queue is empty
'''
p0 = 0.0
rho = traffic_intensity(_lambda, mu, s)
for n in range(s):
p0 += ((_lambda / mu) ** n) / math.factorial(n)
p0 += ((_lambda / mu) ** s) / (math.factorial(s) * (1 - rho))
return p0**-1
p0 = prob_system_empty(LAMBDA, MU, S)
print(f'p0 = {p0:.2f}')
def prob_n_in_system(n, _lambda, mu, s, return_all_solutions=True):
'''
Calculate the probability that n customers
in the system (queuing + service)
Parameters:
--------
n: int,
Number of customers in the system
_lambda: float
Mean arrival rate to system
mu: float
Mean service rate
s: int
number of servers
return_all_solutions: bool, optional (default=True)
Returns all solutions for 0,1 ... n
Returns:
------
np.ndarray of solutions
'''
p0 = prob_system_empty(_lambda, mu, s)
probs = [p0]
#for n <= s
for i in range(1, min(s+1, n+1)):
pn = (((_lambda / mu)**i) / math.factorial(i)) * p0
probs.append(pn)
#for n > s
for i in range(s+1, n+1):
        pn = (((_lambda / mu)**i) / (math.factorial(s) * (s**(i-s)))) * p0  # exponent uses i, the current count
probs.append(pn)
if return_all_solutions:
return np.array(probs)
else:
        return probs[-1]  # just P(X=n)
prob = prob_n_in_system(3, LAMBDA, MU, S)
#returns: [p0, p1, p2, p3] => probabilities of 3 or less drug orders
prob.sum()
#prob.sum() => p(X <=3)
more_than_three = 1 - prob.sum()
print(f'P(X > 3) = {more_than_three:.2f}')
###Output
P(X > 3) = 0.71
###Markdown
2. Expected number of drug prescriptions waiting to be fulfilled

$L_q$ = Expected number of customers in the queue for service
\begin{equation}L_q = \frac{p_0\left(\lambda / \mu \right)^s \rho}{s!\left(1 - \rho\right)^2}\tag{4}\end{equation}
###Code
def mean_queue_length(_lambda, mu, s):
'''
Mean length of queue Lq
'''
p0 = prob_system_empty(_lambda, mu, s)
rho = traffic_intensity(_lambda, mu, s)
lq = (p0 * ((_lambda / mu)**s) * rho) / (math.factorial(s) * (1 - rho)**2)
return lq
lq = mean_queue_length(LAMBDA, MU, S)
print(f'lq = {lq:.2f}')
###Output
lq = 6.38
###Markdown
3. Expected number of drug prescriptions in the system

$L_s$ = Expected number of customers in the system.

We have already calculated $L_q$, therefore we will use
\begin{equation}L_s = L_q + \frac{\lambda}{\mu}\tag{7}\end{equation}
###Code
ls = lq + (LAMBDA / MU)
print(f'Ls = {ls:.2f}')
###Output
Ls = 9.05
###Markdown
4. Expected prescription turnaround time

Using:
\begin{equation}L_s = \lambda W_s\tag{5a}\end{equation}
\begin{equation}\frac{L_s}{\lambda} = W_s\end{equation}
###Code
ws = ls / LAMBDA
print(f'Ws = {ws:.2f}')
###Output
Ws = 0.23
###Markdown
MMsQueue Class

A somewhat cleaner way of analytic modelling of queues is to implement a class. An example implementation is below.
###Code
class MMsQueue(object):
'''
M/M/S/inf/inf/FIFO system
'''
def __init__(self, _lambda, mu, s):
'''
Constructor
Parameters:
-------
_lambda: float
The arrival rate of customers to the facility
mu: float
The service rate of the facility
s: int
The number of servers
'''
self._lambda = _lambda
self.mu = mu
self.s = s
self.rho = self._get_traffic_intensity()
#create a dict of performance metrics
#solve for L_q then use little's law to calculate remaining KPIs
self.metrics = {}
self.metrics[r'$\rho$'] = self.rho
self.metrics[r'$L_q$'] = self._get_mean_queue_length()
self.metrics[r'$L_s$'] = self.metrics[r'$L_q$'] + (_lambda / mu)
self.metrics[r'$W_s$'] = self.metrics[r'$L_s$'] / _lambda
self.metrics[r'$W_q$'] = self.metrics[r'$W_s$'] - (1 / mu)
def _get_traffic_intensity(self):
'''
calculate the traffic intensity (server utilisation)
of an M/M/s queue
'''
return self._lambda / (self.s * self.mu)
def _get_mean_queue_length(self):
'''
Mean length of queue Lq
'''
p0 = self.prob_system_empty()
lq = (p0 * ((self._lambda / self.mu)**self.s) *
self.rho) / (math.factorial(self.s) * (1 - self.rho)**2)
return lq
def prob_system_empty(self):
'''
The probability that a M/M/s/infinity queue is empty
'''
p0 = 0.0
for n in range(self.s):
p0 += ((self._lambda / self.mu) ** n) / math.factorial(n)
p0 += ((self._lambda / self.mu) ** self.s) / (math.factorial(self.s)
* (1 - self.rho))
return p0**-1
def prob_n_in_system(self, n, return_all_solutions=True, as_frame=True):
'''
Calculate the probability that n customers
in the system (queuing + service)
Parameters:
--------
n: int,
Number of customers in the system
return_all_solutions: bool, optional (default=True)
Returns all solutions for 0,1 ... n
as_frame: bool, optional (default=True)
If True, returns all solutions in a pd.DataFrame
else returns all solutions as np.ndarray
has no effect is return_all_solutions == False
Returns:
------
np.ndarray of solutions
'''
p0 = self.prob_system_empty()
probs = [p0]
#for n <= s
for i in range(1, min(self.s+1, n+1)):
pn = (((self._lambda / self.mu)**i) / math.factorial(i)) * p0
probs.append(pn)
#for n > s
for i in range(self.s+1, n+1):
            pn = (((self._lambda / self.mu)**i) / (math.factorial(self.s)
                                                   * (self.s**(i-self.s)))) * p0
probs.append(pn)
if return_all_solutions:
results = np.array(probs)
if as_frame:
return pd.DataFrame(results, columns=['P(X=n)'])
else:
return results
else:
            return probs[-1]  # just P(X=n)
def summary_frame(self):
'''
Return performance metrics
Returns:
---------
pd.DataFrame
'''
df = pd.Series(self.metrics).to_frame()
df.columns = ['performance']
return df
model = MMsQueue(LAMBDA, MU, S)
model.summary_frame()
model.prob_n_in_system(5)
#county hospital example
model = MMsQueue(2, 3, 2)
model.summary_frame()
model.prob_n_in_system(2)
###Output
_____no_output_____ |
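###Markdown
As an added extension (not part of the original lab), the same class can give the Erlang C probability that an arriving prescription has to queue, $P(wait) = \dfrac{(\lambda/\mu)^s}{s!\,(1-\rho)} p_0$:
###Code
def prob_customer_waits(queue):
    '''P(an arrival has to wait) = P(N >= s) for an M/M/s queue (Erlang C).'''
    p0 = queue.prob_system_empty()
    a = queue._lambda / queue.mu
    return (a ** queue.s) / (math.factorial(queue.s) * (1 - queue.rho)) * p0

pharmacy = MMsQueue(40, 15, 3)
print(f'P(prescription waits) = {prob_customer_waits(pharmacy):.2f}')
# consistency check: Lq should equal P(wait) * rho / (1 - rho)
print(prob_customer_waits(pharmacy) * pharmacy.rho / (1 - pharmacy.rho))
###Output
_____no_output_____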
nbs/99_manuscript/lvs/lv116/lv116-pathways.ipynb | ###Markdown
Description Generates manubot tables for pathways enriched (from the MultiPLIER models) given an LV name (in Settings below). Modules loading
###Code
%load_ext autoreload
%autoreload 2
import re
from pathlib import Path
import pandas as pd
from entity import Trait
import conf
###Output
_____no_output_____
###Markdown
Settings
###Code
LV_NAME = "LV116"
assert (
conf.MANUSCRIPT["BASE_DIR"] is not None
), "The manuscript directory was not configured"
OUTPUT_FILE_PATH = conf.MANUSCRIPT["CONTENT_DIR"] / "50.00.supplementary_material.md"
display(OUTPUT_FILE_PATH)
assert OUTPUT_FILE_PATH.exists()
###Output
_____no_output_____
###Markdown
Load MultiPLIER summary
###Code
multiplier_model_summary = pd.read_pickle(conf.MULTIPLIER["MODEL_SUMMARY_FILE"])
multiplier_model_summary.shape
multiplier_model_summary.head()
###Output
_____no_output_____
###Markdown
LV pathways
###Code
lv_pathways = multiplier_model_summary[
multiplier_model_summary["LV index"].isin((LV_NAME[2:],))
& (
(multiplier_model_summary["FDR"] < 0.05)
# | (multiplier_model_summary["AUC"] >= 0.75)
)
]
lv_pathways.shape
lv_pathways = lv_pathways[["pathway", "AUC", "FDR"]].sort_values("FDR")
lv_pathways = lv_pathways.assign(AUC=lv_pathways["AUC"].apply(lambda x: f"{x:.2f}"))
lv_pathways = lv_pathways.assign(FDR=lv_pathways["FDR"].apply(lambda x: f"{x:.2e}"))
lv_pathways = lv_pathways.rename(
columns={
"pathway": "Pathway",
}
)
lv_pathways.head()
###Output
_____no_output_____
###Markdown
Split names
###Code
lv_pathways["Pathway"] = lv_pathways["Pathway"].apply(lambda x: " ".join(x.split("_")))
lv_pathways.head()
###Output
_____no_output_____
###Markdown
Fill empty
###Code
if lv_pathways.shape[0] == 0:
lv_pathways.loc[0, "Pathway"] = "No pathways significantly enriched"
lv_pathways = lv_pathways.fillna("")
###Output
_____no_output_____
###Markdown
Save
###Code
# result_set is either phenomexcan or emerge
LV_FILE_MARK_TEMPLATE = "<!-- {lv}:multiplier_pathways:{position} -->"
TABLE_CAPTION = (
"Table: Pathways aligned to {lv_name} from the MultiPLIER models. {table_id}"
)
TABLE_CAPTION_ID = "#tbl:sup:multiplier_pathways:{lv_name_lower_case}"
# start
lv_file_mark_start = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position="start")
display(lv_file_mark_start)
# end
lv_file_mark_end = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position="end")
display(lv_file_mark_end)
new_content = lv_pathways.to_markdown(index=False, disable_numparse=True)
# add table caption
table_caption = TABLE_CAPTION.format(
lv_name=LV_NAME,
table_id="{" + TABLE_CAPTION_ID.format(lv_name_lower_case=LV_NAME.lower()) + "}",
)
display(table_caption)
new_content += "\n\n" + table_caption
full_new_content = (
lv_file_mark_start + "\n" + new_content.strip() + "\n" + lv_file_mark_end
)
with open(OUTPUT_FILE_PATH, "r", encoding="utf8") as f:
file_content = f.read()
new_file_content = re.sub(
lv_file_mark_start + ".*?" + lv_file_mark_end,
full_new_content,
file_content,
flags=re.DOTALL,
)
with open(OUTPUT_FILE_PATH, "w", encoding="utf8") as f:
f.write(new_file_content) # .replace("\beta", r"\beta"))
###Output
_____no_output_____ |
sample5.2_Singlemodel_starfrac.ipynb | ###Markdown
Sample 5.2 Single parameter Bayesian model---Fraction of stars in a sky field
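Background note (added for orientation): with a uniform $Beta(1,1)$ prior on the star fraction $\theta$ and $k$ stars observed out of $n$ sources, the posterior is $$p(\theta \mid y) = Beta(k+1,\; n-k+1), \qquad E(\theta \mid y) = \frac{k+1}{n+2},$$ and more generally a $Beta(\alpha, \beta)$ prior gives a $Beta(\alpha+k,\; \beta+n-k)$ posterior. This is what the `stats.beta.pdf` calls and the `E_th_y` expressions below compute.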
###Code
%matplotlib inline
#posterior distribution of binomial data
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib
import astropy.io.fits as fits
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
# star/galaxy ratio in a small field of sky
from astropy.table import Table
filename = 'sdss_field.fits'
sdss = Table.read(filename)
#the first 50 data
types = sdss["type"][0:50] #star if type =3, galaxy if type=6
Nstar = np.sum(types==3)
N = 50
print('N_star=%(d1)d, N_gal=%(d2)d\n' % {'d1':Nstar,'d2':np.sum(types==6)})
E_th_y = (Nstar+1.)/(N+2.)
print(r'First 50 sources: E(theta|y)=%(n).2f' % {'n': E_th_y})
#with uniform distribution
theta = np.arange(0,1,0.001)
p = stats.beta.pdf(theta,Nstar+1,N-Nstar+1)
fig = plt.figure(figsize=[4,4])
ax = fig.add_subplot(111)
ax.plot(theta,p,'k-')
ax.set_xlabel(r'$\theta$',fontsize=20)
# fig.show()
#with conjugate prior determined by the first 50 and use the 51:100 data
alpha = Nstar+1
beta = N-Nstar+1
types = sdss["type"][50:100]
Nstar2 = np.sum(types==3)
N2 = 50
print('N_star=%(d1)d, N_gal=%(d2)d\n' % {'d1':Nstar2,'d2':np.sum(types==6)})
E_th_y = (Nstar2+1.)/(N2+2.)
print(r'Second 50 sources: E(theta|y)=%(n).2f' % {'n': E_th_y})
#uniform prior
p2_0 = stats.beta.pdf(theta,Nstar2+1,N2-Nstar2+1)
#conjugate prior
p2 = stats.beta.pdf(theta,Nstar2+alpha,N2-Nstar2+beta)
print(r'Second 50 sources with the prior from the first 50: E(theta|y)=%(n).2f' %\
    {'n': (Nstar2+alpha)/float(N2+alpha+beta)})
fig = plt.figure(figsize=[4,4])
ax = fig.add_subplot(111)
ax.plot(theta,p,'k--')
ax.plot(theta,p2_0,'k-')
ax.plot(theta,p2,'r-')
ax.set_xlabel(r'$\theta$',fontsize=20)
# fig.show()
#when data increases prior play less role
alpha = Nstar+1
beta = N-Nstar+1
types = sdss["type"][50:600]
Nstar2 = np.sum(types==3)
N2 = 550
print ('N_star=%(d1)d, N_gal=%(d2)d\n' % {'d1':Nstar2,'d2':np.sum(types==6)})
E_th_y = (Nstar2+1.)/(N2+2.)
print ('550 sources with conjugate prior: theta=%(d1).4f' % {'d1':E_th_y})
print ('550 sources: theta=%(d1).4f' % {'d1':Nstar2/550.})
#uniform prior
p2_0 = stats.beta.pdf(theta,Nstar2+1,N2-Nstar2+1)
#conjugate prior
p2 = stats.beta.pdf(theta,Nstar2+alpha,N2-Nstar2+beta)
fig = plt.figure(figsize=[4,4])
ax = fig.add_subplot(111)
ax.plot(theta,p,'k--')
ax.plot(theta,p2_0,'k-')
ax.plot(theta,p2,'r-')
ax.set_xlabel(r'$\theta$',fontsize=20)
# fig.show()
###Output
N_star=363, N_gal=187
550 sources with conjugate prior: theta=0.6594
550 sources: theta=0.6600
|
tutorials/crypto.ipynb | ###Markdown
IntroductionThis notebook shows how to connect to a crypto exchange and receive a trade feed from it. We then start the run using this trade feed. Besides the support for Binance (see also the other notebook), roboquant includes support for most other crypto exchanges using the XChange library. This notebook demonstrates how to use that library.> XChange is a Java library providing a streamlined API for interacting with 60+ Bitcoin and Altcoin exchanges providing a consistent interface for trading and accessing market data.
###Code
%use @http://roboquant.org/roboquant-crypto.json
Welcome()
###Output
_____no_output_____
###Markdown
Add required libraries

When using the XChange integration, there are a few additional steps required to use a particular exchange:

1. Load the exchange specific libraries, like Bitstamp in this example. You can use the **@file:DependsOn** syntax for this. Make sure the version is the same as is included with roboquant.
2. Import both the roboquant packages (import org.roboquant.xchange.*) and the packages for the exchange that you want to connect to.

Check the [XChange](https://github.com/knowm/XChange) repository for more details on all the available exchanges. In this example we use Bitstamp.
###Code
@file:DependsOn("org.knowm.xchange:xchange-bitstamp:5.0.13")
@file:DependsOn("org.knowm.xchange:xchange-stream-bitstamp:5.0.13")
import info.bitrich.xchangestream.bitstamp.v2.BitstampStreamingExchange
import info.bitrich.xchangestream.core.StreamingExchangeFactory
###Output
_____no_output_____
###Markdown
Setup a Roboquant

We now create, as usual, the strategy we want to test. There is nothing specific to crypto trading that needs to be taken care of.
###Code
val strategy = EMACrossover.EMA_5_15
val roboquant = Roboquant(strategy, AccountSummary())
###Output
_____no_output_____
###Markdown
Define Feed

Now we create an instance of the Bitstamp exchange and connect to it. Then we are ready to use it as a feed and subscribe to one or more currency pairs using the *subscribeTicker* method.
###Code
val exchange = StreamingExchangeFactory.INSTANCE.createExchange(BitstampStreamingExchange::class.java)
exchange.connect().blockingAwait()
val feed = XChangeLiveFeed(exchange)
feed.availableAssets
feed.subscribeTicker("BTC_USD")
###Output
_____no_output_____
###Markdown
Run Live Test

All that remains is to start the run and let roboquant evaluate the strategy against the feed. We'll run it for 60 minutes, matching the timeframe in the code below. If the time that is displayed looks wrong, don't forget that roboquant uses a timezone independent representation for all internal time processing and not your local timezone.
###Code
val timeframe = Timeframe.next(60.minutes)
roboquant.run(feed, timeframe)
###Output
_____no_output_____
###Markdown
DisconnectAnd we disconnect from the exchange
###Code
exchange.disconnect().blockingAwait()
roboquant.broker.account.fullSummary()
###Output
_____no_output_____ |
2-Introductory Workshop/3-Basic-programming/1_qubits_gates_measurements_run.ipynb | ###Markdown
 Getting Started with QiskitHere, we provide an overview of working with Qiskit. The fundamental package of Qiskit is Terra that provides the basic building blocks necessary to program quantum computers. The fundamental unit of Qiskit is the [**quantum circuit**](https://en.wikipedia.org/wiki/Quantum_circuit). A basic workflow using Qiskit consists of two stages: **Build** and **Execute**. **Build** allows you to make different quantum circuits that represent the problem you are solving, and **Execute** that allows you to run them on different backends. After the jobs have been run, the data is collected and postprocessed depending on the desired output.In this notebook, we learn how to build a basic circuit We first import necessary libraries. These are tools that help you with coding your circuit in qiskit.
###Code
import numpy as np
from qiskit import *
%matplotlib inline
from qiskit.visualization import plot_histogram
###Output
_____no_output_____
###Markdown
1. Circuit Basics: Gates Building the circuitThe basic element needed for your first program is the QuantumCircuit. We begin by creating a `QuantumCircuit` comprised of one qubit and one classical bit. Why do we need a corresponding classical bit?
###Code
# Create a Quantum Circuit acting on a quantum register of one qubit
cr = ClassicalRegister(1)
qr = QuantumRegister(1)
circ = QuantumCircuit(qr,cr)
# Add an X gate on qubit 0
circ.x(qr[0])
# Add an H gate, putting the qubit in superposition
circ.h(qr[0])
###Output
_____no_output_____
###Markdown
2. Visualize Circuit You can visualize your circuit using Qiskit `QuantumCircuit.draw()`, which plots the circuit in the form found in many textbooks.
###Code
circ.draw(output = 'mpl')
###Output
_____no_output_____
###Markdown
We can have multiple qubits too! Here is an example:
###Code
# Create a Quantum Circuit acting on a quantum register of three qubits
cr2 = ClassicalRegister(3)
qr2 = QuantumRegister(3)
circ2 = QuantumCircuit(qr2,cr2)
circ2.x(qr2[0])
circ2.h(qr2[1])
circ2.x(qr2[2])
circ2.h(qr2[2])
circ2.draw(output = 'mpl')
###Output
_____no_output_____
###Markdown
3. Your turn Question 1: Create a quantum circuit comprising two qubits. Apply the X gate on the first qubit followed by a Hadamard gate. On the second qubit apply the Hadamard gate followed by the X gate. Question 2: Create a quantum circuit comprising five qubits. Apply the H gate on all the qubits. Question 3: Create a quantum circuit comprising 4 qubits. Apply the H gate on the first qubit, X on the second qubit, two Hadamards on the third qubit, and a Hadamard and an X gate on the fourth qubit. 4. MeasurementsThis signifies the end of your qubit's journey - once a qubit is measured, it cannot be used again. The measured qubits converge to a classical value (therefore we provide two arguments to the measure function)
###Code
circ.measure(qr, cr)
circ.draw(output='mpl')
###Output
_____no_output_____
###Markdown
5. Your turnAdd measurements to all the circuits that you've built 6. Run your experiment on a simulator
###Code
# We can choose to run our circuit on the various backends available. We choose the qasm simulator
simulator = Aer.get_backend('qasm_simulator')
result = execute(circ, simulator, shots=1000).result()
counts = result.get_counts(circ)
print(counts)
###Output
{'1': 495, '0': 505}
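###Markdown
The counts can also be visualized with the `plot_histogram` function imported at the start of this notebook (a minimal sketch; it returns a matplotlib figure):
###Code
plot_histogram(counts)
###Output
_____no_output_____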
|
analysis/spheroid/20190703-sta-titration/pipeline.ipynb | ###Markdown
STA Titration Spheroid Quantification Pipeline
###Code
# Params
exp_name = None
output_dir = None
raw_dir = None
# # Debug
# exp_name = 'sta-00.500-20um-s-XY01'
# output_dir = '/lab/data/spheroid/20190703-sta-titration/output/sta-00.500-20um-s-XY01/v00'
# raw_dir = '/lab/data/spheroid/20190703-sta-titration/raw/05uMsta-20um-grids/XY01'
for v in ['exp_name', 'output_dir', 'raw_dir']:
assert globals()[v], '"{}" must be set'.format(v)
%matplotlib inline
%load_ext dotenv
%dotenv env.sh
%run source/utils.py
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import os.path as osp
import tqdm
import collections
from centrosome import propagate
from scipy import ndimage as ndi
from skimage import util, morphology, transform, exposure, filters, feature, segmentation
from skimage import io as sk_io
from cytokit import io as ck_io
from cytokit import config as ck_config
from cytokit.cytometry.cytometer import CytometerBase
from cytokit.utils import ij_utils
from cytokit.function import data as ck_data
from cytokit.function import core as ck_core
from cytokit.ops import tile_generator, cytometry, tile_crop
from cytokit.image.ops import describe
from cytokit.exec import ilastik
matplotlib.rc('image', cmap='gray')
df = pd.read_csv('experiments.csv')
df = df[df['name'] == exp_name]
assert len(df) == 1, 'Found {} rows for experiment {}'.format(len(df), exp_name)
df
cyto_dir = osp.join(output_dir, 'cytometry')
cyto_dir
ilastik_proj = os.environ['EXP_SPHEROID_ILASTIK_PRJ']
ilastik_proj
config = ck_config.load(osp.join(output_dir, 'config'))
config.register_environment()
config
###Output
_____no_output_____
###Markdown
Load All Images
###Code
tiles = get_tiles(config, raw_dir)
len(tiles)
img_mtg = ck_core.montage([t[0] for t in tiles], config)
img_mtg.shape
###Output
_____no_output_____
###Markdown
Create max-z BF for spheroid segmentation
###Code
def get_maxz_projection(img_mtg):
img = img_mtg[0, :, config.channel_names.index('BF')]
img = util.img_as_float(img)
img = util.invert(img)
img = img.max(axis=0)
# The target scale for BF segmentation is 25% of original 1440x1920 images
img = transform.rescale(img, .25, anti_aliasing=True, multichannel=False, mode='constant', preserve_range=True)
assert 0 <= img.min() <= img.max() <= 1
img = exposure.rescale_intensity(img, in_range=(0, 1), out_range=np.uint8).astype(np.uint8)
return img
img_bf_maxz = get_maxz_projection(img_mtg)
describe(img_bf_maxz)
plt.imshow(img_bf_maxz)
plt.gcf().set_size_inches(6, 12)
###Output
_____no_output_____
###Markdown
Spheroid Segmentation
###Code
def get_spheroid_probabilities(img, print_command=False, print_logs=False):
export_source = 'Probabilities'
input_path = osp.join(cyto_dir, 'ilastik', 'R001.BF.input.tif')
output_path = osp.join(cyto_dir, 'ilastik', 'R001.BF.output.tif')
os.makedirs(osp.dirname(input_path), exist_ok=True)
sk_io.imsave(input_path, img)
pres = ilastik.CLI.classify(ilastik_proj, input_path, output_path, export_source=export_source)
if print_command:
print('COMMAND: {}\n'.format(' '.join(pres.args)))
if print_logs:
print('STDOUT:\n{}'.format(pres.stdout.decode('utf-8')))
print('STDERR:\n{}'.format(pres.stderr.decode('utf-8')))
img = sk_io.imread(output_path)
# Ilastik result is multi-channel image with channels in order of annotated class
# in project; assume first channel is class of interest and extract it here:
    assert img.ndim == 3, 'Expecting 3D (row, col, class) image from ilastik, got shape {}'.format(img.shape)
img = img[..., 0]
return img
img_proba = get_spheroid_probabilities(img_bf_maxz, print_command=False, print_logs=False)
describe(img_proba)
plt.imshow(img_proba)
plt.gcf().set_size_inches(6, 12)
def get_spheroid_mask(img, low=.5, high=.8, sigma=10):
img = filters.gaussian(img, sigma=sigma)
img = filters.apply_hysteresis_threshold(img, low, high)
img = ndi.binary_fill_holes(img)
return img
def get_spheroid_objects(img_segment, img_mask, min_peak_distance=16, regularization=.001):
assert 0 <= img_segment.min() <= img_segment.max() <= 1
img_dist = ndi.distance_transform_edt(img_mask)
img_pk = feature.peak_local_max(img_dist, min_distance=min_peak_distance, indices=False)
img_pk = morphology.label(img_pk)
img_obj, _ = propagate.propagate(img_segment, img_pk, img_mask, regularization)
return img_obj
img_mask = get_spheroid_mask(img_proba)
img_obj = get_spheroid_objects(util.img_as_float(img_bf_maxz), img_mask)
# Upsample to original size
img_obj = transform.resize(
img_obj, img_mtg.shape[-2:], order=0, mode='constant',
anti_aliasing=False, preserve_range=True
).astype(img_obj.dtype)
assert img_obj.shape[-2:] == img_mtg.shape[-2:]
describe(img_obj)
plt.imshow(img_obj, cmap=rand_cmap(1))
plt.gcf().set_size_inches(6, 12)
df_sph = CytometerBase.quantify(
# Quantify averages over z channels
img_mtg.mean(axis=1)[np.newaxis],
# Duplicate spheroid body as center
np.stack([img_obj]*2)[np.newaxis],
channel_names=config.channel_names,
nucleus_intensity=False
)
df_sph.info()
# Export spheroid stats
path = osp.join(output_dir, 'cytometry', 'data.spheroid.csv')
df_sph.to_csv(path, index=False)
path
###Output
_____no_output_____
###Markdown
Cell Segmentation
###Code
cyto_op = cytometry.Cytometry2D(config, z_plane='all').initialize()
assert config.channel_names == cyto_op.quantification_params['channel_names']
cyto_op.quantification_params['channel_names']
cyto_op.quantification_params['channel_names'] = cyto_op.quantification_params['channel_names'] + ['SPHEROID']
cyto_op.quantification_params['channel_names']
obj_map = {ti: tile for tile, ti in ck_core.unmontage(img_obj, config, strict=True)}
def repeat(img, nz):
assert img.ndim == 2
return np.repeat(img[np.newaxis, np.newaxis, np.newaxis], nz, axis=1)
repeat(list(obj_map.values())[0], config.n_z_planes).shape
def get_cell_segmentation():
res = collections.OrderedDict()
ct = 0
for tile, tile_index in ck_core.unmontage(img_mtg, config, strict=True):
# Get mask for spheroids (repeated along z dimension) and append to tile (as new channel on end)
mask_tile = repeat(obj_map[tile_index] > 0, config.n_z_planes)
tile = np.concatenate((tile, mask_tile.astype(tile.dtype)), axis=2)
# Run segmentation and quantification
tile, (tseg, tstat) = cyto_op.run(tile)
res[tile_index] = tile, tseg, tstat
return res
seg_res = get_cell_segmentation()
###Output
_____no_output_____
###Markdown
Export Tile Results
###Code
# Export per-tile results
for tile_index in seg_res:
tile, tseg, tstat = seg_res[tile_index]
paths = cyto_op.save(tile_index, output_dir, (tseg, tstat), compress=0)
print('Saved cytometry data to paths ', paths)
# tags = ij_utils.get_slice_label_tags(['BF', 'MASK', 'BOUNDARY'] + CFLRO + CFLRO_QUANT)
# ck_io.save_tile(path, img_ext, config=config, infer_labels=False, extratags=tags)
# Aggregate cell stats
ck_core.aggregate_cytometry_statistics(output_dir, config, mode='all')
###Output
_____no_output_____
###Markdown
Montage Results
###Code
img_mtg.shape, img_mtg.dtype
img_obj_border = img_obj * segmentation.find_boundaries(img_obj, mode='inner').astype(img_obj.dtype)
img_obj.shape, img_obj.dtype, img_obj_border.shape, img_obj_border.dtype
img_mtg_cell = ck_core.montage([v[1] for v in seg_res.values()], config)
img_mtg_cell.shape, img_mtg_cell.dtype
# Build montage with original channels, spheroid objects, and cell + nucleus objects
# * For now, save only boundaries and not full masks
assert img_mtg.dtype == np.uint16
mtg_channels = config.channel_names + ['SPHEROID_BOUNDARY', 'CELL_BOUNDARY', 'NUCLEUS_BOUNDARY']
img_mtg_exp = np.concatenate((
img_mtg,
repeat(img_obj_border, config.n_z_planes).astype(np.uint16),
# repeat(img_obj, config.n_z_planes).astype(np.uint16),
img_mtg_cell[:, :, [2,3]].astype(np.uint16)
), axis=2)
assert len(mtg_channels) == img_mtg_exp.shape[2]
describe(img_mtg_exp)
mtg_channels
path = osp.join(output_dir, ck_io.get_montage_image_path(0, 'segm'))
os.makedirs(osp.dirname(path), exist_ok=True)
tags = ij_utils.get_channel_label_tags(mtg_channels, z=config.n_z_planes, t=1)
ck_io.save_tile(path, img_mtg_exp, config=config, infer_labels=False, extratags=tags)
path
###Output
_____no_output_____ |
tutorials/logging/Comet_SparkNLP_Intergration.ipynb | ###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/logging/Comet_SparkNLP_Intergration.ipynb) [Comet](https://www.comet.ml/site/) is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free!Comet can be easily integrated into the Spark NLP workflow with a dedicated logging class CometLogger, to log training and evaluation metrics, pipeline parameters and NER visualization made with sparknlp-display. Installing SparkNLP
###Code
# This is only to setup PySpark and Spark NLP on Colab
!wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
# Install Spark NLP Display for visualization
!pip install --ignore-installed spark-nlp-display
# Installing Comet
!pip install comet_ml --quiet
###Output
_____no_output_____
###Markdown
Importing Dependencies and Starting Spark NLP
###Code
# Import Spark NLP
import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp.pretrained import PretrainedPipeline
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
spark = sparknlp.start()
# Import Comet
import comet_ml
from sparknlp.logging.comet import CometLogger
###Output
_____no_output_____
###Markdown
Initialize Comet To run an online experiment, you will need an API key from Comet. See [Quick Start - Comet.ml](https://www.comet.ml/docs/quick-start/) for more information.
###Code
comet_ml.init(project_name='sparknlp-test')
###Output
Please enter your Comet API key from https://www.comet.ml/api/my/settings/
(api key may not show as you type)
Comet API key: ··········
###Markdown
Logging from SparkNLP Logging Training Metrics To log training metrics, the Spark NLP annotator needs an output logs path (set via `setOutputLogsPath` below), as the CometLogger reads the log file generated during the training process.
###Code
OUTPUT_LOG_PATH = './run'
###Output
_____no_output_____
###Markdown
Create a Comet Logger
###Code
logger = CometLogger()
###Output
COMET WARNING: As you are running in a Jupyter environment, you will need to call `experiment.end()` when finished to ensure all metrics and code are logged before exiting.
COMET INFO: Couldn't find a Git repository in '/content' and lookings in parents. You can override where Comet is looking for a Git Patch by setting the configuration `COMET_GIT_DIRECTORY`
COMET INFO: Experiment is live on comet.ml https://www.comet.ml/team-comet-ml/sparknlp-test/0c433df24b1b47038f0fc4b955ca9fba
###Markdown
Download Data
###Code
!curl -O 'https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/toxic_comments/toxic_train.snappy.parquet'
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 2702k 100 2702k 0 0 3081k 0 --:--:-- --:--:-- --:--:-- 3077k
###Markdown
Create Dataset
###Code
trainDataset = spark.read.parquet("toxic_train.snappy.parquet").repartition(120)
###Output
_____no_output_____
###Markdown
Define SparkNLP Pipeline
###Code
# Let's use shrink to remove new lines in the comments
document = (
DocumentAssembler()
.setInputCol("text")
.setOutputCol("document")
.setCleanupMode("shrink")
)
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
# Here we use the state-of-the-art Universal Sentence Encoder model from TF Hub
use = (
UniversalSentenceEncoder.pretrained()
.setInputCols(["document"])
.setOutputCol("sentence_embeddings")
)
# We will use MultiClassifierDL built by using Bidirectional GRU and CNNs inside TensorFlow that supports up to 100 classes
# We will use only 5 Epochs but feel free to increase it on your own dataset
multiClassifier = (
MultiClassifierDLApproach()
.setInputCols("sentence_embeddings")
.setOutputCol("category")
.setLabelColumn("labels")
.setBatchSize(128)
.setMaxEpochs(10)
.setLr(1e-3)
.setThreshold(0.5)
.setShufflePerEpoch(False)
.setEnableOutputLogs(True)
.setOutputLogsPath(OUTPUT_LOG_PATH)
.setValidationSplit(0.1)
)
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
###Markdown
Monitor the Model log file SparkNLP will write the training metrics for this run to a log file. We're going to monitor this file for updates and log the entries to Comet as metrics. Before starting to fit our model, let's display Comet's Experiment View in the cell below so that we can view the metrics as they are reported. **Note:** It may take a few minutes before you see metrics displayed in the UI.
###Code
logger.experiment.display(tab='charts')
logger.monitor(OUTPUT_LOG_PATH, multiClassifier)
###Output
_____no_output_____
###Markdown
Run the training Pipeline
###Code
pipeline = Pipeline(stages=[document, use, multiClassifier])
model = pipeline.fit(trainDataset)
###Output
_____no_output_____
###Markdown
Logging Completed Runs We can also log runs to Comet after training has finished. Let's take a look at the created log file from the earlier training run
###Code
!ls ./run
logger = CometLogger()
logger.log_completed_run('./run/MultiClassifierDLApproach_736adb7a5ea5.log')
logger.experiment.display(tab='charts')
###Output
_____no_output_____
###Markdown
Logging Evaluation Metrics SparkNLP model predictions are easily convertible to Pandas Dataframes. We can then evaluate these predictions using libraries like `scikit-learn`. Here we demonstrate how to log metrics from a classification report to Comet
###Code
logger = CometLogger()
!curl -O 'https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/toxic_comments/toxic_test.snappy.parquet'
testDataset = spark.read.parquet("/content/toxic_test.snappy.parquet").repartition(10)
prediction = model.transform(testDataset)
preds_df = prediction.select('labels', 'category.result').toPandas()
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report
mlb = MultiLabelBinarizer()
y_true = mlb.fit_transform(preds_df['labels'])
y_pred = mlb.fit_transform(preds_df['result'])
report = classification_report(y_true, y_pred, output_dict=True)
for key, value in report.items():
logger.log_metrics(value, prefix=key)
logger.experiment.display(tab='metrics')
###Output
_____no_output_____
###Markdown
Logging Pipeline Parameters You can also use the CometLogger to log SparkNLP Pipeline Parameters to Comet.
###Code
logger = CometLogger()
###Output
_____no_output_____
###Markdown
Define a Pipeline with a Pretrained Model
###Code
# If you change the model, re-run all the cells below
# Other applicable models: ner_dl, ner_dl_bert
MODEL_NAME = "onto_100"
documentAssembler = DocumentAssembler() \
.setInputCol('text') \
.setOutputCol('document')
tokenizer = Tokenizer() \
.setInputCols(['document']) \
.setOutputCol('token')
# ner_dl and onto_100 model are trained with glove_100d, so the embeddings in
# the pipeline should match
if (MODEL_NAME == "ner_dl") or (MODEL_NAME == "onto_100"):
embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
.setInputCols(["document", 'token']) \
.setOutputCol("embeddings")
# Bert model uses Bert embeddings
elif MODEL_NAME == "ner_dl_bert":
embeddings = BertEmbeddings.pretrained(name='bert_base_cased', lang='en') \
.setInputCols(['document', 'token']) \
.setOutputCol('embeddings')
ner_model = NerDLModel.pretrained(MODEL_NAME, 'en') \
.setInputCols(['document', 'token', 'embeddings']) \
.setOutputCol('ner')
ner_converter = NerConverter() \
.setInputCols(['document', 'token', 'ner']) \
.setOutputCol('ner_chunk')
nlp_pipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
embeddings,
ner_model,
ner_converter
])
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
###Output
_____no_output_____
###Markdown
Log PipelineModel Parameters
###Code
logger.log_pipeline_parameters(pipeline_model)
###Output
_____no_output_____
###Markdown
Logging Individual Parameters
###Code
logger.log_parameters({"run-type": "training"})
###Output
_____no_output_____
###Markdown
Let's take a look at the logged Parameters.
###Code
logger.experiment.display(tab='parameters')
logger.end()
###Output
_____no_output_____
###Markdown
Logging Visualizations SparkNLP comes with a rich suite of visualization tools. Comet supports logging these visualizations so that they are readily available for analysis. In this section we will cover how to log SparkNLP visualizations to Comet Define a Pipeline with a Pretrained Model
###Code
# If you change the model, re-run all the cells below
# Other applicable models: ner_dl, ner_dl_bert
MODEL_NAME = "onto_100"
documentAssembler = DocumentAssembler() \
.setInputCol('text') \
.setOutputCol('document')
tokenizer = Tokenizer() \
.setInputCols(['document']) \
.setOutputCol('token')
# ner_dl and onto_100 model are trained with glove_100d, so the embeddings in
# the pipeline should match
if (MODEL_NAME == "ner_dl") or (MODEL_NAME == "onto_100"):
embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
.setInputCols(["document", 'token']) \
.setOutputCol("embeddings")
# Bert model uses Bert embeddings
elif MODEL_NAME == "ner_dl_bert":
embeddings = BertEmbeddings.pretrained(name='bert_base_cased', lang='en') \
.setInputCols(['document', 'token']) \
.setOutputCol('embeddings')
ner_model = NerDLModel.pretrained(MODEL_NAME, 'en') \
.setInputCols(['document', 'token', 'embeddings']) \
.setOutputCol('ner')
ner_converter = NerConverter() \
.setInputCols(['document', 'token', 'ner']) \
.setOutputCol('ner_chunk')
nlp_pipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
embeddings,
ner_model,
ner_converter
])
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
###Output
_____no_output_____
###Markdown
Run Inference with the NER Pipeline
###Code
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
import pandas as pd
text_list = [
"""William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",
"""The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris."""
]
df = spark.createDataFrame(pd.DataFrame({'text': text_list}))
results = pipeline_model.transform(df)
###Output
_____no_output_____
###Markdown
Create and Log Visualizations
###Code
from sparknlp_display import NerVisualizer
logger = CometLogger()
for idx, result in enumerate(results.collect()):
viz = NerVisualizer().display(
result=result,
label_col='ner_chunk',
document_col='document',
return_html=True
)
logger.log_visualization(viz, name=f'viz-{idx}.html')
###Output
_____no_output_____
###Markdown
The HTML files from these files can be found in the "Assets & Artifacts" tab in the Experiment View.
###Code
logger.experiment.display(tab='assets')
###Output
_____no_output_____ |
notebooks/raingauges/NOAA dataset - get station locations.ipynb | ###Markdown
NOAA data - Get station locationsOne can get the NOAA hourly precipitation data here: https://www.ncdc.noaa.gov/cdo-web/search?datasetid=PRECIP_HLYThis notebook gets the geographic coordinates of the locations of the gauges from the CSV file product.The sample used here is for April 2013, for Illinois state.
###Code
%pylab inline
import pandas as pd
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Set the file path of the CSV file.
###Code
fname = 'C:/Users/irene/Documents/Work/Data/NCDC_NOAA/NOAA_HourlyPrecip_April2013.csv'
###Output
_____no_output_____
###Markdown
Read the file as a pandas dataframe.
###Code
df = pd.read_csv(fname,header=0)
df.head()
###Output
_____no_output_____
###Markdown
Check how many stations there are.
###Code
len(df['STATION_NAME'].unique())
###Output
_____no_output_____
###Markdown
Since for this notebook we are only interested in the station locations and not the measured data, we can drop the duplicates based on the `STATION` parameter.
###Code
df_station_locations = df.drop_duplicates('STATION')[['STATION','STATION_NAME','LATITUDE','LONGITUDE']]
df_station_locations.head()
###Output
_____no_output_____
###Markdown
Save the dataframe as a CSV file.
###Code
df_station_locations.to_csv('noaa_station_locations.csv',index=False)
###Output
_____no_output_____ |
convnet_training_exercises.ipynb | ###Markdown
Computer Vision Training YM Exercise formatThe sections of code that you will have to fill in yourself are marked by blocks starting with `#################` and ending with `#================`. `# >>` (note) lines provide hints for the problem and `#-` lines are stand-ins for statements you need to fill in. You'll often find variables in the format ```variable = None```; these can be variables or objects where I've provided the name so they'll be consistent with code later in the exercise. These are just guidelines and if you have a different idea of how to approach the problem feel free to deviate. Try to think of what conceptually needs to happen to solve the problem and then implement it. Don't stay with one problem for too long if you get stuck. Instead, just look at the answers. Sometimes these problems come down to knowing some specific syntax and become a lot harder if you don't. It is more important that you get a feel for some of these concepts so you have a starting point if you have to work with them for a project than that you solve all of them on your own. 1: Introduction to Convnets with pytorch In this section we will build a simple convolutional model and train it to classify images from the CIFAR-10 dataset. The CIFAR-10 dataset is one of the first large scale image datasets, but the images are very small (32x32). All convolutional models will do well on this if they have enough layers. Download the data using the torchvision libraryPytorch has some built-in libraries to download certain datasets; the CIFAR-10 dataset is one of them.
###Code
# convnet classification task on
import torchvision
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision.models as models
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import numpy as np
transform = transforms.Compose([
# you can add other transformations in this list if you want.
transforms.ToTensor()
])
#################
# >> make cifar10 test and train dataset objects
cifar10_train_dataset = None
cifar10_test_dataset = None
#================
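# One possible sketch, in case you get stuck ('./data' is an assumed download folder, not part of the exercise):
# cifar10_train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
# cifar10_test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)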
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Make dataloaders from the datasetsWrap the dataset classes in a pytorch dataloader class. This will allow us to feed the data to the model in an easy and controllable way.
###Code
# adjust batch size to fit gpu space AND for finetuning the training process, you'll have to do this for all different exercises.
# batch size is not an insignificant factor in the training of convnets (or any neural networks for that matter)
batch_size = 32
#################
# >> make train and test dataloader objects
train_loader = None
test_loader = None
#================
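# One possible sketch using torch.utils.data.DataLoader (num_workers=2 is an arbitrary choice):
# train_loader = torch.utils.data.DataLoader(cifar10_train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
# test_loader = torch.utils.data.DataLoader(cifar10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)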
# the dataset class should contain the mapping of label to label_idx
label_to_classname = {v: k for k, v in cifar10_test_dataset.class_to_idx.items()}
# show single sample
for batch, target in test_loader:
sample = batch[0].permute(1, 2, 0)
t_label = target[0].item()
print(f"target label : {label_to_classname[t_label]}")
print(f"shape of tensors: batch={batch.shape}, target={target.shape}")
plt.imshow(sample.numpy())
break
###Output
_____no_output_____
###Markdown
Build your convolutional modelUse 2d Convolutional layers and ReLU layers to construct a simple neural network. You will also need a linear layer (also called fully connected layer) at the end and perhaps some dropout layers.
###Code
class VerySimpleNet(nn.Module):
def __init__(self, n_classes):
super(VerySimpleNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, 1)
self.relu1 = nn.ReLU()
#################
# >> add a few layers, I suggest alternating conv2d and relu layers while increasing the amount of filters
self.fc = None
#================
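        # One possible sketch: one more conv/ReLU pair and a linear classifier. With 32x32 inputs and
        # two 3x3 convs without padding the feature map is 28x28x64 before flattening.
        # self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # self.relu2 = nn.ReLU()
        # self.fc = nn.Linear(64 * 28 * 28, n_classes)
        # (call self.conv2 and self.relu2 in forward() before the flatten)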
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
#################
# >> call layers in the forward pass
#================
x = torch.flatten(x, 1)
x = self.fc(x)
return x
###Output
_____no_output_____
###Markdown
Training and testing the modelWe'll now define basic training and testing routines. In pytorch you'll have to specify which optimizer, loss and scheduler (if you want to use one) you want to use and put the statements in the right spots.
###Code
# I've written the train and test methods for you, have a look at them and see if you understand what they do.
def train(model, device, train_loader, optimizer, criterion, epoch, log_interval=5, dry_run=False):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
#################
# data and target to gpu
#-
# reset gradients of previous iteration
#-
# forward pass of model
#-
# calculate loss
#-
# calculate gradients
#-
# apply optimizer based on gradients
#-
#================
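            # One possible sketch of the standard training step:
            # data, target = data.to(device), target.to(device)
            # optimizer.zero_grad()
            # output = model(data)
            # loss = criterion(output, target)
            # loss.backward()
            # optimizer.step()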
# log the loss
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if dry_run:
break
def test(model, device, criterion, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
#################
# data to gpu
#-
# data through model
#-
# output = ...
# calculate loss (only for logging, we're not going to use it for backpropagation)
#-
# add loss to total loss
test_loss += loss
#================
# calculate metric
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# some parameters
device = "cuda:0"
lr = 0.001
gamma = 0.7
epochs = 5
model = VerySimpleNet(10).to(device) # transfer model to GPU
#################
# >> define optimizer, loss function (criterion) and scheduler
optimizer = None
criterion = None
scheduler = None
#================
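# One possible sketch (the optimizer/loss/scheduler choices are illustrative; all were imported at the top):
# optimizer = optim.Adam(model.parameters(), lr=lr)
# criterion = nn.CrossEntropyLoss()
# scheduler = StepLR(optimizer, step_size=1, gamma=gamma)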
for epoch in range(1, epochs + 1):
train(model, device, train_loader, optimizer, criterion, epoch, log_interval=100)
test(model, device, criterion, test_loader)
scheduler.step()
###Output
_____no_output_____
###Markdown
Inspecting the resultsLet's look at some classification examples from the test set. How does your model perform?
###Code
for batch, target in test_loader:
sample = batch[0].permute(1, 2, 0)
output = model(batch.to(device))
p_label = torch.argmax(output[0]).item()
t_label = target[0].item()
print(f"target label: {label_to_classname[t_label]}, predicted label: {label_to_classname[p_label]}")
plt.imshow(sample.numpy())
break
###Output
_____no_output_____
###Markdown
Understanding Convnets - Architecture, Backbones & LayersBackbones are large (often pretrained) neural networks that serve as a way to extract features that are useful for almost all types of problems.The general idea of a backbone is to have a model that is very good at identifying high level features in an image. The easiest way to understand this is to think of images as collections of shapes. A face is just a collection of circles (such as the eyes) and curvy lines.This means that if we already have a model that can detect all these components the step to the full combination of those components (such as a face) is a lot easier!In practice most pretrained backbones will already have concepts such as faces embedded into the layers. Additionally, lines and circles are actually pretty basic features and a large pretrained backbone will contain much more complex concepts. Revisiting CIFAR-10 Now we know about backbones, let's use one on the CIFAR-10 dataset. For this we'll need to download a pretrained model and adjust the number of classes. We'll pick the resnext50 backbone model, a variant of the resnet style architecture (see, https://pytorch.org/hub/pytorch_vision_resnext/).
###Code
#################
# >> import a pretrained backbone from the torchvision.models library
pretrained_model = None
#================
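# One possible sketch (resnext50_32x4d is the torchvision name for the resnext50 model mentioned above):
# pretrained_model = models.resnext50_32x4d(pretrained=True)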
# you can always print a model to see the full structure or even the partial structure if you select it.
print(pretrained_model.layer1)
###Output
_____no_output_____
###Markdown
Changing a backboneWe now want to change the backbone so it fits our 10 class classification problem. There are two ways to do this and here we will look at both of them.1. The first (and easiest) way to change the model is to just swap out the last layer. In this case the original model was already a classification model and we are just changing the number of output classes in the last layer to fit our problem. In this case this is also the most 'correct' way of doing it.2. The second way to adjust the model is to wrap it in a new model class. This method is useful in more complicated scenarios where you want to change more than just the number of outputs. For example if we wanted to use the backbone as the basis for a segmentation model. Now before you ask, yes wrapping the backbone like this preserves the last layer that would be replaced in the other example; luckily this does not affect performance (only memory usage). 1: replacing layers
###Code
num_classes = 10
#################
# >> change the last layer of the model, (run the training cell further below to see if it worked):
#================
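# One possible sketch: for resnet/resnext-style torchvision models the classifier is the .fc attribute, e.g.
# pretrained_model.fc = nn.Linear(pretrained_model.fc.in_features, num_classes)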
print(pretrained_model)
###Output
_____no_output_____
###Markdown
2: wrapping modules
###Code
# now let's wrap our model in a pytorch module instead, this is syntactically the same as adding a layer to a regular network.
class WrappedResnext(nn.Module):
def __init__(self, n_classes, backbone):
super(WrappedResnext, self).__init__()
#################
self.backbone = None
self.fc2 = None
#================
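        # One possible sketch: keep the whole pretrained network as a feature extractor (its original
        # 1000-class ImageNet output becomes an intermediate representation) and map it to n_classes:
        # self.backbone = backbone
        # self.fc2 = nn.Linear(1000, n_classes)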
def forward(self, x):
#################
# >> do forward pass
#================
return x
#################
# >> import unchanged model again
pretrained_model = None
wrapped_model = None
#================
print(wrapped_model)
###Output
_____no_output_____
###Markdown
Test your adjusted models by running the cell belowNow that we've adjusted our model for our problem we can try it out. Try both ways
###Code
device = "cuda:0"
gamma = 0.7
epochs = 10
learning_rate = 0.001
wrapped_model = wrapped_model.to(device)
#################
# >> define optimizer, loss function (criterion) and scheduler again. You've done this before, I'm just making you do it again.
optimizer = None
criterion = None
scheduler = None
#================
for epoch in range(1, epochs + 1):
    train(wrapped_model, device, train_loader, optimizer, criterion, epoch, log_interval=100)
    test(wrapped_model, device, criterion, test_loader)
scheduler.step()
###Output
_____no_output_____
###Markdown
Inspecting the resultsAnd? Does the model perform better? Can you even tell without training for a (very) long time? Try looking at some examples.
###Code
# If you want to see some samples
for batch, target in test_loader:
sample = batch[0].permute(1, 2, 0)
    output = wrapped_model(batch.to(device))
p_label = torch.argmax(output[0]).item()
t_label = target[0].item()
print(f"target label: {label_to_classname[t_label]}, predicted label: {label_to_classname[p_label]}")
plt.imshow(sample.numpy())
break
###Output
_____no_output_____
###Markdown
Before you start this section!We will be using another fairly large dataset in this section, so turn on the download by running the imports and the *Testing the model* section Utility: clearing GPU memoryWhen working with pytorch or any deep learning framework you will likely get errors concerning the GPU memory. Take a look at the code below: running this method (or something similar) can help clear the GPU memory if this becomes a problem.
###Code
torch.cuda.empty_cache()
import gc # gc = garbage collection
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.size())
if obj.size() == torch.Size([4, 21, 568, 568]):
del obj
if obj.size() == torch.Size([4, 3, 568, 568]):
del obj
if obj.size() == torch.Size([4, 1, 568, 568]):
del obj
except:
pass
###Output
_____no_output_____
###Markdown
2: Understanding Convnets - Model architectureIn this section we'll look at Unet, a classic (compared to more recent models) deep learning segmentation model that introduced the concept of encoder-decoder architecture. It is also fairly easy to understand. It consists of two parts, the encoder and the decoder. The Encoder has the same job as the backbone has in the previous section: Its purpose is to extract features (you can think of them as classifications or objects) from the image, that will then be used in the second half of the model (the decoder) to make the segmentation (place those objects in the right place with the right boundaries). In this sense, the Decoder does the same thing as the single layer of linear nodes in the backbone example. It uses the features supplied by the encoder to make a classification, just for all pixels in the image instead of the entire image. So that's the general idea, but what are some of the other interesting parts of the model? 1. As we can see in the image below, the shape of the layers gets deeper in filters but smaller in width/height.This is done to allow the model to learn larger more complex concepts. As the size of the convolutional filters stays the same throughout the model (generally kernel size is always 3x3), a larger object like a car would never fit in those 3 pixels. By downsizing the output after each layer, a full object CAN be represented in that 3x3 grid of pixels. This is because filters specialize: a certain filter in the 4th layer of the model might only respond to a specific pattern that was found in the previous layer. That pattern is again already an abstraction upon the previous input etc. etc. until you reach the first layer where only lines and squiggles are detected.2. What are those grey lines jumping from the encoder part to the decoder part? As you might have suspected this is just the output of that layer being saved and copied to be concatenated to the decoder at the mirrored layer later. This is done because a lot of spatial detail is lost in the process of downsampling. By adding the higher resolution data from the downsampling process this effect is somewhat mitigated as the network is able to use this data to make more precise object boundaries.  Building the segmentation modelI've defined some functions to get you started, try to complete the model based on the architecture shown above. You can use a linear layer at the end but you don't have to.
###Code
# imports so you don't have to scroll up when you get an OOM error
import torchvision
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import numpy as np
## From classification model to segmentation model
def double_conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True)
)
class UNet(nn.Module):
def __init__(self, n_class):
super().__init__()
#################
# >> finish the encoder and then make the decoder
# encoder
self.dconv_down1 = double_conv(3, 64)
self.dconv_down2 = double_conv(64, 128)
self.dconv_down3 = None
self.dconv_down4 = None
        # downsampling (used on the way down the U, i.e. in the encoder)
self.maxpool = nn.MaxPool2d(2)
        # upsampling (used on the way back up the U, i.e. in the decoder)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
# decoder
# >> add layers for the decoder yourself
# >> final layer
self.final_layer = None
#================
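        # One possible sketch, following the 64-128-256-512 channel pattern of the figure; the decoder
        # convs take the upsampled features concatenated with the matching encoder output:
        # self.dconv_down3 = double_conv(128, 256)
        # self.dconv_down4 = double_conv(256, 512)
        # self.dconv_up3 = double_conv(256 + 512, 256)
        # self.dconv_up2 = double_conv(128 + 256, 128)
        # self.dconv_up1 = double_conv(64 + 128, 64)
        # self.final_layer = nn.Conv2d(64, n_class, 1)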
def forward(self, x):
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
#################
# >> finish the encoder forward pass
# >> add the decoder forward pass
#================
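        # Sketch of the decoder forward pass: after the deepest double_conv, repeatedly upsample,
        # concatenate the saved encoder output (torch.cat([x, conv_i], dim=1)) and apply the matching dconv_up block.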
out = self.final_layer(x)
return out
###Output
_____no_output_____
###Markdown
Testing the model Let's try out our new model on another pytorch/torchvision dataset. We'll use the SBD dataset, another dataset that can be downloaded using the torchvision dataset library.This dataset supplies full segmentations instead of just classes. It's a little trickier to use so I've completed this section for you, feel free to look around though.
###Code
from PIL import Image
# custom compose object to transform the pil images to tensors in the right format
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image = t(image)
if type(t) != transforms.Normalize:
target = t(target)
return image, target
transform = []
transform.append(transforms.Resize((568, 568), interpolation=Image.NEAREST))
transform.append(transforms.ToTensor())
transform.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]))
transform = Compose(transform)
# sadly this dataset didnt have the classes attribute so I had to add them manually.
label_to_classname = {
0: "background",
1: "aeroplane",
2: "bicycle",
3: "bird",
4: "boat",
5: "bottle",
6: "bus",
7: "car",
8: "cat",
9: "chair",
10: "cow",
11: "diningtable",
12: "dog",
13: "horse",
14: "motorbike",
15: "person",
16: "pottedplant",
17: "sheep",
18: "sofa",
19: "train",
20: "tvmonitor"
}
# this dataset is 1.4 Gigabyte so be patient.
sbd_dataset = torchvision.datasets.SBDataset(root="./sbd", download=False, mode="segmentation", image_set='train', transforms=transform)
# pick the right batch_size, generally you would want to use at least 16 for any convnet training,
# but often this will be impossible due to the size on smaller gpus, we're really only trying stuff out so you can use a smaller size if needed here
batch_size = 4
train_loader = torch.utils.data.DataLoader(sbd_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=5)
###Output
_____no_output_____
###Markdown
Inspecting the dataRun this a few times to look at some examples
###Code
for image_batch, target_batch in train_loader:
image = image_batch[0]
target = target_batch[0]
image_cl = image.permute(1, 2, 0)
target_cl = target.permute(1, 2, 0)
fig, axs = plt.subplots(1, 2, figsize=(20,10))
# We are plotting normalized images here, you could turn denormalize them with the values in the transform operation, but for the purpose of seeing
#the content of the image this is just fine
axs[0].imshow(image_cl.numpy())
# naive approach to muticlass plotting, classes are assigned different colors due to the values being different, but is not consistent between images
axs[1].imshow(target_cl.numpy())
print(f"classes: {[label_to_classname[c] for c in np.unique(target_cl.numpy()*255)]}")
break
###Output
_____no_output_____
###Markdown
Training the modelTraining the segmentation model is basically the same as for any neural network. Just plug in the data and let the optimizer do the work. Fitting this model is a pretty heavy task and it might take too long, so I've added an overfit option that will just overfit the model on one image. Things like this are a good way of testing whether the model is capable of processing this type of data: for the model to perfectly overfit an image, the whole training routine has to work.
###Code
def train(model, device, train_loader, optimizer, criterion, epoch, log_interval=5, dry_run=False, overfitrun=False):
model.train()
losses = []
n_iters = 0
overfitdata = None
overfittarget = None
for batch_idx, (data, target) in enumerate(train_loader):
if overfitrun:
if batch_idx == 0:
overfitdata = data
overfittarget = target
else:
data = overfitdata
target = overfittarget
data = data.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(data)
target = target.reshape(batch_size, 568, 568) * 255
target = target.long()
loss = criterion(output, target)
losses.append(loss.item())
n_iters += 1
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\t average Loss (last 500): {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), np.mean(losses[-500:])))
if dry_run:
break
# parameters
device = "cuda:0"
epochs = 10
learning_rate = 0.001
gamma = 0.7
#################
# >> this time write the training code yourself (it's still more or less the same as before)
# >> make the model
# >> define optimizer, loss function and scheduler
# >> do the train and test step for n epochs
#================
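# One possible sketch (there is no separate test loader in this section, so only the train step is shown):
# segmentation_model = UNet(sbd_dataset.num_classes + 1).to(device)
# optimizer = optim.Adam(segmentation_model.parameters(), lr=learning_rate)
# criterion = nn.CrossEntropyLoss()
# scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
# for epoch in range(1, epochs + 1):
#     train(segmentation_model, device, train_loader, optimizer, criterion, epoch, log_interval=100)
#     scheduler.step()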
###Output
_____no_output_____
###Markdown
You can save the model if you want or load one I pretrainedIf your model is taking a while to train, you can copy the example model code and import these pretrained weights to see some results.
###Code
# save model
# torch.save(segmentation_model.state_dict(), "segmentation_model.pt")
# load model
# segmentation_model = UNet(sbd_dataset.num_classes + 1)
# segmentation_model.load_state_dict(torch.load("segmentation_model.pt"))
# segmentation_model.eval()
# segmentation_model = segmentation_model.to(device)
print("done!")
###Output
_____no_output_____
###Markdown
Inspect the resultsYou can see the results of your training here. Keep in mind, however, that it takes a long time to train a large model like this. You won't get good looking results unless you leave it running for a while. Try the pretrained backbone (look in the answers to find my implementation that fits it) for a working(-ish) example.
###Code
for image_batch, target_batch in train_loader:
#################
# >> extract the first image in the batch and its target
# >> switch channel orders for plotting
# >> pass the batch through the model and get the predictions
# >> get the predictions for the first image and transform them for plotting
#================
fig, axs = plt.subplots(1, 3, figsize=(20,10))
axs[0].imshow(image_cl.numpy())
# naive approach to muticlass plotting, classes are assigned different colors due to the values being different, but is not consistent between images
axs[1].imshow(target_cl.numpy())
axs[2].imshow(pred.numpy())
print(f"classes: {[label_to_classname[c] for c in np.unique(target_cl.numpy()*255)]}")
break
###Output
_____no_output_____
###Markdown
3: Understanding Convnets - Filter Visualization An interesting area of research within convolutional neural networks is interpretability. As you will likely know, neural networks are mostly black box solutions. It is hard to get an idea of why the network performs the way it does. There are several ways in which more insight can be gained from the model, but most of them focus on various ways of tracking the activation of the model on a certain image. Doing this for a large number of images gives insight into what parts of the model respond to specific stimuli. This process is somewhat similar to how human or animal brains can be studied as well: if you are shown multiple similar pictures, most likely the same area of the brain will be used. Using neural networks however, we can do more than just track the activation throughout the network. Neural networks, although they are large black boxes, are deterministic. This means that we always get the same output for the same image, but more interestingly, it means we can make small adjustments to the input image and by doing so map the internal logic of the network! In this example we will apply a process called deep-dreaming (https://en.wikipedia.org/wiki/DeepDream) to see inside a neural network. More specifically we will change the optimization goal to visualize the convolutional filters. We will do so by inputting random noise and adjusting that noise to get a higher activation of a specific filter/layer. Adjusting values to get a higher activation? Does that sound familiar? Well it should because this method uses the same backpropagation algorithm as regular training just with a different target! Preparing our modelFor this task we will use a pretrained model. This is because a pretrained model will already have well trained filters that look for very specific patterns. If we were to use an untrained model we might not get good visualizations at all!
###Code
# imports
import torchvision
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import requests
import numpy as np
import torchvision.models as models
from io import BytesIO
from PIL import Image
# lets use the same backbone model as before
#################
# >> import pretrained model
model = None
#================
# set in eval mode to avoid adjusting the weights
model.eval()
print("done")
# have a look at the layers & modules in the model and pick one as optimization target
# list(resnext50.modules())
model.layer2[1].conv1
###Output
_____no_output_____
###Markdown
Retrieving intermediate resultsTo do the optimization we need the output of a specific layer/module in the model. We can do this in pytorch by making use of a Hook. A hook will be attached to a specific location in the model and will automatically save what we want when the model's forward function is called.
###Code
class Hook():
def __init__(self, module, backward=False):
if backward==False:
self.hook = module.register_forward_hook(self.hook_fn)
else:
self.hook = module.register_backward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.input = input
self.output = output
def close(self):
self.hook.remove()
###Output
_____no_output_____
###Markdown
Getting gradients for optimizationInstead of optimizing using the loss gained by comparing the output to the target label, our loss will just be the activation of the layer that we set as target. Additionally, we will be trying to get the loss as high as possible instead of as low as possible like in a regular training setup; this is called gradient ascent.
###Code
#Function to make gradients calculations from the output channels of the target layer
def get_gradients(model_input, model, layer, filter_idx):
model_input = model_input.unsqueeze(0).cuda() # some reshaping
#################
# >> fill in these one line statements at the #-
# we want to get gradients from the forward pass so make sure the inpute data generates gradients
#-
# discard any previous gradients
#-
# apply the hook we made earlier to the target layer
#-
# do the forward pass, we won't actually use the final output
#-
# get the loss by retrieving the output saved by the hook.
# we will take the norm of the output because we want extreme values in both directions, positive AND negative.
# don't forget that the data is in batched format, even though we're only supplying one example
#-
# use the loss to calculate the gradients
#-
# return the gradients we just calculated (and reshape data)
return None
#================
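    # One possible sketch of the missing statements (the hook usage follows the Hook class defined above):
    # model_input.requires_grad_(True)
    # model.zero_grad()
    # hook = Hook(layer)
    # model(model_input)
    # loss = hook.output[0][filter_idx].norm()
    # loss.backward()
    # return model_input.grad.data.squeeze()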
###Output
_____no_output_____
###Markdown
Gradient AscentNow that we have a way to get specific outputs from inside the model and a way to calculate gradients for our optimization target, we can write the full deep dream function.In this function we will prepare the image, perform the gradient ascent and return the final output.
###Code
# denormalization image transform, used to give the image the right colors again.
denorm = transforms.Compose([transforms.Normalize(mean = [ 0., 0., 0. ], std = [ 1/0.229, 1/0.224, 1/0.225 ]),
transforms.Normalize(mean = [ -0.485, -0.456, -0.406 ], std = [ 1., 1., 1. ]),
])
def dream(image, model, layer, iterations, lr, filter_idx):
"""Deep dream an image using given model and parameters."""
#################
# >> fill in these one line statements at the #-
# convert image to tensor
#-
# remove additional channels if it's present (pngs will have a 4th transparancy channel)
#-
# normalize the image
image_tensor = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image_tensor).cuda()
# deep dream the image
for i in range(iterations):
# get gradients
gradients = get_gradients(image_tensor, model, layer, filter_idx)
# add gradients to image to morph the image in a direction that causes a higher activation, we'll add a learning rate parameter to control the effect
#-
#================
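    # One possible sketch of the missing statements:
    # image_tensor = transforms.ToTensor()(image)   # PIL image -> tensor
    # image_tensor = image_tensor[:3]                # drop a possible alpha channel
    # and, inside the loop, a simple gradient-ascent step such as:
    # image_tensor.data = image_tensor.data + lr * gradients.data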
# get the final image from gpu
img_out = image_tensor.detach().cpu()
# denormalize
img_out = denorm(img_out)
# do some reshaping, conversion
img_out_np = img_out.numpy().transpose(1,2,0)
img_out_np = np.clip(img_out_np, 0, 1)
img_out_pil = Image.fromarray(np.uint8(img_out_np * 255))
return img_out_pil
###Output
_____no_output_____
###Markdown
Deep Dreaming Now that we have all our functions, let's try them out on a bunch of images!
###Code
device = "cuda:0"
# Get an input image, I've used weblinks here but you can upload your own as well, you could even try using random noise!
url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/33/Kladsk%C3%A1_forrest.jpg/1024px-Kladsk%C3%A1_forrest.jpg'
# url = 'https://www.marineterrein.nl/wp-content/uploads/2019/02/IMG_8815-830x466.jpg'
# url = 'https://youngmavericks.com/img/contact_location.png'
response = requests.get(url)
img = Image.open(BytesIO(response.content))
# might want to resize if they're very large, it would take a while to do the whole deep dream
orig_size = np.array(img.size)
# new_size = np.array(img.size)*0.5
#################
# make sure the model is on the gpu
#-
# pick a target layer
#-
# set some parameters
filter_idx = None
learning_rate = None
iterations = None
# call the deep dream function on an image and get the deep dreamed image
# -
#================
# resize to original if needed
img = img.resize(orig_size)
# plot
fig = plt.figure(figsize = (20 , 20))
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Convnets for spatially related non-image data Downloading the data.The data for this section is quite large so before going any further, start the download :). Download the data here: https://drive.google.com/file/d/1r_SUJpfz3qX0j6ZwmFwCBTEHw7EE-q4l/view?usp=sharing & https://drive.google.com/file/d/1GO6Stq_eRsJGaQL8KVWoex1A2vcX4D87/view?usp=sharing dataset site: http://machine-listening.eecs.qmul.ac.uk/bird-audio-detection-challenge/downloads Overview Convnets are typically associated with image classification tasks, however any spatially related data can be classified with convnets. In this example we will see how it is possible to classify audio data with convnets. More specifically, we will try to classify short audio samples as containing birdsong or not. To do this, we'll first have to convert the audio data into something that can be processed using a neural net. Spectrograms (https://en.wikipedia.org/wiki/Spectrogram) are images that show the frequencies present in sound data over a period of time. These images will then be fed to the convnet together with the label. Making the spectrogramsTo make the spectrograms we will make use of the scipy signal processing library; this is pretty straightforward.
###Code
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy.io import wavfile
#################
# Read the wav file (stereo)
# -
# convert to mono if needed and reshape, the bird data should already be mono
#-
# crop to 10 seconds (44kHz audio), it should be 10s already anyway
#-
#make the spectogram using the signal library
#- frequencies, times, spectrogram = ...
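# One possible sketch using only scipy (a hedged example; assumes `filename` points at a mono 44.1 kHz wav file):
#   sample_rate, samples = wavfile.read(filename)
#   if samples.ndim > 1:
#       samples = samples.mean(axis=1)           # average the channels down to mono
#   samples = samples[:10 * sample_rate]         # keep at most the first 10 seconds
#   frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)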
#================
# draw the image
plt.pcolormesh(times, frequencies, np.log(spectrogram), shading='auto')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.show()
# some parameters
#################
basedir = "path/to/spectogram/save/folder/" # i suggest making a spectogram/ folder to save the images in
filenames = glob("/path/to/wav/folder/*.wav")
#================
os.makedirs(basedir, exist_ok=True)
###Output
_____no_output_____
###Markdown
Convert all samples to spectrogramsWe will now use this same process to convert all audio samples to spectrograms. To speed it up, let's use the python multiprocessing Pool function.
###Code
import os
import sys
from tqdm import tqdm
from multiprocessing import Pool
import matplotlib.pyplot as plt
import math
def make_spectogram(filename):
#################
# Read the wav file (stereo)
# -
# convert to mono if needed and reshape, the bird data should already be mono
#-
# crop to 10 seconds (44kHz audio), it should be 10s already anyway
#-
#make the spectogram using the signal library
#- frequencies, times, spectrogram = ...
#================
# To make a figure without the frame :
fig = plt.figure(frameon=False)
w = 10
h = 5
fig.set_size_inches(w,h)
# To make the content fill the whole figure
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# draw the image
plt.pcolormesh(times, frequencies, np.log(spectrogram), shading='auto')
dpi = 50
savename = os.path.join(basedir, os.path.basename(filename).split(".")[0] + ".jpg")
fig.savefig(savename, dpi=dpi)
plt.close(fig)
# turn off plot generation
plt.ioff()
# this will take a few minutes.
p = Pool(4) # adjust number of cores to desired amount or use os.cpu_count()
with p:
p.map(make_spectogram, filenames)
###Output
_____no_output_____
###Markdown
Make the pytorch dataset class Read the labelsLabels are given in a csv file; each row contains the name of the sample and the classification (bird or notbird). We can read them using pandas and split them up into train/test sets.
###Code
# parameters
csv_file = '/home/parting/Downloads/warblrb10k_public/warblrb10k_public_metadata.csv'
device = "cuda:0"
train_test_split = 0.8
#################
# read csv with pandas
df = None
# split up the dataframe into train and test
train_df = None
test_df = None
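# One possible sketch (assumes a two-column csv of sample key and bird/no-bird label;
# adjust the header/index handling to the actual file layout):
#   df = pd.read_csv(csv_file, header=None, index_col=0)
#   split_at = int(len(df) * train_test_split)
#   train_df = df.iloc[:split_at]
#   test_df = df.iloc[split_at:]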
#================
print(f"length of trainset: {len(train_df)}, length of test set: {len(test_df)}")
# example
print("example:")
print(f" spectogram with key: ({'759808e5-f824-401e-9058'}) has class: ({int(df.loc['759808e5-f824-401e-9058'][1])})")
###Output
_____no_output_____
###Markdown
Pytorch dataset classTo load and preprocess all the spectrograms we will need a new pytorch dataset class, which we will then wrap in a dataloader.
###Code
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from skimage import io, transform
import pandas as pd
import numpy as np
import torch.optim as optim
import torchvision.models as models
from torch.optim.lr_scheduler import StepLR
#################
class BirdDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, df, root_dir, transform=None):
"""
Args:
            df (DataFrame): dataframe with sample keys and labels.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
# setup all the object attributes
def __len__(self):
# return the amount of samples
return None
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
#- read the image
# -get the label
#- normalize the image
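        # One possible sketch (assumes jpg spectrograms named after the sample key and a dataframe
        # indexed by that key with the label in its first column; adjust to your own layout):
        #   img_name = os.path.join(self.root_dir, self.df.index[idx] + ".jpg")
        #   image = io.imread(img_name)
        #   label = int(self.df.iloc[idx, 0])
        #   if self.transform:
        #       image = self.transform(image)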
        # return the image, target and image name (for debugging later)
return image, label, img_name
#================
# some standard transforms
transform = transforms.Compose([
# you can add other transformations in this list
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
#################
# make the train and test datasets
birddataset_train = None
birddataset_test = None
batch_size = 32
# wrap them in dataloaders
train_loader = None
test_loader = None
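# One possible sketch (uses the spectrogram folder from earlier as root_dir):
#   birddataset_train = BirdDataset(train_df, basedir, transform=transform)
#   birddataset_test = BirdDataset(test_df, basedir, transform=transform)
#   train_loader = DataLoader(birddataset_train, batch_size=batch_size, shuffle=True)
#   test_loader = DataLoader(birddataset_test, batch_size=batch_size, shuffle=False)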
#================
# try out the dataset
sample, label, fname = birddataset_train.__getitem__(0)
print(f"sample shape: {sample.shape}, label: {label}, filepath: {fname}")
###Output
_____no_output_____
###Markdown
Training and testing the modelWe'll use basically the same code that we used to train on the cifar-10 and segmentation datasets, so I'm not going to give you a lot of help here. If you're curious, the original paper achieved about 88% accuracy on this dataset, how close can you get ;)?
###Code
#################
def train(model, device, train_loader, optimizer, criterion, epoch, log_interval=5, dry_run=False):
model.train()
# do training
def test(model, device, criterion, test_loader):
model.eval()
# do testing
lr = 0.001
gamma = 0.7
epochs = 5
# prepare a pretrained model to train and use.
# define optimizer, loss function, scheduler
# train, test after each epoch
#================
# save model
# torch.save(resnext50.state_dict(), "bird_class_model.pt")
# load model
# resnext50 = models.resnext50_32x4d()
# resnext50.fc = nn.Linear(2048, birddataset_train.n_classes)
# resnext50.load_state_dict(torch.load("bird_class_model.pt"))
# resnext50.eval()
# resnext50 = resnext50.to(device)
print("done!")
###Output
_____no_output_____
###Markdown
Inspecting the resultsNow that everything is working, let's look at some examples. You probably can't play the sounds in this notebook, so open them outside of the notebook (download them if needed) if you want to get a feel for what the model can and can't do.
###Code
import matplotlib.pyplot as plt
def plot(image_batch, target_batch, fnames):
image = image_batch[0]
image2 = image_batch[1]
target = target_batch[0]
image = image.permute(1, 2, 0)
image2 = image2.permute(1, 2, 0)
image_batch = image_batch.to(device)
output = resnext50(image_batch).cpu()
image_batch = image_batch.cpu()
pred = torch.argmax(output, dim=1)
# we're plotting normalized images instead of the originals, but it gets the point across I think
fig, axs = plt.subplots(1, 2, figsize=(20,10))
axs[0].imshow(image.numpy())
axs[0].set_title(f"class: {target_batch[0]}, predicted: {pred[0].item()},\n filepath: {fnames[0]}")
axs[1].imshow(image2.numpy())
axs[1].set_title(f"class: {target_batch[1]}, predicted: {pred[1].item()},\n filepath: {fnames[1]}")
print(fnames[0], fnames[1])
for image_batch, target_batch, fnames in test_loader:
plot(image_batch, target_batch, fnames)
break
###Output
_____no_output_____ |
04_pandas_practice_2.ipynb | ###Markdown
Pandas Practice====After the notebook with a lot of new input, let's start applying it entirely on your own. For this purpose we will use one of the most standard real-life datasets: it's called the Iris Dataset, and it is all about the plant iris. Let's learn a little bit more about the dataset by looking at it. Learning ObjectivesBy the end of this notebook, you will feel more comfortable with your newly acquired knowledge, as you will see that you can- load data from csv files into DataFrames- access data stored in DataFrames- use the general functions of a DataFrame to answer questions about the data- create insightful plots using the Pandas function `.plot()`- explain what conclusions you draw from these visualizations.
###Code
# import pandas
import pandas as pd
# load the data
df = pd.read_csv('data/iris.csv')
###Output
_____no_output_____
###Markdown
Let us first have a look at the head of the table, and maybe also at the last 10 rows...
###Code
df.tail(10)
###Output
_____no_output_____
###Markdown
How many irises are in the data set?How many different species are there?
###Code
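# the number of irises is simply the number of rows
print("number of irises:", len(df))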
df['species'].nunique()
###Output
_____no_output_____
###Markdown
Calculate the mean, median, mode for petal length
###Code
df['petal_length'].mean()
df['petal_length'].median()
df['petal_length'].mode()
###Output
_____no_output_____
###Markdown
What can you conclude about the distribution for petal length?
###Code
df['petal_length'].plot(kind='hist')
###Output
_____no_output_____
###Markdown
What are the smallest and largest values for petal length?
###Code
#iris[(iris['petal_length'].min()) & (iris['petal_length'].max())]
print("min: " + str(df['petal_length'].min()))
print("max: " + str(df['petal_length'].max()))
###Output
min: 1.0
max: 6.9
###Markdown
Calculate the variance and standard deviation for petal length
###Code
df['petal_length'].std()
df['petal_length'].var()
###Output
_____no_output_____
###Markdown
Calculate the basic descriptive statistics for all columns of the iris data set with a single command.
###Code
df.describe()
###Output
_____no_output_____
###Markdown
What is the overall average for sepal length?
###Code
df['sepal_length'].mean()
###Output
_____no_output_____
###Markdown
Use DataFrame grouping function to determine the count by species.
###Code
a = df.groupby('species')
a.count()
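# alternatively, a single series with the same counts:
# df['species'].value_counts()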
###Output
_____no_output_____
###Markdown
Use DataFrame grouping function to determine the average length and width of sepals and petals by species.
###Code
df.groupby('species').mean()
###Output
_____no_output_____
###Markdown
Add the sum of the sepal width and length as a new column to your data frame.
###Code
# add the sum of sepal length and sepal width as a new column
df.eval('sepal_sum = sepal_length + sepal_width', inplace = True)
df.columns
###Output
_____no_output_____
###Markdown
Visualize petal length with a histogram
###Code
df['petal_length'].plot(kind='hist')
###Output
_____no_output_____
###Markdown
Describe the distribution
###Code
df['petal_length'].describe()
###Output
_____no_output_____
###Markdown
Which is more informative: the summary statistics or visualizations?
###Code
#summary statistics
###Output
_____no_output_____
###Markdown
Visualize petal length and width with a "scatter_matrix"
###Code
df.plot(kind = "scatter", x= "petal_length", y = "petal_width")
###Output
_____no_output_____
###Markdown
Describe the joint distribution
###Code
# The larger the petal length, the larger the petal width.
###Output
_____no_output_____
###Markdown
Create a new column with a rough estimate of petal area by multiplying petal length and width together.
###Code
df.eval('petal_area = petal_length * petal_width', inplace = True)
df
###Output
_____no_output_____
###Markdown
Create a new dataframe with petal areas greater than $1cm^2$.
###Code
df.eval("petal_bigger = petal_area > 1", inplace = True)
df
###Output
_____no_output_____
###Markdown
Create 3 new dataframes, one for each species. Use the entire dataset.
###Code
a = df[(df['species'] == "Iris-setosa")]
#df.eval("i_s = df[(df['species'] == "Iris-setosa")]", inplace = True)
a
b = df[(df['species'] == "Iris-versicolor")]
b
c = df[(df['species'] == "Iris-virginica")]
c
###Output
_____no_output_____ |
lecture-9/DataDive-Lecture9.ipynb | ###Markdown
Data Dive Week 9: Decision TreesThis week we take a look at *decision trees*, our second type of classification model, which brings us deeper into machine learning territory. We'll be using `scikit-learn` in today's exercise. *** This week we'll be illustrating how decision trees work using the Titanic survivor dataset available on [Kaggle](https://www.kaggle.com/c/titanic/data). We'll look at a variety of variables to help us learn to predict whether a given passenger on the Titanic was able to survive. There is a ton out on the web (including [here](https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/)) about this dataset, as it's popular among those just coming up to speed on machine learning classification models. Play around and use what you learn in class to join [the Kaggle competition](https://www.kaggle.com/c/titanic)! *** Data Dictionary|Variable|Definition|Key|| --- | --- |:---|| survival | Survival | 0 = No, 1 = Yes || pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd || sex | Sex | | | Age | Age in years | | | sibsp | Number of siblings / spouses aboard the Titanic | | | parch | Number of parents / children aboard the Titanic | | | ticket | Ticket number | | | fare | Passenger fare | | | cabin | Cabin number| | | embarked | Port of Embarkation | C = Cherbourg (France), Q = Queenstown (Ireland), S = Southampton (England) |
###Code
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix
from sklearn.model_selection import KFold, cross_val_score
# Used for visualizing trees, but not strictly necessary
from io import StringIO  # sklearn.externals.six was removed in newer scikit-learn releases
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
%matplotlib inline
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Load and summarize data
###Code
df = pd.read_csv('https://grantmlong.com/data/titanic.csv')
df.head()
df.Survived.describe()
###Output
_____no_output_____
###Markdown
Summarize survival by age.
###Code
df.loc[(df.Survived==0), 'Age'].hist(bins=20, alpha=.6, color='red', figsize=[15, 5])
df.loc[(df.Survived==1), 'Age'].hist(bins=20, alpha=.6, color='blue')
###Output
_____no_output_____
###Markdown
Summarize survival by sex.
###Code
df[['Sex', 'Survived']].groupby('Sex').agg(['mean', 'count'])
###Output
_____no_output_____
###Markdown
Find and Count Nulls
###Code
df.isna().sum()
###Output
_____no_output_____
###Markdown
TODO: Summarize by Pclass, point of embarkment Data Cleaning and Feature EngineeringSadly `scikit-learn` will only let us use numeric or boolean variables to train our decision tree, so let's transform some of our variables to address that. * Create booleans for each of the Embarkment points.* Create a boolean for is_male. * Create a boolean for whether someone has a cabin. * **TODO, time permitting:** create identifiers for passengers in A, B, C, and D cabinsMoreover, some of our ages are missing, so let's enter the missing values as 100 for now.
###Code
# Embarkment booleans
for k in df.Embarked.unique():
if type(k)==str:
df['emb_' + k] = (df.Embarked==k)*1
# Sex boolean
df['is_male'] = (df.Sex=='male')*1
# Has cabin boolean
df.loc[:, 'has_cabin'] = 0
df.loc[df.Cabin.notna(), 'has_cabin'] = 1
# Age fill
df.loc[df.Age.isna(), 'Age'] = 100
print(list(df))
df.head()
###Output
_____no_output_____
###Markdown
Let's assign our clean, model-ready features to a list so we can call them easily while training our model.
###Code
features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare',
'emb_S', 'emb_C', 'emb_Q', 'is_male', 'has_cabin']
valid = df[features].notna().all(axis=1)
print(len(df), sum(valid))
###Output
_____no_output_____
###Markdown
Building a Decision TreeNow that we have variables in good shape, we can start modeling. Let's train a simple tree and see how it performs. Note: for the documentation on `DecisionTreeClassifier`, see [here](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html).
###Code
dtree=DecisionTreeClassifier(
criterion='entropy',
random_state=20181105,
max_depth=5,
#min_samples_split=2,
#min_samples_leaf=1,
#max_features=None,
#max_leaf_nodes=None,
)
dtree.fit(df[features], df['Survived'])
###Output
_____no_output_____
###Markdown
Visualize the tree. *Note: there's a strong chance this will not work if you do not have `graphviz` installed.* For more on visualizing decision trees see [here](https://chrisalbon.com/machine_learning/trees_and_forests/visualize_a_decision_tree/), and for more on installing graphviz see [here](https://graphviz.gitlab.io). To install `graphviz` on my Macbook Air, I used `brew install graphviz`.
###Code
dot_data = StringIO()
export_graphviz(dtree,
out_file=dot_data,
filled=True,
rounded=True,
feature_names=features,
special_characters=True
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
###Output
_____no_output_____
###Markdown
Calculate metrics from in-sample performance
###Code
pred_survival = dtree.predict(df[features])
print(confusion_matrix(df.Survived, pred_survival), '\n')
print('Accuracy: %0.3f' % accuracy_score(df.Survived, pred_survival))
print('Precision: %0.3f' % precision_score(df.Survived, pred_survival))
print('Recall: %0.3f' % recall_score(df.Survived, pred_survival))
###Output
_____no_output_____
###Markdown
Wait, are nonlinear models actually doing better here? * Let's run a logistic regression to compare
###Code
logreg = LogisticRegression(random_state=20181105, solver='lbfgs')
logreg.fit(df[features], df['Survived'])
pred_survival = logreg.predict(df[features])
print(confusion_matrix(df.Survived, pred_survival), '\n')
print('Accuracy: %0.3f' % accuracy_score(df.Survived, pred_survival))
print('Precision: %0.3f' % precision_score(df.Survived, pred_survival))
print('Recall: %0.3f' % recall_score(df.Survived, pred_survival))
###Output
_____no_output_____
###Markdown
Selecting Hyperparameters with Cross Validation* First, we use the `KFold` function from `sci-kit learn` to generate five folds for cross validation. We can show the balance of the survivor rate among the different folds to get a better idea of what's going on.* Next, we train a different decision tree model against each of the folds and track our performance.* Finally, we track average cv metrics for different values of our hyperparameters.
###Code
k_fold = KFold(n_splits=5, shuffle=True, random_state=20181105)
# Print the number of observations and survivor rate for
for train_indices, test_indices in k_fold.split(df[features]):
print('Train: n=%i, s_rate=%0.2f | test: n=%i, s_rate=%0.2f ' %
(df.loc[train_indices, 'Survived'].count(),
df.loc[train_indices, 'Survived'].mean(),
df.loc[test_indices, 'Survived'].count(),
df.loc[test_indices, 'Survived'].mean(),
)
)
###Output
_____no_output_____
###Markdown
Creating a function to fit our model and return relevant metrics makes it easy to track cross validation performance over different values of our parameters.
###Code
def get_cv_results(classifier):
results = []
for train, test in k_fold.split(df[features]):
classifier.fit(df.loc[train, features], df.loc[train, 'Survived'])
y_predicted = classifier.predict(df.loc[test, features])
accuracy = accuracy_score(df.loc[test, 'Survived'], y_predicted)
results.append(accuracy)
return np.mean(results), np.std(results)
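# usage sketch: mean_acc, std_acc = get_cv_results(DecisionTreeClassifier(criterion='entropy', min_samples_split=10))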
###Output
_____no_output_____
###Markdown
Let's track mean and variance of accuracy for different values of the minimum samples per split.
###Code
hp_values = [2, 5, 7, 10, 15, 20, 50, 60, 70, 80, 90, 100, 120, 150]
all_mu = []
all_sigma = []
for m in hp_values:
dtree=DecisionTreeClassifier(
criterion='entropy',
random_state=20181105,
min_samples_split=m,
#max_depth=m,
#min_samples_leaf=m,
#max_features=m,
#max_leaf_nodes=m,
)
mu, sigma = get_cv_results(dtree)
all_mu.append(mu)
all_sigma.append(sigma)
print(m, mu, sigma)
plt.figure(figsize=(14, 5))
plt.plot(hp_values, all_mu)
plt.ylabel('Cross Validation Accuracy')
plt.xlabel('Minimum Samples Per Split')
plt.figure(figsize=(14, 5))
plt.plot(hp_values, all_sigma)
plt.ylabel('Cross Validation Std Dev.')
plt.xlabel('Minimum Samples Per Split')
###Output
_____no_output_____
###Markdown
Pretty cool, right? We can take a quick look again at how these results compare to logistic regression.* What do you make of these results?* Is this a better model? Why or why not?
###Code
logreg = LogisticRegression(random_state=20181105, solver='lbfgs')
get_cv_results(logreg)
###Output
_____no_output_____
###Markdown
Selecting Our Model and Applying It to Our Test Set From this, it seems like `min_samples_split=70` might provide our best fit. We can train our best model using that value. We can then read in our holdout test set from the Kaggle competition to enter our predictions. We'll first double check and see if our model makes sense by taking a closer look at our predictions.
###Code
dtree=DecisionTreeClassifier(
criterion='entropy',
random_state=20181105,
    min_samples_split=70,
)
# Here we train our final model against all of our validation data.
dtree.fit(df.loc[:, features], df.loc[:, 'Survived'])
###Output
_____no_output_____
###Markdown
Read in our test data and apply the same transformations as our training set.
###Code
test_df = pd.read_csv('https://grantmlong.com/data/titanic_test.csv')
# Embarkment booleans
for k in test_df.Embarked.unique():
if type(k)==str:
test_df['emb_' + k] = (test_df.Embarked==k)*1
# Sex boolean
test_df['is_male'] = (test_df.Sex=='male')*1
# Has cabin boolean
test_df.loc[:, 'has_cabin'] = 0
test_df.loc[test_df.Cabin.notna(), 'has_cabin'] = 1
# Age fill
test_df.loc[test_df.Age.isna(), 'Age'] = 100
# Fare fill
test_df.loc[test_df.Fare.isna(), 'Fare'] = test_df.loc[test_df.Fare.notna(), 'Fare'].median()
print(list(test_df))
test_df.head()
###Output
_____no_output_____
###Markdown
Rank the most likely to survive according to our model.
###Code
# Calculate the predicted probability of survival for each test passenger
test_probabilities = dtree.predict_proba(test_df[features])[:,1]
test_df['survival_likelihood'] = test_probabilities
readable_features = ['Name', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch',
'Ticket', 'Fare', 'Cabin', 'Embarked', 'survival_likelihood']
# Find the rankings based on the probabilities
probability_rankings = np.argsort(test_probabilities)
###Output
_____no_output_____
###Markdown
Most Likely to Survive:
###Code
test_df.loc[probability_rankings[-20:], readable_features]
###Output
_____no_output_____
###Markdown
Most Likely to Die:
###Code
test_df.loc[probability_rankings[:20], readable_features]
###Output
_____no_output_____ |
datasets/versioning/cough-classification/wiai-crowdsourced/default-clf.ipynb | ###Markdown
Creates a dataset version for cough classification learning tasks.
###Code
%load_ext autoreload
%autoreload 2
from os import makedirs, symlink, rmdir
from os.path import join, dirname, exists, isdir, basename, splitext
from shutil import rmtree
import math
from collections import defaultdict
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from librosa import get_duration
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from termcolor import colored
from cac.utils.io import save_yml
from cac.utils.pandas import apply_filters
random_state = 0
np.random.seed(random_state)
# directory where the data resides
data_root = '/data/wiai-crowdsourced/'
save_root = join(data_root, 'processed')
version_dir = join(save_root, 'versions')
makedirs(version_dir, exist_ok=True)
save_audio_dir = join(save_root, 'audio')
attributes = pd.read_csv(join(save_root, 'attributes.csv'))
annotation = pd.read_csv(join(save_root, 'annotation.csv'))
annotation.shape, attributes.shape
type(annotation['classification'][0])
annotation['classification'] = annotation['classification'].apply(eval)
type(annotation['classification'][0])
annotation['classification'][0]
annotation = apply_filters(annotation, {'audio_type': ['cough_1', 'cough_2', 'cough_3']})
annotation.shape
###Output
_____no_output_____
###Markdown
Split patients into training, validation, and test sets
###Code
all_patients = list(annotation['id'].unique())
NUM_PATIENTS_TO_SELECT = 100
all_patients = np.random.choice(all_patients, NUM_PATIENTS_TO_SELECT, replace=False)
len(all_patients)
train_ids, val_test_ids = train_test_split(all_patients, test_size=0.2, random_state=random_state)
val_ids, test_ids = train_test_split(val_test_ids, test_size=0.5, random_state=random_state)
len(train_ids), len(val_ids), len(test_ids)
df_train = apply_filters(annotation, {'id': train_ids}, reset_index=True)
df_train = df_train.drop(columns=['unsupervised', 'users', 'audio_type', 'id'])
df_train.rename({'classification': 'label'}, axis=1, inplace=True)
df_train['label'] = df_train['label'].apply(lambda x: {'classification': x})
df_val = apply_filters(annotation, {'id': val_ids}, reset_index=True)
df_val = df_val.drop(columns=['unsupervised', 'users', 'audio_type', 'id'])
df_val.rename({'classification': 'label'}, axis=1, inplace=True)
df_val['label'] = df_val['label'].apply(lambda x: {'classification': x})
df_test = apply_filters(annotation, {'id': test_ids}, reset_index=True)
df_test = df_test.drop(columns=['unsupervised', 'users', 'audio_type', 'id'])
df_test.rename({'classification': 'label'}, axis=1, inplace=True)
df_test['label'] = df_test['label'].apply(lambda x: {'classification': x})
df_all = apply_filters(annotation, {'id': all_patients}, reset_index=True)
df_all = df_all.drop(columns=['unsupervised', 'users', 'audio_type', 'id'])
df_all.rename({'classification': 'label'}, axis=1, inplace=True)
df_all['label'] = df_all['label'].apply(lambda x: {'classification': x})
df_train.shape, df_val.shape, df_test.shape, df_all.shape
version = 'default-clf'
save_path = join(save_root, 'versions', '{}.yml'.format(version))
description = dict()
description['description'] = 'version for COVID vs non COVID task(s) on cough with random split'
for name, _df in zip(['all', 'train', 'val', 'test'], [df_all, df_train, df_val, df_test]):
description[name] = {
'file': _df['file'].values.tolist(),
'label': _df['label'].values.tolist()
}
# save description
makedirs(dirname(save_path), exist_ok=True)
save_yml(description, save_path)
###Output
_____no_output_____ |
Quiminformatica/qinfo - Acessando o banco de dados ChEMBL.ipynb | ###Markdown
Accessing the ChEMBL database through the ChEMBL webresource clientTo use this tool, you need to install the client with:`pip install chembl_webresource_client`Once the client is installed, import it with:
###Code
from chembl_webresource_client.new_client import new_client
###Output
_____no_output_____
###Markdown
The [GitHub repository](https://github.com/chembl/chembl_webresource_client) suggests a few ways to use it, such as: Searching for a structure (molecule) by name
###Code
molecule = new_client.molecule
res = molecule.search('viagra')
type(res)
###Output
_____no_output_____
###Markdown
Note that the result is a QuerySet object containing an extensive dictionary:
###Code
res
# Dictionary keys
res[0].keys()
print("Tipo de molécula:", res[0]['molecule_type'])
print("Número identificador no ChEMBL:", res[0]['molecule_chembl_id'])
print("É um medicamento tomado por via oral?", res[0]['oral'])
print("SMILES:", res[0]['molecule_structures']['canonical_smiles']) # molecule_structures é um dict dentro de um dict
###Output
Molecule type: Small molecule
ChEMBL ID: CHEMBL192
Is it an orally administered drug? True
SMILES: CCCc1nn(C)c2c(=O)[nH]c(-c3cc(S(=O)(=O)N4CCN(C)CC4)ccc3OCC)nc12
###Markdown
Searching for a target by gene name
###Code
target = new_client.target
gene_name = 'BRD4'
res = target.search(gene_name)
res[0].keys()
###Output
_____no_output_____
###Markdown
Find compounds similar to aspirin (similarity > 70%)
###Code
molecule = new_client.molecule
similarity = new_client.similarity
aspirin_chembl_id = molecule.search('aspirin')[0]['molecule_chembl_id']
res = similarity.filter(chembl_id=aspirin_chembl_id, similarity=70)
for item in res:
print(item['molecule_structures']['canonical_smiles'])
###Output
CC(=O)Oc1ccccc1C(=O)O.NCCCC[C@H](N)C(=O)O
CC(=O)Oc1ccccc1C(=O)O.NCCCCC(N)C(=O)O
CC(=O)Oc1ccccc1C(=O)[O-].CC(=O)Oc1ccccc1C(=O)[O-].NC(N)=O.[Ca+2]
CC(=O)Oc1ccccc1C(=O)O.CC(=O)Oc1ccccc1C(=O)O.NC(N)=O
CC(=O)Oc1ccccc1C(=O)Oc1ccccc1C(=O)O
O=C(O)Oc1ccccc1C(=O)O
CC(=O)Oc1cccc(C(=O)O)c1OC(C)=O
###Markdown
Generate a dataframe with all inhibitors of a targetI wrote a function that takes as input the ChEMBL code of the desired target. To find a target's code, go to the [ChEMBL website](https://www.ebi.ac.uk/chembl/) and search for the target in the *Search in ChEMBL* bar. The function returns a dataframe containing the structures (SMILES) and activities (pChEMBL) of the compounds
###Code
import pandas as pd
activities = new_client.activity
def get_inhibitors(target):
"""
Returns a dataframe with SMILES and pChEMBL values of inhibitors for a given target
Example: target = Cathepsin B = "CHEMBL4072"
"""
inhibitors = activities.filter(target_chembl_id=target, pchembl_value__isnull=False)
data = [[item["canonical_smiles"], item["pchembl_value"]] for item in inhibitors]
df = pd.DataFrame(data, columns=["SMILES", "pChEMBL"])
return df
# Finding inhibitors of the enzyme Cathepsin B
df = get_inhibitors("CHEMBL4072") # ChEMBL code for Cathepsin B
df.dropna(inplace=True)
df
###Output
_____no_output_____ |
Natural Language Processing/Course 2 - Natural Language Processing with Probabilistic Models/Assignments/Week 1/Assignment 1 - Auto Correct.ipynb | ###Markdown
Assignment 1: Auto CorrectWelcome to the first assignment of Course 2. This assignment will give you a chance to brush up on your python and probability skills. In doing so, you will implement an auto-correct system that is very effective and useful. Outline- [0. Overview](0) - [0.1 Edit Distance](0-1)- [1. Data Preprocessing](1) - [1.1 Exercise 1](ex-1) - [1.2 Exercise 2](ex-2) - [1.3 Exercise 3](ex-3)- [2. String Manipulation](2) - [2.1 Exercise 4](ex-4) - [2.2 Exercise 5](ex-5) - [2.3 Exercise 6](ex-6) - [2.4 Exercise 7](ex-7)- [3. Combining the edits](3) - [3.1 Exercise 8](ex-8) - [3.2 Exercise 9](ex-9) - [3.3 Exercise 10](ex-10)- [4. Minimum Edit Distance](4) - [4.1 Exercise 11](ex-11)- [5. Backtrace (Optional)](5) 0. OverviewYou use autocorrect every day on your cell phone and computer. In this assignment, you will explore what really goes on behind the scenes. Of course, the model you are about to implement is not identical to the one used in your phone, but it is still quite good. By completing this assignment you will learn how to: - Get a word count given a corpus- Get a word probability in the corpus - Manipulate strings - Filter strings - Implement Minimum edit distance to compare strings and to help find the optimal path for the edits. - Understand how dynamic programming worksSimilar systems are used everywhere. - For example, if you type in the word **"I am lerningg"**, chances are very high that you meant to write **"learning"**, as shown in **Figure 1**. Figure 1 0.1 Edit DistanceIn this assignment, you will implement models that correct words that are 1 and 2 edit distances away. - We say two words are n edit distance away from each other when we need n edits to change one word into another. An edit could consist of one of the following options: - Delete (remove a letter): ‘hat’ => ‘at, ha, ht’- Switch (swap 2 adjacent letters): ‘eta’ => ‘eat, tea,...’- Replace (change 1 letter to another): ‘jat’ => ‘hat, rat, cat, mat, ...’- Insert (add a letter): ‘te’ => ‘the, ten, ate, ...’You will be using the four methods above to implement an Auto-correct. - To do so, you will need to compute probabilities that a certain word is correct given an input. This auto-correct you are about to implement was first created by [Peter Norvig](https://en.wikipedia.org/wiki/Peter_Norvig) in 2007. - His [original article](https://norvig.com/spell-correct.html) may be a useful reference for this assignment.The goal of our spell check model is to compute the following probability:$$P(c|w) = \frac{P(w|c)\times P(c)}{P(w)} \tag{Eqn-1}$$The equation above is [Bayes Rule](https://en.wikipedia.org/wiki/Bayes%27_theorem). - Equation 1 says that the probability of a word being correct $P(c|w) $is equal to the probability of having a certain word $w$, given that it is correct $P(w|c)$, multiplied by the probability of being correct in general $P(C)$ divided by the probability of that word $w$ appearing $P(w)$ in general.- To compute equation 1, you will first import a data set and then create all the probabilities that you need using that data set. Part 1: Data Preprocessing
###Code
import re
from collections import Counter
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
As in any other machine learning task, the first thing you have to do is process your data set. - Many courses load in pre-processed data for you. - However, in the real world, when you build these NLP systems, you load the datasets and process them.- So let's get some real world practice in pre-processing the data!Your first task is to read in a file called **'shakespeare.txt'** which is found in your file directory. To look at this file you can go to `File ==> Open `. Exercise 1Implement the function `process_data` which 1) Reads in a corpus (text file)2) Changes everything to lowercase3) Returns a list of words. Options and Hints- If you would like more of a real-life practice, don't open the 'Hints' below (yet) and try searching the web to derive your answer.- If you want a little help, click on the green "General Hints" section by clicking on it with your mouse.- If you get stuck or are not getting the expected results, click on the green 'Detailed Hints' section to get hints for each step that you'll take to complete this function. General Hints General Hints to get started Python input and output Python 're' documentation Detailed Hints Detailed hints if you're stuck Use 'with' syntax to read a file Decide whether to use 'read()' or 'readline(). What's the difference? Choose whether to use either str.lower() or str.lowercase(). What is the difference? Use re.findall(pattern, string) Look for the "Raw String Notation" section in the Python 're' documentation to understand the difference between r'\W', r'\W' and '\\W'. For the pattern, decide between using '\s', '\w', '\s+' or '\w+'. What do you think are the differences?
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: process_data
def process_data(file_name):
"""
Input:
A file_name which is found in your current directory. You just have to read it in.
Output:
words: a list containing all the words in the corpus (text file you read) in lower case.
"""
words = [] # return this variable correctly
### START CODE HERE ###
with open(file_name) as fp:
data = fp.read()
data = data.lower()
words = re.findall('\w+', data)
### END CODE HERE ###
return words
###Output
_____no_output_____
###Markdown
Note, in the following cell, 'words' is converted to a python `set`. This eliminates any duplicate entries.
###Code
#DO NOT MODIFY THIS CELL
word_l = process_data('shakespeare.txt')
vocab = set(word_l) # this will be your new vocabulary
print(f"The first ten words in the text are: \n{word_l[0:10]}")
print(f"There are {len(vocab)} unique words in the vocabulary.")
###Output
The first ten words in the text are:
['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']
There are 6116 unique words in the vocabulary.
###Markdown
Expected Output```PythonThe first ten words in the text are: ['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']There are 6116 unique words in the vocabulary.``` Exercise 2Implement a `get_count` function that returns a dictionary- The dictionary's keys are words- The value for each word is the number of times that word appears in the corpus. For example, given the following sentence: **"I am happy because I am learning"**, your dictionary should return the following: Key Value I 2 am 2 happy 1 because 1 learning 1 **Instructions**: Implement a `get_count` which returns a dictionary where the key is a word and the value is the number of times the word appears in the list. Hints Try implementing this using a for loop and a regular dictionary. This may be good practice for similar coding interview questions You can also use defaultdict instead of a regualr dictionary, along with the for loop Otherwise, to skip using a for loop, you can use Python's Counter class
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_count
def get_count(word_l):
'''
Input:
word_l: a set of words representing the corpus.
Output:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
'''
word_count_dict = {} # fill this with word counts
### START CODE HERE
word_count_dict = Counter(word_l)
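    # equivalent loop-based approach (as suggested in the hints):
    #   for w in word_l:
    #       word_count_dict[w] = word_count_dict.get(w, 0) + 1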
### END CODE HERE ###
return word_count_dict
#DO NOT MODIFY THIS CELL
word_count_dict = get_count(word_l)
print(f"There are {len(word_count_dict)} key values pairs")
print(f"The count for the word 'thee' is {word_count_dict.get('thee',0)}")
###Output
There are 6116 key values pairs
The count for the word 'thee' is 240
###Markdown
Expected Output```PythonThere are 6116 key values pairsThe count for the word 'thee' is 240``` Exercise 3Given the dictionary of word counts, compute the probability that each word will appear if randomly selected from the corpus of words.$$P(w_i) = \frac{C(w_i)}{M} \tag{Eqn-2}$$where $C(w_i)$ is the total number of times $w_i$ appears in the corpus.$M$ is the total number of words in the corpus.For example, the probability of the word 'am' in the sentence **'I am happy because I am learning'** is:$$P(am) = \frac{C(w_i)}{M} = \frac {2}{7} \tag{Eqn-3}.$$**Instructions:** Implement `get_probs` function which gives you the probability that a word occurs in a sample. This returns a dictionary where the keys are words, and the value for each word is its probability in the corpus of words. HintsGeneral advice Use dictionary.values() Use sum() The cardinality (number of words in the corpus should be equal to len(word_l). You will calculate this same number, but using the word count dictionary. If you're using a for loop: Use dictionary.keys() If you're using a dictionary comprehension: Use dictionary.items()
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_probs
def get_probs(word_count_dict):
'''
Input:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
Output:
probs: A dictionary where keys are the words and the values are the probability that a word will occur.
'''
probs = {} # return this variable correctly
### START CODE HERE ###
M = sum(word_count_dict.values())
for key in word_count_dict.keys():
probs[key] = word_count_dict[key] / M
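    # equivalent dictionary comprehension (as suggested in the hints):
    #   probs = {word: count / M for word, count in word_count_dict.items()}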
### END CODE HERE ###
return probs
#DO NOT MODIFY THIS CELL
probs = get_probs(word_count_dict)
print(f"Length of probs is {len(probs)}")
print(f"P('thee') is {probs['thee']:.4f}")
###Output
Length of probs is 6116
P('thee') is 0.0045
###Markdown
Expected Output```PythonLength of probs is 6116P('thee') is 0.0045``` Part 2: String ManipulationsNow, that you have computed $P(w_i)$ for all the words in the corpus, you will write a few functions to manipulate strings so that you can edit the erroneous strings and return the right spellings of the words. In this section, you will implement four functions: * `delete_letter`: given a word, it returns all the possible strings that have **one character removed**. * `switch_letter`: given a word, it returns all the possible strings that have **two adjacent letters switched**.* `replace_letter`: given a word, it returns all the possible strings that have **one character replaced by another different letter**.* `insert_letter`: given a word, it returns all the possible strings that have an **additional character inserted**. List comprehensionsString and list manipulation in python will often make use of a python feature called [list comprehensions](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions). The routines below will be described as using list comprehensions, but if you would rather implement them in another way, you are free to do so as long as the result is the same. Further, the following section will provide detailed instructions on how to use list comprehensions and how to implement the desired functions. If you are a python expert, feel free to skip the python hints and move to implementing the routines directly. Python List Comprehensions embed a looping structure inside of a list declaration, collapsing many lines of code into a single line. If you are not familiar with them, they seem slightly out of order relative to for loops. Figure 2 The diagram above shows that the components of a list comprehension are the same components you would find in a typical for loop that appends to a list, but in a different order. With that in mind, we'll continue the specifics of this assignment. We will be very descriptive for the first function, `deletes()`, and less so in later functions as you become familiar with list comprehensions. Exercise 4**Instructions for delete_letter():** Implement a `delete_letter()` function that, given a word, returns a list of strings with one character deleted. For example, given the word **nice**, it would return the set: {'ice', 'nce', 'nic', 'nie'}. **Step 1:** Create a list of 'splits'. This is all the ways you can split a word into Left and Right: For example, 'nice is split into : `[('', 'nice'), ('n', 'ice'), ('ni', 'ce'), ('nic', 'e'), ('nice', '')]`This is common to all four functions (delete, replace, switch, insert). Figure 3 **Step 2:** This is specific to `delete_letter`. Here, we are generating all words that result from deleting one character. This can be done in a single line with a list comprehension. You can make use of this type of syntax: `[f(a,b) for a, b in splits if condition]` For our 'nice' example you get: ['ice', 'nce', 'nie', 'nic'] Figure 4 Levels of assistanceTry this exercise with these levels of assistance. - We hope that this will make it both a meaningful experience but also not a frustrating experience. - Start with level 1, then move onto level 2, and 3 as needed. - Level 1. Try to think this through and implement this yourself. - Level 2. Click on the "Level 2 Hints" section for some hints to get started. - Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. 
- If you are still stuck, look at the images in the "list comprehensions" section above. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Do this in a loop or list comprehension, so that you have a list of tuples. For example, "cake" can get split into "ca" and "ke". They're stored in a tuple ("ca","ke"), and the tuple is appended to a list. We'll refer to these as L and R, so the tuple is (L,R) When choosing the range for your loop, if you input the word "cans" and generate the tuple ('cans',''), make sure to include an if statement to check the length of that right-side string (R) in the tuple (L,R) deletes: Go through the list of tuples and combine the two strings together. You can use the + operator to combine two strings When combining the tuples, make sure that you leave out a middle character. Use array slicing to leave out the first character of the right substring.
###Code
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: deletes
def delete_letter(word, verbose=False):
'''
Input:
word: the string/word for which you will generate all possible words
in the vocabulary which have 1 missing character
Output:
delete_l: a list of all possible strings obtained by deleting 1 character from word
'''
delete_l = []
split_l = []
### START CODE HERE ###
split_l = [(word[:c],word[c:]) for c in range(len(word))]
delete_l = [a+b[1:] for a, b in split_l]
### END CODE HERE ###
if verbose: print(f"input word {word}, \nsplit_l = {split_l}, \ndelete_l = {delete_l}")
return delete_l
delete_word_l = delete_letter(word="cans",
verbose=True)
###Output
input word cans,
split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's')],
delete_l = ['ans', 'cns', 'cas', 'can']
###Markdown
Expected Output```CPPNote: You might get a slightly different result with split_linput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 1- Notice how it has the extra tuple `('cans', '')`.- This will be fine as long as you have checked the size of the right-side substring in tuple (L,R).- Can you explain why this will give you the same result for the list of deletion strings (delete_l)?```CPPinput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 2If you end up getting the same word as your input word, like this:```Pythoninput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can', 'cans']```- Check how you set the `range`.- See if you check the length of the string on the right-side of the split.
###Code
# test # 2
print(f"Number of outputs of delete_letter('at') is {len(delete_letter('at'))}")
###Output
Number of outputs of delete_letter('at') is 2
###Markdown
Expected output```CPPNumber of outputs of delete_letter('at') is 2``` Exercise 5**Instructions for switch_letter()**: Now implement a function that switches two letters in a word. It takes in a word and returns a list of all the possible switches of two letters **that are adjacent to each other**. - For example, given the word 'eta', it returns {'eat', 'tea'}, but does not return 'ate'.**Step 1:** is the same as in delete_letter() **Step 2:** A list comprehension or for loop which forms strings by swapping adjacent letters. This is of the form: `[f(L,R) for L, R in splits if condition]` where 'condition' will test the length of R in a given iteration. See below. Figure 5 Levels of difficultyTry this exercise with these levels of difficulty. - Level 1. Try to think this through and implement this yourself.- Level 2. Click on the "Level 2 Hints" section for some hints to get started.- Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops To do a switch, think of the whole word as divided into 4 distinct parts. Write out 'cupcakes' on a piece of paper and see how you can split it into ('cupc', 'k', 'a', 'es') Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Splitting is the same as for delete_letter To perform the switch, go through the list of tuples and combine four strings together. You can use the + operator to combine strings The four strings will be the left substring from the split tuple, followed by the first (index 1) character of the right substring, then the zero-th character (index 0) of the right substring, and then the remaining part of the right substring. Unlike delete_letter, you will want to check that your right substring is at least a minimum length. To see why, review the previous hint bullet point (directly before this one).
###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: switches
def switch_letter(word, verbose=False):
'''
Input:
word: input string
Output:
switches: a list of all possible strings with one adjacent charater switched
'''
switch_l = []
split_l = []
### START CODE HERE ###
split_l = [(word[:c],word[c:]) for c in range(len(word))]
switch_l = [a + b[1] + b[0] + b[2:] for a,b in split_l if len(b) >= 2]
### END CODE HERE ###
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nswitch_l = {switch_l}")
return switch_l
switch_word_l = switch_letter(word="eta",
verbose=True)
###Output
Input word = eta
split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a')]
switch_l = ['tea', 'eat']
###Markdown
Expected output```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a')] switch_l = ['tea', 'eat']``` Note 1You may get this:```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')] switch_l = ['tea', 'eat']```- Notice how it has the extra tuple `('eta', '')`.- This is also correct.- Can you think of why this is the case? Note 2If you get an error```PythonIndexError: string index out of range```- Please see if you have checked the length of the strings when switching characters.
###Code
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
###Output
Number of outputs of switch_letter('at') is 1
###Markdown
Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 6**Instructions for replace_letter()**: Now implement a function that takes in a word and returns a list of strings with one **replaced letter** from the original word. **Step 1:** is the same as in `delete_letter()`**Step 2:** A list comprehension or for loop which form strings by replacing letters. This can be of the form: `[f(a,b,c) for a, b in splits if condition for c in string]` Note the use of the second for loop. It is expected in this routine that one or more of the replacements will include the original word. For example, replacing the first letter of 'ear' with 'e' will return 'ear'.**Step 3:** Remove the original input letter from the output. Hints To remove a word from a list, first store its contents inside a set() Use set.discard('the_word') to remove a word in a set (if the word does not exist in the set, then it will not throw a KeyError. Using set.remove('the_word') throws a KeyError if the word does not exist in the set.
###Code
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: replaces
def replace_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
replaces: a list of all possible strings where we replaced one letter from the original word.
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
replace_l = []
split_l = []
### START CODE HERE ###
split_l = [(word[0:c],word[c:]) for c in range(len(word))]
replace_l = [a + l + (b[1:] if len(b)> 1 else '') for a,b in split_l if b for l in letters]
replace_set=set(replace_l)
replace_set.remove(word)
### END CODE HERE ###
# turn the set back into a list and sort it, for easier viewing
replace_l = sorted(list(replace_set))
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nreplace_l {replace_l}")
return replace_l
replace_l = replace_letter(word='can',
verbose=True)
###Output
Input word = can
split_l = [('', 'can'), ('c', 'an'), ('ca', 'n')]
replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']
###Markdown
Expected Output**: ```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Note how the input word 'can' should not be one of the output words. Note 1If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how split_l has an extra tuple `('can', '')`, but the output is still the same, so this is okay. Note 2If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cana', 'canb', 'canc', 'cand', 'cane', 'canf', 'cang', 'canh', 'cani', 'canj', 'cank', 'canl', 'canm', 'cann', 'cano', 'canp', 'canq', 'canr', 'cans', 'cant', 'canu', 'canv', 'canw', 'canx', 'cany', 'canz', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how there are strings that are 1 letter longer than the original word, such as `cana`.- Please check for the case when there is an empty string `''`, and if so, do not use that empty string when setting replace_l.
###Code
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
###Output
Number of outputs of switch_letter('at') is 1
###Markdown
Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 7**Instructions for insert_letter()**: Now implement a function that takes in a word and returns a list with a letter inserted at every offset.**Step 1:** is the same as in `delete_letter()`**Step 2:** This can be a list comprehension of the form: `[f(a,b,c) for a, b in splits if condition for c in string]`
###Code
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: inserts
def insert_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
inserts: a set of all possible strings with one new letter inserted at every offset
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
insert_l = []
split_l = []
### START CODE HERE ###
split_l = [(word[0:c],word[c:]) for c in range(len(word) + 1)]
insert_l = [ a + l + b for a,b in split_l for l in letters]
### END CODE HERE ###
if verbose: print(f"Input word {word} \nsplit_l = {split_l} \ninsert_l = {insert_l}")
return insert_l
insert_l = insert_letter('at', True)
print(f"Number of strings output by insert_letter('at') is {len(insert_l)}")
###Output
Input word at
split_l = [('', 'at'), ('a', 't'), ('at', '')]
insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']
Number of strings output by insert_letter('at') is 78
###Markdown
Expected output```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']Number of strings output by insert_letter('at') is 78``` Note 1If you get a split_l like this:```PythonInput word at split_l = [('', 'at'), ('a', 't')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Notice that split_l is missing the extra tuple ('at', ''). For insertion, we actually **WANT** this tuple.- The function is not creating all the desired output strings.- Check the range that you use for the for loop. Note 2If you see this:```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Even though you may have fixed the split_l so that it contains the tuple `('at', '')`, notice that you're still missing some output strings. - Notice that it's missing strings such as 'ata', 'atb', 'atc' all the way to 'atz'.- To fix this, make sure that when you set insert_l, you allow the use of the empty string `''`.
###Code
# test # 2
print(f"Number of outputs of insert_letter('at') is {len(insert_letter('at'))}")
###Output
Number of outputs of insert_letter('at') is 78
###Markdown
Expected output```CPPNumber of outputs of insert_letter('at') is 78``` Part 3: Combining the editsNow that you have implemented the string manipulations, you will create two functions that, given a string, will return all the possible single and double edits on that string. These will be `edit_one_letter()` and `edit_two_letters()`. 3.1 Edit one letter Exercise 8**Instructions**: Implement the `edit_one_letter` function to get all the possible edits that are one edit away from a word. The edits consist of the replace, insert, delete, and optionally the switch operation. You should use the previous functions you have already implemented to complete this function. The 'switch' function is a less common edit function, so its use will be selected by an "allow_switches" input argument.Note that those functions return *lists* while this function should return a *python set*. Utilizing a set eliminates any duplicate entries. Hints Each of the functions returns a list. You can combine lists using the `+` operator. To get unique strings (avoid duplicates), you can use the set() function.
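As a quick illustration of the hints above (a sketch only, relying on the `delete_letter` and `insert_letter` functions already defined in this notebook), lists returned by the edit functions can be concatenated with `+` and de-duplicated with `set()`:
```python
# Combine two edit lists with `+`, then remove duplicates with set()
combined = delete_letter('at') + insert_letter('at')
unique_edits = set(combined)
print(len(combined), len(unique_edits))  # the set is never larger than the combined list
```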
###Code
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_one_letter
def edit_one_letter(word, allow_switches = True):
"""
Input:
word: the string/word for which we will generate all possible words that are one edit away.
Output:
edit_one_set: a set of words with one possible edit. Please return a set, not a list.
"""
edit_one_set = set()
### START CODE HERE ###
edit_one_set.update(delete_letter(word))
if allow_switches:
edit_one_set.update(switch_letter(word))
edit_one_set.update(replace_letter(word))
edit_one_set.update(insert_letter(word))
### END CODE HERE ###
return edit_one_set
tmp_word = "at"
tmp_edit_one_set = edit_one_letter(tmp_word)
# turn this into a list to sort it, in order to view it
tmp_edit_one_l = sorted(list(tmp_edit_one_set))
print(f"input word {tmp_word} \nedit_one_l \n{tmp_edit_one_l}\n")
print(f"The type of the returned object should be a set {type(tmp_edit_one_set)}")
print(f"Number of outputs from edit_one_letter('at') is {len(edit_one_letter('at'))}")
###Output
input word at
edit_one_l
['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']
The type of the returned object should be a set <class 'set'>
Number of outputs from edit_one_letter('at') is 129
###Markdown
Expected Output```CPPinput word at edit_one_l ['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']The type of the returned object should be a set Number of outputs from edit_one_letter('at') is 129``` Part 3.2 Edit two letters Exercise 9Now you can generalize this to implement to get two edits on a word. To do so, you would have to get all the possible edits on a single word and then for each modified word, you would have to modify it again. **Instructions**: Implement the `edit_two_letters` function that returns a set of words that are two edits away. Note that creating additional edits based on the `edit_one_letter` function may 'restore' some one_edits to zero or one edits. That is allowed here. This accounted for in get_corrections. Hints You will likely want to take the union of two sets. You can either use set.union() or use the '|' (or operator) to union two sets See the documentation Python sets for examples of using operators or functions of the Python set.
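As a small reminder of the set hints above (an illustration only, not part of the graded cell), `set.union()` and the `|` operator behave identically:
```python
# Two equivalent ways to take the union of candidate-word sets
a = {'ta', 'at'}
b = {'at', 'art'}
print(a | b)          # {'ta', 'at', 'art'} (printed order may vary)
print(a.union(b))     # same result
```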
###Code
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_two_letters
def edit_two_letters(word, allow_switches = True):
'''
Input:
word: the input string/word
Output:
edit_two_set: a set of strings with all possible two edits
'''
edit_two_set = set()
### START CODE HERE ###
edit_one = edit_one_letter(word, allow_switches=allow_switches)
for i in edit_one:
if i:
edit_two = edit_one_letter(i, allow_switches=allow_switches)
edit_two_set.update(edit_two)
### END CODE HERE ###
return edit_two_set
tmp_edit_two_set = edit_two_letters("a")
tmp_edit_two_l = sorted(list(tmp_edit_two_set))
print(f"Number of strings with edit distance of two: {len(tmp_edit_two_l)}")
print(f"First 10 strings {tmp_edit_two_l[:10]}")
print(f"Last 10 strings {tmp_edit_two_l[-10:]}")
print(f"The data type of the returned object should be a set {type(tmp_edit_two_set)}")
print(f"Number of strings that are 2 edit distances from 'at' is {len(edit_two_letters('at'))}")
###Output
Number of strings with edit distance of two: 2654
First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']
Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']
The data type of the returned object should be a set <class 'set'>
Number of strings that are 2 edit distances from 'at' is 7154
###Markdown
Expected Output```CPPNumber of strings with edit distance of two: 2654First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']The data type of the returned object should be a set Number of strings that are 2 edit distances from 'at' is 7154``` Part 3-3: suggest spelling suggestionsNow you will use your `edit_two_letters` function to get a set of all the possible 2 edits on your word. You will then use those strings to get the most probable word you meant to type aka your typing suggestion. Exercise 10**Instructions**: Implement `get_corrections`, which returns a list of zero to n possible suggestion tuples of the form (word, probability_of_word). **Step 1:** Generate suggestions for a supplied word: You'll use the edit functions you have developed. The 'suggestion algorithm' should follow this logic: * If the word is in the vocabulary, suggest the word. * Otherwise, if there are suggestions from `edit_one_letter` that are in the vocabulary, use those. * Otherwise, if there are suggestions from `edit_two_letters` that are in the vocabulary, use those. * Otherwise, suggest the input word.* * The idea is that words generated from fewer edits are more likely than words with more edits.Note: - Edits of one or two letters may 'restore' strings to either zero or one edit. This algorithm accounts for this by preferentially selecting lower distance edits first. Short circuitIn Python, logical operations such as `and` and `or` have two useful properties. They can operate on lists and they have ['short-circuit' behavior](https://docs.python.org/3/library/stdtypes.html). Try these:
###Code
# example of logical operation on lists or sets
print( [] and ["a","b"] )
print( [] or ["a","b"] )
#example of Short circuit behavior
val1 = ["Most","Likely"] or ["Less","so"] or ["least","of","all"] # selects first, does not evalute remainder
print(val1)
val2 = [] or [] or ["least","of","all"] # continues evaluation until there is a non-empty list
print(val2)
###Output
[]
['a', 'b']
['Most', 'Likely']
['least', 'of', 'all']
###Markdown
The logical `or` could be used to implement the suggestion algorithm very compactly. Alternately, if/then constructs could be used. **Step 2**: Create a 'best_words' dictionary where the 'key' is a suggestion and the 'value' is the probability of that word in your vocabulary. If the word is not in the vocabulary, assign it a probability of 0.**Step 3**: Select the n best suggestions. There may be fewer than n. Hints edit_one_letter and edit_two_letters return *python sets*. Sets have a handy set.intersection feature To find the keys that have the highest values in a dictionary, you can use the Counter dictionary to create a Counter object from a regular dictionary. Then you can use Counter.most_common(n) to get the n most common keys. To find the intersection of two sets, you can use set.intersection or the & operator. If you are not as familiar with short circuit syntax (as shown above), feel free to use if else statements instead. To use an if statement to check of a set is empty, use 'if not x:' syntax
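Before filling in the graded cell, it may help to see the `Counter` pattern from the hints on a toy dictionary; the words and probabilities below are made up purely for illustration:
```python
from collections import Counter

# Hypothetical best_words dictionary: suggestion -> probability in the corpus
best_words = {'days': 0.00041, 'dye': 0.000019, 'dis': 0.0}
# most_common(n) returns the n (word, probability) pairs with the highest values
print(Counter(best_words).most_common(2))  # [('days', 0.00041), ('dye', 1.9e-05)]
```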
###Code
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_corrections
def get_corrections(word, probs, vocab, n=2, verbose = False):
'''
Input:
word: a user entered string to check for suggestions
probs: a dictionary that maps each word to its probability in the corpus
vocab: a set containing all the vocabulary
n: number of possible word corrections you want returned in the dictionary
Output:
n_best: a list of tuples with the most probable n corrected words and their probabilities.
'''
suggestions = []
n_best = []
### START CODE HERE ###
suggestions = list((word in vocab and [word]) or edit_one_letter(word).intersection(vocab) or edit_two_letters(word).intersection(vocab))
n_best = sorted([(s, probs.get(s, 0)) for s in suggestions], key=lambda pair: pair[1], reverse=True)[:n]
### END CODE HERE ###
if verbose: print("entered word = ", word, "\nsuggestions = ", suggestions)
return n_best
# Test your implementation - feel free to try other words in my word
my_word = 'dys'
tmp_corrections = get_corrections(my_word, probs, vocab, 2, verbose=True) # keep verbose=True
for i, word_prob in enumerate(tmp_corrections):
print(f"word {i}: {word_prob[0]}, probability {word_prob[1]:.6f}")
# CODE REVIEW COMMENT: using "tmp_corrections" instead of "cors". "cors" is not defined
print(f"data type of corrections {type(tmp_corrections)}")
###Output
entered word = dys
suggestions = ['days', 'dye']
word 0: dye, probability 0.000019
word 1: days, probability 0.000410
data type of corrections <class 'list'>
###Markdown
Expected Output- Note: This expected output is for `my_word = 'dys'`. Also, keep `verbose=True````CPPentered word = dys suggestions = {'days', 'dye'}word 0: days, probability 0.000410word 1: dye, probability 0.000019data type of corrections ``` Part 4: Minimum Edit distanceNow that you have implemented your auto-correct, how do you evaluate the similarity between two strings? For example: 'waht' and 'what'Also how do you efficiently find the shortest path to go from the word, 'waht' to the word 'what'?You will implement a dynamic programming system that will tell you the minimum number of edits required to convert a string into another string. Part 4.1 Dynamic ProgrammingDynamic Programming breaks a problem down into subproblems which can be combined to form the final solution. Here, given a string source[0..i] and a string target[0..j], we will compute all the combinations of substrings[i, j] and calculate their edit distance. To do this efficiently, we will use a table to maintain the previously computed substrings and use those to calculate larger substrings.You have to create a matrix and update each element in the matrix as follows: $$\text{Initialization}$$\begin{align}D[0,0] &= 0 \\D[i,0] &= D[i-1,0] + del\_cost(source[i]) \tag{4}\\D[0,j] &= D[0,j-1] + ins\_cost(target[j]) \\\end{align} $$\text{Per Cell Operations}$$\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align} So converting the source word **play** to the target word **stay**, using an input cost of one, a delete cost of 1, and replace cost of 2 would give you the following table: s t a y 0 1 2 3 4 p 1 2 3 4 5 l 2 3 4 5 6 a 3 4 5 4 5 y 4 5 6 5 4 The operations used in this algorithm are 'insert', 'delete', and 'replace'. These correspond to the functions that you defined earlier: insert_letter(), delete_letter() and replace_letter(). switch_letter() is not used here. The diagram below describes how to initialize the table. Each entry in D[i,j] represents the minimum cost of converting string source[0:i] to string target[0:j]. The first column is initialized to represent the cumulative cost of deleting the source characters to convert string "EER" to "". The first row is initialized to represent the cumulative cost of inserting the target characters to convert from "" to "NEAR". Figure 6 Initializing Distance Matrix Filling in the remainder of the table utilizes the 'Per Cell Operations' in the equation (5) above. Note, the diagram below includes in the table some of the 3 sub-calculations shown in light grey. Only 'min' of those operations is stored in the table in the `min_edit_distance()` function. Figure 7 Filling Distance Matrix Note that the formula for $D[i,j]$ shown in the image is equivalent to:\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align}The variable `sub_cost` (for substitution cost) is the same as `rep_cost`; replacement cost. We will stick with the term "replace" whenever possible. Below are some examples of cells where replacement is used. This also shows the minimum path from the lower right final position where "EER" has been replaced by "NEAR" back to the start. This provides a starting point for the optional 'backtrace' algorithm below. 
Figure 8 Examples Distance Matrix Exercise 11Again, the word "substitution" appears in the figure, but think of this as "replacement". **Instructions**: Implement the function below to get the minimum number of edits required to convert a source string into a target string. Hints The range(start, stop, step) function excludes 'stop' from its output
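To make the per-cell recurrence concrete, here is how a single interior cell is filled once its three neighbours are known; the numbers come from the play → stay table above and this snippet is only an illustration, not part of the graded function:
```python
# Filling D[1,1] for source 'play' and target 'stay' (ins=1, del=1, rep=2)
del_cost, ins_cost, rep_cost = 1, 1, 2
up, left, diag = 1, 1, 0            # D[0,1], D[1,0], D[0,0] from the initialized border
r_cost = rep_cost                   # 'p' != 's', so replacing costs 2
print(min(up + del_cost, left + ins_cost, diag + r_cost))  # 2, matching the table above
```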
###Code
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: min_edit_distance
def min_edit_distance(source, target, ins_cost = 1, del_cost = 1, rep_cost = 2):
'''
Input:
source: a string corresponding to the string you are starting with
target: a string corresponding to the string you want to end with
ins_cost: an integer setting the insert cost
del_cost: an integer setting the delete cost
rep_cost: an integer setting the replace cost
Output:
D: a matrix of len(source)+1 by len(target)+1 containing minimum edit distances
med: the minimum edit distance (med) required to convert the source string to the target
'''
# use deletion and insert cost as 1
m = len(source)
n = len(target)
#initialize cost matrix with zeros and dimensions (m+1,n+1)
D = np.zeros((m+1, n+1), dtype=int)
### START CODE HERE (Replace instances of 'None' with your code) ###
# Fill in column 0, from row 1 to row m
for row in range(1, m+1): # Replace None with the proper range
D[row,0] = D[row-1,0] + del_cost
# Fill in row 0, for all columns from 1 to n
for col in range(1, n+1): # Replace None with the proper range
D[0, col] = D[0, col-1] + ins_cost
# Loop through row 1 to row m
for row in range(1, m+1):
# Loop through column 1 to column n
for col in range(1, n+1):
# Initialize r_cost to the 'replace' cost that is passed into this function
r_cost = rep_cost
# Check to see if source character at the previous row
# matches the target character at the previous column,
if source[row-1] == target[col-1]:
# Update the replacement cost to 0 if source and target are the same
r_cost = 0
# Update the cost at row, col based on previous entries in the cost matrix
# Refer to the equation calculate for D[i,j] (the minimum of three calculated costs)
D[row, col] = min([D[row-1, col] + del_cost, D[row, col-1] + ins_cost, D[row-1, col-1] + r_cost])
# Set the minimum edit distance with the cost found at row m, column n
med = D[m, n]
### END CODE HERE ###
return D, med
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'play'
target = 'stay'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list('#' + source)
cols = list('#' + target)
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
###Output
minimum edits: 4
# s t a y
# 0 1 2 3 4
p 1 2 3 4 5
l 2 3 4 5 6
a 3 4 5 4 5
y 4 5 6 5 4
###Markdown
**Expected Results:** ```CPPminimum edits: 4 s t a y 0 1 2 3 4p 1 2 3 4 5l 2 3 4 5 6a 3 4 5 4 5y 4 5 6 5 4```
###Code
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'eer'
target = 'near'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list(source)
idx.insert(0, '#')
cols = list(target)
cols.insert(0, '#')
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
###Output
minimum edits: 3
# n e a r
# 0 1 2 3 4
e 1 2 1 2 3
e 2 3 2 3 4
r 3 4 3 4 3
###Markdown
**Expected Results** ```CPPminimum edits: 3 n e a r 0 1 2 3 4e 1 2 1 2 3e 2 3 2 3 4r 3 4 3 4 3``` We can now test several of our routines at once:
###Code
source = "eer"
targets = edit_one_letter(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 1: print(source, t, min_edits)
###Output
_____no_output_____
###Markdown
**Expected Results** ```CPP(empty)```The 'replace()' routine utilizes all letters a-z, one of which returns the original word.
###Code
source = "eer"
targets = edit_two_letters(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 2 and min_edits != 1: print(source, t, min_edits)
###Output
eer eer 0
###Markdown
**Expected Results** ```CPPeer eer 0```We have to allow single edits here because some two_edits will restore a single edit. SubmissionMake sure you submit your assignment before you modify anything below Part 5: Optional - BacktraceOnce you have computed your matrix using minimum edit distance, how would you find the shortest path from the top left corner to the bottom right corner? Note that you could use the backtrace algorithm. Try to find the shortest path given the matrix that your `min_edit_distance` function returned.You can use these [lecture slides on minimum edit distance](https://web.stanford.edu/class/cs124/lec/med.pdf) by Dan Jurafsky to learn about the algorithm for backtrace.
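One possible way to approach the backtrace (a sketch only, assuming the cost matrix `D` returned by `min_edit_distance` above and the same insert/delete/replace costs) is to start at the bottom-right cell and repeatedly move to whichever neighbour produced the minimum in equation (5):
```python
def backtrace(D, source, target, ins_cost=1, del_cost=1, rep_cost=2):
    """Walk the cost matrix D from (m, n) back to (0, 0), recording the edits used."""
    i, j = len(source), len(target)
    path = []
    while i > 0 or j > 0:
        r = 0 if (i > 0 and j > 0 and source[i-1] == target[j-1]) else rep_cost
        if i > 0 and j > 0 and D[i, j] == D[i-1, j-1] + r:
            path.append('keep ' + source[i-1] if r == 0 else f'replace {source[i-1]}->{target[j-1]}')
            i, j = i - 1, j - 1
        elif i > 0 and D[i, j] == D[i-1, j] + del_cost:
            path.append(f'delete {source[i-1]}')
            i -= 1
        else:
            path.append(f'insert {target[j-1]}')
            j -= 1
    return list(reversed(path))

matrix, _ = min_edit_distance('play', 'stay')
print(backtrace(matrix, 'play', 'stay'))
# ['replace p->s', 'replace l->t', 'keep a', 'keep y']
```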
###Code
# Experiment with back trace - insert your code here
###Output
_____no_output_____ |
IBM_Data_Science_Capstone_Project_Preprocessing.ipynb | ###Markdown
Coursera Capstone Course About this repositoryThis repository represents the final assignment in the coursera certification course "IBM Data Science Professional Certificate" (See: https://www.coursera.org/professional-certificates/ibm-data-science). PreprocessingTo reduce clutter in the main notebook, I decided to split it in two components. This is the preprocessing notebook, where I use python to load data from the internet, cleaning it and saving it as a .csv file. This will allow me to keep the main component more readable.
###Code
import numpy as np
import pandas as pd
import requests
import codecs
import folium
import json
print("Finished importing.")
###Output
Finished importing.
###Markdown
Loading data from the internet
###Code
# Links to sources
source_link_vvs = "https://www.openvvs.de/dataset/1f7ec4c1-b590-4751-a25b-57ef988c14b6/resource/d87d1f01-5c14-4d08-8452-e405a6472ab4/download/vvs_haltestellen.csv"
df_vvs = pd.read_csv(source_link_vvs, delimiter=";", encoding="iso-8859-1")
# Extract all possible ways of commotion (Verkehrsmittel = means of transport)
vm = list(df_vvs["Verkehrsmittel"].unique())
# This fields consists of multiple values seperated by a ';'
# We have to extract single words
vm_unique = set()
for y in vm:
for x in y.split(";"):
vm_unique.add(x)
print(vm_unique)
# For this project I will only use connections using the so called 'S-Bahn' (a type
# of train running only in a limited region)
# Extract entries where
df_vvs = df_vvs[df_vvs["Verkehrsmittel"].str.match(".*S-Bahn.*")].reset_index()
print(df_vvs.shape)
df_vvs.head()
# Print the names of the various train stations
df_vvs["Name"].values
## Create a map to see if the location data seems reasonable
# Create a copy of the original dataframe
df_plot = df_vvs.loc[:, ["Name", "Y-Koordinate", "X-Koordinate"]]
df_plot.columns = ["Name", "Latitude", "Longitude"]
# Rectify some german conventions
formater = lambda x: x.replace(",", ".")
df_plot["Latitude"] = df_plot["Latitude"].apply(formater).astype(np.float64)
df_plot["Longitude"] = df_plot["Longitude"].apply(formater).astype(np.float64)
# Get mean lat and long as starting point
latitude = df_plot["Latitude"].mean()
longitude = df_plot["Longitude"].mean()
# Create the map and the labels
map = folium.Map(location = (latitude, longitude), zoom_start=10)
for _, (name, lat, long) in df_plot.iterrows():
label = folium.Popup(name)
folium.CircleMarker(
location=(lat,long),
popup=label,
color="red",
fill=True,
radius=10,
fill_opacity=0.2
).add_to(map)
map
print(df_vvs.columns)
# Drop unnecessary columns
df_vvs.drop(["index", "Nummer", "Name mit Ort", "Globale ID", "GKZ", "Landkreis", "Tarifzonen", "Linien (DIVA)","Anzahl Linien", "Betriebszweige"], axis=1, inplace=True)
df_vvs.head()
# Rename columns to english
df_vvs.columns = ["name", "place", "district", "type of transport", "lines", "longitude", "latitude"]
df_vvs.reset_index()
# Rectify some german conventions
formater = lambda x: x.replace(",", ".")
df_vvs["latitude"] = df_vvs["latitude"].apply(formater).astype(np.float64)
df_vvs["longitude"] = df_vvs["longitude"].apply(formater).astype(np.float64)
df_vvs.head()
# Write data to a file
df_vvs.to_csv("data/stations.csv", encoding="utf-8", index=False)
###Output
_____no_output_____
###Markdown
Loading data from Foursquare
###Code
data = pd.read_json(".private_data.json")["FOURSQUARE"]
# Authentication data
CLIENT_ID = data["ID"]
CLIENT_SECRET = data["SECRET"]
# Parameters
LIMIT = 10
RADIUS = 500 # 500 meters within range of location
VERSION = "20201115"
# Code url fragments
url_base = "https://api.foursquare.com/v2/venues/search?"
url_client = f"&client_id={CLIENT_ID}&client_secret={CLIENT_SECRET}"
url_parameters = f"&radius={RADIUS}&limit={LIMIT}&v={VERSION}"
url_payload = "&ll={},{}"
assemble = lambda ll: url_base + url_client + url_parameters + url_payload.format(ll[0], ll[1])
# Iterate through all stations and query the API for near venues
print("Start querying the API...")
results = {}
for _, (name, _, _, _, _, long, lat) in df_vvs.iterrows():
# Assemble url
url = assemble((lat, long))
# print(f"Send query '{url}' to foursquare API.")
# Query API and save the result for further processing
result = requests.get(url).text
results[name] = result
print("Done.")
# Delete private fields so they are not accessible from outside this cell
del(CLIENT_ID, CLIENT_SECRET, url_client, assemble)
print(f"Number of results: {len(results)} for {df_vvs.shape[0]} stations.")
# Print part of the string to see if everything seems correct
print(f"Sample:\n{results[list(results.keys())[0]][:500]}...")
# Print a sample to get some feeling for the layout of the documents
index = list(results.keys())[0]
result = results[index]
result = json.loads(result)["response"]["venues"]
print(result[0].keys())
print()
print(result[0]["categories"])
def extract_values(sname, venue_json):
venue_json = json.loads(venue_json)["response"]["venues"]
# Iterate through all venues in the json object
venues = []
for venue in venue_json:
id = venue["id"]
name = venue["name"]
address = venue["location"].get("address")
latitude = venue["location"]["lat"]
longitude = venue["location"]["lng"]
categories = [x["name"] for x in venue["categories"]]
venues.append([sname, id, name, address, latitude, longitude, ",".join(categories)])
return venues
def extract_all(venue_jsons):
# Extract values from each response
extracts = [extract_values(x[0], x[1]) for x in venue_jsons.items()]
# Flatten the result
res = [y for x in extracts for y in x]
columns = [
"station name",
"venue id",
"venue name",
"venue address",
"venue latitude",
"venue longitude",
"venue categories"
]
return pd.DataFrame(data=res, columns=columns)
df_venues = extract_all(results)
df_venues.head()
# Replace all unknown tokens with a unknown string
df_venues.loc[pd.isna(df_venues["venue categories"]), "venue categories"] = "Unknown"
df_venues["venue categories"].replace("", "Unknown", inplace = True)
df_venues.head()
# Write data to a file
df_venues.to_csv("data/venues.csv", encoding="utf-8", index=False)
###Output
_____no_output_____ |
data/Python_simple_machine_learning_classification.ipynb | ###Markdown
Now let's use pandas to load the dataset from a URL.
###Code
import pandas
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
dataset = pandas.read_csv(url)
print('shape=',dataset.shape)
print(dataset.head())
###Output
shape= (149, 5)
5.1 3.5 1.4 0.2 Iris-setosa
0 4.9 3.0 1.4 0.2 Iris-setosa
1 4.7 3.2 1.3 0.2 Iris-setosa
2 4.6 3.1 1.5 0.2 Iris-setosa
3 5.0 3.6 1.4 0.2 Iris-setosa
4 5.4 3.9 1.7 0.4 Iris-setosa
###Markdown
Oh no, the dataset has no column names, so pandas used the first data row as the header (hence the shape of 149 rows instead of 150). Let's add column names.
###Code
dataset.columns=['A','B','C','D','E']
print(dataset.head())
###Output
A B C D E
0 4.9 3.0 1.4 0.2 Iris-setosa
1 4.7 3.2 1.3 0.2 Iris-setosa
2 4.6 3.1 1.5 0.2 Iris-setosa
3 5.0 3.6 1.4 0.2 Iris-setosa
4 5.4 3.9 1.7 0.4 Iris-setosa
###Markdown
Let's look at the class distribution of the column we will use as target for our machine learning tasks, column 'E'.
###Code
print(dataset.groupby('E').size())
###Output
E
Iris-setosa 49
Iris-versicolor 50
Iris-virginica 50
dtype: int64
###Markdown
Now let's split our dataset randomly into training and validation sets. First, shuffle the dataset randomly (`sample(frac=1)` samples without replacement, so this is simply a random permutation of the rows).
###Code
# The frac keyword argument specifies the fraction of rows to return in
# the random sample, so frac=1 means return all rows (in random order).
dataset_shuffled = dataset.sample(frac=1)
print(dataset_shuffled.head())
###Output
A B C D E
9 5.4 3.7 1.5 0.2 Iris-setosa
113 5.8 2.8 5.1 2.4 Iris-virginica
96 6.2 2.9 4.3 1.3 Iris-versicolor
118 6.0 2.2 5.0 1.5 Iris-virginica
147 6.2 3.4 5.4 2.3 Iris-virginica
###Markdown
Now split the data into training and validation sets in an 80:20 ratio. We will use the validation set to test the performance of the machine learning algorithms.
###Code
n = dataset_shuffled.shape[0]
validation_size = int(0.2*n)
from sklearn import model_selection
data_array = dataset_shuffled.values
X = data_array[:,0:4] # separate the predictor variables
Y = data_array[:,4] # from the target variable (last column)
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X,Y,test_size=validation_size,random_state=13)
###Output
_____no_output_____
###Markdown
Check the sizes of the training and validation data sets.
###Code
print('Train_X = ',X_train.shape, 'Train_Y = ',Y_train.shape)
print('Val_X = ',X_validation.shape, 'Val_Y = ',Y_validation.shape)
import sklearn
from sklearn import linear_model
from sklearn import tree
from sklearn import svm
from sklearn import neighbors
# we will use these classifiers
LR = linear_model.LogisticRegression()
KNN = neighbors.KNeighborsClassifier()
DT = tree.DecisionTreeClassifier()
SVM = svm.SVC()
models_list = [LR,KNN,DT,SVM]
num_folds = 8
kfolds = model_selection.KFold(n_splits=num_folds, shuffle=True, random_state=13)
for model in models_list:
model_name = type(model).__name__
cv_results = model_selection.cross_val_score(model,X_train,Y_train,scoring='accuracy',cv=kfolds)
print(model_name,' : ',cv_results.tolist())
print(model_name,' : mean=',cv_results.mean(), ' sd=',cv_results.std())
print('------------------------------------------------------------------------')
###Output
LogisticRegression : [1.0, 1.0, 1.0, 0.6666666666666666, 0.9333333333333333, 0.9333333333333333, 0.9333333333333333, 1.0]
LogisticRegression : mean= 0.9333333333333333 sd= 0.10540925533894599
------------------------------------------------------------------------
KNeighborsClassifier : [1.0, 0.9333333333333333, 1.0, 0.8666666666666667, 1.0, 0.9333333333333333, 1.0, 1.0]
KNeighborsClassifier : mean= 0.9666666666666667 sd= 0.04714045207910316
------------------------------------------------------------------------
DecisionTreeClassifier : [0.9333333333333333, 0.9333333333333333, 0.9333333333333333, 0.9333333333333333, 1.0, 0.9333333333333333, 1.0, 0.9333333333333333]
DecisionTreeClassifier : mean= 0.95 sd= 0.02886751345948128
------------------------------------------------------------------------
SVC : [0.9333333333333333, 1.0, 0.9333333333333333, 0.9333333333333333, 1.0, 0.9333333333333333, 1.0, 1.0]
SVC : mean= 0.9666666666666667 sd= 0.033333333333333326
------------------------------------------------------------------------
###Markdown
But this is naive training with each of the classifiers, as we have not done anything to optimize the parameters for each classifier. We will use sklearn.model_selection.GridSearchCV for that. Let's do this with each classifier separately because each classifier has its own set of parameters to tune.
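As a minimal illustration of the pattern used in the next cells (the grid values here are made-up examples), `GridSearchCV` exhaustively evaluates every combination in `param_grid` with cross-validation and exposes the winner through `best_params_` and `best_score_`:
```python
from sklearn import model_selection, neighbors

param_grid = {'n_neighbors': [3, 5, 7]}                 # illustrative grid only
search = model_selection.GridSearchCV(
    estimator=neighbors.KNeighborsClassifier(),
    param_grid=param_grid,
    scoring='accuracy',
    cv=5)
search.fit(X_train, Y_train)                            # X_train / Y_train from the split above
print(search.best_params_, search.best_score_)
```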
###Code
kfolds = model_selection.KFold(n_splits=num_folds,shuffle=True,random_state=13)
#define function to be called to do grid search for each of the classifiers.
def print_cv_results(estimator,results):
means = results.cv_results_['mean_test_score']
stds = results.cv_results_['std_test_score']
print(type(estimator).__name__)
for mean, std, params in zip(means, stds, results.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
print('best_parameters=',results.best_params_)
print('best_accuracy=',results.best_score_)
print('----------------------------------------------')
def do_grid_search(estimator,grid_values,kfolds):
clf = model_selection.GridSearchCV(estimator=estimator,param_grid=grid_values,scoring='accuracy',cv=kfolds)
results = clf.fit(X_train,Y_train)
print_cv_results(estimator,results)
return clf
# First, logistic Regression parameters to tune : C, penalty
grid_values = {'C':[0.01,0.1,1,10,100],'penalty':['l1','l2']}
clf_LR = do_grid_search(LR,grid_values,kfolds)
# Next we do with KNN
grid_values = {'n_neighbors':list(range(1,10))}
clf_KNN = do_grid_search(KNN,grid_values,kfolds)
# Next Decision Tree
grid_values = {'criterion':['gini','entropy'],'splitter':['best','random'],'min_samples_split':list(range(2,10)),'min_samples_leaf':[1,2,3,4,5]}
clf_DT = do_grid_search(DT,grid_values,kfolds)
# And finally SVM
grid_values = {'C':[1,10,100],'kernel':['linear', 'poly', 'rbf', 'sigmoid'] }
clf_SVM = do_grid_search(SVM,grid_values,kfolds)
from sklearn.metrics import classification_report
# predictions for LR
Y_true, Y_pred = Y_validation, clf_LR.predict(X_validation)
print(classification_report(Y_true, Y_pred))
# predictions for KNN
Y_true, Y_pred = Y_validation, clf_KNN.predict(X_validation)
print(classification_report(Y_true, Y_pred))
# predictions for DT
Y_true, Y_pred = Y_validation, clf_DT.predict(X_validation)
print(classification_report(Y_true, Y_pred))
# predictions for SVM
Y_true, Y_pred = Y_validation, clf_SVM.predict(X_validation)
print(classification_report(Y_true, Y_pred))
###Output
precision recall f1-score support
Iris-setosa 1.00 1.00 1.00 10
Iris-versicolor 0.90 1.00 0.95 9
Iris-virginica 1.00 0.90 0.95 10
avg / total 0.97 0.97 0.97 29
|
day3/notebooks/lgde-spark-core/lgde-spark-core-3-data-types.ipynb | ###Markdown
Session 3: Data Types> In this session we practice the data types used in Spark. Table of contents* [1. Literal types](#1.-Literal-types)* [2. Working with booleans](#2.-Working-with-booleans)* [3. Working with numeric types](#3.-Working-with-numeric-types)* [4. Working with strings](#4.-Working-with-strings)* [5. Regular expressions](#5.-Regular-expressions)* [6. Working with dates and timestamps](#6.-Working-with-dates-and-timestamps)* [7. Working with nulls](#7.-Working-with-nulls)* [References](#References)
###Code
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from IPython.display import display, display_pretty, clear_output, JSON
spark = (
SparkSession
.builder
.config("spark.sql.session.timeZone", "Asia/Seoul")
.getOrCreate()
)
# Settings so that DataFrames are displayed as tables in the notebook
spark.conf.set("spark.sql.repl.eagerEval.enabled", True) # display enabled
spark.conf.set("spark.sql.repl.eagerEval.truncate", 100) # display output columns size
# Common data location
home_jovyan = "/home/jovyan"
work_data = f"{home_jovyan}/work/data"
work_dir=!pwd
work_dir = work_dir[0]
# Optimizations for the local environment
spark.conf.set("spark.sql.shuffle.partitions", 5) # the number of partitions to use when shuffling data for joins or aggregations.
spark.conf.set("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true")
spark
""" DataFrame 생성 """
df = (
spark.read.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(f"{work_data}/retail-data/by-day/2010-12-01.csv")
)
df.printSchema()
df.createOrReplaceTempView("retail")
df.show(5)
###Output
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = true)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = true)
|-- Country: string (nullable = true)
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| 536365| 71053| WHITE METAL LANTERN| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84406B|CREAM CUPID HEART...| 8|2010-12-01 08:26:00| 2.75| 17850.0|United Kingdom|
| 536365| 84029G|KNITTED UNION FLA...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84029E|RED WOOLLY HOTTIE...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 5 rows
###Markdown
1. Literal types
###Code
from pyspark.sql.functions import lit
df.select(lit(5), lit("five"), lit(5.0)).limit(5)
###Output
_____no_output_____
###Markdown
2. Working with booleans 2.1 AND conditions
###Code
from pyspark.sql.functions import col
x1 = df.where(col("InvoiceNO") != 536365).select("InvoiceNO", "Description")
x2 = df.where("InvoiceNO <> 536365").select("InvoiceNO", "Description")
x3 = df.where("InvoiceNO = 536365").select("InvoiceNO", "Description")
x1.show(2)
x2.show(2)
###Output
+---------+--------------------+
|InvoiceNO| Description|
+---------+--------------------+
| 536366|HAND WARMER UNION...|
| 536366|HAND WARMER RED P...|
+---------+--------------------+
only showing top 2 rows
+---------+--------------------+
|InvoiceNO| Description|
+---------+--------------------+
| 536366|HAND WARMER UNION...|
| 536366|HAND WARMER RED P...|
+---------+--------------------+
only showing top 2 rows
###Markdown
2.2 OR conditions
###Code
from pyspark.sql.functions import instr
# df.where( (col("UnitPrice") > 600) | (instr(col("Description"), "POSTAGE") >= 1) ).show()
df.where("UnitPrice > 600 OR instr(Description, 'POSTAGE') >= 1").show()
###Output
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
| 536370| POST| POSTAGE| 3|2010-12-01 08:45:00| 18.0| 12583.0| France|
| 536403| POST| POSTAGE| 1|2010-12-01 11:27:00| 15.0| 12791.0| Netherlands|
| 536527| POST| POSTAGE| 1|2010-12-01 13:04:00| 18.0| 12662.0| Germany|
| 536544| DOT|DOTCOM POSTAGE| 1|2010-12-01 14:32:00| 569.77| null|United Kingdom|
| 536592| DOT|DOTCOM POSTAGE| 1|2010-12-01 17:06:00| 607.49| null|United Kingdom|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
| 536370| POST| POSTAGE| 3|2010-12-01 08:45:00| 18.0| 12583.0| France|
| 536403| POST| POSTAGE| 1|2010-12-01 11:27:00| 15.0| 12791.0| Netherlands|
| 536527| POST| POSTAGE| 1|2010-12-01 13:04:00| 18.0| 12662.0| Germany|
| 536544| DOT|DOTCOM POSTAGE| 1|2010-12-01 14:32:00| 569.77| null|United Kingdom|
| 536592| DOT|DOTCOM POSTAGE| 1|2010-12-01 17:06:00| 607.49| null|United Kingdom|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+
###Markdown
2.3 ISIN - whether a value is contained in a given list
###Code
# Using an IS IN clause with Spark SQL
from pyspark.sql.functions import desc
# df.select("StockCode").where(col("StockCode").isin(["DOT", "POST", "C2"])).distinct().show()
df.select('StockCode').where("StockCode in ('DOT', 'POST', 'C2')").distinct().show()
###Output
+---------+
|StockCode|
+---------+
| POST|
| C2|
| DOT|
+---------+
+---------+
|StockCode|
+---------+
| POST|
| C2|
| DOT|
+---------+
###Markdown
2.4 INSTR - whether a specific substring is contained
###Code
from pyspark.sql.functions import *
""" instr 함수 """
df.withColumn("added", instr(df.Description, "POSTAGE")).where("added > 1").show() # 8번째 글자에 'POSTAGE'가 시작됨
###Output
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+-----+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|added|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+-----+
| 536544| DOT|DOTCOM POSTAGE| 1|2010-12-01 14:32:00| 569.77| null|United Kingdom| 8|
| 536592| DOT|DOTCOM POSTAGE| 1|2010-12-01 17:06:00| 607.49| null|United Kingdom| 8|
+---------+---------+--------------+--------+-------------------+---------+----------+--------------+-----+
###Markdown
1. [Basic] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. print the schema 2. show 10 rows of data 3. keep transactions whose invoice number (InvoiceNo) is '536365' 4. and whose stock code (StockCode) is one of ('85123A', '84406B', '84029G', '84029E') 5. and whose unit price (UnitPrice) is at most 2.6 or at least 3.0, then show the result.[Exercise 1] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf1 = ( spark.read.format("csv") .option("header", "true") .option("inferSchema", "true") .load(f"{work_data}/retail-data/by-day/2010-12-01.csv"))df1.printSchema()df1.show(10)answer = df1.where("InvoiceNo = '536365'").where("StockCode in ('85123A', '84406B', '84029G', '84029E')").where("UnitPrice <= 2.6 or UnitPrice >= 3.0")answer.show()```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = true)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = true)
|-- Country: string (nullable = true)
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| 536365| 71053| WHITE METAL LANTERN| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84406B|CREAM CUPID HEART...| 8|2010-12-01 08:26:00| 2.75| 17850.0|United Kingdom|
| 536365| 84029G|KNITTED UNION FLA...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84029E|RED WOOLLY HOTTIE...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 22752|SET 7 BABUSHKA NE...| 2|2010-12-01 08:26:00| 7.65| 17850.0|United Kingdom|
| 536365| 21730|GLASS STAR FROSTE...| 6|2010-12-01 08:26:00| 4.25| 17850.0|United Kingdom|
| 536366| 22633|HAND WARMER UNION...| 6|2010-12-01 08:28:00| 1.85| 17850.0|United Kingdom|
| 536366| 22632|HAND WARMER RED P...| 6|2010-12-01 08:28:00| 1.85| 17850.0|United Kingdom|
| 536367| 84879|ASSORTED COLOUR B...| 32|2010-12-01 08:34:00| 1.69| 13047.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 10 rows
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| 536365| 84029G|KNITTED UNION FLA...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84029E|RED WOOLLY HOTTIE...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
###Markdown
2. [Intermediate] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. for rows whose stock code (StockCode) is one of ("POST", "M", "DOT", "D", "C2") or whose unit price (UnitPrice) is at least 30, 2. print how many rows there are per stock code (StockCode)* Use the Structured API* Sort by StockCode in descending order[Exercise 2] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf2 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)df2.where( (col("StockCode").isin(["POST", "M", "DOT", "D", "C2"])) | (col("UnitPrice") >= 30)).groupBy("StockCode").count().orderBy(desc("StockCode"))```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
_____no_output_____
###Markdown
3. Working with numeric types 3.1 Writing various functions as expressions
###Code
from pyspark.sql.functions import expr, pow
df.selectExpr("CustomerID", "pow(Quantity * UnitPrice, 2) + 5 as realQuantity").show(2)
""" 지수만큼 제곱하는 pow 함수를 API를 사용해도 결과는 동일합니다 """
from pyspark.sql.functions import expr, pow
# For the calculation below, the operands must be Column objects for the operators to work (there is no string * operator)
fabricateQuantity = pow(col("Quantity") * col("UnitPrice"), 2) + 5
df.select(expr("CustomerID"), fabricateQuantity.alias("realQuantity")).show(2)
###Output
+----------+------------------+
|CustomerID| realQuantity|
+----------+------------------+
| 17850.0|239.08999999999997|
| 17850.0| 418.7156|
+----------+------------------+
only showing top 2 rows
###Markdown
3.2 Rounding (round), ceiling (ceil), floor (floor)
###Code
from pyspark.sql.functions import *
df.selectExpr("round(2.5, 0)", "ceil(2.4)", "floor(2.6)").show(1)
###Output
+-------------+---------+----------+
|round(2.5, 0)|CEIL(2.4)|FLOOR(2.6)|
+-------------+---------+----------+
| 3| 3| 2|
+-------------+---------+----------+
only showing top 1 row
###Markdown
3.3 Summary statistics
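Besides `describe`, recent Spark versions also offer `summary()`, which can add percentiles; this extra sketch is not part of the original notebook:
```python
# A sketch: summary() accepts the statistics to compute, including quartiles
df.select("Quantity", "UnitPrice").summary("count", "mean", "stddev", "25%", "50%", "75%").show()
```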
###Code
df.describe().show()
df.describe("InvoiceNo").show() # 컬럼을 입력
###Output
###Markdown
3. [Basic] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. print the schema 2. show 10 rows of data 3. for transactions whose invoice number (InvoiceNo) is '536367' 4. add a TotalPrice column computed as quantity (Quantity) * unit price (UnitPrice) 5. truncating everything below the decimal point (floor) when computing TotalPrice[Exercise 3] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf3 = ( spark.read.format("csv") .option("header", "true") .option("inferSchema", "true") .load(f"{work_data}/retail-data/by-day/2010-12-01.csv"))df3.printSchema()df3.show(10)answer = df3.where("InvoiceNo = '536367'").withColumn("TotalPrice", expr("floor(Quantity * UnitPrice)"))display(answer)```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = true)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = true)
|-- Country: string (nullable = true)
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| 536365| 71053| WHITE METAL LANTERN| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84406B|CREAM CUPID HEART...| 8|2010-12-01 08:26:00| 2.75| 17850.0|United Kingdom|
| 536365| 84029G|KNITTED UNION FLA...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84029E|RED WOOLLY HOTTIE...| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 22752|SET 7 BABUSHKA NE...| 2|2010-12-01 08:26:00| 7.65| 17850.0|United Kingdom|
| 536365| 21730|GLASS STAR FROSTE...| 6|2010-12-01 08:26:00| 4.25| 17850.0|United Kingdom|
| 536366| 22633|HAND WARMER UNION...| 6|2010-12-01 08:28:00| 1.85| 17850.0|United Kingdom|
| 536366| 22632|HAND WARMER RED P...| 6|2010-12-01 08:28:00| 1.85| 17850.0|United Kingdom|
| 536367| 84879|ASSORTED COLOUR B...| 32|2010-12-01 08:34:00| 1.69| 13047.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 10 rows
###Markdown
4. Working with strings 4.1 Capitalize only the first letter of each word* initcap capitalizes the first letter of every word separated by whitespace
###Code
from pyspark.sql.functions import initcap
df.select(initcap(col("Description"))).show(2, False)
###Output
+----------------------------------+
|initcap(Description) |
+----------------------------------+
|White Hanging Heart T-light Holder|
|White Metal Lantern |
+----------------------------------+
only showing top 2 rows
###Markdown
4.2 Uppercase (upper), lowercase (lower)
###Code
from pyspark.sql.functions import lower, upper
df.selectExpr("Description", "lower(Description)", "upper(Description)").show(2)
###Output
+--------------------+--------------------+--------------------+
| Description| lower(Description)| upper(Description)|
+--------------------+--------------------+--------------------+
|WHITE HANGING HEA...|white hanging hea...|WHITE HANGING HEA...|
| WHITE METAL LANTERN| white metal lantern| WHITE METAL LANTERN|
+--------------------+--------------------+--------------------+
only showing top 2 rows
###Markdown
4.3 Removing whitespace around strings: lpad/ltrim/rpad/rtrim/trim
###Code
from pyspark.sql.functions import lit, ltrim, rtrim, rpad, lpad, trim
df.select(
ltrim(lit(" HELLO ")).alias("ltrim"),
rtrim(lit(" HELLO ")).alias("rtrim"),
trim(lit(" HELLO ")).alias("trim"),
lpad(lit("HELLO"), 3, " ").alias("lp"),
rpad(lit("HELLO"), 10, " ").alias("rp")
).show(2)
###Output
+--------+--------+-----+---+----------+
| ltrim| rtrim| trim| lp| rp|
+--------+--------+-----+---+----------+
|HELLO | HELLO|HELLO|HEL|HELLO |
|HELLO | HELLO|HELLO|HEL|HELLO |
+--------+--------+-----+---+----------+
only showing top 2 rows
###Markdown
4. [Intermediate] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. for transactions whose invoice number (InvoiceNo) is '536365', print the stock code (StockCode) as an 8-character string* Pad the empty leading positions of the stock code with 0 (padding)* The zero-padded stock code column must keep the column name StockCode* Output only the columns "InvoiceNo", "StockCode", "Description"* Use the Structured API where possible and keep the code as concise as you can[Exercise 4] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf4 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)df4.where("InvoiceNo = '536365'").select("InvoiceNo", lpad("StockCode", 8, "0").alias("StockCode"), "Description")```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
_____no_output_____
###Markdown
5. Regular expressions & conditional columns 5.1 Replacing words, regexp_replace* Check whether a pattern exists, or replace every matching string* Spark provides the regexp_extract and regexp_replace functions for regular expressions
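In addition to the two functions named above, a plain boolean regular-expression match is available through `rlike` (SQL `RLIKE`); this short sketch on the same `df` is an extra illustration, not part of the original notebook:
```python
# Sketch: keep rows whose Description matches a regular expression, without extracting anything
df.where(col("Description").rlike("^WHITE .*HOLDER$")).select("Description").show(3, False)
```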
###Code
from pyspark.sql.functions import regexp_replace
regex_string = "BLACK|WHITE|RED|GRENN|BLUE"
df.select(regexp_replace(col("Description"), regex_string, "COLOR").alias("color_clean"), col("Description")).show(2, truncate=False)
""" 문자 치환, translate """
from pyspark.sql.functions import translate
df.select(
col("Description"),
translate(col("Description"), "LEET", "12").alias("Translated") # 정확히 매칭되지 않아도 부분만 적용됩니다 L:1, E:2
).show(5)
""" 단어 추출, regexp_extract """
from pyspark.sql.functions import regexp_extract
extract_str = "(BLACK|WHITE|RED|GRENN|BLUE)"
df.select(
col("Description"),
regexp_extract(col("Description"), extract_str, 1).alias("Extracted")
).show(2)
""" 단어 존재유무, contain """ # 파이썬과 SQL은 instr 함수를 사용
from pyspark.sql.functions import instr
containBlack = instr(col("Description"), "BLACK") > 1
containWhite = instr(col("Description"), "WHITE") > 1
df.withColumn("hasSimpleColor", containBlack | containWhite) \
.where("hasSimpleColor") \
.select("Description") \
.show(3, False)
""" 필드에 색깔 문자열이 포함되어 있는지 여부를 locate 함수를 이용하여 컬럼으로 생성하는 에제 """
from pyspark.sql.functions import expr, locate
simple_colors = ["black", "white", "red", "green", "blue"]
def color_locator(column, color_string): # position (in characters, not words) where color_string starts
return locate(color_string.upper(), column).cast("boolean").alias("is_" + color_string)
selected_cols = [color_locator(df.Description, c) for c in simple_colors] # store one locate expression per color in a list
selected_cols.append(expr("*")) # column 타입이여야 함
# * unpacks the list into separate positional arguments
df.select(*selected_cols).show(3)
df.select(*selected_cols).where(expr("is_white OR is_red")) \
.select(col("Description")) \
.show(3, False)
###Output
+--------+--------+------+--------+-------+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|is_black|is_white|is_red|is_green|is_blue|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+--------+--------+------+--------+-------+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| false| true| false| false| false| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| false| true| false| false| false| 536365| 71053| WHITE METAL LANTERN| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| false| false| false| false| false| 536365| 84406B|CREAM CUPID HEART...| 8|2010-12-01 08:26:00| 2.75| 17850.0|United Kingdom|
+--------+--------+------+--------+-------+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 3 rows
+----------------------------------+
|Description |
+----------------------------------+
|WHITE HANGING HEART T-LIGHT HOLDER|
|WHITE METAL LANTERN |
|RED WOOLLY HOTTIE WHITE HEART. |
+----------------------------------+
only showing top 3 rows
###Markdown
5.2 Creating conditional columns with when/case/otherwise* Returns a column value depending on which condition matches, like IF/ELSE* Conditions can be chained to apply N conditions
###Code
(
df.withColumn(
"Size",
when(col("Quantity") > 500, "Large")
.when(col("Quantity") > 250, "Middle")
.otherwise("Small")
)
.select("Quantity", "size")
.orderBy(desc("Quantity"))
).show(10)
spark.sql("""
select Quantity,
case when Quantity > 500 then 'large'
when Quantity > 250 then 'middle'
else 'small' end as size
from retail
order by Quantity desc
""").show(10)
###Output
+--------+------+
|Quantity| size|
+--------+------+
| 600| Large|
| 480|Middle|
| 432|Middle|
| 432|Middle|
| 384|Middle|
| 288|Middle|
| 252|Middle|
| 216| Small|
| 200| Small|
| 200| Small|
+--------+------+
only showing top 10 rows
+--------+------+
|Quantity| size|
+--------+------+
| 600| large|
| 480|middle|
| 432|middle|
| 432|middle|
| 384|middle|
| 288|middle|
| 252|middle|
| 216| small|
| 200| small|
| 200| small|
+--------+------+
only showing top 10 rows
###Markdown
5-3. User-defined functions > A user-defined function (UDF) processes data record by record and is registered as a temporary function so it can be used from the SparkSession or context* Because it cannot take advantage of the code generation used by built-in functions, there is some performance penalty* Performance also differs by language; UDFs can be written in Python, but writing them in Java or Scala is recommended
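The next cells define a simple `power3` function and use it through the DataFrame API. As an additional sketch (not part of the original notebook), once `power3` is defined it can also be registered as a temporary SQL function with `spark.udf.register`, declaring the return type explicitly:
```python
from pyspark.sql.types import LongType

# Sketch: register power3 for use from SQL; the inputs here are longs, so declare LongType
spark.udf.register("power3py", power3, LongType())
spark.range(5).toDF("num").createOrReplaceTempView("udf_example")
spark.sql("SELECT num, power3py(num) AS cubed FROM udf_example").show()
```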
###Code
""" UDF 사용하기 """
udfExDF = spark.range(5).toDF("num")
def power3(double_value):
return double_value ** 3
power3(2.0)
""" UDF 등록 및 사용 """
from pyspark.sql.functions import udf
power3udf = udf(power3)
udfExDF.select(power3udf(col("num"))).show()
###Output
+-----------+
|power3(num)|
+-----------+
| 0|
| 1|
| 8|
| 27|
| 64|
+-----------+
###Markdown
5. [Intermediate] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. for transactions whose invoice number (InvoiceNo) is '536365', print the stock code (StockCode) as an 8-character string* Pad the empty leading positions of the stock code with 0 (padding)* The zero-padded stock code column must keep the column name StockCode* Output only the columns "InvoiceNo", "StockCode", "Description"* Use the Structured API where possible and keep the code as concise as you can[Exercise 5] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf5 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)df5.where("InvoiceNo = '536365'").select("InvoiceNo", lpad("StockCode", 8, "0").alias("StockCode"), "Description")```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
_____no_output_____
###Markdown
6. [Advanced] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. in the Description field, replace 'GREEN' -> '초급', 'BLUE' -> '중급', 'RED' -> '고급' and print the result* Try writing this with a function called colorGrade(column)* In the final output, show only the rows whose Description contains the character '급'[Exercise 6] Check your output > Your answer is correct if it is written in a way similar to the following```pythondf6 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)df6.printSchema()df6.show(3)def colorReplacer(column): return ( when(column.contains("GREEN"), regexp_replace(column, "GREEN", "초급")) .when(column.contains("BLUE"), regexp_replace(column, "BLUE", "중급")) .when(column.contains("RED"), regexp_replace(column, "RED", "고급")) .otherwise(column).alias("Description") ) df6.select(colorReplacer(col("Description"))).where(col("Description").contains("급")).show(truncate=False)```
###Code
# Write your practice code here and run it (Shift+Enter)
###Output
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = true)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = true)
|-- Country: string (nullable = true)
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
| 536365| 71053| WHITE METAL LANTERN| 6|2010-12-01 08:26:00| 3.39| 17850.0|United Kingdom|
| 536365| 84406B|CREAM CUPID HEART...| 8|2010-12-01 08:26:00| 2.75| 17850.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 3 rows
+-----------------------------------+
|Description |
+-----------------------------------+
|고급 WOOLLY HOTTIE WHITE HEART. |
|HAND WARMER 고급 POLKA DOT |
|고급 COAT RACK PARIS FASHION |
|중급 COAT RACK PARIS FASHION |
|ALARM CLOCK BAKELIKE 고급 |
|ALARM CLOCK BAKELIKE 초급 |
|SET/2 고급 RETROSPOT TEA TOWELS |
|고급 TOADSTOOL LED NIGHT LIGHT |
|HAND WARMER 고급 POLKA DOT |
|EDWARDIAN PARASOL 고급 |
|고급 WOOLLY HOTTIE WHITE HEART. |
|EDWARDIAN PARASOL 고급 |
|고급 WOOLLY HOTTIE WHITE HEART. |
|고급 HANGING HEART T-LIGHT HOLDER |
|HAND WARMER 고급 POLKA DOT |
|고급 3 PIECE RETROSPOT CUTLERY SET |
|중급 3 PIECE POLKADOT CUTLERY SET |
|SET/6 고급 SPOTTY PAPER PLATES |
|LUNCH BAG 고급 RETROSPOT |
|고급 CHARLIE+LOLA PERSONAL DOORSIGN|
+-----------------------------------+
only showing top 20 rows
###Markdown
6. Working with date and timestamp data types> If you need to set a time zone, you can do so through the spark.conf.sessionLocalTimeZone property in the Spark SQL configuration > The TimestampType class only supports second-level precision - if you need finer-than-second precision, the workaround is to convert the data to a long type and handle it yourself * Spark deals with only two kinds of time information - date, which carries only date information - timestamp, which carries both date and time information* If you need to set a time zone, you can do so through the spark.conf.sessionLocalTimeZone property in the Spark SQL configuration - it must follow the Java TimeZone format* The TimestampType class only supports second-level precision - if you need finer-than-second precision, the workaround is to convert the data to a long type and handle it yourself 6.1 Getting today's date
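As a quick illustration of the time-zone setting mentioned above, the session time zone can be inspected and changed at runtime. The snippet below is an added sketch, not part of the original lab; the configuration key `spark.sql.session.timeZone` and the value `Asia/Seoul` are example assumptions.
```python
# Minimal sketch: read and change the session-local time zone (example values)
print(spark.conf.get("spark.sql.session.timeZone"))          # current setting
spark.conf.set("spark.sql.session.timeZone", "Asia/Seoul")   # any Java TimeZone id works
```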
###Code
from pyspark.sql.functions import current_date, current_timestamp
dateDF = (
spark.range(10)
.withColumn("today", current_date())
.withColumn("now", current_timestamp())
)
dateDF.createOrReplaceTempView("dataTable")
dateDF.printSchema()
dateDF.show(3, False)
###Output
root
|-- id: long (nullable = false)
|-- today: date (nullable = false)
|-- now: timestamp (nullable = false)
+---+----------+-----------------------+
|id |today |now |
+---+----------+-----------------------+
|0 |2021-08-21|2021-08-21 17:49:05.713|
|1 |2021-08-21|2021-08-21 17:49:05.713|
|2 |2021-08-21|2021-08-21 17:49:05.713|
+---+----------+-----------------------+
only showing top 3 rows
###Markdown
6.2 Adding and subtracting dates
###Code
from pyspark.sql.functions import date_sub, date_add
dateDF.select(
date_sub(col("today"), 5),
date_add(col("today"), 5)
).show(1)
""" 두 날짜 사이의 일/개월 수를 파악 """
from pyspark.sql.functions import datediff, months_between, to_date
(
dateDF.withColumn("week_ago", date_sub(col("today"), 7))
.select(datediff(col("week_ago"), col("today")))
).show(1) # 현재 날짜에서 7일 제외 후 datediff 결과 확인
(
dateDF
.select(to_date(lit("2016-01-01")).alias("start"), to_date(lit("2017-05-22")).alias("end"))
.select(months_between(col("start"), col("end")))
).show(1) # 개월 수 차이 파악
""" 문자열을 날짜로 변환 """ # 자바의 simpleDateFormat 클래스가 지원하는 포맷 사용 필요
from pyspark.sql.functions import to_date, lit
spark.range(5) \
.withColumn("date", lit("2017-01-01")) \
.select(to_date(col("date"))) \
.show(1)
""" 파싱오류로 날짜가 null로 반환되는 사례 """
dateDF.select(to_date(lit("2016-20-12")), to_date(lit("2017-12-11"))).show(1) # 월과 일의 순서가 바뀜
""" SimpleDateFormat 표준을 활용하여 날짜 포멧을 지정 """
from pyspark.sql.functions import to_date
dateFormat = "yyyy-dd-MM" # 소문자 mm 주의
cleanDateDF = spark.range(1).select( # 1개 Row를 생성
to_date(lit("2017-12-11"), dateFormat).alias("date"),
to_date(lit("2017-20-12"), dateFormat).alias("date2"))
cleanDateDF.createOrReplaceTempView("dateTable2")
###Output
_____no_output_____
###Markdown
※ SimpleDateFormat : https://bvc12.tistory.com/168 6.3 Converting strings to dates
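Alongside to_date, Spark also provides to_timestamp for string-to-timestamp conversion. The short sketch below is an added illustration (not part of the original lab); it reuses cleanDateDF and dateFormat from the cell above.
```python
# Minimal sketch: convert the parsed date string to a timestamp using an explicit format
from pyspark.sql.functions import to_timestamp
cleanDateDF.select(to_timestamp(col("date"), dateFormat)).show()
```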
###Code
from pyspark.sql.functions import to_date, lit
spark.range(5) \
.withColumn("date", lit("2017-01-01")) \
.select(to_date(col("date"))) \
.show(1)
""" 파싱오류로 날짜가 null로 반환되는 사례 """
dateDF.select(to_date(lit("2016-20-12")), to_date(lit("2017-12-11"))).show(1) # 월과 일의 순서가 바뀜
###Output
+-------------------+-------------------+
|to_date(2016-20-12)|to_date(2017-12-11)|
+-------------------+-------------------+
| null| 2017-12-11|
+-------------------+-------------------+
only showing top 1 row
###Markdown
7. [Intermediate] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. Add a load-date column (LoadDate) in 'yyyy-MM-dd' format containing the current date 2. Add a column (InvoiceDiff) holding the difference between the invoice date (InvoiceDate) and today (LoadDate - to_date(InvoiceDate))* Print the changed schema and confirm it is as expected* Use the Structured API where possible and keep the code as concise as you can[Exercise 7] Check your output > If your code looks similar to the following, it is correct```pythondf7 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)answer = df7.withColumn("LoadDate", current_date()).withColumn("InvoiceDiff", col("LoadDate") - to_date(col("InvoiceDate")))display(answer)answer.printSchema()```
###Code
# Write and run your practice code here (Shift+Enter)
###Output
_____no_output_____
###Markdown
7. Handling null values+ It is always better to deal with nulls explicitly than implicitly+ Declaring a column as not allowing nulls is not enforced+ The nullable attribute only helps the Spark SQL optimizer simplify how it handles that column+ There are two ways to deal with null values + explicitly drop the nulls + fill the nulls with a specific value, globally or per column 7-1. Null-handling functions on column values (ifnull, nullif, nvl, nvl2)+ These are SQL functions and can also be used in DataFrame select expressions + ifnull(null, 'return_value') returns the second value if the first is null, otherwise the first + nullif('value', 'value') returns null if the two values are equal + nvl(null, 'return_value') returns the second value if the first is null, otherwise the first + nvl2('not_null', 'return_value', 'else_value') returns the second value if the first is not null, otherwise the third
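In the DataFrame API the same idea is usually expressed with coalesce, which returns the first non-null value among its arguments. The snippet below is a small added sketch using the retail df already loaded earlier in this notebook.
```python
# Minimal sketch (illustrative, not part of the original lab):
# pick the first non-null value per row
from pyspark.sql.functions import coalesce, col
df.select(coalesce(col("Description"), col("CustomerID"))).show(5)
```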
###Code
spark.sql("""
SELECT
ifnull(null, 'return_value'),
nullif('value', 'value'),
nvl(null, 'return_value'),
nvl2('not null', 'return_value', 'else_value')
""").show()
###Output
+--------------------------+--------------------+-----------------------+----------------------------------------+
|ifnull(NULL, return_value)|nullif(value, value)|nvl(NULL, return_value)|nvl2(not null, return_value, else_value)|
+--------------------------+--------------------+-----------------------+----------------------------------------+
| return_value| null| return_value| return_value|
+--------------------------+--------------------+-----------------------+----------------------------------------+
###Markdown
7-2 Dropping rows based on null column values (na.drop)
###Code
df.na.drop()
df.na.drop("any").show(1) # 로우 컬럼값 중 하나라도 null이면 제거
df.na.drop("all").show(1) # 로우 컬럼값 모두 null이면 제거
# 배열 형태의 컬럼을 인수로 전달하여 지정한 컬럼만 전체(all)가 null 인 경우만 제거합니다
df.na.drop("all", subset=("StockCode", "InvoiceNo")).show(1)
###Output
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536365| 85123A|WHITE HANGING HEA...| 6|2010-12-01 08:26:00| 2.55| 17850.0|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 1 row
###Markdown
7.3 Filling null column values (na.fill)
###Code
""" null을 포함한 DataFrame 행성 """
from pyspark.sql import Row
from pyspark.sql.types import StructField, StructType, StringType, DoubleType
myManualSchema = StructType([
StructField("string_null", StringType(), True),
StructField("string2_null", StringType(), True),
StructField("number_null", DoubleType(), True)
])
myRows = []
myRows.append(Row("Hello", "World", float(5))) # string 컬럼에 null 포함
myRows.append(Row("Hello", None, float(5))) # string 컬럼에 null 포함
myRows.append(Row(None, "World", None)) # number 컬럼에 null 포함
myRows.append(Row(None, None, None)) # 모든 컬럼이 null
myDf = spark.createDataFrame(myRows, myManualSchema)
myDf.show()
myDf.na.fill( {"number_null": 5.0, "string_null": "not_null"} ).show()
myDf.show()
myDf.na.drop("all", subset=("string_null", "number_null")).show()
myDf.na.drop("all").show()
myDf.na.drop("any", subset=("string_null", "number_null")).show()
myDf.na.drop("any", subset=("string_null", "string2_null", "number_null")).show()
myDf.na.drop("any").show()
###Output
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
| Hello| null| 5.0|
| null| World| null|
| null| null| null|
+-----------+------------+-----------+
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
| Hello| null| 5.0|
+-----------+------------+-----------+
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
| Hello| null| 5.0|
| null| World| null|
+-----------+------------+-----------+
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
| Hello| null| 5.0|
+-----------+------------+-----------+
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
+-----------+------------+-----------+
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| World| 5.0|
+-----------+------------+-----------+
###Markdown
7.4 Replacing values conditionally (na.replace)
###Code
""" 조건에 따라 다른 값으로 대체 """
myDf.na.replace([""], ["Hello"], "string_null").show() # null을 지정하는 방법은?
###Output
+-----------+------------+-----------+
|string_null|string2_null|number_null|
+-----------+------------+-----------+
| Hello| null| 5.0|
| null| World| null|
+-----------+------------+-----------+
###Markdown
8. [Intermediate] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. Check for nulls only in the customer ID (CustomerID) column and drop any row where it is null 2. Replace the country (Country) values using the rules below* "United Kingdom" -> "UK", "France" -> "FR", "Germany" -> "DE", "Netherlands" -> "NL", "Australia" -> "AT", "Norway" -> "NO", "EIRE" -> "IE" 3. Print the count per country (Country)* Sort the countries by count in descending order* Use the Structured API where possible and keep the code as concise as you can[Exercise 8] Check your output > If your code looks similar to the following, it is correct```pythondf8 = spark.read.csv(f"{work_data}/retail-data/by-day/2010-12-01.csv", inferSchema=True, header=True)keys = ["United Kingdom", "France", "Germany", "Netherlands", "Australia", "Norway", "EIRE"]values = ["UK", "FR", "DE", "NL", "AT", "NO", "IE"]answer = ( df8.na.drop("any", subset=("CustomerID")) .na.replace(keys, values, "Country") .groupBy("Country") .count().orderBy(desc("count"))).show()```
###Code
# Write and run your practice code here (Shift+Enter)
###Output
+-------+-----+
|Country|count|
+-------+-----+
| UK| 1809|
| NO| 73|
| DE| 29|
| IE| 21|
| FR| 20|
| AT| 14|
| NL| 2|
+-------+-----+
###Markdown
8. Working with complex data types> Handling structs, arrays, maps, and so on in Spark 8.1 Structs* A DataFrame inside a DataFrame - created by wrapping multiple columns in parentheses - fields are accessed with dot (.) syntax or the getField method - the (*) character retrieves every value
###Code
from pyspark.sql.functions import struct
complexDF = df.select(struct("Description", "InvoiceNo").alias("complex"))
complexDF.createOrReplaceTempView("complexDF")
complexDF.show(5, False)
complexDF.printSchema()
complexDF.select("complex.Description", "complex.InvoiceNo") # 모두 동일
complexDF.select(col("complex").getField("Description"), col("complex").getField("InvoiceNo"))
complexDF.select("complex.*")
complexDF.select(col("complex.*"))
complexDF.selectExpr("complex.*").show(5)
###Output
+--------------------+---------+
| Description|InvoiceNo|
+--------------------+---------+
|WHITE HANGING HEA...| 536365|
| WHITE METAL LANTERN| 536365|
|CREAM CUPID HEART...| 536365|
|KNITTED UNION FLA...| 536365|
|RED WOOLLY HOTTIE...| 536365|
+--------------------+---------+
only showing top 5 rows
###Markdown
8.2 Arrays> Turn every word in the Description column of the data into its own row Split a column on a delimiter to convert it into an array (split)
###Code
""" 컬럼을 배열로 변환 """
from pyspark.sql.functions import split
df.select(split(col("Description"), " ")).show(2)
""" 배열값의 조회 """
df.select(split(col("Description"), " ").alias("array_col"))\
.selectExpr("array_col[0]").show(2)
###Output
+------------+
|array_col[0]|
+------------+
| WHITE|
| WHITE|
+------------+
only showing top 2 rows
###Markdown
Array length (size)
###Code
""" size 함수 """
from pyspark.sql.functions import size
df.select(size(split(col("Description"), " "))).show(2)
###Output
+-------------------------------+
|size(split(Description, , -1))|
+-------------------------------+
| 5|
| 3|
+-------------------------------+
only showing top 2 rows
###Markdown
Check whether an array contains a specific value (array_contains)
###Code
from pyspark.sql.functions import array_contains
df.select(array_contains(split(col("Description"), " "), "WHITE")).show(2)
###Output
+------------------------------------------------+
|array_contains(split(Description, , -1), WHITE)|
+------------------------------------------------+
| true|
| true|
+------------------------------------------------+
only showing top 2 rows
###Markdown
Convert every value contained in a column's array into a row (explode)
###Code
from pyspark.sql.functions import split, explode
exploded = df \
.withColumn("splitted", split(col("Description"), " ")) \
.withColumn("exploded", explode(col("splitted")))
exploded.printSchema()
ef = exploded.select("Description", "InvoiceNo", "exploded") # every word is turned into its own row
print(df.select("Description").count())
print(ef.select("exploded").count()) # the row counts differ
exploded.select("Description", "exploded").count() # counts follow the larger (exploded) side
exploded.select("Description", "exploded").take(10) # the Description column is repeated, acting like a group key
###Output
_____no_output_____
###Markdown
8.3 Maps+ Created with the map function from key-value pairs of columns+ Data can be looked up with the right key; if the key does not exist, null is returned
###Code
""" 맵 생성 """
from pyspark.sql.functions import create_map
df.select(create_map(col("Description"), col("InvoiceNo")).alias("complex_map")).show(20, False)
""" 맵의 데이터 조회 """
mapped = df \
.select(create_map(col("Description"), col("InvoiceNo")).alias("complex_map"))
mapped.printSchema()
mapped.selectExpr("complex_map['WHITE METAL LANTERN']").where("complex_map['WHITE METAL LANTERN'] is not null").show()
""" 맵의 분해 """
exploded = df \
.select(create_map(col("Description"), col("InvoiceNo")).alias("complex_map")) \
.selectExpr("explode(complex_map)")
exploded.printSchema()
exploded.show(5)
###Output
root
|-- key: string (nullable = false)
|-- value: string (nullable = true)
+--------------------+------+
| key| value|
+--------------------+------+
|WHITE HANGING HEA...|536365|
| WHITE METAL LANTERN|536365|
|CREAM CUPID HEART...|536365|
|KNITTED UNION FLA...|536365|
|RED WOOLLY HOTTIE...|536365|
+--------------------+------+
only showing top 5 rows
###Markdown
9. Working with JSON
###Code
""" Json 컬럼 생성 """
jsonDF = spark.range(1).selectExpr(
"""
'{"myJSONKey" : {"myJSONValue" : [1, 2, 3]}}' as jsonString
"""
)
""" 인라인 쿼리로 JSON 조회하기 """
from pyspark.sql.functions import get_json_object, json_tuple
jsonDF.select(
    get_json_object(col("jsonString"), "$.myJSONKey.myJSONValue[1]").alias("column"),  # JSONPath starts with $
json_tuple(col("jsonString"), "myJSONKey")
).show(2)
""" StructType을 Json 문자열로 변경 """
from pyspark.sql.functions import to_json
df.selectExpr("(InvoiceNo, Description) as myStruct") \
.select(to_json(col("myStruct"))) \
.take(3)
""" Json 문자열을 객체로 변환 """
from pyspark.sql.functions import from_json
from pyspark.sql.types import *
parseSchema = StructType([
StructField("InvoiceNo", StringType(), True),
StructField("Description", StringType(), True)
])
df.selectExpr("(InvoiceNo, Description) as myStruct") \
.select(to_json(col("myStruct")).alias("newJSON")) \
.select(from_json(col("newJSON"), parseSchema), col("newJSON")) \
    .show(2) # keys become column names and values become rows
###Output
+--------------------+--------------------+
| from_json(newJSON)| newJSON|
+--------------------+--------------------+
|{536365, WHITE HA...|{"InvoiceNo":"536...|
|{536365, WHITE ME...|{"InvoiceNo":"536...|
+--------------------+--------------------+
only showing top 2 rows
###Markdown
9. [Basic] Read the CSV file stored at f"{work_data}/retail-data/by-day/2010-12-01.csv" and 1. Print the schema 2. Print 10 rows of data 3. Extract and print a DataFrame of the rows where the customer ID (CustomerID) or description (Description) column is null 4. Replace CustomerID with 0.0 where it is null 5. Make sure Description is stored as "NOT MENTIONED" where it is null 6. Print the final schema and data[Exercise 9] Check your output > If your code looks similar to the following, it is correct```pythondf5 = ( spark.read.format("csv") .option("header", "true") .option("inferSchema", "true") .load(f"{work_data}/retail-data/by-day/2010-12-01.csv")).where(expr("Description is null or CustomerID is null"))df5.printSchema()df5.show(10)desc_custid_fill = {"Description":"NOT MENTIONED", "CustomerID":0.0}answer = df5.na.fill(desc_custid_fill)answer.printSchema()display(answer)```
###Code
# Write and run your practice code here (Shift+Enter)
###Output
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = true)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = true)
|-- Country: string (nullable = true)
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
|InvoiceNo|StockCode| Description|Quantity| InvoiceDate|UnitPrice|CustomerID| Country|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
| 536414| 22139| null| 56|2010-12-01 11:52:00| 0.0| null|United Kingdom|
| 536544| 21773|DECORATIVE ROSE B...| 1|2010-12-01 14:32:00| 2.51| null|United Kingdom|
| 536544| 21774|DECORATIVE CATS B...| 2|2010-12-01 14:32:00| 2.51| null|United Kingdom|
| 536544| 21786| POLKADOT RAIN HAT | 4|2010-12-01 14:32:00| 0.85| null|United Kingdom|
| 536544| 21787|RAIN PONCHO RETRO...| 2|2010-12-01 14:32:00| 1.66| null|United Kingdom|
| 536544| 21790| VINTAGE SNAP CARDS| 9|2010-12-01 14:32:00| 1.66| null|United Kingdom|
| 536544| 21791|VINTAGE HEADS AND...| 2|2010-12-01 14:32:00| 2.51| null|United Kingdom|
| 536544| 21801|CHRISTMAS TREE DE...| 10|2010-12-01 14:32:00| 0.43| null|United Kingdom|
| 536544| 21802|CHRISTMAS TREE HE...| 9|2010-12-01 14:32:00| 0.43| null|United Kingdom|
| 536544| 21803|CHRISTMAS TREE ST...| 11|2010-12-01 14:32:00| 0.43| null|United Kingdom|
+---------+---------+--------------------+--------+-------------------+---------+----------+--------------+
only showing top 10 rows
root
|-- InvoiceNo: string (nullable = true)
|-- StockCode: string (nullable = true)
|-- Description: string (nullable = false)
|-- Quantity: integer (nullable = true)
|-- InvoiceDate: string (nullable = true)
|-- UnitPrice: double (nullable = true)
|-- CustomerID: double (nullable = false)
|-- Country: string (nullable = true)
Chapter 5/Python/cross-validation/3. Simplified Tibshirani resampling Lab pol features.ipynb | ###Markdown
Above we saw polynomial features of 123. Now let us look at polynomial features of a real data column: horsepower.
###Code
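# The cells below assume df1 (the Auto data set with 'horsepower' and 'mpg') and a few sklearn
# helpers were defined earlier in the notebook. If you run this part on its own, the minimal
# setup below recreates them (the file name 'Auto.csv' is an assumption):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl_lm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

df1 = pd.read_csv('Auto.csv', na_values='?').dropna()  # assumed path to the Auto data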
df1.shape
df1.head()
# Grid of polynomial degrees (X) and random seeds (Y), plus a results matrix Z.
# These mirror the definitions used in the "zip" variant further below and are
# restated here so this cell runs on its own.
t_prop = 0.5
p_order = np.arange(1, 11)   # polynomial degrees 1..10
r_state = np.arange(0, 10)   # 10 different random-split seeds
X, Y = np.meshgrid(p_order, r_state, indexing='ij')
Z = np.zeros((p_order.size, r_state.size))
regr = skl_lm.LinearRegression()

# Generate 10 random splits of the dataset
for (i,j),v in np.ndenumerate(Z):
poly = PolynomialFeatures(int(X[i,j])) # set transormation object with poly degree from X matrix
X_poly = poly.fit_transform(df1.horsepower.values.reshape(-1,1)) # transform data by object
X_train, X_test, y_train, y_test = train_test_split(X_poly, df1.mpg.ravel(),
test_size=t_prop, random_state=Y[i,j])
# randomly half split data by using seeds from Y matrix below
regr.fit(X_train, y_train)
pred = regr.predict(X_test)
# calculate MSE on validation data
Z[i,j]= mean_squared_error(y_test, pred)
X_poly.ndim, np.shape(X_poly)
X_poly
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,4))
# Left plot (first split)
ax1.plot(X.T[0],Z.T[0], '-o') # plot first column of degrees and MSE
ax1.set_title('Random split of the data set')
# Right plot (all splits)
ax2.plot(X,Z)
ax2.set_title('10 random splits of the data set')
for ax in fig.axes:
ax.set_ylabel('Mean Squared Error')
ax.set_ylim(15,30)
ax.set_xlabel('Degree of Polynomial')
ax.set_xlim(0.5,10.5)
ax.set_xticks(range(2,11,2));
###Output
_____no_output_____
###Markdown
Different lines = different seeds. Different points on a line = different degrees of the polynomial. Another way: by zip
###Code
t_prop = 0.5
p_order = np.arange(1,11) # polynomial degree
r_state = np.arange(0,10) # every seed splplits differently
X, Y = np.meshgrid(p_order, r_state, indexing='ij') # constant rows and constant cols
pairs = np.c_[X.ravel(), Y.ravel()]
pairs[:10]
regr = skl_lm.LinearRegression()
pairs.shape
z = zip(*pairs.T) # this way
next(z), next(z), next(z)
z = zip(*pairs) # not this way
next(z)[:10], next(z)[:10]
Z = []
for degree, seed in zip(*pairs.T):
poly = PolynomialFeatures(int(degree)) # set transormation object with poly degree from X matrix
X_poly = poly.fit_transform(df1.horsepower.values.reshape(-1,1)) # transform data by object
X_train, X_test, y_train, y_test = train_test_split(X_poly, df1.mpg.ravel(),
test_size=t_prop, random_state=seed)
# randomly half split data by using seeds from Y matrix below
regr.fit(X_train, y_train)
pred = regr.predict(X_test)
# calculate MSE on validation data
Z.append(mean_squared_error(y_test, pred))
Z = np.array(Z).reshape(len(p_order), len(r_state))
Z.shape
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,4))
# Left plot (first split)
ax1.plot(X.T[0],Z.T[0], '-o') # plot first column of degrees and MSE
ax1.set_title('Random split of the data set')
# Right plot (all splits)
ax2.plot(X,Z)
ax2.set_title('10 random splits of the data set')
for ax in fig.axes:
ax.set_ylabel('Mean Squared Error')
ax.set_ylim(15,30)
ax.set_xlabel('Degree of Polynomial')
ax.set_xlim(0.5,10.5)
ax.set_xticks(range(2,11,2));
###Output
_____no_output_____ |
GAN/StackGAN/CUB-200-2010/StackGAN-Stage-1.ipynb | ###Markdown
StackGAN Stage-1 Import libraries
###Code
import os
import pickle
import random
import time
import PIL
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from keras import Input, Model
from keras import backend as K
from keras.callbacks import TensorBoard
from keras.layers import Dense, LeakyReLU, BatchNormalization, ReLU, Reshape, UpSampling2D, Conv2D, Activation
from keras.layers import concatenate, Flatten, Lambda, Concatenate
from keras.optimizers import Adam
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Load dataset
###Code
def load_class_ids(class_info_file_path):
"""
Load class ids from class_info.pickle file
"""
with open(class_info_file_path, 'rb') as f:
class_ids = pickle.load(f, encoding='latin1')
return class_ids
def load_embeddings(embeddings_file_path):
"""
Load embeddings
"""
with open(embeddings_file_path, 'rb') as f:
embeddings = pickle.load(f, encoding='latin1')
embeddings = np.array(embeddings)
print('embeddings: ', embeddings.shape)
return embeddings
def load_filenames(filenames_file_path):
"""
Load filenames.pickle file and return a list of all file names
"""
with open(filenames_file_path, 'rb') as f:
filenames = pickle.load(f, encoding='latin1')
return filenames
def load_bounding_boxes(dataset_dir):
"""
Load bounding boxes and return a dictionary of file names and corresponding bounding boxes
"""
# Paths
bounding_boxes_path = os.path.join(dataset_dir, 'bounding_boxes.txt')
file_paths_path = os.path.join(dataset_dir, 'images.txt')
# Read bounding_boxes.txt and images.txt file
df_bounding_boxes = pd.read_csv(bounding_boxes_path,
delim_whitespace=True, header=None).astype(int)
df_file_names = pd.read_csv(file_paths_path, delim_whitespace=True, header=None)
# Create a list of file names
file_names = df_file_names[1].tolist()
# Create a dictionary of file_names and bounding boxes
filename_boundingbox_dict = {img_file[:-4]: [] for img_file in file_names[:2]}
# Assign a bounding box to the corresponding image
for i in range(0, len(file_names)):
# Get the bounding box
bounding_box = df_bounding_boxes.iloc[i][1:].tolist()
key = file_names[i][:-4]
filename_boundingbox_dict[key] = bounding_box
return filename_boundingbox_dict
def get_img(img_path, bbox, image_size):
"""
Load and resize image
"""
img = Image.open(img_path).convert('RGB')
width, height = img.size
if bbox is not None:
R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
y1 = np.maximum(0, center_y - R)
y2 = np.minimum(height, center_y + R)
x1 = np.maximum(0, center_x - R)
x2 = np.minimum(width, center_x + R)
img = img.crop([x1, y1, x2, y2])
img = img.resize(image_size, PIL.Image.BILINEAR)
return img
def load_dataset(filenames_file_path, class_info_file_path, cub_dataset_dir, embeddings_file_path, image_size):
"""
Load dataset
"""
filenames = load_filenames(filenames_file_path)
class_ids = load_class_ids(class_info_file_path)
bounding_boxes = load_bounding_boxes(cub_dataset_dir)
all_embeddings = load_embeddings(embeddings_file_path)
X, y, embeddings = [], [], []
print("Embeddings shape:", all_embeddings.shape)
for index, filename in enumerate(filenames):
bounding_box = bounding_boxes[filename]
try:
# Load images
img_name = '{}/images/{}.jpg'.format(cub_dataset_dir, filename)
img = get_img(img_name, bounding_box, image_size)
all_embeddings1 = all_embeddings[index, :, :]
embedding_ix = random.randint(0, all_embeddings1.shape[0] - 1)
embedding = all_embeddings1[embedding_ix, :]
X.append(np.array(img))
y.append(class_ids[index])
embeddings.append(embedding)
except Exception as e:
print(e)
X = np.array(X)
y = np.array(y)
embeddings = np.array(embeddings)
return X, y, embeddings
###Output
_____no_output_____
###Markdown
Create model.
###Code
def generate_c(x):
mean = x[:, :128]
log_sigma = x[:, 128:]
stddev = K.exp(log_sigma)
epsilon = K.random_normal(shape=K.constant((mean.shape[1],), dtype='int32'))
c = stddev * epsilon + mean
return c
def build_ca_model():
"""
Get conditioning augmentation model.
Takes an embedding of shape (1024,) and returns a tensor of shape (256,)
"""
input_layer = Input(shape=(1024,))
x = Dense(256)(input_layer)
x = LeakyReLU(alpha=0.2)(x)
model = Model(inputs=[input_layer], outputs=[x])
return model
def build_embedding_compressor_model():
"""
Build embedding compressor model
"""
input_layer = Input(shape=(1024,))
x = Dense(128)(input_layer)
x = ReLU()(x)
model = Model(inputs=[input_layer], outputs=[x])
return model
def build_stage1_generator():
"""
Builds a generator model used in Stage-I
"""
input_layer = Input(shape=(1024,))
x = Dense(256)(input_layer)
mean_logsigma = LeakyReLU(alpha=0.2)(x)
c = Lambda(generate_c)(mean_logsigma)
input_layer2 = Input(shape=(100,))
gen_input = Concatenate(axis=1)([c, input_layer2])
x = Dense(128 * 8 * 4 * 4, use_bias=False)(gen_input)
x = ReLU()(x)
x = Reshape((4, 4, 128 * 8), input_shape=(128 * 8 * 4 * 4,))(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(512, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(256, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(128, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(64, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(3, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = Activation(activation='tanh')(x)
stage1_gen = Model(inputs=[input_layer, input_layer2], outputs=[x, mean_logsigma])
return stage1_gen
def build_stage1_discriminator():
"""
Create a model which takes two inputs
1. One from above network
2. One from the embedding layer
3. Concatenate along the axis dimension and feed it to the last module which produces final logits
"""
input_layer = Input(shape=(64, 64, 3))
x = Conv2D(64, (4, 4),
padding='same', strides=2,
input_shape=(64, 64, 3), use_bias=False)(input_layer)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(128, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(256, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(512, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
input_layer2 = Input(shape=(4, 4, 128))
merged_input = concatenate([x, input_layer2])
x2 = Conv2D(64 * 8, kernel_size=1,
padding="same", strides=1)(merged_input)
x2 = BatchNormalization()(x2)
x2 = LeakyReLU(alpha=0.2)(x2)
x2 = Flatten()(x2)
x2 = Dense(1)(x2)
x2 = Activation('sigmoid')(x2)
stage1_dis = Model(inputs=[input_layer, input_layer2], outputs=[x2])
return stage1_dis
def build_adversarial_model(gen_model, dis_model):
input_layer = Input(shape=(1024,))
input_layer2 = Input(shape=(100,))
input_layer3 = Input(shape=(4, 4, 128))
x, mean_logsigma = gen_model([input_layer, input_layer2])
dis_model.trainable = False
valid = dis_model([x, input_layer3])
model = Model(inputs=[input_layer, input_layer2, input_layer3], outputs=[valid, mean_logsigma])
return model
###Output
_____no_output_____
###Markdown
Define loss.
###Code
def KL_loss(y_true, y_pred):
mean = y_pred[:, :128]
    logsigma = y_pred[:, 128:]  # second half of the mean/log-sigma vector (the original sliced the mean twice)
loss = -logsigma + .5 * (-1 + K.exp(2. * logsigma) + K.square(mean))
loss = K.mean(loss)
return loss
def custom_generator_loss(y_true, y_pred):
# Calculate binary cross entropy loss
return K.binary_crossentropy(y_true, y_pred)
def save_rgb_img(img, path):
"""
Save an rgb image
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(img)
ax.axis("off")
ax.set_title("Image")
plt.savefig(path)
plt.close()
def write_log(callback, name, loss, batch_no):
"""
Write training summary to TensorBoard
"""
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = loss
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
###Output
_____no_output_____
###Markdown
Download the dataset.
###Code
!wget http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz
!ls
import tarfile
tar = tarfile.open("CUB_200_2011.tgz")
tar.extractall()
tar.close()
!ls
cd CUB_200_2011
!ls
os.chdir("/content/")
!ls
!gdown https://drive.google.com/uc?id=0B3y_msrWZaXLT1BZdVdycDY5TEE
!ls -al
!unzip birds.zip
ls -al birds
!mkdir results
if __name__ == '__main__':
data_dir = "/content/birds/"
train_dir = data_dir + "/train"
test_dir = data_dir + "/test"
image_size = 64
batch_size = 64
z_dim = 100
stage1_generator_lr = 0.0002
stage1_discriminator_lr = 0.0002
stage1_lr_decay_step = 600
epochs = 50
condition_dim = 128
embeddings_file_path_train = train_dir + "/char-CNN-RNN-embeddings.pickle"
embeddings_file_path_test = test_dir + "/char-CNN-RNN-embeddings.pickle"
filenames_file_path_train = train_dir + "/filenames.pickle"
filenames_file_path_test = test_dir + "/filenames.pickle"
class_info_file_path_train = train_dir + "/class_info.pickle"
class_info_file_path_test = test_dir + "/class_info.pickle"
cub_dataset_dir = "/content/CUB_200_2011"
# Define optimizers
dis_optimizer = Adam(lr=stage1_discriminator_lr, beta_1=0.5, beta_2=0.999)
gen_optimizer = Adam(lr=stage1_generator_lr, beta_1=0.5, beta_2=0.999)
""""
Load datasets
"""
X_train, y_train, embeddings_train = load_dataset(filenames_file_path=filenames_file_path_train,
class_info_file_path=class_info_file_path_train,
cub_dataset_dir=cub_dataset_dir,
embeddings_file_path=embeddings_file_path_train,
image_size=(64, 64))
X_test, y_test, embeddings_test = load_dataset(filenames_file_path=filenames_file_path_test,
class_info_file_path=class_info_file_path_test,
cub_dataset_dir=cub_dataset_dir,
embeddings_file_path=embeddings_file_path_test,
image_size=(64, 64))
"""
Build and compile networks
"""
ca_model = build_ca_model()
ca_model.compile(loss="binary_crossentropy", optimizer="adam")
stage1_dis = build_stage1_discriminator()
stage1_dis.load_weights('stage1_dis.h5')
print('Loading stage1_dis weights - Done')
stage1_dis.compile(loss='binary_crossentropy', optimizer=dis_optimizer)
stage1_gen = build_stage1_generator()
stage1_gen.load_weights('stage1_gen.h5')
print('Loading stage1_gen weights - Done')
stage1_gen.compile(loss="mse", optimizer=gen_optimizer)
embedding_compressor_model = build_embedding_compressor_model()
embedding_compressor_model.compile(loss="binary_crossentropy", optimizer="adam")
adversarial_model = build_adversarial_model(gen_model=stage1_gen, dis_model=stage1_dis)
adversarial_model.compile(loss=['binary_crossentropy', KL_loss], loss_weights=[1, 2.0],
optimizer=gen_optimizer, metrics=None)
tensorboard = TensorBoard(log_dir="logs/".format(time.time()))
tensorboard.set_model(stage1_gen)
tensorboard.set_model(stage1_dis)
tensorboard.set_model(ca_model)
tensorboard.set_model(embedding_compressor_model)
# Generate an array containing real and fake values
# Apply label smoothing as well
real_labels = np.ones((batch_size, 1), dtype=float) * 0.9
fake_labels = np.zeros((batch_size, 1), dtype=float) * 0.1
for epoch in range(epochs):
print("========================================")
print("Epoch is:", epoch)
print("Number of batches", int(X_train.shape[0] / batch_size))
gen_losses = []
dis_losses = []
# Load data and train model
number_of_batches = int(X_train.shape[0] / batch_size)
for index in range(number_of_batches):
print("Batch:{}".format(index+1))
"""
Train the discriminator network
"""
# Sample a batch of data
z_noise = np.random.normal(0, 1, size=(batch_size, z_dim))
image_batch = X_train[index * batch_size:(index + 1) * batch_size]
embedding_batch = embeddings_train[index * batch_size:(index + 1) * batch_size]
image_batch = (image_batch - 127.5) / 127.5
# Generate fake images
fake_images, _ = stage1_gen.predict([embedding_batch, z_noise], verbose=3)
# Generate compressed embeddings
compressed_embedding = embedding_compressor_model.predict_on_batch(embedding_batch)
compressed_embedding = np.reshape(compressed_embedding, (-1, 1, 1, condition_dim))
compressed_embedding = np.tile(compressed_embedding, (1, 4, 4, 1))
dis_loss_real = stage1_dis.train_on_batch([image_batch, compressed_embedding],
np.reshape(real_labels, (batch_size, 1)))
dis_loss_fake = stage1_dis.train_on_batch([fake_images, compressed_embedding],
np.reshape(fake_labels, (batch_size, 1)))
dis_loss_wrong = stage1_dis.train_on_batch([image_batch[:(batch_size - 1)], compressed_embedding[1:]],
np.reshape(fake_labels[1:], (batch_size-1, 1)))
d_loss = 0.5 * np.add(dis_loss_real, 0.5 * np.add(dis_loss_wrong, dis_loss_fake))
print("d_loss_real:{}".format(dis_loss_real))
print("d_loss_fake:{}".format(dis_loss_fake))
print("d_loss_wrong:{}".format(dis_loss_wrong))
print("d_loss:{}".format(d_loss))
"""
Train the generator network
"""
            g_loss = adversarial_model.train_on_batch([embedding_batch, z_noise, compressed_embedding],[np.ones((batch_size, 1)) * 0.9, np.ones((batch_size, 256)) * 0.9])  # label-smoothed targets as NumPy arrays
print("g_loss:{}".format(g_loss))
dis_losses.append(d_loss)
gen_losses.append(g_loss)
"""
Save losses to Tensorboard after each epoch
"""
#write_log(tensorboard, 'discriminator_loss', np.mean(dis_losses), epoch)
#write_log(tensorboard, 'generator_loss', np.mean(gen_losses[0]), epoch)
# Generate and save images after every 2nd epoch
if epoch % 2 == 0:
# z_noise2 = np.random.uniform(-1, 1, size=(batch_size, z_dim))
z_noise2 = np.random.normal(0, 1, size=(batch_size, z_dim))
embedding_batch = embeddings_test[0:batch_size]
fake_images, _ = stage1_gen.predict_on_batch([embedding_batch, z_noise2])
# Save images
for i, img in enumerate(fake_images[:10]):
save_rgb_img(img, "results/gen_{}_{}.png".format(epoch, i))
# Save models
stage1_gen.save_weights("stage1_gen-50.h5")
stage1_dis.save_weights("stage1_dis-50.h5")
stage1_gen.save_weights("stage1_gen.h5")
stage1_dis.save_weights("stage1_dis.h5")
from google.colab import drive
drive.mount('/content/drive')
!cp stage1_gen.h5 '/content/drive/My Drive/.'
!cp stage1_dis.h5 '/content/drive/My Drive/.'
# Install the PyDrive wrapper & import libraries.
# This only needs to be done once in a notebook.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Create & upload a file.
uploaded = drive.CreateFile({'title': 'stage1_dis.h5'})
uploaded.SetContentFile('stage1_dis.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# Create & upload a file.
uploaded = drive.CreateFile({'title': 'stage1_gen.h5'})
uploaded.SetContentFile('stage1_gen.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
###Output
_____no_output_____ |
deep-learning/student-admissions/StudentAdmissions.ipynb | ###Markdown
Predicting Student Admissions with Neural NetworksIn this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:- GRE Scores (Test)- GPA Scores (Grades)- Class rank (1-4)The dataset originally came from here: http://www.ats.ucla.edu/ Loading the dataTo load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:- https://pandas.pydata.org/pandas-docs/stable/- https://docs.scipy.org/
###Code
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data
data[:10]
###Output
_____no_output_____
###Markdown
Plotting the dataFirst let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
###Code
# Importing matplotlib
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
X = np.array(data[["gre","gpa"]])
y = np.array(data["admit"])
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')
plt.xlabel('Test (GRE)')
plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
###Output
_____no_output_____
###Markdown
Roughly, it looks like the students with high grades and test scores passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would be. Maybe it would help to take the rank into account? Let's make 4 plots, one for each rank.
###Code
# Separating the ranks
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
###Output
_____no_output_____
###Markdown
This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it. TODO: One-hot encoding the rankUse the `get_dummies` function in pandas in order to one-hot encode the data.
###Code
# TODO: Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# TODO: Drop the previous rank column
one_hot_data = one_hot_data.drop('rank', axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
###Output
_____no_output_____
###Markdown
TODO: Scaling the dataThe next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
###Code
# Making a copy of our data
processed_data = one_hot_data[:]
# TODO: Scale the columns
processed_data['gre'] = processed_data['gre']/800
processed_data['gpa'] = processed_data['gpa']/4.0
# Printing the first 10 rows of our procesed data
processed_data[:10]
###Output
_____no_output_____
###Markdown
Splitting the data into Training and Testing In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
###Code
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
###Output
Number of training samples is 360
Number of testing samples is 40
admit gre gpa rank_1 rank_2 rank_3 rank_4
288 0 1.000 0.7875 0 0 0 1
210 0 0.925 0.8350 0 0 0 1
284 1 0.550 0.8475 0 1 0 0
43 0 0.625 0.8275 0 0 1 0
131 0 0.800 0.6975 0 1 0 0
120 1 0.650 0.9350 0 1 0 0
134 0 0.700 0.7375 0 1 0 0
341 1 0.700 0.6625 0 0 1 0
52 0 0.925 0.8425 0 0 0 1
169 0 0.750 0.9050 0 0 1 0
admit gre gpa rank_1 rank_2 rank_3 rank_4
33 1 1.000 1.0000 0 0 1 0
46 1 0.725 0.8650 0 1 0 0
57 0 0.475 0.7350 0 0 1 0
73 0 0.725 1.0000 0 1 0 0
75 0 0.900 1.0000 0 0 1 0
76 0 0.700 0.8400 0 0 1 0
78 0 0.675 0.7800 1 0 0 0
83 0 0.475 0.7275 0 0 0 1
85 0 0.650 0.7450 0 1 0 0
98 0 0.875 0.7200 0 1 0 0
###Markdown
Splitting the data into features and targets (labels)Now, as a final step before the training, we'll split the data into features (X) and targets (y).
###Code
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']
print(features[:10])
print(targets[:10])
###Output
gre gpa rank_1 rank_2 rank_3 rank_4
288 1.000 0.7875 0 0 0 1
210 0.925 0.8350 0 0 0 1
284 0.550 0.8475 0 1 0 0
43 0.625 0.8275 0 0 1 0
131 0.800 0.6975 0 1 0 0
120 0.650 0.9350 0 1 0 0
134 0.700 0.7375 0 1 0 0
341 0.700 0.6625 0 0 1 0
52 0.925 0.8425 0 0 0 1
169 0.750 0.9050 0 0 1 0
288 0
210 0
284 1
43 0
131 0
120 1
134 0
341 1
52 0
169 0
Name: admit, dtype: int64
###Markdown
Training the 2-layer Neural NetworkThe following function trains the 2-layer neural network. First, we'll write some helper functions.
###Code
# Activation (sigmoid) function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
return sigmoid(x) * (1-sigmoid(x))
def error_formula(y, output):
return - y*np.log(output) - (1 - y) * np.log(1-output)
###Output
_____no_output_____
###Markdown
TODO: Backpropagate the errorNow it's your turn to shine. Write the error term. Remember that this is given by the equation $$ -(y-\hat{y}) \sigma'(x) $$
###Code
# TODO: Write the error term formula
def error_term_formula(y, output):
return (y-output) * output * (1 - output)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
# Training function
def train_nn(features, targets, epochs, learnrate):
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = error_formula(y, output)
# The error term
# Notice we calulate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error_term_formula(y, output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
print("Epoch:", e)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
print("=========")
print("Finished training!")
return weights
weights = train_nn(features, targets, epochs, learnrate)
###Output
Epoch: 0
Train loss: 0.27282108628535967
=========
Epoch: 100
Train loss: 0.20774719064515393
=========
Epoch: 200
Train loss: 0.2052850054237043
=========
Epoch: 300
Train loss: 0.20413961986727067
=========
Epoch: 400
Train loss: 0.20353673516905066
=========
Epoch: 500
Train loss: 0.2031664616025387
=========
Epoch: 600
Train loss: 0.20290303171578605
=========
Epoch: 700
Train loss: 0.2026928250562334
=========
Epoch: 800
Train loss: 0.20251149171942945
=========
Epoch: 900
Train loss: 0.2023472203188334
=========
Finished training!
###Markdown
Calculating the Accuracy on the Test Data
###Code
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
###Output
Prediction accuracy: 0.675
###Markdown
Predicting Student Admissions with Neural NetworksIn this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:- GRE Scores (Test)- GPA Scores (Grades)- Class rank (1-4)The dataset originally came from here: http://www.ats.ucla.edu/ Loading the dataTo load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:- https://pandas.pydata.org/pandas-docs/stable/- https://docs.scipy.org/
###Code
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data
data[:10]
###Output
_____no_output_____
###Markdown
Plotting the dataFirst let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
###Code
# Importing matplotlib
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
X = np.array(data[["gre","gpa"]])
y = np.array(data["admit"])
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')
plt.xlabel('Test (GRE)')
plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
###Output
_____no_output_____
###Markdown
Roughly, it looks like the students with high grades and test scores passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would be. Maybe it would help to take the rank into account? Let's make 4 plots, one for each rank.
###Code
# Separating the ranks
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
###Output
_____no_output_____
###Markdown
This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it. TODO: One-hot encoding the rankUse the `get_dummies` function in pandas in order to one-hot encode the data.
###Code
# TODO: Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# TODO: Drop the previous rank column
one_hot_data = one_hot_data.drop(['rank'], axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
###Output
_____no_output_____
###Markdown
TODO: Scaling the dataThe next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
###Code
# Making a copy of our data
processed_data = one_hot_data[:]
# TODO: Scale the columns
processed_data['gpa'] /= 4.0
processed_data['gre'] /= 800
# Printing the first 10 rows of our procesed data
processed_data[:10]
###Output
_____no_output_____
###Markdown
Splitting the data into Training and Testing In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
###Code
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
###Output
Number of training samples is 360
Number of testing samples is 40
admit gre gpa rank_1 rank_2 rank_3 rank_4
246 0 0.850 0.8350 0 1 0 0
205 1 0.975 0.9500 0 0 1 0
283 0 0.650 0.7750 0 0 0 1
153 0 0.725 0.8650 0 0 1 0
348 0 0.500 0.8400 0 1 0 0
237 0 0.600 1.0000 0 1 0 0
269 0 0.625 0.7025 0 0 1 0
77 1 1.000 1.0000 0 0 1 0
161 0 0.800 0.8750 0 1 0 0
241 1 0.650 0.9525 1 0 0 0
admit gre gpa rank_1 rank_2 rank_3 rank_4
17 0 0.450 0.6400 0 0 1 0
27 1 0.650 0.9350 0 0 0 1
36 0 0.725 0.8125 1 0 0 0
37 0 0.650 0.7250 0 0 1 0
52 0 0.925 0.8425 0 0 0 1
59 0 0.750 0.7050 0 0 0 1
85 0 0.650 0.7450 0 1 0 0
94 1 0.825 0.8600 0 1 0 0
99 0 0.500 0.8275 0 0 1 0
116 1 0.550 0.8625 0 1 0 0
###Markdown
Splitting the data into features and targets (labels)Now, as a final step before the training, we'll split the data into features (X) and targets (y).
###Code
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']
print(features[:10])
print(targets[:10])
###Output
gre gpa rank_1 rank_2 rank_3 rank_4
246 0.850 0.8350 0 1 0 0
205 0.975 0.9500 0 0 1 0
283 0.650 0.7750 0 0 0 1
153 0.725 0.8650 0 0 1 0
348 0.500 0.8400 0 1 0 0
237 0.600 1.0000 0 1 0 0
269 0.625 0.7025 0 0 1 0
77 1.000 1.0000 0 0 1 0
161 0.800 0.8750 0 1 0 0
241 0.650 0.9525 1 0 0 0
246 0
205 1
283 0
153 0
348 0
237 0
269 0
77 1
161 0
241 1
Name: admit, dtype: int64
###Markdown
Training the 2-layer Neural NetworkThe following function trains the 2-layer neural network. First, we'll write some helper functions.
###Code
# Activation (sigmoid) function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
return sigmoid(x) * (1-sigmoid(x))
def error_formula(y, output):
return - y*np.log(output) - (1 - y) * np.log(1-output)
###Output
_____no_output_____
###Markdown
TODO: Backpropagate the errorNow it's your turn to shine. Write the error term. Remember that this is given by the equation $$ -(y-\hat{y}) \sigma'(x) $$
###Code
# TODO: Write the error term formula
def error_term_formula(y, output):
return (y-output) * output * (1 - output)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
# Training function
def train_nn(features, targets, epochs, learnrate):
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = error_formula(y, output)
# The error term
# Notice we calulate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error_term_formula(y, output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
print("Epoch:", e)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
print("=========")
print("Finished training!")
return weights
weights = train_nn(features, targets, epochs, learnrate)
###Output
Epoch: 0
Train loss: 0.27325724871791185
=========
Epoch: 100
Train loss: 0.20681091736169238
=========
Epoch: 200
Train loss: 0.20383377225397695
=========
Epoch: 300
Train loss: 0.20250185920633698
=========
Epoch: 400
Train loss: 0.20181973983585305
=========
Epoch: 500
Train loss: 0.20140516675645617
=========
Epoch: 600
Train loss: 0.2011092169070191
=========
Epoch: 700
Train loss: 0.20087064956911774
=========
Epoch: 800
Train loss: 0.20066268831988188
=========
Epoch: 900
Train loss: 0.2004729025854554
=========
Finished training!
###Markdown
Calculating the Accuracy on the Test Data
###Code
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
###Output
Prediction accuracy: 0.575
module3/part4.ipynb | ###Markdown
Module 3: Image Segmentation - Part 4
###Code
%matplotlib inline
import matplotlib
from plantcv import plantcv as pcv
from sklearn.cluster import MiniBatchKMeans
import numpy as np
matplotlib.rcParams["figure.max_open_warning"] = False
pcv.params.debug = "plot"
pcv.params.text_size = 30
pcv.params.text_thickness = 20
pcv.params.line_thickness = 10
pcv.__version__
###Output
_____no_output_____
###Markdown
Clustering (unsupervised machine learning)
###Code
# Open image file
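# Minimal sketch (the file name below is a placeholder assumption, not part of the original module):
img, path, filename = pcv.readimage(filename="plant_rgb.png")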
###Output
_____no_output_____
###Markdown
mini-batch k-means
###Code
# Create a kmeans model with a defined number of clusters
# Get the shape of the color image
# Train the kmeans model with the linearized RGB values
# Predict labels for the training image
# Reshape the labels into an image
# Plot the colorized labels
# Create an empty binary mask
# Set the mask to white where the kmeans cluster label overlaps plants
# Plot the mask
# Open a new image
# Apply the trained model to the new image
# Predict labels for the training image
# Reshape the labels into an image
# Plot the colorized labels
# Create an empty binary mask
# Set the mask to white where the kmeans cluster label overlaps plants
# Plot the mask
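# One possible way to fill in the steps above (a sketch, not the official solution; the cluster
# count and the "plant" cluster index are assumptions you would tune by inspecting the label plot).
n_clusters = 5
kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=0)

h, w, c = img.shape                      # shape of the color image
kmeans.fit(img.reshape(-1, c))           # train on linearized RGB values

labels = kmeans.predict(img.reshape(-1, c))
label_img = labels.reshape(h, w)         # reshape the labels into an image
pcv.plot_image(label_img)                # plot the integer label image

plant_cluster = 2                        # assumed index of the cluster covering the plants
mask = np.zeros((h, w), dtype=np.uint8)  # empty binary mask
mask[label_img == plant_cluster] = 255   # white where the kmeans label overlaps plants
pcv.plot_image(mask)

# Apply the trained model to a new image (file name is again a placeholder)
img2, path2, filename2 = pcv.readimage(filename="plant_rgb_2.png")
h2, w2, c2 = img2.shape
labels2 = kmeans.predict(img2.reshape(-1, c2))
label_img2 = labels2.reshape(h2, w2)
pcv.plot_image(label_img2)

mask2 = np.zeros((h2, w2), dtype=np.uint8)
mask2[label_img2 == plant_cluster] = 255
pcv.plot_image(mask2)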
###Output
_____no_output_____ |
Parameters_after_calibration_map.ipynb | ###Markdown
TSUM1
###Code
for years in [[2007,2009],[2010,2012],[2013,2015]][:]:
fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(6,6))
# m=Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon=79, llcrnrlat=14.5, \
# urcrnrlon=146.5, urcrnrlat=53, projection='aea', \
# lat_1=25, lat_2=47, lon_0=105, ax = ax)
m = Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon=81, llcrnrlat=15, \
urcrnrlon=138, urcrnrlat=51.5, projection='aea', \
lat_1=25, lat_2=47, lon_0=105, ax = ax)
m.readshapefile('D:/hh/矢量边界/中国9段线边界','China',drawbounds=True,zorder=2,linewidth=0.2,color='#304ffe')
m.readshapefile('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/cropdata/%d-%d_pars'%(years[0],years[1]), \
'pars',drawbounds=True,zorder=2,linewidth=0.2,color='k')
cmap=plt.cm.jet#.reversed()
item='tsum1_L1'
norm=plt.Normalize(int(np.percentile(data_new.tsum1_L1,5)),int(np.percentile(data_new.tsum1_L1,95)))
parallels = np.arange(20.,90,15.)
    m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, zorder=1, linewidth=0.2) # draw parallels (latitude lines)
    meridians = np.arange(70.,140.,15.)
    m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, zorder=1, linewidth=0.2) # draw meridians (longitude lines)
zips=zip(m.pars_info, m.pars)
patches = []
for info, shape in zips:
try:
color=cmap(norm(int(float(info['%s'%item]))))
# patches.append( Polygon(np.array(shape), True, color=color) )
patches.append( Circle(m(info['lon'],info['lat']),30000, color=color))
except:
pass
# pc = PatchCollection(patches, match_original=True, edgecolor=None, zorder=1)
pc = PatchCollection(patches, match_original=True, edgecolor=None, zorder=3)
ax.add_collection(pc)
    # Scale bar
    m.drawmapscale(89, 19.8, 117.5, 36, 1500, units='km',barstyle='fancy',format='%d',fontsize=8,zorder=4)
    # North arrow
offset = 0.01*(m.ymax-m.ymin)+0.01*(m.xmax-m.xmin)
x,y=m(72,46)
x=x+2*offset
y=y-offset
ax.fill([x,x+offset,x,x-offset,x],[y,y-3.5*offset,y-2.25*offset,y-3.5*offset,y],ec='k',fc='None',zorder=3)
ax.fill([x,x+offset,x,x],[y,y-3.5*offset,y-2.25*offset,y],ec='None',fc='w',zorder=3)
ax.fill([x,x,x-offset,x],[y,y-2.25*offset,y-3.5*offset,y],ec='None',fc='k',zorder=3)
ax.text(x,y+0.5*offset,'N',horizontalalignment='center',fontsize=10,zorder=3)
    ###### Inset map: nine-dash-line region ######
axins = zoomed_inset_axes(ax, 0.38, loc = 4, borderpad=0)
axins.set_xlim(108, 122.5)
axins.set_ylim(3, 25)
map2 = Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon = 108, llcrnrlat = 3, urcrnrlon = 122.5, \
urcrnrlat = 25,projection='aea', lat_1=25, lat_2=47, lon_0=105, ax = axins)
shpfile = 'D:/hh/矢量边界/中国9段线边界'
map2.readshapefile(shpfile, 'China',linewidth=0.2,color='#304ffe')
mark_inset(ax, axins, loc1=2, loc2=4, fc = "none", ec = "none")
#####################################
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm.set_array(colvals)
cb=fig.colorbar(sm, ax=ax,fraction=0.032,extend='both')
font={'size':13}
    # cb.set_label('Relative error',fontdict=font) # set the colorbar label font and size
    cb.set_label('%s'%'$\mathrm{℃·d}^{-1}$',fontdict=font) # set the colorbar label font and size
plt.text(0.5,0.92,'TSUM1 (%d-%d)'%(years[0],years[1]), fontsize=14, transform=ax.transAxes, \
horizontalalignment='center')
fig.savefig('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/标定png/%d-%d_TSUM1.png'% \
(years[0],years[1]),dpi=600,bbox_inches='tight')
###Output
_____no_output_____
###Markdown
TSUM2
###Code
data_new.tsum2_L1.min(),data_new.tsum2_L1.max(),np.percentile(data_new.tsum2_L1,25),np.percentile(data_new.tsum2_L1,95)
for years in [[2007,2009],[2010,2012],[2013,2015]][:]:
fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(6,6))
# m=Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon=79, llcrnrlat=14.5, \
# urcrnrlon=146.5, urcrnrlat=53, projection='aea', \
# lat_1=25, lat_2=47, lon_0=105, ax = ax)
m = Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon=81, llcrnrlat=15, \
urcrnrlon=138, urcrnrlat=51.5, projection='aea', \
lat_1=25, lat_2=47, lon_0=105, ax = ax)
m.readshapefile('D:/hh/矢量边界/中国9段线边界','China',drawbounds=True,zorder=2,linewidth=0.2,color='#304ffe')
m.readshapefile('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/cropdata/%d-%d_pars'%(years[0],years[1]), \
'pars',drawbounds=True,zorder=2,linewidth=0.2,color='k')
cmap=plt.cm.jet#.reversed()
item='tsum2_L1'
norm=plt.Normalize(int(np.percentile(data_new.tsum2_L1,5)),int(np.percentile(data_new.tsum2_L1,95)))
parallels = np.arange(20.,90,15.)
    m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, zorder=1, linewidth=0.2) # draw parallels (latitude lines)
    meridians = np.arange(70.,140.,15.)
    m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, zorder=1, linewidth=0.2) # draw meridians (longitude lines)
zips=zip(m.pars_info, m.pars)
patches = []
    # use points (circles) instead of the actual polygons to display the colors
for info, shape in zips:
try:
color=cmap(norm(int(float(info['%s'%item]))))
# patches.append( Polygon(np.array(shape), True, color=color) )
patches.append( Circle(m(info['lon'],info['lat']),30000, color=color))
except:
pass
# pc = PatchCollection(patches, match_original=True, edgecolor=None, zorder=1)
pc = PatchCollection(patches, match_original=True, edgecolor=None, zorder=3)
ax.add_collection(pc)
    # scale bar
    m.drawmapscale(89, 19.8, 117.5, 36, 1500, units='km',barstyle='fancy',format='%d',fontsize=8,zorder=4)
    # north arrow
offset = 0.01*(m.ymax-m.ymin)+0.01*(m.xmax-m.xmin)
x,y=m(72,46)
x=x+2*offset
y=y-offset
ax.fill([x,x+offset,x,x-offset,x],[y,y-3.5*offset,y-2.25*offset,y-3.5*offset,y],ec='k',fc='None',zorder=3)
ax.fill([x,x+offset,x,x],[y,y-3.5*offset,y-2.25*offset,y],ec='None',fc='w',zorder=3)
ax.fill([x,x,x-offset,x],[y,y-2.25*offset,y-3.5*offset,y],ec='None',fc='k',zorder=3)
ax.text(x,y+0.5*offset,'N',horizontalalignment='center',fontsize=10,zorder=3)
    ###### inset map: nine-dash line region ######
axins = zoomed_inset_axes(ax, 0.38, loc = 4, borderpad=0)
axins.set_xlim(108, 122.5)
axins.set_ylim(3, 25)
map2 = Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon = 108, llcrnrlat = 3, urcrnrlon = 122.5, \
urcrnrlat = 25,projection='aea', lat_1=25, lat_2=47, lon_0=105, ax = axins)
shpfile = 'D:/hh/矢量边界/中国9段线边界'
map2.readshapefile(shpfile, 'China',linewidth=0.2,color='#304ffe')
mark_inset(ax, axins, loc1=2, loc2=4, fc = "none", ec = "none")
#####################################
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm.set_array(colvals)
cb=fig.colorbar(sm, ax=ax,fraction=0.032,extend='both')
font={'size':13}
    # cb.set_label('Relative error',fontdict=font) # set the colorbar label font and size
    cb.set_label('%s'%'$\mathrm{℃·d}^{-1}$',fontdict=font) # set the colorbar label font and size
plt.text(0.5,0.92,'TSUM2 (%d-%d)'%(years[0],years[1]), fontsize=14, transform=ax.transAxes, \
horizontalalignment='center')
fig.savefig('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/标定png/%d-%d_TSUM2.png'% \
(years[0],years[1]),dpi=600,bbox_inches='tight')
data_new.columns
###Output
_____no_output_____
###Markdown
In fact, when the model is dry-run over the whole grid, a raster of the spatial distribution of each parameter can be generated as a by-product; producing the maps following that idea might be the better approach.
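The core of that idea is simply the inverse GeoTransform lookup used throughout the script below; a minimal sketch with illustrative numbers (not values from this study) is shown first.
###Code
# Minimal sketch: map a station's lon/lat to raster (col, row) indices via a GDAL GeoTransform.
# The GeoTransform and coordinates below are illustrative placeholders.
gt = (73.0, 0.1, 0.0, 54.0, 0.0, -0.1)   # (x_origin, pixel_width, 0, y_origin, 0, -pixel_height)
lon, lat = 113.55, 34.72                 # hypothetical station location
col = int((lon - gt[0]) / gt[1])         # offset from the x-origin divided by the pixel width
row = int((lat - gt[3]) / gt[5])         # offset from the y-origin divided by the (negative) pixel height
print(col, row)
###Output
_____no_output_____
###Markdown
The full spatial-run script is written out and executed below.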
###Code
%%writefile mp_test1.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 11:36:32 2021
The difference for the Xinjiang parameters is that TBASE (the minimum temperature for leaf growth) was set to -10
@author: Administrator
"""
# import gdal
import os
import time
import datetime
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import geopandas as gpd
import scipy.stats as ss
import csv
from multiprocessing import Pool #导入进程池
from osgeo import gdal
import matplotlib.colors as mcolors
from pcse.util import Afgen
from pcse.models import Wofost71_PP,Wofost71_WLP_FD
from pcse.base import ParameterProvider
from pcse.fileinput import YAMLAgroManagementReader, CABOFileReader
from pcse.fileinput import YAMLCropDataProvider,CABOWeatherDataProvider
from pcse.util import WOFOST71SiteDataProvider
from netCDF4 import Dataset,date2index
def mycallback(x):
csv_write.writerow(x)
def sayHi(num):
data_dir='D:\hh\@进行中的工作\ESSD_Wheat_Multi-vars\分区标定'
# data_dir='H:\hh\@进行中的工作\ESSD_Wheat_Multi-vars\分区标定'
x,y,lon,lat,df_v,par_lst,level2=num[0],num[1],num[2],num[3],num[4],num[5],num[6]
# sited,cropd,soild=par_lst
code=df_v['区站号']
if code in replace_code.keys():
code=replace_code[code]
p_file='cropdata/calibrated//%d-%d_%d_V2-3_L%d.txt'%(run_years[0],run_years[-1],code,level2)
if os.path.exists(p_file):
pars=np.loadtxt(p_file)
        print('Calibrated crop pars %d-%d_%d_V2-3_L%d.txt loaded'%(run_years[0],run_years[-1],code,level2))
else:
print('no calibrated crop pars %d-%d_%d_V2-3_L%d.txt'%(run_years[0],run_years[-1],code,level2))
pars=[829,699,0,0.0019,22,32,22,1,0.7,0.66]
soil_pars=[df_v['SMW'],df_v['SMFCF'],df_v['SM0'],df_v['CRAIRC']]
# tsum1,tsum2,SLATB, SPAN, AMAXTB, TMPFTB, TMNFTB, CVO, shift_dvs=pars
tsum1,tsum2,DTSM,SLATB, SPAN, AMAXTB, TMPFTB, TMNFTB, CVO, shift_dvs=pars
i_file='amgt/calibrated//%d-%d_%d_V2-3_L%d.txt'%(run_years[0],run_years[-1],code,level2)
if os.path.exists(i_file):
irragation=np.loadtxt(i_file)
        print('Calibrated irrigation loaded')
    else:
        print('no calibrated irrigation')
        irragation=[0.2,4]  # fallback default (previously commented out); without it the return below raises NameError
return [x,y]+list(pars)+list(soil_pars)+list(irragation)
replace_code = {56768: 56571,
56651: 56571,
56598: 57508,
56778: 56571,
57517: 57411,
57625: 57508,
57432: 57326,
57512: 57411,
52876: 52983,
52868: 52983,
53914: 57002,
53817: 53930}
# run_years=[2001,2002,2003]
# run_years=[2004,2005,2006]
run_years=[2007,2008,2009]
# run_years=[2010,2011,2012]
# run_years=[2013,2014,2015]
# In[]
if __name__ == '__main__':
data_dir='D:\hh\@进行中的工作\ESSD_Wheat_Multi-vars\分区标定'
ds=gdal.Open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/11省分区范围/tif_01dg_clip.tif')
data=ds.ReadAsArray()
gt=ds.GetGeoTransform()
gdf = gpd.read_file(data_dir+'/站点生育期整理_3期标定_join_yield_xie_and_soil_he_临县补齐.shp',encoding ='UTF-8')
ds_mask=gdal.Open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/Mask/mask01/union_mask_01deg.tif')
mask=ds_mask.ReadAsArray()
gt_mask=ds_mask.GetGeoTransform()
ds_mean_yield=gdal.Open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/县域_mean_yield.tif')
mean_yield=ds_mean_yield.ReadAsArray()
gt_yield=ds_mean_yield.GetGeoTransform()
yield_class_dct=np.load('final_dct.npy',allow_pickle=True).item()
nums=[]
for x in range(ds.RasterXSize):
for y in range(ds.RasterYSize):
lon=gt[0] + x * gt[1]
lat=gt[3] + y * gt[5]
            # apply a raster mask; its resolution differs from this grid by roughly a factor of 10
if lon<gt_mask[0] or lon>gt_mask[0]+ds_mask.RasterXSize*gt_mask[1] \
or lat>gt_mask[3] or lat<gt_mask[3]+ds_mask.RasterYSize*gt_mask[5]:
continue
xx=int((lon-gt_mask[0])/gt_mask[1])
yy=int((lat-gt_mask[3])/gt_mask[5])
xx=5 if xx<5 else xx
yy=5 if yy<5 else yy
data_mask=mask[yy-5:yy+5,xx-5:xx+5]
#判断是否在省界内
v=data[y,x]
if v==255 or np.sum(data_mask)==0 :#255是空,<20是新疆一带or v<180
continue
df_v=gdf.loc[v]
par_lst=[]
            # determine the yield level
ix=int((lon-gt_yield[0])/gt_yield[1])
iy=int((lat-gt_yield[3])/gt_yield[5])
pac_yield=mean_yield[iy,ix]
            code=int(df_v['区站号'])# the latest mask is clipped to the study area, so there are extra stations here; the try block below filters them out
if code in replace_code.keys():
code=replace_code[code]
try:
yields=yield_class_dct[code]
except:
continue
if len(yields)==1:
level=1
            elif pac_yield>20000:# nodata value
level=2
else:
if pac_yield<yields[1]:
level=1
elif pac_yield >yields[3]:
level=3
else:
level=2
nums.append([x,y,lon,lat,df_v,par_lst,level])
# nums=np.load('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/空跑/nums.npy')
e1 = datetime.datetime.now()
# with open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/空跑/TEST%d-%d.csv'%(run_years[0],run_years[-1]), 'w') as csv_file:
csv_file=open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/参数空间化结果/%d-%d_V2-3.csv'%(run_years[0],run_years[-1]), 'w')
csv_write = csv.writer(csv_file)
p = Pool(10)
count=1
for i in nums:
# p.apply_async(sayHi, (i,),callback=mycallback)
last=p.apply_async(func=sayHi, args=(i,),callback=mycallback)
count=count+1
# print(count,len(p._cache))
if len(p._cache) > 50000:
# print("waiting for cache to clear...")
last.wait()
p.close()
p.join()
p = Pool(10)
e2 = datetime.datetime.now()
print((e2-e1))
    time.sleep( 60 )# without this, the file sometimes seems to be closed before all runs have finished
csv_file.close()
%run mp_test1.py
%run mp_test2.py
%run mp_test3.py
###Output
0:00:00.296907
###Markdown
Read the spatialized parameters and generate GeoTIFF rasters
###Code
run_years=[2007,2008,2009]
# run_years=[2010,2011,2012]
# run_years=[2013,2014,2015]
df=pd.read_csv('./参数空间化结果/%d-%d_V2-3.csv'%(run_years[0],run_years[2]),header=None)
df.columns=['col','row','TSUM1','TSUM2','DTSM0','SLATB', 'SPAN', 'AMAXTB', 'TMPFTB', 'TMNFTB', 'CVO', \
'shift_dvs','SMW','SMFCF','SM0','CRAIRC', 'SMc_%d'%run_years[0], 'SMc_%d'%run_years[1], \
'SMc_%d'%run_years[2], 'irr_%d'%run_years[0], 'irr_%d'%run_years[1], 'irr_%d'%run_years[2]]
df
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 21:15:15 2021
Generate GeoTIFF rasters from the spatial (dry-run) results
@author: Administrator
"""
from osgeo import gdal
import glob
import numpy as np
import matplotlib.pyplot as plt
# for year in range(2014,2016,1):
# print(year)
g = gdal.Open('D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/11省分区范围/tif_01dg_clip.tif')
geo_transform=g.GetGeoTransform()
rows=g.RasterYSize
cols=g.RasterXSize
items=['TSUM1','TSUM2','DTSM0','SLATB', 'SPAN', 'AMAXTB', 'TMPFTB', 'TMNFTB', 'CVO', \
'shift_dvs','SMW','SMFCF','SM0','CRAIRC', 'SMc_%d'%run_years[0], 'SMc_%d'%run_years[1], \
'SMc_%d'%run_years[2], 'irr_%d'%run_years[0], 'irr_%d'%run_years[1], 'irr_%d'%run_years[2]]
types=[int,int,float,float,int,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float]
type_dct=dict(zip(items,types))
for item in ['TSUM1','TSUM2','DTSM0','SLATB', 'SPAN', 'AMAXTB', 'TMPFTB', 'TMNFTB', 'CVO', \
'shift_dvs','SMW','SMFCF','SM0','CRAIRC', 'SMc_%d'%run_years[0], 'SMc_%d'%run_years[1], \
'SMc_%d'%run_years[2], 'irr_%d'%run_years[0], 'irr_%d'%run_years[1], 'irr_%d'%run_years[2]]:
image = np.zeros((cols, rows), type_dct[item])*0.0
for col,row,value in zip (df.col,df.row,df[item]):
if value:
            image[col, row] = value # no need to subtract 1 here; other code must be changed accordingly!!!
else:
image[col, row] = value+0.00001
image=np.ma.masked_where(image==0, image)
# plt.imshow(image.transpose(), interpolation='nearest', vmin=500, vmax=900, cmap=plt.cm.jet)
# plt.show()
driver = gdal.GetDriverByName ( "GTiff" ) # Get a handler to a driver
if type_dct[item] is int:
dataset_y = driver.Create ( 'D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/参数空间化结果/%s_%d-%d_V2-3.tif'%(item,run_years[0],run_years[2]),cols, rows,1, gdal.GDT_Int16, options=['COMPRESS=LZW'] )
else:
dataset_y = driver.Create ( 'D:/hh/@进行中的工作/ESSD_Wheat_Multi-vars/分区标定/参数空间化结果/%s_%d-%d_V2-3.tif'%(item,run_years[0],run_years[2]),cols, rows,1, gdal.GDT_Float32, options=['COMPRESS=LZW'] )
dataset_y.SetGeoTransform ( geo_transform)
dataset_y.SetProjection ( g.GetProjectionRef() )
dataset_y.GetRasterBand(1).SetNoDataValue(0)
dataset_y.GetRasterBand(1).WriteArray(image.T)
dataset_y.FlushCache()
del dataset_y
del g
###Output
_____no_output_____
###Markdown
Plot the maps (for now, only some of the parameters for 2007-2009 are plotted)
###Code
import glob
import imageio
import logging
import shapefile
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from pathlib import Path
from numpy import meshgrid
from numpy import linspace
from osgeo import gdal, osr
from matplotlib import gridspec
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import PathPatch
from matplotlib.pyplot import MultipleLocator
from matplotlib.collections import PatchCollection
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
fig = plt.figure(figsize=(6,7.9))
items=['TSUM1','TSUM2','DTSM0','SLATB', 'SPAN', 'AMAXTB', 'TMPFTB', 'TMNFTB', 'CVO', 'shift_dvs']
units=['$\mathrm{℃·d}^{-1}$','$\mathrm{℃·d}^{-1}$','℃','$ \mathrm{ha}$'+'$\cdot$'+'$ \mathrm{kg}^{-1}$', \
'd', '$ \mathrm{kg}$'+'$\cdot$'+'$ \mathrm{ha}^{-1}$'+'$\cdot$'+'$ \mathrm{h}^{-1}$', \
'℃', '℃', '$ \mathrm{kg}$'+'$\cdot$'+'$ \mathrm{kg}^{-1}$', '-']
n_colors=[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6]
def appr(a):
    # Round a to two significant digits relative to the smallest power of ten above |a|,
    # and also return the corresponding rounding step (used to size the colorbar levels).
    for s in [0.00001,0.0001,0.001,0.01,0.1,1,10,100,1000,10000,100000]:
        if abs(a)<s:
            a=round(a/s*100)*s/100
            break
    return a,s/100
for i in range(1,11):
item=items[i-1]
unit=units[i-1]
if i==10:
ax = plt.subplot2grid((4,3), (3, 0))
else:
ax = fig.add_subplot(430+i)
ds=gdal.Open('./参数空间化结果/%s_2007-2009_V2-3.tif'%item)
data=ds.ReadAsArray()
    gt=ds.GetGeoTransform() # resolution is exactly 0.01°
# extent = (gt[0], gt[0] + ds.RasterXSize * gt[1],
# gt[3] + ds.RasterYSize * gt[5], gt[3])
m = Basemap(rsphere=(6378137.00,6356752.31414),llcrnrlon=82, llcrnrlat=23, \
urcrnrlon=126, urcrnrlat=47, projection='aea', resolution='l', \
lat_1=25, lat_2=53, lon_0=105, ax = ax)
m.fillcontinents(color='#e0e0e0',lake_color=None)
shpfile = 'D:/hh/矢量边界/中国9段线边界'
m.readshapefile(shpfile, 'China')
    # # draw parallels and meridians
    # parallels = np.arange(10.,90,5.)
    # m.drawparallels(parallels,labels=[1,0,0,0],fontsize=9, zorder=2, linewidth=0.5) # draw parallels (latitude lines)
    # meridians = np.arange(70.,180.,10.)
    # m.drawmeridians(meridians,labels=[0,0,1,0],fontsize=9, zorder=2, linewidth=0.5) # draw meridians (longitude lines)
colors = ['red','orangered',"darkorange", "gold", "#72de40", "#42971a",'#27590f']
nodes = [0.0, 0.15, 0.3, 0.45, 0.6, 0.8, 1.0]
cm = LinearSegmentedColormap.from_list("mycmap", list(zip(nodes, colors)))
new_data = np.ma.masked_where(data==0, data)
x = linspace(gt[0], gt[0] + ds.RasterXSize * gt[1], ds.RasterXSize)
y = linspace(gt[3], gt[3] + ds.RasterYSize * gt[5], ds.RasterYSize)
xx, yy = meshgrid(x, y)
temp=[a for a in data.flatten() if a]
    q1,q2=np.percentile(temp,0.05),np.percentile(temp,99.95)# adjust the percentile bounds as needed
n_color=int((appr(q2)[0]-appr(q1)[0])*2/appr(q1)[1])+1
# print(appr(q2)[0],appr(q1)[0],n_color)
# cs=m.contourf(xx, yy,new_data,cmap=cm,latlon=True,zorder=3,extend='both')
cs=m.contourf(xx, yy,new_data,cmap=cm,levels=np.linspace(q1,q2,n_color),latlon=True,zorder=3,extend='both')
    bar=fig.colorbar(cs,orientation='horizontal',fraction=0.05, pad=0.22)# fraction controls the colorbar size, pad the spacing
    # bar.ax.tick_params(size=0,labelsize=1,labelcolor='white')# hide the ticks and labels
bar.ax.tick_params(size=2,labelsize=9,labelcolor='black')
if i==1:
ticks = bar.set_ticks([300,600,900])
if i==2:
ticks = bar.set_ticks([850,1150,1450])
if i==3:
ticks = bar.set_ticks([-13,-9,-5,-1])
if i==4:
ticks = bar.set_ticks([0.00192,0.00220,0.00248])
# bar.ax.ticklabel_format(style='sci')
if i==5:
ticks = bar.set_ticks([22,27,32,37])
if i==6:
ticks = bar.set_ticks([25,30,35,40])
if i==7:
ticks = bar.set_ticks([22,25,28])
if i==8:
ticks = bar.set_ticks([-18,-12,-6,0])
if i==9:
ticks = bar.set_ticks([0.56,0.64,0.72,0.8])
if i==10:
ticks = bar.set_ticks([0.7,0.80,0.9])
bar.ax.set_title(unit, loc='center', fontsize=9)
fig.text(0.09,0.87,'(a)')
fig.text(0.363,0.87,'(b)')
fig.text(0.6375,0.87,'(c)')
fig.text(0.09,0.67,'(d)')
fig.text(0.363,0.67,'(e)')
fig.text(0.6375,0.67,'(f)')
fig.text(0.09,0.475,'(g)')
fig.text(0.363,0.475,'(h)')
fig.text(0.6375,0.475,'(i)')
fig.text(0.09,0.28,'(j)')
# fig.text(0.4,0.25,'(a) TSUM1 (b) TSUM2 (c) ${DTSM}_{0}$')
# fig.text(0.4,0.22,'(d) SLATB (e) SPAN (f) AMAXTB')
# fig.text(0.4,0.19,'(g) TMPFTB (h) TMNFTB (i) FSTB')
# fig.text(0.4,0.16,'(j) FOTB')
fig.savefig('./参数空间化结果/2007-2009_V2-3.png',dpi=600,bbox_inches='tight')
###Output
_____no_output_____ |
08_pavement_crack_segmentation.ipynb | ###Markdown
Pavement Crack Segmentation
This is a custom model which I have trained on pavement crack images. The model is based on the UNet architecture.
Steps:
1. The model already comes with this repository and is in the TensorFlow (.pb) format.
2. Use the '02_model_conversion.ipynb' notebook to convert it into IR format with FP16 or FP32 precision.
3. If FP16 is used, it can be run on either an Intel GPU or an NCS 2; otherwise FP32 is used with the CPU.
NOTE: OpenVINO for macOS supports inference on Intel CPU and NCS 2 only, not on Intel GPU.
-> In summary, the details are:
* Model is trained on TensorFlow
* Input size - 352 x 640
* Channel format - RGB
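A conversion sketch for step 2 is shown below. It assumes OpenVINO's Model Optimizer for TensorFlow (mo_tf.py); the exact script location and supported flags depend on your OpenVINO release, so treat the command as a starting point rather than a definitive recipe.
###Code
# Hedged sketch of step 2 (TF .pb -> OpenVINO IR); adjust the path to mo_tf.py for your install,
# and switch --data_type to FP16 when targeting NCS 2.
!python mo_tf.py --input_model ./models/custom_crack_detection/crack_detection.pb --input_shape [1,352,640,3] --data_type FP32 --model_name fp32_crack_detection --output_dir ./models/custom_crack_detection/
###Output
_____no_output_____
###Markdown
Load the required libraries.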
###Code
%load_ext autoreload
%autoreload 2
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from dataclasses import dataclass
%matplotlib inline
###Output
_____no_output_____
###Markdown
Define parameters
###Code
# Parameters
IMG_SIZE = (352, 640, 3)
IMG_FPATH = "./images/crack1.jpg"
MODEL_FPATH = "./models/custom_crack_detection/fp32_crack_detection.bin"
ARCH_FPATH = "./models/custom_crack_detection/fp32_crack_detection.xml"
#MODEL_FPATH = "./models/custom_crack_detection/crack_detection.pb" # TF original model
###Output
_____no_output_____
###Markdown
Model configuration for the input image
###Code
@dataclass
class ModelConfig:
scalefactor: float = 1.0
size: tuple = (224, 224)
mean: tuple = (0, 0, 0)
swapRB: bool = False
crop: bool = False
ddepth: int = cv2.CV_32F
configs = vars(ModelConfig(size=(IMG_SIZE[1], IMG_SIZE[0]), swapRB=True))
print(configs)
###Output
{'scalefactor': 1.0, 'size': (640, 352), 'mean': (0, 0, 0), 'swapRB': True, 'crop': False, 'ddepth': 5}
###Markdown
Load image, its mask and create a blob
###Code
# Load image and prepare a blob which is going to be input to the model
img = cv2.imread(IMG_FPATH)
if img is None:
raise FileNotFoundError(f'Image not found with the path provided: {IMG_FPATH}')
# Get its corresponding mask image (it is used for visual purpose only)
dir_path, fname = os.path.split(IMG_FPATH)
fname = fname.split(".")[0]
mask_fpath = os.path.join(dir_path, "mask_"+fname+".png")
# Load mask/Ground Truth image
mask_img = cv2.imread(mask_fpath)
if mask_img is None:
raise FileNotFoundError(f'Mask image not found with this path: {mask_fpath}')
# Prepare blob
blob = cv2.dnn.blobFromImage(img,
scalefactor=configs['scalefactor'],
size=configs['size'],
mean=configs['mean'],
swapRB=configs['swapRB'],
crop=configs['crop'],
ddepth=configs['ddepth'])
print(blob.shape) # Blob returns in NCHW format
###Output
(1, 3, 352, 640)
###Markdown
Load model and run inference
###Code
# Load model
net = cv2.dnn.readNet(MODEL_FPATH, ARCH_FPATH)
#net = cv2.dnn.readNetFromTensorflow(MODEL_FPATH)
# Specify target device
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
### NOTE: for DNN_TARGET_OPENCL_FP16 (i.e., Intel GPU) it will throw an error since
# OpenVINO does not support Intel GPU on macOS.
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # DNN_TARGET_OPENCL_FP16
# NOTE: input and output nodes names should be taken from the TF model!!!
net.setInput(blob, name='input_image')
pr_mask = net.forward(outputName='final_conv/Sigmoid')
pr_mask = pr_mask.round() # in the format NCHW
###Output
_____no_output_____
###Markdown
Display segmentation results
###Code
# helper functions for image, masks and segmentation map visualizations
def denormalize(x):
"""Scale image to range 0..1 for correct plot."""
x_max = np.percentile(x, 98)
x_min = np.percentile(x, 2)
x = (x - x_min) / (x_max - x_min)
x = x.clip(0, 1)
return x
def visualize(**images):
"""Plot images in a single row."""
num_imgs = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, num_imgs, i+1)
plt.xticks([])
plt.yticks([])
plt.title(" ".join(name.split("_")).title())
plt.imshow(image)
plt.show()
# show the results
visualize(image=denormalize(img.squeeze()),
gt_mask=mask_img,
pr_mask=pr_mask[0, ...].squeeze())
###Output
_____no_output_____ |