path | concatenated_notebook
---|---|
Deep-Learning-Notebooks/notebooks/Mozilla_TTS_WaveRNN.ipynb | ###Markdown
Text-to-Speech with Mozilla Tacotron+WaveRNN
This is an English female voice TTS demo using open source projects [mozilla/TTS](https://github.com/mozilla/TTS/) and [erogol/WaveRNN](https://github.com/erogol/WaveRNN). For other deep-learning Colab notebooks, visit [tugstugi/dl-colab-notebooks](https://github.com/tugstugi/dl-colab-notebooks).
Install Mozilla TTS and WaveRNN
###Code
import os
import time
from os.path import exists, join, basename, splitext
git_repo_url = 'https://github.com/mozilla/TTS.git'
project_name = splitext(basename(git_repo_url))[0]
if not exists(project_name):
!git clone -q {git_repo_url}
!cd {project_name} && git checkout Tacotron2-iter-260K-824c091
!pip install -q gdown lws librosa Unidecode==0.4.20 tensorboardX git+git://github.com/bootphon/phonemizer@master localimport
!apt-get install -y espeak
git_repo_url = 'https://github.com/erogol/WaveRNN.git'
project_name = splitext(basename(git_repo_url))[0]
if not exists(project_name):
!git clone -q {git_repo_url}
!cd {project_name} && git checkout 8a1c152 && pip install -q -r requirements.txt
import sys
sys.path.append('TTS')
sys.path.append('WaveRNN')
from localimport import localimport
from IPython.display import Audio, display
###Output
_____no_output_____
###Markdown
Download pretrained models
###Code
# WaveRNN
!mkdir -p wavernn_models tts_models
wavernn_pretrained_model = 'wavernn_models/checkpoint_433000.pth.tar'
if not exists(wavernn_pretrained_model):
!gdown -O {wavernn_pretrained_model} https://drive.google.com/uc?id=12GRFk5mcTDXqAdO5mR81E-DpTk8v2YS9
wavernn_pretrained_model_config = 'wavernn_models/config.json'
if not exists(wavernn_pretrained_model_config):
!gdown -O {wavernn_pretrained_model_config} https://drive.google.com/uc?id=1kiAGjq83wM3POG736GoyWOOcqwXhBulv
# TTS
tts_pretrained_model = 'tts_models/checkpoint_261000.pth.tar'
if not exists(tts_pretrained_model):
!gdown -O {tts_pretrained_model} https://drive.google.com/uc?id=1otOqpixEsHf7SbOZIcttv3O7pG0EadDx
tts_pretrained_model_config = 'tts_models/config.json'
if not exists(tts_pretrained_model_config):
!gdown -O {tts_pretrained_model_config} https://drive.google.com/uc?id=1IJaGo0BdMQjbnCcOL4fPOieOEWMOsXE-
###Output
_____no_output_____
###Markdown
Initialize models
###Code
#
# this code is copied from: https://github.com/mozilla/TTS/blob/master/notebooks/Benchmark.ipynb
#
import io
import torch
import time
import numpy as np
from collections import OrderedDict
from matplotlib import pylab as plt
import IPython
%pylab inline
rcParams["figure.figsize"] = (16,5)
import librosa
import librosa.display
from TTS.models.tacotron import Tacotron
from TTS.layers import *
from TTS.utils.data import *
from TTS.utils.audio import AudioProcessor
from TTS.utils.generic_utils import load_config, setup_model
from TTS.utils.text import text_to_sequence
from TTS.utils.synthesis import synthesis
from TTS.utils.visual import visualize
def tts(model, text, CONFIG, use_cuda, ap, use_gl, speaker_id=None, figures=True):
t_1 = time.time()
waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens = synthesis(model, text, CONFIG, use_cuda, ap, truncated=True, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars)
if CONFIG.model == "Tacotron" and not use_gl:
mel_postnet_spec = ap.out_linear_to_mel(mel_postnet_spec.T).T
if not use_gl:
waveform = wavernn.generate(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0).cuda(), batched=batched_wavernn, target=11000, overlap=550)
print(" > Run-time: {}".format(time.time() - t_1))
if figures:
visualize(alignment, mel_postnet_spec, stop_tokens, text, ap.hop_length, CONFIG, mel_spec)
IPython.display.display(Audio(waveform, rate=CONFIG.audio['sample_rate']))
#os.makedirs(OUT_FOLDER, exist_ok=True)
#file_name = text.replace(" ", "_").replace(".","") + ".wav"
#out_path = os.path.join(OUT_FOLDER, file_name)
#ap.save_wav(waveform, out_path)
return alignment, mel_postnet_spec, stop_tokens, waveform
use_cuda = True
batched_wavernn = True
# initialize TTS
CONFIG = load_config(tts_pretrained_model_config)
from TTS.utils.text.symbols import symbols, phonemes
# load the model
num_chars = len(phonemes) if CONFIG.use_phonemes else len(symbols)
model = setup_model(num_chars, CONFIG)
# load the audio processor
ap = AudioProcessor(**CONFIG.audio)
# load model state
if use_cuda:
cp = torch.load(tts_pretrained_model)
else:
cp = torch.load(tts_pretrained_model, map_location=lambda storage, loc: storage)
# load the model
model.load_state_dict(cp['model'])
if use_cuda:
model.cuda()
model.eval()
print(cp['step'])
model.decoder.max_decoder_steps = 2000
# initialize WaveRNN
VOCODER_CONFIG = load_config(wavernn_pretrained_model_config)
with localimport('/content/WaveRNN') as _importer:
from models.wavernn import Model
bits = 10
wavernn = Model(
rnn_dims=512,
fc_dims=512,
mode="mold",
pad=2,
upsample_factors=VOCODER_CONFIG.upsample_factors, # set this depending on dataset
feat_dims=VOCODER_CONFIG.audio["num_mels"],
compute_dims=128,
res_out_dims=128,
res_blocks=10,
hop_length=ap.hop_length,
sample_rate=ap.sample_rate,
).cuda()
check = torch.load(wavernn_pretrained_model)
wavernn.load_state_dict(check['model'])
if use_cuda:
wavernn.cuda()
wavernn.eval()
print(check['step'])
###Output
_____no_output_____
###Markdown
Sentence to synthesize
###Code
SENTENCE = 'Bill got in the habit of asking himself “Is that thought true?” And if he wasn’t absolutely certain it was, he just let it go.'
###Output
_____no_output_____
###Markdown
Synthesize
###Code
align, spec, stop_tokens, wav = tts(model, SENTENCE, CONFIG, use_cuda, ap, speaker_id=0, use_gl=False, figures=False)
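# Note on the arguments: use_gl=False routes the generated mel spectrogram through the
# WaveRNN vocoder loaded above; figures=False skips the alignment/spectrogram plots
# (set figures=True to visualize them).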
###Output
216000/217800 -- batch_size: 18 -- gen_rate: 17.9 kHz -- x_realtime: 0.8 > Run-time: 20.5927996635437
|
Starter-Code/Recommendation-System-FM-KNN.ipynb | ###Markdown
Introduction
This notebook outlines how to build a recommendation system using SageMaker's Factorization Machines (FM). The main goal is to showcase how to extend the FM model to predict the top "X" recommendations using SageMaker's KNN and Batch Transform.
There are four parts to this notebook:
1. Building a FM Model
2. Repackaging the FM Model to fit a KNN Model
3. Building a KNN model
4. Running Batch Transform for predicting top "X" items
Part 1 - Building a FM Model using the MovieLens dataset
Julien Simon has written a fantastic blog post about how to build a FM model using SageMaker, with a detailed explanation; please see the link below for more information. In this part, I utilized his code for the most part to have continuity for performing the additional steps.
Source - https://aws.amazon.com/blogs/machine-learning/build-a-movie-recommender-with-factorization-machines-on-amazon-sagemaker/
###Code
import sagemaker
import sagemaker.amazon.common as smac
from sagemaker import get_execution_role
from sagemaker.predictor import json_deserializer
from sagemaker.amazon.amazon_estimator import get_image_uri
import numpy as np
from scipy.sparse import lil_matrix
import pandas as pd
import boto3, io, os
###Output
_____no_output_____
###Markdown
Download movie rating data from movie lens
###Code
#download data
!wget http://files.grouplens.org/datasets/movielens/ml-100k.zip
!unzip -o ml-100k.zip
###Output
--2019-01-06 19:26:30-- http://files.grouplens.org/datasets/movielens/ml-100k.zip
Resolving files.grouplens.org (files.grouplens.org)... 128.101.34.235
Connecting to files.grouplens.org (files.grouplens.org)|128.101.34.235|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4924029 (4.7M) [application/zip]
Saving to: ‘ml-100k.zip.1’
ml-100k.zip.1 100%[===================>] 4.70M 12.9MB/s in 0.4s
2019-01-06 19:26:31 (12.9 MB/s) - ‘ml-100k.zip.1’ saved [4924029/4924029]
Archive: ml-100k.zip
inflating: ml-100k/allbut.pl
inflating: ml-100k/mku.sh
inflating: ml-100k/README
inflating: ml-100k/u.data
inflating: ml-100k/u.genre
inflating: ml-100k/u.info
inflating: ml-100k/u.item
inflating: ml-100k/u.occupation
inflating: ml-100k/u.user
inflating: ml-100k/u1.base
inflating: ml-100k/u1.test
inflating: ml-100k/u2.base
inflating: ml-100k/u2.test
inflating: ml-100k/u3.base
inflating: ml-100k/u3.test
inflating: ml-100k/u4.base
inflating: ml-100k/u4.test
inflating: ml-100k/u5.base
inflating: ml-100k/u5.test
inflating: ml-100k/ua.base
inflating: ml-100k/ua.test
inflating: ml-100k/ub.base
inflating: ml-100k/ub.test
###Markdown
Shuffle the data
###Code
%cd ml-100k
!shuf ua.base -o ua.base.shuffled
###Output
/home/ec2-user/SageMaker/AmazonSageMaker-rama-ml/fm-based-recommendation-system/ml-100k
###Markdown
Load Training Data
###Code
user_movie_ratings_train = pd.read_csv('ua.base.shuffled', sep='\t', index_col=False,
names=['user_id' , 'movie_id' , 'rating'])
user_movie_ratings_train.head(5)
###Output
_____no_output_____
###Markdown
Load Test Data
###Code
user_movie_ratings_test = pd.read_csv('ua.test', sep='\t', index_col=False,
names=['user_id' , 'movie_id' , 'rating'])
user_movie_ratings_test.head(5)
nb_users= user_movie_ratings_train['user_id'].max()
nb_movies=user_movie_ratings_train['movie_id'].max()
nb_features=nb_users+nb_movies
nb_ratings_test=len(user_movie_ratings_test.index)
nb_ratings_train=len(user_movie_ratings_train.index)
print " # of users: ", nb_users
print " # of movies: ", nb_movies
print " Training Count: ", nb_ratings_train
print " Test Count: ", nb_ratings_test
print " Features (# of users + # of movies): ", nb_features
###Output
# of users: 943
# of movies: 1682
Training Count: 90570
Test Count: 9430
Features (# of users + # of movies): 2625
###Markdown
FM InputInput to FM is a one-hot encoded sparse matrix. Only ratings 4 and above are considered for the model. We will be ignoring ratings 3 and below.
###Code
def loadDataset(df, lines, columns):
# Features are one-hot encoded in a sparse matrix
X = lil_matrix((lines, columns)).astype('float32')
# Labels are stored in a vector
Y = []
line=0
for index, row in df.iterrows():
X[line,row['user_id']-1] = 1
X[line, nb_users+(row['movie_id']-1)] = 1
if int(row['rating']) >= 4:
Y.append(1)
else:
Y.append(0)
line=line+1
Y=np.array(Y).astype('float32')
return X,Y
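# Encoding example: for user_id = 1 and movie_id = 5, columns 0 (the user one-hot) and
# nb_users + 4 (the movie one-hot) of that row are set to 1, and the label is 1 only if
# the rating is 4 or higher.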
X_train, Y_train = loadDataset(user_movie_ratings_train, nb_ratings_train, nb_features)
X_test, Y_test = loadDataset(user_movie_ratings_test, nb_ratings_test, nb_features)
print(X_train.shape)
print(Y_train.shape)
assert X_train.shape == (nb_ratings_train, nb_features)
assert Y_train.shape == (nb_ratings_train, )
zero_labels = np.count_nonzero(Y_train)
print("Training labels: %d zeros, %d ones" % (zero_labels, nb_ratings_train-zero_labels))
print(X_test.shape)
print(Y_test.shape)
assert X_test.shape == (nb_ratings_test, nb_features)
assert Y_test.shape == (nb_ratings_test, )
zero_labels = np.count_nonzero(Y_test)
print("Test labels: %d zeros, %d ones" % (zero_labels, nb_ratings_test-zero_labels))
###Output
(90570, 2625)
(90570,)
Training labels: 49906 zeros, 40664 ones
(9430, 2625)
(9430,)
Test labels: 5469 zeros, 3961 ones
###Markdown
Convert to Protobuf format for saving to S3
###Code
#Change this value to your own bucket name
bucket = 'recommendation-system-12-06'
prefix = 'fm'
train_key = 'train.protobuf'
train_prefix = '{}/{}'.format(prefix, 'train')
test_key = 'test.protobuf'
test_prefix = '{}/{}'.format(prefix, 'test')
output_prefix = 's3://{}/{}/output'.format(bucket, prefix)
def writeDatasetToProtobuf(X, bucket, prefix, key, d_type, Y=None):
buf = io.BytesIO()
if d_type == "sparse":
smac.write_spmatrix_to_sparse_tensor(buf, X, labels=Y)
else:
smac.write_numpy_to_dense_tensor(buf, X, labels=Y)
buf.seek(0)
obj = '{}/{}'.format(prefix, key)
boto3.resource('s3').Bucket(bucket).Object(obj).upload_fileobj(buf)
return 's3://{}/{}'.format(bucket,obj)
fm_train_data_path = writeDatasetToProtobuf(X_train, bucket, train_prefix, train_key, "sparse", Y_train)
fm_test_data_path = writeDatasetToProtobuf(X_test, bucket, test_prefix, test_key, "sparse", Y_test)
print "Training data S3 path: ",fm_train_data_path
print "Test data S3 path: ",fm_test_data_path
print "FM model output S3 path: {}".format(output_prefix)
###Output
Training data S3 path: s3://recommendation-system-12-06/fm/train/train.protobuf
Test data S3 path: s3://recommendation-system-12-06/fm/test/test.protobuf
FM model output S3 path: s3://recommendation-system-12-06/fm/output
###Markdown
Run training job
You can play around with the hyperparameters until you are happy with the prediction. For this dataset and hyperparameter configuration, after 100 epochs, test accuracy was around 70% on average and the F1 score (a typical metric for a binary classifier) was around 0.74 (1 indicates a perfect classifier). Not great, but you can fine-tune the model further.
###Code
instance_type='ml.m5.large'
fm = sagemaker.estimator.Estimator(get_image_uri(boto3.Session().region_name, "factorization-machines"),
get_execution_role(),
train_instance_count=1,
train_instance_type=instance_type,
output_path=output_prefix,
sagemaker_session=sagemaker.Session())
fm.set_hyperparameters(feature_dim=nb_features,
predictor_type='binary_classifier',
mini_batch_size=1000,
num_factors=64,
epochs=100)
fm.fit({'train': fm_train_data_path, 'test': fm_test_data_path})
###Output
INFO:sagemaker:Creating training-job with name: factorization-machines-2019-01-06-19-27-02-023
###Markdown
Part 2 - Repackaging Model Data to fit a KNN Model
Now that we have the model created and stored in SageMaker, we can download it and repackage it to fit a KNN model.
Download model data
###Code
import mxnet as mx
model_file_name = "model.tar.gz"
model_full_path = fm.output_path +"/"+ fm.latest_training_job.job_name +"/output/"+model_file_name
print "Model Path: ", model_full_path
#Download FM model
%cd ..
os.system('aws s3 cp '+model_full_path+' ./')
#Extract model file for loading to MXNet
os.system('tar xzvf '+model_file_name)
os.system("unzip -o model_algo-1")
os.system("mv symbol.json model-symbol.json")
os.system("mv params model-0000.params")
###Output
Model Path: s3://recommendation-system-12-06/fm/output/factorization-machines-2019-01-06-19-27-02-023/output/model.tar.gz
/home/ec2-user/SageMaker/AmazonSageMaker-rama-ml/fm-based-recommendation-system
###Markdown
Extract model data to create the item and user latent matrices
###Code
#Extract model data
m = mx.module.Module.load('./model', 0, False, label_names=['out_label'])
V = m._arg_params['v'].asnumpy()
w = m._arg_params['w1_weight'].asnumpy()
b = m._arg_params['w0_weight'].asnumpy()
# item latent matrix - concat(V[i], w[i]).
knn_item_matrix = np.concatenate((V[nb_users:], w[nb_users:]), axis=1)
knn_train_label = np.arange(1,nb_movies+1)
#user latent matrix - concat (V[u], 1)
ones = np.ones(nb_users).reshape((nb_users, 1))
knn_user_matrix = np.concatenate((V[:nb_users], ones), axis=1)
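# Why this repackaging works: for a one-hot (user u, item i) pair the FM score is
#   w0 + w_u + w_i + <v_u, v_i>.
# The inner product of the user row [v_u, 1] with the item row [v_i, w_i] equals
# <v_u, v_i> + w_i, i.e. the FM score up to terms (w0 + w_u) that are constant for a
# fixed user, so an inner-product KNN over these rows preserves the per-user item ranking.
# For example, knn_user_matrix[0].dot(knn_item_matrix[0]) is such a ranking score for
# user 1 and movie 1.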
###Output
_____no_output_____
###Markdown
Part 3 - Building the KNN Model
In this section, we upload the model input data to S3, create a KNN model, and save it. Saving the model will display it in the model section of SageMaker; it will also aid in calling batch transform down the line, or even deploying it as an endpoint for real-time inference.
This approach uses the default 'index_type' parameter for knn. It is precise but can be slow for large datasets. In such cases, you may want to use a different 'index_type' parameter, leading to an approximate yet fast answer.
###Code
print('KNN train features shape = ', knn_item_matrix.shape)
knn_prefix = 'knn'
knn_output_prefix = 's3://{}/{}/output'.format(bucket, knn_prefix)
knn_train_data_path = writeDatasetToProtobuf(knn_item_matrix, bucket, knn_prefix, train_key, "dense", knn_train_label)
print('uploaded KNN train data: {}'.format(knn_train_data_path))
nb_recommendations = 100
# set up the estimator
knn = sagemaker.estimator.Estimator(get_image_uri(boto3.Session().region_name, "knn"),
get_execution_role(),
train_instance_count=1,
train_instance_type=instance_type,
output_path=knn_output_prefix,
sagemaker_session=sagemaker.Session())
knn.set_hyperparameters(feature_dim=knn_item_matrix.shape[1], k=nb_recommendations, index_metric="INNER_PRODUCT", predictor_type='classifier', sample_size=200000)
fit_input = {'train': knn_train_data_path}
knn.fit(fit_input)
knn_model_name = knn.latest_training_job.job_name
print "created model: ", knn_model_name
# save the model so that we can reference it in the next step during batch inference
sm = boto3.client(service_name='sagemaker')
primary_container = {
'Image': knn.image_name,
'ModelDataUrl': knn.model_data,
}
knn_model = sm.create_model(
ModelName = knn.latest_training_job.job_name,
ExecutionRoleArn = knn.role,
PrimaryContainer = primary_container)
print "saved the model"
###Output
('KNN train features shape = ', (1682, 65))
uploaded KNN train data: s3://recommendation-system-12-06/knn/train.protobuf
###Markdown
Part 4 - Batch Transform
In this section, we will use SageMaker's batch transform option to batch predict top "X" for all the users.
###Code
#upload inference data to S3
knn_batch_data_path = writeDatasetToProtobuf(knn_user_matrix, bucket, knn_prefix, train_key, "dense")
print "Batch inference data path: ",knn_batch_data_path
# Initialize the transformer object
transformer =sagemaker.transformer.Transformer(
base_transform_job_name="knn",
model_name=knn_model_name,
instance_count=1,
instance_type=instance_type,
output_path=knn_output_prefix,
accept="application/jsonlines; verbose=true"
)
# Start a transform job:
transformer.transform(knn_batch_data_path, content_type='application/x-recordio-protobuf')
transformer.wait()
#Download predictions
results_file_name = "inference_output"
inference_output_file = "knn/output/train.protobuf.out"
s3_client = boto3.client('s3')
s3_client.download_file(bucket, inference_output_file, results_file_name)
with open(results_file_name) as f:
results = f.readlines()
import json
test_user_idx = 89
u_one_json = json.loads(results[test_user_idx])
print "Recommended movie Ids for user #{} : {}".format(test_user_idx+1, [int(movie_id) for movie_id in u_one_json['labels']])
print
print "Movie distances for user #{} : {}".format(test_user_idx+1, [round(distance, 4) for distance in u_one_json['distances']])
###Output
Recommended movie Ids for user #90 : [268, 89, 23, 923, 655, 165, 423, 192, 509, 28, 87, 193, 69, 493, 176, 208, 269, 705, 482, 527, 180, 183, 966, 166, 173, 216, 211, 520, 489, 124, 96, 9, 83, 246, 168, 1, 659, 185, 194, 56, 251, 519, 196, 132, 190, 316, 197, 204, 510, 302, 210, 484, 435, 134, 496, 285, 641, 181, 478, 170, 654, 963, 136, 523, 187, 223, 275, 1142, 313, 100, 1039, 651, 22, 408, 480, 498, 515, 178, 603, 79, 272, 191, 199, 114, 169, 127, 172, 427, 657, 357, 511, 318, 12, 174, 513, 98, 479, 50, 483, 64]
Movie distances for user #90 : [2.5401, 2.5459, 2.5539, 2.5628, 2.5637, 2.5671, 2.5817, 2.583, 2.5832, 2.5901, 2.5923, 2.5979, 2.6317, 2.6386, 2.6395, 2.6731, 2.677, 2.6853, 2.6866, 2.6995, 2.6997, 2.7006, 2.7317, 2.7437, 2.7449, 2.7922, 2.8071, 2.8205, 2.8214, 2.8386, 2.839, 2.8512, 2.8675, 2.8778, 2.8779, 2.8942, 2.9031, 2.9307, 2.9561, 2.9647, 2.9713, 2.9755, 2.9921, 2.9982, 2.9991, 3.0015, 3.0291, 3.0665, 3.0714, 3.0882, 3.0998, 3.1148, 3.1159, 3.1234, 3.1323, 3.1589, 3.1631, 3.2076, 3.2336, 3.2404, 3.2407, 3.2466, 3.2897, 3.3141, 3.3157, 3.3169, 3.3232, 3.3453, 3.4406, 3.4503, 3.4703, 3.4873, 3.4876, 3.5185, 3.5436, 3.5827, 3.6038, 3.6049, 3.6562, 3.6751, 3.7038, 3.7189, 3.7386, 3.769, 3.8471, 4.2482, 4.2502, 4.263, 4.2964, 4.3168, 4.3831, 4.3927, 4.416, 4.4293, 4.4504, 4.6206, 4.7173, 4.8195, 4.8528, 5.2139]
|
content/posts/ut/ut3/pd/pd5/notebook.ipynb | ###Markdown
Part 1: data loading and preparation
Create a Python file and add the required dependencies.
###Code
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
def plot_confusion_matrix(test_y, y_pred, printMode=True):
cm = confusion_matrix(test_y, y_pred)
if printMode:
print(cm)
sn.heatmap(cm, annot=True)
def load_data(path,printMode=True):
df = pd.read_csv(path, header=0)
if printMode:
print(df.values)
return df
###Output
_____no_output_____
###Markdown
Read the dataset from the CSV file using the Pandas library, and see how it is structured.
###Code
df = load_data("./sample.csv")
###Output
[[2.7810836 2.550537 0. ]
[1.46548937 2.36212508 0. ]
[3.39656169 4.40029353 0. ]
[1.38807019 1.85022032 0. ]
[3.06407232 3.00530597 0. ]
[2.2810836 2.950537 0. ]
[1.86548937 2.86212508 0. ]
[3.89656169 4.00029353 0. ]
[1.08807019 1.15022032 0. ]
[3.96407232 3.00530597 0. ]
[7.62753121 2.75926224 1. ]
[5.33244125 2.08862677 1. ]
[6.92259672 1.77106367 1. ]
[8.37541865 0.52068655 1. ]
[7.67375647 3.50856301 1. ]
[6.62753121 2.99262235 1. ]
[6.33244125 1.08862678 1. ]
[5.92259672 1.88106367 1. ]
[8.67541865 0.30206866 1. ]
[7.67375647 1.50856301 1. ]]
###Markdown
Plot the data using the library.
###Code
colors = ("orange", "blue")
plt.scatter(df['x'], df['y'], s=300, c=df['label'],
cmap=matplotlib.colors.ListedColormap(colors))
plt.show()
###Output
_____no_output_____
###Markdown
Extract the data and the classes from the dataset.
###Code
attributes = ['x', 'y']
labels = ['label']
X = df[attributes].values
y = df[labels].values
###Output
_____no_output_____
###Markdown
Part 2: training and testing
Split the dataset into two parts, one for training and one for testing.
###Code
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25,
random_state=0, shuffle=True)
###Output
_____no_output_____
###Markdown
Create an LDA model and train it.
###Code
lda = LinearDiscriminantAnalysis()
lda = lda.fit(train_X, train_y)
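# Note: the DataConversionWarning shown below can be avoided by passing a 1-D label array,
# e.g. lda.fit(train_X, train_y.ravel())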
###Output
/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
y = column_or_1d(y, warn=True)
###Markdown
Part 3: evaluation
Predict the classes for the test set and inspect the results.
###Code
y_pred = lda.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
###Output
Predicted vs Expected
[1 0 1 0 1]
[[1]
[0]
[1]
[0]
[1]]
###Markdown
Test the model and view the report. Look at the columns and what they mean.
###Code
print(classification_report(test_y, y_pred, digits=3))
###Output
precision recall f1-score support
0 1.000 1.000 1.000 2
1 1.000 1.000 1.000 3
accuracy 1.000 5
macro avg 1.000 1.000 1.000 5
weighted avg 1.000 1.000 1.000 5
###Markdown
View the confusion matrix and analyze the results.
###Code
plot_confusion_matrix(test_y, y_pred)
###Output
[[2 0]
[0 3]]
###Markdown
Part 4 (optional): Logistic Regression
###Code
lr = LogisticRegression()
lr = lr.fit(train_X, train_y)
y_pred = lr.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
print(classification_report(test_y, y_pred, digits=3))
plot_confusion_matrix(test_y, y_pred)
###Output
[[2 0]
[0 3]]
###Markdown
Exercise 2 - sports_Training.csv dataset
Redo the exercise from application assignment 6 for classifying athletes, this time using scikit-learn. At each step, compare the results with those obtained using RapidMiner.
###Code
df = load_data("./sports_Training.csv")
###Output
[[15.1 3 2 ... 29 4 'Futbol']
[15.4 3 2 ... 18 8 'Rugby']
[13.6 5 5 ... 27 28 'Voleibol']
...
[17.0 5 1 ... 54 40 'Futbol']
[17.3 5 1 ... 65 29 'Basketball']
[15.5 1 1 ... 22 29 'Rugby']]
###Markdown
Remove rows whose value for the 'CapacidadDecision' attribute is outside the limits. This can be done as follows using the Pandas library.
###Code
df = df[(df['CapacidadDecision'] >= 3) &
(df['CapacidadDecision'] <= 100)]
###Output
_____no_output_____
###Markdown
Transform string attributes into numbers
###Code
attributes = ['Edad', 'Fuerza', 'Velocidad', 'Lesiones', 'Vision', 'Resistencia',
'Agilidad', 'CapacidadDecision']
labels = ['DeportePrimario']
X = df[attributes].values
y = df[labels].values
# Encode the string class labels as integers (fit after y is defined for this dataset)
le = LabelEncoder()
y_encoded = le.fit_transform(y.ravel())
print(df[labels].value_counts())
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25,
random_state=0, shuffle=True)
lr = LinearDiscriminantAnalysis()
lr = lr.fit(train_X, train_y)
y_pred = lr.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
print(classification_report(test_y, y_pred, digits=3))
plot_confusion_matrix(test_y, y_pred)
###Output
[[ 3 7 12 6]
[ 1 26 6 5]
[ 3 6 15 3]
[ 2 13 8 5]]
###Markdown
Use the data from the sports_Scoring.csv file to classify the new individuals using the previously trained model. Compare the results with those obtained in TA6.
###Code
df_test = load_data("./sports_Scoring.csv")
df_test = df_test[(df_test['CapacidadDecision'] >= 3) &
(df_test['CapacidadDecision'] <= 100)]
x_test = df_test[attributes].values
y_pred = lr.predict(x_test)
print("Predicted vs Expected")
print(y_pred)
print(df_test)
df_test["prediction(DeportePrimario)"] = y_pred
df_test.to_csv("output_python.csv")
output_rapidminer = load_data("./output_rapidminer.csv")
output_python = load_data("./output_python.csv")
print(output_python["prediction(DeportePrimario)"].head())
print(output_rapidminer["prediction(DeportePrimario)"].head())
print(len(output_python["prediction(DeportePrimario)"]))
print(len(output_rapidminer["prediction(DeportePrimario)"]))
comparison = output_python["prediction(DeportePrimario)"] == output_rapidminer["prediction(DeportePrimario)"]
print(comparison)
comparison.value_counts()
###Output
_____no_output_____ |
dissecting-neural-odes/image_classification/cifar_zero_aug.ipynb | ###Markdown
CIFAR Image Classification with 0-augmented Neural ODEs
###Code
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.metrics.functional import accuracy
from utils import get_cifar_dloaders, CIFARLearner
from torchdyn.models import *; from torchdyn import *
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
trainloader, testloader = get_cifar_dloaders(batch_size=64)
###Output
Files already downloaded and verified
###Markdown
Define the model
###Code
func = nn.Sequential(nn.GroupNorm(42, 42),
nn.Conv2d(42, 42, 3, padding=1, bias=False),
nn.Softplus(),
nn.Conv2d(42, 42, 3, padding=1, bias=False),
nn.Softplus(),
nn.GroupNorm(42, 42),
nn.Conv2d(42, 42, 1)
).to(device)
nde = NeuralDE(func,
solver='dopri5',
sensitivity='adjoint',
atol=1e-4,
rtol=1e-4,
s_span=torch.linspace(0, 1, 2)).to(device)
# NOTE: the first noop `Augmenter` is used only to keep the `nde` at index `2`. Used to extract NFEs in CIFARLearner.
model = nn.Sequential(Augmenter(1, 0), # does nothing
Augmenter(1, 39),
nde,
nn.Conv2d(42, 6, 1),
nn.AdaptiveAvgPool2d(4),
nn.Flatten(),
nn.Linear(6*16, 10)).to(device)
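# Channel bookkeeping: CIFAR-10 inputs have 3 channels; Augmenter(1, 39) appends 39
# zero-valued channels, giving the 42 channels expected by `func` (GroupNorm(42, 42) and
# the 42-channel convolutions). The final 1x1 conv, pooling and linear layer then map the
# 42-channel ODE state down to the 10 class logits.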
learn = CIFARLearner(model, trainloader, testloader)
trainer = pl.Trainer(max_epochs=20, gpus=1)
trainer.fit(learn)
###Output
_____no_output_____ |
Jour 4/resolution exercices.ipynb | ###Markdown
Solving quadratic equations.
###Code
import math
def resoudre(**equation):
a = equation['a']
b = equation['b']
c = equation['c']
# compute the discriminant
delta =( b*b) - (4*a*c)
if delta > 0 :
x1 = (-b + math.sqrt(delta))/(2*a)
x2 = (-b - math.sqrt(delta))/(2*a)
return (x1, x2)
elif delta == 0:
x12 = -b/(2*a)
return (x12,)
else :
print('No real solutions in R')
resoudre(c=1,b=2,a=1)
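# Note: when the discriminant is negative the function only prints a message and
# implicitly returns None, e.g. resoudre(a=1, b=0, c=1).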
###Output
_____no_output_____ |
Courses/TensorFlow in Practice/Convolutional Neural Networks in TensorFlow/Week 4/Multiclass Classifications/Exercise_8_Answer.ipynb | ###Markdown
The data for this exercise is available at: https://www.kaggle.com/datamunge/sign-language-mnist/home
Sign up and download to find 2 CSV files: sign_mnist_test.csv and sign_mnist_train.csv -- You will upload both of them using this button before you can continue.
###Code
import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab import files
uploaded = files.upload()
def get_data(filename):
with open(filename) as training_file:
csv_reader = csv.reader(training_file, delimiter=',')
first_line = True
temp_images = []
temp_labels = []
for row in csv_reader:
if first_line:
# print("Ignoring first line")
first_line = False
else:
temp_labels.append(row[0])
image_data = row[1:785]
image_data_as_array = np.array_split(image_data, 28)
temp_images.append(image_data_as_array)
images = np.array(temp_images).astype('float')
labels = np.array(temp_labels).astype('float')
return images, labels
training_images, training_labels = get_data('sign_mnist_train.csv')
testing_images, testing_labels = get_data('sign_mnist_test.csv')
print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
training_images = np.expand_dims(training_images, axis=3)
testing_images = np.expand_dims(testing_images, axis=3)
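# expand_dims adds a trailing channel axis so the arrays have shape (num_samples, 28, 28, 1),
# which is what the Conv2D layers below expect with channels_last data.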
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
validation_datagen = ImageDataGenerator(
rescale=1. / 255)
print(training_images.shape)
print(testing_images.shape)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(26, activation=tf.nn.softmax)])
model.compile(optimizer = tf.train.AdamOptimizer(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit_generator(train_datagen.flow(training_images, training_labels, batch_size=32),
steps_per_epoch=len(training_images) / 32,
epochs=15,
validation_data=validation_datagen.flow(testing_images, testing_labels, batch_size=32),
validation_steps=len(testing_images) / 32)
model.evaluate(testing_images, testing_labels)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____ |
Packt-Maths-Data-Scientist/CHAPTER 2 - Functions, equations, plots, and limit.ipynb | ###Markdown
HEADING 1: Algebraic, trigonometric, and transcendental functions
Polynomial function
We encounter polynomial functions in everyday computations. For example, if we say that the cost (___C___) of an industrial product is proportional to the raw material (___M___) it consumes (plus a fixed cost ___F___), then we can write the total cost as
$$C = k \cdot M + F \text{ where } k \text{ is the constant of proportionality}$$
This cost function (and this type of equation) is polynomial because it contains the variable ___M___ raised only to a non-negative integer power (here, the first power).
HEADING 2: Python libraries for numerical computing and visualization
`NumPy` library
In the daily work of a data scientist, reading and manipulating arrays is one of the most important and frequently encountered jobs. These arrays could be a one-dimensional list or a multi-dimensional table or matrix, full of numbers. An array could be filled with integers, floating point numbers, Booleans, strings, or even mixed types. However, in the majority of cases, numeric data types are predominant. In this regard, __NumPy arrays__ will be the most important object in Python that you need to know in depth. `NumPy` and `SciPy` are open-source libraries within the Python ecosystem that provide common mathematical and numerical routines in fast (and often pre-compiled) functions. One of the main objects of the `NumPy` module is to handle or create __single- or multi-dimensional arrays__. We will use this advanced data structure more extensively in Part III of this book when we discuss linear algebra. For this chapter, however, we will focus on the basic mathematical operations that can be performed using the NumPy library.
###Code
import numpy as np
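# A quick sketch of the linear cost function C = k*M + F from the introduction.
# The k and F values below are illustrative assumptions, not figures from the text.
k, F = 2.5, 100.0              # cost per unit of raw material, fixed cost
M = np.array([10, 20, 30])     # raw material consumed
C = k * M + F                  # -> array([125., 150., 175.])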
###Output
_____no_output_____
###Markdown
Create an array from a Python list
###Code
my_list = [2,4,5]
my_array = np.array(my_list)
print(my_array)
lst1 = [1,2,3]
array1 = np.array(lst1)
lst2 = [10,11,12]
array2 = np.array(lst2)
list_sum = lst1+lst2
array_sum = array1 + array2
print("Sum of two lists:",list_sum)
print("Sum of two numpy arrays:",array_sum)
###Output
Sum of two lists: [1, 2, 3, 10, 11, 12]
Sum of two numpy arrays: [11 13 15]
###Markdown
Basic mathematical operations using arrays
###Code
print("array1 multiplied by array2: ",array1*array2)
print("array2 divided by array1: ",array2/array1)
print("array2 raised to the power of array1: ",array2**array1)
###Output
array1 multiplied by array2: [10 22 36]
array2 divided by array1: [10. 5.5 4. ]
array2 raised to the power of array1: [ 10 121 1728]
###Markdown
More advanced mathematical operations on Numpy arrays
###Code
print ("Sine of 0.5:", np.sin(0.5))
print("Exponential of 2.2:", np.exp(2.2))
print("Natural logarithm of 5:", np.log(5))
print("10-base logarithm of 100:", np.log10(100))
print("Inverse cosine of 0.25:", np.arccos(0.25))
print("Hyperbolic sine of 2.5:", np.sinh(2.5))
###Output
Sine of 0.5: 0.479425538604203
Exponential of 2.2: 9.025013499434122
Natural logarithm of 5: 1.6094379124341003
10-base logarithm of 100: 2.0
Inverse cosine of 0.25: 1.318116071652818
Hyperbolic sine of 2.5: 6.0502044810397875
###Markdown
Proving a mathematical identity using NumPy operations
###Code
a = [i for i in range(1,21)]
arr = np.array(a)
x = [0.5]*20
x_arr = np.array(x)
log = ((-1)**(arr+1))*x_arr**arr/arr
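# Identity being verified: ln(1 + x) = sum_{n>=1} (-1)**(n+1) * x**n / n for |x| <= 1;
# the 20-term partial sum at x = 0.5 should closely match np.log(1.5) below.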
print("Result using summation of the NumPy array:",np.sum(log))
print("Result using direct NumPy function:",np.log(1+0.5))
###Output
Result using direct NumPy function: 0.4054651081081644
###Markdown
Visualization using Matplotlib library
Creating a basic plot
To create a simple one-dimensional plot we need some data. Let us first generate the data using a NumPy function as we learned in the previous section.
###Code
import matplotlib.pyplot as plt
x = np.arange(1,50.1,0.1)
print(x)
sinx = np.sin(x)
plt.plot(sinx)
###Output
_____no_output_____
###Markdown
Advanced features of the plotting function
###Code
plt.figure(figsize=(10,5))
plt.title("Plot of sin(x) vs. x\n",fontsize=20)
plt.xlabel("x values",fontsize=16)
plt.ylabel("Trigonometric function, sin(x)",fontsize=16)
plt.grid (True)
plt.ylim(-1.5,1.5)
plt.xticks([i*5 for i in range(55)],fontsize=15)
plt.yticks(fontsize=15)
plt.scatter(x=x,y=sinx,c='orange',s=50)
plt.text(x=25,y=1.1,s="A vertical blue dashed line \nis drawn at x=21",fontsize=15)
plt.vlines(x=21,ymin=-1.5,ymax=1.5,linestyles='dashed',color='blue',lw=3)
plt.legend(['Plot of sin(x)'],loc=2,fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
Plots of few common functions
###Code
x = np.arange(0,10,0.1)
def plotx(x,y, title):
plt.figure(figsize=(6,4))
plt.title (title, fontsize=20)
plt.plot(x,y,lw=3,c='blue')
plt.grid(True)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel("x-values",fontsize=15)
plt.ylabel("Function values",fontsize=15)
plt.show()
y = x
title = "Linear function"
plotx(x,y,title)
y = x**2
title = "Quadratic function"
plotx(x,y,title)
y = np.exp(x)
title = "Exponential function"
plotx(x,y,title)
y = np.log(x)
title = "Natural logarithm function"
plotx(x,y,title)
y = np.cos(x)
title = "Cosine function"
plotx(x,y,title)
y = np.exp(-0.3*x)*np.sin(2*x)
title = "Exponentially decayed sine function"
plotx(x,y,title)
###Output
_____no_output_____
###Markdown
Quadratic equation solve
###Code
def solve_quad(a=1,b=2,c=1):
"""
Solves a quadratic equation and returns the roots (real or complex) as a tuple
"""
from math import sqrt
z= b**2-4*a*c
if z >= 0:
x1 = (-b+sqrt(z))/(2*a)
x2 = (-b-sqrt(z))/(2*a)
return (x1,x2)
else:
x1 = complex((-b/2*a),sqrt(-z)/(2*a))
x2 = complex((-b/2*a),-sqrt(-z)/(2*a))
return (x1,x2)
solve_quad(1,-2,1)
solve_quad(2,-5,4)
###Output
_____no_output_____
###Markdown
Solving $$2x^2-7x+4=0$$
###Code
x1,x2=solve_quad(2,-7,4)
x1
x2
###Output
_____no_output_____
###Markdown
Product of the roots
###Code
x1*x2
###Output
_____no_output_____
###Markdown
Sum of the roots
###Code
x1+x2
###Output
_____no_output_____
###Markdown
Growth of functions
###Code
logx = []
linx = []
quadx = []
for x in range(1,21):
logx.append(15*np.log10(x))
linx.append(2*x-6)
quadx.append(x**2-100)
logx=np.array(logx)
linx=np.array(linx)
quadx=np.array(quadx)
plt.figure(figsize=(8,6))
plt.plot(logx,c='r')
plt.plot(linx)
plt.plot(quadx)
plt.grid(True)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.legend(['Logarithmic function','Linear function','Quadratic function'],fontsize=15)
plt.show()
###Output
_____no_output_____ |
tutorials/notebook/cx_site_chart_examples/heatmap_4.ipynb | ###Markdown
Example: CanvasXpress heatmap Chart No. 4
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/heatmap-4.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="heatmap4",
data={
"z": {
"Type": [
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth",
"Pro",
"Tyr",
"Pho",
"Kin",
"Oth"
],
"Sens": [
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4,
1,
2,
3,
4
]
},
"x": {
"Treatment": [
"Control",
"Control",
"Control",
"Control",
"Control",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB",
"TreatmentA",
"TreatmentB"
],
"Site": [
"Site1",
"Site1",
"Site1",
"Site1",
"Site1",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site2",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3",
"Site3"
],
"Dose-Type": [
"",
"",
"",
"",
"",
"Dose1",
"Dose1",
"Dose2",
"Dose2",
"Dose3",
"Dose3",
"Dose4",
"Dose4",
"Dose5",
"Dose5",
"Dose1",
"Dose1",
"Dose2",
"Dose2",
"Dose3",
"Dose3",
"Dose4",
"Dose4",
"Dose5",
"Dose5"
],
"Dose": [
0,
0,
0,
0,
0,
5,
5,
10,
10,
15,
15,
20,
20,
25,
25,
5,
5,
10,
10,
15,
15,
20,
20,
25,
25
]
},
"y": {
"smps": [
"S1",
"S2",
"S3",
"S4",
"S5",
"S6",
"S7",
"S8",
"S9",
"S10",
"S11",
"S12",
"S13",
"S14",
"S15",
"S16",
"S17",
"S18",
"S19",
"S20",
"S21",
"S22",
"S23",
"S24",
"S25"
],
"vars": [
"V1",
"V2",
"V3",
"V4",
"V5",
"V6",
"V7",
"V8",
"V9",
"V10",
"V11",
"V12",
"V13",
"V14",
"V15",
"V16",
"V17",
"V18",
"V19",
"V20",
"V21",
"V22",
"V23",
"V24",
"V25",
"V26",
"V27",
"V28",
"V29",
"V30",
"V31",
"V32",
"V33",
"V34",
"V35",
"V36",
"V37",
"V38",
"V39",
"V40"
],
"data": [
[
0.784,
1.036,
-0.641,
1.606,
2.208,
3.879,
0.333,
2.265,
-1.55,
1.678,
-0.639,
-0.533,
-0.078,
0.433,
0.391,
1.013,
0.928,
0.812,
0.072,
3.564,
0.47,
1.836,
0.351,
3.139,
-2.207
],
[
0.222,
0.716,
0.993,
-0.913,
0.996,
1.235,
1.396,
1.817,
0.162,
1.137,
-0.126,
1.56,
1.003,
1.86,
0.43,
0.696,
0.777,
1.6,
0.175,
2.423,
0.044,
3.881,
-0.757,
1.486,
0.01
],
[
0.486,
2.15,
-0.069,
-0.468,
0.402,
0.725,
-1.697,
0.653,
0.101,
2.852,
-0.27,
0.414,
-0.789,
1.877,
1.555,
2.511,
0.07,
0.244,
-0.41,
2.345,
2.401,
-0.033,
0.951,
2.053,
0.725
],
[
-1.857,
0.698,
0.528,
1.024,
-0.035,
2.181,
-0.015,
3.68,
-1.13,
-0.842,
-1.759,
1.784,
-0.673,
0.147,
0.765,
1.585,
0.33,
1.481,
-0.362,
1.456,
-0.719,
0.961,
1.296,
2.375,
0.208
],
[
-1.19,
1.564,
-2.407,
0.642,
-0.51,
4.116,
-0.379,
0.786,
1.508,
3.119,
1.011,
1.54,
1.184,
1.821,
-0.217,
2.752,
0.083,
1.663,
0.568,
2.48,
-1.207,
1.222,
0.296,
1.055,
1.078
],
[
0.256,
1.214,
1.919,
0.577,
1.07,
1.53,
1.537,
3.063,
0.481,
2.332,
-1.466,
0.167,
0.428,
1.401,
-1.716,
3.524,
-0.822,
1.073,
-1.825,
3.923,
-0.542,
2.637,
-1.296,
0.759,
0.836
],
[
-0.443,
0.286,
0.379,
1.076,
0.478,
3.443,
-0.287,
1.206,
-1.275,
2.275,
1.101,
2.821,
-0.638,
0.922,
-0.205,
2.318,
0.494,
1.648,
-0.585,
1.963,
-0.636,
1.229,
0.998,
1.523,
-1.01
],
[
1.023,
-0.417,
0.865,
1.645,
0.324,
1.94,
0.122,
-0.171,
0.352,
1.42,
-0.436,
3.076,
0.434,
0.986,
-1.912,
3.899,
-0.212,
0.716,
0.782,
0.534,
1.939,
1.374,
-0.083,
2.318,
0.982
],
[
-2.33,
0.575,
-0.543,
-0.38,
-2.153,
1.717,
-1.219,
0.725,
0.273,
1.908,
0.488,
1.426,
0.108,
2.586,
0.653,
0.317,
0.112,
3.138,
0.212,
1.393,
-0.506,
1.87,
0.332,
1.893,
1.017
],
[
0.841,
0.146,
0.716,
-0.233,
-0.206,
0.237,
-0.307,
2.499,
-1.619,
1.957,
-0.12,
3.058,
0.511,
3.598,
0.286,
0.922,
0.164,
0.782,
-3.468,
0.262,
0.812,
0.798,
1.209,
2.964,
-1.47
],
[
-0.099,
1.666,
-1.635,
1.475,
-0.186,
0.781,
-1.919,
1.472,
-0.109,
1.588,
-0.379,
0.862,
-1.455,
2.386,
2.783,
0.98,
-0.136,
1.042,
0.532,
1.778,
0.463,
0.647,
0.92,
2.427,
-0.07
],
[
0.663,
-1.411,
-0.69,
-0.216,
-0.735,
1.878,
-0.073,
1.568,
-1.254,
3.792,
-0.345,
3.384,
0.206,
1.572,
0.134,
2.035,
-0.26,
2.42,
0.437,
2.164,
-0.063,
5.027,
-0.166,
3.878,
-1.313
],
[
-0.647,
-1.152,
3.437,
-0.3,
0.358,
1.766,
0.067,
0.149,
-1.005,
1.191,
-1.299,
1.326,
-2.378,
1.8,
-0.858,
2.019,
-1.357,
2.278,
-0.711,
2.196,
-0.243,
3.326,
-0.215,
2.25,
-0.504
],
[
-0.264,
-1.328,
1.228,
1.247,
0.692,
1.763,
-0.978,
2.781,
-0.058,
2.223,
0.796,
2.414,
-1.834,
3.203,
0.459,
2.914,
0.375,
3.309,
0.946,
0.943,
-1.365,
2.452,
0.474,
0.503,
0.025
],
[
0.253,
-0.529,
-0.429,
-1.111,
0.398,
2.332,
-1.334,
2.202,
-0.585,
1.885,
0.398,
1.788,
0.972,
2.025,
-0.835,
0.622,
0.001,
0.837,
-0.776,
2.257,
0.682,
1.304,
2.407,
4.038,
0.518
],
[
-0.876,
-1.41,
0.538,
-1.04,
-0.717,
-0.889,
3.129,
1.202,
3.398,
0.398,
3.857,
1.372,
4.813,
-1.311,
4.029,
-0.432,
3.01,
0.756,
4.688,
0.294,
4.61,
0.859,
4.498,
1.794,
3.319
],
[
-0.363,
0.042,
-0.253,
-0.076,
-1.27,
-0.904,
2.931,
-0.119,
2.669,
-0.165,
6.023,
-0.65,
2.031,
1.424,
2.844,
-1.019,
4.062,
-0.025,
2.637,
-0.317,
4.228,
-0.142,
3.013,
0.611,
3.74
],
[
-1.674,
-0.318,
-0.726,
-1.271,
1.753,
-1.678,
3.341,
-1.772,
3.814,
-1.391,
2.622,
0.677,
3.307,
-0.92,
3.545,
0.305,
2.808,
0.836,
4.532,
-0.378,
4.87,
-0.044,
4.061,
1.684,
5.486
],
[
-0.288,
0.165,
-0.468,
1.219,
-3.353,
-0.578,
3.414,
-0.674,
4.755,
0.033,
4.025,
0.44,
4.186,
1.136,
2.505,
0.436,
3.293,
-0.868,
4.746,
-0.545,
3.666,
-0.295,
3.206,
-0.966,
4.678
],
[
-0.558,
-0.855,
-1.114,
-0.623,
0.368,
-0.182,
4.37,
0.563,
3.75,
0.189,
2.717,
-1.708,
5.274,
0.741,
2.537,
-1.583,
2.832,
-1.515,
3.829,
0.358,
5.306,
0.388,
3.284,
0.661,
3.804
],
[
1.693,
-1.53,
0.057,
-0.217,
0.511,
0.309,
3.998,
0.925,
1.045,
0.379,
2.024,
0.331,
3.612,
0.151,
5.808,
-1.429,
3.402,
-0.297,
4.692,
-0.439,
4.521,
-0.816,
4.693,
0.323,
2.869
],
[
-0.234,
1.999,
-1.603,
-0.292,
-0.91,
-0.766,
6.167,
1.242,
4.219,
-1.291,
6.974,
-0.443,
4.039,
0.72,
3.808,
1.465,
2.86,
2.736,
4.675,
-0.554,
3.091,
0.057,
4.311,
-0.005,
2.605
],
[
0.529,
-1.721,
2.207,
-0.873,
-1.364,
1.139,
3.146,
1.277,
3.963,
-0.234,
4.581,
-1.266,
3.743,
-0.84,
3.682,
-0.566,
4.249,
0.599,
4.202,
0.125,
4.136,
-0.67,
3.433,
-0.954,
3.97
],
[
-0.529,
0.375,
0.204,
-0.529,
1.001,
0.244,
3.922,
-0.904,
3.479,
-0.926,
4.171,
-0.047,
2.158,
0.467,
2.277,
0.429,
3.903,
-1.013,
3.182,
0.73,
3.318,
-1.663,
4.222,
0.264,
3.538
],
[
2.302,
-0.218,
-1.184,
-0.644,
0.118,
-1.35,
4.497,
1.262,
5.131,
-1.095,
4.354,
-1.364,
4.376,
-0.936,
3.278,
0.753,
4.903,
-2.193,
3.336,
0.722,
3.92,
-1.341,
4.762,
1.756,
4.032
],
[
0.957,
1.309,
-1.317,
1.254,
-0.397,
0.004,
3.34,
1.233,
4.681,
-0.875,
2.497,
0.207,
1.703,
-0.614,
3.171,
-0.034,
2.59,
0.968,
3.576,
0.946,
3.85,
1.128,
4.015,
0.633,
3.148
],
[
-0.789,
-1.139,
0.066,
0.418,
0.366,
-0.932,
3.982,
0.151,
4.018,
0.74,
5.374,
0.067,
6.07,
1.178,
6.316,
1.948,
3.389,
0.383,
5.084,
-0.251,
3.874,
-0.715,
3.101,
-0.172,
4.867
],
[
-0.26,
-0.005,
-0.12,
-0.422,
0.629,
1.242,
3.954,
-0.027,
4.352,
-0.074,
4.369,
0.196,
4.847,
-0.763,
3.042,
-1.643,
3.952,
-1.358,
4.105,
-0.257,
4.168,
0.047,
1.782,
-0.585,
5.465
],
[
1.882,
0.869,
-1.305,
1.095,
1.002,
-0.897,
3.248,
1.113,
5.83,
0.298,
4.811,
-0.128,
3.263,
0.186,
4.244,
1.314,
2.832,
0.222,
3.899,
-1.279,
4.133,
-1.523,
4.49,
0.966,
4.658
],
[
-1.052,
0.429,
0.646,
0.642,
1.037,
-1.046,
1.724,
-0.698,
5.316,
-0.403,
2.821,
-0.108,
5.52,
-0.352,
3.298,
-0.716,
2.672,
1.499,
3.919,
0.202,
3.409,
0.841,
5.47,
1.225,
1.988
],
[
-1.862,
-0.589,
0.205,
1.281,
-1.256,
0.924,
4.189,
-1.219,
3.137,
0.142,
5.869,
0.529,
2.138,
-0.034,
3.921,
-1.097,
5.402,
1.468,
5.034,
0.088,
3.055,
1.587,
3.374,
0.377,
2.939
],
[
-0.315,
-0.369,
0.634,
0.495,
-1.059,
-0.481,
1.329,
1.105,
5.3,
0.135,
6.515,
0.001,
4.161,
1.686,
4.747,
-0.911,
3.24,
-1.461,
4.64,
0.698,
5.006,
-1.072,
4.608,
-0.317,
5.208
],
[
0.558,
0.793,
-1.713,
0.055,
2.242,
0.588,
3.785,
2.949,
2.175,
2.055,
3.328,
0.236,
3.549,
-0.009,
1.477,
0.538,
3.116,
-0.621,
5.203,
0.736,
3.606,
-0.313,
4.402,
-1.039,
3.894
],
[
-1.332,
-1.134,
0.153,
0.66,
1.764,
-0.588,
3.417,
-0.547,
3.849,
-1.521,
3.332,
0.88,
5.449,
0.179,
4.596,
0.626,
4.006,
0.33,
2.969,
-0.42,
2.606,
-0.485,
4.581,
-0.199,
5.008
],
[
0.29,
0.228,
0.117,
-0.587,
-2.116,
0.188,
4.009,
0.551,
3.127,
0.682,
3.858,
-1.053,
4.388,
-1.46,
1.468,
0.434,
4.221,
0.782,
2.992,
0.056,
5.223,
-0.747,
6.549,
-0.959,
3.714
],
[
-0.015,
-1.665,
1.007,
0.278,
-0.091,
1.919,
3.861,
-0.318,
3.026,
-1.642,
5.379,
2.097,
4.396,
0.802,
3.66,
0.544,
2.156,
0.87,
4.044,
0.3,
4.422,
-0.788,
4.677,
-0.215,
4.643
],
[
-0.984,
0.915,
0.944,
-1.975,
-1.717,
0.16,
4.748,
1.521,
4.091,
-0.386,
3.802,
-1.134,
5.701,
-0.402,
5.682,
-0.987,
4.281,
0.844,
3.427,
1.368,
3.358,
-1.748,
3.792,
2.125,
5.137
],
[
-0.399,
-0.613,
2.211,
0.238,
2.799,
0.687,
5.522,
0.534,
5.233,
-0.395,
4.387,
-1.733,
4.494,
-1.26,
4.693,
1.679,
4.477,
0.399,
2.508,
1.683,
3.185,
0.865,
4.958,
0.602,
4.371
],
[
1.205,
-0.562,
1.134,
0.202,
0.209,
0.692,
2.419,
0.397,
2.429,
0.911,
6.341,
0.713,
4.548,
-0.688,
3.947,
0.439,
4.69,
-0.324,
3.07,
0.265,
3.757,
-1.535,
5.434,
-0.017,
4.125
],
[
-0.298,
0.118,
1.653,
1.519,
-0.821,
-0.85,
4.602,
1.073,
5.087,
0.155,
6.987,
-0.716,
2.912,
0.581,
2.112,
-0.426,
3.523,
0.188,
4.548,
0.155,
4.256,
0.775,
2.607,
-0.697,
5.338
]
]
}
},
config={
"colorSpectrum": [
"magenta",
"blue",
"black",
"red",
"gold"
],
"graphType": "Heatmap",
"heatmapCellBox": False,
"samplesClustered": True,
"showSmpDendrogram": False,
"showVarDendrogram": False,
"title": "Cluster Heatmap Without Trees",
"variablesClustered": True
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="heatmap_4.html")
###Output
_____no_output_____ |
pytorch/DeepLearningwithPyTorch_Code/DLwithPyTorch-master/Chapter03/Image Classification Dogs and Cats.ipynb | ###Markdown
A peek into the downloaded data

```
chapter3/
    dogsandcats/
        train/
            dog.183.jpg
            cat.2.jpg
            cat.17.jpg
            dog.186.jpg
            cat.27.jpg
            dog.193.jpg
```

```
chapter3/
    dogsandcats/
        train/
            dog/
                dog.183.jpg
                dog.186.jpg
                dog.193.jpg
            cat/
                cat.17.jpg
                cat.2.jpg
                cat.27.jpg
        valid/
            dog/
                dog.173.jpg
                dog.156.jpg
                dog.123.jpg
            cat/
                cat.172.jpg
                cat.20.jpg
                cat.21.jpg
```

Create validation data set
###Code
path = '../chapter3/dogsandcats/'
files = glob(os.path.join(path,'*/*.jpg'))
print(f'Total no of images {len(files)}')
no_of_images = 25000
no_of_images = len(files)
no_of_images*0.8
shuffle = np.random.permutation(no_of_images)
os.mkdir(os.path.join(path,'valid'))
for t in ['train','valid']:
for folder in ['dog/','cat/']:
os.mkdir(os.path.join(path,t,folder))
for i in shuffle[:2000]:
#shutil.copyfile(files[i],'../chapter3/dogsandcats/valid/')
folder = files[i].split('/')[-1].split('.')[0]
image = files[i].split('/')[-1]
os.rename(files[i],os.path.join(path,'valid',folder,image))
for i in shuffle[2000:]:
#shutil.copyfile(files[i],'../chapter3/dogsandcats/valid/')
folder = files[i].split('/')[-1].split('.')[0]
image = files[i].split('/')[-1]
os.rename(files[i],os.path.join(path,'train',folder,image))
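# The shuffled indices are split so that the first 2,000 images are moved into valid/ and
# the remaining images into train/; the class sub-folder (dog/ or cat/) is inferred from
# the filename prefix.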
###Output
_____no_output_____
###Markdown
Check if GPU is present
###Code
if torch.cuda.is_available():
is_cuda = True
###Output
_____no_output_____
###Markdown
Load data into PyTorch tensors
###Code
simple_transform = transforms.Compose([transforms.Resize((224,224))
,transforms.ToTensor()
,transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
train = ImageFolder('dogsandcats/train/',simple_transform)
valid = ImageFolder('dogsandcats/valid/',simple_transform)
print(train.class_to_idx)
print(train.classes)
imshow(train[50][0])
###Output
_____no_output_____
###Markdown
Create data generators
###Code
train_data_gen = torch.utils.data.DataLoader(train,shuffle=True,batch_size=64,num_workers=3)
valid_data_gen = torch.utils.data.DataLoader(valid,batch_size=64,num_workers=3)
dataset_sizes = {'train':len(train_data_gen.dataset),'valid':len(valid_data_gen.dataset)}
dataloaders = {'train':train_data_gen,'valid':valid_data_gen}
###Output
_____no_output_____
###Markdown
Create a network
###Code
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
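# Transfer learning: keep the ImageNet-pretrained ResNet-18 features and replace only the
# final fully connected layer with a fresh 2-unit head for the cat/dog classes.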
if torch.cuda.is_available():
model_ft = model_ft.cuda()
model_ft
# Loss and Optimizer
learning_rate = 0.001
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
def train_model(model, criterion, optimizer, scheduler, num_epochs=5):
since = time.time()
best_model_wts = model.state_dict()
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'valid']:
if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if torch.cuda.is_available():
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0]
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=2)
###Output
Epoch 0/1
----------
train Loss: 0.0013 Acc: 0.9666
valid Loss: 0.0006 Acc: 0.9875
Epoch 1/1
----------
train Loss: 0.0005 Acc: 0.9887
valid Loss: 0.0005 Acc: 0.9890
Training complete in 1m 49s
Best val Acc: 0.989000
|
notebooks/02_Getz_EDA_First_Pass.ipynb | ###Markdown
Exploratory Data Analysis (EDA)
Conduct EDA on the Starcraft 2 data to examine the relationships between variables and other trends in the data.
Imports
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Load the data
###Code
starcraft_loc = '../data/interimStarcraft_cleaned.csv'
#using index_col = 0 to drop the uncessary number column added by saving the data from the previous notebook.
starcraft = pd.read_csv(starcraft_loc,index_col = 0)
starcraft.info()
starcraft.head()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 3338 entries, 0 to 3339
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 GameID 3338 non-null int64
1 LeagueIndex 3338 non-null int64
2 Age 3338 non-null int64
3 HoursPerWeek 3338 non-null int64
4 TotalHours 3338 non-null int64
5 APM 3338 non-null float64
6 SelectByHotkeys 3338 non-null float64
7 AssignToHotkeys 3338 non-null float64
8 UniqueHotkeys 3338 non-null int64
9 MinimapAttacks 3338 non-null float64
10 MinimapRightClicks 3338 non-null float64
11 NumberOfPACs 3338 non-null float64
12 GapBetweenPACs 3338 non-null float64
13 ActionLatency 3338 non-null float64
14 ActionsInPAC 3338 non-null float64
15 TotalMapExplored 3338 non-null int64
16 WorkersMade 3338 non-null float64
17 UniqueUnitsMade 3338 non-null int64
18 ComplexUnitsMade 3338 non-null float64
19 ComplexAbilitiesUsed 3338 non-null float64
dtypes: float64(12), int64(8)
memory usage: 547.6 KB
###Markdown
Data Story
Getting to a high rank in Starcraft II is hard. So hard, in fact, that there is a large amount of content produced on the internet targeted towards helping you get "better" at the game. Yet "better" at the game is a pretty hard idea to quantify in a meaningful way. Given that your rank in Starcraft II is often used as a measure of your overall skill, we aren't particularly interested in some empirically "true" measure of being good at the game and are more interested in what particular game actions and habits are done more often by higher-ranking players. Therefore we will be exploring the relationship between League Index and the other independent variables.
###Code
#Given that League Index is a categorical variable, not a continous one
#It is useful for us to find a continous variable with a high correlation to League Index for future predicitive modeling
starcraft.corr()
sns.heatmap(data=starcraft.corr())
###Output
_____no_output_____
###Markdown
At first glance, it appears that APM (actions per minute) and NumberOfPACs are highly positively correlated with league index, while ActionLatency and GapBetweenPACs are highly negatively correlated. It may be interesting to plot these values against each other. A PAC is simply a way of measuring a shift in focus: a single PAC is counted when the screen is shifted to a new area for a minimum amount of time and at least one action is performed.
###Code
colsOfInterest = ['APM','NumberOfPACs','ActionLatency','GapBetweenPACs']
for i in range(len(colsOfInterest)):
for j in colsOfInterest[i+1:]:
_ = sns.scatterplot(x=colsOfInterest[i],y=j,data=starcraft,hue='LeagueIndex')
_ = plt.legend()
_ = plt.xlabel(colsOfInterest[i])
_ = plt.ylabel(j)
_ = plt.title(j+ ' vs ' +colsOfInterest[i])
plt.show()
###Output
_____no_output_____
###Markdown
We find that Number of PACs and APM seem to have similar relationships with our other, negative predictors, and that our correlation assessment from the above heatmap and table seems to hold true. Given that we have more information about PACs (for example, the number of actions within a PAC in a given game), of the two high-correlation predictors of league it makes the most sense to use NumberOfPACs as our continuous variable when moving forward. Let's continue to explore how other factors may affect the number of PACs.
###Code
#We look at age and hours per week compared to Number of PACs
#We want to categorize data into low high numbers of hours per week so first we find summary statistics for the hour column
starcraft['HoursPerWeek'].describe()
#We are going to choose to split our hours around the median, so as to reduce the effect of outliers
starcraft['HoursType']=starcraft['HoursPerWeek'].apply(lambda x: 'High' if x>12.0 else 'Low')
starcraft.head()
_ = sns.scatterplot(y='NumberOfPACs',x='Age',hue='HoursType',data=starcraft,alpha=.5)
_ = plt.title('Number Of PACs vs age, grouped by Low or High weekly hours')
plt.show()
###Output
_____no_output_____
###Markdown
Given that the yellow and blue dots are dispersed relatively evenly, it doesn't seem as if hours per week has a huge impact on the number of PACs. We do notice a relatively steep drop-off in the number of PACs after about 25 years of age. This is worth keeping an eye on, so let's check whether this is a meaningful factor or whether it is mostly driven by the number of responses we have from younger players.
###Code
#Get summary statistics for age
starcraft['Age'].describe()
#check the summary statistics for number of PACs for players above our 75th percentile of age
old_craft = starcraft[starcraft['Age'] > 24]
old_craft['NumberOfPACs'].describe()
#compare to number of PACs summary stats of the whole data set
starcraft['NumberOfPACs'].describe()
###Output
_____no_output_____
###Markdown
We do find that while hours per week doesn't appear to have a consistent impact on the number of PACs, players who are 25 years or older do seem to have a reduced number of PACs. Let's explore how hotkey management might affect our Number of PACs stat. We have 3 hotkey-related columns. We will also color by age to see if we continue to find a difference by age.
###Code
hotkey_vars = ['SelectByHotkeys','AssignToHotkeys','UniqueHotkeys']
for i in hotkey_vars:
_ = sns.scatterplot(x=i,y='NumberOfPACs',data=starcraft,hue='Age',alpha=.8)
_ = plt.legend()
_ = plt.title('Number of PACs vs ' + i)
plt.show()
###Output
_____no_output_____
###Markdown
In these figures, it does not appear as if age is a meaningful contributor, though we do see that all three hotkey stats have a slight positive relationship with PACs; AssignToHotkeys has the most notable positive relationship. So far, we have found that Number of PACs appears to be a good predictor of League Index, and that APM has the largest positive correlation with Number of PACs, followed by AssignToHotkeys, while ActionLatency seemed to have the greatest negative impact on Number of PACs. Finally, we will check these variables' effects on League Index to confirm that Number of PACs is a good continuous-variable substitute for our categorical variable of interest, League Index.
###Code
_ = sns.boxplot(y='NumberOfPACs',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='APM',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='AssignToHotkeys',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='ActionLatency',x='LeagueIndex',data=starcraft)
plt.show()
###Output
_____no_output_____
###Markdown
The above figures confirm what we suspected: the stats that have a meaningful impact on Number of PACs also have a similar impact on League Index. This seems to suggest that to climb in league rating, a player should be taking more actions, moving their camera around the map more frequently, and assigning more units to hotkeys. However, these gaps in stats appear to get more extreme beyond League Index 4, so let's subset our data and see if there are higher correlations for other categories at lower ranks.
###Code
bad_craft = starcraft[starcraft['LeagueIndex'] <= 4]
bad_craft.shape
sns.heatmap(data=bad_craft.corr())
bad_craft.corr()
###Output
_____no_output_____
###Markdown
We can see that in general our correlations have moved towards 0; this is expected, as we have less variation in League Index. Due to this expected decrease, any categories whose correlation numbers increased are worth exploring more deeply. In this case that means total hours and complex units made (the quick check at the top of the next cell makes these increases explicit). We will plot them as bar plots across all 7 league indexes below.
###Code
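# Added quick check (an illustrative sketch; `starcraft` and `bad_craft` are the DataFrames defined above):
# compare each column's correlation with LeagueIndex before and after subsetting, so the columns
# whose absolute correlation increased despite the reduced variance stand out.
corr_full = starcraft.corr()['LeagueIndex']
corr_low = bad_craft.corr()['LeagueIndex']
print((corr_low.abs() - corr_full.abs()).sort_values(ascending=False).head())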
_ = sns.barplot(y='TotalHours',x='LeagueIndex',data=starcraft,ci=None)
plt.show()
_ = sns.barplot(y='ComplexUnitsMade',x='LeagueIndex',data=starcraft,ci=None)
plt.show()
###Output
_____no_output_____ |
code/EDA/1.EDA_Session.ipynb | ###Markdown
Session: information about the sessions of visitors who purchased a product during the analysis period (2018.04~2018.09). Non-members and records lost during the collection process are said to have been excluded.
- CLNT_ID: unique ID assigned to one visitor cookie (a different browser or device means a different ID)
- SESS_ID: session ID; a single CLNT_ID can be issued multiple session IDs
- SESS_SEQ: session sequence number, i.e. which of the CLNT_ID's sessions this SESS_ID is
- SESS_DT: session date (YYYYMMDD format)
- TOT_PAG_VIEW_CT: total page view count within the session (how many pages were opened)
- TOT_SESS_HR_V: total session time (unit: seconds), i.e. the total time spent in the session
- DVC_CTG_NM: device type, desktop=1 / mobile=2 / tablet=3
- ZON_NM: region, major category (session-based), province level
- CITY_NM: region, middle category (session-based), city level
###Code
sess = pd.read_csv("../../data/05_Session.csv")
sess.tail()
for i in sess.columns[2:]:
print("column : {}, unique count : {}, data-type : {}".format(i, sess[i].nunique(), sess[i].dtype))
print(sess[i].unique())
print()
sess[sess.CLNT_ID==3573758]
###Output
_____no_output_____
###Markdown
Questions
- If session IDs are sequential, shouldn't SESS_SEQ=37 mean sessions 1~37 all appear? Why are some missing in between? $\rightarrow$ Only the sessions with a product purchase were recorded (resolved).
- Couldn't we find out which devices each age group connected from by joining with the custom (customer) table? The same goes for region!
- Couldn't we compute a purchase cycle from SESS_DT?
- Does the table above mean that a total of 26 pages were viewed over about an hour (3,711 seconds) before the purchase was made?
- Is SESS_SEQ meaningful? Is it needed?
###Code
# Let's look at the distributions of TOT_PAG_VIEW_CT and TOT_SESS_HR_V
# How should the NaN values in TOT_PAG_VIEW_CT be handled?
# Convert TOT_SESS_HR_V from object to float (it is object because of the NaN values and comma formatting)
sess['TOT_SESS_HR_V'] = sess['TOT_SESS_HR_V'].map(lambda x: float(''.join(x.split(','))) if ',' in str(x) else float(x))
pd.options.display.float_format = '{:.2f}'.format
sess[['TOT_PAG_VIEW_CT', 'TOT_SESS_HR_V']].describe()
# Handle NaN values
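# An illustrative sketch (added; not necessarily the original author's choice) for two of the open questions above:
# (1) impute the missing page-view counts with the median, which is robust to the long right tail;
# (2) estimate a per-visitor purchase cycle as the gap in days between consecutive session dates.
sess['TOT_PAG_VIEW_CT'] = sess['TOT_PAG_VIEW_CT'].fillna(sess['TOT_PAG_VIEW_CT'].median())
sess['SESS_DT'] = pd.to_datetime(sess['SESS_DT'].astype(str), format='%Y%m%d')
purchase_cycle = sess.sort_values(['CLNT_ID', 'SESS_DT']).groupby('CLNT_ID')['SESS_DT'].diff().dt.days
purchase_cycle.describe()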
###Output
_____no_output_____ |
ipea/iterative-phase-estimation-algorithm.ipynb | ###Markdown
Iterative Phase Estimation
Setup
First, make sure that you have the latest version of Qiskit installed. To upgrade your Qiskit package, run the following command:
```bash
pip install --upgrade qiskit
```
Get an API key from IonQ. This will be used by the IonQ provider inside Qiskit to submit circuits to the IonQ platform.
After securing an API key, install the python package `qiskit_ionq` using `pip`:
```bash
pip install qiskit_ionq
```
(IonQ's adapter for Qiskit is currently in private beta -- your feedback is welcomed!)
(Optional) Extra Dependencies
Some examples use additional Python dependencies; please make sure to `pip install` them as needed.
Dependencies:
* `matplotlib`: To run `qiskit.visualization.plot_histogram`.
**NOTE**: The provider expects an API key to be supplied via the `token` keyword argument to its constructor. If no token is directly provided, the provider will check for one in the `QISKIT_IONQ_API_TOKEN` environment variable.
Now that the Python package has been installed, you can import and instantiate the provider:
###Code
#import Aer here, before calling qiskit_ionq_provider
from qiskit import Aer
from qiskit_ionq import IonQProvider
#Call provider and set token value
provider = IonQProvider(token='my token')
###Output
_____no_output_____
###Markdown
The `provider` instance can now be used to create and submit circuits to IonQ.
Backend Types
The IonQ provider supports two backend types:
* `ionq_simulator`: IonQ's simulator backend.
* `ionq_qpu`: IonQ's QPU backend.
To view all current backend types, use the `.backends` property on the provider instance:
###Code
provider.backends()
###Output
_____no_output_____
###Markdown
Why do I care about the Iterative Phase Estimation Algorithm (IPEA)? What can you do with it?
Recall your linear algebra knowledge. More specifically, recall the concepts of eigenvalues and eigenvectors. Once you have these two notions solidified, consider a unitary operator $U$ and a state $\left|\psi\right>$ such that the following relation is true: $$U\left|\psi\right>=e^{2\pi i\phi}\left|\psi\right>$$In other words, $U$ has eigenvector $\left|\psi\right>$ with corresponding eigenvalue $e^{2\pi i\phi}$. $\phi$ is a real number, and it is our job to find out what its value is. That's it! That's the problem we are trying to solve. And we will solve it using this algorithm known as Iterative Phase Estimation.
Okay, what is this algorithm and how do we use it to solve this problem?
At this point, I will provide a peripheral explanation of what the algorithm essentially does. The detailed explanation will follow as I actually code the algorithm. Before I give the basic summary though, keep the following point in mind. IMPORTANT: We assume that $\phi$, the value we are trying to find, can be expressed as $\frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$. Here each of the $\phi_k$'s is either 0 or 1. Another way of thinking about this key assumption is that $\phi_1\phi_2\phi_3...\phi_n$ is the binary representation of $2^n\phi$ and $n$ is the number of iterations that we do.
Now for the general working: as the name of the algorithm suggests, there will be several iterations. Consider iteration k. The goal of this iteration will be to determine $\phi_{n-k+1}$ in the expression for $\phi$ above. To do this, the algorithm uses a circuit with two qubits: an auxiliary qubit and a main qubit. What will end up happening in every iteration is that $U$ will be applied to the main qubit in such a way that the auxiliary qubit's state, upon measurement, will collapse into $\phi_{n-k+1}$.
Isn't there the standard Quantum Phase Estimation algorithm that does the same thing with just one iteration?
Before we start, let me answer this good question. Yes, there is the standard Quantum Phase Estimation algorithm (often shortened to QPE) that solves this very problem with one iteration. Why aren't we using that algorithm if it will get the job done faster? Well, there is a trade-off one has to consider while choosing between the two algorithms. Essentially, the QPEA uses a larger circuit with more qubits, and with more qubits comes more cost in the form of hardware and noise. In contrast, the IPEA just uses two qubits (auxiliary and main). But, of course, where this algorithm loses out is in the number of iterations.
Okay, now let us see how the IPEA works exactly! We are going to try to deduce the phase of the T-gate (our $U = T$). This gate has the following matrix: $$T = \begin{bmatrix}1 & 0\\0 & e^{i\frac{\pi}{4}}\end{bmatrix}$$which clearly tells you that the state $\left|1\right>$ is an eigenstate with eigenvalue $e^{i\frac{\pi}{4}}$. Let's see how our algorithm tells us this. Keep in mind that $\phi = \frac{1}{8}$ in this case because the algorithm gives us $\phi$ in the expression $2\pi i \phi$, and for the T-gate, $2\pi i \phi = i\frac{\pi}{4} \implies \phi = \frac{1}{8}$.
First, we import every library/module that will be needed.
###Code
from qiskit import *
#import qiskit.aqua as aqua
#from qiskit.quantum_info import Pauli
#from qiskit.aqua.operators.primitive_ops import PauliOp
from qiskit.circuit.library import PhaseEstimation
from qiskit import QuantumCircuit
from matplotlib import pyplot as plt
import numpy as np
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.drivers import Molecule
###Output
/Users/abhay/opt/anaconda3/lib/python3.8/site-packages/qiskit/chemistry/__init__.py:170: DeprecationWarning: The package qiskit.chemistry is deprecated. It was moved/refactored to qiskit_nature (pip install qiskit-nature). For more information see <https://github.com/Qiskit/qiskit-aqua/blob/main/README.md#migration-guide>
warn_package('chemistry', 'qiskit_nature', 'qiskit-nature')
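###Markdown
Before building any circuits, here is a quick numerical sanity check (an added illustration, not part of the original walkthrough) that $\left|1\right>$ really is an eigenvector of the T-gate with eigenvalue $e^{i\frac{\pi}{4}}$, i.e. $\phi = \frac{1}{8}$:
###Code
# Plain-numpy check (illustrative only): T|1> = e^{i*pi/4}|1>, so phi = 1/8
T_matrix = np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]])
ket_one = np.array([0, 1])
print(T_matrix @ ket_one)                               # [0, e^{i*pi/4}]
print(np.angle((T_matrix @ ket_one)[1]) / (2 * np.pi))  # 0.125
###Output
_____no_output_____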
###Markdown
Now, we write the function `buildControlledT`. As the name suggests, it creates our T-gate and applies it in that special way I alluded to earlier in the basic summary of the IPEA. An even simpler way of describing this function would be that this function performs one iteration of the IPEA. Let's go into more detail and take a look at what happens within one iteration (this description is meant not only to delineate the working of the IPEA, but also to help tracing the code below easier. I highly suggest you read each bullet point, and then find in the code below where this is being implemented before moving on to the next point)- - The first thing this function does is create and initialise the circuit. In every iteration, the auxiliary qubit ($q_0$ in our case) will be initialized in the state $\left |+\right> = \frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)$, and the other qubit ($q_1$) will be initialised in the state $\left|\psi\right>$ (this is the eigenvector of $U$ from above; in our case, $U=T$ and $\left|\psi\right>=\left|1\right>$). So at this point, our collective state is $\left |+\right> \otimes \left|\psi\right>$ with $\left|\psi\right> = \left|1\right>$ in our case- It then applies the CT (controlled-T) gate to the circuit 2m times, where the input m is a number that will range from 0 to $n - 1$, where $n$ is the index of the last $\phi_k$ in the expression $\phi = \frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$. In the kth iteration, we will apply the T-gate $2^{n-k}$ times. The CT gate functions exactly like a CNOT gate - the only difference is that if the control qubit, in this case always $q_0$, is in state $\left|1\right>$, the $T$ gate will be applied to the target qubit instead of the $X$ gate. The target qubit in our case will be $q_1$. Now, let's see what happens when this gate is applied 2m times in terms of the quantum mechanics: We were in the state $\left |+\right> \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$. Let's see what happens to that state for different values of m - If m = 0, then the T gate is applied $2^0 = 1$ time. So $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n})}\left|1\right>) \otimes \left|\psi\right>$ If m = 1, $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2\times2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\times2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\phi_1 + \frac{\phi_2}{2} + \frac{\phi_3}{4} + ... + \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i\phi_1}e^{2\pi i(\frac{\phi_2}{2} + \frac{\phi_3}{4} + ... + \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\frac{\phi_2}{2} + \frac{\phi_3}{4} + ... 
+ \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right>$ If m = n - 1, $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2^{n-1}\times2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{n-1}\times2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(2^{n-2}\phi_1 + 2^{n-3}\phi_2 + 2^{n-4}\phi_3 + ... + 2^{-1}\phi_n})\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{n-2}\times2\pi i\phi_1}e^{2^{n-3}\times2\pi i\phi_2}...e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right> $- The function then performs a phase correction. Why do we need this phase correction? Well, first note the following important point, and then we'll get back to that question : if we apply the T-gate to the main qubit $2^{n - 1}$ times, as we will in the first iteration of the IPEA, then m = $n-1$, and as shown above, our circuit's quantum state will be $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right>$. In this iteration, our goal is to find $\phi_n$. If $\phi_n = 0$, then this state is just $\left |+\right> \otimes \left|\psi\right>$, and if $\phi_n = 1$, then it's $\left |-\right> \otimes \left|\psi\right>$. This means that if we measure the auxiliary qubit in the x-basis (the basis {$\left |-\right>, \left |+\right>$}), the auxiliary qubit will become $\phi_n$. This is exactly what the "inverse QFT" in the code below is. It is using a Hadamard gate to convert the state $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right>$ to either $\left |1\right> \otimes \left|\psi\right>$ if $\phi_n = 1$ (in which case measuring qubit 0 will yield 1), or $\left |0\right> \otimes \left|\psi\right>$ if $\phi_n = 0$ (in which case measuring qubit 0 will yield 0).Now, back to the original question - why do we need this phase correction? Imagine we apply the T-gate to the main qubit not $2^{n - 1}$ times, but $2^{n - 2}$ times, as we will in the second iteration of the IPEA. Note that in this iteration, our goal will be to find $\phi_{n-1}$. The state will go from $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ to $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i (\frac{\phi_{n-1}}{2} + \frac{\phi_n}{4})}\left|1\right>)\otimes \left|\psi\right>$. But ideally, we would like the state to be $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i (\frac{\phi_{n-1}}{2})}\left|1\right>)\otimes \left|\psi\right>$. That's because if that were the state, all it would take is a simple measurement in the x-basis for the auxiliary qubit to collapse to whatever value $\phi_{n-1}$ held (as shown above in the case of the first iteration). So, how do we get the state of the circuit into the ideal state? We can just apply the rotation operator $R_z(\theta)$ with $\theta = \frac{-2\pi\phi_n}{4}$. You can work out the effect of applying this operator to qubit 0. The state that will result will be $e^{i\frac{\pi\phi_n}{4}}\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i \frac{\phi_{n-1}}{2}}\left|1\right>)$. The overall phase can be ignored, and we have the ideal state. This is why we need the phase correction. 
To remove these unwanted phases and create the state to which only a measurement in the x-basis is necessary to complete the iteration. Generally speaking, for iteration k, $\theta = -2\pi\times(\frac{\phi_{n-k+2}}{4} + \frac{\phi_{n-k+3}}{8} + ... + \frac{\phi_n}{2^n})$- Finally, the function does this inverse Quantum Fourier Transform (QFT), which is nothing but a measurement in the Hadamard basis as described in previous bullet point. It then returns the circuit ready for execution on a quantum computer.
###Code
def buildControlledT(p, m):
# initialize the circuit
qc = QuantumCircuit(2, 1)
# Hardmard on ancilla, now in |+>
qc.h(0)
# initialize to |1>
qc.x(1)
# applying T gate to qubit 1
for i in range(2**m):
qc.cp(np.pi/4, 0, 1)
# phase correction
qc.rz(p, 0)
# inverse QFT (in other words, just measuring in the x-basis)
qc.h(0)
qc.measure([0],[0])
return qc
###Output
_____no_output_____
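###Markdown
As a quick optional illustration (added, not in the original flow), we can build the circuit for a single iteration and draw it, which shows the Hadamard on the auxiliary qubit, the repeated controlled-phase ($\frac{\pi}{4}$) gates, the $R_z$ phase correction and the final measurement:
###Code
# m = 2, so the controlled-T gate is applied 2**2 = 4 times; no phase correction in this example
example_qc = buildControlledT(0.0, 2)
print(example_qc.draw(output='text'))
###Output
_____no_output_____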
###Markdown
The next function, as the name once again suggests, performs the IPEA algorithm. The above function just performed one iteration, and as you can see in the body of this function, there is a `for` loop in which `buildControlledT` is called, which implies that one iteration of this `for` loop represents one iteration of the IPEA. The `for` loop iterates k times (the input of the function). This tells us that the input k of the function signifies the number of iterations in the algorithm. But how many iterations do we want to feed in? Well, as long as $2^n\phi$ can be expressed in binary, we should be good. Remember that each iteration gives you one of the $\phi_k$'s (in particular, the kth iteration gives you $\phi_{n-k+1}$). This function does its iterations, and in each iteration, it is basically just creating the circuit with the two qubits, doing what it needs to do (the four bullet points above) to the circuit, running the circuit, recovering the result (the $\phi_k$ for that iteration), and appending it to the bits list. Once we get the bits, we can use the expression $\phi = \frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$ to find $\phi$.
###Code
def IPEA(k, backend_string):
# get backend
if backend_string == 'qpu':
backend = provider.get_backend('ionq_qpu')
elif backend_string == 'qasm':
backend = Aer.get_backend('qasm_simulator')
# bits
bits = []
# phase correction
phase = 0.0
# loop over iterations
for i in range(k-1, -1, -1):
# construct the circuit
qc = buildControlledT(phase, i)
# run the circuit
job = execute(qc, backend)
if backend_string == 'qpu':
from qiskit.providers.jobstatus import JobStatus
import time
# Check if job is done
while job.status() is not JobStatus.DONE:
print("Job status is", job.status() )
time.sleep(60)
# grab a coffee! This can take up to a few minutes.
# once we break out of that while loop, we know our job is finished
print("Job status is", job.status() )
print(job.get_counts()) # these counts are the “true” counts from the actual QPU Run
# get result
result = job.result()
# get current bit
this_bit = int(max(result.get_counts(), key=result.get_counts().get))
print(result.get_counts())
bits.append(this_bit)
# update phase correction
phase /= 2
phase -= (2 * np.pi * this_bit / 4.0)
return bits
###Output
_____no_output_____
###Markdown
If you have made it this far, then you are doing very well! This algorithm is complicated. Good job! The final function that we will have to define is `eig_from_bits`, which will take in the list of $\phi_k$'s that the above function returns and reconstruct the phase $\phi$ associated with $\left|\psi\right>$ (the corresponding eigenvalue is then $e^{2\pi i\phi}$).
###Code
def eig_from_bits(bits):
eig = 0.
m = len(bits)
# loop over all bits
for k in range(len(bits)):
eig += bits[k] / (2**(m-k))
#eig *= 2*np.pi
return eig
###Output
_____no_output_____
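###Markdown
As a quick added check, the bit list $[0, 0, 1, 0, 0]$ (least-significant bit first, which is the order `IPEA` produces) reconstructs $\phi = \frac{1}{8}$:
###Code
print(eig_from_bits([0, 0, 1, 0, 0]))  # expected 0.125
###Output
_____no_output_____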
###Markdown
You have now understood the IPEA! Let's actually perform it and see if we can get our $\frac{1}{8}$
###Code
# perform IPEA
backend = 'qasm'
bits = IPEA(5, backend)
print(bits)
# re-construct energy
eig = eig_from_bits(bits)
print(eig)
###Output
{'0': 1024}
{'0': 1024}
{'1': 1024}
{'0': 1024}
{'0': 1024}
[0, 0, 1, 0, 0]
0.125
###Markdown
It worked! Let's see if we can understand the effect of the choice of input `n` on the result obtained.
###Code
#perform IPEA with different values of n
n_values = []
eig_values = []
for i in range(1, 8):
n_values.append(i)
# perform IPEA
backend = 'qasm'
bits = IPEA(i, backend)
# re-construct energy
eig = eig_from_bits(bits)
eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
###Output
{'1': 147, '0': 877}
{'1': 521, '0': 503}
{'1': 147, '0': 877}
{'1': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'1': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'1': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'1': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'0': 1024}
{'1': 1024}
{'0': 1024}
{'0': 1024}
###Markdown
Now, let's try the same thing on actual IonQ hardware
###Code
# perform IPEA
backend = 'qpu'
bits = IPEA(5, backend)
print(bits)
# re-construct energy
eig = eig_from_bits(bits)
print(eig)
#perform IPEA with different values of n
n_values = []
eig_values = []
for i in range(1, 8):
n_values.append(i)
# perform IPEA
backend = 'qpu'
bits = IPEA(i, backend)
# re-construct energy
eig = eig_from_bits(bits)
eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
###Output
_____no_output_____ |
m3_presentation.ipynb | ###Markdown
We would like to work with a prediction/classification problem. We are interested in seeing which font color to use on different background colors (in RGB values).
Say we choose a specific color like this one; we would like to make a model which can predict which font color is best suited for it with regard to its luminance (we are only working with white or black fonts).
There is a formula for solving this problem. The correct font color for the background is decided by how bright the background color is: if the background color luminance > 0.5 you should choose a dark font, and of course if the background color luminance < 0.5 you should choose a white font. This formula can be found on StackOverflow: https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color1855903
But what if we didn't have this formula, or it simply hadn't been discovered yet? In that case, a neural network can help us. We therefore try to make a model which can solve this prediction problem for us using a neural network.
Credits for the idea and illustrations go to: Thomas Nield (Youtube: https://www.youtube.com/watch?v=tAioWlhKA90)
###Code
#First we imported the needed libraries
import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import EarlyStopping
###Output
_____no_output_____
###Markdown
Here, we download our training dataset.
###Code
# Download the color training dataset
!wget https://raw.githubusercontent.com/thomasnield/kotlin_simple_neural_network/master/src/main/resources/color_training_set.csv
predictors = pd.read_csv('color_training_set.csv')
predictors.columns = ['r', 'g', 'b']
predictors.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1344 entries, 0 to 1343
Data columns (total 3 columns):
r 1344 non-null int64
g 1344 non-null int64
b 1344 non-null int64
dtypes: int64(3)
memory usage: 31.6 KB
###Markdown
As can be seen, the training dataset consists of 1344 different background colors in RGB values. We will use these to train our model.
Next up, we define a function to calculate the optimal font color using the formula we described before. This will be used to classify which font color is optimal for each of our 1344 background colors.
Of course, this is a bit of a cheat since we supposedly haven't discovered this formula yet, but the equivalent would be to manually classify each background color by hand. We simply do this to save time, yay for being lazy! 😏
###Code
# Predict the optimal shade using the formula. This function takes a list as its input.
def FormulaicPredict(color):
r = color[0]
g = color[1]
b = color[2]
d = 0
l = 0
luminance = (0.299 * r + 0.587 * g + 0.114 * b)/255
if luminance > 0.5:
d = 1 # bright background, black font
l = 0
else:
d = 0 # dark background, white font
l = 1
return pd.Series([d,l])
###Output
_____no_output_____
###Markdown
We will now apply the above formula to each row in our training dataset. With this, we create a new DataFrame with the predictions for each background color.
###Code
target = predictors.apply(FormulaicPredict, axis=1)
target.columns = ['dark', 'light']
target.head()
###Output
_____no_output_____
###Markdown
We start building the architecture for our model. First we set up our model with the Sequential() function. We add our first layer with 3 nodes, using the 'relu' activation, which is one of the most commonly used activation functions.
Here is a picture of some of the other activation functions, just to give an understanding of how they work.
Here we see a picture of how the different layers of the model work with the relu activation function: we take our nodes' values, multiply them by the weights, and add them up in the next layer, then multiply and add again for a sum. This should give the network the values it needs to predict which font color to use based on which background color it sees. These weights change over time as the model gets more information. A tiny numerical illustration of this weighted-sum-plus-ReLU step is shown below.
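###Code
# Tiny numerical illustration (added, not part of the model): one dense-layer forward pass with ReLU.
# x is an example RGB input scaled to [0, 1]; the weights W and bias b are made-up numbers.
x = np.array([0.2, 0.4, 0.6])
W = np.array([[0.5, -0.3, 0.1],
              [0.2, 0.8, -0.5],
              [-0.4, 0.6, 0.3]])
b = np.array([0.1, 0.0, -0.2])
print(np.maximum(0, W @ x + b))   # ReLU(W x + b): the values the three nodes pass on
###Output
_____no_output_____
###Markdown
Now we define, compile and train the actual Keras model.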
###Code
np.random.seed(10) # Set to 10 for a good result, 8 for a worse one
model = Sequential()
model.add(Dense(3, activation='relu', input_dim=3))
model.add(Dense(3, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
early_stopping_monitor = EarlyStopping(patience=3)
# Fit the model
history = model.fit(predictors, target, validation_split=0.3, epochs=250, callbacks=[early_stopping_monitor])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_85 (Dense) (None, 3) 12
_________________________________________________________________
dense_86 (Dense) (None, 3) 12
_________________________________________________________________
dense_87 (Dense) (None, 2) 8
=================================================================
Total params: 32
Trainable params: 32
Non-trainable params: 0
_________________________________________________________________
###Markdown
Let's plot the results of our trained model. Here we plot the accuracy and loss over time.
###Code
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn')
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
There is room for optimizing the model, and several things are worth thinking about: how many layers are needed, how many nodes are needed in every layer, and which activation function to use. You can also tweak the model's learning rate and the way the error is measured. A minimal sketch of such a tweak is shown below (the wider layers and learning rate there are arbitrary choices, not tuned values).
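###Code
# Hedged sketch (added, not the author's tuned model): same idea, but wider layers and an explicit
# Adam optimizer with a custom learning rate; the specific numbers here are arbitrary examples.
from keras.optimizers import Adam
tuned_model = Sequential()
tuned_model.add(Dense(8, activation='relu', input_dim=3))
tuned_model.add(Dense(8, activation='relu'))
tuned_model.add(Dense(2, activation='sigmoid'))
tuned_model.compile(optimizer=Adam(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Let's quickly try predicting with a black background color.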
###Code
bgcolor = pd.DataFrame([[0,0,0]])
prediction = model.predict(bgcolor)
prediction
###Output
_____no_output_____
###Markdown
The left value is for dark, the right value is for light. Ideally, the value for the preferred font color would be 1, but it would require a lot of optimization to reach that, as that would be a perfectly trained model. To evaluate which font color we should go with, we simply check which of these values is the highest instead, as in the small example below.
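###Code
# Tiny illustration (added sketch): turn the two output values into a font-color label
font_labels = ['dark', 'light']
print(font_labels[int(np.argmax(prediction[0]))])
###Output
_____no_output_____
###Markdown
Interactive prediction
The following code won't run without Jupyter Notebook, since we use some features specific to it.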
###Code
from IPython.core.display import display, HTML
def predictColor(r, g, b):
bgcolor = pd.DataFrame([[r,g,b]])
bgcolor_hex = '#%02x%02x%02x' % (r, g, b)
black = '#000'
white = '#fff'
fontcolor = ''
prediction = model.predict(bgcolor)
if prediction[0][0] > prediction[0][1]:
fontcolor = black
else:
fontcolor = white
display(HTML('<div style="background-color:{0}; color:{1};">SOME TEXT</div>'.format(bgcolor_hex, fontcolor)))
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
colorpicker = widgets.ColorPicker(
concise=False,
description='Pick a color',
value='blue',
disabled=False
)
display(colorpicker)
out = widgets.Output()
display(out)
@out.capture()
def on_change(change):
if change['type'] == 'change' and change['name'] == 'value':
h = colorpicker.value
rgb = tuple(int(h.lstrip('#')[i:i+2], 16) for i in (0, 2 ,4))
predictColor(rgb[0], rgb[1], rgb[2])
colorpicker.observe(on_change)
###Output
_____no_output_____ |
jupyter_notebooks/on_model.ipynb | ###Markdown
ON Model
###Code
import numpy as np
import pandas as pd
from pystatplottools.pdf_env.loading_figure_mode import loading_figure_mode
fma, plt = loading_figure_mode(develop=True) # develop=False will export the generated figures as pngs into "./data/RectangleData"
plt.style.use('seaborn-dark-palette')
if 'root_dir' not in locals():
# Navigate to simulations/ONModel directory as simulation root directory
import os
os.chdir("../simulations/ONModel")
root_dir = os.getcwd()
# To be able to compute custom measures
import sys
sys.path.append("./../../python_scripts")
mcmc_model_dir = "ONModelMetropolis/"
mcmc_data_dir = root_dir + "/data/" + mcmc_model_dir
mcmc_results_dir = root_dir + "/results/" + mcmc_model_dir
data_dir = root_dir + "/data/" + mcmc_model_dir
results_dir = root_dir + "/results/" + mcmc_model_dir
###Output
_____no_output_____
###Markdown
MCMC Results Expectation Values
###Code
from mcmctools.modes.expectation_value import load_expectation_value_results
expectation_values = load_expectation_value_results(files_dir="ONModelMetropolis")
# Insert Kappa as column (as floating number)
expectation_values.insert(0, "Kappa", expectation_values.index.values.astype(np.float))
expectation_values = expectation_values.sort_values("Kappa")
expectation_values
# Computation of the total mean and of the two point correlator
total_mean = expectation_values.loc[:, ("ExpVal", "Mean", slice(None))].values.mean(axis=1)
mean = expectation_values.loc[:, ("ExpVal", "Mean", slice(None))].values
two_point_correlator = expectation_values["ExpVal", "TwoPointCorrelation", ""].values - np.power(mean, 2.0).sum(axis=1)
fig, axes = fma.newfig(1.4, ncols=4, figsize=(15, 4))
axes[0].plot(expectation_values["Kappa"], total_mean, "o-")
axes[0].set_xlabel("Kappa")
axes[0].set_ylabel("Mean")
axes[1].plot(expectation_values["Kappa"], expectation_values["ExpVal", "SecondMoment", ""], "o-")
axes[1].set_xlabel("Kappa")
axes[1].set_ylabel("SecondMoment")
axes[2].plot(expectation_values["Kappa"], expectation_values["ExpVal", "Energy", ""], "o-")
axes[2].set_xlabel("Kappa")
axes[2].set_ylabel("Energy")
axes[3].plot(expectation_values["Kappa"], two_point_correlator, "o-")
axes[3].set_xlabel("Kappa")
axes[3].set_ylabel("Two Point Correlator")
plt.tight_layout()
fma.savefig(results_dir, "expectation_values")
###Output
_____no_output_____
###Markdown
Configurations as Pytorch Dataset
We show how the MCMC configurations can be stored and loaded as a .pt file. (See also python_scripts/loading_configurations.py and python_scripts/pytorch_data_generation.py.)
Preparation
###Code
data_generator_args = {
# ConfigDataGenerator Args
"data_type": "target_param",
# Args for ConfigurationLoader
"path": mcmc_data_dir,
"total_number_of_data_per_file": 10000,
"identifier": "expectation_value",
"running_parameter": "kappa",
"chunksize": 100 # If no chunksize is given, all data is loaded at once
}
# Prepare in memory dataset
from pystatplottools.pytorch_data_generation.data_generation.datagenerationroutines import prepare_in_memory_dataset
from mcmctools.pytorch.data_generation.datagenerationroutines import data_generator_factory
prepare_in_memory_dataset(
root=data_dir,
batch_size=89,
data_generator_args=data_generator_args,
data_generator_name="BatchConfigDataGenerator",
data_generator_factory=data_generator_factory
)
###Output
Random seed is set by np.random.seed()
Write new data_config into file - Data will be generated based on the new data_config file
###Markdown
Generating and Loading the Dataset
###Code
# Load in memory dataset
from pystatplottools.pytorch_data_generation.data_generation.datagenerationroutines import load_in_memory_dataset
# The dataset is generated and stored as a .pt file in the data_dir/data directory the first time this function is called. Otherwise the .pt is loaded.
data_loader = load_in_memory_dataset(
root=data_dir, batch_size=128, data_generator_factory=data_generator_factory, slices=None, shuffle=True,
num_workers=0, rebuild=False
# sample_data_generator_name="ConfigDataGenerator" # optional: for a generation of new samples
)
# Load training data
for batch_idx, batch in enumerate(data_loader):
data, target = batch
# print(batch_idx, len(data))
###Output
Processing...
Random seed is set by np.random.seed()
Done!
###Markdown
Inspection of the Dataset - Sample Visualization
###Code
from pystatplottools.visualization import sample_visualization
config_dim = (8, 8) # Dimension of the data
num_std=1
# Random samples
config, label = data_loader.dataset.get_random_sample()
batch, batch_label = data_loader.dataset.get_random_batch(108)
# Single Sample
sample_visualization.fd_im_single_sample(sample=config, label=label, config_dim=config_dim, num_std=num_std,
fma=fma, filename="single_sample", directory=results_dir, figsize=(10, 4));
# Batch with labels
sample_visualization.fd_im_batch(batch, batch_labels=batch_label, num_samples=25, dim=(5, 5),
config_dim=config_dim, num_std=num_std,
fma=fma, filename="batch", directory=results_dir, width=2.3, ratio=1.0, figsize=(12, 12));
# Batch grid
sample_visualization.fd_im_batch_grid(batch, config_dim=config_dim, num_std=num_std,
fma=fma, filename="batch_grid", directory=results_dir);
###Output
_____no_output_____ |
examples/KnativeModOps/manage_model_publish_privateDockerdest.ipynb | ###Markdown
This notebook helps manage SAS Model Publish destinations; it handles private Docker destinations. The following cells set default values.
###Code
import sys
sys.path.append('..')
import mmAuthorization
import getpass
import requests
import json, os, pprint
import base64
###Output
_____no_output_____
###Markdown
The following defines a few methods and config values for later reuse.
###Code
def list_destinations(destination_url, auth_token):
headers = {
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + auth_token
}
print("List the destinations...")
try:
response = requests.get(destination_url, headers=headers,verify=False)
jsondata = response.json()
destinations = jsondata['items']
if len(destinations) > 0:
for destination in destinations:
print(destination["id"])
print(destination["name"])
print("===========")
except:
raise RuntimeError("ERROR: Could not get a destination list.")
public_ip = "PUBLIC_IP"
host_name = "fsbuwlm.fsbudev-openstack-k8s.unx.sas.com"
port = "PORT"
host_url="https://" + host_name
destination_url = host_url + "/modelPublish/destinations/"
modelrepo_url = host_url + "/modelRepository/models/"
publishmodel_url = host_url + "/modelPublish/models"
domains_url = host_url + "/credentials/domains"
print(host_url)
###Output
_____no_output_____
###Markdown
The following gets an auth token.
###Code
mm_auth = mmAuthorization.mmAuthorization("myAuth")
admin_userId = 'whoami'
user_passwd = getpass.getpass()
admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)
credential_admin_headers = {
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
credential_domain_headers = {
"If-Match":"false",
"Content-Type":"application/json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
credential_user_headers = {
"If-Match":"false",
"Content-Type":"application/json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
destination_harbor_headers = {
"If-Match":"false",
"Content-Type":"application/vnd.sas.models.publishing.destination.privatedocker+json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
print(admin_auth_token)
##### create Domain
domain_name = "fsbu_domain_1"
description = 'fsbu domain 1'
my_domain_url = domains_url + "/" + domain_name
domain_attrs = {
"id":domain_name,
"type":"base64",
"description": description
}
domain = requests.put(my_domain_url,
data=json.dumps(domain_attrs), headers=credential_domain_headers, verify=False)
print(domain)
pprint.pprint(domain.json())
### Create credential
####
user_credential_name = admin_userId
my_credential_url = my_domain_url + "/users/" + user_credential_name
userId = "fsbu_modeluser"
password = "fsbu_modeluser"
encoded_userId = str(base64.b64encode(userId.encode("utf-8")), "utf-8")
encoded_password = str(base64.b64encode(password.encode("utf-8")), "utf-8")
credential_attrs = {
"domainId":domain_name,
"identityType":"user",
"identityId":user_credential_name,
"domainType":"base64",
"properties":{"dockerRegistryUserId":encoded_userId},
"secrets":{"dockerRegistryPasswd":encoded_password}
}
#credential_attrs = {
# "domainId":domain_name,
# "identityType":"user",
# "identityId":user_credential_name,
# "domainType":"base64"
#}
credential = requests.put(my_credential_url,
data=json.dumps(credential_attrs), headers=credential_user_headers,verify=False)
print(credential)
pprint.pprint(credential.json())
# Creates a new destination, expecting a response code of 201.
dest_name = "fsbu_dest_docker_1"
domainName = "fsbu_domain_1"
baseRepoUrl = "docker-repo.company.com:5003"
# no need of docker host in 1.1.4 since we have kaniko.
destination_attrs = {
"name":dest_name,
"destinationType":"privateDocker",
"properties": [{"name": "credDomainId",
"value": domainName},
{"name": "baseRepoUrl",
"value": baseRepoUrl}
]
}
destination = requests.post(destination_url, data=json.dumps(destination_attrs), headers=destination_harbor_headers, verify=False)
print(destination)
list_destinations(destination_url, admin_auth_token)
deletedURL = destination_url + dest_name
destination = requests.delete(deletedURL, headers=credential_admin_headers)
print(deletedURL)
print(destination)
pprint.pprint(destination.json())
###Output
_____no_output_____ |
project1_main.ipynb | ###Markdown
Code of "Things you need to know to raise your Airbnb review score in Seattle" project
Three business questions investigated by this code:
Question 1: are the review scores affected by how the hosts described their Airbnbs?
Question 2: are the review scores affected by how the hosts described the neighborhood of their Airbnbs?
Question 3: are the review scores affected by objective factors of the listings like price, room type, bed type, etc.?
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from collections import defaultdict
%matplotlib inline
# import dataset
df_listings = pd.read_csv('./listings.csv')
# run this cell to take a look at the first 5 rows of the data
df_listings.head()
###Output
_____no_output_____
###Markdown
The whole project cares about what affects the **rating scores** of an Airbnb listing, so the first step is deleting the listings with missing values in **'review_scores_rating'**.
###Code
df_new = df_listings.dropna(subset = ['review_scores_rating'], axis = 0)
# run this cell to take a look at the ditribution of the rating scores of all the Airbnb listings in Seattle.
df_new.review_scores_rating.plot(kind = 'hist');
plt.title('Review scores distribution');
plt.ylabel('Counts');
plt.xlabel('Scores')
# uncomment the following line if you want to save the figure
# plt.savefig('Rating_distribution.png', dpi = 100)
###Output
_____no_output_____
###Markdown
Comparing high rating score listings vs low rating score listings
I will first compare whether the Airbnb listings with higher rating scores are different from those with lower scores in some subjective factors -- such as how they describe the listing and the neighborhood (Questions 1 & 2). To have two groups to compare, I extract the listings with higher scores (> 75th percentile) and lower scores (< 25th percentile).
Solving Question 1
To simplify the question, I will only focus on the **adjectives** used in the descriptions. By looking through the descriptions in the column **'description'** (a description of the Airbnb listing), I got some possible adjectives, listed in the following variable **possible_adj**.
###Code
# We don't have NaN value in the 'description' variable
df_new.description.isnull().sum()
# separate data directly
y = df_new.review_scores_rating
df1_high = df_new[y > np.percentile(y,75)]
df1_low = df_new[y < np.percentile(y,25)]
possible_adj = ['charming', 'private', 'elegant', 'cozy', 'comfortable',
'clean', 'wonderful', 'beautiful', 'modern', 'great',
'functional', 'fresh', 'close', 'historic', 'quiet',
'gorgeous', 'safe', 'convenient', 'lovely', 'vintage',
'amazing', 'walkable', 'adorable', 'bright', 'light',
'new', 'spacious', 'large', 'desirable', 'popular',
'special', 'fantastic', 'fabulous']
###Output
_____no_output_____
###Markdown
Here I use (and modify) code from the Udacity class to count the number of times the above words appear in a column of a dataframe.
###Code
def count_word(df, col1, col2, look_for):
'''
Modified based on code from Udacity Data Scientist Nanodegree Lession 1.
INPUT:
df - the pandas dataframe you want to search
col1 - the column name you want to look through
col2 - the column you want to count values from
look_for - a list of strings you want to search for in each row of df[col1]
OUTPUT:
new_df - a dataframe of each look_for with the count of how often it shows up
'''
new_df = defaultdict(int)
#loop through list of ed types
for val in look_for:
#loop through rows
for idx in range(df.shape[0]):
#if the ed type is in the row add 1
if val in df[col1][idx].lower():
new_df[val] += int(df[col2][idx])
new_df = pd.DataFrame(pd.Series(new_df)).reset_index()
new_df.columns = [col1, col2]
new_df.sort_values('count', ascending=False, inplace=True)
return new_df
###Output
_____no_output_____
###Markdown
The following function preprocesses the dataframe you want to use and counts the words (e.g. adjectives) of interest by calling the **count_word** function.
###Code
def get_count(df, col = 'description', search_for = possible_adj):
'''
Modified based on code from Udacity Data Scientist Nanodegree Lession 1.
'''
df_group = df[col].value_counts().reset_index()
df_group.rename(columns={'index': 'key_words', col: 'count'}, inplace=True)
df_key_word = count_word(df_group, 'key_words', 'count', search_for)
df_key_word.set_index('key_words', inplace = True)
return df_key_word
# plot out the adjective usage in high score listings and low score listings
adj_high = get_count(df1_high)
adj_low = get_count(df1_low)
count_adj = pd.concat([adj_high, adj_low], axis=1, join='inner')
ax1 = count_adj.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True)
ax1[0].set_xlabel('adjectives', fontsize = 14)
ax1[0].set_ylabel('Counts', fontsize = 14)
ax1[0].set_title("High review score listings' adjectives usage in description", fontsize = 16);
ax1[1].set_xlabel('adjectives', fontsize = 14)
ax1[1].set_ylabel('Counts', fontsize = 14)
ax1[1].set_title("Low review score listings' adjectives usage in description", fontsize = 16);
# uncomment the following two lines to save figure
#fig = ax1[0].get_figure()
#fig.savefig('Description_difference.png', dpi = 100)
###Output
_____no_output_____
###Markdown
Answer to Question 1
It seems there is no significant difference in the adjective usage in the listing description between high rating score listings and low rating score listings -- at least the top three adjectives are the same between the two groups. Only the word "modern" seems to be used more in high rating listings.
Solving Question 2
Next, I will explore whether the **description of the neighborhood** (column **'neighborhood_overview'**) affects the rating score. Similar to Question 1, I will compare the adjective usage between high rating listings and low rating listings.
###Code
# There are NaN values in 'neighborhood_overview'
df_new.neighborhood_overview.isnull().sum()
# Delete rows with NaN in 'neighborhood_overview'
df_q2 = df_new.dropna(subset = ['neighborhood_overview'], axis = 0)
# separate data into high rating group and low rating group
y_q2 = df_q2.review_scores_rating
df2_high = df_q2[y_q2 > np.percentile(y_q2,75)]
df2_low = df_q2[y_q2 < np.percentile(y_q2,25)]
# use the get_count function to sort out the adjective usage
adj_high_neighbor = get_count(df2_high, col = 'neighborhood_overview')
adj_low_neighbor = get_count(df2_low, col = 'neighborhood_overview')
count_adj_neighbor = pd.concat([adj_high_neighbor, adj_low_neighbor], axis=1, join='inner')
ax2 = count_adj_neighbor.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True)
ax2[0].set_xlabel('adjectives', fontsize = 14)
ax2[0].set_ylabel('Counts', fontsize = 14)
ax2[0].set_title("High review score listings' adjectives usage in neighborhood description", fontsize = 16);
ax2[1].set_xlabel('adjectives', fontsize = 14)
ax2[1].set_ylabel('Counts', fontsize = 14)
ax2[1].set_title("Low review score listings' adjectives usage in neighborhood description", fontsize = 16);
# uncomment the following two lines to save figure
#fig = ax2[0].get_figure()
#fig.savefig('Neighborhood_description_difference.png', dpi = 100)
###Output
_____no_output_____
###Markdown
Again, it seems the adjectives used in the neighborhood overview are not very different between these two groups, and the top three adjectives are the same as in the listing descriptions. Another aspect of the neighborhood description is the nouns related to entertainment and daily life, such as "shopping" and "coffee". By looking through the column **'neighborhood_overview'** I extracted some daily-life-related nouns into the variable **possible_noun**. I will plot the noun usage for high rating score listings against low rating score listings.
###Code
possible_noun = ['restaurants', 'food', 'bars', 'coffee', 'cafes',
'shopping', 'grocery', 'mall', 'park', 'movie', 'music']
# use the get_count function to sort out the noun usage
n_high_neighbor = get_count(df2_high, col = 'neighborhood_overview', search_for = possible_noun)
n_low_neighbor = get_count(df2_low, col = 'neighborhood_overview', search_for = possible_noun)
count_n_neighbor = pd.concat([n_high_neighbor, n_low_neighbor], axis=1, join='inner')
ax3 = count_n_neighbor.plot.bar(legend = None, subplots=True, figsize = (10,10), grid = True)
ax3[0].set_xlabel('nouns', fontsize = 14)
ax3[0].set_ylabel('Counts', fontsize = 14)
ax3[0].set_title("High review score listings' nouns usage in neighborhood description", fontsize = 16);
ax3[1].set_xlabel('nouns', fontsize = 14)
ax3[1].set_ylabel('Counts', fontsize = 14)
ax3[1].set_title("Low review score listings' nouns usage in neighborhood description", fontsize = 16);
# uncomment the following two lines to save fig
#fig = ax3[0].get_figure()
#fig.savefig('Neighborhood_noun_difference.png', dpi = 100)
###Output
_____no_output_____
###Markdown
It seems subjective factors did not affect the review score rating. The next step is to explore the objective factors.
Solving Question 3
All the objective factors of interest include:
**Quantitative variables:**
1) **'price_per_person'**: a new column I will create by dividing 'price' by 'accommodates' for each row
2) 'security_deposit'
3) 'cleaning_fee'
**Categorical variables:**
1) 'host_response_time': within an hour, within a few hours, within a day, a few days or more
2) 'host_is_superhost': whether the host is a superhost or not, boolean variable
3) 'host_has_profile_pic': whether the host provides a profile picture or not, boolean variable
4) 'host_identity_verified': whether the host's identity is verified or not
5) 'is_location_exact': whether the location provided is accurate or not
6) 'room_type': entire home/apt, private room, shared room
7) 'bed_type': real bed, futon, pull-out sofa, airbed, couch
8) 'cancellation_policy': strict, moderate, flexible
9) 'instant_bookable': boolean
10) 'require_guest_profile_picture': boolean
11) 'require_guest_phone_verification': boolean
**Special variables:** whether the row is null or not is the information we care about.
1) 'transit': whether a transportation method is provided
2) 'host_about': whether the host provides a self-introduction
###Code
# use this cell to take a look at what variables have NaN values
df_new.isnull().sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Dealing with NaN
###Code
# for 'security_deposit' and 'cleaning_fee', replace NaN by $0, then clean the data format to make them into float
df_new.fillna(value = {'security_deposit': '$0', 'cleaning_fee': '$0'}, inplace=True)
df_new.security_deposit = df_new.security_deposit.str.lstrip('$');
df_new.cleaning_fee = df_new.cleaning_fee.str.lstrip('$');
df_new.security_deposit = df_new.security_deposit.str.replace(',', '').astype(float)
df_new.cleaning_fee = df_new.cleaning_fee.str.replace(',', '').astype(float)
# for 'price', first make it into float, then create a column "price per person"
df_new.price = df_new.price.str.lstrip('$');
df_new.price = df_new.price.str.replace(',', '').astype(float)
df_new['price_per_person'] = df_new.price/df_new.accommodates
# for 'transit' and 'host_about', use NaN information to recode them into 1 = provided (not NaN) and 0 = not provided (is NaN)
df_new.transit = df_new.transit.notnull().astype(int)
df_new.host_about = df_new.host_about.notnull().astype(int)
# for 'host_response_time', I will delete rows with NaN
df_new = df_new.dropna(subset = ['host_response_time'], axis = 0)
###Output
_____no_output_____
###Markdown
Convert categorical variables to dummy variables, recode boolean variables to '1 vs 0'
###Code
# convert boolean variables (t = true, f = false) to 1 vs 0 coding (1 = true, 0 = false)
bool_process_col = ['host_is_superhost', 'host_has_profile_pic',
'host_identity_verified', 'is_location_exact',
'instant_bookable', 'require_guest_profile_picture',
'require_guest_phone_verification']
df_new[bool_process_col] = (df_new[bool_process_col] == 't').astype(int)
# a list of categorical variables of interest
cat_cols_lst = ['host_response_time', 'room_type', 'bed_type', 'cancellation_policy']
# function to create dummy variables for categorical variables
# this code is from Udacity Data Scientist Nanodegree class
def create_dummy_df(df, cat_cols, dummy_na):
'''
INPUT:
df - pandas dataframe with categorical variables you want to dummy
cat_cols - list of strings that are associated with names of the categorical columns
dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not
OUTPUT:
df - a new dataframe that has the following characteristics:
1. contains all columns that were not specified as categorical
2. removes all the original columns in cat_cols
3. dummy columns for each of the categorical columns in cat_cols
4. if dummy_na is True - it also contains dummy columns for the NaN values
5. Use a prefix of the column name with an underscore (_) for separating
'''
for col in cat_cols:
try:
# for each cat add dummy var, drop original column
df = pd.concat([df,pd.get_dummies(df[col], prefix=col, prefix_sep='_', dummy_na=dummy_na)], axis=1)
except:
continue
return df
# select data from columns we need for question 3
col_list_needed = [ 'host_has_profile_pic', 'host_identity_verified',
'price_per_person', 'security_deposit', 'cleaning_fee',
'host_response_time', 'host_is_superhost',
'is_location_exact', 'room_type', 'host_about',
'bed_type', 'cancellation_policy', 'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification',
'review_scores_rating']
# select data in these columns
df_needed = df_new[col_list_needed]
# convert categorical variables into dummy variables
df_dummy = create_dummy_df(df_needed, cat_cols_lst, False)
df_dummy = df_dummy.drop(cat_cols_lst, axis = 1)
# linear regression model
y = df_dummy.review_scores_rating
X = df_dummy.drop('review_scores_rating', axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
lm_model = LinearRegression(normalize = True)
lm_model.fit(X_train, y_train)
#Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#Score using your model
test_score = r2_score(y_test, y_test_preds)
train_score = r2_score(y_train, y_train_preds)
print('R2 of training data is {}'.format(train_score))
print('R2 of testing data is {}'.format(test_score))
###Output
R2 of training data is 0.10459122853704617
R2 of testing data is 0.04454226708445119
###Markdown
It seems the model is overfitting a little bit. Let's try ridge regression and see if it helps.
###Code
# try ridge regression
ridge_model = Ridge(alpha = 100)
ridge_model.fit(X_train, y_train)
y_test_ridge = ridge_model.predict(X_test)
y_train_ridge = ridge_model.predict(X_train)
#Score using your model
test_score2 = r2_score(y_test, y_test_ridge)
train_score2 = r2_score(y_train, y_train_ridge)
print('R2 of training data in ridge regression is {}'.format(train_score2))
print('R2 of testing data in ridge regression is {}'.format(test_score2))
###Output
R2 of training data in ridge regression is 0.0900991732534504
R2 of testing data in ridge regression is 0.06736978959022533
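###Markdown
As an added aside (not part of the original analysis), the ridge penalty could also be chosen by scanning a few values of alpha and comparing test-set R2 on the same split:
###Code
# Hedged sketch: compare a few ridge penalties on the existing train/test split
for a in [1, 10, 100, 1000]:
    m = Ridge(alpha = a).fit(X_train, y_train)
    print('alpha = {}: test R2 = {:.4f}'.format(a, r2_score(y_test, m.predict(X_test))))
###Output
_____no_output_____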
###Markdown
Ridge regression helps to improve the situation a bit. Since the trend of the impact of these variables, as reflected by the coefficients, does not change much, I will use the result from the linear regression model. The statsmodels library provides a traditional regression method which returns the significance of the coefficients.
###Code
# use the following two lines to take a look at the coefficients of the two regression models
#ridge_model.coef_
#lm_model.coef_
# get the linear regression result summary from statsmodels OLS function
X_OLS = sm.add_constant(X_train)
mod = sm.OLS(y_train, X_OLS)
fii = mod.fit()
fii.summary2()
###Output
/Users/yeli/anaconda3/lib/python3.7/site-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
return ptp(axis=axis, out=out, **kwargs)
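###Markdown
To pull out just the coefficients whose p-values suggest statistical significance, we can filter the fitted results object (a small sketch; the 0.05 cutoff is a convention, not a requirement).
###Code
# Coefficients from the OLS fit above with p-values below 0.05
pvals = fii.pvalues
significant = pvals[pvals < 0.05].index
fii.params[significant]
###Output
_____no_output_____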
###Markdown
Plot out group comparisonUse the same method as below to plot the group comparison for any variable you are interested in. In this notebook I only keep the code for variables that show an obvious review score difference.
###Code
# separate data into two groups
y_q3 = df_needed.review_scores_rating
df3_high = df_needed[y_q3 > np.percentile(y_q3,50)]
df3_low = df_needed[y_q3 < np.percentile(y_q3,50)]
# plot numeric results first
labels = ['price/person', 'superhost percentage']
y_price = [df3_high.price_per_person.mean(), df3_low.price_per_person.mean()]
y_superhost = [df3_high.host_is_superhost.mean()*100, df3_low.host_is_superhost.mean()*100]
high_value = [y_price[0], y_superhost[0]]
low_value = [y_price[1], y_superhost[1]]
high_value_round = [round(h) for h in high_value]
low_value_round = [round(r) for r in low_value]
x = np.arange(len(labels)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, high_value_round, width, label='High review score group')
rects2 = ax.bar(x + width/2, low_value_round, width, label='Low review score group')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Values')
ax.set_title('Group comparison')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc = 3)
ax.set_ylim(0,50)
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        # Annotate on the axes the bar belongs to, so this also works inside plot_compare below.
        rect.axes.annotate('{}'.format(height),
                           xy=(rect.get_x() + rect.get_width() / 2, height),
                           xytext=(0, 3),  # 3 points vertical offset
                           textcoords="offset points",
                           ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
#fig.tight_layout()
plt.savefig('Numeric_comparison.png', dpi = 100)
###Output
_____no_output_____
###Markdown
Let's compare the room type, bed type, response time, and cancellation policy differences between the two groups.
###Code
# function to make comparison plot
def plot_compare(labels, high_vect, low_vect, title):
fig, ax = plt.subplots()
x = np.arange(len(labels))
width = 0.3
high_bar = ax.bar(x - width/2, high_vect, width, label = 'High review score group')
low_bar = ax.bar(x + width/2, low_vect, width, label = 'Low review score group')
ax.set_ylabel('Counts')
ax.set_title(title)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc = 'best')
autolabel(high_bar)
autolabel(low_bar)
save_name = title + '.png'
plt.savefig(save_name, dpi = 100)
# plot room type comparison
# labels
labels_room = ['Entire home/apt','Private room','Shared room']
# value vectors for room_type in two groups
high_vect_room = [df3_high[df3_high.room_type == col].shape[0] for col in labels_room]
low_vect_room = [df3_low[df3_low.room_type == col].shape[0] for col in labels_room]
# plot
plot_compare(labels_room, high_vect_room, low_vect_room, 'Room type comparison')
# plot bed type comparison
# labels of bed_type
labels_bed = ['Real Bed','Futon','Pull-out sofa', 'Airbed', 'Couch']
# value vectors for bed_type in two groups
high_vect_bed = [df3_high[df3_high.bed_type == col].shape[0] for col in labels_bed]
low_vect_bed = [df3_low[df3_low.bed_type ==col].shape[0] for col in labels_bed]
plot_compare(labels_bed, high_vect_bed, low_vect_bed, 'Bed type comparison')
# labels for host_response_time
labels_response = ['within an hour', 'within a few hours', 'within a day', 'a few days or more']
high_response = [df3_high[df3_high.host_response_time == col].shape[0] for col in labels_response]
low_response = [df3_low[df3_low.host_response_time == col].shape[0] for col in labels_response]
plot_compare(labels_response, high_response, low_response, 'Response time comparison')
# cancellation policy
labels_cancel = ['strict', 'moderate', 'flexible']
high_cancel = [df3_high[df3_high.cancellation_policy == col].shape[0] for col in labels_cancel]
low_cancel = [df3_low[df3_low.cancellation_policy == col].shape[0] for col in labels_cancel]
plot_compare(labels_cancel, high_cancel, low_cancel, 'Cancellation policy comparison')
###Output
_____no_output_____ |
ai2/lectures/AI2_02_word_embeddings.ipynb | ###Markdown
CS4619: Artificial Intelligence IIWord Embeddings Derek Bridge School of Computer Science and Information Technology University College Cork Initialization$\newcommand{\Set}[1]{\{#1\}}$ $\newcommand{\Tuple}[1]{\langle #1\rangle}$ $\newcommand{\v}[1]{\pmb{#1}}$ $\newcommand{\cv}[1]{\begin{bmatrix}#1\end{bmatrix}}$ $\newcommand{\rv}[1]{[#1]}$ $\DeclareMathOperator{\argmax}{arg\,max}$ $\DeclareMathOperator{\argmin}{arg\,min}$ $\DeclareMathOperator{\dist}{dist}$$\DeclareMathOperator{\abs}{abs}$
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.initializers import Constant
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Embedding
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import EarlyStopping
# This OneHot layer comes from https://fdalvi.github.io/blog/2018-04-07-keras-sequential-onehot/
from tensorflow.keras.layers import Lambda
from tensorflow.keras import backend as K
def OneHot(input_dim=None, input_length=None):
# Check if inputs were supplied correctly
if input_dim is None or input_length is None:
raise TypeError("input_dim or input_length is not set")
# Helper method (not inlined for clarity)
def _one_hot(x, num_classes):
return K.one_hot(K.cast(x, 'uint8'),
num_classes=num_classes)
# Final layer representation as a Lambda layer
return Lambda(_one_hot,
arguments={'num_classes': input_dim},
input_shape=(input_length,))
###Output
_____no_output_____
###Markdown
Acknowledgements Part of the code comes from chapter 6 of: François Chollet: Deep Learning with Python, Manning Publications, 2018 Natural Language Processing In the previous lecture, we represented each document as a single vector (bag-of-words). This is OK for some applications, e.g. spam filtering. But for many applications of natural language processing (NLP), we may need to treat documents as sequences (lists) of words (and maybe of punctuation symbols also): Sentiment analysis, e.g. of movie reviews or tweets; Machine translation; Image captioning; Question-answering and chatbots. There are other applications where each example is a sequence of features too, e.g.: processing speech; processing genomic data; timeseries prediction; clickstream prediction Sequences of integers Now, each word will be given a unique integer (an index, e.g. "a" might be word number 1, "the" might be word number 2, and so on). It is common to restrict to a certain vocabulary, e.g. in the code below, we restrict to the most common 1000 words, so the indexes are from 1 to 1000. In real applications, this might be tens-of-thousands or even hundreds-of-thousands of the most common words. If someone uses words that are not within the vocabulary, then either these words are ignored or they are all treated as a special token UNK and hence are all assigned to the same unique integer (e.g. they are all word number 1000). A document will be a sequence of these integers. We may add special symbols to the start and end of the document, also given an index. If we have a batch of documents, we may prefer them all to be the same length (e.g. maxlen = 200 words). In that case, we will need to: truncate documents that are longer than 200 words; and pad documents that have fewer than 200 words using a separate index, e.g. 0.
###Code
df = pd.read_csv("../datasets/dataset_5000_reviews.csv")
# Dataset size
m = len(df)
# We'll keep only the 1000 most common words in the reviews.
vocab_size = 1000
# We'll truncate/pad so that each review has 200 words
maxlen = 200
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(df["review"])
sequences = tokenizer.texts_to_sequences(df["review"])
padded_sequences = pad_sequences(sequences, maxlen=maxlen)
# Let's look at the first review
padded_sequences[0]
# Let's look at how the indexes relate to words
tokenizer.word_index
# Train/test split
split_point = int(m * 0.8)
dev_X = padded_sequences[:split_point]
test_X = padded_sequences[split_point:]
# Target values, encoded and converted to a 1D numpy array
label_encoder = LabelEncoder()
label_encoder.fit(df["sentiment"])
dev_y = label_encoder.transform(df["sentiment"][:split_point])
test_y = label_encoder.transform(df["sentiment"][split_point:])
###Output
_____no_output_____
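###Markdown
As a quick sanity check of the index representation, we can map a padded sequence back to words (a small sketch; padding zeros are skipped and unknown indexes are shown as "?").
###Code
index_to_word = {i: w for w, i in tokenizer.word_index.items()}
print(" ".join(index_to_word.get(i, "?") for i in padded_sequences[0] if i != 0))
###Output
_____no_output_____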
###Markdown
One-Hot Encoding We probably should not use the indexes directly. Why not? So we could one-hot encode each word. IMDB In our IMDB example, each review will now be represented by a list (of length 200) of binary-valued vectors (where the dimension of the vector is 1000. Why?) Converting from integer indexes to binary vectors can be done in many ways. We will do it using a layer in our network using some code given earlier. Then we will flatten the input into a single vector (maxlen * vocab_size) and then use a few dense layers.
###Code
inputs = Input(shape=(maxlen,))
x = OneHot(input_dim=vocab_size, input_length=maxlen)(inputs)
x = Flatten()(x)
x = Dense(32, activation="relu")(x)
outputs = Dense(1, activation="sigmoid")(x)
one_hot_model = Model(inputs, outputs)
one_hot_model.compile(optimizer=SGD(lr=0.001), loss="binary_crossentropy", metrics=["acc"])
one_hot_model.summary()
one_hot_history = one_hot_model.fit(dev_X, dev_y, epochs=10, batch_size=32, validation_split=0.25,
callbacks=[EarlyStopping(monitor="val_loss", patience=2)], verbose=0)
pd.DataFrame(one_hot_history.history).plot()
###Output
_____no_output_____
###Markdown
Not great results, but not surprising: the dataset is small, and one-hot encoding is a poor choice.
###Code
# An illustration of why one-hot encoding is not great.
def cosine_similarity(x, xprime):
# Assumes x and xprime are already normalized
# Converts from sparse matrices because np.dot does not work on them
return x.dot(xprime.T)
# Word indexes
print("like: ", tokenizer.word_index["like"] )
print("love: ", tokenizer.word_index["love"] )
print("hate: ", tokenizer.word_index["hate"] )
# One hot encodings
one_hot_like = np.zeros(vocab_size)
one_hot_like[ tokenizer.word_index["like"] ] = 1
one_hot_love = np.zeros(vocab_size)
one_hot_love[ tokenizer.word_index["love"] ] = 1
one_hot_hate = np.zeros(vocab_size)
one_hot_hate[ tokenizer.word_index["hate"] ] = 1
# Similarities
print("like and love: ", one_hot_like.dot(one_hot_love) )
print("like and hate: ", one_hot_like.dot(one_hot_hate) )
###Output
like: 37
love: 120
hate: 818
like and love: 0.0
like and hate: 0.0
###Markdown
Word Embeddings One-hot encoding uses large, sparse vectors. Word embeddings are small, non-sparse vectors, e.g. the dimension might be 100 or 200. To illustrate the ideas, we will use vectors of size 2 (so we can draw 2D diagrams). Perhaps we will have the following word embeddings: Dog: $\langle 0.4, 0.3\rangle$ Hound: $\langle 0.38, 0.32\rangle$ Wolf: $\langle 0.4, 0.8\rangle$ Cat: $\langle 0.75, 0.2\rangle$ Tiger: $\langle 0.75, 0.7\rangle$ The word embeddings we choose should reflect semantic relationships between the words: Words with similar meanings should be close together (as with Dog and Hound) and in general the distance between embeddings should reflect how closely related the meanings are. Geometric transformations might encode semantic relationships, e.g.: Adding $\langle 0, 0.5\rangle$ to the word embedding for Dog gives us the word embedding for Wolf; adding the same vector to the embedding for Cat gives the embedding for Tiger; $\langle 0, 0.5\rangle$ is the "from pet to wild animal" transformation. Similarly $\langle 0.35, -0.1\rangle$ is the "from canine to feline" transformation. Why? There is a Google visualization here: https://pair.withgoogle.com/explorables/fill-in-the-blank/ Learning word embeddings We learn the word embeddings from the dataset of documents. Conceptually, The values in the vectors are initialized randomly; Then they are adjusted during learning. But Keras does it using a network layer: During learning, the weights of the layer are adjusted. The activations of the units in the layer are the word embeddings. Before we apply this to IMDB, the next cell gives a quick numeric check of the toy 2-D vectors listed above.
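###Code
# A quick numeric check of the toy 2-D vectors above (illustrative values only, not learned embeddings)
toy = {"dog": np.array([0.4, 0.3]), "hound": np.array([0.38, 0.32]),
       "wolf": np.array([0.4, 0.8]), "cat": np.array([0.75, 0.2]),
       "tiger": np.array([0.75, 0.7])}
pet_to_wild = np.array([0.0, 0.5])
canine_to_feline = np.array([0.35, -0.1])
print("dog + pet_to_wild is wolf: ", np.allclose(toy["dog"] + pet_to_wild, toy["wolf"]))
print("cat + pet_to_wild is tiger: ", np.allclose(toy["cat"] + pet_to_wild, toy["tiger"]))
print("dog + canine_to_feline is cat: ", np.allclose(toy["dog"] + canine_to_feline, toy["cat"]))
print("distance(dog, hound): ", np.linalg.norm(toy["dog"] - toy["hound"]))
###Output
_____no_output_____
###Markdown
IMDB Now we learn embeddings for the IMDB reviews, using a Keras Embedding layer whose weights are adjusted during training.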
###Code
# Throughout the code, we will use 100-dimensional word embeddings (including the pretrained GloVe embeddings later)
embedding_dimension = 100
inputs = Input(shape=(maxlen,))
embedding = Embedding(input_dim=vocab_size, input_length=maxlen, output_dim=embedding_dimension)(inputs)
x = Flatten()(embedding)
x = Dense(32, activation="relu")(x)
outputs = Dense(1, activation="sigmoid")(x)
embedding_model = Model(inputs, outputs)
embedding_model.compile(optimizer=SGD(lr=0.001), loss="binary_crossentropy", metrics=["acc"])
embedding_model.summary()
embedding_history = embedding_model.fit(dev_X, dev_y, epochs=10, batch_size=32, validation_split=0.25,
callbacks=[EarlyStopping(monitor="val_loss", patience=2)], verbose=0)
pd.DataFrame(embedding_history.history).plot()
###Output
_____no_output_____
###Markdown
Possibly worse in this case! Perhaps because there is so little training data. Pretrained Word Embeddings Above, we got our neural network to learn the word embeddings. Advantage: they are based on our IMDB data and therefore tailored to helping us to predict the sentiment of movie reviews. Disadvantage: the IMDB dataset (and especially the subset that we are using) is probably too small to learn really powerful word embeddings. To some extent, word embeddings are fairly generic, so it can make sense to reuse pretrained embeddings from very large datasets, as we did with image data. word2vec (https://code.google.com/archive/p/word2vec/): This is Google's famous algorithm for learning word embeddings. The URL contains code and also pretrained embeddings learned from news articles. GloVe (Global Vectors for Word Representation, https://nlp.stanford.edu/projects/glove/): This is a Stanford University algorithm. The URL has code and pretrained embeddings learned from Wikipedia. Although we'll use GloVe, let's briefly explain how Google learns its word2vec word embeddings: It takes a large body of text (e.g. Wikipedia) and builds a model (a two-layer neural network classifier) that predicts words. E.g. in what is known as CBOW (continuous bag-of-words), it predicts the current word from a window of surrounding words. Or e.g. in what is known as continuous skip-gram, it predicts the surrounding words from the current word. IMDB To run the code that follows, you need to download and unzip the file called glove.6B.zip (>800MB) from the URL above; save space by deleting all files except glove.6B.100d.txt (it's still >300MB) The code comes from Chollet's book — details are not important in CS4619
###Code
# Parse the GloVe word embeddings file: produces a dictionary from words to their vectors
path = "../datasets/glove.6B.100d.txt" # Edit this to point to your copy of the file
embeddings_index = {}
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
# Create a matrix that associates the words that we obtained from the IMDB reviews earlier
# (in the word_index) with their GloVe word embeddings
embedding_matrix = np.zeros((vocab_size, embedding_dimension))
for word, i in tokenizer.word_index.items():
if i < vocab_size:
word_embedding = embeddings_index.get(word)
if word_embedding is not None:
embedding_matrix[i] = word_embedding
# Let's take a look at some of the embeddings
glove_like = embedding_matrix[ tokenizer.word_index["like"] ]
glove_love = embedding_matrix[ tokenizer.word_index["love"] ]
glove_hate = embedding_matrix[ tokenizer.word_index["hate"] ]
print("like: ", glove_like)
# Similarities
print("like and love: ", glove_like.dot(glove_love) )
print("like and hate: ", glove_like.dot(glove_hate) )
# A similar neural network to earlier but this time the embedding layer's weights come from GloVe and are
# not adjusted during training
inputs = Input(shape=(maxlen,))
x = Embedding(input_dim=vocab_size, input_length=maxlen, output_dim=embedding_dimension,
embeddings_initializer=Constant(embedding_matrix), trainable=False)(inputs)
x = Flatten()(x)
x = Dense(32, activation="relu")(x)
outputs = Dense(1, activation="sigmoid")(x)
pretrained_embedding_model = Model(inputs, outputs)
pretrained_embedding_model.compile(optimizer=SGD(lr=0.001), loss="binary_crossentropy", metrics=["acc"])
pretrained_history = pretrained_embedding_model.fit(dev_X, dev_y, epochs=10, batch_size=32, validation_split=0.25,
callbacks=[EarlyStopping(monitor="val_loss", patience=2)], verbose=0)
pd.DataFrame(pretrained_history.history).plot()
###Output
_____no_output_____ |
sampling/00 - Inverse transform sampling.ipynb | ###Markdown
Inverse Transform sampling Rationale**Inverse transform sampling** allows us to transform samples from a uniform distribution $U$ into samples from any other distribution $D$, given the $CDF$ of $D$.How can we do it?Let's take$$\large T(U) = X$$where:* $U$ is a uniform random variable* $T$ is some kind of a transformation* $X$ is the target random variable (let's use the **exponential** distribution as an example)Now, we said that to perform **inverse transform sampling**, we need a $CDF$.By definition, the $CDF$ (we'll call it $F_X(x)$ here) is given by: $$\large F_X(x) \triangleq P(X \leq x)$$We said before that to get $X$, we'll apply a certain transformation $T$ to a uniform random variable.We can then say that:$$\large P(X \leq x) = P(T(U) \leq x)$$Now, let's apply the inverse of $T$ to both sides of the inequality:$$\large = P(U \leq T^{-1}(x))$$The uniform distribution on $[0, 1]$ has the nice property that its $CDF$ at any point $x$ in $[0, 1]$ is equal to $x$ itself.Therefore, we can say that:$$\large = T^{-1}(x)$$and conclude that:$$\large F_X(x) = T^{-1}(x)$$ ConclusionWe demonstrated how to sample from any density $D$ using a sample from a uniform distribution and the inverse of the $CDF$ of $D$. Now, let's apply it in practice! CodeLet's see how to apply this in Python. We'll use the **exponential distribution** as an example.
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

# Define params
SAMPLE_SIZE = 100000
N_BINS = np.sqrt(SAMPLE_SIZE).astype('int') // 2
LAMBDA = 8
###Output
_____no_output_____
###Markdown
Let's instantiate the distributions. We will instantiate an exponential distribution explicitly for comparison purposes.___________Note that **`scipy.stats`** has a slightly **different parametrization** of the exponential than the popular $\lambda$ parametrization. In the [documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html), we read:*A common parameterization for expon is in terms of the rate parameter lambda, such that pdf = lambda * exp(-lambda * x). This parameterization corresponds to using scale = 1 / lambda.*____________Therefore, we're going to use **`scale=1/LAMBDA`** to parametrize our test **exponential distribution**.
###Code
# Instantiate U(0, 1)
unif = stats.uniform(0, 1)
# Instantiate Exp(8) for comparison purposes
exp = stats.expon(loc=0, scale=1/LAMBDA)
###Output
_____no_output_____
###Markdown
Now, we need to define the inverse transformation $T^{-1}(x)$ that will allow us to translate between uniform and exponential samples.The $CDF$ of the exponential distribution is defined as:$$\large\begin{equation} F_X(x) \triangleq \begin{cases} 1 - e^{-\lambda x} \ \text{ for }\ x \geq 0\\ 0 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{for }\ x<0 \\ \end{cases} \end{equation}$$Let's take the inverse of this function (solve for $x$):$$\large y = 1 - e^{-\lambda x}$$* isolate the exponential term:$$\large 1 - y = e^{-\lambda x}$$* take $\ln$ of both sides:$$\large \ln(1 - y) = -\lambda x$$* divide both sides by $-\lambda$:$$\large x = -\frac{\ln(1 - y)}{\lambda}$$**Et voilà!** 🎉🎉🎉 We've got it! 💪🏼Let's translate it to Python code:
###Code
# Define
def transform_to_exp(x, lmbd):
    """Transforms a uniform sample into an exponential sample"""
    return -np.log(1 - x) / lmbd
###Output
_____no_output_____
###Markdown
Take samples:
###Code
# Sample from uniform
sample_unif = unif.rvs(SAMPLE_SIZE)
# Sample from the true exponential
sample_exp = exp.rvs(SAMPLE_SIZE)
# Transform U -> Exp
sample_transform = transform_to_exp(sample_unif, LAMBDA)
###Output
_____no_output_____
###Markdown
A brief sanity check:
###Code
# Sanity check -> U(0, 1)
plt.hist(sample_unif, bins=N_BINS, density=True)
plt.title('Histogram of $U(0, 1)$')
plt.ylabel('$p(x)$')
plt.xlabel('$x$')
plt.show()
###Output
_____no_output_____
###Markdown
..and let's compare the resutls:
###Code
plt.hist(sample_exp, bins=N_BINS, density=True, alpha=.5, label='Exponential')
plt.hist(sample_transform, bins=N_BINS, density=True, alpha=.5, label='$T(U)$')
plt.legend()
plt.title('Histogram of exponential and transformed distributions', fontsize=12)
plt.ylabel('$p(x)$')
plt.xlabel('$x$')
plt.show()
###Output
_____no_output_____ |
Improving Deep Neural Networks: Hyperparameter tuning, Regularization and Optimization/Regularization.ipynb | ###Markdown
RegularizationWelcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!**You will learn to:** Use regularization in your deep learning models.Let's first import the packages you are going to use.
###Code
# import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
###Output
_____no_output_____
###Markdown
**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. **Figure 1** : **Football field** The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head They give you the following 2D dataset from France's past 10 games.
###Code
train_X, train_Y, test_X, test_Y = load_2D_dataset()
###Output
_____no_output_____
###Markdown
Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.- If the dot is blue, it means the French player managed to hit the ball with his/her head- If the dot is red, it means the other team's player hit the ball with their head**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball. **Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. 1 - Non-regularized modelYou will use the following neural network (already implemented for you below). This model can be used:- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python. - in *dropout mode* -- by setting the `keep_prob` to a value less than oneYou will first try the model without any regularization. Then, you will implement:- *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`"- *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`"In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.
###Code
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
learning_rate -- learning rate of the optimization
num_iterations -- number of iterations of the optimization loop
print_cost -- If True, print the cost every 10000 iterations
lambd -- regularization hyperparameter, scalar
keep_prob - probability of keeping a neuron active during drop-out, scalar.
Returns:
parameters -- parameters learned by the model. They can then be used to predict.
"""
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 20, 3, 1]
# Initialize parameters dictionary.
parameters = initialize_parameters(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
if keep_prob == 1:
a3, cache = forward_propagation(X, parameters)
elif keep_prob < 1:
a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
# Cost function
if lambd == 0:
cost = compute_cost(a3, Y)
else:
cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
# Backward propagation.
assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout,
# but this assignment will only explore one at a time
if lambd == 0 and keep_prob == 1:
grads = backward_propagation(X, Y, cache)
elif lambd != 0:
grads = backward_propagation_with_regularization(X, Y, cache, lambd)
elif keep_prob < 1:
grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 10000 iterations
if print_cost and i % 10000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
if print_cost and i % 1000 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (x1,000)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Let's train the model without any regularization, and observe the accuracy on the train/test sets.
###Code
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
###Output
Cost after iteration 0: 0.6557412523481002
Cost after iteration 10000: 0.16329987525724213
Cost after iteration 20000: 0.13851642423253263
###Markdown
The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
###Code
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____
###Markdown
The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting. 2 - L2 RegularizationThe standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:$$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$To:$$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$Let's modify your cost and observe the consequences.**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use :```pythonnp.sum(np.square(Wl))```Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $.
###Code
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
"""
Implement the cost function with L2 regularization. See formula (2) above.
Arguments:
A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
parameters -- python dictionary containing parameters of the model
Returns:
cost - value of the regularized loss function (formula (2))
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost
### START CODE HERE ### (approx. 1 line)
L2_regularization_cost = (1/m) * (lambd/2) * ( np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)) )
### END CODER HERE ###
cost = cross_entropy_cost + L2_regularization_cost
return cost
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
###Output
cost = 1.78648594516
###Markdown
**Expected Output**: **cost** 1.78648594516 Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. **Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$).
###Code
# GRADED FUNCTION: backward_propagation_with_regularization
def backward_propagation_with_regularization(X, Y, cache, lambd):
"""
Implements the backward propagation of our baseline model to which we added an L2 regularization.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation()
lambd -- regularization hyperparameter, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
### START CODE HERE ### (approx. 1 line)
dW3 = 1./m * np.dot(dZ3, A2.T) + ((lambd/m)*W3)
### END CODE HERE ###
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
### START CODE HERE ### (approx. 1 line)
dW2 = 1./m * np.dot(dZ2, A1.T) + ((lambd/m)*W2)
### END CODE HERE ###
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
### START CODE HERE ### (approx. 1 line)
dW1 = 1./m * np.dot(dZ1, X.T) + ((lambd/m)*W1)
### END CODE HERE ###
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()
grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)
print ("dW1 = "+ str(grads["dW1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("dW3 = "+ str(grads["dW3"]))
###Output
dW1 = [[-0.25604646 0.12298827 -0.28297129]
[-0.17706303 0.34536094 -0.4410571 ]]
dW2 = [[ 0.79276486 0.85133918]
[-0.0957219 -0.01720463]
[-0.13100772 -0.03750433]]
dW3 = [[-1.77691347 -0.11832879 -0.09397446]]
###Markdown
**Expected Output**: **dW1** [[-0.25604646 0.12298827 -0.28297129] [-0.17706303 0.34536094 -0.4410571 ]] **dW2** [[ 0.79276486 0.85133918] [-0.0957219 -0.01720463] [-0.13100772 -0.03750433]] **dW3** [[-1.77691347 -0.11832879 -0.09397446]] Let's now run the model with L2 regularization $(\lambda = 0.7)$. The `model()` function will call: - `compute_cost_with_regularization` instead of `compute_cost`- `backward_propagation_with_regularization` instead of `backward_propagation`
###Code
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
###Output
Cost after iteration 0: 0.6974484493131264
Cost after iteration 10000: 0.2684918873282239
Cost after iteration 20000: 0.26809163371273015
###Markdown
Congrats, the test set accuracy increased to 93%. You have saved the French football team!You are not overfitting the training data anymore. Let's plot the decision boundary.
###Code
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____
###Markdown
**Observations**:- The value of $\lambda$ is a hyperparameter that you can tune using a dev set.- L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias.**What is L2-regularization actually doing?**:L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. **What you should remember** -- the implications of L2-regularization on:- The cost computation: - A regularization term is added to the cost- The backpropagation function: - There are extra terms in the gradients with respect to weight matrices- Weights end up smaller ("weight decay"): - Weights are pushed to smaller values. 3 - DropoutFinally, **dropout** is a widely used regularization technique that is specific to deep learning. **It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!<!--To understand drop-out, consider this conversation with a friend:- Friend: "Why do you need all these neurons to train your network and classify images?". - You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!"- Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?"- You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution."!--> Figure 2 : Drop-out on the second hidden layer. At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\_prob$ or keep it with probability $keep\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. Figure 3 : Drop-out on the first and third hidden layers. $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. 3.1 - Forward propagation with dropout**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. **Instructions**:You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.2. 
Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X > 0.5)`. Note that 0 and 1 are respectively equivalent to False and True.3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)
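As a quick, optional illustration of step 4 (a small NumPy sketch, separate from the graded function below): masking a matrix and then dividing by `keep_prob` keeps its mean roughly unchanged.
###Code
# Optional illustration of inverted dropout (not part of the graded exercise)
demo = np.random.randn(3, 1000) + 5.                # some fake activations
mask = (np.random.rand(*demo.shape) < 0.8)          # keep_prob = 0.8
demo_dropped = demo * mask / 0.8                    # shut down ~20% of entries, then rescale
print(demo.mean(), demo_dropped.mean())             # the two means should be close
###Output
_____no_output_____
###Markdown
With these steps in mind, implement `forward_propagation_with_dropout`: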
###Code
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
"""
Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.
Arguments:
X -- input dataset, of shape (2, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (20, 2)
b1 -- bias vector of shape (20, 1)
W2 -- weight matrix of shape (3, 20)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
keep_prob - probability of keeping a neuron active during drop-out, scalar
Returns:
A3 -- last activation value, output of the forward propagation, of shape (1,1)
cache -- tuple, information stored for computing the backward propagation
"""
np.random.seed(1)
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above.
D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)
D1 = D1 < keep_prob # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
A1 = A1 * D1 # Step 3: shut down some neurons of A1
A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
### START CODE HERE ### (approx. 4 lines)
D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)
D2 = D2 < keep_prob # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
A2 = A2 * D2 # Step 3: shut down some neurons of A2
A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)
print ("A3 = " + str(A3))
###Output
A3 = [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]
###Markdown
**Expected Output**: **A3** [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]] 3.2 - Backward propagation with dropout**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. **Instruction**:Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. 2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).
###Code
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
"""
Implements the backward propagation of our baseline model to which we added dropout.
Arguments:
X -- input dataset, of shape (2, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation_with_dropout()
keep_prob - probability of keeping a neuron active during drop-out, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
### START CODE HERE ### (≈ 2 lines of code)
dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation
dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T)
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
### START CODE HERE ### (≈ 2 lines of code)
dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation
dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()
gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)
print ("dA1 = " + str(gradients["dA1"]))
print ("dA2 = " + str(gradients["dA2"]))
###Output
dA1 = [[ 0.36544439 0. -0.00188233 0. -0.17408748]
[ 0.65515713 0. -0.00337459 0. -0. ]]
dA2 = [[ 0.58180856 0. -0.00299679 0. -0.27715731]
[ 0. 0.53159854 -0. 0.53159854 -0.34089673]
[ 0. 0. -0.00292733 0. -0. ]]
###Markdown
**Expected Output**: **dA1** [[ 0.36544439 0. -0.00188233 0. -0.17408748] [ 0.65515713 0. -0.00337459 0. -0. ]] **dA2** [[ 0.58180856 0. -0.00299679 0. -0.27715731] [ 0. 0.53159854 -0. 0.53159854 -0.34089673] [ 0. 0. -0.00292733 0. -0. ]] Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 14% probability. The function `model()` will now call:- `forward_propagation_with_dropout` instead of `forward_propagation`.- `backward_propagation_with_dropout` instead of `backward_propagation`.
###Code
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
###Output
Cost after iteration 0: 0.6543912405149825
###Markdown
Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! Run the code below to plot the decision boundary.
###Code
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____ |
example/h3/h3demo.ipynb | ###Markdown
Match traj to hex
###Code
from fmm import H3MM,hexs2wkt
traj = "LINESTRING (18.024101257324215 59.337523121884225, 18.03852081298828 59.34391321930451, 18.042125701904297 59.35353986273416, 18.056459426879883 59.36080179623859, 18.065214157104492 59.34964577662557)"
hex_level = 9
interpolate = False
result = H3MM.match_wkt(traj, hex_level, interpolate)
print(result.traj_id)
print(list(result.hexs))
print(hexs2wkt(result.hexs))
###Output
0
[618737091877535743, 618737091842146303, 618737091929178111, 618737091479863295, 618737091474358271]
MULTIPOLYGON(((18.0228898614 59.3354770125,18.0215924168 59.3365311866,18.0216285645 59.3383172704,18.0229621748 59.3390492484,18.0242596721 59.3379950872,18.0242235064 59.3362089351,18.0228898614 59.3354770125)),((18.0375257823 59.3417433897,18.0362279618 59.3427977119,18.036264136 59.3445842599,18.0375981486 59.3453165539,18.0388960218 59.3442622446,18.0388598297 59.3424756284,18.0375257823 59.3417433897)),((18.0430068757 59.3518196531,18.0417088444 59.3528739237,18.0417449583 59.3546607271,18.0430791214 59.3553933281,18.0443772054 59.3543390705,18.0443410735 59.3525521988,18.0430068757 59.3518196531)),((18.057648911 59.3580921665,18.0563505038 59.3591465851,18.056386644 59.3609338529,18.0577212093 59.3616667704,18.0590196693 59.3606123647,18.0589835112 59.3588250286,18.057648911 59.3580921665)),((18.065330875 59.3464027176,18.0640324062 59.3474574216,18.0640686964 59.349244775,18.0654034733 59.3499774928,18.0667019948 59.3489228018,18.0666656867 59.3471353801,18.065330875 59.3464027176)))
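###Markdown
Before plotting, it can be useful to see how the number of matched hexagons grows with the H3 level. This is a small sketch that simply reuses `H3MM.match_wkt` on the same trajectory (the levels below are chosen arbitrarily).
###Code
for level in [7, 8, 9, 10]:
    res = H3MM.match_wkt(traj, level, False)
    print(level, len(list(res.hexs)))
###Output
_____no_output_____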
###Markdown
Plot result
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
from shapely import wkt
def plot_traj_hex(traj_geom, hex_geom, margin = 0.01):
fig,ax = plt.subplots(1,1,figsize=(6,4))
patches = []
tc = "C1"
hc = "C4"
x,y = traj_geom.xy
ax.plot(x,y,c=tc,marker="o",ms=6,lw=2,markeredgewidth=4, markeredgecolor=tc)
for geom in hex_geom.geoms:
x,y = geom.exterior.xy
ax.fill(x, y, fc = hc, ec="w",linewidth=2, alpha = 0.8)
ax.tick_params(axis='both',left=False, top=False, right=False, bottom=False,
labelleft=False, labeltop=False, labelright=False, labelbottom=False)
minx, miny, maxx, maxy = traj_geom.envelope.buffer(margin).bounds
ax.set_xlim(minx,maxx)
ax.set_ylim(miny,maxy)
# ax.set_aspect(1.0)
return fig,ax
level = 8
interpolate = False
result = H3MM.match_wkt(traj, level, interpolate)
traj_geom = wkt.loads(traj)
hex_geom = wkt.loads(hexs2wkt(result.hexs))
fig,ax = plot_traj_hex(traj_geom,hex_geom)
# plt.tight_layout()
ax.set_title("H{} Interpolate {}".format(level,interpolate),fontsize=16)
fig.savefig("h{}{}.png".format(level,("","i")[interpolate]),dpi=300,bbox_inches='tight',pad_inches=0)
level = 9
interpolate = True
result = H3MM.match_wkt(traj, level, interpolate)
traj_geom = wkt.loads(traj)
hex_geom = wkt.loads(hexs2wkt(result.hexs))
fig,ax = plot_traj_hex(traj_geom,hex_geom,margin=0.005)
ax.set_title("H{} Interpolate {}".format(level,interpolate),fontsize=16)
fig.savefig("h{}{}.png".format(level,("","i")[interpolate]),dpi=300,bbox_inches='tight',pad_inches=0)
level = 8
interpolate = True
result = H3MM.match_wkt(traj, level, interpolate)
traj_geom = wkt.loads(traj)
hex_geom = wkt.loads(hexs2wkt(result.hexs))
fig,ax = plot_traj_hex(traj_geom,hex_geom)
ax.set_title("H{} Interpolate {}".format(level,interpolate),fontsize=16)
fig.savefig("h{}{}.png".format(level,("","i")[interpolate]),dpi=300,bbox_inches='tight',pad_inches=0)
level = 7
interpolate = True
result = H3MM.match_wkt(traj, level, interpolate)
traj_geom = wkt.loads(traj)
hex_geom = wkt.loads(hexs2wkt(result.hexs))
fig,ax = plot_traj_hex(traj_geom,hex_geom,margin=0.03)
ax.set_title("H{} Interpolate {}".format(level,interpolate),fontsize=16)
fig.savefig("h{}{}.png".format(level,("","i")[interpolate]),dpi=300,bbox_inches='tight',pad_inches=0)
###Output
_____no_output_____
###Markdown
Plot as a whole
###Code
levels = [8, 8, 9, 7]
interpolates = [False, True, True, True]
fig,axes = plt.subplots(2,2,figsize=(8,6.1))
patches = []
tc = "C1"
hc = "C4"
for level,interpolate,ax in zip(levels,interpolates,axes.flatten()):
result = H3MM.match_wkt(traj, level, interpolate)
traj_geom = wkt.loads(traj)
hex_geom = wkt.loads(hexs2wkt(result.hexs))
x,y = traj_geom.xy
ax.plot(x,y,c=tc,marker="o",ms=5,lw=2,markeredgewidth=4)
for geom in hex_geom.geoms:
x,y = geom.exterior.xy
ax.fill(x, y, fc = hc, ec="w",linewidth=2, alpha = 0.8)
ax.tick_params(axis='both',left=False, top=False, right=False, bottom=False,
labelleft=False, labeltop=False, labelright=False, labelbottom=False)
ax.set_aspect(1.0)
ax.set_title("H{} {}".format(level,("","Interpolate")[interpolate]),position=(0.5, 0.9),fontsize=16)
minx, miny, maxx, maxy = traj_geom.envelope.buffer(0.015).bounds
yoffset = 0.003
ax.set_xlim(minx,maxx)
ax.set_ylim(miny+yoffset,maxy+yoffset)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
fig.savefig("h3demo.png",dpi=300,bbox_inches='tight',pad_inches=0)
###Output
_____no_output_____ |
Data_science_Project.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
from sklearn.neural_network import MLPClassifier
%matplotlib inline
%config InlineBackend.figure_format='retina'
yelp_df = pd.read_csv('https://raw.githubusercontent.com/rabin1323/Data_Set/main/yelp_review_csv1.csv')
yelp_df
###Output
_____no_output_____
###Markdown
**EXPLORING DATASET**
###Code
yelp_df.info()
yelp_df.describe()
#to check if we have any null values
sns.heatmap(yelp_df.isnull(), yticklabels=False, cbar= False, cmap="Blues")
#if there is any dot on the plot that means we have null values
yelp_df=yelp_df.drop(['useful','funny','cool','review_id','user_id','business_id','date'], axis=1)
yelp_df
#data cleaning
def preprocess(review_text):
remove_pctn=[char for char in review_text if char not in string.punctuation]
remove_pctn= ''.join(remove_pctn)
lwr = [word.lower() for word in remove_pctn.split()]
final_word = [word for word in lwr if word not in stopwords.words('english')]
return final_word
###Output
_____no_output_____
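###Markdown
A quick check of the cleaning function on a made-up review string (illustration only) shows what `preprocess` returns: punctuation removed, everything lower-cased, and stopwords dropped.
###Code
# Example only: the sentence below is invented for illustration
preprocess("The food was AMAZING!!! We will definitely come back :)")
###Output
_____no_output_____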
###Markdown
For the sentiment analysis we want to consider only two types of stars here, i.e., one star for negative reviews and five stars for positive reviews. We will also use CountVectorizer to build a vocabulary model that will be used to encode the review text. After that we will transform the vectorized text and assign it to variable x. Lastly, we will split the entire data into train and test sets using train_test_split().
###Code
#Filtering Data
filtered_data = yelp_df[(yelp_df['stars']==1) | (yelp_df['stars']==5)]
x = filtered_data['text'] #assigning review text to variable x
y=filtered_data['stars'] #assigning stars to variable y
vectorizer=CountVectorizer(analyzer=preprocess).fit(x)
x=vectorizer.transform(x) #transforming the vectorized text
X_train, X_test, y_train, y_test= train_test_split(x, y, random_state=42)
sns.countplot(filtered_data['stars'])
model= MLPClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
#plotting the reviews using confusion matrix
def conf_matrix(y, y_predict, reviews, title= 'Confusion_Matrix'):
c_matrix = confusion_matrix(y, y_predict)
clsfn_report = classification_report(y, y_predict)
ticks = np.arange(len(reviews))
score = accuracy_score(y, y_predict)
score=round(score*100,2)
print("Accuracy_score:", score)
print('classification_report', clsfn_report)
sns.heatmap(c_matrix, cmap= 'PuBu', annot= True, fmt='g', annot_kws={'size':20})
plt.xticks(ticks, reviews)
plt.yticks(ticks, reviews)
plt.xlabel('predicted', fontsize=20)
plt.ylabel('actual', fontsize=20)
plt.title(title, fontsize=20)
plt.show
conf_matrix(y_test, y_predict, reviews=['negative(1)', 'positive(5'])
###Output
Accuracy_score: 93.86
classification_report precision recall f1-score support
1 0.94 0.76 0.84 234
5 0.94 0.99 0.96 873
accuracy 0.94 1107
macro avg 0.94 0.87 0.90 1107
weighted avg 0.94 0.94 0.94 1107
|
TDDualControl.ipynb | ###Markdown
###Code
! git clone https://github.com/3ammor/Weights-Initializer-pytorch.git
import sys
sys.path
sys.path.append('/content/Weights-Initializer-pytorch')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import PIL.Image
from weight_initializer import Initializer
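
# Dynamics: simulates a batch of particles moving over the environment's terrain.
# compute_force() combines the terrain gradient (scaled by g), an optional control
# force field and a soft repulsion that keeps particles inside the domain, and also
# returns the quadratic control cost; integrate() takes midpoint steps with additive
# Gaussian noise, accumulating the control cost minus a small reward term along the way.
# Note: compute_force() and compute_reward() reference the notebook-level
# `environment` object directly rather than self.environment.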
class Dynamics:
def __init__(self, environment, g = 1., noise=0.05, lam=0.01, control=None):
self.environment = environment
self.g = g
self.noise = noise
self.xt = []
self.yt = []
self.control = control
self.lam = lam
self.cost = 0.
def compute_force(self, r, t):
upscaled_h_cells = torch.nn.functional.interpolate(environment.h_cells, scale_factor=environment.scale, mode="bilinear", align_corners=True)
_, Gx, Gy = environment.extrapolate(r[:,0], r[:,1], upscaled_h_cells,
activation=lambda x: x,
derivative=True,
d_activation=lambda x: x)
if self.control is not None:
Ux = environment.extrapolate(r[:,0], r[:,1], self.control[t][0],
activation=lambda x: x,
derivative=False,
std=.5,
normalized=True)
Uy = environment.extrapolate(r[:,0], r[:,1], self.control[t][1],
activation=lambda x: x,
derivative=False,
std=.5,
normalized=True)
control_force = torch.cat((Ux, Uy), 1)
else:
control_force = 0.
grad = torch.cat((Gx.unsqueeze(1), Gy.unsqueeze(1)), 1)
env_x_repulsion = 3*torch.sigmoid(-10*r[:,0]) - 3*torch.sigmoid(-10*(environment.resolution - r[:,0]))
env_y_repulsion = 3*torch.sigmoid(-10*r[:,1]) - 3*torch.sigmoid(-10*(environment.resolution - r[:,1]))
repulsion_force = torch.stack((env_x_repulsion, env_y_repulsion),1)
F = (self.g * grad + control_force + repulsion_force)
if self.control is not None:
control_cost = self.lam*torch.sum(control_force**2,1)
else:
control_cost = 0.
return F, control_cost
def compute_reward(self, r):
R = environment.extrapolate(r[:,0], r[:,1], environment.r,
activation=lambda x: x,
derivative=False,
std=.5,
normalized=True)
return torch.sum(R, 1)
def integrate(self, r, dt, N): #Midpoint integration
num_samples = self.environment.num_samples
for n in range(N):
F0, control_cost0 = self.compute_force(r, n)
F, control_cost = self.compute_force(r + 0.5*dt*F0, n)
r = r + (F * dt + torch.normal(0., self.noise, (self.environment.num_samples,2)) * dt**(1/2.))
self.xt += [r.detach().numpy()[:, 0]]
self.yt += [r.detach().numpy()[:, 1]]
self.cost += 0.5*(control_cost0 + control_cost)
self.cost += - 0.0001*self.compute_reward(r)
return r
def sample(self, dt, num_iter):
r0 = torch.empty(self.environment.num_samples, 2).uniform_(10, self.environment.resolution-10)
r = self.integrate(r0, dt, num_iter)
return r
def reset(self):
self.xt = []
self.yt = []
self.cost = 0.
def logit(x):
    # log-odds of probability x: log(x / (1 - x))
    return np.log(x) - np.log(1 - x)
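
# GaussianEnvironment: keeps a low-resolution Gaussian belief (mean/std per latent cell)
# over the terrain height and a logit-parameterised belief over reward locations.
# visibility_map() builds a distance- and height-dependent observation mask around a
# position, and env_bayesian_update()/rew_bayesian_update() refine these beliefs from
# (real or simulated) local observations via an external inference network;
# env_bayesian_update() also returns variational losses for training that network.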
class GaussianEnvironment:
def __init__(self, resolution, std, num_samples, scale = 5):
if resolution % scale != 0:
raise ValueError("The resolution should have {} as a factor".format(scale))
latent_resolution = int(resolution/scale)
self.distribution_mean = torch.zeros([num_samples, 1, latent_resolution, latent_resolution])
self.distribution_std = torch.ones([num_samples, 1, latent_resolution, latent_resolution])
self.r_distribution_logits = logit(3./(latent_resolution*latent_resolution))*torch.ones([num_samples, latent_resolution*latent_resolution])
self.num_samples = num_samples
self.resolution = resolution
self.latent_resolution = latent_resolution
self.scale = scale
self.std = std
self.environment_hardness = 0.01
self.reward_hardness = 0.01
self.is_generated = False
self.colors = [(torch.tensor([0., 0., 1.]).unsqueeze(1).unsqueeze(2).expand(1,3,resolution, resolution), 0, 0.2),
(torch.tensor([0., 1., 0.]).unsqueeze(1).unsqueeze(2).expand(1,3,resolution, resolution), 0.2, 0.4),
(torch.tensor([0.58824, 0.29412, 0]).unsqueeze(1).unsqueeze(2).expand(1,3,resolution, resolution), 0.4, 0.6),
(torch.tensor([0.5, 0.5, 0.5]).unsqueeze(1).unsqueeze(2).expand(1,3,resolution, resolution), 0.6, 0.8),
(torch.tensor([1., 1., 1.]).unsqueeze(1).unsqueeze(2).expand(1,3,resolution, resolution), 0.6, 0.8)]
self.halfsize = None
self.kernel = None
self.set_kernel()
self.c = None
self.h = None
self.dxh = None
self.dyh = None
def visibility_map(self, x0, y0, v0, k0):
arange = torch.arange(0., self.resolution).float()
x, y = torch.meshgrid([arange, arange])
h = self.extrapolate(x0, y0, self.h,
activation=lambda x: x,
derivative=False,
normalized=True)
x0 = x0.unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(self.num_samples,1,self.resolution,self.resolution)
y0 = y0.unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(self.num_samples,1,self.resolution,self.resolution)
h = h.unsqueeze(2).unsqueeze(3).expand(self.num_samples,1,self.resolution,self.resolution)
d_map = torch.sqrt(((x0 - x) ** 2 + (y0 - y) ** 2))
visibility_mask = 1./(1. + F.relu(d_map - h*k0 + 1)**2)
hard_mask = 1. - torch.sigmoid(10000*(d_map - h*k0 + 1))
likelihood_variance = v0 + F.relu(d_map - h*k0 + 1)**3
return likelihood_variance, visibility_mask, hard_mask
def env_bayesian_update(self, inference_net, x0, y0, v0 = 0.00001, k0 = 30., data=None):
prior_mean = self.distribution_mean
prior_std = self.distribution_std
likelihood_variance, visibility_mask, hard_mask = self.visibility_map(x0, y0, v0, k0)
if data is None:
mean = self.h
distribution = torch.distributions.normal.Normal(mean,torch.sqrt(likelihood_variance))
sample = distribution.rsample()
data = hard_mask*sample
posterior_mean, posterior_var = inference_net.get_posterior_parameters(data, likelihood_variance, prior_mean, prior_std)
self.distribution_mean = posterior_mean
self.distribution_std = torch.sqrt(posterior_var)
variational_loss1 = inference_net.neg_ELBO_loss(data, prior_mean, prior_std, self, likelihood_variance,hard_mask)
latent = self.h_cells
variational_loss2 = inference_net.FAVI_loss(data, latent, prior_mean, prior_std, likelihood_variance)
return variational_loss1 + variational_loss2
def rew_bayesian_update(self, inference_net, x0, y0, v0 = 0.001, k0 = 20., data=None):
prior_logits = self.r_distribution_logits
likelihood_variance, visibility_mask, hard_mask = self.visibility_map(x0, y0, v0, k0)
if data is None:
mean = self.r
distribution = torch.distributions.normal.Normal(mean,torch.sqrt(likelihood_variance))
sample = distribution.rsample()
data = hard_mask*sample
posterior_logits = inference_net.get_posterior_parameters(data, likelihood_variance, prior_logits, hard_mask)
self.r_distribution_logits = posterior_logits
#variational_loss = inference_net.neg_ELBO_loss(data, prior_logits, self, likelihood_variance)
latent = self.r_cells
variational_loss = inference_net.FAVI_loss(data, latent, prior_logits, likelihood_variance, hard_mask)
return variational_loss
def dsigmoidd(self, x):
sigmoid = torch.sigmoid(x)
return sigmoid * (1 - sigmoid)
def get_statistics(self):
return self.distribution_mean, self.distribution_std, self.r_distribution_logits
def filter_environment(self, cells):
upscaled_cells = torch.nn.functional.interpolate(cells, scale_factor=self.scale, mode="bilinear", align_corners=True)
pre_map = torch.nn.functional.conv2d(upscaled_cells,
self.kernel.unsqueeze(0).unsqueeze(0), padding = self.halfsize)
env_map = torch.sigmoid(self.environment_hardness * pre_map)
dxh = torch.nn.functional.conv2d(upscaled_cells,
self.dxkernel.unsqueeze(0).unsqueeze(0), padding = self.halfsize)
dyh = torch.nn.functional.conv2d(upscaled_cells,
self.dykernel.unsqueeze(0).unsqueeze(0), padding = self.halfsize)
dxh = dxh * self.environment_hardness * self.dsigmoidd(self.environment_hardness * pre_map)
dyh = dyh * self.environment_hardness * self.dsigmoidd(self.environment_hardness * pre_map)
return env_map, dxh, dyh
def filter_reward(self, r_cells):
upscaled_r_cells = torch.nn.functional.interpolate(r_cells.view((self.num_samples,1,self.latent_resolution, self.latent_resolution)),
scale_factor=self.scale, mode="bilinear", align_corners=True)
reward = (0.1/3)*torch.nn.functional.conv2d(upscaled_r_cells,
self.kernel.unsqueeze(0).unsqueeze(0), padding = self.halfsize)
return reward
def generate(self):
mean = self.distribution_mean
std = self.distribution_std
distribution = torch.distributions.normal.Normal(mean,
std)
cells = distribution.rsample()
r_logits = self.r_distribution_logits
r_distribution = torch.distributions.bernoulli.Bernoulli(logits=r_logits)
r_cells = r_distribution.sample()
env_map, dxh, dyh = self.filter_environment(cells)
reward = self.filter_reward(r_cells)
self.c = self.paint(env_map)
self.h_cells = cells
self.h = env_map
self.r = reward
self.r_cells = r_cells
self.dxh = dxh
self.dyh = dyh
self.is_generated = True
def set_kernel(self):
self.halfsize = 4*int(np.ceil(2 * self.std))
arange = torch.arange(-self.halfsize, self.halfsize + 1).float()
x, y = torch.meshgrid([arange, arange])
self.kernel = torch.exp(-(x ** 2 + y ** 2) / (2 * self.std ** 2))
self.dxkernel = -self.kernel.detach() * x / self.std **2
self.dykernel = -self.kernel.detach() * y / self.std **2
def extrapolate(self, x0, y0, image, activation, derivative=False, d_activation = None, std=None, normalized=False):
if std is None: #
std = self.std
arange = torch.arange(0., self.resolution).float()
x, y = torch.meshgrid([arange, arange])
x = x.unsqueeze(0).unsqueeze(0).expand(self.num_samples,1,self.resolution,self.resolution)
y = y.unsqueeze(0).unsqueeze(0).expand(self.num_samples,1,self.resolution,self.resolution)
x0 = x0.unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(self.num_samples,1,self.resolution,self.resolution)
y0 = y0.unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(self.num_samples,1,self.resolution,self.resolution)
weights = torch.exp(-((x0 - x) ** 2 + (y0 - y) ** 2) / (2 * std ** 2))
if derivative:
dx_weights = -(x - x0)*weights / self.std **2
dy_weights = -(y - y0)*weights / self.std **2
if normalized:
weights = weights/torch.sum(weights, (1,2,3), keepdim=True).expand(self.num_samples,1,self.resolution,self.resolution)
extr = torch.sum(image * weights, (1,2,3))
if derivative:
dx_extr = d_activation(extr)*torch.sum(image * dx_weights, (1,2,3))
dy_extr = d_activation(extr)*torch.sum(image * dy_weights, (1,2,3))
return activation(extr), dx_extr, dy_extr
else:
extr = activation(torch.sum(image * weights, (2,3)))
return activation(extr)
def soft_indicator(self, lower, upper, soft):
indicator = lambda height: torch.sigmoid(soft * (height - lower)) * (1 - torch.sigmoid(soft * (height - upper)))
return indicator
def paint(self, x):
return sum([color.expand(self.num_samples,3,self.resolution,self.resolution) * self.soft_indicator(lower, upper, 10.)(x) for color, lower, upper in self.colors])
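# Hamilton-Jacobi-Bellman solver: integrates the value function backward in time on the
# grid (plain Euler or Runge-Kutta step) and returns the control field U = -grad(V)/lambda.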
class HJB(torch.nn.Module):
def __init__(self, image_size, x_force, y_force, noise_map, reward, lam, dt, intermediate_reward=False):
super(HJB, self).__init__()
self.image_size = image_size
self.x_force = x_force
self.y_force = y_force
self.noise_map = noise_map
self.reward = reward
self.lam = lam
self.dt = dt
self.kx, self.ky, self.k_laplace = self._get_derivative_filters()
self.intermediate_reward = intermediate_reward
#self.kx_minus, self.kx_plus, self.ky_minus, self.ky_plus = self._get_derivative_filters()
def _get_derivative_filters(self): #Upwind method
ky = torch.tensor([[1., 2. , 1.], [0., 0., 0.], [-1., -2. , -1.]])/4.
ky = ky.expand(1,1,3,3)
kx = torch.transpose(ky, 3, 2)
k_laplace = torch.tensor([[1., 1. , 1.], [1., -8. , 1.], [1., 1. , 1.]])
k_laplace = k_laplace.expand(1,1,3,3)
return kx, ky, k_laplace
def backward_update(self, V, control=False):
Vpad = torch.nn.functional.pad(V, (1,1,1,1), "reflect")
dVx = torch.nn.functional.conv2d(Vpad, self.kx, padding = 0)
dVy = torch.nn.functional.conv2d(Vpad, self.ky, padding = 0)
LV = torch.nn.functional.conv2d(Vpad, self.k_laplace, padding = 0)
if self.intermediate_reward:
r = self.reward
else:
r = 0.
update = (-r - dVx**2/(2*self.lam) - dVy**2/(2*self.lam) + self.x_force * dVx + self.y_force * dVy + self.noise_map**2*LV)
if control:
Ux = -(1/self.lam)*dVx
Uy = -(1/self.lam)*dVy
return update, Ux, Uy
else:
return update
def backward_step(self, V):
update, Ux, Uy = self.backward_update(V, control=True)
Vprev = V + self.dt*update
return Vprev, Ux, Uy
def RK_backward_step(self, V):
k1, Ux, Uy = self.backward_update(V, control=True)
k1 *= self.dt
k2 = self.dt*self.backward_update(V + k1/2)
k3 = self.dt*self.backward_update(V + k2/2)
k4 = self.dt*self.backward_update(V + k3)
return V + (k1 + 2*k2 + 2*k3 + k4)/6., Ux, Uy
def compute_value(self, N, RK = False, plot=False):
Vn = -self.reward
V_list = [-Vn]
U_list = [None]
for n in reversed(range(N)):
if n % 20 == 0:
if plot:
x,y = (np.arange(0, resolution), np.arange(0, resolution))
plt.imshow(Vn[0,:,:,:].detach().numpy().squeeze(), extent = [0, resolution, 0, resolution], origin="lower")
plt.quiver(x, y, environment.dyh[0,:,:,:].detach().numpy().squeeze(), environment.dxh[0,:,:,:].detach().numpy().squeeze())
#plt.quiver(x, y, Ux[0,:,:,:].detach().numpy().squeeze(), Uy[0,:,:,:].numpy().squeeze(), color="red")
fig = plt.gcf()
fig.set_size_inches(18.5, 18.5)
plt.show()
if not RK:
Vn, Ux, Uy = self.backward_step(Vn)
else:
Vn, Ux, Uy = self.RK_backward_step(Vn)
V_list.append(-Vn)
U_list.append((-Uy, -Ux)) #TODO: flipped/sign flipped
return list(reversed(V_list)), list(reversed(U_list))
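# Amortized inference network for the height map: maps a masked noisy observation to a
# Gaussian update of the latent-cell posterior; trained with an ELBO and a forward
# amortized (FAVI) objective.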
class EnvInferenceNet(nn.Module):
def __init__(self, gain, h_size=30, k_size=3, var_k_size=3, latent_resolution = 8, scale_factor = 5):
super(EnvInferenceNet, self).__init__()
self.conv_in = nn.Conv2d(h_size, h_size, k_size, padding=0) #Input: h_mean, h_std, r_mean, r_std times h_size
self.out = nn.Linear(latent_resolution*latent_resolution*h_size*scale_factor**2, latent_resolution*latent_resolution)
self.var_l1 = nn.Conv2d(1, h_size, 1, padding=0, bias=False)
self.var_out = nn.Conv2d(h_size, 1 , 1, padding=0, bias=False)
# Parameters
self.h_size = h_size
self.k_size = k_size
self.k_pad = int((k_size - 1)/2)
self.var_k_pad = int((var_k_size - 1)/2)
self.latent_resolution = latent_resolution
self.scale_factor = scale_factor
self.gain = gain
def forward(self, data, likelihood_var):
activation = lambda x: torch.relu(x)
b_size = data.shape[0]
x = data.repeat(1,self.h_size,1,1)
x_pad = torch.nn.functional.pad(x, (self.k_pad,self.k_pad,self.k_pad,self.k_pad), "reflect")
h = activation(self.conv_in(x_pad)).view(b_size, self.h_size*self.latent_resolution*self.latent_resolution*self.scale_factor**2)
latent_data = self.out(h)
latent_data = latent_data.view(b_size,1,self.latent_resolution,self.latent_resolution)
x = F.interpolate(likelihood_var, scale_factor=1/self.scale_factor, mode="bilinear", align_corners=True)
x = activation(self.var_l1(x))
x = self.var_out(x)**2
return self.gain*latent_data, x
def get_posterior_parameters(self, data, likelihood_var, prior_mean, prior_std):
latent_data, latent_variance = self(data, likelihood_var)
posterior_var = 1/(1/prior_std**2 + 1/latent_variance)
posterior_mean = (prior_mean/prior_std**2 + latent_data/latent_variance)*posterior_var
return posterior_mean, posterior_var
def neg_ELBO_loss(self, data, prior_mean, prior_std, environment, lk_variance, mask):
prior_distribution = torch.distributions.normal.Normal(prior_mean, prior_std)
posterior_mean, posterior_var = self.get_posterior_parameters(data, lk_variance, prior_mean, prior_std)
post_distribution = torch.distributions.normal.Normal(posterior_mean,torch.sqrt(posterior_var))
posterior_sample = post_distribution.rsample()
lik_filter = lambda x: environment.filter_environment(x)[0]
avg_log_lik = torch.mean(-0.5*mask*(data - lik_filter(posterior_sample))**2/lk_variance)
KL_regularization = torch.distributions.kl.kl_divergence(post_distribution, prior_distribution)
return torch.mean(-avg_log_lik + KL_regularization)
def FAVI_loss(self, data, latent, prior_mean, prior_std, lk_variance):
posterior_mean, posterior_var = self.get_posterior_parameters(data, lk_variance, prior_mean, prior_std)
loss = torch.mean(0.5*(latent - posterior_mean)**2/posterior_var + 0.5*torch.log(2*np.pi*posterior_var))
return loss
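# Amortized inference network for the reward map: maps a masked noisy observation to a
# logit update of the Bernoulli posterior over reward cells; trained with a masked
# BCE-based FAVI objective.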
class RewInferenceNet(nn.Module):
def __init__(self, gain, h_size=60, k_size=5, latent_resolution = 8, scale_factor = 5):
super(RewInferenceNet, self).__init__()
self.l = nn.Linear(latent_resolution*latent_resolution*scale_factor**2, latent_resolution*latent_resolution)
#self.var_l1 = nn.Conv2d(1, h_size, 1, padding=0, bias=False)
#self.var_out = nn.Conv2d(h_size, 1 , 1, padding=0, bias=False)
# Parameters
self.h_size = h_size
self.k_size = k_size
self.k_pad = int((k_size - 1)/2)
self.latent_resolution = latent_resolution
self.scale_factor = scale_factor
self.gain = gain
def forward(self, data, likelihood_var, hard_mask):
b_size = data.shape[0]
mask = F.interpolate(hard_mask, scale_factor=1/self.scale_factor, mode="bilinear", align_corners=True).view((b_size,self.latent_resolution*self.latent_resolution))
x = mask*(0.1*F.softplus(self.l(data.view(b_size, self.latent_resolution*self.latent_resolution*self.scale_factor**2))) - 2.)
return x
def get_posterior_parameters(self, data, likelihood_var, prior_logits, hard_mask):
latent_logits = self(data, likelihood_var, hard_mask)
posterior_logits = prior_logits + latent_logits
return posterior_logits
def neg_ELBO_loss(self, data, prior_logits, environment, lk_variance):
prior_distribution = torch.distributions.categorical.Categorical(logits=prior_logits)
posterior_logits = self.get_posterior_parameters(data, lk_variance, prior_logits)
post_distribution = torch.distributions.categorical.Categorical(logits=posterior_logits)
enumeration = post_distribution.enumerate_support(expand=False)
log_probs = post_distribution.log_prob(enumeration).transpose(1,0)
probs = torch.exp(log_probs).unsqueeze(2).unsqueeze(3)
log_lk = torch.sum(-0.5*(data - environment.filter_reward(enumeration[:,0]).transpose(1,0))**2/lk_variance, (2,3))
avg_log_lik = torch.mean(probs*log_lk.detach())
#
KL_regularization = torch.distributions.kl.kl_divergence(post_distribution, prior_distribution)
return torch.mean(-avg_log_lik + KL_regularization)
def FAVI_loss(self, data, latent, prior_logits, lk_variance, hard_mask):
b_size = data.shape[0]
weights = F.interpolate(hard_mask, scale_factor=1/self.scale_factor, mode="bilinear", align_corners=True).view((b_size,self.latent_resolution*self.latent_resolution))
loss_fn = torch.nn.BCEWithLogitsLoss(weight=weights.detach())
posterior_logits = self.get_posterior_parameters(data, lk_variance, prior_logits, hard_mask)
loss = loss_fn(posterior_logits, latent.detach())
if False and iteration % 10 == 0:
plot_map(data)
mean_r_cells = torch.sigmoid(torch.nn.functional.interpolate(posterior_logits.view((b_size,1,self.latent_resolution, self.latent_resolution)),
scale_factor=self.scale_factor, mode="bilinear", align_corners=True))
r_mean = (0.1/3)*torch.nn.functional.conv2d(mean_r_cells,
environment.kernel.unsqueeze(0).unsqueeze(0), padding = environment.halfsize)
r_var = r_mean*(1 - r_mean)
plot_map(r_mean)
plot_map(r_var)
return loss
# Policy network TODO: Work in progress
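# ValueNet maps the current beliefs (height-map posterior and reward logits) to a smoothed
# value map, then derives a control field by solving the HJB equation on that map.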
class ValueNet(nn.Module):
def __init__(self, environment, smoothing_std=2, h_size=40, k_size=1):
super(ValueNet, self).__init__()
self.conv_in = nn.Conv2d(3, h_size, k_size, padding=0, bias=False)
self.out = nn.Linear(environment.latent_resolution*environment.latent_resolution*h_size,
environment.latent_resolution*environment.latent_resolution, bias=False)
self.conv_out = nn.Conv2d(h_size, 1, k_size, padding=0, bias=False)
# Smoothing layer
self.smoothing_std = smoothing_std
# Parameters
self.h_size = h_size
self.k_size = k_size
self.k_pad = int((k_size - 1)/2)
self.halfsize = 4*int(np.ceil(2 * smoothing_std))
arange = torch.arange(-self.halfsize, self.halfsize + 1).float()
x, y = torch.meshgrid([arange, arange])
self.smoothing_ker = torch.exp(-(x ** 2 + y ** 2) / (2 * smoothing_std ** 2))
self.environment = environment
self.V_trace = None
def forward(self, h_mean, h_std, r_logits, N, g, dt, exploit=False):
activation = lambda x: F.softplus(x)
predicted_reward = 0.1*environment.filter_reward(torch.sigmoid(r_logits))
x = torch.cat((h_mean,
h_std,
r_logits.view((environment.num_samples,
1,
environment.latent_resolution,
environment.latent_resolution))),
1)
x = activation(self.conv_in(x))
y = x.view((environment.num_samples, self.h_size*environment.latent_resolution**2))
z = self.conv_out(x)
x = z #+ 0.1*self.out(y).view((environment.num_samples, 1, environment.latent_resolution, environment.latent_resolution))
x = F.interpolate(x, scale_factor=environment.scale,
mode="bilinear", align_corners=True)
x_pad = torch.nn.functional.pad(x, (self.halfsize,self.halfsize,self.halfsize,self.halfsize), "reflect")
output = torch.nn.functional.conv2d(x_pad.view(environment.num_samples,
1,
x_pad.shape[2],
x_pad.shape[3]), self.smoothing_ker.unsqueeze(0).unsqueeze(0)).view(environment.num_samples,
1,
environment.resolution,
environment.resolution)
value = 0.01*F.softplus(output)
if exploit is False:
hjb_input = value
else:
hjb_input = predicted_reward
hjb = HJB(image_size=environment.resolution,
x_force= -g*environment.dyh, #TODO: this should be changed
y_force= -g*environment.dxh, #TODO: this should be changed
noise_map= 0.25, #TODO: this should be changed
reward=hjb_input,
lam= 0.02,
dt=dt)
_, Ulist = hjb.compute_value(N, RK=True)
return value, Ulist
def TDloss(self, reward, value, future_value, kernel, gamma=0.9):
reward = reward.unsqueeze(1).unsqueeze(2).unsqueeze(3)
future_value = future_value.unsqueeze(2).unsqueeze(3)
#
TD_target = (reward + gamma*future_value).detach()
loss = torch.mean(kernel*(value - TD_target)**2)
return loss
def TD_lambda_loss(self, reward, value, future_value, kernel, step, gamma=0.95, lam = 0.2):
if step == 0:
self.V_trace = 0.
else:
self.V_trace = gamma*lam*self.V_trace + value
future_value = future_value.unsqueeze(2).unsqueeze(3)
loss = torch.mean(kernel*(reward + gamma*future_value.detach() - value).detach()*self.V_trace)
return loss
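# Plotting helpers: sampled trajectories over the inferred reward map, and generic heat maps.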
def plot_trajectories(Ulist, environment, dynamics, value):
x0_range = [20., 40.]
y0_range = [20., 40.]
_, _, r_logits = environment.get_statistics()
r_map = environment.filter_reward(torch.sigmoid(r_logits))
x,y = (np.arange(0, environment.resolution), np.arange(0, environment.resolution))
#plt.imshow(environment.h[0,0,:,:].detach().numpy().squeeze(), extent = [0, environment.resolution, 0, environment.resolution], origin="lower")
#plt.contour(x, y, environment.h[0,0,:,:].detach().numpy().squeeze(), colors='red')
plt.imshow(r_map[0,0,:,:].detach().numpy().squeeze(), extent = [0, environment.resolution, 0, environment.resolution], origin="lower")
plt.quiver(x, y, environment.dyh[0,0,:,:].detach().numpy().squeeze(), environment.dxh[0,:,:,:].detach().numpy().squeeze())
plt.plot(np.array(dynamics.yt)[:,0], np.array(dynamics.xt)[:,0], linewidth=4, color = "red")
plt.plot(np.array(dynamics.yt)[0,0], np.array(dynamics.xt)[0,0], "xb")
plt.colorbar()
fig = plt.gcf()
fig.set_size_inches(18.5, 18.5)
plt.show()
def plot_map(mp, norm=False, lim=1.):
x0_range = [20., 40.]
y0_range = [20., 40.]
x,y = (np.arange(0, environment.resolution), np.arange(0, environment.resolution))
plt.imshow(mp[0,0,:,:].detach().numpy().squeeze(), extent = [0, environment.resolution, 0, environment.resolution], origin="lower")
plt.colorbar()
fig = plt.gcf()
fig.set_size_inches(18.5, 18.5)
#plt.clim(0,1.)
if norm:
plt.clim(0,1.)
plt.show()
import pickle
def save_network(net, name):
pickle.dump(net, open( "{}.p".format(name), "wb" ))
def load_network(name):
return pickle.load( open( "{}.p".format(name), "rb" ) )
# Train
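# Two-phase schedule: for the first N_VI_iterations only the amortized inference networks
# are trained from random exploration (VI losses); afterwards the value network is trained
# by TD learning, using controls obtained from the HJB solve on the predicted value map.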
N_iters = 2000
RL_batch_size = 3
VI_batch_size = 20
N_steps = 5
N_intergration_steps = 400 #200
N_VI_iterations = 400
resolution = 40 #40
scale = 8 #5
std = 7.5
g = 0.0005 #0.005
noise = 0.3
dt = 0.1
environment = GaussianEnvironment(resolution=resolution, std=std, num_samples=VI_batch_size, scale=scale)
net = ValueNet(environment) #TODO: Multiple networks
#Initializer.initialize(model=net, initialization=nn.init.xavier_uniform, gain=nn.init.calculate_gain('relu'))
optimizer = optim.Adam(net.parameters(), lr=0.00001)
env_inference_net = EnvInferenceNet(gain=1., scale_factor = scale, latent_resolution = int(resolution/scale)) #TODO: Multiple networks
#Initializer.initialize(model=env_inference_net, initialization=nn.init.xavier_uniform, gain=nn.init.calculate_gain('relu'))
env_VI_optimizer = optim.Adam(env_inference_net.parameters(), lr=0.00001)
reward_inference_net = RewInferenceNet(gain=1., scale_factor = scale, latent_resolution = int(resolution/scale)) #TODO: Multiple networks
#Initializer.initialize(model=reward_inference_net, initialization=nn.init.xavier_uniform, gain=nn.init.calculate_gain('relu'))
reward_VI_optimizer = optim.Adam(reward_inference_net.parameters(), lr=0.0001)
loss_list = []
env_VI_loss_list = []
reward_VI_loss_list = []
load_value_net = False
try:
env_inference_net = load_network("env_net")
reward_inference_net = load_network("reward_net")
print("Loading inference networks")
N_VI_itr = 0
except:
print("Training inference networks")
N_VI_itr = N_VI_iterations
if load_value_net:
try:
net = load_network("value_net")
optimizer = optim.Adam(net.parameters(), lr=0.0001)
print("Loading value networks")
except:
print("Training value network")
for iteration in range(N_iters):
if iteration > N_VI_itr:
batch_size = RL_batch_size
else:
batch_size = VI_batch_size
print("Iteration: {}".format(iteration))
environment = GaussianEnvironment(resolution=resolution, std=std, num_samples=batch_size, scale=scale)
dynamics = Dynamics(environment, g=g, noise=noise, lam=0.0000)
environment.generate()
r = dynamics.sample(dt, 1)
if iteration > N_VI_itr:
environment.env_bayesian_update(env_inference_net, r[:,0], r[:,1])
environment.generate()
total_loss = 0.
total_reward = 0.
total_env_VI_loss = 0.
total_reward_VI_loss = 0.
reward = torch.zeros((batch_size,))
for step in range(N_steps):
print("Step: {}".format(step))
# zero the parameter gradients
optimizer.zero_grad()
env_VI_optimizer.zero_grad()
reward_VI_optimizer.zero_grad()
## Control ##
if step == N_steps - 1:
exploit = True
else:
exploit = False
if iteration > N_VI_itr:
h_mean, h_std, r_logits = environment.get_statistics()
value, Ulist = net.forward(h_mean,
h_std,
r_logits,
N_intergration_steps, g, dt, exploit=exploit)
dynamics.control = Ulist
print(value.max())
old_r = r
if iteration > N_VI_itr:
r = dynamics.integrate(r, dt, N_intergration_steps).detach()
else:
r = dynamics.sample(dt, 1)
if np.any(np.isnan(r.detach().numpy())):
print("not a number found in the new coordinates")
break
if np.any(r.detach().numpy() > resolution + 8) or np.any(r.detach().numpy() < -8):
print("The agent has left the environment")
break
if iteration % 1 == 0 and iteration > N_VI_itr:
plot_trajectories(Ulist, environment, dynamics, value)
save_network(net, "value_net")
## Reward ##
new_reward = -dynamics.cost
# Bayesian update
env_VI_loss = environment.env_bayesian_update(env_inference_net, r[:,0], r[:,1])
reward_VI_loss = environment.rew_bayesian_update(reward_inference_net, r[:,0], r[:,1])
## Information gain ##
if iteration > N_VI_itr:
if step < N_steps - 1:
new_h_mean, new_h_std, new_r_logits = environment.get_statistics()
future_value_map,_ = net.forward(new_h_mean, new_h_std, new_r_logits, N_intergration_steps, g, dt, exploit=exploit)
future_value = environment.extrapolate(r[:,0], r[:,1], future_value_map,
activation=lambda x: x,
derivative=False,
std=.5,
normalized=True).detach()
else:
future_value = new_reward.unsqueeze(1)
## TD kernel ##
arange = torch.arange(0, environment.resolution).float()
x, y = torch.meshgrid([arange, arange])
x = x.unsqueeze(0).unsqueeze(0).expand(environment.num_samples,1,environment.resolution,environment.resolution)
y = y.unsqueeze(0).unsqueeze(0).expand(environment.num_samples,1,environment.resolution,environment.resolution)
x0 = old_r[:,0].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(environment.num_samples,1,environment.resolution,environment.resolution)
y0 = old_r[:,1].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(environment.num_samples,1,environment.resolution,environment.resolution)
kernel = torch.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * 3. ** 2))
## TD learning ##
loss = net.TDloss(reward, value, future_value, kernel, gamma=0.95)
#loss = net.TD_lambda_loss(reward, value, future_value, kernel, step, gamma=0.95, lam = 0.2)
if not np.isnan(loss.detach().numpy()):
loss.backward(retain_graph=True)
optimizer.step()
environment.generate()
total_loss += float(loss.detach().numpy())
total_reward += float(torch.sum(reward).detach().numpy())
else:
break
## Reward ##
reward = new_reward
## VI update ##
if iteration < N_VI_itr:
env_VI_loss.backward(retain_graph=True)
reward_VI_loss.backward(retain_graph=True)
env_VI_optimizer.step()
reward_VI_optimizer.step()
total_env_VI_loss += float(env_VI_loss.detach().numpy())
total_reward_VI_loss += float(reward_VI_loss.detach().numpy())
if iteration == N_VI_itr:
save_network(env_inference_net, "env_net")
save_network(reward_inference_net, "reward_net")
if iteration > N_VI_itr:
print("Reward: {}".format(total_reward))
else:
print("VI env loss: {}".format(total_env_VI_loss))
print("VI rew loss: {}".format(total_reward_VI_loss))
#loss_list += [loss.detach().numpy()]#
env_VI_loss_list += [total_env_VI_loss]
reward_VI_loss_list += [total_reward_VI_loss]
if iteration == N_VI_itr:
plt.plot(env_VI_loss_list)
plt.show()
plt.plot(reward_VI_loss_list)
plt.show()
###Output
Training inference networks
Iteration: 0
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 422241042432.0
VI rew loss: 0.22977444529533386
Iteration: 1
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 363338297344.0
VI rew loss: 0.3573695085942745
Iteration: 2
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 406224902144.0
VI rew loss: 0.31365251168608665
Iteration: 3
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 503899504640.0
VI rew loss: 0.2277168594300747
Iteration: 4
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 463332421632.0
VI rew loss: 0.4688900001347065
Iteration: 5
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 406449203200.0
VI rew loss: 0.3349279426038265
Iteration: 6
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 329373618176.0
VI rew loss: 0.2615988291800022
Iteration: 7
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 324248328192.0
VI rew loss: 0.2598825991153717
Iteration: 8
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 320889317376.0
VI rew loss: 0.1518800612539053
Iteration: 9
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 431204198400.0
VI rew loss: 0.27514977008104324
Iteration: 10
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 599227244544.0
VI rew loss: 0.3366161994636059
Iteration: 11
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 466830008320.0
VI rew loss: 0.41192008554935455
Iteration: 12
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 372196616192.0
VI rew loss: 0.26207133010029793
Iteration: 13
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 345649184768.0
VI rew loss: 0.3933437168598175
Iteration: 14
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 379572883456.0
VI rew loss: 0.32232655584812164
Iteration: 15
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 334715971584.0
VI rew loss: 0.1281099859625101
Iteration: 16
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 410146734080.0
VI rew loss: 0.17513592913746834
Iteration: 17
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 556987813888.0
VI rew loss: 0.4188919775187969
Iteration: 18
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 387728510976.0
VI rew loss: 0.21109209023416042
Iteration: 19
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 284152754176.0
VI rew loss: 0.2820042371749878
Iteration: 20
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 328891977728.0
VI rew loss: 0.23192789778113365
Iteration: 21
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 428531464192.0
VI rew loss: 0.2512397300451994
Iteration: 22
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 327167698944.0
VI rew loss: 0.18166988529264927
Iteration: 23
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 245095751680.0
VI rew loss: 0.1800212450325489
Iteration: 24
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 325475930112.0
VI rew loss: 0.2476208209991455
Iteration: 25
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 495919110144.0
VI rew loss: 0.1512489002197981
Iteration: 26
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 422691186688.0
VI rew loss: 0.32298579812049866
Iteration: 27
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 431805562880.0
VI rew loss: 0.1584992278367281
Iteration: 28
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 331360634880.0
VI rew loss: 0.284047894179821
Iteration: 29
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 347667103744.0
VI rew loss: 0.29103031381964684
Iteration: 30
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 279597538304.0
VI rew loss: 0.1230550967156887
Iteration: 31
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 356742987776.0
VI rew loss: 0.13152461126446724
Iteration: 32
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 302686150656.0
VI rew loss: 0.19945281837135553
Iteration: 33
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 364370180096.0
VI rew loss: 0.1612131418660283
Iteration: 34
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 253777700864.0
VI rew loss: 0.10448094364255667
Iteration: 35
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 326113239040.0
VI rew loss: 0.25995614007115364
Iteration: 36
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 400469547008.0
VI rew loss: 0.17383521422743797
Iteration: 37
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 384482287616.0
VI rew loss: 0.26000647619366646
Iteration: 38
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 352392280064.0
VI rew loss: 0.09234283957630396
Iteration: 39
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 372837822464.0
VI rew loss: 0.22625005431473255
Iteration: 40
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 355142504448.0
VI rew loss: 0.15506913792341948
Iteration: 41
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 386779097088.0
VI rew loss: 0.16782874427735806
Iteration: 42
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 257879505920.0
VI rew loss: 0.12468079291284084
Iteration: 43
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 504999710720.0
VI rew loss: 0.22793004289269447
Iteration: 44
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 345266546688.0
VI rew loss: 0.3645718451589346
Iteration: 45
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 362712178688.0
VI rew loss: 0.10626564174890518
Iteration: 46
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 307597252608.0
VI rew loss: 0.1544069554656744
Iteration: 47
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 299287117824.0
VI rew loss: 0.3075915165245533
Iteration: 48
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 218964445184.0
VI rew loss: 0.12400176282972097
Iteration: 49
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 314679769088.0
VI rew loss: 0.27946700528264046
Iteration: 50
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 290103347200.0
VI rew loss: 0.26787421852350235
Iteration: 51
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 317938165760.0
VI rew loss: 0.1698973085731268
Iteration: 52
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 266827040768.0
VI rew loss: 0.22994031198322773
Iteration: 53
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 288600600576.0
VI rew loss: 0.14216136373579502
Iteration: 54
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 387605450752.0
VI rew loss: 0.16833274438977242
Iteration: 55
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 280434954240.0
VI rew loss: 0.3196103163063526
Iteration: 56
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 297720780800.0
VI rew loss: 0.24131536297500134
Iteration: 57
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 338474053632.0
VI rew loss: 0.3960975222289562
Iteration: 58
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 291983589376.0
VI rew loss: 0.23704572021961212
Iteration: 59
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 383311239168.0
VI rew loss: 0.548185508698225
Iteration: 60
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 243736281088.0
VI rew loss: 0.22616184502840042
Iteration: 61
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 325990029312.0
VI rew loss: 0.41880902647972107
Iteration: 62
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 250841183232.0
VI rew loss: 0.37355637177824974
Iteration: 63
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 296662966272.0
VI rew loss: 0.11925788037478924
Iteration: 64
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 208515145728.0
VI rew loss: 0.14472386240959167
Iteration: 65
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 271639558144.0
VI rew loss: 0.044586863834410906
Iteration: 66
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 342637772800.0
VI rew loss: 0.20737509429454803
Iteration: 67
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 275305840640.0
VI rew loss: 0.17766628228127956
Iteration: 68
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 299629541376.0
VI rew loss: 0.26301489770412445
Iteration: 69
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 311327367168.0
VI rew loss: 0.40310613438487053
Iteration: 70
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 323407099904.0
VI rew loss: 0.16575668193399906
Iteration: 71
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 292913217536.0
VI rew loss: 0.08973224135115743
Iteration: 72
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 300654759936.0
VI rew loss: 0.2280220314860344
Iteration: 73
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 265091639296.0
VI rew loss: 0.15607992466539145
Iteration: 74
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 311091361792.0
VI rew loss: 0.23023258335888386
Iteration: 75
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 243729571840.0
VI rew loss: 0.2135176956653595
Iteration: 76
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 252062138368.0
VI rew loss: 0.21704638563096523
Iteration: 77
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 263157078016.0
VI rew loss: 0.10274959355592728
Iteration: 78
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 209720863744.0
VI rew loss: 0.09429359808564186
Iteration: 79
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 278086975488.0
VI rew loss: 0.12100488226860762
Iteration: 80
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 277860771840.0
VI rew loss: 0.3146333023905754
Iteration: 81
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 252288149504.0
VI rew loss: 0.0729137696325779
Iteration: 82
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 251148991488.0
VI rew loss: 0.16064060665667057
Iteration: 83
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 388054022144.0
VI rew loss: 0.16084175743162632
Iteration: 84
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 320258871296.0
VI rew loss: 0.15457004494965076
Iteration: 85
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 311282382848.0
VI rew loss: 0.1759900562465191
Iteration: 86
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 210271819776.0
VI rew loss: 0.08436768176034093
Iteration: 87
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 347898050560.0
VI rew loss: 0.2945910058915615
Iteration: 88
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 250873032704.0
VI rew loss: 0.09418028965592384
Iteration: 89
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 224809363456.0
VI rew loss: 0.12277084775269032
Iteration: 90
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 313884364800.0
VI rew loss: 0.1169918766245246
Iteration: 91
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 260794517504.0
VI rew loss: 0.15752853825688362
Iteration: 92
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 259567470592.0
VI rew loss: 0.1794869713485241
Iteration: 93
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 254978786304.0
VI rew loss: 0.14608252700418234
Iteration: 94
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 304380997632.0
VI rew loss: 0.0948259886354208
Iteration: 95
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 246424089600.0
VI rew loss: 0.12562525551766157
Iteration: 96
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 235872679936.0
VI rew loss: 0.1546556055545807
Iteration: 97
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 177659305984.0
VI rew loss: 0.12247939733788371
Iteration: 98
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 186939974656.0
VI rew loss: 0.126647075638175
Iteration: 99
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 200215781376.0
VI rew loss: 0.0741526186466217
Iteration: 100
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 235366174720.0
VI rew loss: 0.16793852858245373
Iteration: 101
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 197952548864.0
VI rew loss: 0.20960192382335663
Iteration: 102
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 218434132992.0
VI rew loss: 0.12977863289415836
Iteration: 103
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 226767673344.0
VI rew loss: 0.13524346705526114
Iteration: 104
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 224943609856.0
VI rew loss: 0.14345323853194714
Iteration: 105
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 195309808640.0
VI rew loss: 0.1665120478719473
Iteration: 106
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 188563872768.0
VI rew loss: 0.07744942884892225
Iteration: 107
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 251534471168.0
VI rew loss: 0.07055568415671587
Iteration: 108
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 264792907776.0
VI rew loss: 0.09083531238138676
Iteration: 109
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 273540591616.0
VI rew loss: 0.15185515582561493
Iteration: 110
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 220999151616.0
VI rew loss: 0.12703965418040752
Iteration: 111
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 157107107840.0
VI rew loss: 0.13024388067424297
Iteration: 112
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 205792893952.0
VI rew loss: 0.16463632136583328
Iteration: 113
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 232634999808.0
VI rew loss: 0.14512335322797298
Iteration: 114
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 223813782528.0
VI rew loss: 0.15403230674564838
Iteration: 115
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 197662590976.0
VI rew loss: 0.19239988923072815
Iteration: 116
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 261058787328.0
VI rew loss: 0.19355552829802036
Iteration: 117
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 318425749504.0
VI rew loss: 0.24338914081454277
Iteration: 118
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 259062024192.0
VI rew loss: 0.15762257017195225
Iteration: 119
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 137693451264.0
VI rew loss: 0.11716791056096554
Iteration: 120
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 223001248768.0
VI rew loss: 0.189112838357687
Iteration: 121
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 173881904128.0
VI rew loss: 0.20825589448213577
Iteration: 122
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 237359257600.0
VI rew loss: 0.09055909235030413
Iteration: 123
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 221920707584.0
VI rew loss: 0.1466216128319502
Iteration: 124
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 238442391552.0
VI rew loss: 0.14865458011627197
Iteration: 125
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 251549864960.0
VI rew loss: 0.17364568449556828
Iteration: 126
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 210052461568.0
VI rew loss: 0.09903262555599213
Iteration: 127
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 291537330176.0
VI rew loss: 0.2197326086461544
Iteration: 128
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 292872159232.0
VI rew loss: 0.423222903162241
Iteration: 129
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 219923607552.0
VI rew loss: 0.237300094217062
Iteration: 130
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 203433575424.0
VI rew loss: 0.10616797022521496
Iteration: 131
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 255351126016.0
VI rew loss: 0.11670284625142813
Iteration: 132
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 263147750400.0
VI rew loss: 0.2487430591136217
Iteration: 133
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 237638004736.0
VI rew loss: 0.14700455404818058
Iteration: 134
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 154095174656.0
VI rew loss: 0.14485059678554535
Iteration: 135
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 230348187648.0
VI rew loss: 0.08348592184484005
Iteration: 136
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 274861852672.0
VI rew loss: 0.18045745603740215
Iteration: 137
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 163953880064.0
VI rew loss: 0.04714646772481501
Iteration: 138
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 177214498816.0
VI rew loss: 0.11933653615415096
Iteration: 139
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 242643411968.0
VI rew loss: 0.21889785304665565
Iteration: 140
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 231277849600.0
VI rew loss: 0.1936682015657425
Iteration: 141
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 267530881024.0
VI rew loss: 0.20375876873731613
Iteration: 142
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 222792286208.0
VI rew loss: 0.15997080132365227
Iteration: 143
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 212910878720.0
VI rew loss: 0.15422696247696877
Iteration: 144
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 276364955648.0
VI rew loss: 0.12787541188299656
Iteration: 145
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 200667377664.0
VI rew loss: 0.13114825822412968
Iteration: 146
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 177303565312.0
VI rew loss: 0.14015917479991913
Iteration: 147
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 194966250496.0
VI rew loss: 0.15553545951843262
Iteration: 148
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 161703487488.0
VI rew loss: 0.046514492481946945
Iteration: 149
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 162743533568.0
VI rew loss: 0.1256493367254734
Iteration: 150
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 180307544064.0
VI rew loss: 0.13028662092983723
Iteration: 151
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 143436824576.0
VI rew loss: 0.0729379877448082
Iteration: 152
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 158047774720.0
VI rew loss: 0.13202457502484322
Iteration: 153
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 160865912832.0
VI rew loss: 0.11977975815534592
Iteration: 154
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 243217292288.0
VI rew loss: 0.338142529129982
Iteration: 155
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 190414811136.0
VI rew loss: 0.13180006109178066
Iteration: 156
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 211875233792.0
VI rew loss: 0.13611100055277348
Iteration: 157
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 211724924928.0
VI rew loss: 0.14755660854279995
Iteration: 158
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 159070516224.0
VI rew loss: 0.036795263178646564
Iteration: 159
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 205586465792.0
VI rew loss: 0.14636026788502932
Iteration: 160
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 176324969472.0
VI rew loss: 0.12976064160466194
Iteration: 161
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 188818952192.0
VI rew loss: 0.1015365794301033
Iteration: 162
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 159882921984.0
VI rew loss: 0.09688875824213028
Iteration: 163
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 142191460352.0
VI rew loss: 0.0834064818918705
Iteration: 164
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 193941487616.0
VI rew loss: 0.14115168899297714
Iteration: 165
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 187716227072.0
VI rew loss: 0.13645023480057716
Iteration: 166
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 201279457280.0
VI rew loss: 0.14414941146969795
Iteration: 167
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 166751292416.0
VI rew loss: 0.1092663798481226
Iteration: 168
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 202776832000.0
VI rew loss: 0.11920296214520931
Iteration: 169
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 172753268736.0
VI rew loss: 0.10153256356716156
Iteration: 170
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 152422607872.0
VI rew loss: 0.07899864763021469
Iteration: 171
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 194046680064.0
VI rew loss: 0.13171088136732578
Iteration: 172
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 181543312384.0
VI rew loss: 0.08212419878691435
Iteration: 173
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 215730048000.0
VI rew loss: 0.1836035344749689
Iteration: 174
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 189160587264.0
VI rew loss: 0.1037248931825161
Iteration: 175
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 151342760960.0
VI rew loss: 0.0796581357717514
Iteration: 176
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 169695935488.0
VI rew loss: 0.09321767557412386
Iteration: 177
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 187097515008.0
VI rew loss: 0.197247426956892
Iteration: 178
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 165082457088.0
VI rew loss: 0.2014260347932577
Iteration: 179
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 180271771648.0
VI rew loss: 0.15344606153666973
Iteration: 180
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 172187925504.0
VI rew loss: 0.07085285754874349
Iteration: 181
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 168501903360.0
VI rew loss: 0.12880539149045944
Iteration: 182
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 240736499712.0
VI rew loss: 0.23562602326273918
Iteration: 183
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 214326153216.0
VI rew loss: 0.14820203557610512
Iteration: 184
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 116309394432.0
VI rew loss: 0.09848964959383011
Iteration: 185
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 111974277120.0
VI rew loss: 0.0717665278352797
Iteration: 186
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 223457343488.0
VI rew loss: 0.16117745079100132
Iteration: 187
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 199779919872.0
VI rew loss: 0.11707193963229656
Iteration: 188
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 145722027008.0
VI rew loss: 0.09165756683796644
Iteration: 189
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 182713269248.0
VI rew loss: 0.11017641052603722
Iteration: 190
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 170021246976.0
VI rew loss: 0.21075423434376717
Iteration: 191
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 134302699520.0
VI rew loss: 0.08830343699082732
Iteration: 192
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 168613201920.0
VI rew loss: 0.13336113281548023
Iteration: 193
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 166641993728.0
VI rew loss: 0.10129086952656507
Iteration: 194
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 131544517632.0
VI rew loss: 0.09604766219854355
Iteration: 195
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 223332646912.0
VI rew loss: 0.12269244901835918
Iteration: 196
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 211613865984.0
VI rew loss: 0.047410584054887295
Iteration: 197
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 160052350976.0
VI rew loss: 0.09252642188221216
Iteration: 198
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 162992409600.0
VI rew loss: 0.09809926757588983
Iteration: 199
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 221051548672.0
VI rew loss: 0.3039240464568138
Iteration: 200
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 167057232896.0
VI rew loss: 0.13129697181284428
Iteration: 201
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 163935376384.0
VI rew loss: 0.11697721853852272
Iteration: 202
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 188907688960.0
VI rew loss: 0.2334849312901497
Iteration: 203
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 193946085376.0
VI rew loss: 0.20327800326049328
Iteration: 204
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 181984690176.0
VI rew loss: 0.12707754410803318
Iteration: 205
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 110849291264.0
VI rew loss: 0.05485659744590521
Iteration: 206
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 186725289984.0
VI rew loss: 0.19602025486528873
Iteration: 207
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 163301622784.0
VI rew loss: 0.14630595594644547
Iteration: 208
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 182152700928.0
VI rew loss: 0.28762832656502724
Iteration: 209
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 145919255552.0
VI rew loss: 0.16769177466630936
Iteration: 210
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 165326085120.0
VI rew loss: 0.06580573692917824
Iteration: 211
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 180111609856.0
VI rew loss: 0.13163764774799347
Iteration: 212
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 157638571008.0
VI rew loss: 0.13601440843194723
Iteration: 213
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 252460478464.0
VI rew loss: 0.08851789310574532
Iteration: 214
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 157396128768.0
VI rew loss: 0.08676579035818577
Iteration: 215
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 246121678848.0
VI rew loss: 0.224672494456172
Iteration: 216
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 228339559424.0
VI rew loss: 0.12695945799350739
Iteration: 217
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 163975071744.0
VI rew loss: 0.20647362247109413
Iteration: 218
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 194859476992.0
VI rew loss: 0.15751700475811958
Iteration: 219
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 202465806336.0
VI rew loss: 0.19170633144676685
Iteration: 220
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 133770531840.0
VI rew loss: 0.10051118582487106
Iteration: 221
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 129822797824.0
VI rew loss: 0.18532654829323292
Iteration: 222
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 136019631104.0
VI rew loss: 0.08428459474816918
Iteration: 223
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 189418385408.0
VI rew loss: 0.09865020588040352
Iteration: 224
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 173184151552.0
VI rew loss: 0.18365289829671383
Iteration: 225
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 127852124160.0
VI rew loss: 0.2313352096825838
Iteration: 226
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 179202219008.0
VI rew loss: 0.07749115861952305
Iteration: 227
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 148518419456.0
VI rew loss: 0.14466810785233974
Iteration: 228
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 132018975744.0
VI rew loss: 0.17731979116797447
Iteration: 229
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 173103526912.0
VI rew loss: 0.18274514749646187
Iteration: 230
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 149813666816.0
VI rew loss: 0.09398133913055062
Iteration: 231
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 196948517888.0
VI rew loss: 0.11534114368259907
Iteration: 232
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 196643662848.0
VI rew loss: 0.1786652859300375
Iteration: 233
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 133529669632.0
VI rew loss: 0.09851820580661297
Iteration: 234
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 183093883904.0
VI rew loss: 0.1578165302053094
Iteration: 235
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 165705619456.0
VI rew loss: 0.2236646618694067
Iteration: 236
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 132358703104.0
VI rew loss: 0.1223121676594019
Iteration: 237
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 203255888896.0
VI rew loss: 0.29121827706694603
Iteration: 238
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 147219161088.0
VI rew loss: 0.09123740158975124
Iteration: 239
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 177358277632.0
VI rew loss: 0.11394925601780415
Iteration: 240
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 112963330560.0
VI rew loss: 0.07142686937004328
Iteration: 241
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 149064308736.0
VI rew loss: 0.11055933870375156
Iteration: 242
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 160167857152.0
VI rew loss: 0.14458773285150528
Iteration: 243
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 145258217472.0
VI rew loss: 0.09411309938877821
Iteration: 244
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 133913212416.0
VI rew loss: 0.06532234698534012
Iteration: 245
Step: 0
Step: 1
Step: 2
Step: 3
Step: 4
VI env loss: 142535607296.0
VI rew loss: 0.06529761338606477
Iteration: 246
|
notebooks/10_pivotacion_tablas.ipynb | ###Markdown
Pivoting tablesLet's see how to transform tables from wide format to long format and vice versa
###Code
import pandas as pd
air = pd.read_csv("dat/airquality.csv")
air.head()
###Output
_____no_output_____
###Markdown
Melt: from wide to longTo go from wide format to long format, we can use [`melt`](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.melt.html)
###Code
air_long = air.melt(id_vars=['month', 'day'])
air_long.head()
###Output
_____no_output_____
###Markdown
We see that, for each month and day, we now have two columns: the measured variable and its value.In the long format, each row contains the index (in this case, month and day), a value, and labels for that value. Pivot: from long to wideTo go from long format to wide format, we can use [`pivot_table`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot_table.html)
###Code
air_wide = air_long.pivot_table(index=['month', 'day'], columns='variable', values='value')
air_wide.head()
###Output
_____no_output_____
###Markdown
Hierarchical indexes are often awkward to work with. We can remove them with `reset_index()`
###Code
air_wide = air_wide.reset_index()
air_wide.head()
###Output
_____no_output_____ |
bdc-samples/data-virtualization/mssql_spark_connector_sparkr.ipynb | ###Markdown
Using MSSQLSpark Connector from SparkR
The following notebook demonstrates usage of the MSSQLSpark connector from SparkR. For the full set of capabilities supported by the MSSQLSpark Connector, refer to **mssql_spark_connector_non_ad_pyspark.ipynb** PreReq
-------
- Download [AdultCensusIncome.csv]( https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv ) to your local machine. Upload this file to the HDFS folder named *spark_data*.
- The sample uses a SQL database *connector_test_db*, user *connector_user* with password *password123!* and datasource *connector_ds*. The database, user/password and datasource need to be created before running the full sample. Refer to **data-virtualization/mssql_spark_connector_user_creation.ipynb** for steps to create this user. Load a CSV file to a dataframe
###Code
people <- read.df("/spark_data/AdultCensusIncome.csv", "csv")
head(people)
###Output
Starting Spark application
###Markdown
Use the MSSQL Spark connector to save the dataframe as a table in SQL Server
###Code
#Using default JDBC connector
#Using MSSQLSpark connector
dbname = "connector_test_db"
url = paste("jdbc:sqlserver://master-0.master-svc;databaseName=", "connector_test_db", sep="")
print(url)
dbtable = "AdultCensus_test_sparkr"
user = "connector_user"
password = "password123!#" # Please specify password here
saveDF(people,
dbtable,
source = "com.microsoft.sqlserver.jdbc.spark",
url = url,
dbtable=dbtable,
mode = "overwrite",
user=user,
password=password)
###Output
[1] "jdbc:sqlserver://master-0.master-svc;databaseName=connector_test_db" |
ACMMM.ipynb | ###Markdown
Deep Neural Network Model (AlexNet)
###Code
class KitModel(nn.Module):
def __init__(self):
super(KitModel, self).__init__()
self.conv1 = nn.Conv2d(3, 96, (11, 11), stride=4, padding=0)
self.conv2 = nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=1, groups=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, groups=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, groups=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, groups=2, padding=1)
self.fc6_1 = nn.Linear(in_features = 9216, out_features = 4096)
self.fc7_1 = nn.Linear(in_features = 4096, out_features = 4096)
self.ip_1 = nn.Linear(in_features = 4096, out_features = 1)
self.relu = nn.ReLU()
self.drop = nn.Dropout()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, x):
conv1 = self.conv1(x)
relu1 = self.relu(conv1)
pool1 = self.maxpool(relu1)
norm1 = self.LRN(size = 5, alpha=0.0001, beta=0.75)(pool1)
conv2 = self.conv2(norm1)
relu2 = self.relu(conv2)
pool2 = self.maxpool(relu2)
norm2 = self.LRN(size = 5, alpha=0.0001, beta=0.75)(pool2)
conv3 = self.conv3(norm2)
relu3 = self.relu(conv3)
conv4 = self.conv4(relu3)
relu4 = self.relu(conv4)
conv5 = self.conv5(relu4)
relu5 = self.relu(conv5)
pool5 = self.maxpool(relu5)
fc6_0 = pool5.view(pool5.size(0), -1)
fc6_1 = self.fc6_1(fc6_0)
relu6 = self.relu(fc6_1)
drop6 = self.drop(relu6)
fc7_1 = self.fc7_1(drop6)
relu7 = self.relu(fc7_1)
ip_0 = self.drop(relu7)
ip_1 = self.ip_1(ip_0)
return ip_1
class LRN(nn.Module):
def __init__(self, size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True):
super(KitModel.LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average=nn.AvgPool3d(kernel_size=(size, 1, 1),
stride=1,
padding=(int((size-1.0)/2), 0, 0))
else:
self.average=nn.AvgPool2d(kernel_size=size,
stride=1,
padding=int((size-1.0)/2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
class PandasDataset(Dataset):
def __init__(self, list_images, list_targets, transform=None):
self.list_images = list_images
self.list_targets = list_targets
# add transforms as well
self.transform = transform
def __getitem__(self, idx):
image = Image.open(self.list_images[idx]).convert('RGB')
image = image.resize((227,227), Image.BILINEAR)
image = np.array(image, dtype='f4')
# Convert RGB to BGR
image = image[:, :, ::-1]
image = image.astype('float32')
# add transforms
if self.transform:
image = self.transform(image)
return image, self.list_targets[idx]
def __len__(self):
return len(self.list_images)
model = KitModel()
model.load_state_dict(torch.load('generated_files/pytorch_state.npy'))
model.train(False)
model.eval()
batch_size = 30
file_list = [
'streetview_image.jpg',
]
# I'm interested only in testing the predictions, so label=0
labels = [
0
]
###Output
_____no_output_____
###Markdown
Example of image
###Code
image = Image.open(file_list[0]).convert('RGB')
imshow(np.array(image))
means = np.load('generated_files/places205CNN_mean_filtered.npy')
transformations = transforms.Compose([lambda x: x - means, # Subtracts image means
transforms.ToTensor(),
lambda x: x*255] # Restore the input range to [0, 255]
)
dataset = PandasDataset(file_list, labels, transformations)
load = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=10)
preds = np.zeros((len(file_list), 1))
for i, data in enumerate(load):
inputs, labels = data
n = len(inputs)
ifrom = i*batch_size
ito = i*batch_size+n
inputs, labels = Variable(inputs), Variable(labels)
outputs = model(inputs)
preds[ifrom:ito] = outputs.data.numpy()
print("Predicted:", preds)
###Output
Predicted: [[4.77279186]]
|
Submissions/Archive/Project 1 -Attempt 1/Your_first_neural_network.ipynb | ###Markdown
Your first neural networkIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Load and prepare the dataA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
###Code
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
###Output
_____no_output_____
###Markdown
Checking out the dataThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of which likely affect the number of riders. You'll be trying to capture all this with your model.
###Code
rides[:24*10].plot(x='dteday', y='cnt')
###Output
_____no_output_____
###Markdown
Dummy variablesHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
###Code
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
###Output
_____no_output_____
###Markdown
Scaling target variablesTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.The scaling factors are saved so we can go backwards when we use the network for predictions.
###Code
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
data.head()
###Output
_____no_output_____
###Markdown
Splitting the data into training, testing, and validation setsWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
###Code
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
###Output
_____no_output_____
###Markdown
We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
###Code
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
###Output
_____no_output_____
###Markdown
Time to build the networkBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.Below, you have these tasks:1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.2. Implement the forward pass in the `train` method.3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.4. Implement the forward pass in the `run` method.
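As a worked reference for tasks 2 and 3 above (a sketch consistent with the error terms used in the implementation below, assuming a sigmoid hidden activation and the identity output activation, whose derivative is 1):$$\delta_{o} = y - \hat{y}$$$$\delta_{h} = \left(W_{h \to o}\,\delta_{o}\right) \odot h \odot (1 - h)$$where $h$ is the hidden-layer output; the weight steps accumulate $x\,\delta_{h}$ (input to hidden) and $h\,\delta_{o}$ (hidden to output), and are then averaged over the records and scaled by the learning rate.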
###Code
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1/(1+np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y-final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output,error)
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error
hidden_error_term = hidden_error*hidden_outputs*(1-hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term*X[:,None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term*hidden_outputs[:,None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr*delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr*delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
###Output
_____no_output_____
###Markdown
Unit testsRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
###Code
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
###Output
.....
----------------------------------------------------------------------
Ran 5 tests in 0.008s
OK
###Markdown
Training the networkHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterationsThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase. Choose the learning rateThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodesThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn, and if it is too high, there are too many options for the direction that the learning can take. The trick here is to find the right balance in the number of hidden units you choose.
###Code
import sys
### Set the hyperparameters here ###
iterations = 3500
learning_rate = 0.5
hidden_nodes = 25
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']  # .loc instead of the removed .ix indexer
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
###Output
_____no_output_____
###Markdown
Check out your predictionsHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
###Code
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])  # .loc instead of the removed .ix indexer
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
###Output
_____no_output_____ |
jupyter_interactive_widgets/notebooks/reference_guides/.ipynb_checkpoints/guide-flex-box-checkpoint.ipynb | ###Markdown
The Flexbox layoutThe `HBox` and `VBox` classes above are special cases of the `Box` widget.The `Box` widget enables the entire CSS flexbox spec as well as the Grid layout spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.Again, the whole flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items. AcknowledgementThe following flexbox tutorial on the flexbox layout follows the lines of the article [A Complete Guide to Flexbox](https://css-tricks.com/snippets/css/a-guide-to-flexbox/) by Chris Coyier, and uses text and various images from the article [with permission](https://css-tricks.com/license/). Basics and terminologySince flexbox is a whole module and not a single property, it involves a lot of things including its whole set of properties. Some of them are meant to be set on the container (parent element, known as "flex container") whereas the others are meant to be set on the children (known as "flex items").If regular layout is based on both block and inline flow directions, the flex layout is based on "flex-flow directions". Please have a look at this figure from the specification, explaining the main idea behind the flex layout.Basically, items will be laid out following either the `main axis` (from `main-start` to `main-end`) or the `cross axis` (from `cross-start` to `cross-end`).- `main axis` - The main axis of a flex container is the primary axis along which flex items are laid out. Beware, it is not necessarily horizontal; it depends on the flex-direction property (see below).- `main-start | main-end` - The flex items are placed within the container starting from main-start and going to main-end.- `main size` - A flex item's width or height, whichever is in the main dimension, is the item's main size. The flex item's main size property is either the ‘width’ or ‘height’ property, whichever is in the main dimension.cross axis - The axis perpendicular to the main axis is called the cross axis. Its direction depends on the main axis direction.- `cross-start | cross-end` - Flex lines are filled with items and placed into the container starting on the cross-start side of the flex container and going toward the cross-end side.- `cross size` - The width or height of a flex item, whichever is in the cross dimension, is the item's cross size. The cross size property is whichever of ‘width’ or ‘height’ that is in the cross dimension. Properties of the parent display`display` can be `flex` or `inline-flex`. This defines a flex container (block or inline). flex-flow`flex-flow` is a shorthand for the `flex-direction` and `flex-wrap` properties, which together define the flex container's main and cross axes. Default is `row nowrap`.- `flex-direction` (column-reverse | column | row | row-reverse ) This establishes the main-axis, thus defining the direction flex items are placed in the flex container. Flexbox is (aside from optional wrapping) a single-direction layout concept. Think of flex items as primarily laying out either in horizontal rows or vertical columns.- `flex-wrap` (nowrap | wrap | wrap-reverse) By default, flex items will all try to fit onto one line. You can change that and allow the items to wrap as needed with this property. Direction also plays a role here, determining the direction new lines are stacked in. 
justify-content`justify-content` can be one of `flex-start`, `flex-end`, `center`, `space-between`, `space-around`. This defines the alignment along the main axis. It helps distribute extra free space left over when either all the flex items on a line are inflexible, or are flexible but have reached their maximum size. It also exerts some control over the alignment of items when they overflow the line.  align-items`align-items` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This defines the default behaviour for how flex items are laid out along the cross axis on the current line. Think of it as the justify-content version for the cross-axis (perpendicular to the main-axis).  align-content`align-content` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This aligns a flex container's lines within when there is extra space in the cross-axis, similar to how justify-content aligns individual items within the main-axis.**Note**: this property has no effect when there is only one line of flex items. Properties of the itemsThe flexbox-related CSS properties of the items have no impact if the parent element is not a flexbox container (i.e. has a `display` attribute equal to `flex` or `inline-flex`). orderBy default, flex items are laid out in the source order. However, the `order` property controls the order in which they appear in the flex container. flex`flex` is shorthand for three properties, `flex-grow`, `flex-shrink` and `flex-basis` combined. The second and third parameters (`flex-shrink` and `flex-basis`) are optional. Default is `0 1 auto`. - `flex-grow` This defines the ability for a flex item to grow if necessary. It accepts a unitless value that serves as a proportion. It dictates what amount of the available space inside the flex container the item should take up. If all items have flex-grow set to 1, the remaining space in the container will be distributed equally to all children. If one of the children a value of 2, the remaining space would take up twice as much space as the others (or it will try to, at least).  - `flex-shrink` This defines the ability for a flex item to shrink if necessary. - `flex-basis` This defines the default size of an element before the remaining space is distributed. It can be a length (e.g. `20%`, `5rem`, etc.) or a keyword. The `auto` keyword means *"look at my width or height property"*. align-self`align-self` allows the default alignment (or the one specified by align-items) to be overridden for individual flex items. The VBox and HBox helpersThe `VBox` and `HBox` helper classes provide simple defaults to arrange child widgets in vertical and horizontal boxes. They are roughly equivalent to:```Pythondef VBox(*pargs, **kwargs): """Displays multiple widgets vertically using the flexible box model.""" box = Box(*pargs, **kwargs) box.layout.display = 'flex' box.layout.flex_flow = 'column' box.layout.align_items = 'stretch' return boxdef HBox(*pargs, **kwargs): """Displays multiple widgets horizontally using the flexible box model.""" box = Box(*pargs, **kwargs) box.layout.display = 'flex' box.layout.align_items = 'stretch' return box``` Examples **Four buttons in a VBox. Items stretch to the maximum width, in a vertical box taking `50%` of the available space.**
###Code
from ipywidgets import Layout, Button, Box
items_layout = Layout( width='auto') # override the default width of the button to 'auto' to let the button grow
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=word, layout=items_layout, button_style='danger') for word in words]
box = Box(children=items, layout=box_layout)
box
###Output
_____no_output_____
###Markdown
**Three buttons in an HBox. Items flex proportionally to their weight.**
###Code
from ipywidgets import Layout, Button, Box, VBox
# Items flex proportionally to the weight and the left over space around the text
items_auto = [
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'),
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
]
# Items flex proportionally to the weight
items_0 = [
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'),
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='70%')
box_auto = Box(children=items_auto, layout=box_layout)
box_0 = Box(children=items_0, layout=box_layout)
VBox([box_auto, box_0])
###Output
_____no_output_____
###Markdown
**A more advanced example: a reactive form.**The form is a `VBox` of width '50%'. Each row in the VBox is an HBox that justifies the content with space between.
###Code
from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
form_items = [
Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout),
Box([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
Box([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
Box([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = Box(form_items, layout=Layout(
display='flex',
flex_flow='column',
border='solid 2px',
align_items='stretch',
width='50%'
))
form
###Output
_____no_output_____
###Markdown
**A more advanced example: a carousel.**
###Code
from ipywidgets import Layout, Button, Box, Label
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow_x='scroll',
border='3px solid black',
width='500px',
height='',
flex_flow='row',
display='flex')
carousel = Box(children=items, layout=box_layout)
VBox([Label('Scroll horizontally:'), carousel])
###Output
_____no_output_____
###Markdown
*Compatibility note*The `overflow_x` and `overflow_y` options are deprecated in ipywidgets `7.5`. Instead, use the shorthand property `overflow='scroll hidden'`. The first part specifies overflow in `x`, the second the overflow in `y`. A widget for exploring layout optionsThe widget below was written by ipywidgets user [Doug Redden (@DougRzz)](https://github.com/DougRzz). If you want to look through the source code to see how it works, take a look at this [notebook he contributed](cssJupyterWidgetStyling-UI.ipynb).Use the dropdowns and sliders in the widget to change the layout of the box containing the five colored buttons. Many of the CSS layout options described above are available, and the Python code to generate a `Layout` object reflecting the settings is in a `TextArea` in the widget.
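For example, a minimal sketch of the carousel above rebuilt with the newer shorthand (assuming ipywidgets 7.5 or later):

```python
from ipywidgets import Layout, Button, Box

# same carousel as above, but using the `overflow` shorthand instead of `overflow_x`
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow='scroll hidden',  # scroll in x, hide overflow in y
                    border='3px solid black',
                    width='500px',
                    flex_flow='row',
                    display='flex')
Box(children=items, layout=box_layout)
```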
###Code
#from layout_preview import layout
#layout
###Output
_____no_output_____ |
Chapter06/ProductRecommendation.ipynb | ###Markdown
1. Load Data
###Code
#May take a minute to run
df = pd.read_excel(io='../data/Online Retail.xlsx', sheet_name='Online Retail', engine='openpyxl') #engine needed to be updated
df.shape
df.head()
df = df.loc[df['Quantity'] > 0]
###Output
_____no_output_____
###Markdown
2. Data Preparation - Handle NaNs in CustomerID field
###Code
df['CustomerID'].describe()
df['CustomerID'].isna().sum()
df.loc[df['CustomerID'].isna()].head()
df.shape
df = df.dropna(subset=['CustomerID'])
df.shape
df.head()
###Output
_____no_output_____
###Markdown
- Customer-Item Matrix
###Code
customer_item_matrix = df.pivot_table(
index='CustomerID',
columns='StockCode',
values='Quantity',
aggfunc='sum'
)
customer_item_matrix.loc[12481:].head()
customer_item_matrix.shape
df['StockCode'].nunique()
df['CustomerID'].nunique()
customer_item_matrix.loc[12348.0].sum()
customer_item_matrix = customer_item_matrix.applymap(lambda x: 1 if x > 0 else 0)
customer_item_matrix.loc[12481:].head()
###Output
_____no_output_____
###Markdown
3. Collaborative Filtering
###Code
from sklearn.metrics.pairwise import cosine_similarity
###Output
_____no_output_____
###Markdown
3.1. User-based Collaborative Filtering - User-to-User Similarity Matrix
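As a quick reminder of what `cosine_similarity` computes here, for two customers' purchase vectors $u$ and $v$ taken from the 0/1 customer-item matrix built above:$$\text{cosine}(u, v) = \frac{u \cdot v}{\lVert u \rVert \, \lVert v \rVert}$$For these binary vectors the score ranges from 0 (no items in common) to 1 (identical purchase sets).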
###Code
user_user_sim_matrix = pd.DataFrame(
cosine_similarity(customer_item_matrix)
)
user_user_sim_matrix.head()
user_user_sim_matrix.columns = customer_item_matrix.index
user_user_sim_matrix['CustomerID'] = customer_item_matrix.index
user_user_sim_matrix = user_user_sim_matrix.set_index('CustomerID')
user_user_sim_matrix.head()
###Output
_____no_output_____
###Markdown
- Making Recommendations
###Code
user_user_sim_matrix.loc[12350.0].sort_values(ascending=False)
items_bought_by_A = set(customer_item_matrix.loc[12350.0].iloc[
customer_item_matrix.loc[12350.0].to_numpy().nonzero() #update was required
].index)
items_bought_by_A
items_bought_by_B = set(customer_item_matrix.loc[17935.0].iloc[
customer_item_matrix.loc[17935.0].to_numpy().nonzero() #update was required
].index)
items_bought_by_B
items_to_recommend_to_B = items_bought_by_A - items_bought_by_B
items_to_recommend_to_B
df.loc[
df['StockCode'].isin(items_to_recommend_to_B),
['StockCode', 'Description']
].drop_duplicates().set_index('StockCode')
###Output
_____no_output_____
###Markdown
3.2. Item-based Collaborative Filtering - Item-to-Item Similarity Matrix
###Code
item_item_sim_matrix = pd.DataFrame(cosine_similarity(customer_item_matrix.T))
item_item_sim_matrix.columns = customer_item_matrix.T.index
item_item_sim_matrix['StockCode'] = customer_item_matrix.T.index
item_item_sim_matrix = item_item_sim_matrix.set_index('StockCode')
item_item_sim_matrix
###Output
_____no_output_____
###Markdown
- Making Recommendations
###Code
top_10_similar_items = list(
item_item_sim_matrix\
.loc[23166]\
.sort_values(ascending=False)\
.iloc[:10]\
.index
)
top_10_similar_items
df.loc[
df['StockCode'].isin(top_10_similar_items),
['StockCode', 'Description']
].drop_duplicates().set_index('StockCode').loc[top_10_similar_items]
###Output
_____no_output_____ |
04. Performing Analysis with the Python API/04 - geoenrichment/05 - Geoenrichment.ipynb | ###Markdown
GeoEnrichment GeoEnrichment provides the ability to * get facts about a location or area. * information about the people, places, and businesses * in a specific area or * within a certain distance or drive time from a location.* large collection of data sets including population, income, housing, consumer behavior, and the natural environment.* Site analysis is a popular application Login
###Code
from arcgis.gis import GIS
from arcgis.geoenrichment import *
gis = GIS(profile='agol_profile', verify_cert=False)
###Output
_____no_output_____
###Markdown
GeoEnrichment coverage
###Code
countries = get_countries()
print("Number of countries for which GeoEnrichment data is available: " + str(len(countries)))
#print a few countries for a sample
countries[0:10]
###Output
_____no_output_____
###Markdown
Filtering countries by properties
###Code
[c.properties.name for c in countries if c.properties.continent == 'Oceania']
###Output
_____no_output_____
###Markdown
Discovering information for a country* Data collections, * Sub-geographies and * Available reports for a country
###Code
aus = Country.get('Australia')
###Output
_____no_output_____
###Markdown
Commonly used properties for the country are accessible using `Country.properties`.
###Code
aus.properties.name
###Output
_____no_output_____
###Markdown
Data collections and analysis variables
###Code
df = aus.data_collections
df.head()
# call the shape property to get the total number of rows and columns
df.shape
###Output
_____no_output_____
###Markdown
Query the `EducationalAttainment` data collection and get all the unique `analysisVariable`s under that collection
###Code
df.loc['EducationalAttainment']['analysisVariable'].unique()
# view a sample of the `Age` data collection
df.loc['EducationalAttainment'].head()
###Output
_____no_output_____
###Markdown
Enriching an address
###Code
sdf = enrich(study_areas=["Parliament Dr, Canberra ACT 2600, Australia"],
data_collections=['EducationalAttainment'])
sdf.spatial.plot()
###Output
_____no_output_____
###Markdown
Reports
###Code
aus.reports.head(6)
# total number of reports available
aus.reports.shape
###Output
_____no_output_____
###Markdown
Creating Reports
###Code
import tempfile
report = create_report(study_areas=["Parliament Dr, Canberra ACT 2600, Australia"],
report="AustraliaFoodAndBeverageSpendingMDS",
export_format="PDF",
out_folder=tempfile.gettempdir(), out_name="FoodAndBeverageSpending.pdf")
print(report)
###Output
_____no_output_____
###Markdown
Finding named statistical areasEach country has several named statistical areas in a hierarchy of geography levels (such as states, counties, zip codes, etc).
###Code
%config IPCompleter.greedy=True
de = Country.get("Germany")
de.subgeographies.states['Hamburg']
de.subgeographies.states["Hamburg"].districts['Hamburg,_Freie_und_Hansestadt']
de.subgeographies.postcodes2['Berlin']
###Output
_____no_output_____
###Markdown
The named areas can also be drawn on a map, as they include a `geometry` property.
###Code
m1 = gis.map('Hamburg, Germany', zoomlevel=9)
m1
m1.draw(de.subgeographies.states["Hamburg"].districts['Hamburg,_Freie_und_Hansestadt'].geometry)
###Output
_____no_output_____
###Markdown
Different geography levels for different countries
###Code
india = Country.get('India')
india.subgeographies.states['Uttar_Pradesh'].districts['Baghpat'].subdistricts['Baraut']
###Output
_____no_output_____
###Markdown
Searching for named areas within a country
###Code
usa = Country.get('United States')  # `usa` is not defined earlier in this notebook, so fetch it here
riversides_in_usa = usa.search('Riverside')
print("number of riversides in the US: " + str(len(riversides_in_usa)))
# list a few of them
riversides_in_usa[:10]
###Output
_____no_output_____
###Markdown
For instance, you can make a map of all the riversides in the US
###Code
usamap = gis.map('United States', zoomlevel=4)
usamap
for riverside in riversides_in_usa:
usamap.draw(riverside.geometry)
###Output
_____no_output_____
###Markdown
Filtering named areas by geography level
###Code
[level['id'] for level in usa.levels]
usa.search(query='Riverside', layers=['US.Counties'])
###Output
_____no_output_____
###Markdown
Study Areas Accepted forms of study areas- **Street address locations** - Locations can be passed as strings of input street addresses, points of interest or place names. + **Example:** `"380 New York St, Redlands, CA"`- **Multiple field input addresses** - Locations described as multiple field input addresses, using dictionaries. + **Example:** {"Address" : "380 New York Street", "City" : "Redlands", "Region" : "CA", "Postal" : 92373} - **Point and line geometries** - Point and line locations, using `arcgis.geometry` instances. + **Example Point Location: ** `arcgis.geometry.Geometry({"x":-122.435,"y":37.785})` + ** Example Point location obtained using find_businesses() above: ** `arcgis.geometry.Geometry(businesses.iloc[0]['SHAPE'])`- **Buffered study areas** - `BufferStudyArea` instances to change the ring buffer size or create drive-time service areas around points specified using one of the above methods. BufferStudyArea allows you to buffer point and street address study areas. They can be created using the following parameters: * area: the point geometry or street address (string) study area to be buffered * radii: list of distances by which to buffer the study area, eg. [1, 2, 3] * units: distance unit, eg. Miles, Kilometers, Minutes (when using drive times/travel_mode) * overlap: boolean, uses overlapping rings/network service areas when True, or non-overlapping disks when False * travel_mode: None or string, one of the supported travel modes when using network service areas + **Example Buffered Location: ** `pt = arcgis.geometry.Geometry({"x":-122.435,"y":37.785}) buffered_area = BufferStudyArea(area=pt, radii=[1,2,3], units="Miles", overlap=False)` - **Network service areas** - `BufferStudyArea` also allows you to define drive time service areas around points as well as other advanced service areas such as walking and trucking. + **Example: ** `pt = arcgis.geometry.Geometry({"x":-122.435,"y":37.785}) buffered_area = BufferStudyArea(area=pt, radii=[1,2,3], units="Minutes", travel_mode="Driving")` - **Named statistical areas** - + **Example:** `usa.subgeographies.states['California'].zip5['92373']` - **Polygon geometries** - Locations can given as polygon geometries. + **Example Polygon geometry: ** `arcgis.geometry.Geometry({"rings":[[[-117.185412,34.063170],[-122.81,37.81],[-117.200570,34.057196],[-117.185412,34.063170]]],"spatialReference":{"wkid":4326}})` Example: Enriching a named statistical areaEnriching zip code 92373 in California using the 'Age' data collection:
###Code
redlands = usa.subgeographies.states['California'].zip5['92373']
enrich(study_areas=[redlands], data_collections=['Age'] )
###Output
_____no_output_____
###Markdown
Example: Enrich all counties in a state
###Code
ca_counties = usa.subgeographies.states['California'].counties
counties_df = enrich(study_areas=ca_counties, data_collections=['Age'])
counties_df.head(10)
m2 = gis.map('California')
m2
item = gis.content.import_data(df=counties_df, title="CA county population")
item
m2.add_layer(item.layers[0], {'renderer': 'ClassedColorRenderer',
'field_name':'FEM0'})
item.delete()
###Output
_____no_output_____
###Markdown
Example: Using comparison levels
###Code
enrich(study_areas=[redlands], data_collections=['Age'],
comparison_levels=['US.Counties', 'US.States'])
###Output
_____no_output_____
###Markdown
Example: Buffering locations using non overlapping disks The example below creates non-overlapping disks of radii 1, 3 and 5 Miles respectively from a street address and enriches these using the 'Age' data collection.
###Code
buffered = BufferStudyArea(area='380 New York St Redlands CA 92373',
radii=[1,3,5], units='Miles', overlap=False)
enrich(study_areas=[buffered], data_collections=['Age'])
###Output
_____no_output_____
###Markdown
Example: Using drive times as study areas The example below creates 5 and 10 minute drive times from a street address and enriches these using the 'Age' data collection.
###Code
buffered = BufferStudyArea(area='380 New York St Redlands CA 92373',
radii=[5, 10], units='Minutes',
travel_mode='Driving')
drive_time_df = enrich(study_areas=[buffered], data_collections=['Age'])
drive_time_df
###Output
_____no_output_____
###Markdown
Visualize results on a map The returned spatial dataframe can be visualized on a map as shown below:
###Code
redlands_map = gis.map('Redlands, CA')
redlands_map.basemap = 'dark-gray-vector'
redlands_map
drive_time_df.spatial.plot(redlands_map,
renderer_type='c', # for class breaks renderer
method='esriClassifyNaturalBreaks', # classification algorithm
class_count=3, # choose the number of classes
col='bufferRadii', # numeric column to classify
cmap='prism', # color map to pick colors from for each class
alpha=0.7) # specify opacity
###Output
_____no_output_____
###Markdown
Saving GeoEnrichment Results
###Code
gis.content.import_data(df=drive_time_df, title="Age statistics within 5,10 minutes of drive time from Esri")
###Output
_____no_output_____ |
03 Exception Handling/00_Exception_Handling_A.ipynb | ###Markdown
Fast Python3 For Beginners___ ```try...except...finally...``` 1.try
###Code
try:
print('try...')
r = 10/0
print('result:', r)
except ZeroDivisionError as e:
print('except:', e)
finally:
print('finally')
print('END')
###Output
try...
except: division by zero
finally
END
###Markdown
**logging error**> Python's built-in `logging` module makes it very easy to **record error messages**:
###Code
# err_logging.py
import logging
def foo(s):
return 10 / int(s)
def bar(s):
return foo(s) * 2
def main():
try:
bar('0')
except Exception as e:
logging.exception(e)
main()
print('END')
###Output
ERROR:root:division by zero
Traceback (most recent call last):
File "<ipython-input-2-16a5e7b57b85>", line 12, in main
bar('0')
File "<ipython-input-2-16a5e7b57b85>", line 8, in bar
return foo(s) * 2
File "<ipython-input-2-16a5e7b57b85>", line 5, in foo
return 10 / int(s)
ZeroDivisionError: division by zero
###Markdown
**raising error**
###Code
try:
a = input("type a number")
r = 10 / int(a)
if int(a) < 0:
raise ValueError
print('r:', r)
except ZeroDivisionError as e:
logging.exception(e)
finally:
print("finally")
print("END")
###Output
type a number-1
finally
###Markdown
2.Debug **Method_1: `print()`**> Use `print()` to print out variables that may be problematic. **Method_2: `assert`**> Anywhere you would use `print()` to inspect values, you can use an assertion (`assert`) instead: `assert` means that the expression, e.g. `n != 0`, should be True; otherwise, given the logic of the program, the code that follows is bound to fail. If the assertion fails, the assert statement itself raises an `AssertionError`.
###Code
def foo(s):
n = int(s)
assert n != 0, 'n is zero!'
return 10 / n
def main():
foo('0')
main()
###Output
_____no_output_____
###Markdown
**Method_3: `logging`**> Compared with `assert`, `logging` won't throw errors, and it can write messages to a file.
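For instance, a minimal sketch of sending records to a file instead of the console (the filename `err.log` is just an illustrative choice):

```python
import logging

# write INFO-level (and above) records to a file instead of stderr
logging.basicConfig(filename='err.log', level=logging.INFO)
logging.info('n = %d', 0)
```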
###Code
import logging
logging.basicConfig(level=logging.INFO)
s = '0'
n = int(s)
logging.info('n = %d' % n)
print(10 / n)
'Done!\N{Cat}'
###Output
_____no_output_____ |
notebooks/context_design/pymc3_samplers.ipynb | ###Markdown
Using PyMC3 samplers on PyMC4 models
###Code
%load_ext autoreload
%autoreload 2
import pymc4 as pm
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Create simple pymc4 model
@pm.model(auto_name=True)
def t_test():
mu = pm.Normal(0, 1)
model = t_test.configure()
model._forward_context.vars
func = model.make_log_prob_function()
# Create function to evaluate logp and dlogp over array of inputs
@tf.function
def logp_array(array):
#mu = array[0]
with tf.GradientTape() as tape:
tape.watch(array)
logp = func(array)
grad = tape.gradient(logp, array)
return logp, grad
# As the above function expects TF inputs and outputs, wrap it as PyMC3's samplers want numpy
def logp_wrapper(array):
logp, grad = logp_array(tf.convert_to_tensor(array))
return logp.numpy(), grad.numpy()
from pymc4.hmc import HamiltonianMC
size = 1
n_samples = 500
tf.random.set_seed(123)
np.random.seed(123)
hmc = HamiltonianMC(logp_dlogp_func=logp_wrapper, size=size, adapt_step_size=False)
curr = np.ones(size, dtype='float32') * .05
posterior_samples = []
stats = []
# %%time # NB: uncommenting cell magic %%time will prevent variable from escaping local cell scope
for i in range(n_samples):
curr, stat = hmc.step(curr)
posterior_samples.append(curr)
stats.append(stat)
if i % 10 == 0:
print(i)
print(hmc.step_size)
trace = np.array(posterior_samples)
###Output
0
0.25
10
2.4338525715809687
20
0.8835782022550135
30
2.1327029448576513
40
0.6636198591746387
50
0.7703623177523709
60
1.8082085533140866
70
2.016802124717785
80
0.6217015104339201
90
1.207940992513313
100
1.3240099603905968
110
1.346048853571611
120
1.4427577775129503
130
0.8565491707334623
140
0.9624691955794613
150
2.4720718932582906
160
1.5805445755750056
170
2.1380879550318665
180
1.420551901927288
190
1.772666882095104
200
1.4909935183397898
210
0.9564664645510188
220
1.0094633430712847
230
0.9415898885107509
240
0.9789093380505235
250
1.006542783540078
260
0.9103849564026153
270
1.55622156748228
280
3.088911988511081
290
1.4383936543460296
300
1.1121804701838218
310
1.424086887611018
320
2.20499869286127
330
1.519518011454376
340
1.3419589165701242
350
1.9586890867688238
360
1.6645487485996129
370
0.9915868782338413
380
1.0335682127160022
390
2.580381765734188
400
0.8539483762542414
410
1.6204564049940595
420
1.7482972684183884
430
1.0932177164504921
440
1.1029667164138135
450
1.2918166766210588
460
1.2928383047613468
470
1.6941335213881863
480
2.3599326552982838
490
1.3764049670244904
###Markdown
Compare with `PyMC3`
###Code
import pymc3 as pm3
with pm3.Model() as model3:
pm3.Normal('x', 0, 1)
np.random.seed(123)
with model3:
hmc3 = pm3.HamiltonianMC(adapt_step_size=True)
point = {'x': np.array(.05)}
trace3 = []
%%time
for i in range(n_samples):
point, _ = hmc3.step(point)
trace3.append(point['x'])
if i % 10 == 0:
print(i)
print(hmc3.step_size)
import seaborn as sns
sns.distplot(trace)
sns.distplot(trace3)
###Output
_____no_output_____
###Markdown
There still seems to be a problem here: in the PyMC4 implementation, the step_size keeps getting smaller and smaller, causing the sampler to take very long. We haven't figured it out yet.
###Code
hmc.step_size
hmc3.step_size
hmc.potential._stds
hmc3.potential._stds
###Output
_____no_output_____ |
mdn-tf2.ipynb | ###Markdown
MDN Hands On TutorialThis notebook demonstrates the construction of a simple MDN and compares it to a regular neural network.Read about MDNs in the [original paper](https://publications.aston.ac.uk/373/1/NCRG_94_004.pdf) by C. Bishop. > This revision has been adapted to work with TensorFlow 2. Note that `tensorflow_probability` has been moved from the main code to its own package (`pip install tensorflow_probability`)
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from tensorflow import keras
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Layer, Input
###Output
Using TensorFlow backend.
###Markdown
The network we'll construct will try to learn the following relation between $x$ and $f(x)$: $$f(x) = x^2-6x+9$$Note that this is simply $y = x^2$ shifted three units to the right (the global minimum is at $x=3$).
###Code
def f(x):
return x**2-6*x+9
###Output
_____no_output_____
###Markdown
In order to make the data a little bit more realistic, we'll add normally-distributed noise, which will be location-dependent: the larger $x$ is, the noisier the data will be. So, our data generator will obey the following relation:$$g(x) = f(x) + \epsilon(x) $$ $$ \text{where}: \epsilon(x) = N(0,\sigma_0 x)$$where $N(\mu,\sigma)$ is the normal distribution with mean $\mu$ and STD of $\sigma$.The `data_generator` function below creates $n$ noisy data samples for a given `x`, where $n$ is defined by `samples`. Notice that technically, `data_generator` yields $g(x) = N(f(x),\sigma_0 x)$, as mathematically that's the same thing.
###Code
def data_generator(x,sigma_0,samples):
return np.random.normal(f(x),sigma_0*x,samples)
###Output
_____no_output_____
###Markdown
We'll now generate our dataset for $1<x<5$.The purple line in the plot presents the "clean" function $f(x)$ for this range.
###Code
sigma_0 = 0.1
x_vals = np.arange(1,5.2,0.2)
x_arr = np.array([])
y_arr = np.array([])
samples = 50
for x in x_vals:
x_arr = np.append(x_arr, np.full(samples,x))
y_arr = np.append(y_arr, data_generator(x,sigma_0,samples))
x_arr, y_arr = shuffle(x_arr, y_arr)
x_test = np.arange(1.1,5.1,0.2)
fig, ax = plt.subplots(figsize=(10,10))
plt.grid(True)
plt.xlabel('x')
plt.ylabel('g(x)')
ax.scatter(x_arr,y_arr,label='sampled data')
ax.plot(x_vals,list(map(f,x_vals)),c='m',label='f(x)')
ax.legend(loc='upper center',fontsize='large',shadow=True)
plt.show()
###Output
_____no_output_____
###Markdown
Regular neural networkWe'll now train a neural network which will receive $x$ as input and our noisy $g(x)$ as the target, but will have to learn the relation $x \rightarrow f(x)$.The network is constructed of two hidden layers, each with 12 nodes and the $\tanh(x)$ activation function (note we use a linear activation on the last output layer, which is the same as no activation function at all, since Keras `Dense` layers default to a linear activation when none is given).We set the learning rate $\alpha=0.0003$, 50 examples per mini-batch and a total of 500 epochs.
###Code
epochs = 500
batch_size = 50
learning_rate = 0.0003
model = Sequential()
model.add(Dense(12,input_shape=(1,),activation="tanh"))
model.add(Dense(12,activation="tanh"))
model.add(Dense(1,activation="linear"))
adamOptimizer = optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='mse',optimizer=adamOptimizer,metrics=['mse'])
history_cache = model.fit(x_arr,
y_arr,
verbose=0, # write =1 if you wish to see the progress for each epoch
epochs=epochs,
batch_size=batch_size)
y_pred = model.predict(x_test)
fig, ax = plt.subplots(figsize=(10,10))
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
ax.scatter(x_arr,y_arr,c='b',label='sampled data')
ax.scatter(x_test,y_pred,c='r',label='predicted values')
ax.plot(x_vals,list(map(f,x_vals)),c='m',label='f(x)')
ax.legend(loc='upper center',fontsize='large',shadow=True)
plt.show()
print('Final cost: {0:.4f}'.format(history_cache.history['mse'][-1]))
###Output
_____no_output_____
###Markdown
It seems to be doing quite good in predicting $f(x)$, but we can clearly see that the network learnt nothing about the size of the noise. Mixture density network (MDN)Let's try an MDN now. We'll use the same network as in the previous section, with one important change:the output layer now has two nodes (which are constructed as two layers of 1 node for technical simplicity), which we named `mu` and `sigma`Note the new cost function: we create a normal distribution out of the predicted `mu` and `sigma`, and then minimize the negative log-likelihood of this distribution yielding the target value `y`. Mathematically, our cost function is the negative logarithm of the normal distribution's probability density function (PDF):$$Cost = -\log (PDF) = -\log\left(\frac{1}{\sqrt{2\pi}\sigma}\cdot\exp{\left[-\frac{(y-\mu)^{2}}{2\sigma^{2}}\right]}\right)$$
###Code
def mdn_cost(mu, sigma, y):
dist = tfp.distributions.Normal(loc=mu, scale=sigma)
return tf.reduce_mean(-dist.log_prob(y))
###Output
_____no_output_____
###Markdown
We'll use `elu + 1` as the activation function for `sigma`, as it must always be non-negative. The Exponential Linear Unit (ELU) is defined as:$$ ELU(x) = \begin{cases} x & x\ge0 \\ \exp{(x)}-1 & x < 0 \end{cases} $$
###Code
epochs = 500
batch_size = 50
learning_rate = 0.0003
InputLayer = Input(shape=(1,))
Layer_1 = Dense(12,activation="tanh")(InputLayer)
Layer_2 = Dense(12,activation="tanh")(Layer_1)
mu = Dense(1, activation="linear")(Layer_2)
sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
y_real = Input(shape=(1,))
lossF = mdn_cost(mu,sigma,y_real)
model = Model(inputs=[InputLayer, y_real], outputs=[mu, sigma])
model.add_loss(lossF)
adamOptimizer = optimizers.Adam(learning_rate=learning_rate)
model.compile(optimizer=adamOptimizer,metrics=['mse'])
history_cache = model.fit([x_arr, y_arr], #notice we are using an input to pass the real values due to the inner workings of keras
verbose=0, # write =1 if you wish to see the progress for each epoch
epochs=epochs,
batch_size=batch_size)
print('Final cost: {0:.4f}'.format(history_cache.history['loss'][-1]))
mu_pred, sigma_pred = model.predict(list((x_test,x_test))) # the model expects a list of arrays as it has 2 inputs
fig, ax = plt.subplots(figsize=(10,10))
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
ax.errorbar(x_test,mu_pred,yerr=np.absolute(sigma_pred),c='r',ls='None',marker='.',ms=10,label='predicted distributions')
ax.scatter(x_arr,y_arr,c='b',alpha=0.05,label='sampled data')
ax.errorbar(x_vals,list(map(f,x_vals)),yerr=list(map(lambda x: sigma_0*x,x_vals)),c='b',lw=2,ls='None',marker='.',ms=10,label='true distributions')
ax.plot(x_vals,list(map(f,x_vals)),c='m',label='f(x)')
ax.legend(loc='upper center',fontsize='large',shadow=True)
plt.show()
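# A possible follow-up, added here as an illustrative sketch (not part of the original run):
# draw one sample from each predicted Normal(mu, sigma) to visualize the learned noise,
# e.g. by scattering (x_test, sampled_y) on a separate figure.
sampled_y = np.random.normal(loc=mu_pred.ravel(), scale=np.absolute(sigma_pred).ravel())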
###Output
Final cost: 0.2591
|
.ipynb_checkpoints/7-Extraindo uma imagem PNG do GEE com o Pillow-checkpoint.ipynb | ###Markdown
Extracting a PNG image from GEE with PillowIn this notebook, building on the previous example of extracting masks, we will display the images directly in the notebook using the Pillow and Requests libraries and then save them to disk.First, let's import the libraries and initialize GEE:
###Code
# import the libraries
import ee
import PIL
import requests
from PIL import Image
from io import BytesIO
# initialize GEE
ee.Initialize()
###Output
_____no_output_____
###Markdown
Main functions used by this notebook (commented in detail in the previous notebook):
###Code
# Function to apply the water mask to an image from the collection
def mascara_agua(imagem):
qa = imagem.select('pixel_qa')
return qa.bitwiseAnd(1 << 2).eq(0)
# Function to apply the cloud / cloud-shadow mask to an image from the collection
def mascara_nuvem(imagem):
qa = imagem.select('pixel_qa')
return qa.bitwiseAnd(1 << 3).eq(0) and (qa.bitwiseAnd(1 << 5).eq(0)) and (qa.bitwiseAnd(1 << 6).eq(0)) and (qa.bitwiseAnd(1 << 7).eq(0))
# function to apply the masks
def aplicar_mascaras(imagem):
    # create a blank/empty image to avoid background problems when generating a PNG
    # we use dummy values (in this case, white)
vazio = ee.Image(99999)
    # water mask
agua = vazio.updateMask(mascara_agua(imagem).Not()).rename('agua')
    # cloud mask (this will create an image containing only clouds)
    # if the image has no clouds, it will be entirely white
nuvem = vazio.updateMask(mascara_nuvem(imagem).Not()).rename('nuvem')
    # alternatively, in contrast to the previous line, we can REMOVE the clouds
    # note that we dropped the .Not (negation) call
sem_nuvem = vazio.updateMask(mascara_nuvem(imagem)).rename('sem_nuvem')
    # apply the NDVI index
ndvi = imagem.expression('(nir - red) / (nir + red)',{'nir':imagem.select('B5'),'red':imagem.select('B4')}).rename('ndvi')
    # just as we did for NDVI, return an image with the new bands
return imagem.addBands([ndvi,agua,nuvem,sem_nuvem])
# function to apply a mask to a specific band
# (the mask to be applied)
def aplicar_mascara_banda(imagem, banda_mascara, banda_origem, band_destino):
    # First, we apply the desired mask to the source band, which is renamed to the destination band
    # We can even overwrite the source band without any problems
imagem_mascara = imagem.select(banda_origem).updateMask(imagem.select(banda_mascara)).rename(band_destino)
    # Then, we create a blank image that will receive the mask, also renamed to the destination band
imagem_mascara = ee.Image(99999).blend(imagem_mascara).rename(band_destino)
    # Return the image with the new band named with the band_destino string
return imagem.addBands([imagem_mascara])
###Output
_____no_output_____
###Markdown
Now, let's define the geometry and dates (based on latitude and longitude) of our study area and query it in GEE (same as in the previous notebook):
###Code
# Note that a coordinate pair (latitude and longitude) was created as a string and later split on the commas
# This approach is important for when we use the command line
coordenadas = "-48.53801472648439,-22.503806214013736,-48.270222978437516,-22.7281869567509"
# Here we use a Python feature called unpacking
x1,y1,x2,y2 = coordenadas.split(",")
# Create the geometry based on the coordinates split above
geometria = geometry = ee.Geometry.Polygon(
[[[float(x1),float(y2)],
[float(x2),float(y2)],
[float(x2),float(y1)],
[float(x1),float(y1)],
[float(x1),float(y2)]]])
# Date string
datas = "2014-10-13,2014-10-14"
# Split the two dates on the comma, again using the unpacking technique
inicio,fim = datas.split(",")
# Query the collection based on the selected study area and dates
colecao = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').filterBounds(geometria).filterDate(inicio,fim).filterMetadata('CLOUD_COVER','less_than', 30)
# apply the 'aplicar_mascaras' function to every image (this will add the 'agua', 'nuvem' and 'sem_nuvem' bands to the images):
colecao = colecao.map(aplicar_mascaras)
# extract the median image from the collection
imagem = colecao.median()
###Output
_____no_output_____
###Markdown
Now, let's apply the masks individually to the NDVI band:
###Code
# Apply the three masks individually to the NDVI band
# The function will add the aforementioned bands as it is executed, line by line
imagem = aplicar_mascara_banda(imagem, 'agua', 'ndvi', 'ndvi_agua')
imagem = aplicar_mascara_banda(imagem, 'nuvem', 'ndvi', 'ndvi_nuvem')
imagem = aplicar_mascara_banda(imagem, 'sem_nuvem', 'ndvi', 'ndvi_sem_nuvem')
imagem = aplicar_mascara_banda(imagem, 'agua', 'ndvi_sem_nuvem', 'ndvi_agua_sem_nuvem')
# Then, clip the image
# scale = sensor resolution; for Landsat-8/OLI it is 30 meters
imagem_corte = imagem.clipToBoundsAndScale(geometry=geometria,scale=30)
###Output
_____no_output_____
###Markdown
Using Pillow and Requests, we will display and extract an image of the 'ndvi_agua_sem_nuvem' band through the GEE platform:
###Code
# Display the image with Pillow, Requests and BytesIO directly in the notebook
PIL.Image.open(BytesIO(requests.get(imagem_corte.select(['ndvi_agua_sem_nuvem']).getThumbUrl({'min':-1, 'max':1})).content))
###Output
_____no_output_____
###Markdown
This image can also be saved to a file with the command:
###Code
# Save the image to a file
imagem_pillow = PIL.Image.open(BytesIO(requests.get(imagem_corte.select(['ndvi_agua_sem_nuvem']).getThumbUrl({'min':-1, 'max':1})).content))
imagem_pillow.save('images/7-ndvi_bbhr.png')
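# A possible extension (an illustrative sketch, not in the original notebook): save a PNG
# for each of the NDVI-derived bands created above, reusing the same getThumbUrl pattern.
for banda in ['ndvi', 'ndvi_agua', 'ndvi_sem_nuvem', 'ndvi_agua_sem_nuvem']:
    url = imagem_corte.select([banda]).getThumbUrl({'min': -1, 'max': 1})
    PIL.Image.open(BytesIO(requests.get(url).content)).save('images/7-{}_bbhr.png'.format(banda))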
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/completeness_and_contamination-checkpoint.ipynb | ###Markdown
This demonstrates all the steps in my candidate selection before conducting visual inspection
###Code
import numpy as np
import splat
import wisps.data_analysis as wispd
from wisps.data_analysis import selection_criteria as sel_crt
import shapey
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy import stats
import wisps
import matplotlib as mpl
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
%matplotlib inline
#some functions
def get_indices(x):
if x is None :
return pd.Series({})
else:
return pd.concat([pd.Series(x.indices), pd.Series(x.mags), pd.Series(x.snr)])
def get_spt(x):
if x is None:
return np.nan
else:
return x.spectral_type[0]
#change f-test definition
def f_test_fx(x, df1, df2):
return stats.f.cdf(x, df1, df2)
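# e.g. f_test_fx(0.86, 107, 106) gives the probability that an F-distributed variable with
# those degrees of freedom is <= 0.86 (the x fed in below is the ratio spex_chi/line_chi).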
def box_parameters(idx, spt_range):
bs=idx.shapes
b=[x for x in bs if x.shape_name==spt_range][0]
print ('{} {} m: {} b: {} s:{}, comp : {}, cont: {}'.format(spt_range, idx, round(b.coeffs[0], 2), round(b.coeffs[1], 2), round(b.scatter, 2), round(idx.completeness[spt_range], 2), round(idx.contamination[spt_range], 3)))
cands=pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl')
#use the same columns for all data sets
alldata=wisps.get_big_file()
spex=wisps.Annotator.reformat_table(wisps.datasets['spex'])
cands['line_chi']=cands.spectra.apply(lambda x : x.line_chi)
cands['spex_chi']=cands.spectra.apply(lambda x: x.spex_chi)
cands['f_test']=cands.spectra.apply(lambda x: x.f_test)
spex_df=wisps.Annotator.reformat_table(wisps.datasets['spex']).reset_index(drop=True)
manj=wisps.Annotator.reformat_table(wisps.datasets['manjavacas']).reset_index(drop=True)
schn=wisps.Annotator.reformat_table(wisps.datasets['schneider']).reset_index(drop=True)
ydwarfs=(manj[manj['spt'].apply(wisps.make_spt_number)>38].append(schn)).reset_index(drop=True)
spex_df['spt']=np.vstack(spex_df['spt'].values)[:,0]
manj['spt']=np.vstack(manj['spt'].values)[:,0]
schn['spt']=np.vstack(schn['spt'].values)[:,0]
cands.grism_id=cands.grism_id.apply(lambda x: x.lower())
cands['spt']=np.vstack(cands['spt'].values)
#add x values
spex['x']=spex.spex_chi/spex.line_chi
alldata['x']=alldata.spex_chi/alldata.line_chi
cands['x']=cands.spex_chi/cands.line_chi
spex['f_test']=f_test_fx(spex.x.values, spex.dof.values-1, spex.dof.values-2)
alldata['f_test']=f_test_fx(alldata.x.values, alldata.nG141.values-1, alldata.nG141.values-2)
alldata=alldata.sort_values('x')
spex=spex.sort_values('x')
cands=cands.sort_values('x')
alldata['datalabel']='alldata'
spex['datalabel']='spex'
cands['datalabel']='ucds'
combined_ftest_df=pd.concat([cands, spex, alldata[(alldata.snr1>=3.) & (alldata.mstar_flag !=0)]])
#stats.f.cdf(.85564068, 108-1, 108+2)
#list(spex[['x', 'dof']][spex.f_test.values >0.2].values)
len(spex[np.logical_and(spex.f_test.values > 0.9, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex)
len(spex[np.logical_and(spex.f_test.values < 0.02, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex)
len(cands[np.logical_and(cands.f_test.values > 0.9, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands)
len(cands[np.logical_and(cands.f_test.values < 0.02, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands)
#star_ids=alldata[alldata['class_star'] !=0]
#stars=wisps.Annotator.reformat_table(star_ids).reset_index(drop=True)
#cy=stars[stars.grism_id.isin(cx.grism_id)]
plt.plot(cands.x[cands.x<1.], '.')
dt=alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) & (alldata.mstar_flag !=0)].reset_index(drop=True)
dt['spt']=(dt['spt']).apply(wisps.make_spt_number).apply(float)
dt=wisps.Annotator.reformat_table(dt).reset_index(drop=True)
len(alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) & (alldata.mstar_flag !=0)])
wisps.datasets.keys()
#wisps.Annotator.reformat_table(wisps.datasets['subd'])
#get criteria
##only run this if new data
gbhio=sel_crt.save_criteria(conts=dt)
crts=sel_crt.crts_from_file()
contamns=pd.DataFrame([ x.contamination for x in crts.values()])
compls=pd.DataFrame([ x.completeness for x in crts.values()])
contamns.index=[x for x in crts.keys()]
compls.index=[x for x in crts.keys()]
%%capture
'''
contamns.style.apply(lambda x: ["background-color: #7FDBFF"
if (i >= 0 and (v < 0.1
and v > 0. ))
else "" for i, v in enumerate(x)], axis = 1)
'''
def get_toplowest_contam(subtype, n):
top=contamns.sort_values('L5-T0')[:n]
return {subtype: [x for x in top.index]}
ordered={}
for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']:
ordered.update(get_toplowest_contam(k, 6))
to_use= ordered
spex['spt']=np.vstack(spex.spt.values)[:,0]
from tqdm import tqdm
def multiplte_indices_selection(k):
stat_dict={}
indices= [crts[index_name] for index_name in to_use[k]]
#make selections for each index separately
cand_bools=[]
spex_bools=[]
trash_bools=[]
for idx in indices:
xkey=idx.xkey
ykey=idx.ykey
bx=[x for x in idx.shapes if x.shape_name==k][0]
_, cbools=bx._select(np.array([cands[xkey].values,cands[ykey].values]))
_, spbools=bx._select(np.array([spex[xkey].values,spex[ykey].values]))
_, trbools=bx._select(np.array([dt[xkey].values, dt[ykey].values]))
cand_bools.append(cbools)
spex_bools.append(spbools)
trash_bools.append(trbools)
cands_in_that_class_bool=cands.spt.apply(lambda x: wisps.is_in_that_classification(x, k))
spex_in_that_class_bool=spex.spt.apply(lambda x: wisps.is_in_that_classification(x, k))
cand_bools.append(cands_in_that_class_bool)
spex_bools.append(spex_in_that_class_bool)
cands_selected=cands[np.logical_and.reduce(cand_bools, axis=0)]
spexs_selected=spex[np.logical_and.reduce(spex_bools, axis=0)]
print (' {} selected {} out of {} UCDS'.format(k, len( cands_selected), len(cands[cands_in_that_class_bool])))
print ('overall completeness {}'.format( len(spexs_selected)/len(spex[spex_in_that_class_bool])))
print ('total contaminants {}'.format(len(dt[np.logical_and.reduce(trash_bools)])))
print ('-------------------------------------------')
#for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs']:
# multiplte_indices_selection(k)
contamns.idxmin(axis=0)
from collections import OrderedDict
ordered=[(k, contamns.idxmin(axis=0)[k]) for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']]
to_use= [ (y, x) for x, y in ordered]
to_use
import pickle
#save the random forest
output_file=wisps.OUTPUT_FILES+'/best_indices_to_use.pkl'
with open(output_file, 'wb') as file:
pickle.dump(to_use,file)
fp={}
cands=cands[cands.grism_id.isin(dt.grism_id)]
def plot_index_box(index_name, box_name, ax):
#get the index and the box
idx=crts[index_name]
bx=[x for x in idx.shapes if x.shape_name==box_name][0]
xkey=idx.xkey
ykey=idx.ykey
to_use_df=spex_df
if box_name.lower()=='y dwarfs':
to_use_df=ydwarfs
if box_name.lower()=='subdwarfs':
to_use_df=wisps.Annotator.reformat_table(idx.subdwarfs)
to_use_df['spt']=17
xlim=[ bx.xrange[0]-.5*abs(np.ptp(bx.xrange)), bx.xrange[1]+.5*abs(np.ptp(bx.xrange))]
ylim=[ bx.yrange[0]-.5*abs(np.ptp(bx.yrange)), bx.yrange[1]+.5*abs(np.ptp(bx.yrange))]
if box_name.upper()=='T5-T9':
print ('changin scale')
print (bx.xrange[1])
xlim=[ bx.xrange[0]-0.2*abs(np.ptp(bx.xrange)), np.round(bx.xrange[1]+0.2*abs(np.ptp(bx.xrange)))]
#remove nans from background
bckgrd= dt[[xkey, ykey]].replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()
# ax.scatter(bckgrd[xkey], bckgrd[ykey], s=1, c='#111111', label='Background')
bckgrd=bckgrd[(bckgrd[xkey].between(xlim[0], xlim[1])) & (bckgrd[ykey].between(ylim[0], ylim[1]))]
h=ax.hist2d(bckgrd[xkey].apply(float).values, bckgrd[ykey].apply(float).values, \
cmap='gist_yarg', vmin=50, vmax=1000)
cands_slctd, cands_bools=bx._select(np.array([cands[xkey].values,cands[ykey].values]))
trash_slctd, trsh_bools=bx._select(np.array([dt[xkey].values, dt[ykey].values]))
#simul_slctd, simul_bools=bx._select(np.array([simulated_data[xkey].values, simulated_data[ykey].values]))
print (len(cands_slctd[0]), len((cands)))
cands_in_that_class_bool=(cands).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name))
spexs_slctd_in_that_class_bool= (to_use_df).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name))
#simulated_in_that_class_bool=(simulated_data[simul_bools]).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name))
if box_name.lower()=='subdwarfs':
spexs_slctd_in_that_class_bool=np.ones(len(to_use_df), dtype=bool)
cands_in_that_class=np.array([cands_slctd[0], \
cands_slctd[1]])
#simulated_in_that_class= np.array([simul_slctd[0][simulated_in_that_class_bool], simul_slctd[1][simulated_in_that_class_bool]])
spexs_slctd_in_that_class=np.array([to_use_df[xkey][spexs_slctd_in_that_class_bool], to_use_df[ykey][spexs_slctd_in_that_class_bool]])
#ax.scatter( simulated_in_that_class[0], simulated_in_that_class[1], facecolors='none', s=10,
# edgecolors='#001f3f', label='simulated')
ax.scatter(spexs_slctd_in_that_class[0], spexs_slctd_in_that_class[1], facecolors='none',\
edgecolors='#0074D9', label='Templates', s=50.)
#ax.scatter(cands[xkey], cands[ykey], marker='x', facecolors='#FF851B', s=40., alpha=0.5)
ax.scatter( cands_in_that_class[0], cands_in_that_class[1], marker ='+', s=150., alpha=1.,
facecolors='#FF851B', label='Discovered UCDs')
ax.scatter(cands[xkey].values, cands[ykey].values, marker='+', s=150., alpha=0.3,
facecolors='#FF851B')
bx.color='None'
bx.alpha=1.
bx.linewidth=3
bx.linestyle='-'
bx.edgecolor='#0074D9'
bx.plot(ax=ax, only_shape=True, highlight=False)
#cb = plt.colorbar(h[3], ax=ax, orientation='horizontal')
#cb.set_label('Counts in bin', fontsize=16)
plt.tight_layout()
ax.set_xlabel(r'$'+str(idx.name.split(' ')[0])+'$', fontsize=14)
ax.set_ylabel(r'$'+str(idx.name.split(' ')[1])+'$', fontsize=14)
ax.set_title(box_name, fontsize=18)
xbuffer=np.nanstd(to_use_df[[xkey,ykey]])
ax.minorticks_on()
if (trash_slctd.shape[1])==0:
fprate=0.0
else:
fprate=(trash_slctd.shape[1]- cands_slctd.shape[1])/trash_slctd.shape[1]
if box_name.lower()=='subdwarfs':
fprate=1.
fp[box_name]= fprate
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.tight_layout()
print (' {} selected {}'.format(box_name, len(bx.select( bckgrd))))
return {str(box_name): bx}
to_use
###Output
_____no_output_____
###Markdown
cands
###Code
idx=crts[to_use[1][0]]
import matplotlib
fig, ax=plt.subplots(nrows=3, ncols=3, figsize=(12, 14))
bxs=[]
for idx, k in enumerate(to_use):
print (idx, k)
b=plot_index_box( k[0], k[1], np.concatenate(ax)[idx])
bxs.append(b)
plt.tight_layout()
cax = fig.add_axes([0.5, 0.1, .3, 0.03])
norm= matplotlib.colors.Normalize(vmin=50,vmax=1000)
mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='gist_yarg')# vmin=10, vmax=5000)
cbar=plt.colorbar(mp, cax=cax, orientation='horizontal')
cbar.ax.set_xlabel(r'Number of Contaminants', fontsize=18)
fig.delaxes(np.concatenate(ax)[-1])
fig.delaxes(np.concatenate(ax)[-2])
np.concatenate(ax)[-4].set_title(r'$\geq$ T9 ', fontsize=18)
#subdindx_index_crt=crts['H_2O-1/J-Cont H_2O-2/H_2O-1']
#subdrfs=wisps.Annotator.reformat_table(dummy_index_crt.subdwarfs)
#tpls=wisps.Annotator.reformat_table(spex_df[spex_df.metallicity_class.isna()])
#a=np.concatenate(ax)[-1]
#tpls=tpls[tpls.spt>16]
#a.scatter(dt[subdindx_index_crt.xkey], dt[subdindx_index_crt.ykey], s=1., c='#111111', alpha=0.1)
#a.scatter(tpls[subdindx_index_crt.xkey], tpls[subdindx_index_crt.ykey], marker='+', facecolors='#0074D9', label='SpeX', s=5.)
#a.scatter(subdrfs[subdindx_index_crt.xkey], subdrfs[subdindx_index_crt.ykey], marker='+', facecolors='#2ECC40', label='SpeX', s=30.)
#a.set_xlim([0., 1.35])
#a.set_ylim([0., 1.25])
#a.set_title('subdwarfs', fontsize=18)
#a.set_xlabel(r'$'+str(subdindx_index_crt.name.split(' ')[0])+'$', fontsize=15)
#a.set_ylabel(r'$'+str(subdindx_index_crt.name.split(' ')[1])+'$', fontsize=15)
np.concatenate(ax)[-3].legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig(wisps.OUTPUT_FIGURES+'/index_index_plots.pdf',
bbox_inches='tight', rasterized=True, dpi=150)
#.grism_id.to_csv('/users/caganze/desktop/true_brown_dwarfs.csv')
bx_dict={}
for b in bxs:
bx_dict.update(b)
#invert to use
inv_to_use = {v: k for k, v in to_use}
ncandidates=[]
for spt_range in bx_dict.keys():
idx_name=inv_to_use[spt_range]
idx=crts[idx_name]
s, bools=(bx_dict[spt_range])._select(np.array([dt[idx.xkey].values, dt[idx.ykey].values]))
ncandidates.append(dt[bools])
candsss=(pd.concat(ncandidates).drop_duplicates(subset='grism_id'))
cands.grism_id=cands.grism_id.apply(lambda x: x.lower().strip())
good_indices=[crts[x] for x in inv_to_use.values()]
len(candsss), len(candsss[candsss.grism_id.isin(cands.grism_id.apply(lambda x: x.lower().strip())) & (candsss.spt.apply(wisps.make_spt_number)>16)])
len(candsss.drop_duplicates('grism_id'))/len(alldata)
len(candsss[candsss.grism_id.isin(cands.grism_id) & (candsss.spt.apply(wisps.make_spt_number).between(35, 40))])
len(candsss), len(dt), len(alldata[alldata.mstar_flag !=0])
len(dt)/len(alldata)
candsss.to_pickle(wisps.OUTPUT_FILES+'/selected_by_indices.pkl')
#print out table
def round_tuple(tpl, n=2):
return round(tpl[0], n), round(tpl[1],n)
for index, k in to_use:
spt_range=k
sindex=crts[index]
bs=sindex.shapes
bs=[x for x in bs if x.shape_name==spt_range]
bx=bs[0]
print (" {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\ ".format(spt_range,sindex.xkey, sindex.ykey,
round_tuple(bx.vertices[0]), round_tuple(bx.vertices[1])
, round_tuple(bx.vertices[2]), round_tuple(bx.vertices[3]),
round(sindex.completeness[spt_range], 2),
round(sindex.contamination[spt_range], 7),
round(fp[spt_range],6)))
len(candsss)
#ghjk
stars= alldata[alldata.mstar_flag !=0]
cands_dff=(cands[np.logical_and(cands['snr1'] >=3., cands['spt'] >=17)]).sort_values('spt')
spex_df=spex_df.sort_values('spt')
star_snr=stars[['snr1', 'snr2', 'snr3', 'snr4']].apply(np.log10).dropna()
star_snr=(star_snr[star_snr.snr1.between(-1, 4) & star_snr.snr3.between(-1, 4) & star_snr.snr4.between(-1, 4)]).reset_index(drop=True)
fig, (ax, ax1)=plt.subplots(ncols=2, figsize=(12, 6))
h=ax.hist2d(star_snr['snr1'], star_snr['snr3'], cmap='gist_yarg', bins=10, label='Point Sources')
#ax.scatter(star_snr['snr1'], star_snr['snr3'], c='#111111', s=1, alpha=0.1)
cb = plt.colorbar(h[3], ax=ax, orientation='horizontal')
cb.set_label('Counts in bin', fontsize=16)
plt.tight_layout()
#ax.scatter(star_snr['snr1'], star_snr['snr4'], s=1., c='k', alpha=0.1,
# label='3D-HST or WISP')
ax.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr3'].apply(np.log10),
s=10, c=spex_df.spt,
cmap='coolwarm', marker='o', alpha=0.1, vmin=15, vmax=40)
ax.scatter(spex_df['snr1'].apply(np.log10)[0], spex_df['snr3'].apply(np.log10)[0],
s=10, c=spex_df.spt[0],
cmap='coolwarm', label='Templates', marker='o', alpha=1., vmin=15, vmax=40)
ax.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr3'].apply(np.log10),
c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs'
, vmin=15, vmax=40)
ax.set_xlim([-0.5, 4])
ax.set_ylim([-0.5, 4])
ax.set_xlabel('Log J-SNR', fontsize=18)
ax.set_ylabel('Log H-SNR', fontsize=18)
ax.legend(fontsize=18, loc='upper left')
ax.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--')
ax.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--')
#ax1.scatter(stars['snr1'].apply(np.log10), stars['snr4'].apply(np.log10), s=1., c='k', alpha=0.1,
# label='3D-HST or WISP')
#ax1.scatter(star_snr['snr1'], star_snr['snr4'], c='#111111', s=1, alpha=0.1)
h1=ax1.hist2d(star_snr['snr1'], star_snr['snr4'], cmap='gist_yarg', bins=10, label='Point Sources')
mp=ax1.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr4'].apply(np.log10), s=10, c=spex_df.spt,
cmap='coolwarm', label='Templates', marker='o', alpha=0.1, vmin=15, vmax=40)
ax1.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr4'].apply(np.log10),
c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs', vmin=15, vmax=40)
ax1.set_xlim([-0.5, 4])
ax1.set_ylim([-0.5, 4])
ax1.set_xlabel(' Log J-SNR', fontsize=18)
ax1.set_ylabel('Log MEDIAN-SNR', fontsize=18)
#ax.legend(fontsize=18)
ax1.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--')
ax1.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--')
cb1 = plt.colorbar(h1[3], ax=ax1, orientation='horizontal')
cb1.set_label('Counts in bin', fontsize=16)
#plt.tight_layout()
import matplotlib
cax = fig.add_axes([1.01, 0.21, .03, 0.7])
norm= matplotlib.colors.Normalize(vmin=15,vmax=40)
mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='coolwarm')
cbar=plt.colorbar(mp, cax=cax, orientation='vertical')
cbar.ax.set_ylabel(r'Spectral Type', fontsize=18)
ax.minorticks_on()
ax1.minorticks_on()
cbar.ax.set_yticks([ 17, 20, 25, 30, 35, 40])
cbar.ax.set_yticklabels(['M5', 'L0', 'L5', 'T0', 'T5', 'Y0'])
plt.tight_layout()
plt.savefig(wisps.OUTPUT_FIGURES+'/snr_cutplots.pdf', \
bbox_inches='tight',rasterized=True, dpi=100)
#import wisps
big=wisps.get_big_file()
bigsnr=big[big.snr1>=3.]
#
fig, ax=plt.subplots(figsize=(10, 6))
h=ax.hist(big.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linestyle=':',
label='All', log=True, linewidth=3)
h=ax.hist(stars.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, label='Point Sources',
linestyle='--', log=True)
h=ax.hist(stars[stars.snr1>3].snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3,
label='Selected',
log=True)
#h=ax.hist(bigsnr.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, log=True)
ax.minorticks_on()
plt.xlabel('Log SNR')
plt.ylabel('Number')
plt.legend()
plt.savefig(wisps.OUTPUT_FIGURES+'/snr_distribution.pdf', bbox_inches='tight', facecolor='white', transparent=False)
#s3=wisps.Source(filename='goodss-01-G141_47749')
#s4=wisps.Source(filename='goodss-01-G141_45524')
bools=np.logical_and(stars.snr1.between(3, 1000), stars.f_test.between(1e-3, 1))
#s4._best_fit_line
###Output
_____no_output_____
###Markdown
fig, ax=plt.subplots(figsize=(8, 8))plt.plot(s4.wave, s4.flux, color='111111', label='Flux')plt.plot(s4.wave, s4.noise, '39CCCC', label='Noise')std=splat.getStandard(s4.spectral_type[0])std.normalize(range=[1.2, 1.5])chi, scale=splat.compareSpectra(s4.splat_spectrum, std, comprange=[[1.2, 1.5]], statistic='chisqr', scale=True) std.scale(scale)plt.plot(std.wave, std.flux, color='y', label='Best fit template')plt.plot( s4._best_fit_line[0], color='FF4136', label='Best fit line')plt.xlim([1.1, 1.7])plt.ylim([0, 0.1])plt.xlabel('Wavelength (micron)')plt.ylabel('Normalized Flux')plt.legend()plt.savefig(wisps.OUTPUT_FIGURES+'/example_line_fit.pdf', bbox_inches='tight', facecolor='white', transparent=False)
###Code
compls.keys()
fig, ax=plt.subplots(figsize=(8,6))
#for k in ['L0-L5', 'L5-T0', 'M7-L0', 'T0-T5', 'T5-T9','subdwarfs']:
ax.scatter(compls['M7-L0'].values, contamns['M7-L0'].values, facecolors='none', edgecolors='#0074D9',
label='M7-L0')
ax.scatter(compls['L0-L5'].values, contamns['L0-L5'].values, marker='^', facecolors='none',\
edgecolors='#FF851B', label='L0-L5')
ax.scatter(compls['L5-T0'].values, contamns['L5-T0'].values, marker='s', facecolors='none',
edgecolors='#2ECC40', label='L5-T0')
ax.scatter(compls['T0-T5'].values, contamns['T0-T5'].values, marker='$...$', facecolors='none',
edgecolors='#FF4136',
label='T0-T5')
ax.scatter(compls['T5-T9'].values, contamns['T5-T9'].values, marker='X', facecolors='none',
edgecolors='#111111',
label='T5-T9')
#h=plt.hist(contams[k].values, bins='auto', histtype='step',
# label='All', log=True, linewidth=3)
ax.set_xlabel('Completeness')
ax.set_ylabel('Contamination')
plt.legend()
ax.set_yscale('log')
plt.savefig(wisps.OUTPUT_FIGURES+'/completeness_contam.pdf', bbox_inches='tight', facecolor='white', transparent=False)
compl_contam_table=pd.DataFrame(columns=contamns.columns, index=contamns.index)
for k in compl_contam_table.columns:
for idx in compl_contam_table.index:
compl_contam_table.loc[idx, k]=(round(compls.loc[idx, k], 2), \
round(contamns.loc[idx, k], 3))
(compl_contam_table[['M7-L0', 'L0-L5', 'T0-T5',\
'T5-T9', 'Y dwarfs', 'subdwarfs']]).to_latex()
###Output
_____no_output_____ |
Notebook/stalkroot_rooted_0.5.ipynb | ###Markdown
MUSHROOMS Binary Classification Imports
###Code
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Load Data
###Code
DATA_PATH = '../DATA/'
FILE_NAME = 'mushrooms.csv'
def load_data(data_path=DATA_PATH, file_name=FILE_NAME):
csv_path = os.path.join(data_path, file_name)
return pd.read_csv(csv_path)
dataset = load_data()
###Output
_____no_output_____
###Markdown
View Data and Information
###Code
dataset.head()
dataset.info()
edible, poisonous = dataset['class'].value_counts()
print("Edible:\t ", edible,"\nPoisonous:", poisonous)
# Categorical to numerical
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous)
###Output
0 - Edible: 4208
1 - Poisonous: 3916
###Markdown
Split Dataset Get the Labels
###Code
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
print("X:",X.shape,"\ny:",y.shape)
###Output
X: (8124, 22)
y: (8124,)
###Markdown
Train Set and Test Set
###Code
from sklearn.model_selection import train_test_split
X_white = pd.DataFrame()
X_not_white = pd.DataFrame()
y_white = pd.Series(dtype='float64')
y_not_white = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"stalk-root"] == "r":
X_white = X_white.append(X.iloc[i,:])
y_white = y_white.append(pd.Series(y.iloc[i]))
else:
X_not_white = X_not_white.append(X.iloc[i,:])
y_not_white = y_not_white.append(pd.Series(y.iloc[i]))
X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=37)
# print(X_test_white)
X_train_white = (X_train_not_white)
X_test_white = X_white.append(X_test_not_white)
y_train_white = (y_train_not_white)
y_test_white = y_white.append(y_test_not_white)
from sklearn.utils import shuffle
X_train_full = shuffle(X_train_white, random_state=37)
X_test = shuffle(X_test_white, random_state=37)
y_train_full = shuffle(y_train_white, random_state=37)
y_test = shuffle(y_test_white, random_state=37)
# print(X_test[:5])
# print(y_test.loc[:,"0"])
# from sklearn.model_selection import train_test_split
# X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.15, random_state=37)
# print("85% - X_train size:", X_train_full.shape[0], " y_train size:", y_train_full.shape[0])
# print("15% - X_test size: ", X_test.shape[0], " y_test size: ", y_test.shape[0])
###Output
_____no_output_____
###Markdown
Validation Set
###Code
X_valid, X_train = X_train_full[:500], X_train_full[500:]
y_valid, y_train = y_train_full[:500], y_train_full[500:]
print("X_train:", X_train.shape[0], "y_train", y_train.shape[0])
print("X_valid: ", X_valid.shape[0], "y_valid ", y_valid.shape[0])
print("X_test: ", X_test.shape[0])
###Output
X_train: 6404 y_train 6404
X_valid: 500 y_valid 500
X_test: 1220
###Markdown
Prepare the Data Data Transformation
###Code
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train = pipeline.fit_transform(X_train)
X_valid = pipeline.fit_transform(X_valid)
X_test = pipeline.fit_transform(X_test)
###Output
_____no_output_____
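###Markdown
A side note (an untested sketch of an alternative version of the cell above, shown for comparison rather than executed here): because `fit_transform` is called separately on each split, the category-to-integer mapping is re-learned three times. Fitting the transformer on the training split only and reusing `transform` keeps the encoding consistent across splits; with a reasonably recent scikit-learn, `handle_unknown='use_encoded_value'` also guards against categories that appear only outside the training data.
###Code
cat_attr_pipeline = Pipeline([
    ('encoder', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1))
])
pipeline = ColumnTransformer([
    ('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train = pipeline.fit_transform(X_train)   # mapping learned on the training split only
X_valid = pipeline.transform(X_valid)       # ... and reused unchanged here
X_test = pipeline.transform(X_test)
###Output
_____no_output_____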
###Markdown
Neural Network Model
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
tf.random.set_seed(37)
model = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
model.summary()
###Output
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_6 (Dense) (None, 45) 1035
dense_7 (Dense) (None, 1) 46
=================================================================
Total params: 1,081
Trainable params: 1,081
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the Model
###Code
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Prepare Callbacks
###Code
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
###Output
_____no_output_____
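###Markdown
Since `ModelCheckpoint` writes the best weights to `../SavedModels/best_model.h5`, the saved model can later be restored without retraining. A minimal sketch (not executed here):
###Code
from tensorflow.keras.models import load_model
best_model = load_model('../SavedModels/best_model.h5')
###Output
_____no_output_____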
###Markdown
Training
###Code
train_model = model.fit(X_train, y_train,
epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, early_stopping_cb])
###Output
Epoch 1/100
201/201 [==============================] - 1s 3ms/step - loss: 0.3921 - accuracy: 0.8370 - val_loss: 0.4117 - val_accuracy: 0.8040
Epoch 2/100
201/201 [==============================] - 0s 2ms/step - loss: 0.2996 - accuracy: 0.8857 - val_loss: 0.3193 - val_accuracy: 0.8620
Epoch 3/100
201/201 [==============================] - 0s 2ms/step - loss: 0.2714 - accuracy: 0.8980 - val_loss: 0.3075 - val_accuracy: 0.8800
Epoch 4/100
201/201 [==============================] - 1s 3ms/step - loss: 0.2504 - accuracy: 0.9040 - val_loss: 0.2786 - val_accuracy: 0.8900
Epoch 5/100
201/201 [==============================] - 1s 3ms/step - loss: 0.2318 - accuracy: 0.9113 - val_loss: 0.2519 - val_accuracy: 0.8860
Epoch 6/100
201/201 [==============================] - 0s 1ms/step - loss: 0.2147 - accuracy: 0.9199 - val_loss: 0.3326 - val_accuracy: 0.8740
Epoch 7/100
201/201 [==============================] - 0s 1ms/step - loss: 0.1979 - accuracy: 0.9288 - val_loss: 0.2303 - val_accuracy: 0.9280
Epoch 8/100
201/201 [==============================] - 0s 2ms/step - loss: 0.1822 - accuracy: 0.9383 - val_loss: 0.2055 - val_accuracy: 0.9320
Epoch 9/100
201/201 [==============================] - 0s 1ms/step - loss: 0.1683 - accuracy: 0.9432 - val_loss: 0.1909 - val_accuracy: 0.9220
Epoch 10/100
201/201 [==============================] - 0s 1ms/step - loss: 0.1565 - accuracy: 0.9461 - val_loss: 0.1807 - val_accuracy: 0.9420
Epoch 11/100
201/201 [==============================] - 0s 2ms/step - loss: 0.1447 - accuracy: 0.9505 - val_loss: 0.1908 - val_accuracy: 0.9560
Epoch 12/100
201/201 [==============================] - 0s 2ms/step - loss: 0.1345 - accuracy: 0.9542 - val_loss: 0.2237 - val_accuracy: 0.9420
Epoch 13/100
201/201 [==============================] - 0s 2ms/step - loss: 0.1259 - accuracy: 0.9583 - val_loss: 0.2921 - val_accuracy: 0.8960
###Markdown
Learning Curves
###Code
pd.DataFrame(train_model.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show()
###Output
_____no_output_____
###Markdown
Evaluate the Best Model on Test Set
###Code
results = model.evaluate(X_test, y_test)
print("test loss, test acc:", results)
###Output
39/39 [==============================] - 0s 2ms/step - loss: 0.2619 - accuracy: 0.8811
test loss, test acc: [0.2618843913078308, 0.881147563457489]
###Markdown
Confusion Matrix
###Code
import seaborn as sns
#Parameters
title = 'Confusion Matrix'
custom_color = '#ffa600'
#Function for drawing confusion matrix
def draw_confusion_matrix(cm, title = title, color = custom_color):
palette = sns.light_palette(color, as_cmap=True)
ax = plt.subplot()
sns.heatmap(cm, annot=True, ax=ax, fmt='d', cmap=palette)
# Title
ax.set_title('\n' + title + '\n',
fontweight='bold',
fontstyle='normal',
)
# x y labels
ax.set_xlabel('Predicted', fontweight='bold')
ax.set_ylabel('Actual', fontweight='bold');
# Classes names
x_names = ['Poisonous', 'Edible']
y_names = ['Poisonous', 'Edible']
ax.xaxis.set_ticklabels(x_names, ha = 'center')
ax.yaxis.set_ticklabels(y_names, va = 'center')
from sklearn.metrics import confusion_matrix
y_test_pred = (model.predict(X_test) > 0.5).astype("int32")
cm = confusion_matrix(y_test, y_test_pred)
draw_confusion_matrix(cm)
###Output
_____no_output_____
###Markdown
ROC Curve
###Code
#Function for plotting the ROC curve
def plot_roc_curve(fpr, tpr, roc_auc):
plt.plot(fpr, tpr, custom_color, label='Area: %0.3f' %roc_auc, linewidth=2)
plt.plot([0, 1], [0, 1], 'k--')
plt.title('ROC Curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate - Recall')
plt.legend(loc='lower right')
plt.show()
from sklearn.metrics import roc_curve, auc
y_test_prob = model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_test_prob)
roc_auc = auc(fpr, tpr)
plot_roc_curve(fpr, tpr, roc_auc)
###Output
_____no_output_____
###Markdown
Make Some Predictions
###Code
X_new = X_test[:5]
y_prob = model.predict(X_new)
# print(y_prob.round(3))
y_pred = (model.predict(X_new) > 0.5).astype("int32")
# print(y_pred)
###Output
_____no_output_____
###Markdown
KL Divergence
###Code
# X_new = X_test[:5]
X_df = pd.DataFrame(model.predict(X_test))
y_test_pred = pd.DataFrame(y_test_pred).reset_index(drop=True)
X_df = pd.concat([X_df, y_test_pred], axis=1)
y_test = y_test.reset_index(drop=True)
X_df = pd.concat([X_df, y_test], axis=1)
X_df.columns = ["X_pred","y_pred","y_actual"]
print(X_df)
import math
table = pd.DataFrame(columns=["KL_div","abs distance","correctness"])
for i in range(0,len(X_df)):
# KL divergence
p = X_df.loc[i,"X_pred"]
kl = -(p*math.log(p) + (1-p)*math.log(1-p))
table.loc[i,"KL_div"] = kl
# absolute distance
abs_dist = 2*abs(0.5-p)
table.loc[i,"abs distance"] = abs_dist
# correctness
y_pred = X_df.loc[i,"y_pred"]
y_act = X_df.loc[i,"y_actual"]
if y_pred == y_act:
table.loc[i,"correctness"] = 1 # correct prediction
else:
table.loc[i,"correctness"] = 0 # wrong prediction
print(table)
table["count"] = 1
correctness = table[["correctness","count"]].groupby(pd.cut(table["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
correctness["percent"] = 100*(correctness["correctness"]/correctness["count"])
print(correctness)
index = []
for i in (correctness.index):
index.append(str(i))
plt.bar(index,correctness["percent"], width=0.7)
for index,data in enumerate(correctness["percent"]):
plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
plt.ylim(0,110)
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
###Output
_____no_output_____
###Markdown
Confidence
###Code
kl = table[["correctness","count"]].groupby(pd.cut(table["KL_div"], np.arange(0, 0.8, 0.05))).apply(sum)
kl["percent"] = (kl["correctness"]/kl["count"])
kl.dropna(inplace=True)
plt.scatter(np.arange(0, 0.70, 0.05), kl["percent"])
# print(kl)
# print(np.arange(0, 0.7, 0.05))
# Linear Regression
from sklearn.linear_model import LinearRegression
x = np.arange(0, 0.70, 0.05).reshape((-1, 1))
y = kl["percent"]
model = LinearRegression().fit(x,y)
print('intercept(alpha):', model.intercept_)
print('slope(theta):', model.coef_)
###Output
intercept(alpha): 1.0645665973511222
slope(theta): [-0.70525649]
|
analysis/ENGR418_project_group_31_stage_2.ipynb | ###Markdown
ENGR418 Project Stage 2 Group 31By: Jared Paull (63586572), Liam Ross (75469692)
###Code
import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageFilter
import PIL
from sklearn.neighbors import KNeighborsClassifier
import sklearn
###Output
_____no_output_____
###Markdown
Single Function CallThe function in the cell below can be called to run the entire algorithm. Before running, be sure to run the cells containing the functions at the bottom of this page, or else errors will be thrown.
###Code
import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageFilter
import PIL
from sklearn.neighbors import KNeighborsClassifier
import sklearn
# first param is the relative training data directory
# second param is the relative testing data directory
training_data_relative_dir = "../data/training"
testing_data_relative_dir = "../data/testing"
# this will take 1-2 minutes to run (depending on device capabilities)
test_function(training_data_relative_dir, testing_data_relative_dir)
###Output
Shape Predicted Circle Rectangle Square
Shape Actual
Circle 27 0 0
Rectangle 0 27 0
Square 0 0 27
Percentage of correct classification from model on training data set: 100.00%
Shape Predicted Circle Rectangle Square
Shape Actual
Circle 24 0 3
Rectangle 0 27 0
Square 0 0 27
Percentage of correct classification from model on testing data set: 96.30%
###Markdown
Setting Tuning ParametersBefore starting, these parameters must be set; they can be tuned to optimize performance. The optimal values we found are used as the defaults below.
###Code
# image_size=64, filter_value=4, angles=0->180 increments by 2, 100% & 96.30%
# image size will dictate the size each image will be reshapet to later, used for tuning
image_size = 64
# filter value sets a threshold value on the edge detection image, used for tuning
filter_value = 4
# list of angles that the algorithm will rotate through, used for tuning
angles = []
for i in range(90):
angles.append(2*i)
###Output
_____no_output_____
###Markdown
Feature EngineeringNext, all image data is scraped from the image files in their respective relative directory. Refer to the get_image_feature_data function for a line-by-line description. In essence, each image will have four engineered features: one for the maximum Lego brick length, one for the minimum Lego brick length, and one each for the average and median Lego brick length. These values come from rotated versions of the image, as discussed further in their respective functions.
###Code
# gets all training data from relative directory.
# refer to functions at bottom for line-by-line commenting
x,y = get_image_feature_data("../data/training", image_size, filter_value, angles)
# gets all testing data from relative directory.
xt, yt = get_image_feature_data("../data/testing", image_size, filter_value, angles)
###Output
_____no_output_____
###Markdown
Classifier TrainingNow, with the feature vectors selected and image data collected, a classifier can be trained. After testing, we opted to use a k-nearest neighbors classifier, which provided the best results. The classifier labels a point based on the classification of points near it, making it simple and quick to calculate. With strong engineered features, the k-nearest neighbors classifier provides a high-accuracy solution.
###Code
# creates a nonlinear k-nearest neighbor classifier that considers the 8 nearest neighbors, where each neighbor is weighted by distance
from sklearn.neighbors import KNeighborsClassifier
k_neighbors = KNeighborsClassifier(n_neighbors=8, weights="distance")
# fits data to the classifier
k_neighbors.fit(x,y);
###Output
_____no_output_____
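###Markdown
The choice of 8 neighbors came from testing; one way such a comparison could be done (an illustrative sketch using the same feature set, not part of the original analysis) is cross-validation over a few values of k:
###Code
from sklearn.model_selection import cross_val_score
for k in [2, 4, 8, 16]:
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k, weights='distance'), x, y, cv=5)
    print(k, round(scores.mean(), 3))
###Output
_____no_output_____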
###Markdown
**Prediction, Confusion Matrices, and Accuracy of Classifier** Classifier Prediction: Training DataNow, the classifier can be tested. First it is tested with the training image data. The results of the confusion matrix, as well as the accuracy of the algorithm are shown below.
###Code
# feeds the training data back into the classifier for prediction
pred = k_neighbors.predict(x)
# formats the prediction values to string labels (refer to function below)
predicted = confusion_format(pred)
# formats the actual labels to string labels (refer to function below)
actual = confusion_format(y)
# prints the confusion matrix using string labels
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
# prints the error percentage (refer to function below)
print(f"Percentage of correct classification from model on training data set: {100-error_percentage(pred,y):.2f}%\n")
###Output
Shape Predicted Circle Rectangle Square
Shape Actual
Circle 27 0 0
Rectangle 0 27 0
Square 0 0 27
Percentage of correct classification from model on training data set: 100.00%
###Markdown
Classifier Prediction: Testing DataNext, it is tested with the testing image data. The testing data gives a better indication of the classifier's accuracy, since it is being fed images that it has never seen before. The results of the confusion matrix, as well as the accuracy of the algorithm, are shown below.
###Code
# feeds the testing data into the classifier for prediction
pred = k_neighbors.predict(xt)
# formats the prediction values to string labels (refer to function below)
predicted = confusion_format(pred)
# formats teh actual labels to string values (refer to function below)
actual = confusion_format(yt)
# prints the confusion matrix using string labels
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
# prints the error percentage (refer to function below)
print(f"Percentage of correct classification from model on testing data set: {100-error_percentage(pred,y):.2f}%")
###Output
Shape Predicted Circle Rectangle Square
Shape Actual
Circle 24 0 3
Rectangle 0 27 0
Square 0 0 27
Percentage of correct classification from model on testing data set: 96.30%
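###Markdown
As a quick sanity check, the engineered lengths for a single image can be inspected with the helper functions defined in the Functions section below (an illustrative sketch; the file name is hypothetical):
###Code
img = PIL.Image.open('../data/testing/circle_01.jpg')   # hypothetical file name
edges = edge_image(img, image_size, filter_value)
print([get_len(edges.rotate(a)) for a in (0, 30, 60, 90)])
###Output
_____no_output_____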
###Markdown
------------ **Functions**All of these functions **must** be ran before anything else. Each function has its purpose discussed, and are each well commented on. edge_imageThis function takes in a raw Lego brick image, then exports a filtered, binary, edge detected version. This means, it will output an image that only contains 0/1 in monochrome. All 1's will dictate edges of the Lego brick, while 0's indicate blank space that is not useful. All noise outside of the lego bricks edge should be filtered to allow the get_len to get the correct brick length. Noise going into the get_len function will make the classifier unrealiable. The function is commented on in detail below.
###Code
# returns image that is the filtered, reshaped, edge detection version
def edge_image(image, image_size, filter_value):
# takes input image and converts to monochrome
image = image.convert("L")
# converts the monochrome image to an edge detection version (note this image is ripe with noise)
image = image.filter(ImageFilter.FIND_EDGES)
# Compress image down to 18x18 image, will blur specific noise in the image to make lego brick obvious
image = image.resize((16 + 2,16 + 2))
    # simply slices off the outer pixel of the image, border/edge pixels are recognized as ...
# a "change" in colour, thus are labeled as an edge, the next line will slice out this edge error.
# will output a 16x16 image
image = PIL.Image.fromarray(np.array(image)[int(1) : int(image.height -1), int(1) : int(image.width - 1)])
# resizes image from 16x16 to desizered image size, return information to the plot.
# resizing down then back up was to blur out and specific noise, so all noise can be easily filtered later.
image = image.resize((image_size,image_size))
# converts the image to a numpy array
data = np.asarray(image)
# filters out any noise in the image
data[data <= filter_value] = 0
# converts image from monochrome values to binary (for ease of interpretation)
data[data > 0] = 1
# converts the image data back to a Pillow image object for further use
image = PIL.Image.fromarray(data)
return image
###Output
_____no_output_____
###Markdown
get_lenUsed to get the length between the top-most pixel and the bottom-most pixel in the image. It takes the image from the edge_image function and returns a single integer representing the length described above; by rotating the image slowly and taking this length at each step, we can piece together what the Lego brick is by examining the values it takes as it rotates. Since the function takes the image from the edge_image function above, it requires very little background noise to work effectively.Expect circles to remain similar in value as the image rotates. Expect rectangles to have a large maximum value. Expect squares to have a maximum value greater than a circle's but less than a rectangle's. We will use the max/min/avg/median later to examine the changes over angle.
###Code
def get_len(image):
# converts image to numpy array
data = np.array(image)
# represents the lowest pixel index (index represents height where bottom of image is zero)
# initialize quantity to top of image to guarantee it will decrease (assuming image has a non-zero pixel)
min_index = image.height
# represents the highest pixel index (index represents height where top of image is maximum value, i.e. image height)
# initialize quantity to bottom of image to guarantee it will increase (assuming image has a non-zero pixel)
max_index = 0
# first loop starts from bottom of image and will crawl upwards
for i in range(image.height):
# nested loop will examing each pixel from left to right by height
for j in range(image.width):
# if a edge is detected (lego brick is found)
if( data[i][j] == 1):
# sets min index if current height index is less then smallest index found far
if (min_index > i):
min_index = i
# sets max index if current height index is greater than greatest index found thus far
if( max_index < i):
max_index = i
# finally, return difference between max height and min height to get vertical length the image takes up
return max_index - min_index
###Output
_____no_output_____
###Markdown
get_image_feature_dataWill iterate through a directory and gather feature data from each image, as well as its corresponding correct label. It is largely an accumulation of the edge_image and get_len functions, iterated for each image. It returns *x* and *y*, which are the feature vectors and feature labels, respectively.
###Code
def get_image_feature_data(rel_dir, image_size, filter_value, angles):
# initializes feature data and labels for use population later
x = []
y = []
# will loop through each file in rel_dir directory
for pic in os.listdir(rel_dir):
# creates new Pillow image object from pic in relative directory
image = PIL.Image.open(f"{rel_dir}/{pic}")
# calls function to get filtered, reshaped, edge detection version of image.
image = edge_image(image, image_size, filter_value)
# initialize list to propogate with lengths for different angles
vec = []
# for each loop to rotate through all angles the algorithm considers
for angle in angles:
# rotates original image by angle in for each loop
img = image.rotate(angle)
# for specific angle, find the length using the function described above
length = get_len(img)
# append new length to list containing length for each angle
vec.append(length)
# converts list to array to make math more efficient
vec = np.array(vec)
# maximum length recorded between all angles, normalized by height
# useful for identifying rectangles
max_len = np.max(vec) / img.height
# minimum length recorded between all angles, normalized by height
min_len = np.min(vec) / img.height
# average length recorded between all angles, normalized by height
# useful for identifying circles
avg_len = np.average(vec) / img.height
# median length recorded between all angles, normalized by height
# useful for identifying circles
med_len = np.median(vec) / img.height
# dynamically override vec to be list of 4 key values from list of lengths by angle
vec = [max_len, min_len, avg_len, med_len]
# examine the name of the picture file, can find correct label based on first letter of the file name.
# c indicates the picture is a circle
if( str.lower(pic[0]) == "c"):
# classify circles as a 0
y.append(0)
# r indicates the picture is a rectangle
elif (str.lower(pic[0]) == "r"):
# classify rectangle as a 1
y.append(1)
# only other situation is the image is a square
else:
# classify square as a 2
y.append(2)
        x.append(vec) # each image is now described by 4 engineered features
# convert feature vector data/labels from lists to arrays for ease of use in the classifier model
x = np.array(x)
y = np.array(y)
return x,y
# This function will convert from decimal label to strings. Very easy to comprehend
# 0=>Circle, 1=>Rectangle, 2=>Square
def confusion_format(labels):
test = []
for i in labels:
if i == 0:
test.append("Circle")
elif i == 1:
test.append("Rectangle")
else:
test.append("Square")
test = np.array(test)
return test
# Used to calculate error percentage by looking at number of differences between prediction and actual labels.
def error_percentage(pred, y):
# the number of errors is the number of differences between the model's labels and the correct labels
errors = 0
for i in range(pred.size):
# pred is the predicted array labels, while y is the actual
if pred[i] != y[i]:
errors = errors + 1
# then the percentage of errors is the number of errors divided by the total number of image samples times 100 for percentage.
return errors / pred.size * 100
# Function is exclusively to demonstrate all code in a single function call. Refer to cells above, or functions described for ...
# in depth description/line-by-line description.
def test_function(training_dir, testing_dir):
image_size = 64
filter_value = 4
angles = []
for i in range(90):
angles.append(2*i)
x,y = get_image_feature_data(training_dir, image_size, filter_value, angles)
xt, yt = get_image_feature_data(testing_dir, image_size, filter_value, angles)
k_neighbors = KNeighborsClassifier(8, weights="distance")
k_neighbors.fit(x,y);
pred = k_neighbors.predict(x)
predicted = confusion_format(pred)
actual = confusion_format(y)
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
print(f"Percentage of correct classification from model on training data set: {100-error_percentage(pred,y):.2f}%\n")
pred = k_neighbors.predict(xt)
predicted = confusion_format(pred)
actual = confusion_format(yt)
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
print(f"Percentage of correct classification from model on testing data set: {100-error_percentage(pred,y):.2f}%")
###Output
_____no_output_____ |
preprocessing_a.ipynb | ###Markdown
**Obtaining Regional Multi-Year SST Maps from GODAS**by Ding **Section 1 Data Analysis and Visualization** We use the following dataset to forecast marine heatwaves.[GODAS](https://www.cpc.ncep.noaa.gov/products/GODAS/) Connect Google Drive with Colab.
###Code
from google.colab import drive
drive.mount('/gdrive', force_remount=True)
###Output
Mounted at /gdrive
###Markdown
Import the data analysis libraries.
###Code
!pip install netcdf4
!pip install h5netcdf
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import xarray as xr
import glob
import h5netcdf.legacyapi as netCDF4
###Output
_____no_output_____
###Markdown
Read one GODAS data file (the global marine potential temperatures in 1980) as an xarray dataset type.
###Code
pottmp_1980 = xr.open_dataset('/gdrive/My Drive/GODAS_pottmp/pottmp.1980.nc', decode_times=False)
###Output
_____no_output_____
###Markdown
Have a look at the imported data.
###Code
pottmp_1980
###Output
_____no_output_____
###Markdown
xarray provides a convinient way to read all files in one directory and combines them into one xarray dataset.Read all data files (the global marine potential temperatures since 1980) as an integrated xarray dataset type.
###Code
pottmp_all = xr.open_mfdataset('/gdrive/My Drive/GODAS_pottmp/*.nc', decode_times=False)
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: FutureWarning: In xarray version 0.15 the default behaviour of `open_mfdataset`
will change. To retain the existing behavior, pass
combine='nested'. To use future default behavior, pass
combine='by_coords'. See
http://xarray.pydata.org/en/stable/combining.html#combining-multi
"""Entry point for launching an IPython kernel.
/usr/local/lib/python3.7/dist-packages/xarray/backends/api.py:941: FutureWarning: The datasets supplied have global dimension coordinates. You may want
to use the new `combine_by_coords` function (or the
`combine='by_coords'` option to `open_mfdataset`) to order the datasets
before concatenation. Alternatively, to continue concatenating based
on the order the datasets are supplied in future, please use the new
`combine_nested` function (or the `combine='nested'` option to
open_mfdataset).
from_openmfds=True,
###Markdown
Have a look at it!"time: 492" means 492 months.
###Code
pottmp_all
###Output
_____no_output_____
###Markdown
Visualize the global potential temperatures at the level 5 and the time 73048.
###Code
pottmp_all.pottmp.isel(level=0,time=240).plot()
###Output
_____no_output_____
###Markdown
Visualize the potential temperature time series at the latitude -71.2, the longitude 170.5 and the level 5.
###Code
pottmp_all.pottmp.isel(lat=10,lon=170,level=0).plot(marker="o")
###Output
_____no_output_____
###Markdown
We select a small area to create a few baseline models.Extract the ocean region next to southeastern Australia.$lat \in (-35, -45)$$lon \in (145, 155)$We also denote the potential temperature at the level 5 as the sea surface temperature (SST).$level = 5$
###Code
pottmp_seau = pottmp_all.where(pottmp_all.lat < -35, drop=True)
pottmp_seau = pottmp_seau.where(pottmp_seau.lat > -45, drop=True)
pottmp_seau = pottmp_seau.where(pottmp_seau.lon < 155, drop=True)
pottmp_seau = pottmp_seau.where(pottmp_seau.lon > 145, drop=True)
pottmp_seau = pottmp_seau.where(pottmp_seau.level == 5.0, drop=True)
pottmp_seau
###Output
_____no_output_____
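###Markdown
A side note: xarray can express nearly the same sub-setting with label-based slicing (an untested sketch; `sel` slices are inclusive of their endpoints, and `lat`/`lon` are stored in ascending order here):
###Code
pottmp_seau_alt = pottmp_all.sel(lat=slice(-45, -35), lon=slice(145, 155), level=[5.0])
###Output
_____no_output_____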
###Markdown
Visualize the SST in this small region at the time 73048.
###Code
pottmp_seau.pottmp.isel(level=0,time=240).plot()
###Output
_____no_output_____
###Markdown
There are 492 time points. We select the first 394 (80%) for training and the remaining 98 (20%) for validation.
###Code
pottmp_seau_train = pottmp_seau.where(pottmp_seau.time[0:394], drop=True)
pottmp_seau_val = pottmp_seau.where(pottmp_seau.time[394:], drop=True)
###Output
_____no_output_____
###Markdown
**Section 2 Data Preprocessing**
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Based on the analysis, create (empty) numpy arrays with the shapes for modeling.
###Code
train_set = np.zeros((394,1,30,10))
val_set = np.zeros((98,1,30,10))
###Output
_____no_output_____
###Markdown
Load the data from the xarray type to the numpy array type.
###Code
train_set[:,:,:,:] = pottmp_seau_train.variables['pottmp'][0:394,:,:,:]
val_set[:,:,:,:] = pottmp_seau_val.variables['pottmp'][0:98,:,:,:]
###Output
_____no_output_____
###Markdown
Look at their shapes, which is important for machine learning models.
###Code
print(train_set.shape)
print(val_set.shape)
###Output
(394, 1, 30, 10)
(98, 1, 30, 10)
###Markdown
For convenience, convert "nans" to zeroes.
###Code
train_set = np.where(np.isnan(train_set), 0, train_set)
val_set = np.where(np.isnan(val_set), 0, val_set)
###Output
_____no_output_____
###Markdown
Remove the unnecessary dimension of levels, which contains only one value 5.
###Code
train_set = train_set[:,0,:,:]
val_set = val_set[:,0,:,:]
print(train_set.shape)
print(val_set.shape)
###Output
(394, 30, 10)
(98, 30, 10)
###Markdown
Check the matrix at one timepoint.The temperature unit is kelvin.
###Code
val_set[1]
###Output
_____no_output_____
###Markdown
Exclude the zeros and check the mean.
###Code
np.nanmean(np.where(val_set[1]!=0, val_set[1], np.nan))
###Output
_____no_output_____
###Markdown
We want to use the SST maps of three consecutive months to predict the area-mean SST (one value) in the fourth month. Convert the sets into the following format: from [Month 1], [Month 2], [Month 3], ... to [[Month 1], [Month 2], [Month 3]], [[Month 2], [Month 3], [Month 4]], [[Month 3], [Month 4], [Month 5]], ... Create the label sets accordingly: Month 4 mean, Month 5 mean, Month 6 mean, ...
###Code
train_set_3_list = []
train_label_list = []
val_set_3_list = []
val_label_list = []
for i in range(len(train_set) - 3):
train_set_3_list.append([train_set[i], train_set[i+1], train_set[i+2]])
train_label_list.append(np.nanmean(np.where(train_set[i+3]!=0, train_set[i+3], np.nan)))
for i in range(len(val_set) - 3):
val_set_3_list.append([val_set[i], val_set[i+1], val_set[i+2]])
val_label_list.append(np.nanmean(np.where(val_set[i+3]!=0, val_set[i+3], np.nan)))
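# a vectorized alternative to the loops above (added sketch, not in the original; needs numpy >= 1.20):
from numpy.lib.stride_tricks import sliding_window_view
train_set_3_alt = np.moveaxis(sliding_window_view(train_set, 3, axis=0)[:-1], -1, 1)            # (391, 3, 30, 10)
train_label_alt = np.nanmean(np.where(train_set[3:] != 0, train_set[3:], np.nan), axis=(1, 2))  # (391,)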
###Output
_____no_output_____
###Markdown
Convert the lists into NumPy arrays.
###Code
train_set_3 = np.array(train_set_3_list)
train_label = np.array(train_label_list)
val_set_3 = np.array(val_set_3_list)
val_label = np.array(val_label_list)
###Output
_____no_output_____
###Markdown
Look at their shapes.
###Code
print(train_set_3.shape)
print(val_set_3.shape)
print(train_label.shape)
print(val_label.shape)
###Output
(391, 3, 30, 10)
(95, 3, 30, 10)
(391,)
(95,)
###Markdown
Put the four tensors into one list for saving.
###Code
data_sets = [train_set_3.tolist(), val_set_3.tolist(), train_label.tolist(), val_label.tolist()]
###Output
_____no_output_____
###Markdown
Save this list in Google Drive for further use.
###Code
import json
with open('/gdrive/My Drive/GODAS/data_sets.txt', 'w') as out_file:
json.dump(data_sets, out_file)
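# to restore the four arrays later (added sketch, assuming the same Drive path is still mounted):
# with open('/gdrive/My Drive/GODAS/data_sets.txt', 'r') as in_file:
#     train_set_3, val_set_3, train_label, val_label = [np.array(x) for x in json.load(in_file)]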
###Output
_____no_output_____ |
allesfitter/GUI.ipynb | ###Markdown

###Code
#:::: clean up csv file
def clean_up_csv(fname, N_last_rows=0):
with open(fname, "r") as f:
params_csv = list(csv.reader(f))
with open(fname, "w") as f:
writer = csv.writer(f)
for i in range(len(params_csv)-N_last_rows):
row = params_csv[i]
writer.writerow(row)
#:::: append a row into csv file
def fwrite_params_line(text):
with open(INPUT['fname_params'], 'a') as f:
f.write(text+'\n')
#:::: write params into csv file
def fwrite_params(key, label, unit, physical_bounds, return_str=False):
if INPUT[key+'_bounds_type'].value == 'uniform':
bounds = 'uniform ' \
+ str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-float(INPUT[key+'_lerr'].value)] ) ) + ' ' \
+ str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+float(INPUT[key+'_uerr'].value)] ) )
elif INPUT[key+'_bounds_type'].value == 'uniform * 5':
bounds = 'uniform ' \
+ str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-5*float(INPUT[key+'_lerr'].value)] ) ) + ' ' \
+ str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+5*float(INPUT[key+'_uerr'].value)] ) )
elif INPUT[key+'_bounds_type'].value == 'trunc_normal':
bounds = 'trunc_normal ' \
+ str(physical_bounds[0]) + ' ' \
+ str(physical_bounds[1]) + ' ' \
+ str(INPUT[key+'_median'].value) + ' ' \
+ str(np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] ))
elif INPUT[key+'_bounds_type'].value == 'trunc_normal * 5':
bounds = 'trunc_normal ' \
+ str(physical_bounds[0]) + ' ' \
+ str(physical_bounds[1]) + ' ' \
+ str(INPUT[key+'_median'].value) + ' ' \
+ str(5*np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] ))
string = key + ',' + str(INPUT[key+'_median'].value) + ',' + str(int(INPUT[key+'_fit'].value)) + ',' + bounds + ',' + label + ',' + unit
if not return_str:
fwrite_params_line(string)
else:
return string
#unique
def unique(array):
uniq, index = np.unique(array, return_index=True)
return uniq[index.argsort()]
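#:::: example (added for illustration; assumes widget values median=0.5, lerr=uerr=0.5,
#:::: bounds type 'uniform' and fit=True): a call like
#::::     fwrite_params('b_rr', '$R_b / R_\star$', '', [0,1])
#:::: appends this row to params.csv:
#::::     b_rr,0.5,1,uniform 0.0 1.0,$R_b / R_\star$,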
###Output
_____no_output_____
###Markdown
1. working directory. Select the working directory for this fit, for example `/Users/me/TESS-1b/`. Then you can run a fit using `allesfitter.ns_fit('/Users/me/TESS-1b/')`.
###Code
BUTTONS['datadir'] = widgets.Button(description='Select directory', button_style='')
text_af_directory = widgets.Text(value='', placeholder='for example: /Users/me/TESS-1b/', disabled=True)
hbox = widgets.HBox([BUTTONS['datadir'], text_af_directory])
display(hbox)
def select_datadir(change):
root = Tk()
root.withdraw()
root.call('wm', 'attributes', '.', '-topmost', True)
INPUT['datadir'] = filedialog.askdirectory()
%gui tk
if INPUT['datadir'] != '':
text_af_directory.value = INPUT['datadir']
BUTTONS['datadir'].style.button_color = 'lightgreen'
INPUT['show_step_2a'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
BUTTONS['datadir'].on_click(select_datadir)
###Output
_____no_output_____
###Markdown
2. settings
###Code
if 'show_step_2a' in INPUT and INPUT['show_step_2a'] == True:
display(Markdown('### General settings'))
DROPDOWNS['planet_or_EB'] = widgets.Dropdown(options=['Planets', 'EBs'])
display( widgets.HBox([widgets.Label(value='Fitting planets or EBs?', layout=layout), DROPDOWNS['planet_or_EB']]) )
display(Markdown('Give the companion letters and instruments, space-separated. Leave empty if not applicable.'))
hbox_list = []
text_companions_phot = widgets.Text(value='', placeholder='for example: b')
hbox_list.append( widgets.HBox([widgets.Label(value='Companions in photometry', layout=layout), text_companions_phot]) )
text_companions_rv = widgets.Text(value='', placeholder='for example: b c')
hbox_list.append( widgets.HBox([widgets.Label(value='Companions in RV', layout=layout), text_companions_rv]) )
text_inst_phot = widgets.Text(value='', placeholder='for example: TESS NGTS')
hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for photometry', layout=layout), text_inst_phot]) )
text_inst_rv = widgets.Text(value='', placeholder='for example: HARPS Coralie')
hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for RV', layout=layout), text_inst_rv]) )
display(widgets.VBox(hbox_list))
def confirm(change):
#::: set stuff
if len(text_inst_phot.value): INPUT['inst_phot'] = str(text_inst_phot.value).split(' ')
else: INPUT['inst_phot'] = []
if len(text_inst_rv.value): INPUT['inst_rv'] = str(text_inst_rv.value).split(' ')
else: INPUT['inst_rv'] = []
if len(text_companions_phot.value): INPUT['companions_phot'] = str(text_companions_phot.value).split(' ')
else: INPUT['companions_phot'] = []
if len(text_companions_rv.value): INPUT['companions_rv'] = str(text_companions_rv.value).split(' ')
else: INPUT['companions_rv'] = []
INPUT['companions_all'] = list(np.unique(INPUT['companions_phot']+INPUT['companions_rv'])) #sorted by b, c, d...
INPUT['inst_all'] = list(unique(INPUT['inst_phot']+INPUT['inst_rv'])) #sorted like user input
button_2a.style.button_color = 'lightgreen'
INPUT['show_step_2b'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
button_2a = widgets.Button(description='Confirm', button_style='')
display(button_2a)
button_2a.on_click(confirm)
if 'show_step_2b' in INPUT and INPUT['show_step_2b'] == True:
display(Markdown('### Advanced settings'))
vbox_list = []
#::: Fitting & performance
hbox_list = []
max_cores = cpu_count()
DROPDOWNS['multiprocessing'] = widgets.Dropdown(options=['No'] + ['on '+str(i)+' of my '+str(max_cores)+' cores' for i in range(2,max_cores)] + ['always on all - 1 cores on any system'])
hbox_list.append(widgets.HBox([widgets.Label(value='Multiprocessing', layout=layout), DROPDOWNS['multiprocessing']]))
DROPDOWNS['fit_type'] = widgets.Dropdown(options=['Transit (fast)', 'Transit and occultation (fast)', 'Full lightcurve (slow)'])
hbox_list.append(widgets.HBox([widgets.Label(value='Fit type', layout=layout), DROPDOWNS['fit_type']]))
DROPDOWNS['shift_epoch'] = widgets.Dropdown(options=['Yes', 'No'])
hbox_list.append(widgets.HBox([widgets.Label(value='Automatically shift epoch?', layout=layout), DROPDOWNS['shift_epoch']]))
DROPDOWNS['mcmc_settings'] = widgets.Dropdown(options=['Default'])
hbox_list.append(widgets.HBox([widgets.Label(value='MCMC settings', layout=layout), DROPDOWNS['mcmc_settings']]))
DROPDOWNS['ns_settings'] = widgets.Dropdown(options=['Default'])
hbox_list.append(widgets.HBox([widgets.Label(value='Nested Sampling settings', layout=layout), DROPDOWNS['ns_settings']]))
vbox_list.append( widgets.VBox(hbox_list) )
#::: Limb darkening
hbox_list = []
for inst in INPUT['inst_phot']:
DROPDOWNS['host_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing'], value='Quadratic')
hbox_list.append( widgets.HBox([widgets.Label(value='Host limb darkening '+inst, layout=layout), DROPDOWNS['host_ld_law_'+inst]]) )
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
DROPDOWNS[companion+'_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing'])
hbox_list.append( widgets.HBox([widgets.Label(value=companion+' limb darkening '+inst, layout=layout), DROPDOWNS[companion+'_ld_law_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Baseline settings
hbox_list = []
for inst in INPUT['inst_phot']:
DROPDOWNS['baseline_flux_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset')
hbox_list.append( widgets.HBox([widgets.Label(value='Baseline flux '+inst, layout=layout), DROPDOWNS['baseline_flux_'+inst]]) )
for inst in INPUT['inst_rv']:
DROPDOWNS['baseline_rv_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset')
hbox_list.append( widgets.HBox([widgets.Label(value='Baseline RV '+inst, layout=layout), DROPDOWNS['baseline_rv_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Error settings
hbox_list = []
for inst in INPUT['inst_phot']:
DROPDOWNS['error_flux_'+inst] = widgets.Dropdown(options=['sample', 'hybrid'], value='sample')
hbox_list.append( widgets.HBox([widgets.Label(value='Error flux '+inst, layout=layout), DROPDOWNS['error_flux_'+inst]]) )
for inst in INPUT['inst_rv']:
DROPDOWNS['error_rv_'+inst] = widgets.Dropdown(options=['sample', 'hybrid'], value='sample')
hbox_list.append( widgets.HBox([widgets.Label(value='Error RV '+inst, layout=layout), DROPDOWNS['error_rv_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Exposure time interpolation
hbox_list = []
for inst in INPUT['inst_all']:
DROPDOWNS['t_exp_'+inst] = widgets.Text( placeholder='None' )
hbox_list.append( widgets.HBox([widgets.Label(value='Exposure time '+inst, layout=layout), DROPDOWNS['t_exp_'+inst], widgets.Label(value='days', layout=layout)]) )
for inst in INPUT['inst_all']:
DROPDOWNS['t_exp_n_int_'+inst] = widgets.Text( placeholder='None' )
hbox_list.append( widgets.HBox([widgets.Label(value='Interpolation points '+inst, layout=layout), DROPDOWNS['t_exp_n_int_'+inst], widgets.Label(value='(integer)', layout=layout)]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Number of spots
hbox_list = []
for inst in INPUT['inst_all']:
DROPDOWNS['host_N_spots_'+inst] = widgets.Text( placeholder='None' )
hbox_list.append( widgets.HBox([widgets.Label(value='host: Nr. of spots '+inst, layout=layout), DROPDOWNS['host_N_spots_'+inst], widgets.Label(value='(integer)', layout=layout)]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Number of flares
hbox_list = []
DROPDOWNS['N_flares'] = widgets.Text( placeholder='None' )
hbox_list.append( widgets.HBox([widgets.Label(value='Nr. of flares', layout=layout), DROPDOWNS['N_flares'], widgets.Label(value='(integer)', layout=layout)]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Fit TTVs?
hbox_list = []
DROPDOWNS['fit_ttvs'] = widgets.Dropdown(options=["yes","no"], value="no")
hbox_list.append( widgets.HBox([widgets.Label(value='Fit TTVs?', layout=layout), DROPDOWNS['fit_ttvs']]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Stellar grid (e.g. use "sparse" to speed up intense spot computations)
hbox_list = []
for inst in INPUT['inst_all']:
DROPDOWNS['host_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default")
hbox_list.append( widgets.HBox([widgets.Label(value='Host grid '+inst, layout=layout), DROPDOWNS['host_grid_'+inst]]) )
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
DROPDOWNS[companion+'_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default")
hbox_list.append( widgets.HBox([widgets.Label(value=companion+' grid '+inst, layout=layout), DROPDOWNS[companion+'_grid_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Stellar shape (e.g. use "roche" for ellipsoidal variablity)
hbox_list = []
for inst in INPUT['inst_all']:
DROPDOWNS['host_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere")
hbox_list.append( widgets.HBox([widgets.Label(value='Host shape '+inst, layout=layout), DROPDOWNS['host_shape_'+inst]]) )
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
DROPDOWNS[companion+'_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere")
hbox_list.append( widgets.HBox([widgets.Label(value=companion+' shape '+inst, layout=layout), DROPDOWNS[companion+'_shape_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: Flux weighted RVs ("Yes" for Rossiter-McLaughlin effect)
hbox_list = []
for inst in INPUT['inst_rv']:
for companion in INPUT['companions_rv']:
DROPDOWNS[companion+'_flux_weighted_'+inst] = widgets.Dropdown(options=['No', 'Yes'])
hbox_list.append( widgets.HBox([widgets.Label(value=companion+' flux weighted RV '+inst, layout=layout), DROPDOWNS[companion+'_flux_weighted_'+inst]]) )
vbox_list.append( widgets.VBox(hbox_list) )
#::: accordion
accordion = widgets.Accordion(children=vbox_list)
accordion.set_title(0, 'Fitting & performance')
accordion.set_title(1, 'Limb darkening laws')
accordion.set_title(2, 'Baseline sampling')
accordion.set_title(3, 'Error sampling')
accordion.set_title(4, 'Exposure time interpolation')
accordion.set_title(5, 'Number of spots')
accordion.set_title(6, 'Number of flares')
accordion.set_title(7, 'TTVs')
accordion.set_title(8, 'Stellar grid (e.g. use "very_sparse" to speed up computations)')
accordion.set_title(9, 'Stellar shape (e.g. use "roche" for ellipsoidal variability)')
accordion.set_title(10, 'Flux weighted RVs (e.g. use "Yes" for Rossiter-McLaughlin effect)')
display(accordion)
#::: confirm button
button_2b = widgets.Button(description='Confirm', button_style='')
display(button_2b)
def confirm(change):
button_2b.style.button_color = 'lightgreen'
INPUT['show_step_2c'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
button_2b.on_click(confirm)
if 'show_step_2c' in INPUT and INPUT['show_step_2c'] == True:
BUTTONS['2c'] = widgets.Button(description='Create settings.csv', button_style='')
checkbox_2c = widgets.Checkbox(description='Overwrite old settings.csv (if existing)', value=False)
display(widgets.HBox([BUTTONS['2c'], checkbox_2c]))
def create_settings_file(change):
clear_output()
display(widgets.HBox([BUTTONS['2c'], checkbox_2c]))
go_ahead = True
if 'datadir' not in INPUT:
warnings.warn('No allesfitter working directory selected yet. Please go back to step 1) and fill in all fields.')
go_ahead = False
if os.path.exists(os.path.join(INPUT['datadir'],'settings.csv')) and (checkbox_2c.value==False):
warnings.warn('The file '+os.path.join(INPUT['datadir'],'settings.csv')+' already exists. To proceed, give permission to overwrite it.')
go_ahead = False
if go_ahead:
fname_settings = os.path.join(INPUT['datadir'], 'settings.csv')
with open(fname_settings, 'w+') as f:
f.write('#name,value\n')
def fwrite_settings(text):
with open(fname_settings, 'a') as f:
f.write(text+'\n')
fwrite_settings('###############################################################################,')
fwrite_settings('# General settings,')
fwrite_settings('###############################################################################,')
fwrite_settings('companions_phot,'+text_companions_phot.value)
fwrite_settings('companions_rv,'+text_companions_rv.value)
fwrite_settings('inst_phot,'+text_inst_phot.value)
fwrite_settings('inst_rv,'+text_inst_rv.value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Fit performance settings,')
fwrite_settings('###############################################################################,')
if DROPDOWNS['multiprocessing'].value=='No':
fwrite_settings('multiprocess,False')
elif DROPDOWNS['multiprocessing'].value=='always on all - 1 cores on any system':
fwrite_settings('multiprocess,True')
fwrite_settings('multiprocess_cores,all')
else:
fwrite_settings('multiprocess,True')
fwrite_settings('multiprocess_cores,'+DROPDOWNS['multiprocessing'].value.split(' ')[1])
if DROPDOWNS['fit_type'].value=='Transit (fast)':
fwrite_settings('fast_fit,True')
fwrite_settings('fast_fit_width,0.3333333333333333')
fwrite_settings('secondary_eclipse,False')
fwrite_settings('phase_curve,False')
elif DROPDOWNS['fit_type'].value=='Transit and occultation (fast)':
fwrite_settings('fast_fit,True')
fwrite_settings('fast_fit_width,0.3333333333333333')
fwrite_settings('secondary_eclipse,True')
fwrite_settings('phase_curve,False')
elif DROPDOWNS['fit_type'].value=='Full lightcurve (slow)':
fwrite_settings('fast_fit,False')
fwrite_settings('fast_fit_width,')
fwrite_settings('secondary_eclipse,True')
fwrite_settings('phase_curve,True')
fwrite_settings('phase_curve_style,GP')
if DROPDOWNS['shift_epoch'].value=='Yes':
fwrite_settings('shift_epoch,True')
for companion in INPUT['companions_all']:
fwrite_settings('inst_for_'+companion+'_epoch,all')
fwrite_settings('###############################################################################,')
fwrite_settings('# MCMC settings,')
fwrite_settings('###############################################################################,')
if DROPDOWNS['mcmc_settings'].value=='Default':
fwrite_settings('mcmc_nwalkers,100')
fwrite_settings('mcmc_total_steps,2000')
fwrite_settings('mcmc_burn_steps,1000')
fwrite_settings('mcmc_thin_by,1')
fwrite_settings('###############################################################################,')
fwrite_settings('# Nested Sampling settings,')
fwrite_settings('###############################################################################,')
if DROPDOWNS['ns_settings'].value=='Default':
fwrite_settings('ns_modus,dynamic')
fwrite_settings('ns_nlive,500')
fwrite_settings('ns_bound,single')
fwrite_settings('ns_sample,rwalk')
fwrite_settings('ns_tol,0.01')
fwrite_settings('###############################################################################,')
fwrite_settings("# Limb darkening law per object and instrument,")
fwrite_settings("# if 'lin' one corresponding parameter called 'ldc_q1_inst' has to be given in params.csv,")
fwrite_settings("# if 'quad' two corresponding parameter called 'ldc_q1_inst' and 'ldc_q2_inst' have to be given in params.csv,")
fwrite_settings("# if 'sing' three corresponding parameter called 'ldc_q1_inst'; 'ldc_q2_inst' and 'ldc_q3_inst' have to be given in params.csv,")
fwrite_settings('###############################################################################,')
def translate_ld(x):
if x=='None': return ''
elif x=='Linear': return 'lin'
elif x=='Quadratic': return 'quad'
elif x=='Sing': return 'sing'
for inst in INPUT['inst_phot']:
fwrite_settings('host_ld_law_'+inst+','+translate_ld(DROPDOWNS['host_ld_law_'+inst].value))
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
fwrite_settings(companion+'_ld_law_'+inst+','+translate_ld(DROPDOWNS[companion+'_ld_law_'+inst].value))
fwrite_settings('###############################################################################,')
fwrite_settings("# Baseline settings per instrument,")
fwrite_settings("# baseline params per instrument: sample_offset / sample_linear / sample_GP / hybrid_offset / hybrid_poly_1 / hybrid_poly_2 / hybrid_poly_3 / hybrid_pol_4 / hybrid_spline / hybrid_GP,")
fwrite_settings("# if 'sample_offset' one corresponding parameter called 'baseline_offset_key_inst' has to be given in params.csv,")
fwrite_settings("# if 'sample_linear' two corresponding parameters called 'baseline_a_key_inst' and 'baseline_b_key_inst' have to be given in params.csv,")
fwrite_settings("# if 'sample_GP' two corresponding parameters called 'baseline_gp1_key_inst' and 'baseline_gp2_key_inst' have to be given in params.csv,")
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_phot']:
fwrite_settings('baseline_flux_'+inst+','+DROPDOWNS['baseline_flux_'+inst].value)
for inst in INPUT['inst_rv']:
fwrite_settings('baseline_rv_'+inst+','+DROPDOWNS['baseline_rv_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings("# Error settings per instrument,")
fwrite_settings("# errors (overall scaling) per instrument: sample / hybrid,")
fwrite_settings("# if 'sample' one corresponding parameter called 'ln_err_key_inst' (photometry) or 'ln_jitter_key_inst' (RV) has to be given in params.csv,")
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_phot']:
fwrite_settings('error_flux_'+inst+','+DROPDOWNS['error_flux_'+inst].value)
for inst in INPUT['inst_rv']:
fwrite_settings('error_rv_'+inst+','+DROPDOWNS['error_rv_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Exposure times for interpolation,')
fwrite_settings('# needs to be in the same units as the time series,')
fwrite_settings('# if not given the observing times will not be interpolated leading to biased results,')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_all']:
fwrite_settings('t_exp_'+inst+','+DROPDOWNS['t_exp_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Number of points for exposure interpolation,')
fwrite_settings('# Sample as fine as possible; generally at least with a 2 min sampling for photometry,')
fwrite_settings('# n_int=5 was found to be a good number of interpolation points for any short photometric cadence t_exp;,')
fwrite_settings('# increase to at least n_int=10 for 30 min phot. cadence,')
fwrite_settings('# the impact on RV is not as drastic and generally n_int=5 is fine enough,')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_all']:
fwrite_settings('t_exp_n_int_'+inst+','+DROPDOWNS['t_exp_n_int_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Number of spots per object and instrument,')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_all']:
fwrite_settings('host_N_spots_'+inst+','+DROPDOWNS['host_N_spots_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Number of flares (in total),')
fwrite_settings('###############################################################################,')
fwrite_settings('N_flares'+','+DROPDOWNS['N_flares'].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# TTVs,')
fwrite_settings('###############################################################################,')
if DROPDOWNS['fit_ttvs'].value == 'no':
fwrite_settings('fit_ttvs'+',False')
elif DROPDOWNS['fit_ttvs'].value == 'yes':
fwrite_settings('fit_ttvs'+',True')
fwrite_settings('###############################################################################,')
fwrite_settings('# Stellar grid per object and instrument,')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_all']:
fwrite_settings('host_grid_'+inst+','+DROPDOWNS['host_grid_'+inst].value)
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
fwrite_settings(companion+'_grid_'+inst+','+DROPDOWNS[companion+'_grid_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Stellar shape per object and instrument,')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_all']:
fwrite_settings('host_shape_'+inst+','+DROPDOWNS['host_shape_'+inst].value)
if DROPDOWNS['planet_or_EB'].value == 'EBs':
for companion in INPUT['companions_all']:
fwrite_settings(companion+'_shape_'+inst+','+DROPDOWNS[companion+'_shape_'+inst].value)
fwrite_settings('###############################################################################,')
fwrite_settings('# Flux weighted RVs per object and instrument,')
fwrite_settings('# ("Yes" for Rossiter-McLaughlin effect),')
fwrite_settings('###############################################################################,')
for inst in INPUT['inst_rv']:
for companion in INPUT['companions_rv']:
fwrite_settings(companion+'_flux_weighted_'+inst+','+DROPDOWNS[companion+'_flux_weighted_'+inst].value)
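#::: for illustration (added, not in the original): with companion 'b', instrument 'TESS' and the
#::: default dropdown choices, the settings.csv written above contains lines such as
#:::     companions_phot,b
#:::     inst_phot,TESS
#:::     multiprocess,False
#:::     fast_fit,True
#:::     fast_fit_width,0.3333333333333333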
BUTTONS['2c'].style.button_color = 'lightgreen'
print('Done.')
INPUT['show_step_3'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
BUTTONS['2c'].on_click(create_settings_file)
###Output
_____no_output_____
###Markdown
3. parameters
###Code
if 'show_step_3' in INPUT and INPUT['show_step_3'] == True:
#::: placeholder
placeholder = widgets.Label(value='', visible=False, layout=layout)
#::: helper function
def add_row(key, label, hbox_list, median=0, lerr=0, uerr=0, transform='trunc_normal * 5', fit_value=False):
INPUT[key+'_median'] = widgets.FloatText(value=median, placeholder='NaN', layout=layout_textbox)
INPUT[key+'_lerr'] = widgets.FloatText(value=lerr, placeholder='NaN', layout=layout_textbox)
INPUT[key+'_uerr'] = widgets.FloatText(value=uerr, placeholder='NaN', layout=layout_textbox)
INPUT[key+'_bounds_type'] = widgets.Dropdown(options=['uniform', 'uniform * 5', 'trunc_normal', 'trunc_normal * 5'], value=transform, layout=layout)
INPUT[key+'_fit'] = widgets.Checkbox(value=fit_value, description='fit?', layout=layout_checkbox)
buf = placeholder
if key in [ companion+'_rsuma' for companion in INPUT['companions_all'] ]:
INPUT[key+'_input_type'] = widgets.Dropdown(options=['(R_comp + R_host) / a', 'R_host / a', 'a / R_host'], layout=layout)
buf = INPUT[key+'_input_type']
elif key in [ companion+'_cosi' for companion in INPUT['companions_all'] ]:
INPUT[key+'_input_type'] = widgets.Dropdown(options=['cos(i)', 'i (degree)', 'i (rad)'], layout=layout)
buf = INPUT[key+'_input_type']
hbox_list.append( widgets.HBox([widgets.Label(value=label, layout=layout),
INPUT[key+'_median'],
widgets.Label(value="-"), INPUT[key+'_lerr'],
widgets.Label(value="+"), INPUT[key+'_uerr'],
buf,
INPUT[key+'_bounds_type'],
INPUT[key+'_fit']]) )
#::: start
display(Markdown('### Initial guess and error bars'))
display(Markdown('These values will be converted into either uniform or truncated normal priors (with physical boundaries). The errors can be blown up by a factor of 5.'))
display(Markdown('#### Astrophysical params per companion'))
vbox_list = []
for companion in INPUT['companions_all']:
# display(Markdown('##### Companion '+companion))
hbox_list = []
add_row(companion+'_rsuma', 'Radii & semi-major axis:', hbox_list)
add_row(companion+'_rr', '$R_'+companion+' / R_\star$:', hbox_list)
add_row(companion+'_cosi', 'Inclination:', hbox_list)
add_row(companion+'_epoch', 'Epoch (d):', hbox_list)
add_row(companion+'_period', 'Period (d):', hbox_list)
if companion in INPUT['companions_rv']:
add_row(companion+'_K', 'K (km/s):', hbox_list)
add_row(companion+'_f_c', '$\sqrt{e} \cos{\omega}$:', hbox_list)
add_row(companion+'_f_s', '$\sqrt{e} \sin{\omega}$:', hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
tab = widgets.Tab(children=vbox_list)
for i, comp in enumerate(INPUT['companions_all']):
tab.set_title(i, 'Companion '+comp)
display(tab)
# else:
# print('Complete previous steps first.')
if 'show_step_3' in INPUT and INPUT['show_step_3'] == True:
display(Markdown('### Advanced params'))
vbox_list = []
#::: Dilution per instrument
hbox_list = []
for inst in INPUT['inst_phot']:
add_row('dil_'+inst, 'Dilution '+inst, hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Limb darkening per object and instrument
hbox_list = []
for inst in INPUT['inst_phot']:
if DROPDOWNS['host_ld_law_'+inst].value=='None':
pass
elif DROPDOWNS['host_ld_law_'+inst].value=='Linear':
add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic':
add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
elif DROPDOWNS['host_ld_law_'+inst].value=='Sing':
add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row('host_ldc_q3_'+inst, 'host LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
if DROPDOWNS['planet_or_EB'].value=='EBs':
for companion in INPUT['companions_phot']:
if DROPDOWNS[companion+'_ld_law_'+inst].value=='None':
pass
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear':
add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic':
add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing':
add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
add_row(companion+'_ldc_q3_'+inst, companion+' LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Surface brightness ratio per system and instrument
hbox_list = []
for inst in INPUT['inst_all']:
for companion in INPUT['companions_all']:
add_row(companion+'_sbratio_'+inst, companion+' sbratio '+inst, hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Geometric albedo per object and instrument
hbox_list = []
for inst in INPUT['inst_all']:
add_row('host_geom_albedo_'+inst, 'host geom. alb. '+inst, hbox_list)
for companion in INPUT['companions_all']:
add_row(companion+'_geom_albedo_'+inst, companion+' geom. alb. '+inst, hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Gravity darkening per object and instrument
hbox_list = []
for inst in INPUT['inst_all']:
add_row('host_gdc_'+inst, 'host grav. dark. '+inst, hbox_list)
if DROPDOWNS['planet_or_EB'].value=='EBs':
for companion in INPUT['companions_all']:
add_row(companion+'_gdc_'+inst, companion+' grav. dark. '+inst, hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Stellar spots per object and instrument
hbox_list = []
for inst in INPUT['inst_all']:
if len(DROPDOWNS['host_N_spots_'+inst].value):
N_spots = int(DROPDOWNS['host_N_spots_'+inst].value)
for i in range(1,N_spots+1):
add_row('host_spot_'+str(i)+'_lat_'+inst, 'host spot '+str(i)+' lat. '+inst+' (deg)', hbox_list)
add_row('host_spot_'+str(i)+'_long_'+inst, 'host spot '+str(i)+' long. '+inst+' (deg)', hbox_list)
add_row('host_spot_'+str(i)+'_size_'+inst, 'host spot '+str(i)+' size '+inst+' (deg)', hbox_list)
add_row('host_spot_'+str(i)+'_brightness_'+inst,'host spot '+str(i)+' brightness '+inst, hbox_list)
# To keep the GUI simplistic, spots on companions are only available by manually editing the params.csv and settings.csv files
# if DROPDOWNS['planet_or_EB'].value == 'EBs':
# for companion in INPUT['companions_all']:
# if len(DROPDOWNS[companion+'_N_spots_'+inst].value):
# N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value)
# for i in range(1,N_spots+1):
# add_row(companion+'_spot_'+str(i)+'_lat_'+inst, companion+' spot '+str(i)+' lat. '+inst+' (deg)', hbox_list)
# add_row(companion+'_spot_'+str(i)+'_long_'+inst, companion+' spot '+str(i)+' long. '+inst+' (deg)', hbox_list)
# add_row(companion+'_spot_'+str(i)+'_size_'+inst, companion+' spot '+str(i)+' size '+inst+' (deg)', hbox_list)
# add_row(companion+'_spot_'+str(i)+'_brightness_'+inst, companion+' spot '+str(i)+' brightness '+inst, hbox_list)
if len(hbox_list)==0:
pass
#hbox_list.append(widgets.Label(value='N_spots was set to "None" for all objects and instruments.'))
vbox_list.append( widgets.VBox(hbox_list) )
#::: Flares
hbox_list = []
if len(DROPDOWNS['N_flares'].value):
N_flares = int(DROPDOWNS['N_flares'].value)
for i in range(1,N_flares+1):
add_row('flare_tpeak_'+str(i), 'Flare tpeak '+str(i), hbox_list)
add_row('flare_fwhm_'+str(i), 'Flare fwhm '+str(i), hbox_list)
add_row('flare_ampl_'+str(i), 'Flare ampl '+str(i), hbox_list)
vbox_list.append( widgets.VBox(hbox_list) )
#::: TTV per transit
hbox_list = []
if (DROPDOWNS['fit_ttvs'].value)=='yes':
for companion in INPUT['companions_all']:
add_row(companion+'_ttv_per_transit', 'TTV per transit', hbox_list, median=0, lerr=0.00347222, uerr=0.00347222, transform='uniform', fit_value=True)
vbox_list.append( widgets.VBox(hbox_list) )
#::: Errors per instrument
hbox_list = []
for inst in INPUT['inst_phot']:
if DROPDOWNS['error_flux_'+inst].value == 'sample':
add_row('ln_err_flux_'+inst, 'ln err flux '+inst, hbox_list, median=-7, lerr=8, uerr=7, transform='uniform', fit_value=True)
else:
pass
#hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".'))
for inst in INPUT['inst_rv']:
if DROPDOWNS['error_rv_'+inst].value == 'sample':
add_row('ln_jitter_rv_'+inst, 'ln jitter rv '+inst, hbox_list, median=-3, lerr=12, uerr=3, transform='uniform', fit_value=True)
else:
pass
#hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".'))
vbox_list.append( widgets.VBox(hbox_list) )
#::: Baselines per instrument
hbox_list = []
for inst in INPUT['inst_all']:
if inst in INPUT['inst_phot']:
key = 'flux'
elif inst in INPUT['inst_rv']:
key = 'rv'
if DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32':
add_row('baseline_gp_matern32_lnsigma_'+key+'_'+inst, 'baseline gp Matern32 lnsigma '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_matern32_lnrho_'+key+'_'+inst, 'baseline gp Matern32 lnrho '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO':
add_row('baseline_gp_sho_lnS0_'+key+'_'+inst, 'baseline gp SHO lnS0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_sho_lnQ_'+key+'_'+inst, 'baseline gp SHO lnQ '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_sho_lnomega0_'+key+'_'+inst, 'baseline gp SHO lnomega0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real':
add_row('baseline_gp_real_lna_'+key+'_'+inst, 'baseline gp real lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_real_lnc_'+key+'_'+inst, 'baseline gp real lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex':
add_row('baseline_gp_complex_lna_'+key+'_'+inst, 'baseline gp complex lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_complex_lnc_'+key+'_'+inst, 'baseline gp complex lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_complex_lnb_'+key+'_'+inst, 'baseline gp complex lnb '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
add_row('baseline_gp_complex_lnd_'+key+'_'+inst, 'baseline gp complex lnd '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True)
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset':
add_row('baseline_offset_'+key+'_'+inst, 'baseline offset '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True)
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear':
add_row('baseline_a_'+key+'_'+inst, 'baseline lin. a '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True)
add_row('baseline_b_'+key+'_'+inst, 'baseline lin. b '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True)
vbox_list.append( widgets.VBox(hbox_list) )
#::: accordion
accordion = widgets.Accordion(children=vbox_list)
accordion.set_title(0, 'Dilution')
accordion.set_title(1, 'Limb darkening')
accordion.set_title(2, 'Surface brightness ratio')
accordion.set_title(3, 'Geometric albedo')
accordion.set_title(4, 'Gravity darkening')
accordion.set_title(5, 'Stellar spots')
accordion.set_title(6, 'Flares')
accordion.set_title(7, 'TTVs')
accordion.set_title(8, 'Errors & jitter')
accordion.set_title(9, 'Baselines')
display(accordion)
if 'show_step_3' in INPUT and INPUT['show_step_3'] == True:
nan_fields = False
button_create_params_file = widgets.Button(description='Create params.csv', button_style='')
checkbox_overwrite_params_file = widgets.Checkbox(description='Overwrite old params.csv (if existing)', value=False)
hbox_params_file = widgets.HBox([button_create_params_file, checkbox_overwrite_params_file])
display(hbox_params_file)
def create_params_file(change):
clear_output()
display(hbox_params_file)
print('Calculating... this might take a few seconds. Please be patient, you will get notified once everything is completed.')
go_ahead = True
if 'datadir' not in INPUT:
warnings.warn('No allesfitter working directory selected yet. Please go back to step 1) and fill in all fields.')
go_ahead = False
if os.path.exists(os.path.join(INPUT['datadir'],'params.csv')) and (checkbox_overwrite_params_file.value==False):
warnings.warn('The file '+os.path.join(INPUT['datadir'],'params.csv')+' already exists. To proceed, give permission to overwrite it.')
go_ahead = False
if go_ahead:
INPUT['fname_params'] = os.path.join(INPUT['datadir'], 'params.csv')
with open(INPUT['fname_params'], 'w+') as f:
f.write('#name,value,fit,bounds,label,unit\n')
def get_median_and_error_strings(text_median, text_lerr, text_uerr):
global nan_fields  # update the cell-level flag that triggers the NaN warning after writing params.csv
if (text_median.value == ''):
median = 'NaN'
nan_fields = True
else:
median = text_median.value
if (text_lerr.value == '') or (text_uerr.value == ''):
err = 'NaN'
nan_fields = True
else:
err = str( 5.* np.max( [float(text_lerr.value), float(text_uerr.value)] ) )
median, err, _ = round_txt_separately( float(median), float(err), float(err) )
return median, err
#:::: astrophysical parameters per system
for companion in INPUT['companions_all']:
fwrite_params_line('#companion '+companion+' astrophysical params,,,,,')
#::: rr
fwrite_params(companion+'_rr', '$R_'+companion+' / R_\star$', '', [0,1])
#::: rsuma
if INPUT[companion+'_rsuma_input_type'].value=='(R_comp + R_host) / a':
pass
elif INPUT[companion+'_rsuma_input_type'].value=='R_host / a':
Rstar_over_a = [ float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ]
Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ]
INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \
= get_Rsuma_from_Rstar_over_a(Rstar_over_a, Rp_over_Rstar)
INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a'
elif INPUT[companion+'_rsuma_input_type'].value=='a / R_host':
a_over_Rstar = [ float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ]
Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ]
INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \
= get_Rsuma_from_a_over_Rstar(a_over_Rstar, Rp_over_Rstar)
INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a'
else:
raise ValueError('Oops, something went wrong.')
fwrite_params(companion+'_rsuma', '$(R_\star + R_'+companion+') / a_'+companion+'$', '', [0,1])
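#::: note (added): the conversions above rely on the identity
#:::     (R_host + R_comp) / a = (R_host / a) * (1 + R_comp / R_host) = (1 + R_comp / R_host) / (a / R_host),
#::: and the get_Rsuma_* helpers are assumed to propagate the quoted errors accordingly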
#::: cosi
if INPUT[companion+'_cosi_input_type'].value=='cos(i)':
pass
elif INPUT[companion+'_cosi_input_type'].value=='i (degree)':
incl = [ float(INPUT[companion+'_cosi_median'].value), float(INPUT[companion+'_cosi_lerr'].value), float(INPUT[companion+'_cosi_uerr'].value) ]
INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \
= get_cosi_from_i(incl)
INPUT[companion+'_cosi_input_type'].value = 'cos(i)'
elif INPUT[companion+'_cosi_input_type'].value=='i (rad)':
# convert rad to degree, so get_cosi_from_i receives the same unit as in the 'i (degree)' branch above
incl = [ float(INPUT[companion+'_cosi_median'].value)*180./np.pi, float(INPUT[companion+'_cosi_lerr'].value)*180./np.pi, float(INPUT[companion+'_cosi_uerr'].value)*180./np.pi ]
INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \
= get_cosi_from_i(incl)
INPUT[companion+'_cosi_input_type'].value = 'cos(i)'
fwrite_params(companion+'_cosi', '$\cos{i_'+companion+'}$', '', [0,1])
#::: epoch
fwrite_params(companion+'_epoch', '$T_{0;'+companion+'}$', '$\mathrm{BJD}$', [-1e12,1e12])
#::: period
fwrite_params(companion+'_period', '$P_'+companion+'$', '$\mathrm{d}$', [-1e12,1e12])
#::: RV semi-amplitude
if companion in INPUT['companions_rv']:
fwrite_params(companion+'_K', '$K_'+companion+'$', '$\mathrm{km/s}$', [-1e12,1e12])
#::: eccentricity f_c
fwrite_params(companion+'_f_c', '$\sqrt{e_'+companion+'} \cos{\omega_'+companion+'}$', '', [-1,1])
#::: eccentricity f_s
fwrite_params(companion+'_f_s', '$\sqrt{e_'+companion+'} \sin{\omega_'+companion+'}$', '', [-1,1])
#::: dilution per instrument
if len(INPUT['inst_phot']):
fwrite_params_line('#dilution per instrument,,,,,')
for inst in INPUT['inst_phot']:
fwrite_params('dil_'+inst, '$D_\mathrm{0; '+inst+'}$', '', [0,1])
#fwrite_params('dil_'+inst+',0,0,trunc_normal 0 1 0 0,$D_\mathrm{0; '+inst+'}$,')
#::: limb darkening coefficients per instrument
if len(INPUT['inst_phot']):
fwrite_params_line('#limb darkening coefficients per instrument,,,,,')
for inst in INPUT['inst_phot']:
#::: host
if DROPDOWNS['host_ld_law_'+inst].value=='None':
pass
elif DROPDOWNS['host_ld_law_'+inst].value=='Linear':
fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic':
fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1])
elif DROPDOWNS['host_ld_law_'+inst].value=='Sing':
fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params('host_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1])
#::: companion (if EB)
if DROPDOWNS['planet_or_EB'].value=='EBs':
for companion in INPUT['companions_phot']:
if DROPDOWNS[companion+'_ld_law_'+inst].value=='None':
pass
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear':
fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic':
fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1])
elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing':
fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1])
fwrite_params(companion+'_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1])
#::: brightness ratio per system and instrument
if len(INPUT['inst_all']):
fwrite_params_line('#surface brightness per instrument and companion,,,,,')
for companion in INPUT['companions_all']:
for inst in INPUT['inst_all']:
fwrite_params(companion+'_sbratio_'+inst, '$J_{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1])
#::: geometric albedo per system and instrument
if len(INPUT['inst_all']):
fwrite_params_line('#albedo per instrument and companion,,,,,')
for inst in INPUT['inst_all']:
fwrite_params('host_geom_albedo_'+inst, '$A_{\mathrm{geom}; host; \mathrm{'+inst+'}}$', '', [0,1])
for companion in INPUT['companions_all']:
for inst in INPUT['inst_all']:
fwrite_params(companion+'_geom_albedo_'+inst, '$A_{\mathrm{geom}; '+companion+'; \mathrm{'+inst+'}}$', '', [0,1])
#::: gravity darkening per object and instrument
if len(INPUT['inst_all']):
fwrite_params_line('#gravity darkening per instrument and companion,,,,,')
for inst in INPUT['inst_all']:
#::: host
fwrite_params('host_gdc_'+inst, '$Grav. dark._{host; \mathrm{'+inst+'}}$', '', [0,1])
#::: companion (if EB)
if DROPDOWNS['planet_or_EB'].value=='EBs':
for companion in INPUT['companions_all']:
fwrite_params(companion+'_gdc_'+inst, '$Grav. dark._{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1])
#::: spots per object and instrument
if len(INPUT['inst_all']):
fwrite_params_line('#spots per instrument and companion,,,,,')
for inst in INPUT['inst_all']:
if len(DROPDOWNS['host_N_spots_'+inst].value):
N_spots = int(DROPDOWNS['host_N_spots_'+inst].value)
for i in range(1,N_spots+1):
#::: host
fwrite_params('host_spot_'+str(i)+'_long_'+inst, '$\mathrm{host: spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360])
fwrite_params('host_spot_'+str(i)+'_lat_'+inst, '$\mathrm{host: spot '+str(i)+' lat. '+inst+'}$', '\mathrm{deg}', [-90,90])
fwrite_params('host_spot_'+str(i)+'_size_'+inst, '$\mathrm{host: spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30])
fwrite_params('host_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{host: spot '+str(i)+' brightness '+inst+'}$', '', [0,1])
#::: companion (if EB)
if DROPDOWNS['planet_or_EB'].value=='EBs':
for companion in INPUT['companions_all']:
if (companion+'_N_spots_'+inst in DROPDOWNS) and len(DROPDOWNS[companion+'_N_spots_'+inst].value):  # companion spot widgets are not created by this GUI by default
N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value)
for i in range(1,N_spots+1):
fwrite_params(companion+'_spot_'+str(i)+'_long_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360])
fwrite_params(companion+'_spot_'+str(i)+'_lat_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' lat. '+inst+'}$', '\mathrm{deg}', [-90,90])
fwrite_params(companion+'_spot_'+str(i)+'_size_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30])
fwrite_params(companion+'_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' brightness '+inst+'}$', '', [0,1])
#::: flares
if len(DROPDOWNS['N_flares'].value):
fwrite_params_line('#flares,,,,,')
N_flares = int(DROPDOWNS['N_flares'].value)
for i in range(1,N_flares+1):
fwrite_params('flare_tpeak_'+str(i), '$t_\mathrm{peak; flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12])
fwrite_params('flare_ampl_'+str(i), '$A_\mathrm{flare '+str(i)+'}$', '$\mathrm{rel. flux.}$', [-1e12,1e12])
fwrite_params('flare_fwhm_'+str(i), '$FWHM_\mathrm{flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12])
#::: TTV per instrument
if (DROPDOWNS['fit_ttvs'].value=='yes'):
fwrite_params_line('#TTV per transit,,,,,')
warnings.warn('TTV priors in params.csv will not be set until you also complete step 4 (adding the data files).')
# for inst in INPUT['inst_phot']:
# fwrite_params('ttv_'+inst, '$\mathrm{TTV_'+inst+'}$', '$\mathrm{d}$', [-1e12,1e12])
#::: errors and baselines - keep track of rows
INPUT['N_last_rows'] = 0
#::: errors per instrument
if any( [ 'sample' in DROPDOWNS['error_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \
or any( [ 'sample' in DROPDOWNS['error_rv_'+inst].value for inst in INPUT['inst_rv'] ] ):
fwrite_params_line('#errors per instrument,')
INPUT['N_last_rows'] += 1
for inst in INPUT['inst_phot']:
if 'hybrid' not in DROPDOWNS['error_flux_'+inst].value:
fwrite_params('ln_err_flux_'+inst, '$\ln{\sigma_\mathrm{'+inst+'}}$', '$\ln{ \mathrm{rel. flux.} }$', [-15,0])
INPUT['N_last_rows'] += 1
for inst in INPUT['inst_rv']:
if 'hybrid' not in DROPDOWNS['error_rv_'+inst].value:
fwrite_params('ln_jitter_rv_'+inst, '$\ln{\sigma_\mathrm{jitter; '+inst+'}}$', '$\ln{ \mathrm{km/s} }$', [-15,0])
INPUT['N_last_rows'] += 1
#::: baseline
if any( [ 'sample' in DROPDOWNS['baseline_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \
or any( [ 'sample' in DROPDOWNS['baseline_rv_'+inst].value for inst in INPUT['inst_rv'] ] ):
fwrite_params_line('#baseline per instrument,')
INPUT['N_last_rows'] += 1
for inst in INPUT['inst_all']:
if inst in INPUT['inst_phot']:
key = 'flux'
elif inst in INPUT['inst_rv']:
key = 'rv'
if DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32':
fwrite_params('baseline_gp_matern32_lnsigma_'+key+'_'+inst, '$\mathrm{gp: \ln{\sigma} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_matern32_lnrho_'+key+'_'+inst, '$\mathrm{gp: \ln{\\rho} ('+inst+')}$', '', [-15,15])
INPUT['N_last_rows'] += 2
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO':
fwrite_params('baseline_gp_sho_lnS0_'+key+'_'+inst, '$\mathrm{gp: \ln{S_0} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_sho_lnQ_'+key+'_'+inst, '$\mathrm{gp: \ln{Q} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_sho_lnomega0_'+key+'_'+inst, '$\mathrm{gp: \ln{\omega_0} ('+inst+')}$', '', [-15,15])
INPUT['N_last_rows'] += 3
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real':
fwrite_params('baseline_gp_real_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_real_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15])
INPUT['N_last_rows'] += 2
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex':
fwrite_params('baseline_gp_complex_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_complex_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_complex_lnb_'+key+'_'+inst, '$\mathrm{gp: \ln{b} ('+inst+')}$', '', [-15,15])
fwrite_params('baseline_gp_complex_lnd_'+key+'_'+inst, '$\mathrm{gp: \ln{d} ('+inst+')}$', '', [-15,15])
INPUT['N_last_rows'] += 4
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset':
fwrite_params('baseline_offset_'+key+'_'+inst, 'offset ('+inst+')', '', [-1e12,1e12])
INPUT['N_last_rows'] += 1
elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear':
fwrite_params('baseline_a_'+key+'_'+inst, 'lin. a ('+inst+')', '', [-1e12,1e12])
fwrite_params('baseline_b_'+key+'_'+inst, 'lin. b ('+inst+')', '', [-1e12,1e12])
INPUT['N_last_rows'] += 2
#::: continue
button_create_params_file.style.button_color = 'lightgreen'
print('Done.')
INPUT['show_step_4'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
if nan_fields:
warnings.warn('You left some fields empty. These will be set NaN in params.csv. Make sure to fix this manually later.')
button_create_params_file.on_click(create_params_file)
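# optional sanity check (added, not in the original): after clicking the button, the freshly written
# file can be inspected directly, e.g.
# with open(os.path.join(INPUT['datadir'], 'params.csv')) as f:
#     print(f.read())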
###Output
_____no_output_____
###Markdown
4. data files. Please put all data files into the selected directory, and click the button to confirm.
###Code
if 'show_step_4' in INPUT and INPUT['show_step_4']==True:
BUTTONS['confirm_data_files'] = widgets.Button(description='Confirm', button_style='')
display(BUTTONS['confirm_data_files'])
def check_data_files(change):
clear_output()
display(BUTTONS['confirm_data_files'])
all_data_exists = True
for inst in INPUT['inst_all']:
if not os.path.exists( os.path.join(INPUT['datadir'], inst+'.csv') ):
warnings.warn('Data file '+os.path.join(INPUT['datadir'], inst+'.csv')+' does not exist. Please include the data file into the directory and then repeat this step.')
all_data_exists = False
if all_data_exists:
BUTTONS['confirm_data_files'].style.button_color = 'lightgreen'
INPUT['show_step_5'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
BUTTONS['confirm_data_files'].on_click(check_data_files)
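# note (added assumption): each instrument file, e.g. TESS.csv, holds that instrument's time series;
# for photometry this is typically three comma-separated columns along the lines of
#     time,flux,flux_err
#     2458325.51,1.0003,0.0012
# (check the allesfitter documentation for the exact format expected)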
# else:
# print('Complete previous steps first.')
############################################################################
#::: time to include those TTV lines into the folder!
############################################################################
if 'show_step_5' in INPUT and INPUT['show_step_5']==True and DROPDOWNS['fit_ttvs'].value=='yes':
from allesfitter import config
config.init(INPUT['datadir'])
new_lines = ''
for companion in INPUT['companions_all']:
N_observed_transits = len(config.BASEMENT.data[companion+'_tmid_observed_transits'])
for i in range(N_observed_transits):
string = fwrite_params(companion+'_ttv_per_transit', 'TTV$_\mathrm{'+str(i+1)+'}$', '$\mathrm{d}$', [-15,15], return_str=True) + '\n'
string = string.replace('per_transit', 'transit_'+str(i+1))
new_lines += string
with open(INPUT['fname_params'], "r") as f:
contents = f.readlines()
for i, line in enumerate(contents):
line = line.rstrip() # remove '\n' at end of line
if line == '#TTV per transit,,,,,':
index = i+1
contents.insert(index, new_lines)
with open(INPUT['fname_params'], "w") as f:
contents = "".join(contents)
f.write(contents)
print('TTVs per transit were added to params.csv.')
print('params.csv and settings.csv are now ready to use.')
###Output
_____no_output_____
###Markdown
5. check
###Code
if 'show_step_5' in INPUT and INPUT['show_step_5']==True:
from allesfitter.general_output import show_initial_guess
import matplotlib.pyplot as plt
fig_list = show_initial_guess(INPUT['datadir'], do_logprint=False, return_figs=True)
for fig in fig_list:
plt.show(fig)
if 'show_step_5' in INPUT and INPUT['show_step_5']==True:
BUTTONS['confirm_plots'] = widgets.Button(description='Looks good', button_style='')
display(BUTTONS['confirm_plots'])
def check_plots(change):
clear_output()
display(BUTTONS['confirm_plots'])
BUTTONS['confirm_plots'].style.button_color = 'lightgreen'
INPUT['show_step_6'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
BUTTONS['confirm_plots'].on_click(check_plots)
# else:
# print('Complete previous steps first.')
###Output
_____no_output_____
###Markdown
6. tighter priors on errors and baselines. This will take a couple of minutes. Make sure your initial guess above is very good. This step subtracts the model from the data and evaluates the remaining noise patterns to estimate errors, jitter, and GP baselines.
###Code
if 'show_step_6' in INPUT and INPUT['show_step_6']==True:
def estimate_tighter_priors(change):
print('\nEstimating errors and baselines... this will take a couple of minutes. Please be patient, you will get notified once everything is completed.\n')
#::: run MCMC fit to estimate errors and baselines
estimate_noise(INPUT['datadir'])
#::: delete the rows containing the default (zero) errors and baselines from the params.csv file
clean_up_csv( os.path.join( INPUT['datadir'], 'params.csv' ), N_last_rows=INPUT['N_last_rows'] )
#::: write new rows into params.csv
#::: errors
fwrite_params_line('#errors per instrument,')
for i, inst in enumerate(INPUT['inst_phot']):
#::: read in the summary file
summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' )
priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
priors = {}
for key in priors2.dtype.names:
priors[key] = np.atleast_1d(priors2[key])
median = priors['ln_yerr_median'][i]
err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ])
median, err, _ = round_txt_separately(median,err,err)
fwrite_params_line('ln_err_flux_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{'+inst+'}}$,')
for i, inst in enumerate(INPUT['inst_rv']):
#::: read in the summary file
summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_rv.csv' )
priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
priors = {}
for key in priors2.dtype.names:
priors[key] = np.atleast_1d(priors2[key])
median = priors['ln_yerr_median'][i]
err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ])
median, err, _ = round_txt_separately(median,err,err)
            fwrite_params_line('ln_jitter_rv_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{jitter; '+inst+'}}$,')
#::: write new rows into params.csv
#::: baselines
fwrite_params_line('#baseline per instrument,')
for i, inst in enumerate(INPUT['inst_phot']):
#::: read in the summary file
summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' )
priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
priors = {}
for key in priors2.dtype.names:
priors[key] = np.atleast_1d(priors2[key])
median = priors['gp_ln_sigma_median'][i]
err = 5.*np.max([ float(priors['gp_ln_sigma_ll'][i]), float(priors['gp_ln_sigma_ul'][i]) ])
median, err, _ = round_txt_separately(median,err,err)
fwrite_params_line('baseline_gp1_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\sigma} ('+inst+')}$,')
median = priors['gp_ln_rho_median'][i]
err = 5.*np.max([ float(priors['gp_ln_rho_ll'][i]), float(priors['gp_ln_rho_ul'][i]) ])
median, err, _ = round_txt_separately(median,err,err)
fwrite_params_line('baseline_gp2_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\\rho} ('+inst+')}$,')
#::: confirm
BUTTONS['estimate_tighter_priors'].style.button_color = 'lightgreen'
print('Done.')
INPUT['show_step_7'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
def skip(change):
BUTTONS['skip'].style.button_color = 'lightgreen'
print('Skipped.')
INPUT['show_step_7'] = True
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
# else:
# print('Complete previous steps first.')
if 'show_step_6' in INPUT and INPUT['show_step_6']==True:
BUTTONS['estimate_tighter_priors'] = widgets.Button(value=False, description='Estimate tighter priors')
BUTTONS['skip'] = widgets.Button(value=False, description='Skip')
display( widgets.HBox([BUTTONS['estimate_tighter_priors'],BUTTONS['skip']]))
BUTTONS['estimate_tighter_priors'].on_click(estimate_tighter_priors)
BUTTONS['skip'].on_click(skip)
###Output
_____no_output_____
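###Markdown
The three summary-file blocks above repeat the same read-and-convert pattern. A small helper along these lines (hypothetical, not part of allesfitter) makes the pattern explicit: it loads a priors summary CSV into a dict of 1-d arrays keyed by column name.
###Code
import os
import numpy as np

def read_priors_summary(datadir, which='phot'):
    #::: read e.g. priors/summary_phot.csv (or summary_rv.csv) into {column_name: 1-d array}
    summaryfile = os.path.join(datadir, 'priors', 'summary_'+which+'.csv')
    priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
    return {key: np.atleast_1d(priors2[key]) for key in priors2.dtype.names}

#::: usage sketch: priors = read_priors_summary(INPUT['datadir'], which='phot')
###Output
_____no_output_____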
###Markdown
7. run the fit
###Code
if 'show_step_7' in INPUT and INPUT['show_step_7']==True:
try:
from importlib import reload
except:
pass
try:
from imp import reload
except:
pass
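    #::: reload allesfitter so the latest module code is used (importlib.reload on Python 3, imp.reload as the legacy fallback)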
import allesfitter
reload(allesfitter)
button_run_ns_fit = widgets.Button(description='Run NS fit', button_style='')
button_run_mcmc_fit = widgets.Button(description='Run MCMC fit', button_style='')
hbox = widgets.HBox([button_run_ns_fit, button_run_mcmc_fit])
display(hbox)
def run_ns_fit(change):
button_run_ns_fit.style.button_color = 'lightgreen'
allesfitter.ns_fit(INPUT['datadir'])
allesfitter.ns_output(INPUT['datadir'])
def run_mcmc_fit(change):
button_run_mcmc_fit.style.button_color = 'lightgreen'
allesfitter.mcmc_fit(INPUT['datadir'])
allesfitter.mcmc_output(INPUT['datadir'])
button_run_ns_fit.on_click(run_ns_fit)
button_run_mcmc_fit.on_click(run_mcmc_fit)
# else:
# print('Complete previous steps first.')
###Output
_____no_output_____ |
Build_On_Colab.ipynb | ###Markdown
###Code
# The basics
import numpy as np
import pandas as pd
import os
import subprocess
###Output
_____no_output_____
###Markdown
**Mount G-Drive to download or save files**
###Code
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
###Output
Mounted at /content/gdrive
###Markdown
**Copy the Kaggle configuration JSON file so data can be downloaded from Kaggle**
###Code
!mkdir /root/.kaggle/
!cp "/content/gdrive/My Drive/Colab Notebooks/.kaggle/kaggle.json" '/root/.kaggle/kaggle.json'
###Output
_____no_output_____
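###Markdown
The Kaggle CLI expects the API token to be readable only by its owner; tightening the file permissions (an optional extra step, not in the original setup) avoids its insecure-permissions warning.
###Code
!chmod 600 /root/.kaggle/kaggle.json
###Output
_____no_output_____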
###Markdown
**Clone the code from GitHub**
###Code
!git clone https://github.com/codefupanda/customer_interaction_summary.git
###Output
fatal: destination path 'customer_interaction_summary' already exists and is not an empty directory.
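###Markdown
The "destination path already exists" message above only means the repository was cloned earlier in this runtime. A guarded variant of the same command (a small sketch, not part of the original notebook) skips the clone when the directory is already present:
###Code
![ -d customer_interaction_summary ] || git clone https://github.com/codefupanda/customer_interaction_summary.git
###Output
_____no_output_____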
###Markdown
**Init step**: Download required data and dependencies
###Code
!cd customer_interaction_summary && make requirements && make data
###Output
_____no_output_____
###Markdown
**TRAIN**: Train the models; `src/models/model_configs.py` holds the configuration for which models to train
###Code
!cd customer_interaction_summary && git pull
!cd customer_interaction_summary && python3 src/data/make_dataset.py data/raw data/processed
!cd customer_interaction_summary && rm -rf random_search
!cd customer_interaction_summary && make train
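# Note: `make train` runs the training script with a fixed set of flags (visible in the output below).
# The same run can be launched directly if other hyperparameters are needed, e.g.:
# !cd customer_interaction_summary && python3 src/models/train_model.py \
#     --input_filepath=data/processed --output_filepath=models/ \
#     --pad_sequences_maxlen=1000 --max_words=30000 --epochs=20 --batch_size=128 --output_dim=300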
###Output
rm -rf random_search
python3 src/models/train_model.py --input_filepath=data/processed --output_filepath=models/ --pad_sequences_maxlen=1000 --max_words=30000 --epochs=20 --batch_size=128 --output_dim=300
2020-10-28 07:29:18.184864: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-28 07:29:19,435 - __main__ - INFO - starting the training process
2020-10-28 07:29:19,435 - __main__ - INFO - --input_filepath data/processed
2020-10-28 07:29:19,435 - __main__ - INFO - --output_filepath models/
2020-10-28 07:29:19,435 - __main__ - INFO - --pad_sequences_maxlen 1000
2020-10-28 07:29:19,435 - __main__ - INFO - --max_words 30000
2020-10-28 07:29:19,435 - __main__ - INFO - --epochs 20
2020-10-28 07:29:19,435 - __main__ - INFO - --batch_size 128
Found 5799 unique tokens.
Found 400000 word vectors.
Training the model: BiLSTM
2020-10-28 07:29:48.304404: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcuda.so.1
2020-10-28 07:29:48.341474: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.342101: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla T4 computeCapability: 7.5
coreClock: 1.59GHz coreCount: 40 deviceMemorySize: 14.73GiB deviceMemoryBandwidth: 298.08GiB/s
2020-10-28 07:29:48.342188: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-28 07:29:48.343944: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2020-10-28 07:29:48.345790: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcufft.so.10
2020-10-28 07:29:48.346169: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcurand.so.10
2020-10-28 07:29:48.347800: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusolver.so.10
2020-10-28 07:29:48.348625: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusparse.so.10
2020-10-28 07:29:48.352533: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2020-10-28 07:29:48.352657: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.353241: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.353824: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1858] Adding visible gpu devices: 0
2020-10-28 07:29:48.359627: I tensorflow/core/platform/profile_utils/cpu_utils.cc:104] CPU Frequency: 2200000000 Hz
2020-10-28 07:29:48.359821: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x1668f40 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-10-28 07:29:48.359850: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2020-10-28 07:29:48.466938: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.467701: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x16692c0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2020-10-28 07:29:48.467733: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Tesla T4, Compute Capability 7.5
2020-10-28 07:29:48.468067: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.468661: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla T4 computeCapability: 7.5
coreClock: 1.59GHz coreCount: 40 deviceMemorySize: 14.73GiB deviceMemoryBandwidth: 298.08GiB/s
2020-10-28 07:29:48.468746: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-28 07:29:48.468801: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2020-10-28 07:29:48.468836: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcufft.so.10
2020-10-28 07:29:48.468861: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcurand.so.10
2020-10-28 07:29:48.468884: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusolver.so.10
2020-10-28 07:29:48.468908: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusparse.so.10
2020-10-28 07:29:48.468934: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2020-10-28 07:29:48.469024: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.469697: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.470232: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1858] Adding visible gpu devices: 0
2020-10-28 07:29:48.470318: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-28 07:29:48.980105: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1257] Device interconnect StreamExecutor with strength 1 edge matrix:
2020-10-28 07:29:48.980184: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1263] 0
2020-10-28 07:29:48.980199: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1276] 0: N
2020-10-28 07:29:48.980455: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.981200: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-28 07:29:48.981795: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
2020-10-28 07:29:48.981861: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1402] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 13962 MB memory) -> physical GPU (device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5)
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:29:48,984 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
[Search space summary]
 |-Default search space size: 0
Epoch 1/20
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
2020-10-28 07:29:50,449 - tensorflow - WARNING - From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,667 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,670 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,674 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,676 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,679 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:51,680 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:53,772 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:53,774 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:53,776 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:53,778 - tensorflow - INFO - Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
2020-10-28 07:29:55.044013: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2020-10-28 07:29:55.438659: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
162/162 [==============================] - 17s 107ms/step - loss: 2.8539 - accuracy: 0.3444 - f1_m: 0.2791 - val_loss: 1.3751 - val_accuracy: 0.4991 - val_f1_m: 0.4656
Epoch 2/20
162/162 [==============================] - 15s 95ms/step - loss: 0.9914 - accuracy: 0.6442 - f1_m: 0.6160 - val_loss: 1.3410 - val_accuracy: 0.5235 - val_f1_m: 0.5059
Epoch 3/20
162/162 [==============================] - 16s 97ms/step - loss: 0.6011 - accuracy: 0.7996 - f1_m: 0.7846 - val_loss: 1.3610 - val_accuracy: 0.5791 - val_f1_m: 0.5625
Epoch 4/20
162/162 [==============================] - 16s 96ms/step - loss: 0.3726 - accuracy: 0.8881 - f1_m: 0.8793 - val_loss: 1.4406 - val_accuracy: 0.5687 - val_f1_m: 0.5657
Epoch 5/20
162/162 [==============================] - 15s 94ms/step - loss: 0.2572 - accuracy: 0.9322 - f1_m: 0.9281 - val_loss: 1.5078 - val_accuracy: 0.5704 - val_f1_m: 0.5630
Epoch 6/20
162/162 [==============================] - 15s 94ms/step - loss: 0.2022 - accuracy: 0.9486 - f1_m: 0.9464 - val_loss: 1.5801 - val_accuracy: 0.5478 - val_f1_m: 0.5636
Epoch 7/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1689 - accuracy: 0.9598 - f1_m: 0.9589 - val_loss: 1.6309 - val_accuracy: 0.5548 - val_f1_m: 0.5638
Epoch 8/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1533 - accuracy: 0.9644 - f1_m: 0.9642 - val_loss: 1.6619 - val_accuracy: 0.5548 - val_f1_m: 0.5623
Epoch 9/20
162/162 [==============================] - 15s 96ms/step - loss: 0.1351 - accuracy: 0.9700 - f1_m: 0.9692 - val_loss: 1.6869 - val_accuracy: 0.5583 - val_f1_m: 0.5700
Epoch 10/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1317 - accuracy: 0.9726 - f1_m: 0.9714 - val_loss: 1.6953 - val_accuracy: 0.5513 - val_f1_m: 0.5644
Epoch 11/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1283 - accuracy: 0.9739 - f1_m: 0.9731 - val_loss: 1.7081 - val_accuracy: 0.5565 - val_f1_m: 0.5612
Epoch 12/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1258 - accuracy: 0.9751 - f1_m: 0.9739 - val_loss: 1.7157 - val_accuracy: 0.5496 - val_f1_m: 0.5603
Epoch 13/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1240 - accuracy: 0.9747 - f1_m: 0.9747 - val_loss: 1.7213 - val_accuracy: 0.5496 - val_f1_m: 0.5613
Epoch 14/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1199 - accuracy: 0.9753 - f1_m: 0.9750 - val_loss: 1.7238 - val_accuracy: 0.5496 - val_f1_m: 0.5601
Epoch 15/20
162/162 [==============================] - 16s 96ms/step - loss: 0.1205 - accuracy: 0.9753 - f1_m: 0.9761 - val_loss: 1.7275 - val_accuracy: 0.5461 - val_f1_m: 0.5589
Epoch 16/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1195 - accuracy: 0.9766 - f1_m: 0.9758 - val_loss: 1.7294 - val_accuracy: 0.5461 - val_f1_m: 0.5577
Epoch 17/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1191 - accuracy: 0.9739 - f1_m: 0.9750 - val_loss: 1.7306 - val_accuracy: 0.5478 - val_f1_m: 0.5582
Epoch 18/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1203 - accuracy: 0.9731 - f1_m: 0.9732 - val_loss: 1.7310 - val_accuracy: 0.5461 - val_f1_m: 0.5577
Epoch 19/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1183 - accuracy: 0.9756 - f1_m: 0.9751 - val_loss: 1.7310 - val_accuracy: 0.5478 - val_f1_m: 0.5578
Epoch 20/20
162/162 [==============================] - 15s 95ms/step - loss: 0.1204 - accuracy: 0.9720 - f1_m: 0.9741 - val_loss: 1.7313 - val_accuracy: 0.5461 - val_f1_m: 0.5578
[Trial complete]
[Trial summary]
 |-Trial ID: ecf26268a50275fb20f157773678ac33
 |-Score: 0.5699636936187744
 |-Best step: 0
 > Hyperparameters:
 |-default configuration
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:35:08,838 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
2020-10-28 07:35:17,758 - tensorflow - WARNING - From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
2020-10-28 07:35:17.759100: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
2020-10-28 07:35:17,768 - tensorflow - WARNING - From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
INFO:tensorflow:Assets written to: models/BiLSTM/assets
2020-10-28 07:35:23,775 - tensorflow - INFO - Assets written to: models/BiLSTM/assets
Training the model: BiLSTM_Glove
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:35:24,056 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
INFO:tensorflow:Reloading Oracle from existing project random_search/sentiment_analysis_BiLSTMModel/oracle.json
2020-10-28 07:35:24,057 - tensorflow - INFO - Reloading Oracle from existing project random_search/sentiment_analysis_BiLSTMModel/oracle.json
INFO:tensorflow:Reloading Tuner from random_search/sentiment_analysis_BiLSTMModel/tuner0.json
2020-10-28 07:35:25,048 - tensorflow - INFO - Reloading Tuner from random_search/sentiment_analysis_BiLSTMModel/tuner0.json
[Search space summary]
 |-Default search space size: 0
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:35:25,051 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:35:32,045 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:35:32,046 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:35:32,046 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:35:32,046 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:35:32,046 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:35:32,046 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:35:32,046 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
INFO:tensorflow:Assets written to: models/BiLSTM_Glove/assets
2020-10-28 07:35:39,678 - tensorflow - INFO - Assets written to: models/BiLSTM_Glove/assets
Training the model: StackedBiLSTM
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:35:39,963 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
[Search space summary]
 |-Default search space size: 0
Epoch 1/20
162/162 [==============================] - 29s 179ms/step - loss: 4.0378 - accuracy: 0.2344 - f1_m: 0.1258 - val_loss: 1.6648 - val_accuracy: 0.3809 - val_f1_m: 0.2436
Epoch 2/20
162/162 [==============================] - 27s 165ms/step - loss: 1.3185 - accuracy: 0.5157 - f1_m: 0.4444 - val_loss: 1.1971 - val_accuracy: 0.5757 - val_f1_m: 0.5172
Epoch 3/20
162/162 [==============================] - 27s 164ms/step - loss: 0.9975 - accuracy: 0.6453 - f1_m: 0.6091 - val_loss: 1.2398 - val_accuracy: 0.5843 - val_f1_m: 0.5569
Epoch 4/20
162/162 [==============================] - 27s 164ms/step - loss: 0.7774 - accuracy: 0.7269 - f1_m: 0.7070 - val_loss: 1.2887 - val_accuracy: 0.5739 - val_f1_m: 0.5642
Epoch 5/20
162/162 [==============================] - 27s 164ms/step - loss: 0.6239 - accuracy: 0.7770 - f1_m: 0.7742 - val_loss: 1.4014 - val_accuracy: 0.5704 - val_f1_m: 0.5727
Epoch 6/20
162/162 [==============================] - 26s 163ms/step - loss: 0.5218 - accuracy: 0.8206 - f1_m: 0.8149 - val_loss: 1.4973 - val_accuracy: 0.5739 - val_f1_m: 0.5770
Epoch 7/20
162/162 [==============================] - 27s 164ms/step - loss: 0.4332 - accuracy: 0.8496 - f1_m: 0.8489 - val_loss: 1.6805 - val_accuracy: 0.5583 - val_f1_m: 0.5675
Epoch 8/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3804 - accuracy: 0.8726 - f1_m: 0.8668 - val_loss: 1.7069 - val_accuracy: 0.5617 - val_f1_m: 0.5642
Epoch 9/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3488 - accuracy: 0.8806 - f1_m: 0.8823 - val_loss: 1.7975 - val_accuracy: 0.5600 - val_f1_m: 0.5645
Epoch 10/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3258 - accuracy: 0.8875 - f1_m: 0.8866 - val_loss: 1.8373 - val_accuracy: 0.5670 - val_f1_m: 0.5652
Epoch 11/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3164 - accuracy: 0.8947 - f1_m: 0.8936 - val_loss: 1.8705 - val_accuracy: 0.5774 - val_f1_m: 0.5597
Epoch 12/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3020 - accuracy: 0.8939 - f1_m: 0.8997 - val_loss: 1.8819 - val_accuracy: 0.5757 - val_f1_m: 0.5615
Epoch 13/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3048 - accuracy: 0.8947 - f1_m: 0.8965 - val_loss: 1.8870 - val_accuracy: 0.5722 - val_f1_m: 0.5627
Epoch 14/20
162/162 [==============================] - 27s 164ms/step - loss: 0.3016 - accuracy: 0.8945 - f1_m: 0.8982 - val_loss: 1.8880 - val_accuracy: 0.5774 - val_f1_m: 0.5624
Epoch 15/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2962 - accuracy: 0.9016 - f1_m: 0.9040 - val_loss: 1.8935 - val_accuracy: 0.5774 - val_f1_m: 0.5634
Epoch 16/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2964 - accuracy: 0.9009 - f1_m: 0.9012 - val_loss: 1.8957 - val_accuracy: 0.5809 - val_f1_m: 0.5628
Epoch 17/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2932 - accuracy: 0.9026 - f1_m: 0.9036 - val_loss: 1.9015 - val_accuracy: 0.5739 - val_f1_m: 0.5618
Epoch 18/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2887 - accuracy: 0.9034 - f1_m: 0.9000 - val_loss: 1.9037 - val_accuracy: 0.5757 - val_f1_m: 0.5617
Epoch 19/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2904 - accuracy: 0.9039 - f1_m: 0.9044 - val_loss: 1.9045 - val_accuracy: 0.5774 - val_f1_m: 0.5629
Epoch 20/20
162/162 [==============================] - 27s 164ms/step - loss: 0.2937 - accuracy: 0.8972 - f1_m: 0.9004 - val_loss: 1.9056 - val_accuracy: 0.5739 - val_f1_m: 0.5617
[Trial complete]
[Trial summary]
 |-Trial ID: 0495da558d74bd3048cb2062d941570c
 |-Score: 0.5770310163497925
 |-Best step: 0
 > Hyperparameters:
 |-default configuration
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:44:46,341 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:44:54,891 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:44:54,891 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:44:54,891 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:44:54,891 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
2020-10-28 07:44:54,891 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:44:54,892 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:44:54,892 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
INFO:tensorflow:Assets written to: models/StackedBiLSTM/assets
2020-10-28 07:45:12,455 - tensorflow - INFO - Assets written to: models/StackedBiLSTM/assets
Training the model: StackedBiLSTM_Glove
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:45:12,982 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
INFO:tensorflow:Reloading Oracle from existing project random_search/sentiment_analysis_StackedBiLSTMModel/oracle.json
2020-10-28 07:45:12,982 - tensorflow - INFO - Reloading Oracle from existing project random_search/sentiment_analysis_StackedBiLSTMModel/oracle.json
INFO:tensorflow:Reloading Tuner from random_search/sentiment_analysis_StackedBiLSTMModel/tuner0.json
2020-10-28 07:45:14,749 - tensorflow - INFO - Reloading Tuner from random_search/sentiment_analysis_StackedBiLSTMModel/tuner0.json
[Search space summary]
 |-Default search space size: 0
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:45:14,752 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:45:23,193 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.bias
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.kernel
2020-10-28 07:45:23,194 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
2020-10-28 07:45:23,195 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.bias
2020-10-28 07:45:23,195 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:45:23,195 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
INFO:tensorflow:Assets written to: models/StackedBiLSTM_Glove/assets
2020-10-28 07:45:40,624 - tensorflow - INFO - Assets written to: models/StackedBiLSTM_Glove/assets
Training the model: HybridModel
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:45:41,161 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
[Search space summary]
 |-Default search space size: 0
Epoch 1/20
162/162 [==============================] - 16s 101ms/step - loss: 1.9590 - accuracy: 0.1651 - f1_m: 0.0129 - val_loss: 1.8140 - val_accuracy: 0.2470 - val_f1_m: 0.0584
Epoch 2/20
162/162 [==============================] - 15s 90ms/step - loss: 1.7634 - accuracy: 0.2551 - f1_m: 0.0822 - val_loss: 1.7133 - val_accuracy: 0.3130 - val_f1_m: 0.0900
Epoch 3/20
162/162 [==============================] - 15s 90ms/step - loss: 1.5604 - accuracy: 0.3690 - f1_m: 0.2404 - val_loss: 1.4380 - val_accuracy: 0.4870 - val_f1_m: 0.3278
Epoch 4/20
162/162 [==============================] - 15s 91ms/step - loss: 1.2609 - accuracy: 0.5193 - f1_m: 0.4588 - val_loss: 1.3027 - val_accuracy: 0.5200 - val_f1_m: 0.4550
Epoch 5/20
162/162 [==============================] - 15s 92ms/step - loss: 1.0770 - accuracy: 0.6026 - f1_m: 0.5731 - val_loss: 1.2508 - val_accuracy: 0.5583 - val_f1_m: 0.4969
Epoch 6/20
162/162 [==============================] - 15s 91ms/step - loss: 0.9633 - accuracy: 0.6448 - f1_m: 0.6215 - val_loss: 1.2544 - val_accuracy: 0.5617 - val_f1_m: 0.5292
Epoch 7/20
162/162 [==============================] - 15s 91ms/step - loss: 0.8894 - accuracy: 0.6772 - f1_m: 0.6584 - val_loss: 1.2533 - val_accuracy: 0.5548 - val_f1_m: 0.5249
Epoch 8/20
162/162 [==============================] - 15s 91ms/step - loss: 0.8231 - accuracy: 0.7029 - f1_m: 0.6859 - val_loss: 1.2567 - val_accuracy: 0.5617 - val_f1_m: 0.5323
Epoch 9/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7918 - accuracy: 0.7157 - f1_m: 0.6968 - val_loss: 1.2810 - val_accuracy: 0.5513 - val_f1_m: 0.5241
Epoch 10/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7702 - accuracy: 0.7190 - f1_m: 0.7074 - val_loss: 1.2772 - val_accuracy: 0.5617 - val_f1_m: 0.5224
Epoch 11/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7474 - accuracy: 0.7337 - f1_m: 0.7193 - val_loss: 1.2811 - val_accuracy: 0.5617 - val_f1_m: 0.5230
Epoch 12/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7511 - accuracy: 0.7364 - f1_m: 0.7224 - val_loss: 1.2835 - val_accuracy: 0.5600 - val_f1_m: 0.5244
Epoch 13/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7451 - accuracy: 0.7302 - f1_m: 0.7209 - val_loss: 1.2840 - val_accuracy: 0.5583 - val_f1_m: 0.5259
Epoch 14/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7295 - accuracy: 0.7397 - f1_m: 0.7265 - val_loss: 1.2848 - val_accuracy: 0.5583 - val_f1_m: 0.5262
Epoch 15/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7462 - accuracy: 0.7354 - f1_m: 0.7222 - val_loss: 1.2847 - val_accuracy: 0.5600 - val_f1_m: 0.5281
Epoch 16/20
162/162 [==============================] - 15s 92ms/step - loss: 0.7453 - accuracy: 0.7373 - f1_m: 0.7198 - val_loss: 1.2850 - val_accuracy: 0.5617 - val_f1_m: 0.5270
Epoch 17/20
162/162 [==============================] - 15s 92ms/step - loss: 0.7245 - accuracy: 0.7439 - f1_m: 0.7306 - val_loss: 1.2857 - val_accuracy: 0.5617 - val_f1_m: 0.5264
Epoch 18/20
162/162 [==============================] - 15s 91ms/step - loss: 0.7333 - accuracy: 0.7414 - f1_m: 0.7251 - val_loss: 1.2856 - val_accuracy: 0.5617 - val_f1_m: 0.5264
Epoch 19/20
162/162 [==============================] - 15s 92ms/step - loss: 0.7357 - accuracy: 0.7329 - f1_m: 0.7179 - val_loss: 1.2859 - val_accuracy: 0.5617 - val_f1_m: 0.5264
Epoch 20/20
162/162 [==============================] - 15s 92ms/step - loss: 0.7289 - accuracy: 0.7439 - f1_m: 0.7289 - val_loss: 1.2861 - val_accuracy: 0.5617 - val_f1_m: 0.5264
[Trial complete]
[Trial summary]
 |-Trial ID: bbf27746a0e4d5e51888f02742171524
 |-Score: 0.5322573781013489
 |-Best step: 0
 > Hyperparameters:
 |-default configuration
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:50:46,699 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:50:52,658 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.bias
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.bias
2020-10-28 07:50:52,659 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.bias
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.bias
2020-10-28 07:50:52,660 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.backward_layer.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:50:52,660 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
INFO:tensorflow:Assets written to: models/HybridModel/assets
2020-10-28 07:50:59,863 - tensorflow - INFO - Assets written to: models/HybridModel/assets
Training the model: HybridModel_Glove
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
2020-10-28 07:51:00,144 - tensorflow - INFO - Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
[Search space summary]
 |-Default search space size: 0
Epoch 1/20
162/162 [==============================] - 9s 56ms/step - loss: 1.9401 - accuracy: 0.3664 - f1_m: 0.2744 - val_loss: 1.3153 - val_accuracy: 0.4922 - val_f1_m: 0.4705
Epoch 2/20
162/162 [==============================] - 8s 47ms/step - loss: 1.0295 - accuracy: 0.6299 - f1_m: 0.6002 - val_loss: 1.2742 - val_accuracy: 0.5478 - val_f1_m: 0.5248
Epoch 3/20
162/162 [==============================] - 8s 47ms/step - loss: 0.6691 - accuracy: 0.7741 - f1_m: 0.7560 - val_loss: 1.3704 - val_accuracy: 0.5426 - val_f1_m: 0.5370
Epoch 4/20
162/162 [==============================] - 8s 47ms/step - loss: 0.4461 - accuracy: 0.8510 - f1_m: 0.8470 - val_loss: 1.4707 - val_accuracy: 0.5391 - val_f1_m: 0.5402
Epoch 5/20
162/162 [==============================] - 8s 47ms/step - loss: 0.3301 - accuracy: 0.8997 - f1_m: 0.8921 - val_loss: 1.5702 - val_accuracy: 0.5513 - val_f1_m: 0.5534
Epoch 6/20
162/162 [==============================] - 8s 47ms/step - loss: 0.2466 - accuracy: 0.9331 - f1_m: 0.9271 - val_loss: 1.6368 - val_accuracy: 0.5461 - val_f1_m: 0.5502
Epoch 7/20
162/162 [==============================] - 8s 47ms/step - loss: 0.2135 - accuracy: 0.9412 - f1_m: 0.9403 - val_loss: 1.6784 - val_accuracy: 0.5443 - val_f1_m: 0.5474
Epoch 8/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1869 - accuracy: 0.9511 - f1_m: 0.9491 - val_loss: 1.7363 - val_accuracy: 0.5443 - val_f1_m: 0.5458
Epoch 9/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1720 - accuracy: 0.9554 - f1_m: 0.9513 - val_loss: 1.7489 - val_accuracy: 0.5513 - val_f1_m: 0.5447
Epoch 10/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1615 - accuracy: 0.9627 - f1_m: 0.9597 - val_loss: 1.7672 - val_accuracy: 0.5513 - val_f1_m: 0.5449
Epoch 11/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1659 - accuracy: 0.9569 - f1_m: 0.9574 - val_loss: 1.7770 - val_accuracy: 0.5496 - val_f1_m: 0.5457
Epoch 12/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1625 - accuracy: 0.9592 - f1_m: 0.9564 - val_loss: 1.7798 - val_accuracy: 0.5478 - val_f1_m: 0.5479
Epoch 13/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1579 - accuracy: 0.9604 - f1_m: 0.9587 - val_loss: 1.7884 - val_accuracy: 0.5443 - val_f1_m: 0.5481
Epoch 14/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1496 - accuracy: 0.9642 - f1_m: 0.9632 - val_loss: 1.7912 - val_accuracy: 0.5443 - val_f1_m: 0.5479
Epoch 15/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1539 - accuracy: 0.9615 - f1_m: 0.9591 - val_loss: 1.7922 - val_accuracy: 0.5461 - val_f1_m: 0.5463
Epoch 16/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1484 - accuracy: 0.9615 - f1_m: 0.9632 - val_loss: 1.7947 - val_accuracy: 0.5443 - val_f1_m: 0.5470
Epoch 17/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1512 - accuracy: 0.9602 - f1_m: 0.9596 - val_loss: 1.7951 - val_accuracy: 0.5443 - val_f1_m: 0.5476
Epoch 18/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1543 - accuracy: 0.9625 - f1_m: 0.9614 - val_loss: 1.7954 - val_accuracy: 0.5461 - val_f1_m: 0.5476
Epoch 19/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1511 - accuracy: 0.9619 - f1_m: 0.9620 - val_loss: 1.7958 - val_accuracy: 0.5461 - val_f1_m: 0.5476
Epoch 20/20
162/162 [==============================] - 8s 47ms/step - loss: 0.1523 - accuracy: 0.9637 - f1_m: 0.9611 - val_loss: 1.7960 - val_accuracy: 0.5461 - val_f1_m: 0.5476
[Trial complete]
[Trial summary]
 |-Trial ID: f5c87c7f4f8632f84af7b093cee418e7
 |-Score: 0.5533604025840759
 |-Best step: 0
 > Hyperparameters:
 |-default configuration
INFO:tensorflow:Oracle triggered exit
2020-10-28 07:53:39,594 - tensorflow - INFO - Oracle triggered exit
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
2020-10-28 07:53:45,396 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-4.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-4.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-4.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-4.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-3.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-4.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-4.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-4.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-4.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.forward_layer.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
2020-10-28 07:53:45,397 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
2020-10-28 07:53:45,398 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.backward_layer.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:53:45,398 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
INFO:tensorflow:Assets written to: models/HybridModel_Glove/assets
2020-10-28 07:53:46,029 - tensorflow - INFO - Assets written to: models/HybridModel_Glove/assets
1 ... weighted avg
BiLSTM precision 0.700730 ... 0.539433
recall 0.655290 ... 0.535733
f1-score 0.677249 ... 0.537299
support 293.000000 ... 1917.000000
BiLSTM_Glove precision 0.700730 ... 0.539433
recall 0.655290 ... 0.535733
f1-score 0.677249 ... 0.537299
support 293.000000 ... 1917.000000
StackedBiLSTM precision 0.744526 ... 0.579933
recall 0.675497 ... 0.574857
f1-score 0.708333 ... 0.575210
support 302.000000 ... 1917.000000
StackedBiLSTM_Glove precision 0.744526 ... 0.579933
recall 0.675497 ... 0.574857
f1-score 0.708333 ... 0.575210
support 302.000000 ... 1917.000000
HybridModel precision 0.649635 ... 0.558832
recall 0.635714 ... 0.549296
f1-score 0.642599 ... 0.551071
support 280.000000 ... 1917.000000
HybridModel_Glove precision 0.784672 ... 0.578038
recall 0.628655 ... 0.557642
f1-score 0.698052 ... 0.564800
support 342.000000 ... 1917.000000
[24 rows x 10 columns]
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.iter
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.iter
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_1
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_1
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.beta_2
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.beta_2
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer.decay
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer.decay
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.kernel
2020-10-28 07:53:46,262 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.recurrent_kernel
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.bias
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'm' for (root).layer_with_weights-1.cell.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-2.bias
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.kernel
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.recurrent_kernel
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.recurrent_kernel
WARNING:tensorflow:Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.bias
2020-10-28 07:53:46,263 - tensorflow - WARNING - Unresolved object in checkpoint: (root).optimizer's state 'v' for (root).layer_with_weights-1.cell.bias
WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
2020-10-28 07:53:46,263 - tensorflow - WARNING - A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.
###Markdown
**Results are ready**
###Code
final_report = pd.read_csv("./customer_interaction_summary/models/final_report.csv")
final_report[final_report['Unnamed: 1'] == 'f1-score']
final_report[final_report['Unnamed: 1'] == 'recall']
final_report[final_report['Unnamed: 1'] == 'precision']
!cp "./customer_interaction_summary/models/final_report.csv" "/content/gdrive/My Drive/Colab Notebooks/.kaggle/"
!ls "/content/gdrive/My Drive/Colab Notebooks/.kaggle/"
# from google.colab import files
# files.download('./customer_interaction_summary/models/final_report.csv')
!ls "./customer_interaction_summary/models/"
!cat "./customer_interaction_summary/models/StackedBiLSTM_hyperparameters.json"
###Output
_____no_output_____ |
posts/check-if-duration-affects-spike-sorting.ipynb | ###Markdown
Does the recording's duration affect the quality of spike sorting?

This notebook investigates if and how the duration of the recording affects spike sorting.

Obviously, each sorter engine needs a minimum number of events to detect a "cluster", and therefore a unit. If a neuron doesn't fire enough during a recording, it won't be detected. The number of events per unit depends on the recording duration and on each unit's individual firing rate.

In order to test this phenomenon, we use the same dataset (with the same neurons and firing rates), but we vary the duration of the recording.

The simulated recording is generated with [MEArec](https://github.com/alejoe91/MEArec) using a Neuronexus-32 probe. This specific dataset seems *relatively* easy to sort. The "SYNTH_MEAREC_NEURONEXUS" dataset in [SpikeForest](https://spikeforest.flatironinstitute.org/) (which uses the same probe), in fact, shows quite good results for all sorters. The original duration is 600s (10 min).

Here we have generated a new but similar recording with a duration of 1800s, and then shortened it to 60s, 300s, 600s and 1800s (original). The recording can be downloaded from Zenodo: https://doi.org/10.5281/zenodo.4058272

The dataset name is: **recordings_10cells_Neuronexus-32_1800.0_10.0uV_2020-02-28.h5**. It contains 10 neurons recorded on a Neuronexus-32 probe. The duration is 1800s and the noise level is 10uV.

Let's see if spike sorters are robust to fewer events, and whether they are able to deal with long durations or end up finding too many events.

Author: [Samuel Garcia](https://github.com/samuelgarcia), CRNL, Lyon

Requirements

For this notebook you will need the following Python packages:

- numpy
- pandas
- matplotlib
- seaborn
- spikeinterface

To run the MATLAB-based sorters, you would also need a MATLAB license. For other sorters, please refer to the documentation on [how to install sorters](https://spikeinterface.readthedocs.io/en/latest/sortersinfo.html).

Installation and imports
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import spikeinterface as si
import spikeinterface.extractors as se
import spikeinterface.sorters as ss
import spikeinterface.widgets as sw
from spikeinterface.comparison import GroundTruthStudy
# clone and install MATLAB sorters
# kilosort2
!git clone https://github.com/MouseLand/Kilosort2.git
kilosort2_path = './Kilosort2'
ss.Kilosort2Sorter.set_kilosort2_path(kilosort2_path)
# kilosort
!git clone https://github.com/cortex-lab/KiloSort.git
kilosort_path = './KiloSort'
ss.KilosortSorter.set_kilosort_path(kilosort_path)
# ironclust
!git clone https://github.com/flatironinstitute/ironclust.git
ironclust_path = './ironclust'
ss.IronclustSorter.set_ironclust_path(ironclust_path)
%matplotlib inline
# some matplotlib hack to prettify figure
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', figsize=(10.0, 8.0)) # figsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def clear_axes(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
###Output
_____no_output_____
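###Markdown
Before benchmarking, it helps to keep in mind that the expected number of events per unit is simply firing rate × duration, so the 60s recording gives each sorter far less evidence per unit than the 1800s one. A quick illustration (not part of the original analysis; the firing rates below are arbitrary examples):
###Code
import numpy as np  # already imported above, repeated so this cell stands alone
# expected spike counts = firing rate * duration (illustrative rates only)
durations = np.array([60, 300, 600, 1800])            # seconds
firing_rates = np.array([0.5, 2.0, 10.0])             # Hz, hypothetical examples
expected_counts = np.outer(firing_rates, durations)   # rows: rates, columns: durations
print(expected_counts)
###Output
_____no_output_____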
###Markdown
Check spikeinterface version and sorter version
###Code
si.print_spikeinterface_version()
ss.print_sorter_versions()
###Output
spikeinterface: 0.9.1
* spikeextractor: 0.7.2
* spiketoolkit: 0.5.2
* spikesorters: 0.2.4
* spikecomparison: 0.2.3
* spikewidgets: 0.3.3
herdingspikes: 0.3.7+git.45665a2b6438
ironclust: 5.9.4
kilosort: git-cd040da1963d
kilosort2: git-67a42a87b866
klusta: 3.0.16
mountainsort4: unknown
spykingcircus: 0.9.2
tridesclous: 1.5.0
###Markdown
Setup global path
###Code
# Change this path to point to where you downloaded the dataset
p = Path('/home/samuel/Documents/DataSpikeSorting/mearec/')
study_folder = p / 'study_mearec_neuronexus_several_durations/'
###Output
_____no_output_____
###Markdown
Setup ground truth study
###Code
mearec_filename = p / 'recordings_10cells_Neuronexus-32_1800.0_10.0uV_2020-02-28.h5'
rec = se.MEArecRecordingExtractor(mearec_filename, locs_2d=True)
gt_sorting = se.MEArecSortingExtractor(mearec_filename)
fs = rec.get_sampling_frequency()
gt_dict = {}
durations = [60, 300, 600, 1800]
for duration in durations:
sub_rec = se.SubRecordingExtractor(rec, start_frame=0, end_frame=int(duration*fs))
sub_sorting = se.SubSortingExtractor(gt_sorting, start_frame=0, end_frame=int(duration*fs))
gt_dict[f'rec{duration}'] = (sub_rec, sub_sorting)
study = GroundTruthStudy.create(study_folder, gt_dict)
###Output
_____no_output_____
###Markdown
Run all sorters
###Code
sorter_list = ['herdingspikes', 'ironclust', 'kilosort2', 'kilosort',
'mountainsort4', 'spykingcircus', 'tridesclous']
study = GroundTruthStudy(study_folder)
sorter_params = {}
study.run_sorters(sorter_list, sorter_params=sorter_params, mode='keep', verbose=True)
###Output
_____no_output_____
###Markdown
Get signal-to-noise ratio for all units

Units are the same in each recording, so the SNR is the same; let's take it from the longest recording.
###Code
study = GroundTruthStudy(study_folder)
snr = study.get_units_snr(rec_name='rec1800')
snr
fig, ax = plt.subplots()
ax.hist(snr['snr'].values, bins=10)
ax.set_xlabel('GT units SNR')
###Output
_____no_output_____
###Markdown
Run comparison with ground truth and retrieve result tables
###Code
# copying the sortings is necessary to gather results from each sorter
# into a centralized folder with all results
study.copy_sortings()
# this runs all comparisons to the ground truth (GT)
study.run_comparisons(exhaustive_gt=True, match_score=0.1, overmerged_score=0.2)
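# note: match_score is the minimum agreement score required to accept a GT/sorted unit match;
# to our understanding (worth checking against the spikecomparison docs), the agreement between
# two units is num_coincident_spikes / (num_gt_spikes + num_sorted_spikes - num_coincident_spikes)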
# this retrieves the results
comparisons = study.comparisons
dataframes = study.aggregate_dataframes()
###Output
_____no_output_____
###Markdown
Run times
###Code
run_times = dataframes['run_times']
run_times
# insert durations
run_times['duration'] = run_times['rec_name'].apply(lambda s: float(s.replace('rec', '')))
g = sns.catplot(data=run_times, x='duration', y='run_time', hue='sorter_name', kind='bar')
###Output
_____no_output_____
###Markdown
Accuracy vs duration
###Code
perf = dataframes['perf_by_units']
perf
# insert durations
perf['duration'] = perf['rec_name'].apply(lambda s: float(s.replace('rec', '')))
g = sns.catplot(data=perf, x='duration', y='accuracy', hue='sorter_name', kind='bar')
###Output
_____no_output_____
###Markdown
Count good, bad, false positive units vs duration
###Code
count_units = dataframes['count_units']
count_units
# insert durations
count_units['duration'] = count_units['rec_name'].apply(lambda s: float(s.replace('rec', '')))
###Output
_____no_output_____
###Markdown
num_well_detected vs duration

The more, the better.
###Code
g = sns.catplot(data=count_units, x='duration', y='num_well_detected', hue='sorter_name', kind='bar')
###Output
_____no_output_____
###Markdown
num_false_positive vs duration

The less, the better.
###Code
g = sns.catplot(data=count_units, x='duration', y='num_false_positive', hue='sorter_name', kind='bar')
# same as previous but with other limits
g = sns.catplot(data=count_units, x='duration', y='num_false_positive', hue='sorter_name', kind='bar')
g.fig.axes[0].set_ylim(0, 10)
###Output
_____no_output_____
###Markdown
num_redundant vs duration

The less, the better.
###Code
g = sns.catplot(data=count_units, x='duration', y='num_redundant', hue='sorter_name', kind='bar')
###Output
_____no_output_____ |
superseded/Phenolopy_old.ipynb | ###Markdown
Phenolopy

Load packages

Set up a dask cluster
###Code
%matplotlib inline
%load_ext autoreload
import os, sys
import xarray as xr
import numpy as np
import pandas as pd
import datacube
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter, wiener
from scipy.stats import zscore
from statsmodels.tsa.seasonal import STL as stl
from datacube.drivers.netcdf import write_dataset_to_netcdf
sys.path.append('../Scripts')
from dea_datahandling import load_ard
from dea_dask import create_local_dask_cluster
from dea_plotting import display_map, rgb
sys.path.append('./scripts')
import phenolopy
# initialise the cluster. paste url into dask panel for more info.
create_local_dask_cluster()
# open up a datacube connection
dc = datacube.Datacube(app='phenolopy')
###Output
_____no_output_____
###Markdown
Study area and data setup

Set study area and time range
###Code
# set lat, lon (y, x) dictionary of testing areas for gdv project
loc_dict = {
'yan_full': (-22.750, 119.10),
'yan_full_1': (-22.725, 119.05),
'yan_full_2': (-22.775, 119.15),
'roy_sign_1': (-22.618, 119.989),
'roy_full': (-22.555, 120.01),
'roy_full_1': (-22.487, 119.927),
'roy_full_2': (-22.487, 120.092),
'roy_full_3': (-22.623, 119.927),
'roy_full_4': (-22.623, 120.092),
'oph_full': (-23.280432, 119.859309),
'oph_full_1': (-23.375319, 119.859309),
'oph_full_2': (-23.185611, 119.859309),
'oph_full_3': (-23.233013, 119.859309),
'oph_full_4': (-23.280432, 119.859309),
'oph_full_5': (-23.327867, 119.859309),
'test': (-31.6069288, 116.9426373)
}
# set buffer length and height (x, y)
buf_dict = {
'yan_full': (0.15, 0.075),
'yan_full_1': (0.09, 0.025),
'yan_full_2': (0.05, 0.0325),
'roy_sign_1': (0.15, 0.21),
'roy_full': (0.33, 0.27),
'roy_full_1': (0.165209/2, 0.135079/2),
'roy_full_2': (0.165209/2, 0.135079/2),
'roy_full_3': (0.165209/2, 0.135079/2),
'roy_full_4': (0.165209/2, 0.135079/2),
'oph_full': (0.08, 0.11863),
'oph_full_1': (0.08, 0.047452/2),
'oph_full_2': (0.08, 0.047452/2),
'oph_full_3': (0.08, 0.047452/2),
'oph_full_4': (0.08, 0.047452/2),
'oph_full_5': (0.08, 0.047452/2),
'test': (0.05, 0.05)
}
# select location from dict
study_area = 'roy_full_2'
# set buffer size in lon, lat (x, y)
lon_buff, lat_buff = buf_dict[study_area][0], buf_dict[study_area][1]
# select time range. for a specific year, set same year with month 01 to 12. multiple years will be averaged.
time_range = ('2016-11', '2018-02')
# select a study area from existing dict
lat, lon = loc_dict[study_area][0], loc_dict[study_area][1]
# combine centroid with buffer to form study boundary
lat_extent = (lat - lat_buff, lat + lat_buff)
lon_extent = (lon - lon_buff, lon + lon_buff)
# display onto interacrive map
display_map(x=lon_extent, y=lat_extent)
###Output
_____no_output_____
###Markdown
Load sentinel-2a, b data for above parameters
###Code
# set measurements (bands)
measurements = [
'nbart_blue',
'nbart_green',
'nbart_red',
'nbart_nir_1',
'nbart_swir_2'
]
# create query from above and expected info
query = {
'x': lon_extent,
'y': lat_extent,
'time': time_range,
'measurements': measurements,
'output_crs': 'EPSG:3577',
'resolution': (-10, 10),
'group_by': 'solar_day',
}
# load sentinel 2 data
ds = load_ard(
dc=dc,
products=['s2a_ard_granule', 's2b_ard_granule'],
min_gooddata=0.90,
dask_chunks={'time': 1},
**query
)
# display dataset
print(ds)
# display a rgb data result of temporary resampled median
#rgb(ds.resample(time='1M').median(), bands=['nbart_red', 'nbart_green', 'nbart_blue'], col='time', col_wrap=12)
###Output
_____no_output_____
###Markdown
Conform DEA band names
###Code
# takes our dask ds and conforms (renames) bands
ds = phenolopy.conform_dea_band_names(ds)
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Calculate vegetation index
###Code
# takes our dask ds and calculates veg index from spectral bands
ds = phenolopy.calc_vege_index(ds, index='mavi', drop=True)
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Pre-processing phase

Temporary - load MODIS dataset
###Code
#ds = phenolopy.load_test_dataset(data_path='./data/')
# resample to monthly medians
ds = phenolopy.resample(ds, interval='1M', reducer='median')
# interp
ds = ds.chunk({'time': -1})
ds = phenolopy.interpolate(ds=ds, method='interpolate_na')
# drop years
ds = ds.where(ds['time.year'] == 2017, drop=True)
###Output
_____no_output_____
###Markdown
Group data by month and reduce by median
###Code
# take our dask ds, group it by month and reduce each group with the median (12 values for one year)
ds = phenolopy.group(ds, group_by='month', reducer='median')
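# for reference, a plain-xarray equivalent of this grouping step would be something like
# (a sketch only; phenolopy.group is the authoritative implementation):
# ds_monthly = ds.groupby('time.month').median('time')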
# display dataset
print(ds)
# show times
ds = ds.compute()
###Output
_____no_output_____
###Markdown
Remove outliers from dataset on per-pixel basis
###Code
# chunk dask to -1 to make compatible with this function
ds = ds.chunk({'time': -1})
# takes our dask ds and remove outliers from data using median method
ds = phenolopy.remove_outliers(ds=ds, method='median', user_factor=2, z_pval=0.05)
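# the 'median' method presumably flags values that sit more than user_factor standard
# deviations away from a rolling median and replaces them with NaN; a rough sketch
# (an assumption about phenolopy internals, not its exact code):
# med = ds['veg_index'].rolling(time=3, center=True, min_periods=1).median()
# cutoff = 2 * ds['veg_index'].std('time')
# ds['veg_index'] = ds['veg_index'].where(abs(ds['veg_index'] - med) <= cutoff)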
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Resample dataset down to weekly medians
###Code
# takes our dask ds and resamples data to weekly medians
ds = phenolopy.resample(ds, interval='1W', reducer='median')
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Interpolate missing (i.e. nan) values linearly
###Code
# chunk dask to -1 to make compatible with this function
ds = ds.chunk({'time': -1})
# takes our dask ds and interpolates missing values
ds = phenolopy.interpolate(ds=ds, method='interpolate_na')
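# under the hood this presumably relies on xarray's linear gap filling, along the lines of:
# ds = ds.interpolate_na(dim='time', method='linear')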
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Smooth data on per-pixel basis
###Code
# chunk dask to -1 to make compatible with this function
ds = ds.chunk({'time': -1})
# take our dask ds and smooth using savitsky golay filter
ds = phenolopy.smooth(ds=ds, method='savitsky', window_length=3, polyorder=1)
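# for intuition, an equivalent per-pixel Savitzky-Golay pass can be written with xarray directly
# (a sketch, assuming phenolopy.smooth wraps scipy.signal.savgol_filter):
# smoothed = xr.apply_ufunc(savgol_filter, ds['veg_index'],
#                           input_core_dims=[['time']], output_core_dims=[['time']],
#                           kwargs={'window_length': 3, 'polyorder': 1},
#                           dask='parallelized', output_dtypes=[ds['veg_index'].dtype])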
# display dataset
print(ds)
###Output
_____no_output_____
###Markdown
Upper envelope correction

todo
###Code
# todo
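# a possible approach to try here (purely a sketch, not implemented in phenolopy):
# iteratively smooth and keep the larger of the observed and fitted values, so the fit
# converges towards the upper envelope of the time series (TIMESAT-style):
# def upper_envelope(da, iterations=3, window_length=3, polyorder=1):
#     fit = da
#     for _ in range(iterations):
#         smoothed = xr.apply_ufunc(savgol_filter, fit,
#                                   input_core_dims=[['time']], output_core_dims=[['time']],
#                                   kwargs={'window_length': window_length, 'polyorder': polyorder},
#                                   dask='parallelized', output_dtypes=[fit.dtype])
#         fit = xr.where(da > smoothed, da, smoothed)
#     return fit
# ds['veg_index'] = upper_envelope(ds['veg_index'])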
###Output
_____no_output_____
###Markdown
Calculate number of seasons
###Code
# chunk dask to -1 to make compatible with this function
ds = ds.chunk({'time': -1})
# take our dask ds and count the number of seasons per pixel
da_num_seasons = phenolopy.calc_num_seasons(ds=ds)
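# conceptually, the per-pixel season count can be obtained with a peak finder, e.g.
# (a sketch consistent with the experimental find_peaks code in the 'Working' section below):
# from scipy.signal import find_peaks
# pixel = ds['veg_index'].isel(x=0, y=0)
# height = float(pixel.quantile(q=0.75))
# peaks, _ = find_peaks(pixel, height=height, distance=max(1, len(ds['time']) // 4))
# num_seasons = len(peaks)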
# display dataset
print(da_num_seasons)
###Output
_____no_output_____
###Markdown
Calculate Phenolometrics
###Code
# compute
ds = ds.compute()
print(ds)
%autoreload
# calc phenometrics via phenolopy!
ds_phenos = phenolopy.calc_phenometrics(da=ds['veg_index'], peak_metric='pos', base_metric='vos', method='seasonal_amplitude', factor=0.2, thresh_sides='two_sided', abs_value=0.1)
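# the metrics follow TIMESAT-style definitions (our reading of the parameters, to be confirmed
# against the phenolopy docs): POS/VOS are the peak and valley of the season, SOS/EOS the start
# and end of season detected where the curve crosses the base plus factor * seasonal amplitude
# (factor=0.2 here), AOS/LOS the amplitude and length of season, and SIOS/LIOS/SIOT/LIOT the
# short/long integrals of the season and of the total curve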
# set the metric you want to view
metric_name = 'lios_values'
# plot this on map
ds_phenos[metric_name].plot(robust=True, cmap='Spectral')
from datacube.drivers.netcdf import write_dataset_to_netcdf
write_dataset_to_netcdf(ds_phenos, 'roy_2017_1w_phenos.nc')
###Output
_____no_output_____
###Markdown
Testing
###Code
# set up params
import random
import shutil
# set output filename
filename = 'roy_2_p_pos_b_vos_seas_amp_f_015'
# set seed
random.seed(50)
# generate random x and y index lists for the specified number of pixels (n_pixels of each)
n_pixels = 200
x_list = random.sample(range(0, len(ds_phenos['x'])), n_pixels)
y_list = random.sample(range(0, len(ds_phenos['y'])), n_pixels)
def run_test(ds_raw, ds_phen, filename, x_list, y_list):
# loop through each pixel pair
for x, y in zip(x_list, y_list):
# get pixel and associate phenos pixel
v = ds_raw.isel(x=x, y=y)
p = ds_phen.isel(x=x, y=y)
# create fig
fig = plt.figure(figsize=(12, 5))
# plot main trend
plt.plot(v['time.dayofyear'], v['veg_index'], linestyle='solid', marker='.', color='black')
# plot pos vals and times
plt.plot(p['pos_times'], p['pos_values'],
marker='o', linestyle='', color='blue', label='POS')
plt.annotate('POS', (p['pos_times'], p['pos_values']))
# plot vos vals and times
plt.plot(p['vos_times'], p['vos_values'],
marker='o', linestyle='', color='darkred', label='VOS')
plt.annotate('VOS', (p['vos_times'], p['vos_values']))
# plot bse vals
plt.axhline(p['bse_values'],
marker='', linestyle='dashed', color='red', label='BSE')
# add legend
# plot sos vals and times
plt.plot(p['sos_times'], p['sos_values'],
marker='s', linestyle='', color='green', label='SOS')
plt.annotate('SOS', (p['sos_times'], p['sos_values']))
# plot eos vals and times
plt.plot(p['eos_times'], p['eos_values'],
marker='s', linestyle='', color='orange', label='EOS')
plt.annotate('EOS', (p['eos_times'], p['eos_values']))
# plot aos vals
plt.axvline(p['pos_times'],
marker='', color='magenta', linestyle='dotted', label='AOS')
# plot los vals
plt.axhline((p['sos_values'] + p['eos_values']) / 2,
marker='', color='yellowgreen', linestyle='dashdot', label='LOS')
# plot sios
plt.fill_between(v['time.dayofyear'], v['veg_index'], y2=p['bse_values'],
color='red', alpha=0.1, label='SIOS')
# plot lios
t = ~v.where((v['time.dayofyear'] >= p['sos_times']) & (v['time.dayofyear'] <= p['eos_times'])).isnull()
plt.fill_between(v['time.dayofyear'], v['veg_index'], where=t['veg_index'],
color='yellow', alpha=0.2, label='LIOS')
# plot siot
plt.fill_between(v['time.dayofyear'], v['veg_index'], y2=p['bse_values'],
color='aqua', alpha=0.3, label='SIOT')
# plot liot
plt.fill_between(v['time.dayofyear'], v['veg_index'],
color='aqua', alpha=0.1, label='LIOT')
# add legend
plt.legend(loc='best')
# create output filename
out = os.path.join('testing', filename + '_x_' + str(x) + '_y_' + str(y) + '.jpg')
# save to file without plotting
fig.savefig(out)
plt.close()
# export as zip
shutil.make_archive(filename + '.zip', 'zip', './testing')
# clear all files in dir
for root, dirs, files in os.walk('./testing'):
for file in files:
os.remove(os.path.join(root, file))
# perform test
run_test(ds_raw=ds, ds_phen=ds_phenos, filename=filename, x_list=x_list, y_list=y_list)
from datacube.utils.cog import write_cog
write_cog(geo_im=ds_phenos['lios_values'], fname='lios.tif', overwrite=True)
###Output
_____no_output_____
###Markdown
Working
###Code
# different types of detection, using stl residuals - remove outlier method
#from scipy.stats import median_absolute_deviation
#v = ds.isel(x=0, y=0, time=slice(0, 69))
#v['veg_index'].data = data
#v_med = remove_outliers(v, method='median', user_factor=1, num_dates_per_year=24, z_pval=0.05)
#v_zsc = remove_outliers(v, method='zscore', user_factor=1, num_dates_per_year=24, z_pval=0.1)
#stl_res = stl(v['veg_index'], period=24, seasonal=5, robust=True).fit()
#v_rsd = stl_res.resid
#v_wgt = stl_res.weights
#o = v.copy()
#o['veg_index'].data = v_rsd
#w = v.copy()
#w['veg_index'].data = v_wgt
#m = xr.where(o > o.std('time'), True, False)
#o = v.where(m)
#m = xr.where(w < 1e-8, True, False)
#w = v.where(m)
#fig = plt.figure(figsize=(18, 7))
#plt.plot(v['time'], v['veg_index'], color='black', marker='o')
#plt.plot(o['time'], o['veg_index'], color='red', marker='o', linestyle='-')
#plt.plot(w['time'], w['veg_index'], color='blue', marker='o', linestyle='-')
#plt.axhline(y=float(o['veg_index'].std('time')))
#plt.show()
# working method for stl outlier dection. can't quite get it to match timesat results?
# need to speed this up - very slow for even relatively small datasets
#def func_stl(vec, period, seasonal, jump_l, jump_s, jump_t):
#resid = stl(vec, period=period, seasonal=seasonal,
#seasonal_jump=jump_s, trend_jump=jump_t, low_pass_jump=jump_l).fit()
#return resid.resid
#def do_stl_apply(da, multi_pct, period, seasonal):
# calc jump size for lowpass, season and trend to speed up processing
#jump_l = int(multi_pct * (period + 1))
#jump_s = int(multi_pct * (period + 1))
#jump_t = int(multi_pct * 1.5 * (period + 1))
#f = xr.apply_ufunc(func_stl, da,
#input_core_dims=[['time']],
#output_core_dims=[['time']],
#vectorize=True, dask='parallelized',
#output_dtypes=[ds['veg_index'].dtype],
#kwargs={'period': period, 'seasonal': seasonal,
#'jump_l': jump_l, 'jump_s': jump_s, 'jump_t': jump_t})
#return f
# chunk up to make use of dask parallel
#ds = ds.chunk({'time': -1})
# calculate residuals for each vector stl
#stl_resids = do_stl_apply(ds['veg_index'], multi_pct=0.15, period=24, seasonal=13)
#s = ds['veg_index'].stack(z=('x', 'y'))
#s = s.chunk({'time': -1})
#s = s.groupby('z').map(func_stl)
#out = out.unstack()
#s = ds.chunk({'time': -1})
#t = xr.full_like(ds['veg_index'], np.nan)
#out = xr.map_blocks(func_stl, ds['veg_index'], template=t).compute()
#stl_resids = stl_resids.compute()
# working double logistic - messy though
# https://colab.research.google.com/github/1mikegrn/pyGC/blob/master/colab/Asymmetric_GC_integration.ipynb#scrollTo=upaYKFdBGEAo
# see for asym gaussian example
#da = v.where(v['time.year'] == 2016, drop=True)
#def logi(x, a, b, c, d):
#return a / (1 + xr.ufuncs.exp(-c * (x - d))) + b
# get date at max veg index
#idx = int(da['veg_index'].argmax())
# get left and right of peak of season
#da_l = da.where(da['time'] <= da['time'].isel(time=idx), drop=True)
#da_r = da.where(da['time'] >= da['time'].isel(time=idx), drop=True)
# must sort right curve (da_r) descending to flip data
#da_r = da_r.sortby(da_r['time'], ascending=False)
# get indexes of times (times not compat with exp)
#da_l_x_idxs = np.arange(1, len(da_l['time']) + 1, step=1)
#da_r_x_idxs = np.arange(1, len(da_r['time']) + 1, step=1)
# fit curve
#popt_l, pcov_l = curve_fit(logi, da_l_x_idxs, da_l['veg_index'], method="trf")
#popt_r, pcov_r = curve_fit(logi, da_r_x_idxs, da_r['veg_index'], method="trf")
# apply fit to original data
#da_fit_l = logi(da_l_x_idxs, *popt_l)
#da_fit_r = logi(da_r_x_idxs, *popt_r)
# flip fitted vector back to original da order
#da_fit_r = np.flip(da_fit_r)
# get mean of pos value, remove overlap between l and r
#pos_mean = (da_fit_l[-1] + da_fit_r[0]) / 2
#da_fit_l = np.delete(da_fit_l, -1)
#da_fit_r = np.delete(da_fit_r, 1)
# concat back together with mean val inbetween
#da_logi = np.concatenate([da_fit_l, pos_mean, da_fit_r], axis=None)
# smooth final curve with mild savgol
#da_logi = savgol_filter(da_logi, 3, 1)
#fig = plt.subplots(1, 1, figsize=(6, 4))
#plt.plot(da['time'], da['veg_index'], 'o')
#plt.plot(da['time'], da_logi)
#from scipy.signal import find_peaks
#x, y = 0, 1
#v = da.isel(x=x, y=y)
#height = float(v.quantile(dim='time', q=0.75))
#distance = math.ceil(len(v['time']) / 4)
#p = find_peaks(v, height=height, distance=distance)[0]
#p_dts = v['time'].isel(time=p)
#for p_dt in p_dts:
#plt.axvline(p_dt['time'].dt.dayofyear, color='black', linestyle='--')
#count_peaks = len(num_peaks[0])
#if count_peaks > 0:
#return count_peaks
#else:
#return 0
#plt.plot(v['time.dayofyear'], v)
# flip to get min closest to pos
# if we want closest sos val to pos we flip instead to trick argmin
#flip = dists_sos_v.sortby(dists_sos_v['time'], ascending=False)
#min_right = flip.isel(time=flip.argmin('time'))
#temp_pos_cls = da.isel(x=x, y=0).where(da['time'] == min_right['time'].isel(x=x, y=0))
#plt.plot(temp_pos_cls.time, temp_pos_cls, marker='o', color='black', alpha=0.25)
###Output
_____no_output_____ |
tutorials/test_functions/piston/piston_example.ipynb | ###Markdown
###Markdown
Active Subspaces Example Function: Piston Cycle Time

Ryan Howard, CO School of Mines,
Paul Constantine, CO School of Mines,

In this tutorial, we'll be applying active subspaces to the function
$$C = 2\pi\sqrt{\frac{M}{k+S^2\frac{P_0V_0}{T_0}\frac{T_a}{V^2}}},$$
where
$$V = \frac{S}{2k}\left(\sqrt{A^2+4k\frac{P_0V_0}{T_0}T_a}-A\right),\\A=P_0S+19.62M-\frac{kV_0}{S},$$
as seen on [http://www.sfu.ca/~ssurjano/piston.html](http://www.sfu.ca/~ssurjano/piston.html). This function models the cycle time of a piston within a cylinder, and its inputs and their distributions are described in the table below.

Variable|Symbol|Distribution (U(min, max))
:-----|:-----:|:-----
piston Weight|$M$|U(30, 60)
piston Surface Area|$S$|U(.005, .02)
initial Gas Volume|$V_0$|U(.002, .01)
spring Coefficient|$k$|U(1000, 5000)
atmospheric Pressure|$P_0$|U(90000, 110000)
ambient Temperature|$T_a$|U(290, 296)
filling Gas Temperature|$T_0$|U(340, 360)
###Code
import active_subspaces as ac
import numpy as np
%matplotlib inline
# The piston_functions.py file contains two functions: the piston function (piston(xx))
# and its gradient (piston_grad(xx)). Each takes an Mx7 matrix (M is the number of data
# points) with rows being normalized inputs; piston returns a column vector of function
# values at each row of the input and piston_grad returns a matrix whose ith row is the
# gradient of piston at the ith row of xx with respect to the normalized inputs
from piston_functions import *
###Output
_____no_output_____
###Markdown
First we draw M samples randomly from the input space.
###Code
M = 1000 #This is the number of data points to use
#Sample the input space according to the distributions in the table above
M0 = np.random.uniform(30, 60, (M, 1))
S = np.random.uniform(.005, .02, (M, 1))
V0 = np.random.uniform(.002, .01, (M, 1))
k = np.random.uniform(1000, 5000, (M, 1))
P0 = np.random.uniform(90000, 110000, (M, 1))
Ta = np.random.uniform(290, 296, (M, 1))
T0 = np.random.uniform(340, 360, (M, 1))
#the input matrix
x = np.hstack((M0, S, V0, k, P0, Ta, T0))
###Output
_____no_output_____
###Markdown
Now we normalize the inputs, linearly scaling each to the interval $[-1, 1]$.
###Code
#Upper and lower limits for inputs
xl = np.array([30, .005, .002, 1000, 90000, 290, 340])
xu = np.array([60, .02, .01, 5000, 110000, 296, 360])
#XX = normalized input matrix
XX = ac.utils.misc.BoundedNormalizer(xl, xu).normalize(x)
###Output
_____no_output_____
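###Markdown
For reference, the same scaling can be written out directly with numpy. This is a minimal sketch of the linear map to $[-1, 1]$, illustrative only; it should agree with the BoundedNormalizer call above, using the x, xl and xu defined in the previous cells.
###Code
#Hand-rolled version of the [-1, 1] scaling, for comparison with BoundedNormalizer
XX_manual = 2.0*(x - xl)/(xu - xl) - 1.0
print(np.allclose(XX_manual, XX))
###Output
_____no_output_____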
###Markdown
Compute gradients to approximate the matrix on which the active subspace is based.
###Code
#output values (f) and gradients (df)
f = piston(XX)
df = piston_grad(XX)
###Output
_____no_output_____
###Markdown
Now we use our data to compute the active subspace.
###Code
#Set up our subspace using the gradient samples
ss = ac.subspaces.Subspaces()
ss.compute(df=df, nboot=500)
###Output
n should be an integer. Performing conversion.
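###Markdown
For intuition, the subspace computation is essentially an eigendecomposition of the average outer product of the gradient samples. The following is a rough numpy sketch of that core step only; it ignores the library's bootstrap machinery, so it is illustrative rather than a drop-in replacement for ss.compute.
###Code
#Core idea behind ss.compute: eigendecomposition of C = (1/M) * sum_i grad_i grad_i^T
C = df.T.dot(df) / M
evals, evecs = np.linalg.eigh(C)
#sort in descending order to match the eigenvalue plots above
order = np.argsort(evals)[::-1]
evals, evecs = evals[order], evecs[:, order]
print(evals[:3])
###Output
_____no_output_____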
###Markdown
We use plotting utilities to plot eigenvalues, subspace error, components of the first 2 eigenvectors, and 1D and 2D sufficient summary plots (plots of function values vs. active variable values).
###Code
#Component labels
in_labels = ['M', 'S', 'V0', 'k', 'P0', 'Ta', 'T0']
#plot eigenvalues, subspace errors
ac.utils.plotters.eigenvalues(ss.eigenvals, ss.e_br)
ac.utils.plotters.subspace_errors(ss.sub_br)
#manually make the subspace 2D for the eigenvector and 2D summary plots
ss.partition(2)
#Compute the active variable values
y = XX.dot(ss.W1)
#Plot eigenvectors, sufficient summaries
ac.utils.plotters.eigenvectors(ss.W1, in_labels=in_labels)
ac.utils.plotters.sufficient_summary(y, f)
###Output
/opt/conda/lib/python3.6/site-packages/matplotlib/font_manager.py:1331: UserWarning: findfont: Font family ['arial'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
|
AWS/AmazonTranscribe/aws-transcribe.ipynb | ###Markdown
AWS Transcribe: transcribe an audio file to text (see the AWS Transcribe documentation). This notebook uploads an audio file to S3, transcribes it, and then deletes the file from S3.
###Code
import boto3
import time
import json
import urllib.request
def aws_s3_upload(file_name, bucket_name):
s3 = boto3.resource('s3')
# Create bucket if it doesn't already exist
bucket_names = [b.name for b in s3.buckets.all()]
if bucket_name not in bucket_names:
s3.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': 'EU'})
print("Bucket {} created.".format(bucket_name))
s3.meta.client.upload_file(file_name, bucket_name, file_name)
print("{} uploaded to {}.".format(file_name, bucket_name))
return
def aws_s3_delete(file_name, bucket_name, del_bucket=False):
s3 = boto3.resource('s3')
try:
s3.meta.client.delete_object(Bucket=bucket_name, Key=file_name)
print("{} deleted from {}.".format(file_name, bucket_name))
except:
print("Unable to delete {} from {}.".format(file_name, bucket_name))
if del_bucket:
try:
s3.meta.client.delete_bucket(Bucket=bucket_name)
print("Bucket {} deleted.".format(bucket_name))
except:
print("Unable to delete bucket {}.".format(bucket_name))
return
def aws_transcribe(file_name, bucket_name):
client = boto3.client(service_name='transcribe',
region_name='eu-west-1',
use_ssl=True)
job_name = 'example_%s' % round(time.time())
job_uri = 's3://%s/%s' % (bucket_name, file_name)
client.start_transcription_job(
TranscriptionJobName=job_name,
Media={'MediaFileUri': job_uri},
MediaFormat=file_name[-3:],
LanguageCode='en-US'
)
tic = time.time()
while True:
status = client.get_transcription_job(TranscriptionJobName=job_name)
if status['TranscriptionJob']['TranscriptionJobStatus'] in ['COMPLETED', 'FAILED']:
break
toc = time.time()
print("Transcription still processing... cumulative run time: {:.1f}s".format(toc-tic))
time.sleep(10)
    print("Transcription completed! Total run time: {:.1f}s".format(time.time()-tic))
json_url = status['TranscriptionJob']['Transcript']['TranscriptFileUri']
with urllib.request.urlopen(json_url) as url:
text = json.loads(url.read().decode())
return text['results']['transcripts'][0]['transcript']
file_name = 'the_raven.mp3'
bucket_name = 'your_bucket_name'
aws_s3_upload(file_name, bucket_name)
result = aws_transcribe(file_name, bucket_name)
aws_s3_delete(file_name, bucket_name)
# Print transcription
print(result)
###Output
Once upon a midnight dreary, while i pondered, weak and weary over many a quaint and curious volume of forgotten law, while i nodded, nearly napping. Suddenly there came a tapping as of someone gently rapping, rapping at my chamber door. To some visitor, i muttered, tapping at my chamber door. Only this and nothing more.
|
code/cookie_syncing_heuristic.ipynb | ###Markdown
Helper function for getting identifiers
###Code
def get_identifier_cookies(cookie_string, cookie_length = 8):
cookie_set = set()
for cookie in cookie_string.split('\n'):
cookie = cookie.split(';')[0]
if cookie.count('=') >= 1:
cookie = cookie.split('=', 1)
cookie_set |= set(re.split('[^a-zA-Z0-9_=-]', cookie[1]))
cookie_set.add(cookie[0])
else:
cookie_set |= set(re.split('[^a-zA-Z0-9_=-]', cookie))
# remove cookies with length < 8
cookie_set = set([s for s in list(cookie_set) if len(s) >= cookie_length])
return cookie_set
def get_identifiers_from_qs(url, qs_item_length = 8):
qs = URLparse.parse_qsl(URLparse.urlsplit(url).query)
qs_set = set()
for item in qs:
qs_set |= set(re.split('[^a-zA-Z0-9_=-]', item[0]))
qs_set |= set(re.split('[^a-zA-Z0-9_=-]', item[1]))
qs_set = set([s for s in list(qs_set) if len(s) >= qs_item_length])
return qs_set
def get_identifiers_from_uncommon_headers(header_prop, item_length = 8):
splitted_header_prop_set = set()
splitted_header_prop = set(re.split('[^a-zA-Z0-9_=-]', header_prop))
splitted_header_prop_set = set([s for s in list(splitted_header_prop) if len(s) >= item_length])
return splitted_header_prop_set
def get_domain_or_hostname(url):
    # we stop if we cannot retrieve the domain or hostnames
# we won't be able to link domains/hostnames if they are empty or unavailable
current_domain_or_hostname = du.get_ps_plus_1(url)
if current_domain_or_hostname == '' or current_domain_or_hostname == None:
current_domain_or_hostname = du.urlparse(url).hostname
if current_domain_or_hostname == '' or current_domain_or_hostname == None:
return False, ''
return True, current_domain_or_hostname
known_http_headers = set()
known_http_headers_raw = utilities.read_file_newline_stripped('common_headers.txt')
for item in known_http_headers_raw:
if item.strip() != '':
known_http_headers.add(item.strip().lower())
def check_csync_events(identifiers, next_identifiers, key, current_domain_or_hostname, next_url, csync_domains):
for identifier in identifiers:
next_domain_or_hostname = get_domain_or_hostname(next_url)
if not next_domain_or_hostname[0]:
break
next_domain_or_hostname = next_domain_or_hostname[1]
domain_domain = current_domain_or_hostname + '|' + next_domain_or_hostname
if domain_domain not in csync_domains:
csync_domains[domain_domain] = {}
csync_domains[domain_domain]['chains'] = []
csync_domains[domain_domain]['b64_chains'] = []
csync_domains[domain_domain]['md5_chains'] = []
csync_domains[domain_domain]['sha1_chains'] = []
base64_identifier = base64.b64encode(identifier.encode('utf-8')).decode('utf8')
md5_identifier = hashlib.md5(identifier.encode('utf-8')).hexdigest()
sha1_identifier = hashlib.sha1(identifier.encode('utf-8')).hexdigest()
if identifier in next_url or identifier in next_identifiers:
csync_domains[domain_domain]['chains'].append({'chain': key, 'identifier': identifier})
elif base64_identifier in next_url or base64_identifier in next_identifiers:
csync_domains[domain_domain]['b64_chains'].append({'chain':key, 'identifier': identifier, 'encoded': base64_identifier})
elif md5_identifier in next_url or md5_identifier in next_identifiers:
csync_domains[domain_domain]['md5_chains'].append({'chain':key, 'identifier': identifier, 'encoded': md5_identifier})
elif sha1_identifier in next_url or sha1_identifier in next_identifiers:
csync_domains[domain_domain]['sha1_chains'].append({'chain':key, 'identifier': identifier, 'encoded': sha1_identifier})
return csync_domains
###Output
_____no_output_____
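###Markdown
To make the extraction rules concrete, here is a small illustrative call on made-up values. The cookie string and URL below are hypothetical, and the cell assumes the notebook's earlier imports (e.g. re and urllib.parse as URLparse) are already loaded.
###Code
# hypothetical inputs, only to show what the helpers return
example_cookie = 'uid=a1b2c3d4e5f6; path=/; expires=Wed, 01 Jan 2020'
print(get_identifier_cookies(example_cookie))  # identifiers of length >= 8, e.g. {'a1b2c3d4e5f6'}
print(get_identifiers_from_qs('https://tracker.example/sync?partner_uid=a1b2c3d4e5f6'))
###Output
_____no_output_____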
###Markdown
Cookie syncing identification code
###Code
def run_csync_heuristic(json_representation, known_http_headers, csync_domains):
pbar = tqdm(total=len(json_representation), position=0, leave=True)
for key in json_representation:
pbar.update(1)
for idx, item in enumerate(json_representation[key]['content']):
current_url = item['url']
current_referrer = item['referrer']
current_identifiers = set()
current_domain_or_hostname = get_domain_or_hostname(current_url)
if not current_domain_or_hostname[0]:
continue
current_domain_or_hostname = current_domain_or_hostname[1]
sent_cookies = ''
for s_item in item['request_headers']:
if s_item[0].lower() == 'cookie':
current_identifiers |= get_identifier_cookies(s_item[1])
if s_item[0].lower() not in known_http_headers:
current_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
recieved_cookies = ''
for s_item in item['response_headers']:
if s_item[0].lower() == 'set-cookie':
current_identifiers |= get_identifier_cookies(s_item[1])
if s_item[0].lower() not in known_http_headers:
current_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
current_identifiers |= get_identifiers_from_qs(current_url)
current_identifiers |= get_identifiers_from_qs(current_referrer)
if key.startswith('J|'):
end = len(json_representation[key]['content'])
else:
end = idx + 2
if end > len(json_representation[key]['content']):
continue
for item_1 in json_representation[key]['content'][idx+1:end]:
next_url = item_1['url']
next_headers = item_1['request_headers']
next_identifiers = set()
for s_item in next_headers:
if s_item[0].lower() == 'cookie':
next_identifiers |= get_identifier_cookies(s_item[1])
if s_item[0].lower() not in known_http_headers:
next_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
csync_domains = check_csync_events(current_identifiers, next_identifiers, key, current_domain_or_hostname, next_url, csync_domains)
return csync_domains
current_csync = {}
current_csync = run_csync_heuristic(http_chains, known_http_headers, current_csync)
current_csync = run_csync_heuristic(js_chains, known_http_headers, current_csync)
###Output
_____no_output_____
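###Markdown
The heuristic above expects http_chains / js_chains (built earlier in the notebook, not shown here) to be dictionaries keyed by a chain id, where each entry has a 'content' list of requests carrying 'url', 'referrer', 'request_headers' and 'response_headers'. That shape is inferred from the key accesses in the code; the chain below is entirely made up, purely to illustrate the expected structure.
###Code
# hypothetical single-chain input, only to document the expected structure
example_chains = {
    'H|example-chain-1': {
        'content': [
            {'url': 'https://adtech-a.example/pixel?uid=a1b2c3d4e5f6',
             'referrer': 'https://publisher.example/article',
             'request_headers': [['Cookie', 'uid=a1b2c3d4e5f6']],
             'response_headers': [['Set-Cookie', 'sync=done; path=/']]},
            {'url': 'https://adtech-b.example/match?partner_uid=a1b2c3d4e5f6',
             'referrer': 'https://adtech-a.example/pixel',
             'request_headers': [['Cookie', 'bid=z9y8x7w6v5u4']],
             'response_headers': []},
        ]
    }
}
example_csync = run_csync_heuristic(example_chains, known_http_headers, {})
###Output
_____no_output_____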
###Markdown
Clean up csync events
###Code
def csync_clean_up(csync_domains):
to_delete = set()
for domain_domain in csync_domains:
if len(csync_domains[domain_domain]['chains']) == 0 and \
len(csync_domains[domain_domain]['b64_chains']) == 0 and \
len(csync_domains[domain_domain]['md5_chains']) == 0 and \
len(csync_domains[domain_domain]['sha1_chains']) == 0:
to_delete.add(domain_domain)
for key in to_delete:
del csync_domains[key]
return csync_domains
print(len(current_csync))
current_csync = csync_clean_up(current_csync)
print(len(current_csync))
###Output
_____no_output_____
###Markdown
Helper function for cookie syncing statistics
###Code
def count_csync_events(_from, _to, sending_json_obj, receiving_json_obj):
if _from not in sending_json_obj:
sending_json_obj[_from] = {}
sending_json_obj[_from]['count'] = 1
sending_json_obj[_from]['domains'] = set({_to})
else:
sending_json_obj[_from]['count'] += 1
sending_json_obj[_from]['domains'].add(_to)
if _to not in receiving_json_obj:
receiving_json_obj[_to] = {}
receiving_json_obj[_to]['count'] = 1
receiving_json_obj[_to]['domains'] = set({_from})
else:
receiving_json_obj[_to]['count'] += 1
receiving_json_obj[_to]['domains'].add(_from)
return sending_json_obj, receiving_json_obj
def get_csynced_chains(chains, chains_synced):
for item in chains:
if item['chain'] not in chains_synced:
chains_synced[item['chain']] = {}
chains_synced[item['chain']]['count'] = 1
else:
chains_synced[item['chain']]['count'] += 1
# break
return chains_synced
def get_unique_domains_in_chains(json_representation, khaleesi_detections):
all_domains = set()
for key in json_representation:
if key not in khaleesi_detections:
continue
for idx, item in enumerate(json_representation[key]['content']):
current_domain_or_hostname = get_domain_or_hostname(item['url'])
if not current_domain_or_hostname[0]:
continue
all_domains.add(current_domain_or_hostname[1])
return all_domains
###Output
_____no_output_____
###Markdown
Finding cookie syncing stats
###Code
def compute_csync_stats(csync_domains):
all_domains = set()
sending_to = {}
recieved_from = {}
b64_sending_to = {}
b64_recieved_from = {}
md5_sending_to = {}
md5_recieved_from = {}
sha1_sending_to = {}
sha1_recieved_from = {}
chains_synced_simple = {}
chains_synced_b64 = {}
chains_synced_md5 = {}
chains_synced_sha1 = {}
for domain_domain in csync_domains:
_from = domain_domain.split('|')[0]
_to = domain_domain.split('|')[1]
if _from == _to:
continue
if len(csync_domains[domain_domain]['chains']) > 0:
sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
chains_synced_simple = get_csynced_chains(csync_domains[domain_domain]['chains'], chains_synced_simple)
if len(csync_domains[domain_domain]['b64_chains']) > 0:
sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
b64_sending_to, b64_recieved_from = count_csync_events(_from, _to, b64_sending_to, b64_recieved_from)
chains_synced_b64 = get_csynced_chains(csync_domains[domain_domain]['b64_chains'], chains_synced_b64)
if len(csync_domains[domain_domain]['md5_chains']) > 0:
sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
md5_sending_to, md5_recieved_from = count_csync_events(_from, _to, md5_sending_to, md5_recieved_from)
chains_synced_md5 = get_csynced_chains(csync_domains[domain_domain]['md5_chains'], chains_synced_md5)
if len(csync_domains[domain_domain]['sha1_chains']) > 0:
sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
sha1_sending_to, sha1_recieved_from = count_csync_events(_from, _to, sha1_sending_to, sha1_recieved_from)
chains_synced_sha1 = get_csynced_chains(csync_domains[domain_domain]['sha1_chains'], chains_synced_sha1)
# csync domain statistics
csync_domains = set(sending_to.keys()).union(set(recieved_from.keys())).\
union(set(b64_sending_to.keys())).union(set(b64_recieved_from.keys())).\
union(set(md5_sending_to.keys())).union(set(md5_recieved_from.keys())).\
union(set(sha1_sending_to.keys())).union(set(sha1_recieved_from.keys()))
# csync chain statistics
csync_chains = set(chains_synced_simple.keys()).union(set(chains_synced_b64.keys()))\
.union(set(chains_synced_md5.keys()))\
.union(set(chains_synced_sha1.keys()))
# csync encoded chain statistics
csync_encoded = set(b64_sending_to.keys()).union(set(b64_recieved_from.keys()))\
.union(set(md5_sending_to.keys())).union(set(md5_recieved_from.keys()))\
.union(set(sha1_sending_to.keys())).union(set(sha1_recieved_from.keys()))
# encoded cookie syncing stats can also be returned
return csync_domains, sending_to, recieved_from
csync_domains, sending_to, recieved_from = compute_csync_stats(current_csync)
###Output
_____no_output_____
###Markdown
Print top csync domains
###Code
def print_table(json_obj, count_limit = 20):
count = 0
t = PrettyTable(['Domains', 'Csync count'])
for key in json_obj:
count += 1
if count <= count_limit:
t.add_row([key, json_obj[key]['count']])
print(t)
def average_sharing(syncing_domains):
total = 0
for key in syncing_domains:
total += syncing_domains[key]['count']
print(total / len(syncing_domains))
def get_top_csyncs(sending_to, recieved_from):
sending_to_sorted = OrderedDict(sorted(sending_to.items(), key=lambda k: k[1]['count'], reverse=True))
recieved_from_sorted = OrderedDict(sorted(recieved_from.items(), key=lambda k: k[1]['count'], reverse=True))
print_table(sending_to_sorted)
average_sharing(sending_to)
print_table(recieved_from_sorted)
average_sharing(recieved_from)
get_top_csyncs(sending_to, recieved_from)
###Output
_____no_output_____ |
notebooks/graph_algos/attrib2vec/corpus 2020 audience_overlap lvl data 2.ipynb | ###Markdown
Load audience overlap edges for level 2
###Code
level = 2
audience_overlap_sites = load_level_data(os.path.join(_ALEXA_DATA_PATH, 'corpus_2020_audience_overlap_sites_scrapping_result.json'), level=level)
audience_overlap_sites_NODES = create_audience_overlap_nodes(audience_overlap_sites)
print(audience_overlap_sites_NODES[:5])
edge_df = pd.DataFrame(audience_overlap_sites_NODES, columns=['source', 'target'])
edge_df.head()
###Output
_____no_output_____
###Markdown
Find all unique nodes in edges
###Code
nodes_in_edges = list(set(edge_df.source.unique().tolist() + edge_df.target.unique().tolist()))
print('Number of unique nodes in edges:', len(nodes_in_edges), 'Sample:', nodes_in_edges[:5])
###Output
Number of unique nodes in edges: 26573 Sample: ['herasblog.com', '10bestalternatives.com', 'worldfootball.net', 'ksn.com', 'americanbar.org']
###Markdown
1. Load all node features
###Code
node_features_df = load_node_features()
node_features_df = node_features_df.set_index('site')
node_features_df.head()
###Output
_____no_output_____
###Markdown
Subset node_features
###Code
node_features_df = node_features_df.loc[nodes_in_edges]
node_features_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Index: 26573 entries, herasblog.com to procuresearch.center
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 alexa_rank 17677 non-null float64
1 daily_pageviews_per_visitor 17684 non-null float64
2 daily_time_on_site 11799 non-null float64
3 total_sites_linking_in 25446 non-null float64
4 bounce_rate 10665 non-null float64
dtypes: float64(5)
memory usage: 1.2+ MB
###Markdown
2. Fill all missing alexa_rank and total_sites_linking_in with 0
###Code
node_features_df.alexa_rank = node_features_df.alexa_rank.fillna(0)
node_features_df.total_sites_linking_in = node_features_df.total_sites_linking_in.fillna(0)
node_features_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Index: 26573 entries, herasblog.com to procuresearch.center
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 alexa_rank 26573 non-null float64
1 daily_pageviews_per_visitor 17684 non-null float64
2 daily_time_on_site 11799 non-null float64
3 total_sites_linking_in 26573 non-null float64
4 bounce_rate 10665 non-null float64
dtypes: float64(5)
memory usage: 1.2+ MB
###Markdown
3. Normalizing features
###Code
import math
node_features_df['normalized_alexa_rank'] = node_features_df['alexa_rank'].apply(lambda x: 1/x if x else 0)
node_features_df['normalized_total_sites_linked_in'] = node_features_df['total_sites_linking_in'].apply(lambda x: math.log2(x) if x else 0)
###Output
_____no_output_____
###Markdown
Create Graph
###Code
import stellargraph as sg
G = sg.StellarGraph(nodes=node_features_df.loc[nodes_in_edges, ['normalized_alexa_rank', 'normalized_total_sites_linked_in']], edges=edge_df)
print(G.info())
###Output
StellarGraph: Undirected multigraph
Nodes: 26573, Edges: 49372
Node types:
default: [26573]
Features: float32 vector, length 2
Edge types: default-default->default
Edge types:
default-default->default: [49372]
Weights: all 1 (default)
Features: none
###Markdown
Unsupervised Attrib2Vec
###Code
from stellargraph.mapper import Attri2VecLinkGenerator, Attri2VecNodeGenerator
from stellargraph.layer import Attri2Vec, link_classification
from stellargraph.data import UnsupervisedSampler
from tensorflow import keras
# 1. Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk, and random seed.
nodes = list(G.nodes())
number_of_walks = 1
length = 5
# 2. Create the UnsupervisedSampler instance with the relevant parameters passed to it.
unsupervised_samples = UnsupervisedSampler(G, nodes=nodes, length=length, number_of_walks=number_of_walks)
# 3. Create a node pair generator:
batch_size = 50
epochs = 4
num_samples = [10, 5]
generator = Attri2VecLinkGenerator(G, batch_size)
train_gen = generator.flow(unsupervised_samples)
layer_sizes = [128]
attri2vec = Attri2Vec(layer_sizes=layer_sizes, generator=generator, bias=False, normalize=None)
# Build the model and expose input and output sockets of attri2vec, for node pair inputs:
x_inp, x_out = attri2vec.in_out_tensors()
prediction = link_classification(output_dim=1, output_act="sigmoid", edge_embedding_method="ip")(x_out)
model = keras.Model(inputs=x_inp, outputs=prediction)
model.compile(
optimizer=keras.optimizers.Adam(lr=1e-3),
loss=keras.losses.binary_crossentropy,
metrics=[keras.metrics.binary_accuracy],
)
history = model.fit(train_gen, epochs=epochs, verbose=2, use_multiprocessing=False, workers=1, shuffle=True)
x_inp_src = x_inp[0]
x_out_src = x_out[0]
embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)
node_gen = Attri2VecNodeGenerator(G, batch_size).flow(node_features_df.index.tolist())
node_embeddings = embedding_model.predict(node_gen, workers=1, verbose=1)
embeddings_wv = dict(zip(node_features_df.index.tolist(), node_embeddings.tolist()))
print('Sample:', embeddings_wv['crooked.com'][:10], len(embeddings_wv['crooked.com']))
###Output
532/532 [==============================] - 1s 2ms/step
Sample: [1.4709429763115622e-07, 0.02666628360748291, 2.755252523911622e-07, 6.192561130546892e-08, 4.340579096151487e-08, 6.282406417312814e-08, 0.041838258504867554, 0.04803630709648132, 0.06210103631019592, 3.0991598123364383e-06] 128
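###Markdown
As a quick sanity check on the learned vectors, two sites can be compared by cosine similarity. This is a small sketch with numpy; the two site names are just examples that happen to appear in this notebook's data and output.
###Code
import numpy as np
# illustrative check: cosine similarity between two embedded sites
def cosine_sim(u, v):
    u, v = np.asarray(u), np.asarray(v)
    return float(u.dot(v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-12))
print(cosine_sim(embeddings_wv['crooked.com'], embeddings_wv['worldfootball.net']))
###Output
_____no_output_____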
###Markdown
Export embeddings as feature
###Code
export_model_as_feature(embeddings_wv, f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}')
run_experiment(features=f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}')
run_experiment(features=f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}', task='bias')
###Output
+------+---------+---------------------+---------------+--------------------+----------------------------------------------+
| task | dataset | classification_mode | type_training | normalize_features | features |
+------+---------+---------------------+---------------+--------------------+----------------------------------------------+
| bias | acl2020 | single classifier | combine | False | attrib2vec_audience_overlap_level_2_epochs_4 |
+------+---------+---------------------+---------------+--------------------+----------------------------------------------+
|
MAPS/other/SPCAM5_Other_Visualizations.ipynb | ###Markdown
Frozen moist static energy: $FMSE = \int_0^{P_s}\frac{dp}{g}\left(c_p T+g z+L_v q-L_f q_{ice}\right)$. Did not create output in SPCAM for this variable - will maybe add to next run? Potential Temperature, $\theta$: $\theta = T\left(\frac{p_0}{p}\right)^{R/c_p}$
###Code
def theta_gen(t_array, p_array):
    # work on a copy so the input temperature array is not modified in place
    theta_array = np.copy(t_array)
    for i in range(len(p_array)):
        theta_array[:,i] = t_array[:,i]*(1013.25/p_array[i])**(287.0/1004.0)
    return theta_array
theta = theta_gen(T, P)
def plotting(datas, varname, title, levels):
plt.plot(datas, levels, linewidth = 4)
plt.ylabel('Pressure Level', fontsize = 20)
plt.xlabel(varname, fontsize = 20)
plt.gca().invert_yaxis()
plt.title('Snapshot of '+title+' location')
var = 'Potential Temperature (K)'
location = 'surface'
plotting(theta[0, :], var, location, P)
###Output
_____no_output_____
###Markdown
Equivalent Potential Temperature, $\theta_e$: $\theta_e = \theta\, e^{\frac{L_v q}{c_p T}}$
###Code
def theta_e_gen(t_array, q_array, p_array):
    # work on a copy so the input temperature array is not overwritten
    theta_e_array = np.copy(t_array)
    theta_array = theta_gen(t_array, p_array)
    for i in range(len(theta_e_array)):
        for j in range(len(theta_e_array[i])):
            theta_e_array[i, j] = theta_array[i,j]*math.exp((2501000.0*q_array[i,j])/(1004.0*t_array[i,j]))
    return theta_e_array
theta_e = theta_e_gen(T, Q, P)
var = 'Equivalent Potential Temperature (K)'
location = 'surface'
plotting(theta_e[0, :], var, location, P)
###Output
_____no_output_____
###Markdown
Integrated Sensible Heat ($\mathrm{W/m^2}$): $SH = \int_0^{P_s} \frac{dp}{g}\,c_p T$. Not entirely sure if I am using Scipy's built-in trapz function correctly, so for now, I will code a function for a numerical implementation of integration via the trapezoidal rule: $SH = \frac{c_p}{g}\sum_{i}\frac{T_i+T_{i+1}}{2}\,\Delta p_i$
###Code
ps = test_ds.PS.values
levs = np.squeeze(test_ds.lev.values)
hyai = test_ds.hyai.values
hybi = test_ds.hybi.values
g = 9.81
cp = 1004.0
PS = 1e5
P0 = 1e5
P = P0*hyai+PS*hybi # Total pressure [Pa]
dp = P[1:]-P[:-1] # Differential pressure [Pa]
#convert from k/s to w/m^2
def vert_integral(values, diffs):
    integrated = np.zeros(shape=len(values))  # one integrated value per sample
integrated[:] = np.nan
integrate = 0
for i in range(len(values)):
for j in range(len(values[i])-1):
integrate += 0.5*(values[i,j]+values[i, j+1])*diffs[j]*1004.0/9.81
integrated[i] = integrate
integrate = 0
return integrated
###Output
_____no_output_____
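###Markdown
As a usage sketch, the helper above gives the column-integrated sensible heat when applied to a temperature array of shape [samples, levels] matching dp. Here the T array loaded earlier in the notebook is assumed to have that shape.
###Code
#column-integrated sensible heat (W/m^2), one value per sample; assumes T is [samples, levels]
SH = vert_integral(T, dp)
print(SH.shape, SH[:3])
###Output
_____no_output_____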
###Markdown
Integrated Latent Heat ($\mathrm{W/m^2}$): $LH = \int_0^{P_s} \frac{dp}{g}\,L_v q$. Mass-Weighted Integral of $w$: $W = \int_0^{P_s} w\,dp$
###Code
W = np.squeeze(test_ds.CRM_W.values)
print(W.shape)
###Output
(96, 30, 128)
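###Markdown
Following the latent-heat formula above, the same trapezoidal pattern can be reused with a different scaling constant. The specific-humidity array Q (shape [samples, levels]) is assumed to exist in the full notebook but is not loaded in this excerpt, so the final call is left commented out.
###Code
def column_integral(values, dp, scale):
    # trapezoidal column integral, scaled by (scale / g); mirrors vert_integral above
    out = np.zeros(len(values))
    for i in range(len(values)):
        acc = 0.0
        for j in range(values.shape[1] - 1):
            acc += 0.5*(values[i, j] + values[i, j+1])*dp[j]*scale/9.81
        out[i] = acc
    return out
#LH = column_integral(Q, dp, 2.501e6)  # Q (specific humidity) is assumed; Lv = 2.501e6 J/kg
###Output
_____no_output_____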
|
SqlMetrics/Evangelist_Perf_Processing.ipynb | ###Markdown
Data Cleaning
###Code
rawData = pd.read_csv('data/SqlMetric_prepared.csv')
rawData.head()
rawData.drop(['ActivityID', 'Event Name', 'criteria', 'Process Name'], axis=1, inplace=True)
rawData.isnull().sum()
rawData.dropna(inplace=True)
print(rawData.isnull().sum())
print('Dataset shape: {}'.format(rawData.shape))
rawData.head()
rawData['db_state'] = rawData['metrics'].apply(lambda x: x.split(' '))
rawData.drop(['metrics'], axis=1, inplace=True)
print('Dataset shape: {}'.format(rawData.shape))
row = rawData.iloc[0,5]
print('Metrics: {}'.format(row))
dic = {}
for st in row:
splitted = st.split(':')
key = splitted[0]
dic[key] = []
print('Parsing sql metrics')
for index, row in rawData.iterrows():
metrics_row = row['db_state']
for i in range(0,4):
st = metrics_row[i]
splitted = st.split(':')
key = 'Table_{}'.format(i+1)
val = np.NaN
if len(splitted)>1:
val = int(splitted[1])
dic[key].append(val)
print('Adding new metrics columns')
for key in dic.keys():
    rawData[key] = dic[key]
print('Drop db_state column')
rawData.drop(['db_state'], axis=1, inplace=True)
print('Parsing sql metrics finished')
print('Dataset shape: {}'.format(rawData.shape))
print(rawData.isnull().sum())
rawData.dropna(inplace=True)
print('Dataset shape: {}'.format(rawData.shape))
rawData = rawData.applymap(lambda x: x.replace(',','') if type(x) is str or type(x) is object else x)
rawData['is_cold_start'] = rawData['usn'].apply(lambda x: True if int(x) == 0 else False)
rawData.drop(['usn', 'timestamp'], axis=1, inplace=True)
rawData.rename(columns={'Time MSec': 'timestamp', 'DURATION_MSEC': 'duration'}, inplace=True)
rawData['duration'] = rawData['duration'].apply(lambda x: float(x))
rawData.loc[:, 'second'] = rawData.loc[:, 'timestamp'].apply(lambda x: round(x/1000))
print(rawData.isnull().sum())
rawData.head()
rawData.shape
###Output
_____no_output_____
###Markdown
Studying data: declare some helpers
###Code
main_color = '#2a6e52'
accent_color = '#6e4b2a'
plt.rcParams["axes.grid"] = False
plt.rcParams["axes.facecolor"] = '#fff'
plt.rcParams["axes.edgecolor"] = '#222'
plt.rcParams["lines.linewidth"] = 2
plt.rcParams["lines.color"] = main_color
def get_top_calling_procedures(df_procedures, limit):
    top = df_procedures.groupby('viewName')['viewName'].count().sort_values(ascending=False)
    return df_procedures.loc[df_procedures['viewName'].isin(top[top >= limit].keys())]
def calculate_std(data):
    # per-procedure median and standard deviation of call durations
    stats = {}
    for p in data['viewName'].unique():
        proc = data.loc[data['viewName'] == p]
        stats[p] = {'median': proc['duration'].median(), 'std': proc['duration'].std()}
    return stats
def draw_dependency_plot(df, tables):
procedure_names = df['viewName'].unique()
proc_num = range(len(procedure_names))
table_count = len(tables)
proc_count = len(proc_num)
rowsCount = table_count + proc_count
fig, ax = plt.subplots(rowsCount,1,figsize=(20,rowsCount*5))
current_row = 0
for p in procedure_names:
proc = df.loc[df['viewName']== p]
x = proc['second'].unique()
y = proc[['duration','second']].groupby('second').mean()
ax[current_row].set_title(p)
ax[current_row].plot(x,y, color = main_color)
ax[current_row].set_xlabel('Time (s)')
ax[current_row].set_ylabel('Duration (ms)')
ax[current_row].grid(True)
current_row+= 1
for table in tables:
df_table = df.loc[:,[table, 'second']]
x2 = df_table['second'].unique()
y2 = df_table.groupby('second').max()
ax[current_row].set_title(table)
ax[current_row].plot(x2,y2, color= accent_color)
ax[current_row].set_xlabel('Time (s)')
ax[current_row].set_ylabel('Table Count')
current_row+=1
plt.show()
from tslearn.clustering import KShape
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
#https://towardsdatascience.com/how-to-apply-k-means-clustering-to-time-series-data-28d04a8f7da3
def clusterize(data):
start_time = time.time()
seed = 0
X_train = data
X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train)
sz = X_train.shape[1]
ks = KShape(n_clusters=3, verbose=True, random_state=seed)
y_pred = ks.fit_predict(X_train)
plt.figure()
for yi in range(3):
plt.subplot(3, 1, 1 + yi)
for xx in X_train[y_pred == yi]:
plt.plot(xx.ravel(), "k-", alpha=.2)
plt.plot(ks.cluster_centers_[yi].ravel(), "r-")
plt.xlim(0, sz)
plt.ylim(-4, 4)
plt.title("Cluster %d" % (yi + 1))
plt.tight_layout()
plt.show()
print('Clustering completed in {:,.2f} secs'.format(time.time()-start_time))
###Output
_____no_output_____
###Markdown
Look at data stats
###Code
rawData.describe()
###Output
_____no_output_____
###Markdown
Get only changing columns
###Code
data = rawData[['viewName','timestamp','second', 'duration', 'is_cold_start','Table_2', 'Table_3']]
normal_execution = data.loc[data['is_cold_start']==False, :]
cold_execution = data.loc[data['is_cold_start']==True, :]
cold_percentage =len(cold_execution)*100/len(data)
normal_percentage = 100 - cold_percentage
print("Total executions: {}. Normal executions: {} ({:,.2f}% of total). Cold executions: {} ({:,.2f}% of total).".format(len(data), len(normal_execution), normal_percentage, len(cold_execution), cold_percentage))
top_p = get_top_calling_procedures(cold_execution, 1000)
tables = ['Table_2', 'Table_3']
draw_dependency_plot(top_p, tables)
top_p = get_top_calling_procedures(cold_execution, 1000)
top_frequency = top_p.loc[:,['viewName', 'timestamp', 'second']].groupby(['viewName','second']).count()
print(top_frequency.describe())
top_p = get_top_calling_procedures(cold_execution, 1000)
parameters = top_p[['second', 'duration']].to_numpy()
clusterize(parameters)
###Output
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
Resumed because of empty cluster
|
RadioClassification_keras.ipynb | ###Markdown
Downloading Dataset and required Libs (Colab Only)
###Code
!wget https://github.com/the-redlord/Space-Radio-Signal-Classification_keras/raw/master/dataset.rar
!ls
!unrar x -r ./dataset.rar
!pip install livelossplot
###Output
Requirement already satisfied: livelossplot in /usr/local/lib/python3.6/dist-packages (0.5.1)
Requirement already satisfied: bokeh; python_version >= "3.6" in /usr/local/lib/python3.6/dist-packages (from livelossplot) (1.4.0)
Requirement already satisfied: matplotlib; python_version >= "3.6" in /usr/local/lib/python3.6/dist-packages (from livelossplot) (3.2.2)
Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from livelossplot) (5.5.0)
Requirement already satisfied: tornado>=4.3 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (4.5.3)
Requirement already satisfied: six>=1.5.2 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (1.12.0)
Requirement already satisfied: PyYAML>=3.10 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (3.13)
Requirement already satisfied: numpy>=1.7.1 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (1.18.5)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (2.8.1)
Requirement already satisfied: Jinja2>=2.7 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (2.11.2)
Requirement already satisfied: packaging>=16.8 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (20.4)
Requirement already satisfied: pillow>=4.0 in /usr/local/lib/python3.6/dist-packages (from bokeh; python_version >= "3.6"->livelossplot) (7.0.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib; python_version >= "3.6"->livelossplot) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib; python_version >= "3.6"->livelossplot) (1.2.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib; python_version >= "3.6"->livelossplot) (2.4.7)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (47.3.1)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (0.7.5)
Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (4.4.2)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (0.8.1)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (1.0.18)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (4.3.3)
Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (2.1.3)
Requirement already satisfied: pexpect; sys_platform != "win32" in /usr/local/lib/python3.6/dist-packages (from ipython->livelossplot) (4.8.0)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.7->bokeh; python_version >= "3.6"->livelossplot) (1.1.1)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->livelossplot) (0.2.5)
Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->livelossplot) (0.2.0)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != "win32"->ipython->livelossplot) (0.6.0)
###Markdown
Classify Radio Signals from Outer Space with Keras [Allen Telescope Array](https://flickr.com/photos/93452909@N00/5656086917) by [brewbooks](https://www.flickr.com/people/93452909@N00) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) Task 1: Import Libraries
###Code
from livelossplot.inputs.tf_keras import PlotLossesCallback
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import numpy as np
np.random.seed(42)
import warnings;warnings.simplefilter('ignore')
%matplotlib inline
print('Tensorflow version:', tf.__version__)
tf.config.list_physical_devices('GPU')
###Output
Tensorflow version: 2.2.0
###Markdown
Task 2: Load and Preprocess SETI Data
###Code
train_images = pd.read_csv('dataset/train/images.csv',header=None)
train_labels = pd.read_csv('dataset/train/labels.csv',header=None)
val_images = pd.read_csv('dataset/validation/images.csv',header=None)
val_labels = pd.read_csv('dataset/validation/labels.csv',header=None)
train_images.head(3)
train_labels.head(3)
print("Training set shape: ", train_images.shape, train_labels.shape)
print('Validation set shape:',val_images.shape, val_labels.shape)
# reshape the data to spectrograms (images)
x_train = train_images.values.reshape(3200,64,128,1)
x_val = val_images.values.reshape(800,64,128,1)
y_train = train_labels.values
y_val = val_labels.values
###Output
_____no_output_____
###Markdown
Task 3: Plot 2D Spectrograms
###Code
plt.figure(0, figsize=(12,12))
for i in range(1,4):
plt.subplot(1,3,i)
img = np.squeeze(x_train[np.random.randint(0, x_train.shape[0])])
plt.xticks([])
plt.yticks([])
plt.imshow(img,cmap='gray')
plt.imshow(np.squeeze(x_train[5]))
###Output
_____no_output_____
###Markdown
Task 4: Create Training and Validation Data Generators
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# image augmentation
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)
###Output
_____no_output_____
###Markdown
Task 5: Creating the CNN Model
###Code
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint
# Initialising the CNN
model = Sequential()
# 1st Convolution
model.add(Input(shape=(64,128,1)))
model.add(Conv2D(32,(5,5),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(64,(5,5),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(4,activation='softmax'))
###Output
_____no_output_____
###Markdown
Task 6: Learning Rate Scheduling and Compile the Model
###Code
initial_learning_rate = 0.005
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = initial_learning_rate,
decay_steps = 5,
decay_rate = 0.96,
staircase=True
)
opt = Adam(learning_rate=lr_schedule)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
###Output
_____no_output_____
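###Markdown
To see what the exponential decay actually does, the schedule object can be called directly with a step number. A quick illustrative check (the printed values are not from a saved run):
###Code
# inspect the decayed learning rate at a few optimizer steps
for step in [0, 5, 10, 50]:
    print(step, float(lr_schedule(step)))
###Output
_____no_output_____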
###Markdown
Task 7: Training the Model
###Code
checkpoint = ModelCheckpoint('model_weights.h5',monitor='val_loss',
save_weights_only=True, mode='min',verbose=0)
callbacks = [PlotLossesCallback(), checkpoint]
batch_size = 32
history = model.fit(
datagen_train.flow(x_train,y_train, batch_size=batch_size,shuffle=True),
steps_per_epoch = len(x_train) // batch_size,
validation_data = datagen_val.flow(x_val,y_val,batch_size=batch_size,shuffle=True),
validation_steps = len(x_val) // batch_size,
epochs = 12,
callbacks=callbacks
)
###Output
_____no_output_____
###Markdown
Task 8: Model Evaluation
###Code
model.evaluate(x_val,y_val)
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns
y_true = np.argmax(y_val,1)
y_pred = np.argmax(model.predict(x_val),1)
print(metrics.classification_report(y_true,y_pred))
print('Classification accuracy: %0.6f' %metrics.accuracy_score(y_true,y_pred))
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]
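# The confusion matrix itself is not plotted above; since seaborn, confusion_matrix and the
# labels list are already in scope, a heatmap is a natural follow-up (illustrative sketch only)
cm = confusion_matrix(y_true, y_pred)
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, fmt='d', xticklabels=labels, yticklabels=labels, cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()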
###Output
_____no_output_____ |
Employee_Attrition_notebook_.ipynb | ###Markdown
Data PreProcessing-
###Code
#importing Required Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime ,timedelta
#load the data
from google.colab import files #upload the data on the colab notebook
uploded = files.upload()
#store the data into dataframe
df= pd.read_csv('train_MpHjUjU.csv' ) # reading the data
#printing the first 5 rows
df.head(5)
df.shape #checking no. of column & rows
df.dtypes #checking datatypes of the column
#get count of the empty value of each column
df.isna().sum()
#check for any missing value
df.isnull().values.any()
#Checking some statistics
df.describe()
#Deleting duplicate values
df = df.drop_duplicates(subset=['Emp_ID' ,'Age', 'Gender','City','Education_Level', 'Salary','Dateofjoining'],keep = 'last')
df.head()
###Output
_____no_output_____
###Markdown
Feature Engineering: adding two new features. 1. Attrition - derived from the LastWorkingDate column. 2. Year - the number of years the employee worked in the company.
###Code
#adding a new column Attrition derived from the LastWorkingDate column
df['Attrition'] = df['LastWorkingDate']
df['Attrition'] = df['Attrition'].fillna(0) #Replacing NAN values to 0
y = pd.DataFrame(df['Attrition'], columns = ['attrition'])
y['attrition']= df['Attrition']
y['attrition'] = y['attrition'].str.isnumeric()
def datetime_to_int(y):
return int(y['attrition'].strftime("%Y-%m-%d")) #replace date to false value
y = y.replace(to_replace=False, value=1) #replace False with 1 for all of those who have left the organisation
y = y.fillna(0) #fill 0 for those who stayed (new attrition feature built from the LastWorkingDate column)
y.head()
#Adding new feature attrition from lastworkingday column
df['Attrition']=y['attrition']
df['Attrition'].value_counts()
#the number of employees that stayed and left the company
sns.countplot(df['Attrition'])
df.shape
###Output
_____no_output_____
###Markdown
* We have 3786 unique data points and 14 columns. Out of them, 2170 data points belong to class 'No' (0) and 1616 data points belong to class 'Yes' (1).
###Code
plt.subplots(figsize=(12,4))
sns.countplot(x='Age',hue='Attrition',data=df, palette='colorblind')
###Output
_____no_output_____
###Markdown
* From age 33 onwards, most of the employees did not leave the job, and the majority of attrition is contributed by the 31 & 32 age group. **Replacing NaN values with the prediction date so we can calculate the number of years an employee worked**
###Code
#filling Na/NaN values
df['LastWorkingDate'] = df['LastWorkingDate'].fillna('2018-01-01') #filling NaN values with the prediction date so we can calculate the number of years an employee worked
df.head(5)
###Output
_____no_output_____
###Markdown
**Introducing new feature using first working day and last working day variables**
###Code
#Introducing new feature using first working day and last working day variables-
df["Dateofjoining"] = pd.to_datetime(df["Dateofjoining"]) #handling timeseries data
df["LastWorkingDate"] = pd.to_datetime(df["LastWorkingDate"])
df4 = pd.DataFrame( columns = ['day','year']) #creating 2 new features number of days employee work and number of years employee work
df4['day'] = df['LastWorkingDate'] - df['Dateofjoining']
df4['year'] = df4["day"] / timedelta(days=365)
print(df4)
###Output
day year
2 78 days 0.213699
4 56 days 0.153425
9 141 days 0.386301
12 58 days 0.158904
17 154 days 0.421918
... ... ...
19082 885 days 2.424658
19090 419 days 1.147945
19096 335 days 0.917808
19097 207 days 0.567123
19103 207 days 0.567123
[3786 rows x 2 columns]
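###Markdown
Equivalently, the tenure in years can be computed in one line with pandas' .dt accessor. This is a small illustrative sketch using the same two date columns; it should match the 'year' column above up to rounding of partial days.
###Code
#alternative one-liner for tenure in years (illustrative)
tenure_years = (df['LastWorkingDate'] - df['Dateofjoining']).dt.days / 365
print(tenure_years.head())
###Output
_____no_output_____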
###Markdown
Dropping unwanted columns from the dataset -
###Code
df=df.drop(labels=['MMM-YY','Dateofjoining','LastWorkingDate','Gender','City','Education_Level'],axis=1) # dropping the table because they are useless in our prediction
#adding new features to our data
df['year'] = df4['year']
df.head()
df.to_csv('test matching data.csv') # creating csv before dropping emp_id column so we can merge test csv with train csv for prediction
df.isna().sum() #checking the na values
###Output
_____no_output_____
###Markdown
**Dropping id table-**
###Code
#dropping id table
df = df.drop (labels='Emp_ID',axis = 1)
df['attrition']= df['Attrition']
df= df.drop(labels='Attrition',axis = 1) #shifting attrition table at last
df.head()
###Output
_____no_output_____
###Markdown
Visualizing our Variables-
###Code
#get the correlation
df.corr()
#visualize the correlation
plt.subplots(figsize=(12,5))
sns.heatmap(df.corr(), annot=True,fmt ='.0%')
df.to_csv('processedtrain.csv') # processed trainnig data csv
df.shape #checking shape of data
###Output
_____no_output_____
###Markdown
Dividing the Data into Two Parts "TRAINING" and "TESTING" -
###Code
#split the data
x = df.iloc[:,0:7].values # splitting all the rows from column 0 to 6 in x
y = df.iloc[: ,7].values #splitting attrtion column in y
print('x{}'.format(x))
print('================================================')
print('y:{}'.format(y))
###Output
x[[2.80000000e+01 5.73870000e+04 1.00000000e+00 ... 0.00000000e+00
2.00000000e+00 2.13698630e-01]
[3.10000000e+01 6.70160000e+04 2.00000000e+00 ... 0.00000000e+00
1.00000000e+00 1.53424658e-01]
[4.30000000e+01 6.56030000e+04 2.00000000e+00 ... 0.00000000e+00
1.00000000e+00 3.86301370e-01]
...
[2.80000000e+01 6.94980000e+04 1.00000000e+00 ... 0.00000000e+00
1.00000000e+00 9.17808219e-01]
[2.90000000e+01 7.02540000e+04 2.00000000e+00 ... 0.00000000e+00
1.00000000e+00 5.67123288e-01]
[3.00000000e+01 7.02540000e+04 2.00000000e+00 ... 4.11480000e+05
2.00000000e+00 5.67123288e-01]]
================================================
y:[1. 0. 1. ... 1. 0. 0.]
###Markdown
Building The model from "TRAINING DATA SET"-
###Code
#split the data into 20% testing and 80% training
from sklearn.model_selection import train_test_split #importing train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.20,train_size= 0.80 , random_state =0 ) #splitting data , 80-20=train-test split
#using KNN Classifier as our model
from sklearn.neighbors import KNeighborsClassifier #importing knnClassifier
knn =KNeighborsClassifier(n_neighbors=7,weights='uniform',algorithm='auto') # n=7
knn.fit(x_train, y_train) #fitting the model
y_pred = knn.predict(x_test) # predicting cross validation test data
###Output
_____no_output_____
###Markdown
Evaluating the KNN classifier using the f1_score metric -
###Code
from sklearn.metrics import f1_score #importing f1_score from sklearn
f1 = f1_score(y_test,y_pred) #getting the f1_score
print(f1)
###Output
0.7378640776699029
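###Markdown
Beyond a single F1 value, the per-class precision and recall are often worth checking. A quick sketch with scikit-learn on the same held-out split (not part of the original run):
###Code
#per-class breakdown for the validation split
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
###Output
_____no_output_____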
###Markdown
4. Test data: * Creating test file from given 'test_hXY9mYw.csv'
###Code
#Creating test file from given 'test_hXY9mYw.csv'
df1=pd.read_csv('/content/test matching data.csv')
df2=pd.read_csv('/content/test_hXY9mYw.csv')
test_data = pd.merge(df2,df1) #merginf df1 and df2 using EMP_ID column
#dropping duplicate values
test_data = test_data.drop(labels=['Attrition','Unnamed: 0'],axis=1)
test_data=test_data.drop_duplicates(subset= 'Emp_ID',keep='last')
test_data.to_csv('test_data.csv')
test_data.shape
###Output
_____no_output_____
###Markdown
Dropping the Emp_ID column from test data
###Code
#dropping the Emp_ID column -
test_data= test_data.drop(labels='Emp_ID',axis=1)
test_data.head()
###Output
_____no_output_____
###Markdown
**Predicting the given test-data point** :-
###Code
Final_result = knn.predict(test_data) #predicting, for the given test data points, how many employees will leave the company in the Jan-2018 quarter
#Creating submission file of the final result-
df6=pd.read_csv('/content/test_data.csv')
df6=df6.drop(labels='Unnamed: 0', axis=1)
test= pd.DataFrame(columns=['Emp_ID','Target'])
test['Emp_ID']=df6['Emp_ID']
test['Target']=Final_result
test.head(20)
test['Target'].value_counts() #checking predicted values_count
###Output
_____no_output_____
###Markdown
Conclusion :- * From the above result, out of 741 employees we can expect that 160 employees will leave the company in the upcoming two quarters (01 Jan 2018 - 01 July 2018) and 581 employees will remain at their designation. Suggestions :- As a data scientist, from the above analysis and prediction I would suggest that 1. the company should offer a higher salary to more experienced people, and 2. the company should give a better designation to long-serving employees. Submission File :-
###Code
test.to_csv('Final_result.csv') #submission file
###Output
_____no_output_____ |
Natural_PQC_sensing.ipynb | ###Markdown
Natural parameterized quantum circuit for multi-parameter sensing. "Natural parameterized quantum circuit" by T. Haug, M. S. Kim.

The natural parameterized quantum circuit (NPQC) is a parameterized quantum circuit which has Euclidean quantum geometry. That means that the quantum Fisher information metric is the identity for a particular parameter set, which we call the reference parameter. The NPQC is very useful for various applications:
- Training variational quantum algorithms
- Multi-parameter quantum sensing
- Preparation of superposition states

Here, we study multi-parameter sensing using the NPQC. The goal is to determine the unknown parameters of the NPQC by measuring the quantum state. We can sense many parameters at the same time by sampling in the computational basis. The implementation is based on QuTiP. @author: Tobias Haug (github txhaug), Imperial College London
###Code
import qutip as qt
from functools import partial
import operator
from functools import reduce
import numpy as np
import scipy
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Set parameters for NPQC here
###Code
n_qubits=6 #number qubits
depth=6 #number of layers, is the number of layers of parameterized single qubit rotations
type_circuit=1##0: natural parameterized quantum circuit (NPQC), 1: natural parameterized quantum circuit with y rotations only for sensing
initial_angles=1 ##0: random angles 1: reference parameters \theta_r that has QFIM =I
distance_parameters_estimation=0.4 # norm of parameters to be estimated
random_seed=1#seed of random generator
n_samples=10**7 ##number of measurements for sensing
def prod(factors):
return reduce(operator.mul, factors, 1)
def flatten(l):
return [item for sublist in l for item in sublist]
#tensors operators together
def genFockOp(op,position,size,levels=2,opdim=0):
opList=[qt.qeye(levels) for x in range(size-opdim)]
opList[position]=op
return qt.tensor(opList)
#construct from parameter 1D list a 2D array with [depth,n_qubits], ignore unused rotations where paulis2d=0
def construct_2d_parameters(angles,paulis2d,extraangles=0):
depth,n_qubits=np.shape(paulis2d)
angles2d=np.zeros([depth,n_qubits])
counter=0
for i in range(depth):
for j in range(n_qubits):
if(paulis2d[i,j]>0): #only take parameters where paulis is greater 0, indicating they are variable parameters
angles2d[i,j]=angles[counter]
counter+=1
if(extraangles==0):
return angles2d
else:
return angles2d,angles[counter:]
#take parameters as a 2D array with [depth,n_qubits] to do 1D list, ignore unused rotations where paulis2d=0
def construct_1d_parameters(angles2d,paulis2d):
depth,n_qubits=np.shape(paulis2d)
angles1d=[]
for i in range(depth):
for j in range(n_qubits):
if(paulis2d[i,j]>0): #only take parameters where paulis is greater 0, indicating they are variable parameters
angles1d.append(angles2d[i,j])
return np.array(angles1d)
if(n_qubits%2==1):
raise NameError("Only even number of qubits allowed")
#random generator used
rng = np.random.default_rng(random_seed)
#define angles for circuit
ini_angles=np.zeros([depth,n_qubits])
if(initial_angles==0):
ini_angles=rng.random([depth,n_qubits])*2*np.pi
elif(initial_angles==1): #choose angles as \theta_r as defined in paper
ini_angles[1:depth:2,:]=0
ini_angles[0:depth:2,:]=np.pi/2
#note that not all angles are actually used, the ones where ini_pauli=0 are ignored
#define rotations for circuit in each layer, 0: identity, 1: X, 2:Y 3:Z
ini_pauli=np.zeros([depth,n_qubits],dtype=int)
##set initial layer of pauli rotations
if(type_circuit==0):#NPQC
#set first and second layer, rest comes later
ini_pauli[0,:]=2 #y rotation
if(depth>1):
ini_pauli[1,:]=3 #z rotation
elif(type_circuit==1): #NPQC with y rotations only for sensing
#set first and second layer, rest comes later
ini_pauli[0,0:n_qubits:2]=2 #y rotation
ini_pauli[0,1:n_qubits:2]=-22 #fix y pi/2 rotation on odd qubit index
##define entangling layers and add more pauli rotations
if(type_circuit==0 or type_circuit==1):
#construct natural parameterized circuit
entangling_gate_index_list=[[] for i in range(depth)] ##stores where entangling gates are placed
orderList=[]
for i in range(n_qubits//2):
if(i%2==0):
orderList.append(i//2)
else:
orderList.append((n_qubits-i)//2)
if(n_qubits>1):
shiftList=[orderList[0]]
else:
shiftList=[]
for i in range(1,n_qubits//2):
shiftList.append(orderList[i])
shiftList+=shiftList[:-1]
#this list gives which entangling gates are applied in which layer
for j in range(min(len(shiftList),int(np.ceil(depth/2))-1)):
entangling_gate_index_list[1+2*j]=[[2*i,(2*i+1+2*shiftList[j])%n_qubits,3] for i in range(n_qubits//2)]
#this is the 2 qubit entangling operation, it is a pi/2 y rotation on first qubit with CPHASE gate
U_entangling=qt.qip.operations.csign(2,0,1)*qt.tensor(qt.qip.operations.ry(np.pi/2),qt.qeye(2))
for i in range(len(entangling_gate_index_list)-1):
if(len(entangling_gate_index_list[i])>0):
for j in range(len(entangling_gate_index_list[i])):
ini_pauli[i+1,entangling_gate_index_list[i][j][0]]=2
if(i+2<depth and type_circuit==0):##add z rotations, but not for sensing NPQC
ini_pauli[i+2,entangling_gate_index_list[i][j][0]]=3
#operators for circuit
levels=2#
opZ=[genFockOp(qt.sigmaz(),i,n_qubits,levels) for i in range(n_qubits)]
opX=[genFockOp(qt.sigmax(),i,n_qubits,levels) for i in range(n_qubits)]
opY=[genFockOp(qt.sigmay(),i,n_qubits,levels) for i in range(n_qubits)]
opId=genFockOp(qt.qeye(levels),0,n_qubits)
opZero=opId*0
zero_state=qt.tensor([qt.basis(levels,0) for i in range(n_qubits)])
#construct unitaries for entangling layer
all_entangling_layers=[]
for ind in range(len(entangling_gate_index_list)):
if(type_circuit==0 or type_circuit==1):
entangling_gate_index=entangling_gate_index_list[ind]
if(len(entangling_gate_index)==0):
entangling_layer=opId
else:
entangling_layer=prod([qt.qip.operations.gate_expand_2toN(U_entangling,n_qubits,j,k) for j,k,n in entangling_gate_index[::-1]])
all_entangling_layers.append(entangling_layer)
#calculate number of parameters
n_parameters=len(construct_1d_parameters(ini_angles,ini_pauli))
##check which paulis at what depth and qubit is identitity or not
parameter_where=np.zeros([n_parameters,2],dtype=int)
counter=0
for i in range(depth):
for j in range(n_qubits):
if(ini_pauli[i,j]>0): #count only paulis with entry greater zero, indicating its a parameter
parameter_where[counter]=[i,j]
counter+=1
#save single qubit rotations unitary with fixed ini_angles. Use them later for the adjoint circuit needed for sensing
save_initial_rot_op=[]
for j in range(depth):
rot_op=[]
for k in range(n_qubits):
angle=ini_angles[j][k]
type_pauli=ini_pauli[j][k]
if(type_pauli==1):
rot_op.append(qt.qip.operations.rx(angle))
elif(type_pauli==2):
rot_op.append(qt.qip.operations.ry(angle))
elif(type_pauli==3):
rot_op.append(qt.qip.operations.rz(angle))
elif(type_pauli==0):
rot_op.append(qt.qeye(2))
elif(type_pauli==-22): #fixed rotation around y axis
rot_op.append(qt.qip.operations.ry(np.pi/2))
save_initial_rot_op.append(qt.tensor(rot_op))
##H=opZ[0]*opZ[1] #local Hamiltonian to calculate energy and gradient from
print("Number of parameters of PQC",n_parameters)
##calc_mode #0: calc all gradients 1: calc frame potential only 2: calc both, 3: only get gradient
##can apply adjoint unitary with fixed angles "add_adjoint_unitary" for sensing
def do_calc(input_angles,input_paulis,get_gradients=True,add_adjoint_unitary=False):
initial_state_save=qt.tensor([qt.basis(levels,0) for i in range(n_qubits)])
#save here quantum state of gradient for qfi
grad_state_list=[]
#list of values of gradient
gradient_list=np.zeros(n_parameters)
save_rot_op=[]
#save single-qubit rotations here so we can reuse them
for j in range(depth):
rot_op=[]
for k in range(n_qubits):
angle=input_angles[j][k]
type_pauli=input_paulis[j][k]
if(type_pauli==1):
rot_op.append(qt.qip.operations.rx(angle))
elif(type_pauli==2):
rot_op.append(qt.qip.operations.ry(angle))
elif(type_pauli==3):
rot_op.append(qt.qip.operations.rz(angle))
elif(type_pauli==0):
rot_op.append(qt.qeye(2))
elif(type_pauli==-22):
rot_op.append(qt.qip.operations.ry(np.pi/2))
save_rot_op.append(qt.tensor(rot_op))
#p goes from -1 to n_parameters-1. -1 is to calculate quantum state, rest for gradient
if(get_gradients==True):
#calculate gradients by doing n_parameters+1 calculations
n_p=n_parameters
else:
#without gradient, need only one calculation
n_p=0
for p in range(-1,n_p):
initial_state=qt.Qobj(initial_state_save)
for j in range(depth):
apply_rot_op=save_rot_op[j]
#for p>=0, we are calculating gradients. Here, we need to add the derivative of the respective parameter
if(p!=-1 and j==parameter_where[p][0]):
which_qubit=parameter_where[p][1]
type_pauli=input_paulis[j][which_qubit]
if(type_pauli==1):
apply_rot_op=apply_rot_op*(-1j*opX[which_qubit]/2)
elif(type_pauli==2):
apply_rot_op=apply_rot_op*(-1j*opY[which_qubit]/2)
elif(type_pauli==3):
apply_rot_op=apply_rot_op*(-1j*opZ[which_qubit]/2)
#apply single qubit rotations
initial_state=apply_rot_op*initial_state
#apply entangling layer
initial_state=all_entangling_layers[j]*initial_state
#after constructing the circuit, apply inverse with parameters fixed to ini_angles
if(add_adjoint_unitary==True):#apply inverse of circuit for sensing
for j in np.arange(depth)[::-1]:
initial_state=all_entangling_layers[j].dag()*initial_state
initial_state=save_initial_rot_op[j].dag()*initial_state
if(p==-1):
#calculate loss
circuit_state=qt.Qobj(initial_state)#state generated by circuit
if(loss_hamiltonian==True):
#loss is hamiltonian
loss=qt.expect(H,circuit_state)
else:
#loss is infidelity with target state H_state
loss=1-np.abs(circuit_state.overlap(H_state))**2
else:
#calculate gradient
grad_state_list.append(qt.Qobj(initial_state))#state with gradient applied for p-th parameter
if(loss_hamiltonian==True):
gradient_list[p]=2*np.real(circuit_state.overlap(H*initial_state))
else:
gradient_list[p]=2*np.real(circuit_state.overlap(initial_state)-circuit_state.overlap(H_state)*H_state.overlap(initial_state))
return circuit_state,grad_state_list,loss,gradient_list
#construct parameters of state to be estimated
loss_hamiltonian=False #loss is infidelity 1-F
#we shift the parameterized quantum circuit from its initial parameters by a fixed distance.
#we know approximately what distance corresponds to what fidelity
#get random normalized parameter vector
random_vector_opt_normed=(2*rng.random(np.shape(ini_pauli))-1)*(ini_pauli>0)
random_vector_opt_normed=construct_1d_parameters(random_vector_opt_normed,ini_pauli)
random_vector_opt_normed=random_vector_opt_normed/np.sqrt(np.sum(np.abs(random_vector_opt_normed)**2))
random_vector_opt_normed=construct_2d_parameters(random_vector_opt_normed,ini_pauli)
#shift parameters by the following distance. We use the resulting state for estimation
factor_rand_vector=distance_parameters_estimation
#construct parameter of state to be learned
target_angles=ini_angles+random_vector_opt_normed*factor_rand_vector
H_state=zero_state #set so do_calc runs properly
#quantum fisher information metric
#calculated as \text{Re}(\braket{\partial_i \psi}{\partial_j \psi}-\braket{\partial_i \psi}{\psi}\braket{\psi}{\partial_j \psi})
##get gradients for quantum state
circuit_state,grad_state_list,energy,gradient_list=do_calc(ini_angles,ini_pauli,get_gradients=True)
#first, calculate elements \braket{\psi}{\partial_j \psi})
single_qfi_elements=np.zeros(n_parameters,dtype=np.complex128)
for p in range(n_parameters):
#print(circuit_state.overlap(grad_state_list[p]))
single_qfi_elements[p]=circuit_state.overlap(grad_state_list[p])
#calculate the qfi matrix
qfi_matrix=np.zeros([n_parameters,n_parameters])
for p in range(n_parameters):
for q in range(p,n_parameters):
qfi_matrix[p,q]=np.real(grad_state_list[p].overlap(grad_state_list[q])-np.conjugate(single_qfi_elements[p])*single_qfi_elements[q])
#use fact that qfi matrix is real and hermitian
for p in range(n_parameters):
for q in range(p+1,n_parameters):
qfi_matrix[q,p]=qfi_matrix[p,q]
##plot the quantum Fisher information metric (QFIM)
#should be a diagonal with zero off-diagonal entries for initial_angles=1
plt.imshow(qfi_matrix)
if(type_circuit==1): #NPQC with y rotations only for sensing
hilbertspace=2**n_qubits
##get reference state and gradients to determine which parameter belongs to which computational state
circuit_state_reuse,grad_state_list_reuse,_,gradient_list=do_calc(ini_angles,ini_pauli,get_gradients=True,add_adjoint_unitary=True)
##first, figure out which parameter changes which computational basis state
parameter_which_state=np.zeros(n_parameters,dtype=int) #tells us which state belongs to which parameter
state_which_parameter=np.ones(hilbertspace,dtype=int)*-1
for i in range(n_parameters):
grad_abs=np.abs(grad_state_list_reuse[i].data.toarray()[:,0])**2
index=(np.arange(hilbertspace)[grad_abs>10**-14])
if(len(index)!=1):
raise NameError("More than one direction!")
else:
parameter_which_state[i]=index[0]
state_which_parameter[index[0]]=i
#check if a computational basis state belongs to more than one parameter
if(len(np.unique(parameter_which_state))!=len(parameter_which_state)):
raise NameError("Double occupations of computational states for sensing!")
#get difference between target angles and reference angles. We now want to estimate this from measurements!
exact_sensing_parameters=construct_1d_parameters(target_angles-ini_angles,ini_pauli)
norm_sensing_parameters=np.sqrt(np.sum(np.abs(exact_sensing_parameters)**2))
print("Norm of parameters to be sensed",norm_sensing_parameters)
##get state that we use for sensing and want to know its parameters
target_state,_,energy,_=do_calc(target_angles,ini_pauli,get_gradients=False,add_adjoint_unitary=True)
#sample from target state, then identify parameters
probs=np.abs(target_state.data.toarray()[:,0])**2
print("Probability zero state",probs[0])
#get the exact probability associated with each parameter
prob_parameters=np.zeros(n_parameters)
for i in range(n_parameters):
prob_parameters[i]=probs[parameter_which_state[i]]
#now sample probabilities to simulate measurements with finite number of measurements
##get sampled probabilities for each sensing parameter
sampled_probs=np.zeros(n_parameters)
sample_index = np.random.choice(hilbertspace,n_samples,p=probs)
for k in range(n_samples):
index_parameter=state_which_parameter[sample_index[k]]
if(index_parameter>=0):
sampled_probs[index_parameter]+=1
sampled_probs/=n_samples
##parameters we estimated by sampling state
sampled_estimation_parameters=2*np.sqrt(sampled_probs)
MSE_bound=n_parameters/n_samples
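#added note (assumption, not in the original): the estimator used here relies on a small shift theta of a
#y rotation putting amplitude ~ sin(theta/2) ~ theta/2 into its associated basis state, so prob ~ (theta/2)**2
#and theta ~ 2*sqrt(prob); MSE_bound above is then the shot-noise scale for estimating n_parameters
#outcome probabilities from n_samples measurement shots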
##parameters as estimated by our protocol for infinite number of shots
infinite_shots_estimation_parameters=2*np.sqrt(prob_parameters)
##error for infinite sampling
MSE_infinite=np.mean(np.abs(infinite_shots_estimation_parameters-np.abs(exact_sensing_parameters))**2)
rel_RMSE_error_infinite=np.sqrt(MSE_infinite)/np.mean(np.abs(exact_sensing_parameters))
MSE_sampled=np.mean(np.abs(sampled_estimation_parameters-np.abs(exact_sensing_parameters))**2)
rel_RMSE_error_sampled=np.sqrt(MSE_sampled)/np.mean(np.abs(exact_sensing_parameters))
#MSE_sampled=np.mean(np.abs(sampled_estimation_parameters-np.abs(infinite_shots_estimation_parameters))**2)
print("Sensing",n_parameters,"parameters with",n_samples)
print("Mean-square error of infinite samples",MSE_infinite)
print("MSE of infinite samples relative to exact norm of exact parameters",rel_RMSE_error_infinite)
print("Mean-square error of finite samples",MSE_sampled)
print("MSE sampled with finite shots relative to norm of exact parameters",rel_RMSE_error_sampled)
###Output
Norm of parameters to be sensed 0.39999999999999986
Probability zero state 0.960759461930595
Sensing 9 parameters with 10000000
Mean-square error of infinite samples 6.414871113159221e-06
MSE of infinite samples relative to exact norm of exact parameters 0.022273670207939626
Mean-square error of finite samples 6.599418880539516e-06
MSE sampled with finite shots relative to norm of exact parameters 0.022591791173830828
|
Phase II/Surrogate Model using VarrImp Plot.ipynb | ###Markdown
Analysis II. In this thesis I explore another method for analyzing the importance of hyperparameters. My new approach rests on the assumption below.
Assumption
* Why not treat the database of `hyperparameters` that will be turned in by the DB team as a dataset in its own right? This approach is called building a `Surrogate Model`.
* The dataset will consist of metadata, the algorithm, the leaderboard (collected from H2O), and the hyperparameters of the respective algorithms, all arranged as columns with their corresponding values. I am going to use this to build a model around it.
Below are the columns, which are dummy for now, since I built them myself according to my assumptions. The DB team has yet to turn in its database; I am waiting for a good model among what they deliver to be used in my thesis to build a model. A sketch of one such row follows below.
The columns I have used so far, with their descriptions:
* runid - run id for a dataset and its iteration
* dataset - the dataset and the link it is fetched from
* problem - whether it is classification/regression
* runtime - runtime for which H2O was run on this dataset
* columns - number of columns in the dataset
* rows - number of rows in the dataset
* tags - what genre it belongs to
* algo - algorithm applied on this dataset by H2O
* model_id - the model id generated
* ntree - value for n_estimators
* max_depth - value for the hyperparameter
* learn_rate - value for the hyperparameter
* mean_residual_deviance - value for the metric
* rmse - value for the metric
* mse - value for the metric
* mae - value for the metric
* rmsle - value for the metric
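To make the assumption concrete, here is a minimal sketch (added for illustration; all values and the URL are invented and do not come from the real database) of what a single row of such a surrogate dataset could look like:
```python
import pandas as pd

surrogate_row = pd.DataFrame([{
    "runid": 1, "dataset": "https://example.org/some_dataset.csv", "problem": "classification",
    "runtime": 100, "columns": 25, "rows": 30000, "tags": "finance",
    "algo": "GBM", "model_id": "GBM_1_AutoML_dummy", "ntree": 50, "max_depth": 6,
    "learn_rate": 0.1, "mean_residual_deviance": None,
    "rmse": 0.32, "mse": 0.10, "mae": 0.25, "rmsle": None,
}])
print(surrogate_row.T)
```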
###Code
import h2o
from h2o.automl import H2OAutoML
import pandas as pd
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
h2o.init(min_mem_size_GB=2)
df = h2o.import_file('./hyperparamter_db_test.csv')
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
###Markdown
This dataset was created by hand and the data are not real. It is just to test the thesis and the hypothesis, to check whether this will give us good analysis results.
###Code
df.head()
###Output
_____no_output_____
###Markdown
I am running a simple AutoML job with a runtime of 100 seconds. The results will be better if it is run for more time.
###Code
aml = H2OAutoML(max_runtime_secs=100)
X=df.columns
###Output
_____no_output_____
###Markdown
Below are the predictors we have considered
###Code
X = ['runid',
'dataset',
'problem',
'runtime',
'columns',
'rows',
'tags',
'model_id',
'ntree',
'max_depth',
'learn_rate',
'mean_residual_deviance',
'rmse',
'mse',
'mae',
'rmsle']
y='algo'
aml.train(x=X,y=y,training_frame=df)
###Output
AutoML progress: |████████████████████████████████████████████████████████| 100%
###Markdown
Running H2O AutoML for 100 seconds on this simple surrogate dataset, we get 68 models generated by H2O.
###Code
aml_leaderboard_df=aml.leaderboard.as_data_frame()
aml_leaderboard_df
model_set=aml_leaderboard_df['model_id']
mod_best=h2o.get_model(model_set[0])
mod_best
###Output
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_grid_0_AutoML_20190423_134333_model_32
ModelMetricsMultinomial: gbm
** Reported on train data. **
MSE: 0.005379815592386542
RMSE: 0.07334722620785698
LogLoss: 0.07493269418958665
Mean Per-Class Error: 0.0
Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
###Markdown
Now, what we do here is important and is going to give us a definite result for our analysis. Something called variable importance is brought into the picture to support the analysis of our thesis. `Variable importance is an indication of which predictors are most useful for predicting the response variable. Various measures of variable importance have been proposed in the data mining literature. The most important variables might not be the ones near the top of the tree.` Below is what H2O returns when this is called on any of the models except `StackedEnsemble`.
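If the raw numbers behind the plot are needed, H2O models can (to the best of my knowledge) also return the variable importances as a table; a minimal sketch:
```python
# varimp(use_pandas=True) should return a DataFrame with columns such as
# variable, relative_importance, scaled_importance and percentage.
varimp_df = mod_best.varimp(use_pandas=True)
print(varimp_df.head(10))
```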
###Code
mod_best.varimp_plot()
###Output
_____no_output_____ |
chapter02-quickstart/chapter2.ipynb | ###Markdown
2.2 First Steps with PyTorch. PyTorch's clean design makes it easy to get started. Before diving deeper into PyTorch, this section first introduces some PyTorch basics so that readers get a rough overview and can build a simple neural network with it. Some of the content may not be fully clear yet; there is no need to dig into it now, as Chapters 3 and 4 of this book will cover it in depth. This section is based on the official PyTorch tutorial [^1], with additions, deletions, and modifications so that the content matches the newer PyTorch interfaces and is better suited for beginners. The book also assumes basic familiarity with Numpy; for other background knowledge we recommend the CS231n tutorial [^2]. [^1]: http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html [^2]: http://cs231n.github.io/python-numpy-tutorial/ Tensor. A Tensor is an important data structure in PyTorch and can be thought of as a multi-dimensional array. It can be a number (scalar), a 1-D array (vector), a 2-D array (matrix), or an array of even higher dimension. Tensors are similar to Numpy's ndarrays, but Tensors can be accelerated on a GPU. The Tensor interface closely resembles that of Numpy and Matlab; the examples below show its basic usage.
###Code
from __future__ import print_function
import torch as t
t.__version__
# 构建 5x3 矩阵,只是分配了空间,未初始化
x = t.Tensor(5, 3)
x = t.Tensor([[1,2],[3,4]])
x
# 使用[0,1]均匀分布随机初始化二维数组
x = t.rand(5, 3)
x
print(x.size()) # 查看x的形状
x.size()[1], x.size(1) # 查看列的个数, 两种写法等价
###Output
torch.Size([5, 3])
###Markdown
`torch.Size` is a subclass of tuple, so it supports all tuple operations, such as x.size()[0].
###Code
y = t.rand(5, 3)
# 加法的第一种写法
x + y
# 加法的第二种写法
t.add(x, y)
# 加法的第三种写法:指定加法结果的输出目标为result
result = t.Tensor(5, 3) # 预先分配空间
t.add(x, y, out=result) # 输入到result
result
print('最初y')
print(y)
print('第一种加法,y的结果')
y.add(x) # 普通加法,不改变y的内容
print(y)
print('第二种加法,y的结果')
y.add_(x) # inplace 加法,y变了
print(y)
###Output
最初y
tensor([[0.4018, 0.0661, 0.1942],
[0.9261, 0.1729, 0.3974],
[0.3494, 0.5539, 0.0621],
[0.5915, 0.7290, 0.3228],
[0.9858, 0.6441, 0.4047]])
第一种加法,y的结果
tensor([[0.4018, 0.0661, 0.1942],
[0.9261, 0.1729, 0.3974],
[0.3494, 0.5539, 0.0621],
[0.5915, 0.7290, 0.3228],
[0.9858, 0.6441, 0.4047]])
第二种加法,y的结果
tensor([[1.3989, 0.5617, 0.9267],
[1.4302, 0.4502, 0.5107],
[0.5054, 1.4337, 0.4496],
[1.4012, 1.5774, 0.9716],
[1.5367, 1.2256, 0.5118]])
###Markdown
Note that functions whose names end with an underscore **`_`** modify the Tensor itself. For example, `x.add_(y)` and `x.t_()` change `x`, whereas `x.add(y)` and `x.t()` return a new Tensor and leave `x` unchanged.
###Code
# Tensor的选取操作与Numpy类似
x[:, 1]
###Output
_____no_output_____
###Markdown
Tensors support many more operations, including math, linear algebra, selection, slicing, and so on, with an interface designed to be very similar to Numpy's. Chapter 3 covers their usage in more detail. Interoperating between Tensors and Numpy arrays is easy and fast: for operations that Tensors do not support, you can convert to a Numpy array, process it, and then convert back to a Tensor.
###Code
a = t.ones(5) # 新建一个全1的Tensor
a
b = a.numpy() # Tensor -> Numpy
b
import numpy as np
a = np.ones(5)
b = t.from_numpy(a) # Numpy->Tensor
print(a)
print(b)
###Output
[1. 1. 1. 1. 1.]
tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
###Markdown
Tensors and numpy objects share memory, so converting between them is fast and consumes almost no resources. This also means that if one of them changes, the other changes with it.
###Code
b.add_(1) # 以`_`结尾的函数会修改自身
print(a)
print(b) # Tensor和Numpy共享内存
###Output
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
###Markdown
If you want to get the value of a single element, use `scalar.item()`. Indexing directly with `tensor[idx]` still returns a tensor: a 0-dim tensor, usually called a scalar.
###Code
scalar = b[0]
scalar
scalar.size() #0-dim
scalar.item() # 使用scalar.item()能从中取出python对象的数值
tensor = t.tensor([2]) # 注意和scalar的区别
tensor,scalar
tensor.size(),scalar.size()
# 只有一个元素的tensor也可以调用`tensor.item()`
tensor.item(), scalar.item()
###Output
_____no_output_____
###Markdown
In addition, pytorch provides an interface very similar to `np.array`: `torch.tensor`; the two are used in much the same way.
###Code
tensor = t.tensor([3,4]) # 新建一个包含 3,4 两个元素的tensor
scalar = t.tensor(3)
scalar
old_tensor = tensor
new_tensor = old_tensor.clone()
new_tensor[0] = 1111
old_tensor, new_tensor
###Output
_____no_output_____
###Markdown
Note that `t.tensor()` and `tensor.clone()` always copy the data, so the new tensor no longer shares memory with the original data. If you want shared memory, use `torch.from_numpy()` or `tensor.detach()` to create the new tensor; both share memory with the source.
###Code
new_tensor = old_tensor.detach()
new_tensor[0] = 1111
old_tensor, new_tensor
###Output
_____no_output_____
###Markdown
A Tensor can be converted to a GPU Tensor with the `.cuda` method to benefit from GPU-accelerated computation.
###Code
# 在不支持CUDA的机器下,下一步还是在CPU上运行
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
x = x.to(device)
y = y.to(x.device)
z = x+y
###Output
_____no_output_____
###Markdown
You can also copy a tensor to the GPU with `tensor.cuda()`, but this style is not really recommended. You may notice that the GPU computation is not much faster here; that is because x and y are small and the operation is simple, and moving data from main memory to GPU memory adds extra overhead. The advantage of the GPU only shows up for large-scale data and complex computation. autograd: automatic differentiation. Deep-learning algorithms essentially compute derivatives via backpropagation, and PyTorch's **`autograd`** module implements this. For every operation on Tensors, autograd can provide the differentiation automatically, avoiding the tedious process of computing derivatives by hand. ~~`autograd.Variable` is the core class of Autograd; it is a thin wrapper around a Tensor and supports almost all Tensor operations. Once a Tensor is wrapped in a Variable, you can call its `.backward` to run backpropagation and compute all gradients automatically~~ ~~The data structure of Variable is shown in Figure 2-6.~~ *Since 0.4, Variable has been merged into Tensor, and the automatic differentiation that Variable used to provide is supported by Tensor directly. You can still write Variable(tensor), but it does nothing. We recommend simply using tensors from now on*. To enable autograd for a Tensor, just set `tensor.requires_grad=True`. ~~Variable has three main attributes.~~ ~~- `data`: stores the Tensor wrapped by the Variable~~ ~~- `grad`: stores the gradient of `data`; `grad` is itself a Variable rather than a Tensor and has the same shape as `data`.~~ ~~- `grad_fn`: points to a `Function` object used during backpropagation to compute the gradients of the inputs; details are covered in the next chapter.~~
###Code
# 为tensor设置 requires_grad 标识,代表着需要求导数
# pytorch 会自动调用autograd 记录操作
x = t.ones(2, 2, requires_grad=True)
# 上一步等价于
# x = t.ones(2,2)
# x.requires_grad = True
x
y = x.sum()
y
y.grad_fn
y.backward() # 反向传播,计算梯度
# y = x.sum() = (x[0][0] + x[0][1] + x[1][0] + x[1][1])
# 每个值的梯度都为1
x.grad
###Output
_____no_output_____
###Markdown
Note: `grad` is accumulated during backpropagation, which means every backward pass adds to the previously stored gradients, so the gradients need to be zeroed out before each backward pass.
###Code
y.backward()
x.grad
y.backward()
x.grad
# 以下划线结束的函数是inplace操作,会修改自身的值,就像add_
x.grad.data.zero_()
y.backward()
x.grad
###Output
_____no_output_____
###Markdown
Neural networks. Autograd implements backpropagation, but writing deep-learning code with it directly is still somewhat cumbersome in many cases. torch.nn is a modular interface designed specifically for neural networks. nn is built on top of Autograd and can be used to define and run neural networks. nn.Module is the most important class in nn; think of it as a wrapper for a network that contains the definitions of its layers together with a forward method, and calling forward(input) returns the result of the forward pass. Below we use the earliest convolutional neural network, LeNet, as an example of how to implement a network with `nn.Module`. The LeNet architecture is shown in Figure 2-7. It is a basic feed-forward network: it takes an input, passes it through the layers one after another, and produces an output. Defining the network: to define a network, inherit from `nn.Module`, implement its forward method, and put the layers with learnable parameters in the constructor `__init__`. If a layer (such as ReLU) has no learnable parameters, it can go in the constructor or not, but we recommend leaving it out and using `nn.functional` in forward instead.
###Code
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
# nn.Module子类的函数必须在构造函数中执行父类的构造函数
# 下式等价于nn.Module.__init__(self)
super(Net, self).__init__()
# 卷积层 '1'表示输入图片为单通道, '6'表示输出通道数,'5'表示卷积核为5*5
self.conv1 = nn.Conv2d(1, 6, 5)
# 卷积层
self.conv2 = nn.Conv2d(6, 16, 5)
# 仿射层/全连接层,y = Wx + b
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# 卷积 -> 激活 -> 池化
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
# reshape,‘-1’表示自适应
x = x.view(x.size()[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
print(net)
###Output
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
###Markdown
As long as the forward function is defined in a subclass of nn.Module, the backward function is implemented automatically (via `autograd`). Inside `forward` you can use any function supported by tensors, as well as Python constructs such as if, for loops, print, and logging; the code is written just like standard Python. The learnable parameters of the network are returned by `net.parameters()`, and `net.named_parameters` returns both the learnable parameters and their names.
###Code
params = list(net.parameters())
print(len(params))
for name,parameters in net.named_parameters():
print(name,':',parameters.size())
###Output
conv1.weight : torch.Size([6, 1, 5, 5])
conv1.bias : torch.Size([6])
conv2.weight : torch.Size([16, 6, 5, 5])
conv2.bias : torch.Size([16])
fc1.weight : torch.Size([120, 400])
fc1.bias : torch.Size([120])
fc2.weight : torch.Size([84, 120])
fc2.bias : torch.Size([84])
fc3.weight : torch.Size([10, 84])
fc3.bias : torch.Size([10])
###Markdown
The inputs and outputs of the forward function are both Tensors.
###Code
input = t.randn(1, 1, 32, 32)
out = net(input)
out.size()
net.zero_grad() # 所有参数的梯度清零
out.backward(t.ones(1,10)) # 反向传播
###Output
_____no_output_____
###Markdown
Note that torch.nn only supports mini-batches; it does not accept a single sample at a time, i.e. the input must always be a batch. If you do want to feed a single sample, use `input.unsqueeze(0)` to set the batch_size to 1. For example, the input of `nn.Conv2d` must be 4-dimensional, of the form $nSamples \times nChannels \times Height \times Width$. You can set nSamples to 1, i.e. $1 \times nChannels \times Height \times Width$. Loss functions: nn implements most of the loss functions used in neural networks, for example nn.MSELoss for mean squared error and nn.CrossEntropyLoss for cross-entropy loss.
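A quick added illustration of the mini-batch requirement (a minimal sketch using the `net` and `t` defined above):
```python
single_sample = t.randn(1, 32, 32)   # nChannels x Height x Width, no batch dimension
batch = single_sample.unsqueeze(0)   # shape becomes 1 x 1 x 32 x 32
out = net(batch)                     # now a valid input for the Conv2d-based net
```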
###Code
output = net(input)
target = t.arange(0,10).view(1,10).float()
criterion = nn.MSELoss()
loss = criterion(output, target)
loss # loss是个scalar
###Output
_____no_output_____
###Markdown
If you trace the loss backwards (using the `grad_fn` attribute), you can see its computation graph: ```input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d -> view -> linear -> relu -> linear -> relu -> linear -> MSELoss -> loss``` When `loss.backward()` is called, this graph is built dynamically and differentiated automatically, i.e. the derivatives with respect to the graph's parameters (Parameter) are computed automatically.
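To peek at one step of that graph explicitly (an added snippet; the exact node names printed may differ between PyTorch versions):
```python
print(loss.grad_fn)                        # e.g. an MseLossBackward node
print(loss.grad_fn.next_functions[0][0])   # the node that produced the loss input
```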
###Code
# 运行.backward,观察调用之前和调用之后的grad
net.zero_grad() # 把net中所有可学习参数的梯度清零
print('反向传播之前 conv1.bias的梯度')
print(net.conv1.bias.grad)
loss.backward()
print('反向传播之后 conv1.bias的梯度')
print(net.conv1.bias.grad)
###Output
反向传播之前 conv1.bias的梯度
tensor([0., 0., 0., 0., 0., 0.])
反向传播之后 conv1.bias的梯度
tensor([ 0.1366, 0.0885, -0.0036, 0.1410, 0.0144, 0.0562])
###Markdown
Optimizers. After backpropagation has computed the gradients of all parameters, an optimization method is still needed to update the network's weights and parameters. For example, the update rule of stochastic gradient descent (SGD) is:
```
weight = weight - learning_rate * gradient
```
A manual implementation looks like this:
```python
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)  # in-place subtraction
```
`torch.optim` implements the vast majority of optimization methods used in deep learning, such as RMSProp, Adam, and SGD, and is more convenient to use, so most of the time there is no need to write the code above by hand.
###Code
import torch.optim as optim
#新建一个优化器,指定要调整的参数和学习率
optimizer = optim.SGD(net.parameters(), lr = 0.01)
# 在训练过程中
# 先梯度清零(与net.zero_grad()效果一样)
optimizer.zero_grad()
# 计算损失
output = net(input)
loss = criterion(output, target)
#反向传播
loss.backward()
#更新参数
optimizer.step()
###Output
_____no_output_____
###Markdown
Data loading and preprocessing. In deep learning, data loading and preprocessing can be very complex and tedious, but PyTorch provides tools that greatly simplify and speed up the data pipeline. For common datasets, PyTorch also provides ready-made interfaces for quick use; these datasets live mainly in torchvision. `torchvision` implements loading of common image datasets such as Imagenet, CIFAR10, and MNIST, as well as common data transformations, which makes data loading much easier and the code reusable. A first exercise: CIFAR-10 classification. Below we try to classify the CIFAR-10 dataset, with the following steps:
1. Load and preprocess the CIFAR-10 dataset with torchvision
2. Define the network
3. Define the loss function and optimizer
4. Train the network and update its parameters
5. Test the network
CIFAR-10 data loading and preprocessing: CIFAR-10 [^3] is a widely used color-image dataset with 10 classes: 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'. Every image is $3\times32\times32$, i.e. a 3-channel color image with a resolution of $32\times32$. [^3]: http://www.cs.toronto.edu/~kriz/cifar.html
###Code
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
show = ToPILImage() # 可以把Tensor转成Image,方便可视化
# 第一次运行程序torchvision会自动下载CIFAR-10数据集,
# 大约100M,需花费一定的时间,
# 如果已经下载有CIFAR-10,可通过root参数指定
# 定义对数据的预处理
transform = transforms.Compose([
transforms.ToTensor(), # 转为Tensor
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # 归一化
])
# 训练集
trainset = tv.datasets.CIFAR10(
root='/home/cy/tmp/data/',
train=True,
download=True,
transform=transform)
trainloader = t.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2)
# 测试集
testset = tv.datasets.CIFAR10(
'/home/cy/tmp/data/',
train=False,
download=True,
transform=transform)
testloader = t.utils.data.DataLoader(
testset,
batch_size=4,
shuffle=False,
num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
###Output
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to /home/cy/tmp/data/cifar-10-python.tar.gz
Files already downloaded and verified
###Markdown
A Dataset object is a dataset that can be indexed by subscript and returns data in the form (data, label).
###Code
(data, label) = trainset[100]
print(classes[label])
# (data + 1) / 2是为了还原被归一化的数据
show((data + 1) / 2).resize((100, 100))
###Output
ship
###Markdown
A Dataloader is an iterable object that assembles the individual items returned by the dataset into batches and provides multi-threaded loading, data shuffling, and similar features. Once the program has gone through all the data in the dataset once, one iteration over the Dataloader is also complete.
###Code
dataiter = iter(trainloader)
images, labels = dataiter.next() # 返回4张图片及标签
print(' '.join('%11s'%classes[labels[j]] for j in range(4)))
show(tv.utils.make_grid((images+1)/2)).resize((400,100))
###Output
bird truck truck car
###Markdown
Define the network: copy the LeNet network above and change the first argument of self.conv1 to 3 channels, since CIFAR-10 images are 3-channel color images.
###Code
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(x.size()[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
print(net)
###Output
Net(
(conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
###Markdown
Define the loss function and the optimizer (loss and optimizer).
###Code
from torch import optim
criterion = nn.CrossEntropyLoss() # 交叉熵损失函数
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
Train the network. The training procedure is similar for all networks; it repeatedly executes the following steps:
- feed in the input data
- forward pass + backward pass
- update the parameters
###Code
t.set_num_threads(8)
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# 输入数据
inputs, labels = data
# 梯度清零
optimizer.zero_grad()
# forward + backward
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
# 更新参数
optimizer.step()
# 打印log信息
# loss 是一个scalar,需要使用loss.item()来获取数值,不能使用loss[0]
running_loss += loss.item()
if i % 2000 == 1999: # 每2000个batch打印一下训练状态
print('[%d, %5d] loss: %.3f' \
% (epoch+1, i+1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
###Output
[1, 2000] loss: 2.157
[1, 4000] loss: 1.815
[1, 6000] loss: 1.676
[1, 8000] loss: 1.599
[1, 10000] loss: 1.557
[1, 12000] loss: 1.466
[2, 2000] loss: 1.410
[2, 4000] loss: 1.381
[2, 6000] loss: 1.331
[2, 8000] loss: 1.312
[2, 10000] loss: 1.281
[2, 12000] loss: 1.249
Finished Training
###Markdown
Here we trained for only 2 epochs (one full pass over the dataset is called an epoch); let's see whether the network has learned anything. Feed the test images into the network, compute their labels, and compare them with the actual labels.
###Code
dataiter = iter(testloader)
images, labels = dataiter.next() # 一个batch返回4张图片
print('实际的label: ', ' '.join(\
'%08s'%classes[labels[j]] for j in range(4)))
show(tv.utils.make_grid(images / 2 - 0.5)).resize((400,100))
###Output
实际的label: cat ship ship plane
###Markdown
Next, compute the labels predicted by the network:
###Code
# 计算图片在每个类别上的分数
outputs = net(images)
# 得分最高的那个类
_, predicted = t.max(outputs.data, 1)
print('预测结果: ', ' '.join('%5s'\
% classes[predicted[j]] for j in range(4)))
###Output
预测结果: dog ship plane plane
###Markdown
The effect is already visible, with an accuracy of 50%, but that was only a handful of images; let's look at the performance on the whole test set.
###Code
correct = 0 # 预测正确的图片数
total = 0 # 总共的图片数
# 由于测试的时候不需要求导,可以暂时关闭autograd,提高速度,节约内存
with t.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = t.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('10000张测试集中的准确率为: %d %%' % (100 * correct / total))
###Output
10000张测试集中的准确率为: 53 %
###Markdown
The accuracy is far better than random guessing (10% accuracy), which shows that the network has indeed learned something. Training on the GPU: just as Tensors were moved from CPU to GPU earlier, the model can be moved from CPU to GPU in the same way.
###Code
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
net.to(device)
images = images.to(device)
labels = labels.to(device)
output = net(images)
loss= criterion(output,labels)
loss
###Output
_____no_output_____ |
src_optimization/05_openmp_stencil_01/e_plot.ipynb | ###Markdown
Cache Plots
###Code
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
import seaborn as sns
PHYSICAL_CORES=64
def plot(p_data, p_yId, p_xId, p_hueId, p_styleId, p_logScale=False, p_smt_marker=False, p_export_filename=None, p_xLabel=None, p_yLabel=None):
rcParams['figure.figsize'] = 12,8
rcParams['font.size'] = 12
rcParams['svg.fonttype'] = 'none'
plot = sns.lineplot(x=p_xId,
y=p_yId,
hue=p_hueId,
style=p_styleId,
data=p_data)
if p_logScale == True:
plot.set_yscale('log')
plot.set_xscale('log')
if p_xLabel != None:
plot.set(xlabel=p_xLabel)
else:
plot.set(xlabel=p_xId)
if p_yLabel != None:
plot.set(ylabel=p_yLabel)
else:
plot.set(ylabel=p_yId)
plt.grid(color='gainsboro')
plt.grid(True,which='minor', linestyle='--', linewidth=0.5, color='gainsboro')
if(p_smt_marker == True):
plt.axvline(PHYSICAL_CORES, linestyle='--', color='red', label='using SMT')
plt.legend()
if(p_export_filename != None):
plt.savefig(p_export_filename)
plt.show()
###Output
_____no_output_____
###Markdown
Gauss3 Strong Scaling
###Code
import pandas as pd
data_frame = pd.read_csv('./runtime.csv')
data_frame = data_frame[data_frame.region_id == 'apply']
#
# NOTE: calc absolute efficiency
#
ref_runtime = data_frame[data_frame.bench_id == '\Verb{nobind}'][data_frame.threads == 1]['runtime'].values[0]
data_frame = data_frame.assign(efficiency_abs=lambda p_entry: ref_runtime/(p_entry.runtime * p_entry.threads))
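# Added note: the absolute efficiency defined above is E_abs(p) = T_nobind(1) / (p * T(p)),
# i.e. the speedup relative to the single-thread 'nobind' reference divided by the thread count p.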
plot(p_data=data_frame,
p_yId='runtime',
p_xId='threads',
p_hueId='bench_id',
p_styleId=None,
p_logScale=True,
p_smt_marker=True,
p_export_filename='runtime.svg',
p_xLabel="Threads",
p_yLabel="Runtime [s]")
plot(p_data=data_frame,
p_yId='efficiency_abs',
p_xId='threads',
p_hueId='bench_id',
p_styleId=None,
p_logScale=True,
p_smt_marker=True,
p_export_filename='efficiency.svg',
p_xLabel="Threads",
p_yLabel="Absolute Efficiency")
###Output
/var/folders/zt/h71khkbd7ll9krscx1zncwlc0000gn/T/ipykernel_39266/1546138061.py:9: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
ref_runtime = data_frame[data_frame.bench_id == '\Verb{nobind}'][data_frame.threads == 1]['runtime'].values[0]
|
PUBG/pubg-survivor-kit.ipynb | ###Markdown
Why this kernel? Why should you read through this kernel? The goal is to implement the full chain (skipping over EDA) from data access to preparation of the submission and to provide a functional example of advanced LightGBM usage:
- the data will be read and **memory footprint will be reduced**;
- **missing data** will be checked;
- _feature engineering is not implemented yet_;
- a baseline model will be trained:
 - Gradient boosting model as implemented in **LightGBM** is used;
 - **Mean absolute error (MAE) is used as the loss function** in the training (consistently with the final evaluation metric). **FAIR loss** was also tried and was found to lead to similar results;
 - Training is performed with **early stopping based on the MAE metric**;
 - **The learning rate is reduced (decays) from iteration to iteration** to improve convergence (one starts with a high and finishes with a low learning rate);
 - The training is implemented in a cross-validation (CV) loop and **out-of-fold (OOF) predictions are stored** for future use in stacking;
 - **Test predictions** are obtained as an **average over predictions from models trained on k-1 fold subsets**;
- Predictions are **clipped to the `[0,1]` range**.
See another of my kernels showing how to significantly improve the score by using the relative ranking of teams within games: https://www.kaggle.com/mlisovyi/relativerank-of-predictions Side note: a score of 0.0635 can be achieved with only 50k entries from the train set
###Code
# The number of entries to read in. Use it to have fast turn-around. The values are separate for train and test sets
max_events_trn=None
max_events_tst=None
# Number on CV folds
n_cv=3
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.simplefilter(action='ignore', category=Warning)
from sklearn.metrics import mean_squared_error, mean_absolute_error
import os
print(os.listdir("../input"))
###Output
_____no_output_____
###Markdown
Define a function to reduce memory footprint
###Code
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object and col_type.name != 'category' and 'datetime' not in col_type.name:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
elif 'datetime' not in col_type.name:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
###Output
_____no_output_____
###Markdown
Read in the data
###Code
df_trn = pd.read_csv('../input/train.csv', nrows=max_events_trn)
df_trn = reduce_mem_usage(df_trn)
df_tst = pd.read_csv('../input/test.csv', nrows=max_events_tst)
df_tst = reduce_mem_usage(df_tst)
###Output
_____no_output_____
###Markdown
What do the data look like?
###Code
df_trn.head()
df_trn.info(memory_usage='deep', verbose=False)
df_tst.info(memory_usage='deep', verbose=False)
###Output
_____no_output_____
###Markdown
- The training dataset has 4.3M entries, which is not small and allows advanced models like GBM and NN to dominate.
- The test dataset is only 1.9M entries.
- There are 25 features (+ the target in the train dataset).
Are there missing data?
###Code
df_trn.isnull().sum().sum()
###Output
_____no_output_____
###Markdown
Good news: **There are no entries with `np.nan`**, so at first glance we do not need to do anything fancy about missing data. There might be some default values pre-filled into missing entries; this would have to be discovered. Feature engineering to come here... [tba] Prepare the data
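As a placeholder for the future feature-engineering step, here is a minimal, hedged sketch of the kind of group-level aggregate one could add at this point (the new column name is purely illustrative and the line adding it is left commented out; `matchId`, `groupId` and `kills` are columns of the PUBG dataset and are only dropped further below):
```python
# Hypothetical engineered feature: the mean number of kills within a player's team in a match
team_mean_kills = (df_trn.groupby(['matchId', 'groupId'])['kills']
                         .transform('mean')
                         .rename('teamMeanKills'))
# df_trn['teamMeanKills'] = team_mean_kills  # intentionally not applied in this baseline
```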
###Code
y = df_trn['winPlacePerc']
df_trn.drop('winPlacePerc', axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
We will **NOT** use `Id, groupId, matchId`. The first one is a unique identifier and can be useful only in the case of data leakage. The other two would be useful in feature engineering with grouped stats per match and per team.
###Code
# we will NOT use
features_not2use = ['Id', 'groupId', 'matchId']
for df in [df_trn, df_tst]:
df.drop(features_not2use, axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Train and evaluate a model. Start by defining handy helper functions...
###Code
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.base import clone, ClassifierMixin, RegressorMixin
import lightgbm as lgb
def learning_rate_decay_power(current_iter):
'''
The function defines learning rate deay for LGBM
'''
base_learning_rate = 0.10
min_lr = 5e-2
lr = base_learning_rate * np.power(.996, current_iter)
return lr if lr > min_lr else min_lr
def train_single_model(clf_, X_, y_, random_state_=314, opt_parameters_={}, fit_params_={}):
'''
A wrapper to train a model with particular parameters
'''
c = clone(clf_)
c.set_params(**opt_parameters_)
c.set_params(random_state=random_state_)
return c.fit(X_, y_, **fit_params_)
def train_model_in_CV(model, X, y, metric, metric_args={},
model_name='xmodel',
seed=31416, n=5,
opt_parameters_={}, fit_params_={},
verbose=True):
# the list of classifiers for a voting ensemble
clfs = []
# performance
perf_eval = {'score_i_oof': 0,
'score_i_ave': 0,
'score_i_std': 0,
'score_i': []
}
# full-sample oof prediction
y_full_oof = pd.Series(np.zeros(shape=(y.shape[0],)),
index=y.index)
if 'sample_weight' in metric_args:
sample_weight=metric_args['sample_weight']
doSqrt=False
if 'sqrt' in metric_args:
doSqrt=True
del metric_args['sqrt']
cv = KFold(n, shuffle=True, random_state=seed) #Stratified
# The out-of-fold (oof) prediction for the k-1 sample in the outer CV loop
y_oof = pd.Series(np.zeros(shape=(X.shape[0],)),
index=X.index)
scores = []
clfs = []
for n_fold, (trn_idx, val_idx) in enumerate(cv.split(X, (y!=0).astype(np.int8))):
X_trn, y_trn = X.iloc[trn_idx], y.iloc[trn_idx]
X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
if fit_params_:
# use _stp data for early stopping
fit_params_["eval_set"] = [(X_trn,y_trn), (X_val,y_val)]
fit_params_['verbose'] = verbose
clf = train_single_model(model, X_trn, y_trn, 314+n_fold, opt_parameters_, fit_params_)
clfs.append(('{}{}'.format(model_name,n_fold), clf))
# evaluate performance
if isinstance(clf, RegressorMixin):
y_oof.iloc[val_idx] = clf.predict(X_val)
elif isinstance(clf, ClassifierMixin):
y_oof.iloc[val_idx] = clf.predict_proba(X_val)[:,1]
else:
raise TypeError('Provided model does not inherit neither from a regressor nor from classifier')
if 'sample_weight' in metric_args:
metric_args['sample_weight'] = y_val.map(sample_weight)
scores.append(metric(y_val, y_oof.iloc[val_idx], **metric_args))
#cleanup
del X_trn, y_trn, X_val, y_val
# Store performance info for this CV
if 'sample_weight' in metric_args:
metric_args['sample_weight'] = y_oof.map(sample_weight)
perf_eval['score_i_oof'] = metric(y, y_oof, **metric_args)
perf_eval['score_i'] = scores
if doSqrt:
for k in perf_eval.keys():
if 'score' in k:
perf_eval[k] = np.sqrt(perf_eval[k])
scores = np.sqrt(scores)
perf_eval['score_i_ave'] = np.mean(scores)
perf_eval['score_i_std'] = np.std(scores)
return clfs, perf_eval, y_oof
def print_perf_clf(name, perf_eval):
print('Performance of the model:')
print('Mean(Val) score inner {} Classifier: {:.4f}+-{:.4f}'.format(name,
perf_eval['score_i_ave'],
perf_eval['score_i_std']
))
print('Min/max scores on folds: {:.4f} / {:.4f}'.format(np.min(perf_eval['score_i']),
np.max(perf_eval['score_i'])))
print('OOF score inner {} Classifier: {:.4f}'.format(name, perf_eval['score_i_oof']))
print('Scores in individual folds: {}'.format(perf_eval['score_i']))
###Output
_____no_output_____
###Markdown
Now let's define the parameters and model in a scalable fashion (we can later add further models to the list and it will work out-of-the-box). The format is a dictionary whose keys are user-defined model names and whose items are a tuple of:
- the model to be fitted;
- additional model parameters to be set;
- model fit parameters (passed to the `model.fit()` call);
- the target variable.
###Code
mdl_inputs = {
# This will be with MAE loss
'lgbm1_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.05),
{'objective': 'mae', 'colsample_bytree': 0.75, 'min_child_weight': 10.0, 'num_leaves': 30, 'reg_alpha': 1, 'subsample': 0.75},
{"early_stopping_rounds":100,
"eval_metric" : 'mae',
'eval_names': ['train', 'early_stop'],
'verbose': False,
'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
'categorical_feature': 'auto'},
y
),
# 'lgbm45_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.05),
# {'objective': 'mae', 'colsample_bytree': 0.75, 'min_child_weight': 10.0, 'num_leaves': 45, 'reg_alpha': 1, 'subsample': 0.75},
# {"early_stopping_rounds":100,
# "eval_metric" : 'mae',
# 'eval_names': ['train', 'early_stop'],
# 'verbose': False,
# 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
# 'categorical_feature': 'auto'},
# y
# ),
# 'lgbm60_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.05),
# {'objective': 'mae', 'colsample_bytree': 0.75, 'min_child_weight': 10.0, 'num_leaves': 60, 'reg_alpha': 1, 'subsample': 0.75},
# {"early_stopping_rounds":100,
# "eval_metric" : 'mae',
# 'eval_names': ['train', 'early_stop'],
# 'verbose': False,
# 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
# 'categorical_feature': 'auto'},
# y
# ),
# 'lgbm90_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.05),
# {'objective': 'mae', 'colsample_bytree': 0.75, 'min_child_weight': 10.0, 'num_leaves': 90, 'reg_alpha': 1, 'subsample': 0.75},
# {"early_stopping_rounds":100,
# "eval_metric" : 'mae',
# 'eval_names': ['train', 'early_stop'],
# 'verbose': False,
# 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
# 'categorical_feature': 'auto'},
# y
# ),
# This will be with FAIR loss
# 'lgbm2_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.05),
# {'objective': 'fair', 'colsample_bytree': 0.75, 'min_child_weight': 10.0, 'num_leaves': 30, 'reg_alpha': 1, 'subsample': 0.75},
# {"early_stopping_rounds":100,
# "eval_metric" : 'mae',
# 'eval_names': ['train', 'early_stop'],
# 'verbose': False,
# 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
# 'categorical_feature': 'auto'},
# y
# ),
}
###Output
_____no_output_____
###Markdown
Do the actual model training
###Code
%%time
mdls = {}
results = {}
y_oofs = {}
for name, (mdl, mdl_pars, fit_pars, y_) in mdl_inputs.items():
print('--------------- {} -----------'.format(name))
mdl_, perf_eval_, y_oof_ = train_model_in_CV(mdl, df_trn, y_, mean_absolute_error,
metric_args={},
model_name=name,
opt_parameters_=mdl_pars,
fit_params_=fit_pars,
n=n_cv,
verbose=False)
results[name] = perf_eval_
mdls[name] = mdl_
y_oofs[name] = y_oof_
print_perf_clf(name, perf_eval_)
###Output
_____no_output_____
###Markdown
Let's plot what the predictions look like
###Code
k = list(y_oofs.keys())[0]
_ = y_oofs[k].plot('hist', bins=100, figsize=(15,6))
plt.xlabel('Predicted winPlacePerc OOF')
###Output
_____no_output_____
###Markdown
Note that the predictions spill outside of the `[0,1]` range, which is not meaningful for a percentage value. **We will clip test predictions to be within the meaningful range.** This will improve the score slightly. Visualise importance of features
###Code
import matplotlib.pyplot as plt
import seaborn as sns
def display_importances(feature_importance_df_, n_feat=30, silent=False, dump_strs=[],
fout_name=None, title='Features (avg over folds)'):
'''
Make a plot of most important features from a tree-based model
Parameters
----------
feature_importance_df_ : pd.DataFrame
The input dataframe.
Must contain columns `'feature'` and `'importance'`.
The dataframe will be first grouped by `'feature'` and the mean `'importance'` will be calculated.
This allows to calculate and plot importance averaged over folds,
when the same features appear in the dataframe as many time as there are folds in CV.
n_feats : int [default: 20]
The maximum number of the top features to be plotted
silent : bool [default: False]
Dump additionsl information, in particular the mean importances for features
defined by `dump_strs` and the features with zero (<1e-3) importance
dump_strs : list of strings [default: []]
Features containing either of these srings will be printed to the screen
fout_name : str or None [default: None]
The name of the file to dump the figure.
If `None`, no file is created (to be used in notebooks)
'''
# Plot feature importances
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(
by="importance", ascending=False)[:n_feat].index
mean_imp = feature_importance_df_[["feature", "importance"]].groupby("feature").mean()
df_2_neglect = mean_imp[mean_imp['importance'] < 1e-3]
if not silent:
print('The list of features with 0 importance: ')
print(df_2_neglect.index.values.tolist())
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
for feat_prefix in dump_strs:
feat_names = [x for x in mean_imp.index if feat_prefix in x]
print(mean_imp.loc[feat_names].sort_values(by='importance', ascending=False))
del mean_imp, df_2_neglect
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
plt.figure(figsize=(8,10))
sns.barplot(x="importance", y="feature",
data=best_features.sort_values(by="importance", ascending=False))
plt.title(title)
plt.tight_layout()
if fout_name is not None:
plt.savefig(fout_name)
display_importances(pd.DataFrame({'feature': df_trn.columns,
'importance': mdls['lgbm1_reg'][0][1].booster_.feature_importance('gain')}),
n_feat=20,
title='GAIN feature importance',
fout_name='feature_importance_gain.png'
)
###Output
_____no_output_____
###Markdown
Prepare submission
###Code
%%time
y_subs= {}
for c in mdl_inputs:
mdls_= mdls[c]
y_sub = np.zeros(df_tst.shape[0])
for mdl_ in mdls_:
y_sub += np.clip(mdl_[1].predict(df_tst), 0, 1)
y_sub /= n_cv
y_subs[c] = y_sub
df_sub = pd.read_csv('../input/sample_submission.csv', nrows=max_events_tst)
for c in mdl_inputs:
df_sub['winPlacePerc'] = y_subs[c]
df_sub.to_csv('sub_{}.csv'.format(c), index=False)
oof = pd.DataFrame(y_oofs[c].values)
oof.columns = ['winPlacePerc']
oof.clip(0, 1, inplace=True)
oof.to_csv('oof_{}.csv'.format(c), index=False)
!ls
###Output
_____no_output_____ |
sTock_Prediction NSE TATA-GLOBAL.ipynb | ###Markdown
importing Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from matplotlib import pyplot as plt
from sklearn import linear_model , model_selection
from sklearn.metrics import confusion_matrix , mean_squared_error
from sklearn.preprocessing import StandardScaler , MinMaxScaler
from sklearn.model_selection import train_test_split , TimeSeriesSplit
from keras.models import Sequential
from keras.layers import Dense , LSTM , Dropout
import math
from IPython.display import display
###Output
_____no_output_____
###Markdown
importing Dataset | Training Data (Train-Test Split)
###Code
data = pd.read_csv('Stock_Data.csv')
dataset_train=data.iloc[0:930,1:2]
dataset_test=data.iloc[930:,1:2]
training_set = data.iloc[0:930, 3:4].values
testing_set=data.iloc[930:,3:4].values
data.head()
###Output
_____no_output_____
###Markdown
Removing Unnecessary Data
###Code
data.drop('Last', axis=1, inplace=True)
data.drop('Total Trade Quantity', axis=1, inplace=True)
data.drop('Turnover (Lacs)', axis=1, inplace=True)
print(data.head())
data.to_csv('tata_preprocessed.csv',index= False)
data = data.iloc[::-1]
###Output
Date Open High Low Close
0 2018-10-08 208.00 222.25 206.85 215.15
1 2018-10-05 217.00 218.60 205.90 209.20
2 2018-10-04 223.50 227.80 216.15 218.20
3 2018-10-03 230.00 237.50 225.75 227.60
4 2018-10-01 234.55 234.60 221.05 230.90
###Markdown
Visualising Data
###Code
plt.figure(figsize = (18,9))
plt.plot(range(data.shape[0]),(data['Close']))
plt.xticks(range(0,data.shape[0],500),data['Date'].loc[::500],rotation=45)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price',fontsize=18)
plt.show()
###Output
_____no_output_____
###Markdown
Data Normalization
###Code
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
###Output
_____no_output_____
###Markdown
Incorporating Timesteps Into Data
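Before the code, a short added illustration (not part of the original notebook) of what the 10-step lookback window does: each sample is the previous 10 scaled prices and the target is the price that follows.
```python
# For a toy scaled series s = [s0, s1, ..., s12] and a lookback of 10:
#   X[0] = s[0:10],  y[0] = s[10]
#   X[1] = s[1:11],  y[1] = s[11]
#   X[2] = s[2:12],  y[2] = s[12]
# The loop in the next cell builds exactly these windows from training_set_scaled.
```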
###Code
len(training_set_scaled)
X_train = []
y_train = []
for i in range(10,930):
X_train.append(training_set_scaled[i-10:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
###Output
_____no_output_____
###Markdown
Creating LSTM Model
###Code
regressor = Sequential()
regressor.add(LSTM(units = 75, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 75))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 200, batch_size = 64)
###Output
Epoch 1/200
15/15 [==============================] - 6s 22ms/step - loss: 0.1147
Epoch 2/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0136
Epoch 3/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0061
Epoch 4/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0042
Epoch 5/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0035
Epoch 6/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0032
Epoch 7/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0041
Epoch 8/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0035
Epoch 9/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0032
Epoch 10/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0031
Epoch 11/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0035
Epoch 12/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0039
Epoch 13/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0031
Epoch 14/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0030
Epoch 15/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0032
Epoch 16/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0035
Epoch 17/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0028
Epoch 18/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0032
Epoch 19/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0030
Epoch 20/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0028
Epoch 21/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0029
Epoch 22/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0029
Epoch 23/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0034
Epoch 24/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0029
Epoch 25/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0027
Epoch 26/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0026
Epoch 27/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0025
Epoch 28/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0027
Epoch 29/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0025
Epoch 30/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0030
Epoch 31/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0032
Epoch 32/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0028
Epoch 33/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0028
Epoch 34/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0026
Epoch 35/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0024
Epoch 36/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0028
Epoch 37/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0021
Epoch 38/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0027
Epoch 39/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0029
Epoch 40/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0025
Epoch 41/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0025
Epoch 42/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0024
Epoch 43/200
15/15 [==============================] - 0s 21ms/step - loss: 0.0027
Epoch 44/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0027
Epoch 45/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0024
Epoch 46/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0024
Epoch 47/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0021
Epoch 48/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0025
Epoch 49/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0023
Epoch 50/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0024
Epoch 51/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0022
Epoch 52/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0023
Epoch 53/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0023
Epoch 54/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0023
Epoch 55/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0029
Epoch 56/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0024
Epoch 57/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0024
Epoch 58/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0021
Epoch 59/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0022
Epoch 60/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0023
Epoch 61/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0024
Epoch 62/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0025
Epoch 63/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0020
Epoch 64/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0021
Epoch 65/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0022
Epoch 66/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0018
Epoch 67/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0021
Epoch 68/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0024
Epoch 69/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0021
Epoch 70/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0024
Epoch 71/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0024
Epoch 72/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0025
Epoch 73/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0017
Epoch 74/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0019
Epoch 75/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0019
Epoch 76/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0019
Epoch 77/200
15/15 [==============================] - 0s 30ms/step - loss: 0.0024
Epoch 78/200
15/15 [==============================] - 0s 31ms/step - loss: 0.0021
Epoch 79/200
15/15 [==============================] - 0s 28ms/step - loss: 0.0018
Epoch 80/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0017
Epoch 81/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0021
Epoch 82/200
15/15 [==============================] - 0s 27ms/step - loss: 0.0018
Epoch 83/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0021
Epoch 84/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0025
Epoch 85/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0019
Epoch 86/200
15/15 [==============================] - 0s 22ms/step - loss: 0.0023
Epoch 87/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0020
Epoch 88/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0020
Epoch 89/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0019
Epoch 90/200
15/15 [==============================] - 0s 23ms/step - loss: 0.0012
Epoch 91/200
15/15 [==============================] - 0s 27ms/step - loss: 0.0019
Epoch 92/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0019
Epoch 93/200
15/15 [==============================] - 0s 27ms/step - loss: 0.0016
Epoch 94/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0016
Epoch 95/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0017
Epoch 96/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0015
Epoch 97/200
15/15 [==============================] - 0s 26ms/step - loss: 0.0017
Epoch 98/200
15/15 [==============================] - 0s 25ms/step - loss: 0.0017
Epoch 99/200
15/15 [==============================] - 0s 24ms/step - loss: 0.0017
Epoch 100/200
15/15 [==============================] - 0s 27ms/step - loss: 0.0023
Epoch 101/200
###Markdown
Making Predictions on Test Set
###Code
real_stock_price = testing_set
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 10:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(10,305):
X_test.append(inputs[i-10:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
###Output
_____no_output_____
###Markdown
Plotting The Results
###Code
%matplotlib inline
plt.plot(real_stock_price, color = 'red', label = 'TATA Stock Price')
plt.plot(predicted_stock_price, color = 'green', label = 'Predicted TATA Stock Price')
plt.title('TATA Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('TATA Stock Price')
plt.legend()
plt.show()
###Output
_____no_output_____ |
experiments/tl_2/cores_wisig-oracle.run1.framed/trials/2/trial.ipynb | ###Markdown
Transfer Learning Template
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from torch.utils.data import DataLoader
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
###Output
_____no_output_____
###Markdown
Allowed Parameters. These are the allowed parameters, not defaults. Each of these values needs to be present in the injected parameters (the notebook will raise an exception if any of them is missing). Papermill uses the cell tag "parameters" to inject the real parameters below this cell; enable tag display in Jupyter to see it.
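As a hedged illustration (not part of the original template), the parameters could be injected by running this notebook with papermill's Python API; the notebook file names and parameter values shown here are assumptions.
###Code
# Illustrative only: execute this template with injected parameters via papermill.
# 'trial.ipynb' / 'trial_out.ipynb' and the parameter values are assumed names;
# a real run must supply every key listed in required_parameters below.
import papermill as pm
pm.execute_notebook(
    'trial.ipynb',             # this parameterized template
    'trial_out.ipynb',         # executed copy with the injected values
    parameters=dict(lr=0.001, seed=1337, n_epoch=50),
)
###Output
_____no_output_____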
###Code
required_parameters = {
"experiment_name",
"lr",
"device",
"seed",
"dataset_seed",
"n_shot",
"n_query",
"n_way",
"train_k_factor",
"val_k_factor",
"test_k_factor",
"n_epoch",
"patience",
"criteria_for_best",
"x_net",
"datasets",
"torch_default_dtype",
"NUM_LOGS_PER_EPOCH",
"BEST_MODEL_PATH",
}
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
from steves_utils.ORACLE.utils_v2 import (
ALL_DISTANCES_FEET_NARROWED,
ALL_RUNS,
ALL_SERIAL_NUMBERS,
)
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["n_way"] = 8
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 50
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "source_loss"
standalone_parameters["datasets"] = [
{
"labels": ALL_SERIAL_NUMBERS,
"domains": ALL_DISTANCES_FEET_NARROWED,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"),
"source_or_target_dataset": "source",
"x_transforms": ["unit_mag", "minus_two"],
"episode_transforms": [],
"domain_prefix": "ORACLE_"
},
{
"labels": ALL_NODES,
"domains": ALL_DAYS,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
"source_or_target_dataset": "target",
"x_transforms": ["unit_power", "times_zero"],
"episode_transforms": [],
"domain_prefix": "CORES_"
}
]
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# Parameters
parameters = {
"experiment_name": "cores+wisig -> oracle.run1.framed",
"device": "cuda",
"lr": 0.001,
"seed": 1337,
"dataset_seed": 1337,
"n_shot": 3,
"n_query": 2,
"train_k_factor": 3,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float32",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_loss",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"n_way": 16,
"datasets": [
{
"labels": [
"1-10.",
"1-11.",
"1-15.",
"1-16.",
"1-17.",
"1-18.",
"1-19.",
"10-4.",
"10-7.",
"11-1.",
"11-14.",
"11-17.",
"11-20.",
"11-7.",
"13-20.",
"13-8.",
"14-10.",
"14-11.",
"14-14.",
"14-7.",
"15-1.",
"15-20.",
"16-1.",
"16-16.",
"17-10.",
"17-11.",
"17-2.",
"19-1.",
"19-16.",
"19-19.",
"19-20.",
"19-3.",
"2-10.",
"2-11.",
"2-17.",
"2-18.",
"2-20.",
"2-3.",
"2-4.",
"2-5.",
"2-6.",
"2-7.",
"2-8.",
"3-13.",
"3-18.",
"3-3.",
"4-1.",
"4-10.",
"4-11.",
"4-19.",
"5-5.",
"6-15.",
"7-10.",
"7-14.",
"8-18.",
"8-20.",
"8-3.",
"8-8.",
],
"domains": [1, 2, 3, 4, 5],
"num_examples_per_domain_per_label": 100,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl",
"source_or_target_dataset": "source",
"x_transforms": [],
"episode_transforms": [],
"domain_prefix": "C_A_",
},
{
"labels": [
"1-10",
"1-12",
"1-14",
"1-16",
"1-18",
"1-19",
"1-8",
"10-11",
"10-17",
"10-4",
"10-7",
"11-1",
"11-10",
"11-19",
"11-20",
"11-4",
"11-7",
"12-19",
"12-20",
"12-7",
"13-14",
"13-18",
"13-19",
"13-20",
"13-3",
"13-7",
"14-10",
"14-11",
"14-12",
"14-13",
"14-14",
"14-19",
"14-20",
"14-7",
"14-8",
"14-9",
"15-1",
"15-19",
"15-6",
"16-1",
"16-16",
"16-19",
"16-20",
"17-10",
"17-11",
"18-1",
"18-10",
"18-11",
"18-12",
"18-13",
"18-14",
"18-15",
"18-16",
"18-17",
"18-19",
"18-2",
"18-20",
"18-4",
"18-5",
"18-7",
"18-8",
"18-9",
"19-1",
"19-10",
"19-11",
"19-12",
"19-13",
"19-14",
"19-15",
"19-19",
"19-2",
"19-20",
"19-3",
"19-4",
"19-6",
"19-7",
"19-8",
"19-9",
"2-1",
"2-13",
"2-15",
"2-3",
"2-4",
"2-5",
"2-6",
"2-7",
"2-8",
"20-1",
"20-12",
"20-14",
"20-15",
"20-16",
"20-18",
"20-19",
"20-20",
"20-3",
"20-4",
"20-5",
"20-7",
"20-8",
"3-1",
"3-13",
"3-18",
"3-2",
"3-8",
"4-1",
"4-10",
"4-11",
"5-1",
"5-5",
"6-1",
"6-15",
"6-6",
"7-10",
"7-11",
"7-12",
"7-13",
"7-14",
"7-7",
"7-8",
"7-9",
"8-1",
"8-13",
"8-14",
"8-18",
"8-20",
"8-3",
"8-8",
"9-1",
"9-7",
],
"domains": [1, 2, 3, 4],
"num_examples_per_domain_per_label": 100,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl",
"source_or_target_dataset": "source",
"x_transforms": [],
"episode_transforms": [],
"domain_prefix": "W_A_",
},
{
"labels": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"domains": [32, 38, 8, 44, 14, 50, 20, 26],
"num_examples_per_domain_per_label": 2000,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl",
"source_or_target_dataset": "target",
"x_transforms": [],
"episode_transforms": [],
"domain_prefix": "ORACLE.run1_",
},
],
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
print("parameters not injected, running with standalone_parameters")
parameters = standalone_parameters
if not 'parameters' in locals() and not 'parameters' in globals():
raise Exception("Parameter injection failed")
#Use an easy dict for all the parameters
p = EasyDict(parameters)
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
print("Parameters are incorrect")
if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
p.domains_source = []
p.domains_target = []
train_original_source = []
val_original_source = []
test_original_source = []
train_original_target = []
val_original_target = []
test_original_target = []
# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag
# global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag
def add_dataset(
labels,
domains,
pickle_path,
x_transforms,
episode_transforms,
domain_prefix,
num_examples_per_domain_per_label,
source_or_target_dataset:str,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
):
if x_transforms == []: x_transform = None
else: x_transform = get_chained_transform(x_transforms)
if episode_transforms == []: episode_transform = None
else: raise Exception("episode_transforms not implemented")
episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])
eaf = Episodic_Accessor_Factory(
labels=labels,
domains=domains,
num_examples_per_domain_per_label=num_examples_per_domain_per_label,
iterator_seed=iterator_seed,
dataset_seed=dataset_seed,
n_shot=n_shot,
n_way=n_way,
n_query=n_query,
train_val_test_k_factors=train_val_test_k_factors,
pickle_path=pickle_path,
x_transform_func=x_transform,
)
train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()
train = Lazy_Iterable_Wrapper(train, episode_transform)
val = Lazy_Iterable_Wrapper(val, episode_transform)
test = Lazy_Iterable_Wrapper(test, episode_transform)
if source_or_target_dataset=="source":
train_original_source.append(train)
val_original_source.append(val)
test_original_source.append(test)
p.domains_source.extend(
[domain_prefix + str(u) for u in domains]
)
elif source_or_target_dataset=="target":
train_original_target.append(train)
val_original_target.append(val)
test_original_target.append(test)
p.domains_target.extend(
[domain_prefix + str(u) for u in domains]
)
else:
raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}")
for ds in p.datasets:
add_dataset(**ds)
# from steves_utils.CORES.utils import (
# ALL_NODES,
# ALL_NODES_MINIMUM_1000_EXAMPLES,
# ALL_DAYS
# )
# add_dataset(
# labels=ALL_NODES,
# domains = ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"cores_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle1_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle2_{u}"
# )
# add_dataset(
# labels=list(range(19)),
# domains = [0,1,2],
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"met_{u}"
# )
# # from steves_utils.wisig.utils import (
# # ALL_NODES_MINIMUM_100_EXAMPLES,
# # ALL_NODES_MINIMUM_500_EXAMPLES,
# # ALL_NODES_MINIMUM_1000_EXAMPLES,
# # ALL_DAYS
# # )
# import steves_utils.wisig.utils as wisig
# add_dataset(
# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,
# domains = wisig.ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"wisig_{u}"
# )
###################################
# Build the dataset
###################################
train_original_source = Iterable_Aggregator(train_original_source, p.seed)
val_original_source = Iterable_Aggregator(val_original_source, p.seed)
test_original_source = Iterable_Aggregator(test_original_source, p.seed)
train_original_target = Iterable_Aggregator(train_original_target, p.seed)
val_original_target = Iterable_Aggregator(val_original_target, p.seed)
test_original_target = Iterable_Aggregator(test_original_target, p.seed)
# For CNN We only use X and Y. And we only train on the source.
# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. Finally wrap them in a dataloader
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
datasets = EasyDict({
"source": {
"original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
"processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
},
"target": {
"original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
"processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
},
})
from steves_utils.transforms import get_average_magnitude, get_average_power
print(set([u for u,_ in val_original_source]))
print(set([u for u,_ in val_original_target]))
s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))
print(s_x)
# for ds in [
# train_processed_source,
# val_processed_source,
# test_processed_source,
# train_processed_target,
# val_processed_target,
# test_processed_target
# ]:
# for s_x, s_y, q_x, q_y, _ in ds:
# for X in (s_x, q_x):
# for x in X:
# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)
# assert np.isclose(get_average_power(x.numpy()), 1.0)
###################################
# Build the model
###################################
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
train_iterable=datasets.source.processed.train,
source_val_iterable=datasets.source.processed.val,
target_val_iterable=datasets.target.processed.val,
num_epochs=p.n_epoch,
num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
patience=p.patience,
optimizer=optimizer,
criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
per_domain_accuracy[domain] = {
"accuracy": accuracy,
"source?": domain in p.domains_source
}
# Do an independent accuracy assesment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
experiment = {
"experiment_name": p.experiment_name,
"parameters": dict(p),
"results": {
"source_test_label_accuracy": source_test_label_accuracy,
"source_test_label_loss": source_test_label_loss,
"target_test_label_accuracy": target_test_label_accuracy,
"target_test_label_loss": target_test_label_loss,
"source_val_label_accuracy": source_val_label_accuracy,
"source_val_label_loss": source_val_label_loss,
"target_val_label_accuracy": target_val_label_accuracy,
"target_val_label_loss": target_val_label_loss,
"total_epochs_trained": total_epochs_trained,
"total_experiment_time_secs": total_experiment_time_secs,
"confusion": confusion,
"per_domain_accuracy": per_domain_accuracy,
},
"history": history,
"dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
json.dumps(experiment)
###Output
_____no_output_____ |
examples/tutorial-first-steps-cdqa-rh.ipynb | ###Markdown
Notebook [1]: First steps with cdQA This notebook shows how to use the `cdQA` pipeline to perform question answering on a custom dataset. ***Note:*** *If you are using colab, you will need to install `cdQA` by executing `!pip install cdqa` in a cell.*
###Code
!pip install cdqa
import os
import pandas as pd
from ast import literal_eval
from cdqa.utils.filters import filter_paragraphs
from cdqa.pipeline import QAPipeline
###Output
/usr/local/lib/python3.6/dist-packages/tqdm/autonotebook/__init__.py:18: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
" (e.g. in jupyter console)", TqdmExperimentalWarning)
###Markdown
Download pre-trained reader model and example dataset
###Code
from cdqa.utils.download import download_model, download_bnpp_data
download_bnpp_data(dir='./data/bnpp_newsroom_v1.1/')
download_model(model='bert-squad_1.1', dir='./models')
###Output
Downloading BNP data...
Downloading trained model...
###Markdown
Visualize the dataset
###Code
df = pd.read_csv('./data/bnpp_newsroom_v1.1/bnpp_newsroom-v1.1.csv', converters={'paragraphs': literal_eval})
df = filter_paragraphs(df)
df.head()
df.loc[0].paragraphs
''.join(df.loc[0].paragraphs)
###Output
_____no_output_____
###Markdown
Instantiate the cdQA pipeline from a pre-trained reader model
###Code
cdqa_pipeline = QAPipeline(reader='./models/bert_qa.joblib')
cdqa_pipeline.fit_retriever(df=df)
###Output
100%|██████████| 231508/231508 [00:00<00:00, 901046.84B/s]
###Markdown
Execute a query
###Code
query = 'Since when does the Excellence Program of BNP Paribas exist?'
prediction = cdqa_pipeline.predict(query)
###Output
_____no_output_____
###Markdown
Explore predictions
###Code
print('query: {}'.format(query))
print('answer: {}'.format(prediction[0]))
print('title: {}'.format(prediction[1]))
print('paragraph: {}'.format(prediction[2]))
query2 = "Who's BNP Paribas' CEO?"
prediction2 = cdqa_pipeline.predict(query2)
print('query: {}'.format(query2))
print('answer: {}'.format(prediction2[0]))
print('title: {}'.format(prediction2[1]))
print('paragraph: {}'.format(prediction2[2]))
###Output
query: Who's BNP Paribas' CEO?
answer: Jean-Laurent Bonnafé
title: BNP Paribas continues its commitment in favour of integrating refugees in Europe
paragraph: Jean-Laurent Bonnafé, BNP Paribas CEO, declared that, “The refugee drama is a major human catastrophe that is mobilising many organisations and volunteers throughout Europe who are dealing with emergency conditions and providing refugees with the possibility of shelter, work, and a future. BNP Paribas is at their side, not only with financial assistance, but also with help finding employment and, for some, with recruitment. In today’s difficult context, we are committed to pursuing these efforts.”
|
A14_deep_computer_vision_with_cnns.ipynb | ###Markdown
Notes: - when 'valid' padding is used, the sides are not padded, so some rows/columns may get ignored when the stride is not equal to 1 - when 'same' padding is used, the left and right (and top/bottom) may be padded so that no rows get ignored. Started with chapter 10; some references were in chapter 14. **Perceptron**
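A minimal sketch (not from the book text) of how the two padding modes differ; the 7x7 input and 3x3 kernel here are arbitrary choices.
###Code
# Illustrative only: compare output sizes for 'valid' vs 'same' padding with stride 2.
import tensorflow as tf
x = tf.random.normal((1, 7, 7, 1))                                   # batch of one 7x7 image
same = tf.keras.layers.Conv2D(1, 3, strides=2, padding='same')(x)    # padded, keeps ceil(7/2) = 4
valid = tf.keras.layers.Conv2D(1, 3, strides=2, padding='valid')(x)  # no padding, floor((7-3)/2)+1 = 3
print(same.shape, valid.shape)  # (1, 4, 4, 1) vs (1, 3, 3, 1)
###Output
_____no_output_____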
###Code
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
# select columns 2 & 3 (petal length, petal width)
X = iris.data[:, (2, 3)]  # petal length, petal width
y = (iris.target == 0).astype(int)  # Iris setosa? (plain int instead of the deprecated np.int)
per_clf = Perceptron()
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
print(per_clf.predict([[4, 2.5]]))
iris.target
iris.data[:,0:3]
###Output
_____no_output_____ |
Faixapreta.ipynb | ###Markdown
Introduction to Python
###Code
# Parent scope
# Child scope (one indentation level in)
# Grandchild scope (two indentation levels in)
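# A small illustrative example (added for clarity): indentation defines the scope,
# and an inner function can read variables from its enclosing (parent) scope.
def pai():              # parent scope
    x = 'pai'
    def filho():        # child scope sees the parent's variables
        print(x)
    filho()
pai()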
###Output
_____no_output_____
###Markdown
Variables
###Code
nome = 'elysa'
nome
n1 = 8
n2 = 7
soma=0
soma=n1+n2
soma
###Output
_____no_output_____
###Markdown
Mathematical Operations
###Code
# addition (soma) uses +
# subtraction (subtracao) uses -
# multiplication (multiplicacao) uses *
# division (divisao) uses /
soma = 10+15
soma
subtracao = 50-12
subtracao
multiplicacao = 4*9
multiplicacao
divisao = 5/5
divisao
###Output
_____no_output_____
###Markdown
Input Methods: ways of receiving data from the user, that is, sending values into the program. In Python we use _input()_ and pass the prompt text as a parameter.
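A short illustrative example (added for clarity): input() always returns a string, so numeric values must be converted; the variable names and prompt are arbitrary.
###Code
# Illustrative only: convert the text returned by input() to a number.
idade = int(input('Qual é a sua idade? '))
print('Ano que vem você terá', idade + 1)
###Output
_____no_output_____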
###Code
nome = input('Qual é o seu nome?')
nome = input('oiii ')
# data type conversion, e.g. from string to int or to bool
bool(0)
###Output
_____no_output_____
###Markdown
Concatenation
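A brief illustrative example of actual string concatenation with + and with an f-string; the values are arbitrary.
###Code
# Illustrative only: two common ways to build a string from pieces.
nome = 'Elysa'
print('Olá, eu me chamo ' + nome)     # concatenation with +
print(f'Olá, eu me chamo {nome}')     # f-string interpolation
###Output
_____no_output_____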
###Code
nome = 'Elysa rainhaaaaa'
print('Olá, eu me chamo', nome)
###Output
Olá, eu me chamo Elysa rainhaaaaa
###Markdown
Average: let's calculate the average.
###Code
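# A minimal sketch (added for clarity, since this cell was empty):
# the average of two grades; the values are arbitrary.
nota1 = 8
nota2 = 7
media = (nota1 + nota2) / 2
print('A média é', media)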
###Output
_____no_output_____
###Markdown
Lists: along the lines of arrays; remember that it is the index that determines which element you get.
###Code
lista=['cachorro', 'gato']
mel = lista[0]
pepa = lista[1]
print(lista)
print(lista[0])
print('Lista dos animais', lista)
# all the values in a single list
type(lista)
lista_dados = ['1', '2'], ['3','4']
print(f'A matriz de leves {lista_dados[0][1]}')
len(lista_dados)
###Output
_____no_output_____
###Markdown
Conditionals: if => if, elif => else if, else => otherwise. _Remember to put the colons:_ e.g. if nome == 'Felipe': ... else: print('a'). Calculator
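A tiny illustrative example of the if/elif/else syntax described above; the values are arbitrary.
###Code
# Illustrative only: note the colon at the end of each condition line.
idade = 20
if idade < 18:
    print('menor de idade')
elif idade < 60:
    print('adulto')
else:
    print('idoso')
###Output
_____no_output_____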
###Code
n1 = float(input('Informe o primeiro valor: '))
n2 = float(input('Informe o segundo valor: '))
print('Informe a operação desejada:')
print(' 1. + ')
print(' 2. - ')
print(' 3. * ')
print(' 4. / ')
op = input()
if op == '1':
    resultado = n1 + n2
elif op == '2':
    resultado = n1 - n2
elif op == '3':
    resultado = n1 * n2
elif op == '4':
    resultado = n1 / n2
else:
    resultado = None
    print('Operação inválida')
if resultado is not None:
    print('A operação', op, 'gerou o resultado:', resultado)
###Output
_____no_output_____ |
datasets/versioning/unsupervised/wiai-facility/default-unsupervised.ipynb | ###Markdown
Creates a dataset version for unsupervised learning tasks.
###Code
%load_ext autoreload
%autoreload 2
from os import makedirs, symlink, rmdir
from os.path import join, dirname, exists, isdir, basename, splitext
from shutil import rmtree
import math
from collections import defaultdict
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from librosa import get_duration
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from termcolor import colored
from cac.utils.io import save_yml
from cac.utils.pandas import apply_filters
random_state = 0
np.random.seed(random_state)
# directory where the data resides
data_root = '/data/wiai-facility/'
save_root = join(data_root, 'processed')
version_dir = join(save_root, 'versions')
makedirs(version_dir, exist_ok=True)
save_audio_dir = join(save_root, 'audio')
attributes = pd.read_csv(join(save_root, 'attributes.csv'))
annotation = pd.read_csv(join(save_root, 'annotation.csv'))
annotation.shape, attributes.shape
type(annotation['unsupervised'][0])
annotation['unsupervised'] = annotation['unsupervised'].apply(eval)
type(annotation['unsupervised'][0])
###Output
_____no_output_____
###Markdown
Split patients in training and validation sets
###Code
all_patients = list(annotation['id'].unique())
len(all_patients)
train_ids, val_test_ids = train_test_split(all_patients, test_size=0.2, random_state=random_state)
val_ids, test_ids = train_test_split(val_test_ids, test_size=0.5, random_state=random_state)
len(train_ids), len(val_ids), len(test_ids)
df_train = apply_filters(annotation, {'id': train_ids}, reset_index=True)
df_train = df_train.drop(columns=['classification', 'users', 'audio_type', 'id'])
df_train.rename({'unsupervised': 'label'}, axis=1, inplace=True)
df_train['label'] = df_train['label'].apply(lambda x: {'unsupervised': x})
df_val = apply_filters(annotation, {'id': val_ids}, reset_index=True)
df_val = df_val.drop(columns=['classification', 'users', 'audio_type', 'id'])
df_val.rename({'unsupervised': 'label'}, axis=1, inplace=True)
df_val['label'] = df_val['label'].apply(lambda x: {'unsupervised': x})
df_test = apply_filters(annotation, {'id': test_ids}, reset_index=True)
df_test = df_test.drop(columns=['classification', 'users', 'audio_type', 'id'])
df_test.rename({'unsupervised': 'label'}, axis=1, inplace=True)
df_test['label'] = df_test['label'].apply(lambda x: {'unsupervised': x})
df_all = apply_filters(annotation, {'id': all_patients}, reset_index=True)
df_all = df_all.drop(columns=['classification', 'users', 'audio_type', 'id'])
df_all.rename({'unsupervised': 'label'}, axis=1, inplace=True)
df_all['label'] = df_all['label'].apply(lambda x: {'unsupervised': x})
df_train.shape, df_val.shape, df_test.shape, df_all.shape
version = 'default-unsupervised'
save_path = join(save_root, 'versions', '{}.yml'.format(version))
description = dict()
description['description'] = 'version for unsupervised task(s) with random split'
for name, _df in zip(['all', 'train', 'val', 'test'], [df_all, df_train, df_val, df_test]):
description[name] = {
'file': _df['file'].values.tolist(),
'label': _df['label'].values.tolist()
}
# save description
makedirs(dirname(save_path), exist_ok=True)
save_yml(description, save_path)
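# A hedged sketch (not part of the original notebook): how a consumer could read the
# version file written above. PyYAML is assumed to be available and the file is
# assumed to be plain YAML, matching the structure of `description`.
import yaml
with open(save_path) as f:
    version = yaml.safe_load(f)
print(version['description'], len(version['train']['file']))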
###Output
_____no_output_____ |
project_melanomia_keras_tuner.ipynb | ###Markdown
Install required libraries
###Code
!pip install keras-tuner
###Output
Collecting keras-tuner
[?25l Downloading https://files.pythonhosted.org/packages/a7/f7/4b41b6832abf4c9bef71a664dc563adb25afc5812831667c6db572b1a261/keras-tuner-1.0.1.tar.gz (54kB)
[K |██████ | 10kB 21.8MB/s eta 0:00:01
[K |████████████ | 20kB 26.5MB/s eta 0:00:01
[K |██████████████████ | 30kB 14.9MB/s eta 0:00:01
[K |████████████████████████ | 40kB 10.5MB/s eta 0:00:01
[K |██████████████████████████████ | 51kB 7.0MB/s eta 0:00:01
[K |████████████████████████████████| 61kB 4.6MB/s
[?25hRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (0.16.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (1.18.5)
Requirement already satisfied: tabulate in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (0.8.7)
Collecting terminaltables
Downloading https://files.pythonhosted.org/packages/9b/c4/4a21174f32f8a7e1104798c445dacdc1d4df86f2f26722767034e4de4bff/terminaltables-3.1.0.tar.gz
Collecting colorama
Downloading https://files.pythonhosted.org/packages/44/98/5b86278fbbf250d239ae0ecb724f8572af1c91f4a11edf4d36a206189440/colorama-0.4.4-py2.py3-none-any.whl
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (4.41.1)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (2.23.0)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (1.4.1)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from keras-tuner) (0.22.2.post1)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->keras-tuner) (2020.6.20)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->keras-tuner) (1.24.3)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->keras-tuner) (2.10)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->keras-tuner) (3.0.4)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->keras-tuner) (0.17.0)
Building wheels for collected packages: keras-tuner, terminaltables
Building wheel for keras-tuner (setup.py) ... [?25l[?25hdone
Created wheel for keras-tuner: filename=keras_tuner-1.0.1-cp36-none-any.whl size=73200 sha256=4f54b7673d6e5ab3d00bad804e74793798257ae3667adf008f5f391c35e0e4ff
Stored in directory: /root/.cache/pip/wheels/b9/cc/62/52716b70dd90f3db12519233c3a93a5360bc672da1a10ded43
Building wheel for terminaltables (setup.py) ... [?25l[?25hdone
Created wheel for terminaltables: filename=terminaltables-3.1.0-cp36-none-any.whl size=15356 sha256=fd580f71c11fcd24acedbe7782c9cef07b966de791de60376d671fc0ddb27ac5
Stored in directory: /root/.cache/pip/wheels/30/6b/50/6c75775b681fb36cdfac7f19799888ef9d8813aff9e379663e
Successfully built keras-tuner terminaltables
Installing collected packages: terminaltables, colorama, keras-tuner
Successfully installed colorama-0.4.4 keras-tuner-1.0.1 terminaltables-3.1.0
###Markdown
Download Dataset
###Code
!wget 'https://s3.amazonaws.com/isic-challenge-data/2019/ISIC_2019_Training_GroundTruth.csv'
!wget 'https://s3.amazonaws.com/isic-challenge-data/2019/ISIC_2019_Training_Metadata.csv'
!wget 'https://s3.amazonaws.com/isic-challenge-data/2019/ISIC_2019_Training_Input.zip'
!unzip 'ISIC_2019_Training_Input.zip'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
labels = pd.read_csv('ISIC_2019_Training_GroundTruth.csv')
label_df = pd.DataFrame(labels)
info = pd.read_csv('ISIC_2019_Training_Metadata.csv')
info_df = pd.DataFrame(info)
label_df.tail()
info_df.head()
info_df = info_df.drop('image',axis=1)
info_df
data = pd.concat([label_df, info_df], axis=1)
data.head(10)
data['lesion_id'].fillna(method ='bfill', inplace=True)
data["age_approx"].fillna(30.0, inplace = True)
data["sex"].fillna("male", inplace = True)
data["anatom_site_general"].fillna( method ='ffill', inplace = True)
data.head(20)
rows_with_nan = []
for index, row in data.iterrows():
is_nan_series = row.isnull()
if is_nan_series.any():
rows_with_nan.append(index)
print(rows_with_nan)
data['anatom_site_general'].unique()
anatom_site_general = {'anterior torso': 1,'upper extremity': 2,'posterior torso':3,'lower extremity':4, 'lateral torso':5,'head/neck':6,'palms/soles':7,'oral/genital':8}
data['anatom_site_general'] = [anatom_site_general[item] for item in data['anatom_site_general']]
sex = {'male': 0,'female':1}
data['sex'] = [sex[item] for item in data['sex']]
len(data['lesion_id'].unique())
data.head(6)
data = data.drop(['lesion_id'],axis=1)
target = data[['MEL']].values
data = data.drop(['image','MEL','NV'],axis=1)
data
label = target
list0 = [data, label]
list1 = ['x_train','y_train']
for i in range(2):
print('The shape of the {} is {}'.format(list1[i],list0[i].shape))
_,D = data.shape
print(D)
from google.colab import files
import cv2
###Output
_____no_output_____
###Markdown
**I use this part to upload the downloaded images instead of downloading them in Colab.**
###Code
'''uploaded = files.upload()
train_image = []
for i in uploaded.keys():
train_image.append(cv2.resize(cv2.cvtColor(cv2.imread(i), cv2.COLOR_BGR2RGB), (32,32)))'''
# Upload the images from folder
import os
def load_images_from_folder(folder):
train_image = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder,filename))
if img is not None:
train_image.append(cv2.resize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), (32,32)))
return train_image
images = load_images_from_folder('ISIC_2019_Training_Input')
from sklearn.preprocessing import StandardScaler
train_image = images[:20264]
test_image = images[20264:]
x_train = data[:20264]
x_test = data[20264:]
y_train = label[:20264]
y_test = label[20264:]
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.fit_transform(x_test)
i = 2
plt.imshow(train_image[i])
plt.xticks([])
plt.yticks([])
plt.show()
print(y_train[i])
i = 0
plt.imshow(test_image[i])
plt.xticks([])
plt.yticks([])
plt.show()
print(y_test[i])
train_image = np.asarray(train_image)
test_image = np.asarray(test_image)
train_image = train_image.astype('float32')
test_image = test_image.astype('float32')
mean = np.mean(train_image,axis=(0,1,2,3))
std = np.std(train_image,axis=(0,1,2,3))
train_image = (train_image-mean)/(std+1e-7)
test_image = (test_image-mean)/(std+1e-7)
from keras.utils import np_utils
nClasses = 2
y_train = np_utils.to_categorical(y_train, nClasses)
y_test = np_utils.to_categorical(y_test, nClasses)
print(test_image.shape)
print(y_train.shape)
print(y_test.shape)
input_shape = (32,32,3)
from keras import layers
from keras.models import Model
from keras.models import Sequential
from keras.layers import Dense, Dropout , Input , Flatten , Conv2D , MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping , ModelCheckpoint
from keras.optimizers import Adam, SGD, RMSprop
from keras import regularizers
from kerastuner.tuners import RandomSearch
def build_model(hp):
# model_1
model1_in = keras.Input(shape=(32,32,3))
x = layers.Conv2D(64,(2,2),padding='same', activation='relu')(model1_in)
x = layers.Conv2D(64,(2,2), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(128,(2,2),padding='same', activation='relu')(x)
x = layers.Conv2D(128,(2,2), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2,2))(x)
x = layers.Conv2D(256,(2,2),padding='same', activation='relu')(x)
x = layers.Conv2D(256,(2,2), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2,2))(x)
x = layers.Conv2D(512,(2,2),padding='same', activation='relu')(x)
x = layers.Conv2D(512,(2,2), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2,2))(x)
x = layers.Flatten()(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dense(1024, activation='relu')(x)
model1_out = layers.Dense(2, activation='sigmoid')(x)
model1 = keras.Model(model1_in, model1_out)
# model_2
model2_in = keras.Input(shape=(D,))
x = layers.Dense(16384, kernel_initializer='normal')(model2_in)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(8192, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(4096, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(2048, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(1024, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(512, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(128, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(64, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(32, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(16, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(4, kernel_initializer='normal')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(0.1)(x)
model2_out = layers.Dense(2, kernel_initializer='normal')(x)
model2 = keras.Model(model2_in, model2_out)
concatenated = concatenate([model1_out, model2_out])
x = layers.Dense(units=hp.Int('units', min_value=32, max_value=512, step=32), activation='relu')(concatenated)
out = Dense(2, activation='sigmoid', name='output_layer',kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))(x)
merged_model = Model([model1_in, model2_in], out)
merged_model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate',values=[1e-2, 1e-3, 1e-4])),loss='binary_crossentropy',metrics=['accuracy'])
return merged_model
tuner = RandomSearch(build_model,objective='val_accuracy',max_trials=5,executions_per_trial=3,directory='my_dir',project_name='helloworld')
tuner.search_space_summary()
final=tuner.search([train_image, x_train], y=y_train, batch_size=32, epochs=5,verbose=1,validation_data=([test_image,x_test],y_test))
best_model = tuner.get_best_models()[0]
tuner.results_summary()
best_model.evaluate([test_image,x_test],y_test)
###Output
159/159 [==============================] - 1s 8ms/step - loss: 0.3454 - accuracy: 0.8496
|
Netflix stock Price Estimation.ipynb | ###Markdown
***Netflix Stock Price Estimation***
###Code
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('NFLX.csv')
df.head(10)
plt.figure(figsize=(20,10))
plt.title('Netflix', fontsize = 20)
plt.xlabel('Days', fontsize= 20)
plt.ylabel('Final Price USD ($)', fontsize = 20)
plt.plot(df['Close'])
plt.show()
#We just need the Stock Closing Values
df = df[['Close']]
df.head(10)
#Create a variable to predict 'n' days out into the future, I'm predicting for 50 days
fd = 50
#Create a new column (the target or dependent variable) shifted 'n' units/days up
df['Estimation'] = df[['Close']].shift(-fd)
#print the data
df.tail(10)
#Create the feature data set((Means X in X,Y pair)
X = np.array(df.drop(['Estimation'], axis=1))[:-fd]  # axis=1 drops the 'Estimation' column, leaving 'Close' as the feature
print(X)
#Create the target data set((Means Y in X,Y pair)
y = np.array(df['Estimation'])[:-fd]
print(y)
###Output
[ 98.300003 97.610001 97.93 101.580002 95.489998 96.230003
98. 97.360001 97.660004 98.129997 97.860001 99.349998
99.720001 101.120003 101.059998 99.839996 99.589996 98.360001
101.209999 104.129997 102.190002 102.230003 105.699997 104.349998
104.940002 104.830002 104.449997 103.809998 102.68 106.980003
109.650002 110.419998 111.510002 108.400002 94.339996 96.769997
94.980003 95.900002 93.559998 92.43 91.040001 90.279999
90.029999 93.110001 91.540001 90.790001 89.370003 90.839996
90.540001 92.889999 90.019997 87.739998 87.879997 89.120003
88.629997 90.5 89.550003 92.489998 94.889999 97.889999
100.199997 102.809998 103.300003 102.57 101.510002 101.25
99.589996 100.739998 99.889999 97.860001 97.089996 93.75
93.849998 94.120003 94.290001 95.440002 94.449997 93.800003
90.989998 90.010002 91.660004 88.440002 85.330002 87.970001
91.059998 91.480003 96.669998 97.910004 94.599998 95.099998
97.059998 94.669998 95.970001 96.43 98.019997 98.389999
98.809998 85.839996 87.910004 85.989998 85.889999 87.660004
91.410004 92.040001 91.650002 91.25 94.370003 93.559998
93.099998 93.440002 97.029999 95.110001 93.989998 93.93
95.889999 96.589996 95.309998 95.120003 96.370003 96.160004
95.870003 95.260002 95.940002 95.18 97.32 97.580002
97.300003 97.449997 97.449997 97.379997 97.379997 100.089996
99.150002 99.660004 96.5 99.050003 96.089996 97.010002
97.339996 99.480003 98.059998 98.25 94.879997 95.830002
95.940002 94.559998 97.07 97.480003 96.669998 98.550003
102.629997 102.339996 106.279999 105.07 104.82 103.330002
100.589996 99.5 100.230003 101.470001 99.800003 118.790001
121.870003 123.349998 127.5 127.330002 126.510002 126.970001
126.470001 126.57 124.870003 123.300003 122.339996 122.139999
122.029999 124.580002 124.339996 122.190002 115.419998 114.779999
113.379997 113.589996 115.190002 115.029999 115.209999 117.959999
118.040001 117.690002 117.410004 116.93 117.510002 117.
117.220001 120.809998 119.160004 124.57 125.389999 123.239998
122.879997 122.830002 123.779999 123.440002 125. 124.220001
125.449997 125.120003 126.5 125.580002 125.589996 128.350006
125.889999 125.330002 123.800003 127.489998 129.410004 131.809998
131.070007 130.949997 129.889999 130.5 129.179993 133.699997
132.889999 133.259995 138.410004 138.600006 137.389999 140.110001
139.520004 138.960007 142.449997 141.220001 140.710007 140.779999
139.199997 140.25 140.970001 144. 144.740005 144.139999
144.820007 143.199997 140.820007 142.270004 142.009995 142.220001
142.600006 143.860001 142.779999 143.25 143.410004 142.130005
142.649994 139.529999 139.139999 141.940002 141.429993 140.320007
140.529999 140.889999 143.520004 143.190002 145.25 144.389999
145.110001 145.830002 142.419998 142.649994 141.839996 142.020004
144.059998 145.169998 146.470001 148.059998 147.809998 146.919998
145.5 143.619995 143.740005 143.110001 143.850006 144.350006
143.830002 142.919998 147.25 143.360001 139.759995 141.179993
142.869995 143.830002 152.160004 150.169998 153.080002 152.199997
155.350006 156.449997 155.589996 157.25 156.600006 156.380005
157.460007 160.279999 158.539993 160.809998 160.020004 159.410004
153.199997 155.699997 157.020004 157.160004 157.949997 157.75
163.050003 162.429993 163.220001 163.070007 162.990005 165.179993
165.059998 165.169998 165.610001 165.880005 158.029999 151.440002
152.720001 152.199997 151.759995 152.380005 153.399994 152.050003
155.029999 154.889999 158.020004 157.5 151.029999 153.410004
150.089996 149.410004 146.169998 147.610001 146.25 150.179993
152.669998 154.330002 158.75 158.210007 161.119995 161.699997
183.600006 183.860001 183.600006 188.539993 187.910004 186.970001
189.080002 182.679993 184.039993 181.660004 182.029999 180.740005
179.229996 180.270004 181.330002 178.360001 175.779999 169.139999
171.399994 171. 168.5 169.979996 166.089996 166.539993
166.759995 169.339996 169.059998 168.130005 165.949997 167.119995
168.809998 174.690002 174.710007 174.740005 174.520004 179.25
179. 176.419998 181.740005 185.149994 183.639999 182.630005
182.350006 184.619995 185.679993 185.509995 188.779999 187.350006
178.550003 179.380005 181.970001 180.699997 181.350006 177.009995
179.190002 184.449997 194.389999 198.020004 196.869995 195.080002
194.949997 195.860001 199.490005 202.679993 199.479996 195.539993
195.130005 194.160004 192.470001 196.020004 193.770004 195.210007
199.539993 198.369995 196.429993 198. 199.320007 200.009995
200.130005 195.889999 196.440002 193.899994 192.020004 195.080002
195.710007 192.119995 195.509995 193.199997 194.100006 196.229996
196.320007 195.75 195.050003 199.179993 188.149994 187.580002
186.820007 184.039993 184.210007 185.300003 185.199997 188.539993
186.220001 185.729996 187.860001 189.559998 190.119995 190.419998
187.020004 188.820007 188.619995 189.940002 187.759995 186.240005
192.710007 191.960007 201.070007 205.050003 205.630005 209.990005
212.050003 209.309998 212.520004 217.240005 221.229996 221.529999
217.5 220.330002 220.460007 227.580002 250.289993 261.299988
269.700012 274.600006 284.589996 278.799988 270.299988 265.070007
267.429993 254.259995 265.720001 264.559998 250.100006 249.470001
257.950012 258.269989 266. 280.269989 278.519989 278.549988
281.040009 278.140015 285.929993 294.160004 290.609985 291.380005
290.390015 301.049988 315. 325.220001 321.160004 317.
331.440002 321.299988 315.880005 321.549988 321.089996 318.450012
313.480011 317.5 316.480011 306.700012 300.940002 320.350006
300.690002 285.769989 295.350006 280.290009 283.670013 288.940002
293.970001 288.850006 289.929993 298.070007 303.670013 309.25
311.649994 307.779999 336.059998 334.519989 332.700012 327.769989
318.690002 307.019989 305.76001 313.980011 311.76001 312.459991
313.299988 313.359985 311.690002 320.089996 326.26001 326.890015
330.299988 329.600006 326.459991 328.529999 326.130005 328.190002
325.220001 324.179993 331.820007 331.619995 344.720001 349.290009
351.290009 349.730011 353.540009 351.600006 359.929993 361.809998
365.799988 367.450012 361.399994 360.570007 361.450012 363.829987
379.929993 392.869995 391.980011 390.399994 404.980011 416.76001
415.440002 411.089996 384.480011 399.390015 390.390015 395.420013
391.429993 398.179993 390.519989 398.390015 408.25 418.970001
415.630005 418.649994 413.5 395.799988 400.480011 379.480011
375.130005 364.230011 361.049988 362.660004 357.320007 362.869995
363.089996 355.209991 334.959991 337.450012 338.380005 344.5
343.089996 350.920013 351.829987 347.609985 349.359985 345.869995
341.309998 337.48999 326.399994 322.440002 316.779999 327.730011
338.019989 344.440002 339.170013 358.820007 364.579987 368.48999
368.040009 370.980011 367.679993 363.600006 341.179993 346.459991
348.679993 348.410004 355.929993 369.950012 368.149994 364.559998
350.350006 367.649994 366.959991 365.359985 361.190002 369.609985
369.429993 377.880005 380.709991 374.130005 381.429993 377.140015
377.049988 363.649994 351.350006 349.100006 355.709991 325.890015
321.100006 339.559998 333.130005 346.399994 364.700012 346.709991
332.670013 329.540009 333.160004 301.829987 312.869995 299.829987
284.839996 285.809998 301.779999 317.380005 309.100006 315.440002
310.839996 327.5 317.920013 303.470001 294.070007 294.399994
286.730011 290.059998 286.209991 270.600006 266.980011 262.130005
258.820007 261.429993 266.630005 282.649994 288.75 286.130005
290.299988 275.329987 282.880005 265.140015 269.700012 265.320007
274.880005 276.019989 266.839996 262.799988 270.940002 266.769989
260.579987 246.389999 233.880005 253.669998 255.570007 256.079987
267.660004 267.660004 271.200012 297.570007 315.339996 320.269989
319.959991 324.660004 337.589996 332.940002 354.640015 351.390015
353.190002 339.100006 325.160004 321.98999 326.670013 338.049988
335.660004 328.899994 340.660004 339.5 339.850006 351.339996
355.809998 352.190002 344.709991 347.570007 345.730011 359.970001
351.769989 359.070007 356.869995 361.920013 359.910004 356.970001
363.019989 363.910004 364.970001 362.869995 358.100006 357.320007
351.040009 354.299988 359.609985 352.600006 349.600006 358.859985
356.269989 361.209991 358.820007 361.459991 363.440002 358.779999
375.220001 377.869995 361.01001 366.230011 359.970001 353.369995
354.609985 356.559998 366.959991 367.720001 369.75 367.880005
365.48999 361.410004 364.709991 363.920013 367.649994 351.140015
348.869995 359.459991 354.73999 360.350006 377.339996 381.890015
374.230011 368.329987 374.850006 371.829987 370.540009 378.809998
379.059998 385.029999 378.670013 370.459991 364.369995 362.75
361.040009 345.26001 345.609985 354.98999 359.309998 354.450012
348.109985 354.269989 359.730011 352.209991 354.390015 355.059998
349.190002 351.850006 343.279999 336.630005 353.399994 355.730011
357.130005 360.869995 352.01001 351.269989 345.559998 343.429993
339.730011 350.619995 357.119995 363.519989 365.209991 369.209991
371.040009 360.299988 362.200012 370.019989 367.320007 374.600006
375.429993 381.720001 380.549988 376.160004 379.929993 381.
379.5 373.25 366.600006 365.98999 362.440002 325.209991
315.100006 310.619995 307.299988 317.940002 326.459991 335.779999
332.700012 325.929993 322.98999 319.5 318.829987 307.630005
310.100006 304.290009 315.899994 308.929993 310.829987 312.279999
299.109985 295.76001 302.799988 309.380005 298.98999 297.809998
296.929993 291.440002 294.980011 291.029999 291.769989 296.779999
293.75 289.290009 291.519989 293.25 290.170013 294.339996
287.98999 288.269989 288.859985 294.149994 294.290009 298.600006
291.559998 286.600006 270.75 265.920013 254.589996 264.75
263.309998 263.079987 267.619995 269.579987 268.029999 268.149994
272.790009 274.459991 270.720001 267.529999 280.480011 282.929993
285.529999 284.25 286.279999 293.350006 275.299988 278.049988
266.690002 271.269989 271.5 276.820007 281.859985 281.209991
291.450012 287.410004 286.809998 292.859985 288.029999 288.589996
289.570007 291.570007 294.179993 292.01001 283.109985 289.619995
295.029999 302.570007 302.600006 305.160004 311.690002 310.480011
315.549988 312.48999 315.929993 314.660004 309.98999 306.160004
304.320007 302.859985 307.350006 302.5 293.119995 298.929993
298.440002 298.5 304.209991]
###Markdown
Now we split the data into 80% training and 20% testing sets; keeping most of the data for training usually gives better accuracy than a 50/50 split.
###Code
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
###Output
_____no_output_____
###Markdown
Here we test two different models: a decision tree regressor and a linear regression. When there is a highly non-linear, complex relationship between the dependent and independent variables, a tree model tends to outperform classical linear regression; a decision tree is also often easier to explain to a non-technical audience. A quick numerical comparison of the two fitted models is sketched right after they are trained below.
###Code
#Create the decision tree regressor model
tree = DecisionTreeRegressor().fit(x_train, y_train)
#Create the linear regression model
lr = LinearRegression().fit(x_train, y_train)
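# A hedged sketch (not in the original notebook): compare the two fitted models
# numerically on the held-out split before looking at the plots below.
from sklearn.metrics import r2_score
print('Tree R^2:  ', r2_score(y_test, tree.predict(x_test)))
print('Linear R^2:', r2_score(y_test, lr.predict(x_test)))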
#Get the feature data,
#AKA all the rows from the original data set except the last 'x' days
x_future = df.drop(['Estimation'], 1)[:-fd]
#Get the last 'x' rows
x_future = x_future.tail(fd)
#Convert the data set into a numpy array
x_future = np.array(x_future)
print(x_future)
#Show the model tree prediction
tree_prediction = tree.predict(x_future)
print( tree_prediction )
print()
#Show the model linear regression prediction
lr_prediction = lr.predict(x_future)
print(lr_prediction)
###Output
[274.459991 331.074997 267.529999 280.480011 282.929993 285.529999
284.25 286.279999 293.350006 275.299988 278.049988 266.690002
271.269989 291.450012 276.820007 281.859985 281.209991 291.450012
312.48999 286.809998 292.859985 288.029999 288.589996 289.570007
291.570007 294.179993 292.01001 283.109985 289.619995 339.5
315.549988 302.600006 305.160004 311.690002 310.480011 315.549988
312.48999 315.929993 314.660004 309.98999 306.160004 304.320007
302.859985 307.350006 302.5 293.119995 298.929993 298.440002
298.5 304.209991]
[329.38447697 326.68391573 320.74790012 318.17007815 315.11002246
314.52254883 304.70230047 306.86802032 301.77376092 311.95350373
305.84214204 307.50807444 308.77945845 297.23186771 294.29457848
300.46729665 306.23671674 297.12665498 296.09202702 295.32043045
290.50675529 293.6106681 290.14726085 290.79609096 295.18891432
292.53218302 288.62162144 290.57688864 292.09377837 289.39321713
293.049497 287.48175269 287.72725842 288.24457239 292.88290148
293.00566793 296.78471338 290.6119689 286.26300179 272.36556915
268.13059163 258.19632737 267.10471335 265.84210621 265.64043042
269.62115166 271.33969087 269.98064698 270.08585971 274.15426801]
###Markdown
Tree prediction model
###Code
#Visualize the data
estimations = tree_prediction
#Plot the data
valid = df[X.shape[0]:]
valid['Estimations'] = estimations #Create a new column called 'Estimations' that will hold the predicted prices
plt.figure(figsize=(40,15))
plt.title('Model')
plt.xlabel('Days',fontsize=20)
plt.ylabel('Final Price USD ($)',fontsize=20)
plt.plot(df['Close'])
plt.plot(valid[['Close','Estimations']])
plt.legend(['Train', 'Val', 'Estimation' ], loc='lower right')
plt.show()
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""
###Markdown
Linear regression prediction
###Code
#Visualize the data
estimations = lr_prediction
#Plot the data
valid = df[X.shape[0]:]
valid['Estimations'] = estimations #Create a new column called 'Estimations' that will hold the predicted prices
plt.figure(figsize=(40,15))
plt.title('Model')
plt.xlabel('Days',fontsize=20)
plt.ylabel('Close Price USD ($)',fontsize=20)
plt.plot(df['Close'])
plt.plot(valid[['Close','Estimations']])
plt.legend(['Train', 'Val', 'Estimation' ], loc='lower right')
plt.show()
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""
|
demos/Demo_Notebook.ipynb | ###Markdown
Example of MoveIt usage. This notebook is a short example of driving MoveIt through the Python `moveit_commander` interface.
###Code
import sys
import copy
import rospy
import rospy as rp
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('move_group_python_interface_tutorial',
anonymous=True)
robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
group = moveit_commander.MoveGroupCommander("arm")
group
group.get_name()
# group.set_end_effector_link('gripper_eef')
group.has_end_effector_link()
print(robot.get_current_state())
group_variable_values = group.get_current_joint_values()
group_variable_values
group.set_joint_value_target(group.get_random_joint_values())
plan2 = group.plan()
group.execute(plan2)
group.get_current_pose()
pose_target = geometry_msgs.msg.Pose()
pose_target.orientation.w = 1
pose_target.position.x = 0.5
pose_target.position.y = 0.5
pose_target.position.z = 0.5
group.set_start_state_to_current_state()
group.set_pose_target(pose_target)
planx = group.plan()
from matplotlib import pyplot as plt
%matplotlib inline
import numpy as np
plt_list = []
for el in planx.joint_trajectory.points:
plt_list.append(el.positions)
for i in range(len(plt_list[0])):
plt.plot(np.asarray(plt_list)[:, i])
plt.show()
?group.execute
group.execute(planx)
group.get_random_joint_values()
group.compute_cartesian_path([pose_target], 0.1, 0.1)
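# Added sketch: compute_cartesian_path returns a (trajectory, fraction) tuple, where
# `fraction` is the portion of the requested path that could actually be planned.
# A minimal, illustrative way to unpack and execute it with the pose_target above:
cartesian_plan, fraction = group.compute_cartesian_path([pose_target], 0.01, 0.0)
print("planned fraction of the cartesian path: %.2f" % fraction)
if fraction > 0.9:
    group.execute(cartesian_plan)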
###Output
_____no_output_____ |
05 Anwendungsbeispiel Importing of image data with augmentation and classification.ipynb | ###Markdown
Use case: import of image data with augmentation and classificationThe goal of this example is to explain the organisation, import, and preparation of image data for a classification task. The following steps are carried out:- dynamically downloading and extracting the image data from an external source- reviewing the organisation on the file system- loading the data- transformations- augmentation- training- analysis- improvementThe dataset used is called caltech101 [3], with 101 classes and 40 to 800 images per class. The images are colour images with a resolution of roughly 200 - 300 pixels.Sources for the examples and data:- [1] [https://machinelearningmastery.com/how-to-develop-a-cnn-from-scratch-for-cifar-10-photo-classification/](https://machinelearningmastery.com/how-to-develop-a-cnn-from-scratch-for-cifar-10-photo-classification/)- [2] [https://github.com/bhavul/Caltech-101-Object-Classification](https://github.com/bhavul/Caltech-101-Object-Classification)- [3] [http://www.vision.caltech.edu/Image_Datasets/Caltech101/](http://www.vision.caltech.edu/Image_Datasets/Caltech101/)
###Code
#
# Turn off warning messages
#
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=Warning)
simplefilter(action='ignore', category=RuntimeWarning)
#
# Import the modules
#
import os
import logging
import tarfile
import operator
import random
from urllib.request import urlretrieve
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
#
# TensorFlow and Keras
#
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, Input, Dropout, Activation, Dense, MaxPooling2D, Flatten, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adadelta
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#
# For GPU support
#
tflogger = tf.get_logger()
tflogger.setLevel(logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR )
physical_devices = tf.config.list_physical_devices('GPU')
print(physical_devices)
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
#
# Set the size of the plots
#
plt.rcParams['figure.figsize'] = [16, 9]
#
# Print the library versions
#
print('working on keras version {} on tensorflow {} using sklearn {}'.format ( tf.keras.__version__, tf.version.VERSION, sklearn.__version__ ) )
###Output
_____no_output_____
###Markdown
Helper functions
###Code
urlDataSource = 'http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz'
localExtractionFolder = 'data/caltech101'
localDataArchive = 'data/caltech101/caltech101.tar.gz'
#
# Download the data from a URL
#
def download_dataset(url,dataset_file_path):
if os.path.exists(localDataArchive):
print("archive already downloaded.")
else:
print("started loading archive from url {}".format(url))
filename, headers = urlretrieve(url, dataset_file_path)
print("finished loading archive from url {}".format(url))
def extract_dataset(dataset_file_path, extraction_directory):
if (not os.path.exists(extraction_directory)):
os.makedirs(extraction_directory)
if (dataset_file_path.endswith("tar.gz") or dataset_file_path.endswith(".tgz")):
tar = tarfile.open(dataset_file_path, "r:gz")
tar.extractall(path=extraction_directory)
tar.close()
elif (dataset_file_path.endswith("tar")):
tar = tarfile.open(dataset_file_path, "r:")
tar.extractall(path=extraction_directory)
tar.close()
print("extraction of dataset from {} to {} done.".format(dataset_file_path,extraction_directory) )
###Output
_____no_output_____
###Markdown
Loading the data
###Code
#
# Run the data download
#
download_dataset(urlDataSource,localDataArchive)
#
# Extract the data
#
extract_dataset(localDataArchive,localExtractionFolder)
###Output
_____no_output_____
###Markdown
Organisation of image data on the file systemA good introduction to the topic can be found at- [Brownlee](https://machinelearningmastery.com/how-to-load-large-datasets-from-directories-for-deep-learning-with-keras/) - [Sarkar](https://towardsdatascience.com/a-single-function-to-streamline-image-classification-with-keras-bd04f5cfe6df) Creating the training data
###Code
#
# Helper functions
#
def get_images(object_category, data_directory):
if (not os.path.exists(data_directory)):
print("data directory not found.")
return
obj_category_dir = os.path.join(os.path.join(data_directory,"101_ObjectCategories"),object_category)
images = [os.path.join(obj_category_dir,img) for img in os.listdir(obj_category_dir)]
return images
def return_images_per_category(data_directory):
folder = os.path.join(data_directory,"101_ObjectCategories")
#print(folder)
categories=[d for d in os.listdir(folder) if os.path.isdir(os.path.join(folder,d))]
#print(categories)
return categories
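# Added sketch (illustration only, not used below): because the extracted dataset keeps one
# sub-folder per class under 101_ObjectCategories, Keras could also stream the images
# directly from disk with ImageDataGenerator.flow_from_directory instead of loading
# everything into memory. The parameters here are assumptions chosen to match the
# 300x200 (width x height) resize used by read_image further down.
def make_directory_iterator(data_directory, batch_size=16):
    directory_gen = ImageDataGenerator(rescale=1.0 / 255)
    return directory_gen.flow_from_directory(
        os.path.join(data_directory, "101_ObjectCategories"),
        target_size=(200, 300),   # (height, width)
        batch_size=batch_size,
        class_mode="categorical")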
#
# Read the image data from a file and resize it to 300x200 (width x height) pixels.
#
def read_image(image_path):
#img = cv2.imread(image_path, cv2.IMREAD_COLOR)
#img = cv2.resize(img, (300,200), interpolation=cv2.INTER_CUBIC)
im = Image.open(image_path).convert("RGB").resize((300,200))
np_img = np.array(im)
return np_img
#
# Collector function that iterates over all categories and gathers the files
#
def create_training_data(data_directory,fraction):
i = 0
X = []
Y = []
print("started to read dataset from {}.".format(data_directory) )
for category in return_images_per_category(data_directory):
if category == 'BACKGROUND_Google':
continue
print(".",end='')
for image in get_images(category, data_directory):
if not image.endswith('.jpg'):
continue
if random.uniform(0, 1) > fraction:
continue
X.insert(i, read_image(image) )
Y.insert(i, category )
i += 1
print("finished reading dataset.")
X = np.array(X)
return X,Y
#
# Create the training data. The fraction factor determines how much of the data is actually loaded into memory.
# Note: this function does not take care of balancing the classes.
#
X, Y = create_training_data(localExtractionFolder,fraction=0.4)
print('data X={}, y={}'.format(X.shape, len(Y)) )
print(Y)
#
# Transform the labels into a one-hot encoding
#
label_encoder = LabelEncoder()
Y_integer_encoded = label_encoder.fit_transform(Y)
Y_one_hot = to_categorical(Y_integer_encoded)
Y_one_hot.shape
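# Added sketch: to map one-hot vectors (or later model predictions) back to readable
# class names, the fitted label_encoder can be inverted. Uses only objects defined above.
example_class_indices = np.argmax(Y_one_hot[:5], axis=1)
print(label_encoder.inverse_transform(example_class_indices))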
#
# Normalise the image data
#
X_normalized = ( X.astype(np.float64) / 255 ) + 0.001
#
# Delete X to explicitly free memory
#
del X
#
# Split the data into train and test (validation) sets
#
X_train, X_validation, Y_train, Y_validation = train_test_split(X_normalized, Y_one_hot, test_size=0.25, random_state=42)
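# Added sketch: the split above is purely random and does not stratify by class, so rare
# categories may be unevenly distributed. A quick check with the variables defined above:
print('classes present in train set:     ', len(np.unique(np.argmax(Y_train, axis=1))))
print('classes present in validation set:', len(np.unique(np.argmax(Y_validation, axis=1))))
# One possible alternative (not used here) is train_test_split(..., stratify=Y_integer_encoded),
# which requires at least two samples per class after the fractional sampling above.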
del X_normalized
#
# valid values are now held in X_train, X_validation, Y_train, Y_validation, label_encoder, data_directory
#
###Output
_____no_output_____
###Markdown
Checking the data
###Code
#
# Shape of the data
#
print('train: X=%s, y=%s' % (X_train.shape, Y_train.shape))
print('test: X=%s, y=%s' % (X_validation.shape, Y_validation.shape))
#
# Plot a few images
#
for i in range(9):
plt.subplot(330 + 1 + i)
plt.imshow(X_train[i])
plt.show()
###Output
_____no_output_____
###Markdown
Building a model
###Code
#
# Create a simple model
#
def createModel():
model = Sequential()
model.add(Conv2D(16, (3,3), activation='relu', input_shape=(200,300,3)))
model.add(Conv2D(32, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(Conv2D(128, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(101, activation='softmax'))
return model
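# Added sketch: print the layer structure and parameter counts of the network defined above.
# This builds a throwaway instance purely for inspection; the training below creates its own.
inspection_model = createModel()
inspection_model.summary()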
#
# Compile and train the model
#
model_cnn = createModel()
model_cnn.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
#
# Callbacks control checkpoint saving and monitor the validation loss to guard against overfitting.
#
callbacks = [ModelCheckpoint('model_cnn_weights.h5', monitor='val_accuracy', save_best_only=True),  # 'val_accuracy' matches the metric key used in the plots below
             EarlyStopping(monitor='val_loss', patience=4, verbose=1, mode='auto')]
history = model_cnn.fit(X_train, Y_train, batch_size=16, epochs=6, verbose=1, validation_data=(X_validation,Y_validation), callbacks=callbacks)
#
# Evaluate the model
#
_, acc = model_cnn.evaluate(X_validation, Y_validation, verbose=0)
print('accuracy {:.3f} '.format(acc) )
#
# Plot the training history
#
def summarize_diagnostics(history,modelname):
plt.subplot(211)
plt.title('Cross Entropy Loss')
plt.plot(history.history['loss'], color='blue', label='train')
plt.plot(history.history['val_loss'], color='lightblue', label='test')
plt.subplot(212)
plt.title('Classification Accuracy')
plt.plot(history.history['accuracy'], color='green', label='train')
plt.plot(history.history['val_accuracy'], color='lightgreen', label='test')
plt.subplots_adjust(hspace=0.5)
plt.savefig( 'results/' + modelname + '_plot.png')
plt.show()
plt.close()
summarize_diagnostics(history,'05_model_cnn')
###Output
_____no_output_____
###Markdown
Optimised loading of the imagesThe loading function used so far loaded all images into memory, which quickly exhausts main memory. We therefore need a function that loads the images into memory batch by batch and makes them available for training.Such a function can be implemented with a Python **generator**. An explanation of generators can be found here [2]. The tutorial on loading with generators can be found here [1].Sources:- [1] [https://towardsdatascience.com/a-single-function-to-streamline-image-classification-with-keras-bd04f5cfe6df](https://towardsdatascience.com/a-single-function-to-streamline-image-classification-with-keras-bd04f5cfe6df)- [2] [https://www.python-kurs.eu/generatoren.php](https://www.python-kurs.eu/generatoren.php)
###Code
#
# Create a generator for the images
#
datagen = ImageDataGenerator()
it_train = datagen.flow(X_train, Y_train, batch_size=16)
#
# Training
#
model_cnn = createModel()
model_cnn.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
#
# New function: fit_generator
#
steps = int(X_train.shape[0] / 16)
history = model_cnn.fit_generator(it_train, steps_per_epoch=steps, epochs=6, validation_data=(X_validation,Y_validation), verbose=1, callbacks=callbacks)
#
# Evaluation
#
_, acc = model_cnn.evaluate(X_validation, Y_validation, verbose=0)
print('accuracy {:.3f} '.format(acc) )
summarize_diagnostics(history,'model_cnn_gen')
###Output
_____no_output_____
###Markdown
Optimisation through augmentationAugmentation extends the training dataset with artificially generated images. This makes a model more robust so that it does not latch onto individual pixels. Augmentation methods for images include:- shifting the width and height of the image content (width_shift_range, height_shift_range)- mirroring (flip)- rotation (rotation_range)- zooming (zoom_range)- brightness (brightness_range)- shearing (shear_range)Adding noise cannot be configured directly via the Keras [ImageDataGenerator](https://keras.io/preprocessing/image/), but it is approximately simulated by using dropout.
###Code
#
# Create a generator for the images
#
datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, rotation_range=5, zoom_range=0.1)
# prepare iterator
it_train = datagen.flow(X_train, Y_train, batch_size=16)
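# Added sketch: pull a single batch from the augmented iterator to visually sanity-check the
# transformations before training (uses only it_train and matplotlib from above).
aug_batch_x, aug_batch_y = next(it_train)
for i in range(min(9, aug_batch_x.shape[0])):
    plt.subplot(330 + 1 + i)
    plt.imshow(aug_batch_x[i])
plt.show()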
#
# Training
#
steps = int(X_train.shape[0] / 16)
model_cnn = createModel()
model_cnn.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
history = model_cnn.fit_generator(it_train, steps_per_epoch=steps, epochs=24, validation_data=(X_validation,Y_validation), verbose=1, callbacks=callbacks)
#
# Evaluation
#
_, acc = model_cnn.evaluate(X_validation, Y_validation, verbose=0)
print('accuracy {:.3f} '.format(acc) )
summarize_diagnostics(history,'05_model_cnn_aug')
###Output
_____no_output_____ |
jupyter/.ipynb_checkpoints/Momo-checkpoint.ipynb | ###Markdown
Mortality reportsUpdated daily, this document is [best viewed here](https://nbviewer.jupyter.org/github/jaimevalero/COVID-19/blob/master/jupyter/Momo.ipynb).Data from the daily Mortality Monitoring System (MoMo), which includes all-cause deaths from 3,929 computerised civil registries, representing 92% of the Spanish population.
###Code
# Load the data
import Loading_data
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from IPython.display import display, HTML
df = pd.read_csv('https://momo.isciii.es/public/momo/data')
df.to_csv('/tmp/momo.csv')
df.head()
import janitor
import datetime
def pipeline_basic_with_query(df,query):
''' Basic filtering, using janitor
Load the data, enrich the dates, and filter by a configurable query
'''
LISTA_COLUMNAS_A_BORRAR = ['Unnamed: 0',
'defunciones_observadas_lim_inf',
'defunciones_observadas_lim_sup',
'defunciones_esperadas',
'defunciones_esperadas_q01',
'defunciones_esperadas_q99']
return (
df
# Remove: columns
.remove_columns(LISTA_COLUMNAS_A_BORRAR)
.clean_names()
# Enrich: dates with year, month, and year-month columns
.rename_column( "fecha_defuncion", "date")
.to_datetime('date')
.join_apply(lambda x: x['date'].strftime('%Y') , new_column_name="date_year" )
.join_apply(lambda x: x['date'].strftime('%m') , new_column_name="date_month" )
.join_apply(lambda x: x['date'].strftime('%m-%d') , new_column_name="date_month_day" )
.join_apply(lambda x: x['date'].strftime('%U') , new_column_name="date_week" )
.join_apply(lambda x: x['date'].strftime('%Y-%m') , new_column_name="date_year_month" )
.join_apply(lambda x: x['date'].strftime('%Y-%U') , new_column_name="date_year_week" )
# Filter: by query
.filter_on( query )
.set_index('date')
)
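# Added sketch (illustration only): the helper above accepts any pandas-style query string,
# for example restricting the data to the Madrid scope that is analysed later in this notebook.
# It re-reads the /tmp/momo.csv file written at the top of the notebook.
example_query = 'nombre_ambito == "Madrid, Comunidad de" & nombre_gedad == "todos" & nombre_sexo == "todos"'
df_madrid_example = pipeline_basic_with_query(pd.read_csv('/tmp/momo.csv'), example_query)
print(df_madrid_example.shape)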
def pipeline_basic(df):
query = 'ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" '
return pipeline_basic_with_query(df,query)
def extraer_defunciones_anuales_por_periodo(periodo_de_tiempo,query):
'''Extract the comparison table by week or by year'''
def pipeline_agregado_anual(periodo_de_tiempo,df,year):
''' Return a dataframe of the data aggregated by year'''
return (
df
.filter_on('date_year == "'+year+'"' )
.groupby_agg( by='date_'+periodo_de_tiempo, agg='sum', agg_column_name="defunciones_observadas", new_column_name="agregados")
.rename_column( "agregados", year)
.join_apply(lambda x: x['date_'+periodo_de_tiempo] , new_column_name=periodo_de_tiempo )
.set_index('date_'+periodo_de_tiempo)
[[periodo_de_tiempo,year]]
.drop_duplicates()
)
def pipeline_comparativa_anual(periodo_de_tiempo,df_2018,df_2019,df_2020):
''' Merge three yearly dataframes on the time period'''
return (
df_2018
.merge( df_2019, on=periodo_de_tiempo, how='right')
.merge( df_2020, on=periodo_de_tiempo, how='left')
.sort_naturally(periodo_de_tiempo)
.set_index(periodo_de_tiempo)
.join_apply(lambda x: x['2020'] - x['2019'] , new_column_name="resta 2020 y 2019" )
)
# Get the data and clean it
df = pd.read_csv('/tmp/momo.csv')
df_basic = pipeline_basic_with_query(df,query)
# Get the data grouped by year
muertes_2018 = pipeline_agregado_anual(periodo_de_tiempo,df=df_basic,year='2018')
muertes_2019 = pipeline_agregado_anual(periodo_de_tiempo,df=df_basic,year='2019')
muertes_2020 = pipeline_agregado_anual(periodo_de_tiempo,df=df_basic,year='2020')
# Build a single table with one column per year
df_comparativa_años = pipeline_comparativa_anual(periodo_de_tiempo,muertes_2018,muertes_2019,muertes_2020)
return df_comparativa_años
def debug_extraer_defunciones_anuales_por_periodo():
""" Solo para depurar"""
query = 'ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" '
df_muertes_anuales_por_semana = extraer_defunciones_anuales_por_periodo("week",query)
df_muertes_anuales_por_mes = extraer_defunciones_anuales_por_periodo("month",query)
return df_muertes_anuales_por_semana , df_muertes_anuales_por_mes
#df1, df2 = debug_extraer_defunciones_anuales_por_periodo()
#df1
###Output
_____no_output_____
###Markdown
We produce the comparative plot of deaths by week for the years 2018, 2019 and 2020
###Code
from matplotlib import pyplot as plt
from IPython.display import display, HTML
import pandas as pd
import numpy as np
periodo_de_tiempo="week"
query = 'ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" '
df = extraer_defunciones_anuales_por_periodo(periodo_de_tiempo,query)
fig = plt.figure(figsize=(8, 6), dpi=80)
plt.xticks(rotation=90)
for ca in ['2018','2019','2020']:
plt.plot(df[ca])
plt.legend(df.columns)
plt.xlabel(periodo_de_tiempo)
plt.ylabel("Deaths by " + periodo_de_tiempo)
fig.suptitle('Comparativa de fallecimientos por año, según MOMO', fontsize=20)
plt.show()
periodo_de_tiempo="week"
query = 'ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" '
df = extraer_defunciones_anuales_por_periodo(periodo_de_tiempo,query)
df.style.format({"2020": "{:20,.0f}",
"2018": "{:20,.0f}",
"2019": "{:20,.0f}",
"resta 2020 y 2019": "{:20,.0f}", }).background_gradient(cmap='Wistia',subset=['resta 2020 y 2019'])
def get_current_year_comparison(query):
"""Saca muertos del año en curso en el ambito como argumento"""
df = pd.read_csv('/tmp/momo.csv')
df = pipeline_basic_with_query(df,query)
semana_actual = df.tail(1).date_week.values[0]
year_actual = df.tail(1).date_year.values[0]
date_month_day_actual= df.tail(1).date_month_day.values[0]
year_last = str(int(year_actual)-1)
death_this_year_today = df.query( f"date_year == '{year_actual}' ").defunciones_observadas.sum()
deaht_last_year_today = df.query( f"date_year == '{year_last}' and date_month_day <= '{date_month_day_actual}' ").defunciones_observadas.sum()
deaths_this_year_excess = death_this_year_today - deaht_last_year_today
return deaths_this_year_excess
query = f""" ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" """
deaths_this_year_excess = get_current_year_comparison(query)
display(HTML(f"<h4 id='excedente'>Excdente de muertes de este año, respecto al año anterior:</h4><h2>{deaths_this_year_excess:,.0f} </h2>"))
query = f""" nombre_ambito == "Madrid, Comunidad de" & nombre_gedad == "todos" & nombre_sexo == "todos" """
deaths_this_year_excess = get_current_year_comparison(query)
display(HTML(f"<h4 id='excedentemadrid'>Excedente de muertes de este año en Madrid, respecto al año anterior:</h4><h2>{deaths_this_year_excess:,.0f} </h2>"))
# Get the deaths in Madrid for men and for women
import numpy as np
import seaborn as sns
def pipeline_comparativa_semestral_diaria(df):
return (
df
.filter_on(" defunciones_observadas > 0")
.remove_columns(['nombre_gedad','ambito','cod_ambito','cod_ine_ambito','nombre_ambito','cod_sexo','cod_gedad','date_year','date_week','date_month','date_year_week'])
.rename_column( "nombre_sexo" , "sexo")
.rename_column( "date_year_month", "mes")
)
# Get the 2019 data
df = pd.read_csv('/tmp/momo.csv')
query = ' date_year == "2019" & nombre_ambito == "Madrid, Comunidad de" & nombre_gedad == "todos" & nombre_sexo != "todos" & date_month < "13" '
df_madrid_2019 = pipeline_basic_with_query(df,query)
df_madrid_2019 = pipeline_comparativa_semestral_diaria(df_madrid_2019)
# Get the 2020 data
df = pd.read_csv('/tmp/momo.csv')
query = ' date_year == "2020" & nombre_ambito == "Madrid, Comunidad de" & nombre_gedad == "todos" & nombre_sexo != "todos" & date_month < "13" '
df_madrid_2020 = pipeline_basic_with_query(df,query)
df_madrid_2020 = pipeline_comparativa_semestral_diaria(df_madrid_2020)
df_madrid_2019
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
display(HTML("<h2>Distribucion muertes en Madrid </h2>"))
display(HTML("<h3>Comparativa de defunciones, entre el primer semestre de 2019 y el del 2020</h3>"))
f, axes = plt.subplots(1 , 2 ,figsize=(16, 7), sharex=True)
sns.despine(left=True)
# Same limits, so the years can be compared
axes[0].set_ylim([0,500])
axes[1].set_ylim([0,500])
sns.violinplot(x="mes", y="defunciones_observadas", hue="sexo",
data=df_madrid_2019, split=True, scale="count", ax=axes[0] )
sns.violinplot(x="mes", y="defunciones_observadas", hue="sexo",
data=df_madrid_2020, split=True, scale="count", ax=axes[1])
# Aux functions
def print_categorical_variables(df):
""" Get a dict with categorical variables"""
my_dict = {}
cols = df.columns
num_cols = df._get_numeric_data().columns
# Show categorical values
categorical = list(set(cols) - set(num_cols))
for i in categorical :
if 'echa' not in i.lower() : my_dict[i] = df[i].unique()
return my_dict
df = pd.read_csv('/tmp/momo.csv')
my_dict = print_categorical_variables(df)
my_dict
momo2020 = pd.read_csv("/root/scripts/COVID-19/data/momo2019.csv", sep='\t',)
periodo_de_tiempo="week"
query = 'ambito == "nacional" & nombre_gedad == "todos" & nombre_sexo == "todos" '
df = extraer_defunciones_anuales_por_periodo(periodo_de_tiempo,query)
df = pd.read_csv('/tmp/momo.csv')
periodo_de_tiempo="week"
year=2021
def pipeline_agregado_anual(periodo_de_tiempo,df,year):
''' Return a dataframe of the data aggregated by year'''
return (
df
.filter_on('date_year == "'+year+'"' )
.groupby_agg( by='date_'+periodo_de_tiempo, agg='sum', agg_column_name="defunciones_observadas", new_column_name="agregados")
.rename_column( "agregados", year)
.join_apply(lambda x: x['date_'+periodo_de_tiempo] , new_column_name=periodo_de_tiempo )
.set_index('date_'+periodo_de_tiempo)
[[periodo_de_tiempo,year]]
.drop_duplicates()
)
df_basic = pipeline_basic(df)
pipeline_agregado_anual(periodo_de_tiempo, df=df_basic, year=str(year))
###Output
_____no_output_____ |
notebooks/cancer-pathway2.ipynb | ###Markdown
Network visualizationThis notebook constructs a network visualization connecting bacterial species to KEGG pathways.
###Code
# Parameters
d_col = 'C(Advanced_Stage_label)[T.Local]' # node color
ew = "rank" # edge weight
taxa_level = 'Rank6' # taxonomy level
# Preliminaries
%matplotlib inline
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display, HTML
def widen_notebook():
display(HTML("<style>.container { width:100% !important; }</style>"))
widen_notebook()
# data files
!ls ../data/edges/lung-cancer
edges_txt = "../data/edges/lung-cancer/Cancer_related_path2_edges.txt"
kegg_txt = "../data/edges/lung-cancer/LC_KO_metadata.txt"
microbes_txt = "../data/edges/lung-cancer/microbe-metadata.txt"
full_microbes_biom = "../data/edges/lung-cancer/Microbiome_with_paired_RNA_otu_table.a1.biom.txt"
filtered_microbes_biom = "../data/edges/lung-cancer/microbes.txt"
# Read data files into lists of dictionaries
def split_commas(line):
return line.strip().split("\t")
def CSVtodicts(filename):
f = open(filename)
result = []
headers = split_commas(f.readline())
for line in f.readlines():
values = split_commas(line)
dictionary = dict(zip(headers, values))
result.append(dictionary)
return result
edges = CSVtodicts(edges_txt)
keggs = CSVtodicts(kegg_txt)
microbes = CSVtodicts(microbes_txt)
len(edges), len(keggs), len(microbes)
import pandas as pd
microbes = pd.read_table(microbes_txt)
keggs = pd.read_table(kegg_txt)
edges = pd.read_table(edges_txt)
full_microbe_counts = pd.read_table(full_microbes_biom, skiprows=1, index_col=0)
filtered_microbe_counts = pd.read_table(filtered_microbes_biom, skiprows=1, index_col=0)
# scrub dataframe
featureid = '#SampleID'
microbes['abbv_name'] = microbes.apply(lambda x: '%s%d' % (x[taxa_level], x['#SampleID']), axis=1)
taxa = full_microbe_counts.iloc[:, -1]
microbe_counts = full_microbe_counts.iloc[:, :-1]
microbe_counts.shape, filtered_microbe_counts.shape
microbe_counts = microbe_counts.loc[:, filtered_microbe_counts.columns]
sns.distplot(microbe_counts.sum(axis=0))
microbe_props = microbe_counts.apply(lambda x: x / x.sum(), axis=0)
microbe_props = microbe_props.loc[microbes['#SampleID']].T
mean_microbe_abundance = np.log(microbe_props.mean(axis=0))
norm_microbe_abundance = (mean_microbe_abundance - mean_microbe_abundance.min()) / (mean_microbe_abundance.max() - mean_microbe_abundance.min())
fontmin = 8
fontmax = 30
fontsize = norm_microbe_abundance * (fontmax - fontmin) + fontmin
sns.distplot(norm_microbe_abundance)
select_microbes = list(set(edges.src.values))
select_kegg = list(set(edges.dest.values))
microbes.head()
def abbreviate(x):
return x.split('|')[-1]
abbreviate('k__Viruses|p__Viruses_noname|c__Viruses_noname|o__Viruses_noname')
edges['src'] = edges.src.apply(lambda x: int(x.replace('\"', '')))
edges['dest'] = edges.dest.apply(lambda x: x.replace('\"', ''))
microbe_dicts = microbes.T.to_dict().values()
kegg_dicts = keggs.T.to_dict().values()
edge_dicts = edges.T.to_dict().values()
microbe_metadata = microbes.set_index('#SampleID')
# scrub edges, because of R ...
edges['src_abbv'] = edges.src.apply(lambda x: microbe_metadata.loc[x, 'abbv_name'])
# name abbreviation mappings.
def abbreviate(d):
return '%s%d' % (d[taxa_level], d[featureid])
def microbe_name_dict(dicts):
return dict([abbreviate(d), d] for d in dicts)
name2microbe = microbe_name_dict(microbe_dicts)
name2microbe.items()[15]
def kegg_name_dict(dicts):
return dict([d['#OTUID'], d] for d in dicts)
name2kegg = kegg_name_dict(kegg_dicts)
name2kegg.items()[32]
# Construct the network graph from the edges.
from jp_gene_viz import dGraph
G = dGraph.WGraph()
for e in edge_dicts:
name = microbe_metadata.loc[e['src'], 'abbv_name']
G.add_edge(name, e["dest"], e[ew], e)
# Construct the network widget from the graph
from jp_gene_viz import dNetwork
dNetwork.load_javascript_support()
N = dNetwork.NetworkDisplay()
N.load_data(G)
import matplotlib.colors as colors
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
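# Added sketch: quick sanity check of MidpointNormalize; values at vmin, vcenter and vmax
# should map to 0, 0.5 and 1 respectively (illustration only).
check_norm = MidpointNormalize(vmin=-2.0, vcenter=0.0, vmax=1.5)
print(check_norm(np.array([-2.0, 0.0, 1.5])))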
import matplotlib as mpl
from matplotlib.colors import rgb2hex
# TODO: make parameter
# https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
#cmap = plt.get_cmap('RdYlGn')
cmap = plt.get_cmap('PiYG')
edge_cmap = plt.get_cmap('Greys')
microbe_norm = MidpointNormalize(vmin=-2., vcenter=0, vmax=1.5)
#microbe_norm = mpl.colors.Normalize(vmin=0, vmax=1)
kegg_norm = mpl.colors.Normalize(vmin=-2, vmax=2)
edges.head()
edge_lookup = edges.set_index(['src_abbv', 'dest'])
# Configure and display the network
# TODO: remove congested labels
N.labels_button.value = True
N.size_slider.value = 1000
# TODO: add node size / font size are variable features
# TODO: swap circles with squares
# TODO: light grey dashed lines for low probability edges
# TODO: add edge weight size
# TODO: remove labels programmatically
# TODO: allow for edges to be colored on a gradient (i.e. greys)
# main goals
# 1. focus on pathways of interest
# 2. advance vs local
# 3. weighted by relative abundance
# colorize the nodes based on weights (hacky, sorry)
dg = N.display_graph
for node_name in dg.node_weights:
svg_name = dg.node_name(node_name)
if node_name in name2microbe:
d = name2microbe[node_name]
value = name2microbe[node_name][d_col]
if np.isnan(value):
value = 0
node_color = rgb2hex(cmap(microbe_norm(value))[:3])
value = norm_microbe_abundance.loc[d['#SampleID']]
#node_color = rgb2hex(cmap(microbe_norm(value))[:3])
N.override_node(node_name, color=node_color, radius=value*10, shape='circle')
N.override_label(node_name, hide=value < 0.3, font_size=value*18)
else:
N.override_node(node_name, shape='rect', radius=5, color='#6193F7')
N.override_label(node_name, font_size=18)
for src, dest in dg.edge_weights:
m = np.log(edges['rank']).min()
p = edge_lookup.loc[(src, dest), 'rank']
width = np.log(p - m)/10
N.override_edge(src, dest, color=rgb2hex(edge_cmap(p+0.1)), stroke_width=width)
# show labels
N.labels_button.value = True
# rerun the layout
N.layout_click()
# draw the network with the new colors and sizes
N.draw()
# show the network
N.show()
sns.distplot(edges['rank'])
cmap(microbe_norm(value))
cmap(microbe_norm(-2))
###Output
_____no_output_____ |
mflix-python/notebooks/.ipynb_checkpoints/cursor_methods_agg_equivalents-checkpoint.ipynb | ###Markdown
Cursor Methods and Aggregation Equivalents In this lesson we're going to discuss methods we can call against Pymongo cursors, and the aggregation stages that would perform the same tasks in a pipeline. Limiting
###Code
import pymongo
from bson.json_util import dumps
uri = "<your_atlas_uri>"
client = pymongo.MongoClient(uri)
mflix = client.sample_mflix
movies = mflix.movies
###Output
_____no_output_____
###Markdown
Here's (point) a collection object for the `movies` collection.
###Code
limited_cursor = movies.find(
{ "directors": "Sam Raimi" },
{ "_id": 0, "title": 1, "cast": 1 }
).limit(2)
print(dumps(limited_cursor, indent=2))
###Output
_____no_output_____
###Markdown
So this is a find query with a predicate (point) and a projection (point). And the find() method is always gonna return a cursor to us. But before assigning that cursor to a variable, we've transformed it with the limit() method, to make sure no more than 2 documents are returned by this cursor.(run command)And we can see we only got two (point) documents back.
###Code
pipeline = [
{ "$match": { "directors": "Sam Raimi" } },
{ "$project": { "_id": 0, "title": 1, "cast": 1 } },
{ "$limit": 2 }
]
limited_aggregation = movies.aggregate( pipeline )
print(dumps(limited_aggregation, indent=2))
###Output
_____no_output_____
###Markdown
Now this is the equivalent operation with the aggregation framework. Instead of tacking a .limit() to the end of the cursor, we add $limit as a stage in our pipeline.(enter command)And it's the same output. And these (point to `$match` and `$project`) aggregation stages represent the query predicate and the projection from when we were using the query language. Sorting
###Code
from pymongo import DESCENDING, ASCENDING
sorted_cursor = movies.find(
{ "directors": "Sam Raimi" },
{ "_id": 0, "year": 1, "title": 1, "cast": 1 }
).sort("year", ASCENDING)
print(dumps(sorted_cursor, indent=2))
###Output
_____no_output_____
###Markdown
This is an example of the `sort()` (point) cursor method. `sort()` takes two parameters, the key we're sorting on and the sorting order. In this example we're sorting on year (point), in increasing (point) order.ASCENDING and DESCENDING are values from the pymongo library to specify sort direction, but they're really just the integers 1 and -1.(enter command)And we can see that the movies were returned to us in order of the year they were made.
###Code
pipeline = [
{ "$match": { "directors": "Sam Raimi" } },
{ "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
{ "$sort": { "year": ASCENDING } }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
###Output
_____no_output_____
###Markdown
And this is the equivalent pipeline, with a sort (point) stage that corresponds to a dictionary, giving the sort (point) field, and the direction (point) of the sort.(enter command)And the agg framework was able to sort by year here.
###Code
sorted_cursor = movies.find(
{ "cast": "Tom Hanks" },
{ "_id": 0, "year": 1, "title": 1, "cast": 1 }
).sort([("year", ASCENDING), ("title", ASCENDING)])
print(dumps(sorted_cursor, indent=2))
###Output
_____no_output_____
###Markdown
So just a special case to note here, sorting on multiple keys in the cursor method is gonna look a little different.When sorting on one key, the `sort()` method takes two arguments, the key and the sort order.When sorting on two or more keys, the `sort()` method takes a single argument, an array of tuples. And each tuple has a key and a sort order.(enter command)And we can see that after sorting on year, the cursor sorted the movie titles alphabetically.
###Code
pipeline = [
{ "$match": { "cast": "Tom Hanks" } },
{ "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
{ "$sort": { "year": ASCENDING, "title": ASCENDING } }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
###Output
_____no_output_____
###Markdown
Skipping
###Code
pipeline = [
{ "$match": { "directors": "Sam Raimi" } },
{ "$project": { "_id": 0, "title": 1, "cast": 1 } },
{ "$count": "num_movies" }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
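# Added note (not part of the original lesson): the same total can also be obtained without
# an aggregation pipeline via count_documents(), the replacement for the deprecated
# cursor count() method mentioned below.
print(movies.count_documents({ "directors": "Sam Raimi" }))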
###Output
_____no_output_____
###Markdown
(enter command)So we know from counting the documents in this aggregation, that if we don't specify anything else, we're getting 15 (point) documents returned to us.Note that the cursor method `count()` that counts documents in a cursor has been deprecated. So if you want to know how many documents are returned by a query, you should use the `$count` aggregation stage.
###Code
skipped_cursor = movies.find(
{ "directors": "Sam Raimi" },
{ "_id": 0, "title": 1, "cast": 1 }
).skip(14)
print(dumps(skipped_cursor, indent=2))
###Output
_____no_output_____
###Markdown
The `skip()` method allows us to skip documents in a collection, so only documents we did not skip appear in the cursor. Because we only have 15 documents, skipping 14 of them should only leave us with 1.(enter command)And look at that, we've only got 1 document in our cursor. The issue is, we don't really know which documents we skipped over, because we haven't specified a sort key and really, we have no idea the order in which documents are stored in the cursor.
###Code
skipped_sorted_cursor = movies.find(
{ "directors": "Sam Raimi" },
{ "_id": 0, "title": 1, "year": 1, "cast": 1 }
).sort("year", ASCENDING).skip(10)
print(dumps(skipped_sorted_cursor, indent=2))
###Output
_____no_output_____
###Markdown
So here we've sorted on year (point) and then skipped the first 10. Now we know that when we're skipping 10 documents, we're skipping the 10 oldest Sam Raimi movies in this collection.(enter command)And we only got 5 of those 15 documents back, because we skipped 10 of them.These cursor methods are nice because we can tack them on a cursor in the order we want them applied. It even kinda makes our Python look like Javascript, with this `.sort()` and `.skip()`.
###Code
pipeline = [
{ "$match": { "directors": "Sam Raimi" } },
{ "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
{ "$sort": { "year": ASCENDING } },
{ "$skip": 10 }
]
sorted_skipped_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_skipped_aggregation, indent=2))
###Output
_____no_output_____ |
ml_engine_training_walkthrough.ipynb | ###Markdown
Model Training and Serving Online Predictions on Google AI Platform: OverviewThe following is a brief walkthrough of the files and structure of this repository, aimed at clarifying the process of writing the Keras code, then packaging and staging a training job on GCP's ML Engine.The documented steps are:* Set up a GCP project.* Authenticate the GCP service account and create a gcloud bucket with cloud credentials.* Package and submit a training job to Google's AI Platform.* Towards the end, the notebook also highlights how to **serve predictions from the model**.Keras is a high-level API for building and training deep learning models. Explore more about it at [tf.keras](https://www.tensorflow.org/guide/keras).**Note:** The walkthrough assumes you are running this notebook on **Google Colab**; just proceed with the listed/prompted instructions. The same notebook can also be run in a local Jupyter environment with minimal changes. 1. Set up your GCP project1. [Select or create a GCP project.](https://console.cloud.google.com/cloud-resource-manager)2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the AI Platform ("Cloud Machine Learning Engine") and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. * In case you already have a GCP project configured, just enter suitable details.
###Code
PROJECT_ID = "nifty-episode-231612" #@param {type:"string"}
! gcloud config set project $PROJECT_ID
###Output
Updated property [core/project].
To take a quick anonymous survey, run:
$ gcloud alpha survey
###Markdown
2. Authenticate your GCP account* **Run the following cell and follow the prompts to authenticate your account via OAuth.**
###Code
import sys
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS ''
!gcloud config list
###Output
[component_manager]
disable_update_check = True
[core]
account = [email protected]
project = nifty-episode-231612
Your active configuration is: [default]
###Markdown
2.1 Set up a Cloud Storage bucket* Submitting a training job through the Cloud SDK requires uploading a Python package containing your training code files to a Cloud Storage bucket.* AI Platform then runs the code from this package.* It also saves the resulting trained model and configs from your job in the same bucket, which can then be versioned to serve online predictions.If you haven't created a bucket already, set the name of your Cloud Storage bucket below (ensure that it is unique). If you already have a bucket, enter the name of your existing bucket.Also set the `REGION` variable, which is needed throughout the process, both for training and for online prediction.Explore the available regions [here](https://cloud.google.com/ml-engine/docs/tensorflow/regions).
###Code
BUCKET_NAME="nifty-episode-231612-mlengine" #@param {type:"string"}
REGION="asia-east1" #@param {type:"string"}
###Output
_____no_output_____
###Markdown
**Note**: Run the following cell **ONLY** if you haven't created a bucket already, or want to create a new one.
###Code
! gsutil mb -l $REGION gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
* **Finally, validate access to your Cloud Storage bucket by examining its contents:**
###Code
! gsutil ls -al gs://$BUCKET_NAME
###Output
209581009 2019-06-24T12:45:15Z gs://nifty-episode-231612-mlengine/flask_app_git_140619.zip#1561380315059208 metageneration=1
1136 2019-03-14T10:52:42Z gs://nifty-episode-231612-mlengine/img_gcloudpath.txt#1552560762918268 metageneration=1
885 2019-05-06T12:32:48Z gs://nifty-episode-231612-mlengine/my_job_filesconfig.pickle#1557145968875019 metageneration=1
10925 2019-03-20T12:29:13Z gs://nifty-episode-231612-mlengine/preprocessing_test.py#1553084953044119 metageneration=1
87552713 2019-07-24T12:21:59Z gs://nifty-episode-231612-mlengine/train_on_gcloud_bilkul_final.zip#1563970919338895 metageneration=1
32636 2019-03-19T12:12:00Z gs://nifty-episode-231612-mlengine/yeaah.jpg#1552997520104445 metageneration=1
gs://nifty-episode-231612-mlengine/cloud_test_package/
gs://nifty-episode-231612-mlengine/cloud_test_package_2/
gs://nifty-episode-231612-mlengine/dev/
gs://nifty-episode-231612-mlengine/food_data/
gs://nifty-episode-231612-mlengine/keras-frcnn/
gs://nifty-episode-231612-mlengine/keras-frcnn_firebase_database_fetching/
gs://nifty-episode-231612-mlengine/my_job_files/
gs://nifty-episode-231612-mlengine/test_job/
gs://nifty-episode-231612-mlengine/test_job_beta1/
gs://nifty-episode-231612-mlengine/test_job_kaafi_late/
gs://nifty-episode-231612-mlengine/test_job_with_txtfileData6/
gs://nifty-episode-231612-mlengine/train_on_gcloud/
gs://nifty-episode-231612-mlengine/training_data_and_annotations_for_cloud_060519/
gs://nifty-episode-231612-mlengine/training_scripts_collection/
TOTAL: 6 objects, 297179304 bytes (283.41 MiB)
###Markdown
3. Submit a training job on AI PlatformThe following code is a Keras implementation of **FRCNN (an object detection model)** used for detecting food objects in images. Google AI Platform is used to package the code and submit it as a training job on Google Cloud Platform.* It outputs a model .hdf5 file (the weights of the trained FRCNN model) and a config file holding key model architecture parameters, and saves them at a specified path in the Cloud Storage bucket.**Run the following cell to:*** First, download the training code (clone the repo to get the code and dependencies).* The listed dependencies only need to be installed for local training; since this code is trained on AI Platform, the dependencies come preinstalled based on the [runtime version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list) chosen during training.* Change the notebook's working directory to the core directory containing **setup.py** and the **trainer/** directory.
###Code
!git clone https://github.com/leoninekev/training-frcnn-google-ml-engine.git
#! pip install -r requirements.txt
# Set the working directory to the sample code directory
%cd training-frcnn-google-ml-engine/move_to_cloudshell/
! ls -pR
###Output
.:
annotations.txt setup.py trainer/
./trainer:
config.py __init__.py RoiPoolingConv.py vgg.py
data_augment.py losses.py simple_parser_pkl.py
data_generators.py resnet.py simple_parser_text.py
FixedBatchNormalization.py roi_helpers.py task.py
###Markdown
**Now, prior to submitting a training job, a few key variables need to be configured as follows:** * Navigate to the **trainer/** directory to modify bucket_path, the output model_name, and config_name in **task.py** in accordance with your GCP service account.
###Code
%cd trainer
!ls
###Output
/content/train_on_gcloud/training-frcnn-google-ml-engine/move_to_cloudshell/trainer
config.py __init__.py RoiPoolingConv.py vgg.py
data_augment.py losses.py simple_parser_pkl.py
data_generators.py resnet.py simple_parser_text.py
FixedBatchNormalization.py roi_helpers.py task.py
###Markdown
* Run **%pycat task.py** (this opens a popup displaying the content of task.py).* Copy all the code to a local Python IDE, or to a cell below, and edit the default argument values in the **parsers** for * **--path** * **--config_filename** * **--output_weight_path** * **--bucket_path**as well as the name for **model_weights** before saving.
###Code
%pycat task.py
#copy the code from popup, paste it to a python IDLE locally, edit it and again copy the whole post edit
###Output
_____no_output_____
###Markdown
* Copy the edited code from the local IDE into the following Colab cell beneath the command **%%writefile task.py** (as shown below) and run the cell; the edits will be written to a new task.py file (you may also save it as a new task_file.py and later delete the older task.py).
###Code
%%writefile task.py
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
from tensorflow.python.lib.io import file_io
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
import config, data_generators
import losses as losses
import roi_helpers
from keras.utils import generic_utils
from keras.utils.data_utils import get_file  # needed for the pretrained-weight download below
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data(annotation.txt file).",default="gs://nifty-episode-231612-mlengine/training_data_and_annotations_for_cloud_060519/annotations.txt")# /data.pickle -- for pickled annotations
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple_text or simple_pickle",
default="simple")# simple_pick --for simple_parser_pkl
parser.add_option("-n", "--num_rois", type="int", dest="num_rois", help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips", help="Augment with horizontal flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips", help="Augment with vertical flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90", help="Augment with 90 degree rotations in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=1)# deafult=1 --for test
parser.add_option("--config_filename", dest="config_filename", help=
"Location to store all the metadata related to the training (to be used when testing).",
default="config_new.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.",default='gs://nifty-episode-231612-mlengine/my_job_files/')
parser.add_option("--input_weight_path", dest="input_weight_path", help="Input path for weights. If not specified, will try to load default weights provided by keras.",
default='https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
parser.add_option("--bucket_path", dest="bucket_path", help="bucket path for stroing weights & configs", default='gs://nifty-episode-231612-mlengine/my_job_files/')
(options, args) = parser.parse_args()
if not options.train_path:# if filename is not given
parser.error('Error: path to training data must be specified. Pass --path to command line')
if options.parser == 'simple':
from simple_parser_text import get_data
elif options.parser == 'simple_pick':
from simple_parser_pkl import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
C.num_rois = int(options.num_rois)
if options.network == 'vgg':
C.network = 'vgg'
import vgg as nn
elif options.network == 'resnet50':
import resnet as nn
C.network = 'resnet50'
else:
print('Not a valid model')
raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
C.base_net_weights = options.input_weight_path
else:
# set the path to weights based on backend and model
C.base_net_weights = nn.get_weight_path()# 'resnet50_weights_th_dim_ordering_th_kernels_notop.h5'
all_imgs, classes_count, class_mapping = get_data(options.train_path)
if 'bg' not in classes_count:
classes_count['bg'] = 0
class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print('Num classes (including bg) = {}'.format(len(classes_count)))
config_output_filename = options.bucket_path + options.config_filename# gs://input-your-bucket-name/train_on_gcloud/my_job_files/config.pickle
def new_open(name, mode, buffering=-1):# to open & load files from gcloud storage
return file_io.FileIO(name, mode)
with new_open(config_output_filename, 'wb') as config_f:
pickle.dump(C,config_f, protocol=2)# dumps config.pickle(compatible for python 2) in gcloud bucket
print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(config_output_filename))
random.shuffle(all_imgs)
num_imgs = len(all_imgs)
train_imgs = [s for s in all_imgs if s['imageset'] == 'trainval']
val_imgs = [s for s in all_imgs if s['imageset'] == 'test']
print('Num train samples {}'.format(len(train_imgs)))
print('Num val samples {}'.format(len(val_imgs)))
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,K.image_dim_ordering(), mode='val')
if K.image_dim_ordering() == 'th':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
try:
print('loading weights from {}'.format(C.base_net_weights))
weights_path = get_file('base_weights.h5',C.base_net_weights)# downloading and adding weight paths
model_rpn.load_weights(weights_path)
model_classifier.load_weights(weights_path)
print('weights loaded.')
except:
print('Could not load pretrained model weights. Weights can be found in the keras application folder \
https://github.com/fchollet/keras/tree/master/keras/applications')
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
model_all.compile(optimizer='sgd', loss='mae')
epoch_length = 1000
num_epochs = int(options.num_epochs)
iter_num = 0
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
start_time = time.time()
best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
print('Starting training')
vis = True
for epoch_num in range(num_epochs):
progbar = generic_utils.Progbar(epoch_length)
print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))
while True:
try:
if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
X, Y, img_data = next(data_gen_train)
loss_rpn = model_rpn.train_on_batch(X, Y)
P_rpn = model_rpn.predict_on_batch(X)
R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois//2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
losses[iter_num, 0] = loss_rpn[1]
losses[iter_num, 1] = loss_rpn[2]
losses[iter_num, 2] = loss_class[1]
losses[iter_num, 3] = loss_class[2]
losses[iter_num, 4] = loss_class[3]
progbar.update(iter_num+1, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3])])
iter_num += 1
if iter_num == epoch_length:
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
print('Loss RPN classifier: {}'.format(loss_rpn_cls))
print('Loss RPN regression: {}'.format(loss_rpn_regr))
print('Loss Detector classifier: {}'.format(loss_class_cls))
print('Loss Detector regression: {}'.format(loss_class_regr))
print('Elapsed time: {}'.format(time.time() - start_time))
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
iter_num = 0
start_time = time.time()
if curr_loss < best_loss:
if C.verbose:
print('Total loss decreased from {} to {}, saving weights'.format(best_loss,curr_loss))
best_loss = curr_loss
model_weights= 'model_frcnn_new.hdf5'
model_all.save_weights(model_weights)
with new_open(model_weights, mode='r') as infile:# to write hdf5 file to gs://input-your-bucket-name/train_on_gcloud/my_job_files/
with new_open(C.model_path + model_weights, mode='w+') as outfile:
outfile.write(infile.read())
break
except Exception as e:
print('Exception: {}'.format(e))
continue
print('Training complete, exiting.')
###Output
Overwriting task.py
###Markdown
* Exit **trainer/** and move back to the directory containing **setup.py**, which is needed to package the dependencies before training on gcloud.* Verify the present working directory
###Code
%cd ..
!pwd
!ls
###Output
/content/train_on_gcloud/training-frcnn-google-ml-engine/move_to_cloudshell
/content/train_on_gcloud/training-frcnn-google-ml-engine/move_to_cloudshell
annotations.txt setup.py trainer
###Markdown
* Define a **JOB_NAME** for the training job
###Code
JOB_NAME='test_job_GcloudColab_3'
###Output
_____no_output_____
###Markdown
Run the following cell to package the **`trainer/`** directory:* It uploads the package to the specified **gs://$BUCKET_NAME/JOB_NAME/** and instructs AI Platform to run the **`trainer.task`** module from that package.* The **`--stream-logs`** flag lets you view training logs in the cell below (one can also view logs and other job details in the GCP Console, if you've enabled the **Stackdriver logging service**).Before packaging your code and submitting the training job, ensure that the following crucial parameters are defined (the bucket, job, and region names below are examples; set them to your own values):* **BUCKET_NAME** = 'nifty-episode-231612-mlengine'* **JOB_NAME** = 'test_job_GcloudColab_3'* **REGION**= 'asia-east1'* **package-path**= trainer/* **module-name** = trainer.task* **runtime-version**=1.13 Now submit a training job to AI Platform.* The following runs the training module on gcloud and exports the training package and trained model to Cloud Storage.
###Code
! gcloud ai-platform jobs submit training $JOB_NAME --package-path trainer/ --module-name trainer.task --region $REGION --runtime-version=1.13 --scale-tier=CUSTOM --master-machine-type=standard_gpu --staging-bucket gs://$BUCKET_NAME --stream-logs
###Output
Job [test_job_GcloudColab_3] submitted successfully.
INFO 2019-07-26 10:32:05 +0000 service Validating job requirements...
INFO 2019-07-26 10:32:06 +0000 service Job creation request has been successfully validated.
INFO 2019-07-26 10:32:06 +0000 service Job test_job_GcloudColab_3 is queued.
INFO 2019-07-26 10:32:06 +0000 service Waiting for job to be provisioned.
INFO 2019-07-26 10:32:08 +0000 service Waiting for training program to start.
INFO 2019-07-26 10:33:44 +0000 master-replica-0 Running task with arguments: --cluster={"master": ["127.0.0.1:2222"]} --task={"type": "master", "index": 0} --job={ "scale_tier": "CUSTOM", "master_type": "standard_gpu", "package_uris": ["gs://nifty-episode-231612-mlengine/test_job_GcloudColab_3/a7c878c3589e08ed0c3fe69b3984abe538c68eaa9b63a69a50f1cdb03be1cd44/frcnn_trainer-0.1.tar.gz"], "python_module": "trainer.task", "region": "asia-east1", "runtime_version": "1.13", "run_on_raw_vm": true}
WARNING 2019-07-26 10:34:01 +0000 master-replica-0 From /usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
WARNING 2019-07-26 10:34:01 +0000 master-replica-0 Instructions for updating:
WARNING 2019-07-26 10:34:01 +0000 master-replica-0 Colocations handled automatically by placer.
INFO 2019-07-26 10:34:26 +0000 master-replica-0 Running module trainer.task.
INFO 2019-07-26 10:34:26 +0000 master-replica-0 Downloading the package: gs://nifty-episode-231612-mlengine/test_job_GcloudColab_3/a7c878c3589e08ed0c3fe69b3984abe538c68eaa9b63a69a50f1cdb03be1cd44/frcnn_trainer-0.1.tar.gz
INFO 2019-07-26 10:34:26 +0000 master-replica-0 Running command: gsutil -q cp gs://nifty-episode-231612-mlengine/test_job_GcloudColab_3/a7c878c3589e08ed0c3fe69b3984abe538c68eaa9b63a69a50f1cdb03be1cd44/frcnn_trainer-0.1.tar.gz frcnn_trainer-0.1.tar.gz
INFO 2019-07-26 10:34:27 +0000 master-replica-0 Installing the package: gs://nifty-episode-231612-mlengine/test_job_GcloudColab_3/a7c878c3589e08ed0c3fe69b3984abe538c68eaa9b63a69a50f1cdb03be1cd44/frcnn_trainer-0.1.tar.gz
INFO 2019-07-26 10:34:27 +0000 master-replica-0 Running command: pip install --user --upgrade --force-reinstall --no-deps frcnn_trainer-0.1.tar.gz
ERROR 2019-07-26 10:34:29 +0000 master-replica-0 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.
INFO 2019-07-26 10:34:29 +0000 master-replica-0 Processing ./frcnn_trainer-0.1.tar.gz
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Building wheels for collected packages: frcnn-trainer
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Building wheel for frcnn-trainer (setup.py): started
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Building wheel for frcnn-trainer (setup.py): finished with status 'done'
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Stored in directory: /root/.cache/pip/wheels/5f/dc/f1/44f4d4c2d7e4540b60cecc409bf0e8c1347e3d2de9032c76c7
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Successfully built frcnn-trainer
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Installing collected packages: frcnn-trainer
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Successfully installed frcnn-trainer-0.1
INFO 2019-07-26 10:34:30 +0000 master-replica-0 Running command: pip install --user frcnn_trainer-0.1.tar.gz
ERROR 2019-07-26 10:34:31 +0000 master-replica-0 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.
INFO 2019-07-26 10:34:31 +0000 master-replica-0 Processing ./frcnn_trainer-0.1.tar.gz
INFO 2019-07-26 10:34:31 +0000 master-replica-0 Collecting pillow (from frcnn-trainer==0.1)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Downloading https://files.pythonhosted.org/packages/cc/a4/79b5f36d1e1a2b426073bd62217d1530fcd939950c2936651e6b39127a9b/Pillow-6.1.0-cp27-cp27mu-manylinux1_x86_64.whl (2.1MB)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Collecting keras (from frcnn-trainer==0.1)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Downloading https://files.pythonhosted.org/packages/5e/10/aa32dad071ce52b5502266b5c659451cfd6ffcbf14e6c8c4f16c0ff5aaab/Keras-2.2.4-py2.py3-none-any.whl (312kB)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: h5py in /usr/local/lib/python2.7/dist-packages (from frcnn-trainer==0.1) (2.9.0)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (1.2.1)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (1.1.0)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (1.16.0)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (1.12.0)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: pyyaml in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (3.13)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python2.7/dist-packages (from keras->frcnn-trainer==0.1) (1.0.8)
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Building wheels for collected packages: frcnn-trainer
INFO 2019-07-26 10:34:32 +0000 master-replica-0 Building wheel for frcnn-trainer (setup.py): started
INFO 2019-07-26 10:34:33 +0000 master-replica-0 Building wheel for frcnn-trainer (setup.py): finished with status 'done'
INFO 2019-07-26 10:34:33 +0000 master-replica-0 Stored in directory: /root/.cache/pip/wheels/5f/dc/f1/44f4d4c2d7e4540b60cecc409bf0e8c1347e3d2de9032c76c7
INFO 2019-07-26 10:34:33 +0000 master-replica-0 Successfully built frcnn-trainer
INFO 2019-07-26 10:34:33 +0000 master-replica-0 Installing collected packages: pillow, keras, frcnn-trainer
INFO 2019-07-26 10:34:34 +0000 master-replica-0 Found existing installation: frcnn-trainer 0.1
INFO 2019-07-26 10:34:34 +0000 master-replica-0 Uninstalling frcnn-trainer-0.1:
INFO 2019-07-26 10:34:34 +0000 master-replica-0 Successfully uninstalled frcnn-trainer-0.1
INFO 2019-07-26 10:34:34 +0000 master-replica-0 Successfully installed frcnn-trainer-0.1 keras-2.2.4 pillow-6.1.0
INFO 2019-07-26 10:34:34 +0000 master-replica-0 Running command: python -m trainer.task
ERROR 2019-07-26 10:34:35 +0000 master-replica-0 Using TensorFlow backend.
WARNING 2019-07-26 10:37:14 +0000 master-replica-0 From /usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
WARNING 2019-07-26 10:37:14 +0000 master-replica-0 Instructions for updating:
WARNING 2019-07-26 10:37:14 +0000 master-replica-0 Colocations handled automatically by placer.
INFO 2019-07-26 10:37:21 +0000 master-replica-0 Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
INFO 2019-07-26 10:37:21 +0000 master-replica-0 successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
INFO 2019-07-26 10:37:21 +0000 master-replica-0 XLA service 0xa8245b0 executing computations on platform CUDA. Devices:
INFO 2019-07-26 10:37:21 +0000 master-replica-0 StreamExecutor device (0): Tesla K80, Compute Capability 3.7
INFO 2019-07-26 10:37:21 +0000 master-replica-0 CPU Frequency: 2200000000 Hz
INFO 2019-07-26 10:37:21 +0000 master-replica-0 XLA service 0xa88d090 executing computations on platform Host. Devices:
INFO 2019-07-26 10:37:21 +0000 master-replica-0 StreamExecutor device (0): <undefined>, <undefined>
INFO 2019-07-26 10:37:21 +0000 master-replica-0 Found device 0 with properties:
ERROR 2019-07-26 10:37:21 +0000 master-replica-0 name: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235
ERROR 2019-07-26 10:37:21 +0000 master-replica-0 pciBusID: 0000:00:04.0
ERROR 2019-07-26 10:37:21 +0000 master-replica-0 totalMemory: 11.17GiB freeMemory: 11.11GiB
INFO 2019-07-26 10:37:21 +0000 master-replica-0 Adding visible gpu devices: 0
INFO 2019-07-26 10:37:21 +0000 master-replica-0 Device interconnect StreamExecutor with strength 1 edge matrix:
INFO 2019-07-26 10:37:21 +0000 master-replica-0 0
INFO 2019-07-26 10:37:21 +0000 master-replica-0 0: N
INFO 2019-07-26 10:37:21 +0000 master-replica-0 Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10757 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)
WARNING 2019-07-26 10:37:43 +0000 master-replica-0 From /usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
WARNING 2019-07-26 10:37:43 +0000 master-replica-0 Instructions for updating:
WARNING 2019-07-26 10:37:43 +0000 master-replica-0 Use tf.cast instead.
INFO 2019-07-26 10:37:54 +0000 master-replica-0 successfully opened CUDA library libcublas.so.10.0 locally
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Parsing annotation files
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Training images per class:
INFO 2019-07-26 10:37:56 +0000 master-replica-0 {'bg': 0, 'cake': 439, 'donuts': 218, 'dosa': 119}
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Num classes (including bg) = 4
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Config has been written to gs://nifty-episode-231612-mlengine/my_job_files/config_new.pickle, and can be loaded when testing to ensure correct results
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Num train samples 235
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Num val samples 37
INFO 2019-07-26 10:37:56 +0000 master-replica-0 loading weights from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Could not load pretrained model weights. Weights can be found in the keras application folder https://github.com/fchollet/keras/tree/master/keras/applications
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Starting training
INFO 2019-07-26 10:37:56 +0000 master-replica-0 Epoch 1/1
INFO 2019-07-26 10:38:01 +0000 master-replica-0 1/1000 [..............................] - ETA: 10:46:29 - rpn_cls: 0.0355 - rpn_regr: 0.6953 - detector_cls: 1.3863 - detector_regr: 0.0000e+00
INFO 2019-07-26 10:38:07 +0000 master-replica-0 2/1000 [..............................] - ETA: 6:04:33 - rpn_cls: 1.0024 - rpn_regr: 0.3613 - detector_cls: 0.7019 - detector_regr: 0.0000e+00
INFO 2019-07-26 10:38:10 +0000 master-replica-0 3/1000 [..............................] - ETA: 4:36:41 - rpn_cls: 3.8190 - rpn_regr: 0.2634 - detector_cls: 0.4893 - detector_regr: 0.0000e+00
INFO 2019-07-26 10:38:16 +0000 master-replica-0 4/1000 [..............................] - ETA: 3:40:26 - rpn_cls: 4.9847 - rpn_regr: 0.2146 - detector_cls: 0.3670 - detector_regr: 0.0000e+00
INFO 2019-07-26 10:38:19 +0000 master-replica-0 5/1000 [..............................] - ETA: 3:15:55 - rpn_cls: 4.8022 - rpn_regr: 0.2192 - detector_cls: 0.2936 - detector_regr: 0.0000e+00
INFO 2019-07-26 10:38:21 +0000 master-replica-0 6/1000 [..............................] - ETA: 2:51:57 - rpn_cls: 5.5423 - rpn_regr: 0.2010 - detector_cls: 0.5805 - detector_regr: 0.0716
INFO 2019-07-26 10:38:24 +0000 master-replica-0 7/1000 [..............................] - ETA: 2:31:01 - rpn_cls: 5.0106 - rpn_regr: 0.2156 - detector_cls: 1.5031 - detector_regr: 0.2850
INFO 2019-07-26 10:38:26 +0000 master-replica-0 8/1000 [..............................] - ETA: 2:18:44 - rpn_cls: 4.5348 - rpn_regr: 0.2018 - detector_cls: 1.6300 - detector_regr: 0.3100
INFO 2019-07-26 10:38:27 +0000 master-replica-0 9/1000 [..............................] - ETA: 2:05:51 - rpn_cls: 5.1633 - rpn_regr: 0.1902 - detector_cls: 2.0086 - detector_regr: 0.3185
INFO 2019-07-26 10:38:34 +0000 master-replica-0 10/1000 [..............................] - ETA: 1:55:20 - rpn_cls: 5.5111 - rpn_regr: 0.1831 - detector_cls: 1.8563 - detector_regr: 0.2910
INFO 2019-07-26 10:38:42 +0000 master-replica-0 11/1000 [..............................] - ETA: 1:55:25 - rpn_cls: 5.8313 - rpn_regr: 0.1730 - detector_cls: 2.1454 - detector_regr: 0.6538
INFO 2019-07-26 10:38:43 +0000 master-replica-0 12/1000 [..............................] - ETA: 1:56:00 - rpn_cls: 6.1133 - rpn_regr: 0.1651 - detector_cls: 2.3864 - detector_regr: 0.8608
INFO 2019-07-26 10:38:49 +0000 master-replica-0 13/1000 [..............................] - ETA: 1:48:21 - rpn_cls: 5.7041 - rpn_regr: 0.1619 - detector_cls: 2.5128 - detector_regr: 1.2216
INFO 2019-07-26 10:38:50 +0000 master-replica-0 14/1000 [..............................] - ETA: 1:47:53 - rpn_cls: 5.2967 - rpn_regr: 0.1642 - detector_cls: 2.4772 - detector_regr: 1.1579
INFO 2019-07-26 10:38:52 +0000 master-replica-0 15/1000 [..............................] - ETA: 1:42:06 - rpn_cls: 5.5040 - rpn_regr: 0.1593 - detector_cls: 2.3792 - detector_regr: 1.2459
INFO 2019-07-26 10:38:53 +0000 master-replica-0 16/1000 [..............................] - ETA: 1:36:56 - rpn_cls: 5.2784 - rpn_regr: 0.1560 - detector_cls: 2.4131 - detector_regr: 1.2310
INFO 2019-07-26 10:38:55 +0000 master-replica-0 17/1000 [..............................] - ETA: 1:32:45 - rpn_cls: 5.0561 - rpn_regr: 0.1526 - detector_cls: 2.7452 - detector_regr: 1.1951
INFO 2019-07-26 10:38:56 +0000 master-replica-0 18/1000 [..............................] - ETA: 1:28:42 - rpn_cls: 5.2975 - rpn_regr: 0.1537 - detector_cls: 2.5927 - detector_regr: 1.1287
INFO 2019-07-26 10:38:57 +0000 master-replica-0 19/1000 [..............................] - ETA: 1:25:12 - rpn_cls: 5.5923 - rpn_regr: 0.1491 - detector_cls: 2.5440 - detector_regr: 1.1179
INFO 2019-07-26 10:39:03 +0000 master-replica-0 20/1000 [..............................] - ETA: 1:21:57 - rpn_cls: 5.7739 - rpn_regr: 0.1466 - detector_cls: 2.4383 - detector_regr: 1.1887
INFO 2019-07-26 10:39:05 +0000 master-replica-0 21/1000 [..............................] - ETA: 1:22:31 - rpn_cls: 5.6154 - rpn_regr: 0.1432 - detector_cls: 2.4789 - detector_regr: 1.2931
INFO 2019-07-26 10:39:06 +0000 master-replica-0 22/1000 [..............................] - ETA: 1:19:40 - rpn_cls: 5.5576 - rpn_regr: 0.1405 - detector_cls: 2.4947 - detector_regr: 1.3267
INFO 2019-07-26 10:39:12 +0000 master-replica-0 23/1000 [..............................] - ETA: 1:17:23 - rpn_cls: 5.4793 - rpn_regr: 0.1475 - detector_cls: 2.3862 - detector_regr: 1.2690
INFO 2019-07-26 10:39:13 +0000 master-replica-0 24/1000 [..............................] - ETA: 1:17:41 - rpn_cls: 5.3610 - rpn_regr: 0.1453 - detector_cls: 2.3804 - detector_regr: 1.3305
INFO 2019-07-26 10:39:17 +0000 master-replica-0 25/1000 [..............................] - ETA: 1:15:23 - rpn_cls: 5.5078 - rpn_regr: 0.1477 - detector_cls: 2.2855 - detector_regr: 1.2773
INFO 2019-07-26 10:39:19 +0000 master-replica-0 26/1000 [..............................] - ETA: 1:14:54 - rpn_cls: 5.4839 - rpn_regr: 0.1466 - detector_cls: 2.2458 - detector_regr: 1.3036
INFO 2019-07-26 10:39:20 +0000 master-replica-0 27/1000 [..............................] - ETA: 1:13:02 - rpn_cls: 5.6349 - rpn_regr: 0.1445 - detector_cls: 2.2171 - detector_regr: 1.3048
INFO 2019-07-26 10:39:21 +0000 master-replica-0 28/1000 [..............................] - ETA: 1:11:07 - rpn_cls: 5.5244 - rpn_regr: 0.1472 - detector_cls: 2.1757 - detector_regr: 1.3288
INFO 2019-07-26 10:39:27 +0000 master-replica-0 29/1000 [..............................] - ETA: 1:09:21 - rpn_cls: 5.4895 - rpn_regr: 0.1479 - detector_cls: 2.1122 - detector_regr: 1.3315
INFO 2019-07-26 10:39:28 +0000 master-replica-0 30/1000 [..............................] - ETA: 1:09:54 - rpn_cls: 5.6446 - rpn_regr: 0.1455 - detector_cls: 2.0562 - detector_regr: 1.3145
INFO 2019-07-26 10:39:34 +0000 master-replica-0 31/1000 [..............................] - ETA: 1:08:15 - rpn_cls: 5.6011 - rpn_regr: 0.1438 - detector_cls: 2.0176 - detector_regr: 1.2941
INFO 2019-07-26 10:39:35 +0000 master-replica-0 32/1000 [..............................] - ETA: 1:09:05 - rpn_cls: 5.5594 - rpn_regr: 0.1430 - detector_cls: 1.9659 - detector_regr: 1.2888
INFO 2019-07-26 10:39:37 +0000 master-replica-0 33/1000 [..............................] - ETA: 1:07:38 - rpn_cls: 5.7004 - rpn_regr: 0.1433 - detector_cls: 1.9298 - detector_regr: 1.2700
INFO 2019-07-26 10:39:39 +0000 master-replica-0 34/1000 [>.............................] - ETA: 1:06:12 - rpn_cls: 5.5777 - rpn_regr: 0.1403 - detector_cls: 1.8936 - detector_regr: 1.2524
INFO 2019-07-26 10:39:40 +0000 master-replica-0 35/1000 [>.............................] - ETA: 1:05:09 - rpn_cls: 5.5612 - rpn_regr: 0.1403 - detector_cls: 1.8663 - detector_regr: 1.2297
INFO 2019-07-26 10:39:42 +0000 master-replica-0 36/1000 [>.............................] - ETA: 1:03:57 - rpn_cls: 5.5529 - rpn_regr: 0.1434 - detector_cls: 1.8498 - detector_regr: 1.2076
INFO 2019-07-26 10:39:43 +0000 master-replica-0 37/1000 [>.............................] - ETA: 1:02:43 - rpn_cls: 5.4029 - rpn_regr: 0.1425 - detector_cls: 1.8322 - detector_regr: 1.1852
INFO 2019-07-26 10:39:48 +0000 master-replica-0 38/1000 [>.............................] - ETA: 1:01:35 - rpn_cls: 5.4077 - rpn_regr: 0.1447 - detector_cls: 1.8013 - detector_regr: 1.1637
INFO 2019-07-26 10:39:50 +0000 master-replica-0 39/1000 [>.............................] - ETA: 1:02:07 - rpn_cls: 5.2910 - rpn_regr: 0.1441 - detector_cls: 1.7775 - detector_regr: 1.1518
INFO 2019-07-26 10:39:51 +0000 master-replica-0 40/1000 [>.............................] - ETA: 1:01:05 - rpn_cls: 5.1768 - rpn_regr: 0.1424 - detector_cls: 1.7590 - detector_regr: 1.1352
INFO 2019-07-26 10:39:57 +0000 master-replica-0 41/1000 [>.............................] - ETA: 1:00:02 - rpn_cls: 5.2510 - rpn_regr: 0.1410 - detector_cls: 1.7310 - detector_regr: 1.1075
INFO 2019-07-26 10:39:58 +0000 master-replica-0 42/1000 [>.............................] - ETA: 1:00:47 - rpn_cls: 5.2264 - rpn_regr: 0.1411 - detector_cls: 1.6984 - detector_regr: 1.0811
INFO 2019-07-26 10:40:03 +0000 master-replica-0 43/1000 [>.............................] - ETA: 59:44 - rpn_cls: 5.2443 - rpn_regr: 0.1411 - detector_cls: 1.6892 - detector_regr: 1.0687
INFO 2019-07-26 10:40:04 +0000 master-replica-0 44/1000 [>.............................] - ETA: 1:00:08 - rpn_cls: 5.2973 - rpn_regr: 0.1399 - detector_cls: 1.6585 - detector_regr: 1.0566
INFO 2019-07-26 10:40:06 +0000 master-replica-0 45/1000 [>.............................] - ETA: 59:12 - rpn_cls: 5.1897 - rpn_regr: 0.1386 - detector_cls: 1.6395 - detector_regr: 1.0402
INFO 2019-07-26 10:40:07 +0000 master-replica-0 46/1000 [>.............................] - ETA: 58:19 - rpn_cls: 5.1703 - rpn_regr: 0.1385 - detector_cls: 1.6173 - detector_regr: 1.0277
INFO 2019-07-26 10:40:10 +0000 master-replica-0 47/1000 [>.............................] - ETA: 57:32 - rpn_cls: 5.2082 - rpn_regr: 0.1364 - detector_cls: 1.6112 - detector_regr: 1.0145
INFO 2019-07-26 10:40:12 +0000 master-replica-0 48/1000 [>.............................] - ETA: 57:21 - rpn_cls: 5.0998 - rpn_regr: 0.1355 - detector_cls: 1.5910 - detector_regr: 1.0023
INFO 2019-07-26 10:40:13 +0000 master-replica-0 49/1000 [>.............................] - ETA: 56:32 - rpn_cls: 5.0131 - rpn_regr: 0.1343 - detector_cls: 1.5734 - detector_regr: 0.9903
INFO 2019-07-26 10:40:15 +0000 master-replica-0 50/1000 [>.............................] - ETA: 55:53 - rpn_cls: 5.0798 - rpn_regr: 0.1341 - detector_cls: 1.5555 - detector_regr: 0.9805
INFO 2019-07-26 10:40:16 +0000 master-replica-0 51/1000 [>.............................] - ETA: 55:05 - rpn_cls: 5.1948 - rpn_regr: 0.1436 - detector_cls: 1.5276 - detector_regr: 0.9613
INFO 2019-07-26 10:40:17 +0000 master-replica-0 52/1000 [>.............................] - ETA: 54:21 - rpn_cls: 5.2184 - rpn_regr: 0.1429 - detector_cls: 1.5193 - detector_regr: 0.9508
INFO 2019-07-26 10:40:19 +0000 master-replica-0 53/1000 [>.............................] - ETA: 53:40 - rpn_cls: 5.1812 - rpn_regr: 0.1414 - detector_cls: 1.5201 - detector_regr: 0.9407
INFO 2019-07-26 10:40:20 +0000 master-replica-0 54/1000 [>.............................] - ETA: 53:03 - rpn_cls: 5.2213 - rpn_regr: 0.1418 - detector_cls: 1.5265 - detector_regr: 0.9375
INFO 2019-07-26 10:40:21 +0000 master-replica-0 55/1000 [>.............................] - ETA: 52:26 - rpn_cls: 5.1698 - rpn_regr: 0.1414 - detector_cls: 1.5241 - detector_regr: 0.9287
INFO 2019-07-26 10:40:23 +0000 master-replica-0 56/1000 [>.............................] - ETA: 51:46 - rpn_cls: 5.0775 - rpn_regr: 0.1426 - detector_cls: 1.5094 - detector_regr: 0.9189
INFO 2019-07-26 10:40:24 +0000 master-replica-0 57/1000 [>.............................] - ETA: 51:12 - rpn_cls: 5.0335 - rpn_regr: 0.1414 - detector_cls: 1.5075 - detector_regr: 0.9095
INFO 2019-07-26 10:40:31 +0000 master-replica-0 58/1000 [>.............................] - ETA: 50:39 - rpn_cls: 5.0686 - rpn_regr: 0.1433 - detector_cls: 1.5065 - detector_regr: 0.9032
INFO 2019-07-26 10:40:32 +0000 master-replica-0 59/1000 [>.............................] - ETA: 51:31 - rpn_cls: 5.1396 - rpn_regr: 0.1428 - detector_cls: 1.4877 - detector_regr: 0.9025
INFO 2019-07-26 10:40:34 +0000 master-replica-0 60/1000 [>.............................] - ETA: 50:59 - rpn_cls: 5.1352 - rpn_regr: 0.1445 - detector_cls: 1.4815 - detector_regr: 0.8948
INFO 2019-07-26 10:40:35 +0000 master-replica-0 61/1000 [>.............................] - ETA: 50:25 - rpn_cls: 5.0512 - rpn_regr: 0.1435 - detector_cls: 1.4740 - detector_regr: 0.8899
INFO 2019-07-26 10:40:36 +0000 master-replica-0 62/1000 [>.............................] - ETA: 49:53 - rpn_cls: 4.9813 - rpn_regr: 0.1420 - detector_cls: 1.4621 - detector_regr: 0.8808
INFO 2019-07-26 10:40:37 +0000 master-replica-0 63/1000 [>.............................] - ETA: 49:22 - rpn_cls: 5.0011 - rpn_regr: 0.1411 - detector_cls: 1.4494 - detector_regr: 0.8669
INFO 2019-07-26 10:40:39 +0000 master-replica-0 64/1000 [>.............................] - ETA: 48:52 - rpn_cls: 4.9230 - rpn_regr: 0.1401 - detector_cls: 1.4382 - detector_regr: 0.8603
INFO 2019-07-26 10:40:40 +0000 master-replica-0 65/1000 [>.............................] - ETA: 48:23 - rpn_cls: 4.8821 - rpn_regr: 0.1383 - detector_cls: 1.4299 - detector_regr: 0.8523
INFO 2019-07-26 10:40:41 +0000 master-replica-0 66/1000 [>.............................] - ETA: 47:55 - rpn_cls: 4.8082 - rpn_regr: 0.1383 - detector_cls: 1.4243 - detector_regr: 0.8463
INFO 2019-07-26 10:40:50 +0000 master-replica-0 67/1000 [=>............................] - ETA: 47:24 - rpn_cls: 4.7364 - rpn_regr: 0.1377 - detector_cls: 1.4160 - detector_regr: 0.8420
INFO 2019-07-26 10:40:52 +0000 master-replica-0 68/1000 [=>............................] - ETA: 48:36 - rpn_cls: 4.7129 - rpn_regr: 0.1387 - detector_cls: 1.3993 - detector_regr: 0.8297
INFO 2019-07-26 10:40:58 +0000 master-replica-0 69/1000 [=>............................] - ETA: 48:16 - rpn_cls: 4.6824 - rpn_regr: 0.1416 - detector_cls: 1.3866 - detector_regr: 0.8257
INFO 2019-07-26 10:41:00 +0000 master-replica-0 70/1000 [=>............................] - ETA: 48:55 - rpn_cls: 4.6587 - rpn_regr: 0.1418 - detector_cls: 1.3896 - detector_regr: 0.8185
INFO 2019-07-26 10:41:01 +0000 master-replica-0 71/1000 [=>............................] - ETA: 48:33 - rpn_cls: 4.6947 - rpn_regr: 0.1508 - detector_cls: 1.3768 - detector_regr: 0.8070
INFO 2019-07-26 10:41:03 +0000 master-replica-0 72/1000 [=>............................] - ETA: 48:06 - rpn_cls: 4.6295 - rpn_regr: 0.1494 - detector_cls: 1.3657 - detector_regr: 0.8007
INFO 2019-07-26 10:41:04 +0000 master-replica-0 73/1000 [=>............................] - ETA: 47:44 - rpn_cls: 4.5839 - rpn_regr: 0.1495 - detector_cls: 1.3660 - detector_regr: 0.7961
INFO 2019-07-26 10:41:06 +0000 master-replica-0 74/1000 [=>............................] - ETA: 47:25 - rpn_cls: 4.6436 - rpn_regr: 0.1484 - detector_cls: 1.3540 - detector_regr: 0.7853
INFO 2019-07-26 10:41:09 +0000 master-replica-0 75/1000 [=>............................] - ETA: 47:02 - rpn_cls: 4.6948 - rpn_regr: 0.1479 - detector_cls: 1.3395 - detector_regr: 0.7748
INFO 2019-07-26 10:41:11 +0000 master-replica-0 76/1000 [=>............................] - ETA: 47:02 - rpn_cls: 4.7103 - rpn_regr: 0.1485 - detector_cls: 1.3369 - detector_regr: 0.7716
INFO 2019-07-26 10:41:12 +0000 master-replica-0 77/1000 [=>............................] - ETA: 46:44 - rpn_cls: 4.7424 - rpn_regr: 0.1495 - detector_cls: 1.3405 - detector_regr: 0.7671
INFO 2019-07-26 10:41:14 +0000 master-replica-0 78/1000 [=>............................] - ETA: 46:21 - rpn_cls: 4.7338 - rpn_regr: 0.1484 - detector_cls: 1.3360 - detector_regr: 0.7634
INFO 2019-07-26 10:41:19 +0000 master-replica-0 79/1000 [=>............................] - ETA: 46:03 - rpn_cls: 4.7785 - rpn_regr: 0.1474 - detector_cls: 1.3252 - detector_regr: 0.7619
INFO 2019-07-26 10:41:21 +0000 master-replica-0 80/1000 [=>............................] - ETA: 46:25 - rpn_cls: 4.7345 - rpn_regr: 0.1468 - detector_cls: 1.3184 - detector_regr: 0.7577
INFO 2019-07-26 10:41:22 +0000 master-replica-0 81/1000 [=>............................] - ETA: 46:03 - rpn_cls: 4.6878 - rpn_regr: 0.1463 - detector_cls: 1.3195 - detector_regr: 0.7535
INFO 2019-07-26 10:41:23 +0000 master-replica-0 82/1000 [=>............................] - ETA: 45:42 - rpn_cls: 4.7297 - rpn_regr: 0.1464 - detector_cls: 1.3092 - detector_regr: 0.7494
INFO 2019-07-26 10:41:25 +0000 master-replica-0 83/1000 [=>............................] - ETA: 45:22 - rpn_cls: 4.6944 - rpn_regr: 0.1457 - detector_cls: 1.3122 - detector_regr: 0.7464
INFO 2019-07-26 10:41:26 +0000 master-replica-0 84/1000 [=>............................] - ETA: 45:01 - rpn_cls: 4.6652 - rpn_regr: 0.1460 - detector_cls: 1.3086 - detector_regr: 0.7454
INFO 2019-07-26 10:41:32 +0000 master-replica-0 85/1000 [=>............................] - ETA: 44:40 - rpn_cls: 4.6104 - rpn_regr: 0.1454 - detector_cls: 1.3044 - detector_regr: 0.7426
INFO 2019-07-26 10:41:34 +0000 master-replica-0 86/1000 [=>............................] - ETA: 45:13 - rpn_cls: 4.6063 - rpn_regr: 0.1458 - detector_cls: 1.2974 - detector_regr: 0.7402
INFO 2019-07-26 10:41:44 +0000 master-replica-0 87/1000 [=>............................] - ETA: 44:59 - rpn_cls: 4.6577 - rpn_regr: 0.1480 - detector_cls: 1.2863 - detector_regr: 0.7334
INFO 2019-07-26 10:41:46 +0000 master-replica-0 88/1000 [=>............................] - ETA: 46:05 - rpn_cls: 4.6389 - rpn_regr: 0.1479 - detector_cls: 1.2750 - detector_regr: 0.7250
INFO 2019-07-26 10:41:47 +0000 master-replica-0 89/1000 [=>............................] - ETA: 45:50 - rpn_cls: 4.6850 - rpn_regr: 0.1472 - detector_cls: 1.2688 - detector_regr: 0.7224
INFO 2019-07-26 10:41:52 +0000 master-replica-0 90/1000 [=>............................] - ETA: 45:31 - rpn_cls: 4.6469 - rpn_regr: 0.1463 - detector_cls: 1.2720 - detector_regr: 0.7190
INFO 2019-07-26 10:41:54 +0000 master-replica-0 91/1000 [=>............................] - ETA: 45:49 - rpn_cls: 4.6865 - rpn_regr: 0.1452 - detector_cls: 1.2665 - detector_regr: 0.7173
INFO 2019-07-26 10:41:55 +0000 master-replica-0 92/1000 [=>............................] - ETA: 45:30 - rpn_cls: 4.6408 - rpn_regr: 0.1443 - detector_cls: 1.2674 - detector_regr: 0.7136
INFO 2019-07-26 10:41:56 +0000 master-replica-0 93/1000 [=>............................] - ETA: 45:11 - rpn_cls: 4.5980 - rpn_regr: 0.1434 - detector_cls: 1.2635 - detector_regr: 0.7103
INFO 2019-07-26 10:41:59 +0000 master-replica-0 94/1000 [=>............................] - ETA: 44:51 - rpn_cls: 4.5560 - rpn_regr: 0.1432 - detector_cls: 1.2598 - detector_regr: 0.7070
INFO 2019-07-26 10:42:01 +0000 master-replica-0 95/1000 [=>............................] - ETA: 44:50 - rpn_cls: 4.5529 - rpn_regr: 0.1426 - detector_cls: 1.2563 - detector_regr: 0.7054
INFO 2019-07-26 10:42:03 +0000 master-replica-0 96/1000 [=>............................] - ETA: 44:32 - rpn_cls: 4.5149 - rpn_regr: 0.1424 - detector_cls: 1.2522 - detector_regr: 0.7024
INFO 2019-07-26 10:42:04 +0000 master-replica-0 97/1000 [=>............................] - ETA: 44:21 - rpn_cls: 4.5671 - rpn_regr: 0.1445 - detector_cls: 1.2450 - detector_regr: 0.6975
INFO 2019-07-26 10:42:06 +0000 master-replica-0 98/1000 [=>............................] - ETA: 44:03 - rpn_cls: 4.5350 - rpn_regr: 0.1433 - detector_cls: 1.2463 - detector_regr: 0.6958
INFO 2019-07-26 10:42:14 +0000 master-replica-0 99/1000 [=>............................] - ETA: 43:48 - rpn_cls: 4.5178 - rpn_regr: 0.1427 - detector_cls: 1.2481 - detector_regr: 0.6938
INFO 2019-07-26 10:42:20 +0000 master-replica-0 100/1000 [==>...........................] - ETA: 44:32 - rpn_cls: 4.5056 - rpn_regr: 0.1426 - detector_cls: 1.2394 - detector_regr: 0.6868
INFO 2019-07-26 10:42:22 +0000 master-replica-0 101/1000 [==>...........................] - ETA: 45:00 - rpn_cls: 4.4724 - rpn_regr: 0.1434 - detector_cls: 1.2348 - detector_regr: 0.6834
INFO 2019-07-26 10:42:23 +0000 master-replica-0 102/1000 [==>...........................] - ETA: 44:42 - rpn_cls: 4.4286 - rpn_regr: 0.1430 - detector_cls: 1.2295 - detector_regr: 0.6845
INFO 2019-07-26 10:42:25 +0000 master-replica-0 103/1000 [==>...........................] - ETA: 44:27 - rpn_cls: 4.3856 - rpn_regr: 0.1425 - detector_cls: 1.2286 - detector_regr: 0.6828
INFO 2019-07-26 10:42:26 +0000 master-replica-0 104/1000 [==>...........................] - ETA: 44:12 - rpn_cls: 4.4312 - rpn_regr: 0.1413 - detector_cls: 1.2239 - detector_regr: 0.6811
INFO 2019-07-26 10:42:28 +0000 master-replica-0 105/1000 [==>...........................] - ETA: 43:55 - rpn_cls: 4.4300 - rpn_regr: 0.1404 - detector_cls: 1.2173 - detector_regr: 0.6746
INFO 2019-07-26 10:42:29 +0000 master-replica-0 106/1000 [==>...........................] - ETA: 43:39 - rpn_cls: 4.4077 - rpn_regr: 0.1410 - detector_cls: 1.2220 - detector_regr: 0.6724
INFO 2019-07-26 10:42:34 +0000 master-replica-0 107/1000 [==>...........................] - ETA: 43:23 - rpn_cls: 4.3729 - rpn_regr: 0.1401 - detector_cls: 1.2236 - detector_regr: 0.6718
INFO 2019-07-26 10:42:38 +0000 master-replica-0 108/1000 [==>...........................] - ETA: 43:36 - rpn_cls: 4.4032 - rpn_regr: 0.1395 - detector_cls: 1.2169 - detector_regr: 0.6702
INFO 2019-07-26 10:42:39 +0000 master-replica-0 109/1000 [==>...........................] - ETA: 43:41 - rpn_cls: 4.3746 - rpn_regr: 0.1392 - detector_cls: 1.2124 - detector_regr: 0.6672
INFO 2019-07-26 10:42:46 +0000 master-replica-0 110/1000 [==>...........................] - ETA: 43:25 - rpn_cls: 4.3348 - rpn_regr: 0.1404 - detector_cls: 1.2137 - detector_regr: 0.6663
INFO 2019-07-26 10:42:48 +0000 master-replica-0 111/1000 [==>...........................] - ETA: 43:56 - rpn_cls: 4.3766 - rpn_regr: 0.1397 - detector_cls: 1.2065 - detector_regr: 0.6602
INFO 2019-07-26 10:42:49 +0000 master-replica-0 112/1000 [==>...........................] - ETA: 43:45 - rpn_cls: 4.4238 - rpn_regr: 0.1390 - detector_cls: 1.2001 - detector_regr: 0.6544
INFO 2019-07-26 10:42:51 +0000 master-replica-0 113/1000 [==>...........................] - ETA: 43:29 - rpn_cls: 4.3881 - rpn_regr: 0.1383 - detector_cls: 1.1975 - detector_regr: 0.6533
INFO 2019-07-26 10:42:52 +0000 master-replica-0 114/1000 [==>...........................] - ETA: 43:15 - rpn_cls: 4.3615 - rpn_regr: 0.1380 - detector_cls: 1.2010 - detector_regr: 0.6517
INFO 2019-07-26 10:42:54 +0000 master-replica-0 115/1000 [==>...........................] - ETA: 42:59 - rpn_cls: 4.3236 - rpn_regr: 0.1379 - detector_cls: 1.1976 - detector_regr: 0.6528
INFO 2019-07-26 10:42:59 +0000 master-replica-0 116/1000 [==>...........................] - ETA: 42:45 - rpn_cls: 4.3214 - rpn_regr: 0.1376 - detector_cls: 1.1917 - detector_regr: 0.6510
INFO 2019-07-26 10:43:00 +0000 master-replica-0 117/1000 [==>...........................] - ETA: 42:57 - rpn_cls: 4.2845 - rpn_regr: 0.1377 - detector_cls: 1.1882 - detector_regr: 0.6492
INFO 2019-07-26 10:43:01 +0000 master-replica-0 118/1000 [==>...........................] - ETA: 42:43 - rpn_cls: 4.2482 - rpn_regr: 0.1381 - detector_cls: 1.1912 - detector_regr: 0.6479
INFO 2019-07-26 10:43:03 +0000 master-replica-0 119/1000 [==>...........................] - ETA: 42:29 - rpn_cls: 4.2697 - rpn_regr: 0.1484 - detector_cls: 1.1836 - detector_regr: 0.6424
INFO 2019-07-26 10:43:05 +0000 master-replica-0 120/1000 [==>...........................] - ETA: 42:19 - rpn_cls: 4.3071 - rpn_regr: 0.1507 - detector_cls: 1.1760 - detector_regr: 0.6405
INFO 2019-07-26 10:43:07 +0000 master-replica-0 121/1000 [==>...........................] - ETA: 42:04 - rpn_cls: 4.2715 - rpn_regr: 0.1507 - detector_cls: 1.1736 - detector_regr: 0.6397
INFO 2019-07-26 10:43:09 +0000 master-replica-0 122/1000 [==>...........................] - ETA: 42:00 - rpn_cls: 4.3146 - rpn_regr: 0.1531 - detector_cls: 1.1656 - detector_regr: 0.6345
INFO 2019-07-26 10:43:11 +0000 master-replica-0 123/1000 [==>...........................] - ETA: 41:47 - rpn_cls: 4.2867 - rpn_regr: 0.1529 - detector_cls: 1.1661 - detector_regr: 0.6324
INFO 2019-07-26 10:43:12 +0000 master-replica-0 124/1000 [==>...........................] - ETA: 41:37 - rpn_cls: 4.3238 - rpn_regr: 0.1528 - detector_cls: 1.1576 - detector_regr: 0.6273
INFO 2019-07-26 10:43:13 +0000 master-replica-0 125/1000 [==>...........................] - ETA: 41:24 - rpn_cls: 4.3054 - rpn_regr: 0.1528 - detector_cls: 1.1604 - detector_regr: 0.6262
INFO 2019-07-26 10:43:18 +0000 master-replica-0 126/1000 [==>...........................] - ETA: 41:11 - rpn_cls: 4.2877 - rpn_regr: 0.1519 - detector_cls: 1.1568 - detector_regr: 0.6245
INFO 2019-07-26 10:43:20 +0000 master-replica-0 127/1000 [==>...........................] - ETA: 41:25 - rpn_cls: 4.2884 - rpn_regr: 0.1516 - detector_cls: 1.1497 - detector_regr: 0.6246
INFO 2019-07-26 10:43:25 +0000 master-replica-0 128/1000 [==>...........................] - ETA: 41:11 - rpn_cls: 4.2676 - rpn_regr: 0.1514 - detector_cls: 1.1554 - detector_regr: 0.6232
INFO 2019-07-26 10:43:26 +0000 master-replica-0 129/1000 [==>...........................] - ETA: 41:23 - rpn_cls: 4.2345 - rpn_regr: 0.1512 - detector_cls: 1.1516 - detector_regr: 0.6213
INFO 2019-07-26 10:43:27 +0000 master-replica-0 130/1000 [==>...........................] - ETA: 41:10 - rpn_cls: 4.2019 - rpn_regr: 0.1507 - detector_cls: 1.1487 - detector_regr: 0.6203
INFO 2019-07-26 10:43:29 +0000 master-replica-0 131/1000 [==>...........................] - ETA: 40:57 - rpn_cls: 4.1720 - rpn_regr: 0.1503 - detector_cls: 1.1444 - detector_regr: 0.6179
INFO 2019-07-26 10:43:30 +0000 master-replica-0 132/1000 [==>...........................] - ETA: 40:44 - rpn_cls: 4.1496 - rpn_regr: 0.1501 - detector_cls: 1.1444 - detector_regr: 0.6170
INFO 2019-07-26 10:43:32 +0000 master-replica-0 133/1000 [==>...........................] - ETA: 40:34 - rpn_cls: 4.1463 - rpn_regr: 0.1502 - detector_cls: 1.1382 - detector_regr: 0.6124
INFO 2019-07-26 10:43:33 +0000 master-replica-0 134/1000 [===>..........................] - ETA: 40:21 - rpn_cls: 4.1798 - rpn_regr: 0.1500 - detector_cls: 1.1345 - detector_regr: 0.6133
INFO 2019-07-26 10:43:35 +0000 master-replica-0 135/1000 [===>..........................] - ETA: 40:10 - rpn_cls: 4.1639 - rpn_regr: 0.1499 - detector_cls: 1.1364 - detector_regr: 0.6116
INFO 2019-07-26 10:43:36 +0000 master-replica-0 136/1000 [===>..........................] - ETA: 39:58 - rpn_cls: 4.1493 - rpn_regr: 0.1492 - detector_cls: 1.1345 - detector_regr: 0.6099
INFO 2019-07-26 10:43:37 +0000 master-replica-0 137/1000 [===>..........................] - ETA: 39:47 - rpn_cls: 4.1270 - rpn_regr: 0.1489 - detector_cls: 1.1335 - detector_regr: 0.6088
INFO 2019-07-26 10:43:39 +0000 master-replica-0 138/1000 [===>..........................] - ETA: 39:35 - rpn_cls: 4.1053 - rpn_regr: 0.1485 - detector_cls: 1.1316 - detector_regr: 0.6068
INFO 2019-07-26 10:43:40 +0000 master-replica-0 139/1000 [===>..........................] - ETA: 39:24 - rpn_cls: 4.0880 - rpn_regr: 0.1481 - detector_cls: 1.1335 - detector_regr: 0.6055
INFO 2019-07-26 10:43:46 +0000 master-replica-0 140/1000 [===>..........................] - ETA: 39:12 - rpn_cls: 4.0694 - rpn_regr: 0.1483 - detector_cls: 1.1331 - detector_regr: 0.6041
INFO 2019-07-26 10:43:47 +0000 master-replica-0 141/1000 [===>..........................] - ETA: 39:29 - rpn_cls: 4.0649 - rpn_regr: 0.1480 - detector_cls: 1.1266 - detector_regr: 0.5998
INFO 2019-07-26 10:43:49 +0000 master-replica-0 142/1000 [===>..........................] - ETA: 39:17 - rpn_cls: 4.0405 - rpn_regr: 0.1471 - detector_cls: 1.1248 - detector_regr: 0.5978
INFO 2019-07-26 10:43:56 +0000 master-replica-0 143/1000 [===>..........................] - ETA: 39:07 - rpn_cls: 4.0227 - rpn_regr: 0.1465 - detector_cls: 1.1254 - detector_regr: 0.5958
INFO 2019-07-26 10:43:57 +0000 master-replica-0 144/1000 [===>..........................] - ETA: 39:31 - rpn_cls: 3.9990 - rpn_regr: 0.1457 - detector_cls: 1.1261 - detector_regr: 0.5947
INFO 2019-07-26 10:43:59 +0000 master-replica-0 145/1000 [===>..........................] - ETA: 39:21 - rpn_cls: 3.9756 - rpn_regr: 0.1453 - detector_cls: 1.1268 - detector_regr: 0.5933
INFO 2019-07-26 10:44:00 +0000 master-replica-0 146/1000 [===>..........................] - ETA: 39:10 - rpn_cls: 3.9484 - rpn_regr: 0.1457 - detector_cls: 1.1268 - detector_regr: 0.5926
INFO 2019-07-26 10:44:02 +0000 master-replica-0 147/1000 [===>..........................] - ETA: 38:59 - rpn_cls: 3.9407 - rpn_regr: 0.1457 - detector_cls: 1.1268 - detector_regr: 0.5916
INFO 2019-07-26 10:44:03 +0000 master-replica-0 148/1000 [===>..........................] - ETA: 38:48 - rpn_cls: 3.9266 - rpn_regr: 0.1450 - detector_cls: 1.1291 - detector_regr: 0.5904
INFO 2019-07-26 10:44:04 +0000 master-replica-0 149/1000 [===>..........................] - ETA: 38:37 - rpn_cls: 3.9002 - rpn_regr: 0.1446 - detector_cls: 1.1261 - detector_regr: 0.5886
INFO 2019-07-26 10:44:05 +0000 master-replica-0 150/1000 [===>..........................] - ETA: 38:27 - rpn_cls: 3.8742 - rpn_regr: 0.1443 - detector_cls: 1.1285 - detector_regr: 0.5872
INFO 2019-07-26 10:44:09 +0000 master-replica-0 151/1000 [===>..........................] - ETA: 38:15 - rpn_cls: 3.8486 - rpn_regr: 0.1452 - detector_cls: 1.1269 - detector_regr: 0.5870
INFO 2019-07-26 10:44:10 +0000 master-replica-0 152/1000 [===>..........................] - ETA: 38:16 - rpn_cls: 3.8423 - rpn_regr: 0.1446 - detector_cls: 1.1290 - detector_regr: 0.5854
INFO 2019-07-26 10:44:12 +0000 master-replica-0 153/1000 [===>..........................] - ETA: 38:04 - rpn_cls: 3.8222 - rpn_regr: 0.1440 - detector_cls: 1.1284 - detector_regr: 0.5842
INFO 2019-07-26 10:44:13 +0000 master-replica-0 154/1000 [===>..........................] - ETA: 37:58 - rpn_cls: 3.8596 - rpn_regr: 0.1516 - detector_cls: 1.1260 - detector_regr: 0.5804
INFO 2019-07-26 10:44:14 +0000 master-replica-0 155/1000 [===>..........................] - ETA: 37:48 - rpn_cls: 3.8401 - rpn_regr: 0.1508 - detector_cls: 1.1248 - detector_regr: 0.5793
INFO 2019-07-26 10:44:16 +0000 master-replica-0 156/1000 [===>..........................] - ETA: 37:38 - rpn_cls: 3.8217 - rpn_regr: 0.1506 - detector_cls: 1.1264 - detector_regr: 0.5772
INFO 2019-07-26 10:44:17 +0000 master-replica-0 157/1000 [===>..........................] - ETA: 37:30 - rpn_cls: 3.7990 - rpn_regr: 0.1504 - detector_cls: 1.1273 - detector_regr: 0.5755
INFO 2019-07-26 10:44:19 +0000 master-replica-0 158/1000 [===>..........................] - ETA: 37:20 - rpn_cls: 3.7749 - rpn_regr: 0.1507 - detector_cls: 1.1258 - detector_regr: 0.5761
INFO 2019-07-26 10:44:20 +0000 master-replica-0 159/1000 [===>..........................] - ETA: 37:10 - rpn_cls: 3.7783 - rpn_regr: 0.1512 - detector_cls: 1.1235 - detector_regr: 0.5754
INFO 2019-07-26 10:44:21 +0000 master-replica-0 160/1000 [===>..........................] - ETA: 37:02 - rpn_cls: 3.8052 - rpn_regr: 0.1512 - detector_cls: 1.1202 - detector_regr: 0.5718
INFO 2019-07-26 10:44:23 +0000 master-replica-0 161/1000 [===>..........................] - ETA: 36:52 - rpn_cls: 3.7897 - rpn_regr: 0.1514 - detector_cls: 1.1185 - detector_regr: 0.5718
INFO 2019-07-26 10:44:24 +0000 master-replica-0 162/1000 [===>..........................] - ETA: 36:42 - rpn_cls: 3.7707 - rpn_regr: 0.1511 - detector_cls: 1.1182 - detector_regr: 0.5705
INFO 2019-07-26 10:44:29 +0000 master-replica-0 163/1000 [===>..........................] - ETA: 36:33 - rpn_cls: 3.8006 - rpn_regr: 0.1508 - detector_cls: 1.1135 - detector_regr: 0.5670
INFO 2019-07-26 10:44:31 +0000 master-replica-0 164/1000 [===>..........................] - ETA: 36:43 - rpn_cls: 3.8302 - rpn_regr: 0.1503 - detector_cls: 1.1097 - detector_regr: 0.5685
INFO 2019-07-26 10:44:32 +0000 master-replica-0 165/1000 [===>..........................] - ETA: 36:36 - rpn_cls: 3.8607 - rpn_regr: 0.1511 - detector_cls: 1.1088 - detector_regr: 0.5676
INFO 2019-07-26 10:44:38 +0000 master-replica-0 166/1000 [===>..........................] - ETA: 36:27 - rpn_cls: 3.8484 - rpn_regr: 0.1505 - detector_cls: 1.1070 - detector_regr: 0.5671
INFO 2019-07-26 10:44:44 +0000 master-replica-0 167/1000 [====>.........................] - ETA: 36:38 - rpn_cls: 3.8516 - rpn_regr: 0.1508 - detector_cls: 1.1054 - detector_regr: 0.5675
INFO 2019-07-26 10:44:45 +0000 master-replica-0 168/1000 [====>.........................] - ETA: 36:52 - rpn_cls: 3.8351 - rpn_regr: 0.1504 - detector_cls: 1.1091 - detector_regr: 0.5660
INFO 2019-07-26 10:44:48 +0000 master-replica-0 169/1000 [====>.........................] - ETA: 36:44 - rpn_cls: 3.8543 - rpn_regr: 0.1503 - detector_cls: 1.1104 - detector_regr: 0.5657
INFO 2019-07-26 10:44:49 +0000 master-replica-0 170/1000 [====>.........................] - ETA: 36:40 - rpn_cls: 3.8924 - rpn_regr: 0.1506 - detector_cls: 1.1069 - detector_regr: 0.5649
INFO 2019-07-26 10:44:55 +0000 master-replica-0 171/1000 [====>.........................] - ETA: 36:31 - rpn_cls: 3.8697 - rpn_regr: 0.1502 - detector_cls: 1.1049 - detector_regr: 0.5636
INFO 2019-07-26 10:45:02 +0000 master-replica-0 172/1000 [====>.........................] - ETA: 36:44 - rpn_cls: 3.8956 - rpn_regr: 0.1515 - detector_cls: 1.1011 - detector_regr: 0.5603
INFO 2019-07-26 10:45:07 +0000 master-replica-0 173/1000 [====>.........................] - ETA: 37:00 - rpn_cls: 3.8955 - rpn_regr: 0.1512 - detector_cls: 1.1004 - detector_regr: 0.5592
INFO 2019-07-26 10:45:08 +0000 master-replica-0 174/1000 [====>.........................] - ETA: 37:10 - rpn_cls: 3.9126 - rpn_regr: 0.1507 - detector_cls: 1.0995 - detector_regr: 0.5592
INFO 2019-07-26 10:45:10 +0000 master-replica-0 175/1000 [====>.........................] - ETA: 37:01 - rpn_cls: 3.8929 - rpn_regr: 0.1501 - detector_cls: 1.1018 - detector_regr: 0.5583
INFO 2019-07-26 10:45:11 +0000 master-replica-0 176/1000 [====>.........................] - ETA: 36:52 - rpn_cls: 3.8830 - rpn_regr: 0.1496 - detector_cls: 1.1044 - detector_regr: 0.5578
INFO 2019-07-26 10:45:16 +0000 master-replica-0 177/1000 [====>.........................] - ETA: 36:43 - rpn_cls: 3.8611 - rpn_regr: 0.1493 - detector_cls: 1.1026 - detector_regr: 0.5570
INFO 2019-07-26 10:45:21 +0000 master-replica-0 178/1000 [====>.........................] - ETA: 36:53 - rpn_cls: 3.8443 - rpn_regr: 0.1488 - detector_cls: 1.1017 - detector_regr: 0.5555
INFO 2019-07-26 10:45:23 +0000 master-replica-0 179/1000 [====>.........................] - ETA: 37:00 - rpn_cls: 3.8228 - rpn_regr: 0.1485 - detector_cls: 1.1024 - detector_regr: 0.5551
INFO 2019-07-26 10:45:24 +0000 master-replica-0 180/1000 [====>.........................] - ETA: 36:53 - rpn_cls: 3.8307 - rpn_regr: 0.1484 - detector_cls: 1.1016 - detector_regr: 0.5547
INFO 2019-07-26 10:45:26 +0000 master-replica-0 181/1000 [====>.........................] - ETA: 36:45 - rpn_cls: 3.8127 - rpn_regr: 0.1478 - detector_cls: 1.1024 - detector_regr: 0.5538
INFO 2019-07-26 10:45:27 +0000 master-replica-0 182/1000 [====>.........................] - ETA: 36:36 - rpn_cls: 3.7973 - rpn_regr: 0.1475 - detector_cls: 1.1009 - detector_regr: 0.5532
INFO 2019-07-26 10:45:29 +0000 master-replica-0 183/1000 [====>.........................] - ETA: 36:28 - rpn_cls: 3.7811 - rpn_regr: 0.1472 - detector_cls: 1.1004 - detector_regr: 0.5528
INFO 2019-07-26 10:45:30 +0000 master-replica-0 184/1000 [====>.........................] - ETA: 36:21 - rpn_cls: 3.7814 - rpn_regr: 0.1468 - detector_cls: 1.0968 - detector_regr: 0.5498
INFO 2019-07-26 10:45:32 +0000 master-replica-0 185/1000 [====>.........................] - ETA: 36:13 - rpn_cls: 3.7666 - rpn_regr: 0.1469 - detector_cls: 1.0971 - detector_regr: 0.5492
INFO 2019-07-26 10:45:33 +0000 master-replica-0 186/1000 [====>.........................] - ETA: 36:05 - rpn_cls: 3.7508 - rpn_regr: 0.1466 - detector_cls: 1.0993 - detector_regr: 0.5487
INFO 2019-07-26 10:45:34 +0000 master-replica-0 187/1000 [====>.........................] - ETA: 35:56 - rpn_cls: 3.7381 - rpn_regr: 0.1460 - detector_cls: 1.0984 - detector_regr: 0.5483
INFO 2019-07-26 10:45:36 +0000 master-replica-0 188/1000 [====>.........................] - ETA: 35:48 - rpn_cls: 3.7183 - rpn_regr: 0.1459 - detector_cls: 1.0981 - detector_regr: 0.5473
INFO 2019-07-26 10:45:37 +0000 master-replica-0 189/1000 [====>.........................] - ETA: 35:40 - rpn_cls: 3.7043 - rpn_regr: 0.1455 - detector_cls: 1.0972 - detector_regr: 0.5465
INFO 2019-07-26 10:45:39 +0000 master-replica-0 190/1000 [====>.........................] - ETA: 35:31 - rpn_cls: 3.6899 - rpn_regr: 0.1454 - detector_cls: 1.0964 - detector_regr: 0.5458
INFO 2019-07-26 10:45:40 +0000 master-replica-0 191/1000 [====>.........................] - ETA: 35:24 - rpn_cls: 3.7168 - rpn_regr: 0.1463 - detector_cls: 1.0975 - detector_regr: 0.5455
INFO 2019-07-26 10:45:43 +0000 master-replica-0 192/1000 [====>.........................] - ETA: 35:17 - rpn_cls: 3.7463 - rpn_regr: 0.1463 - detector_cls: 1.0944 - detector_regr: 0.5457
INFO 2019-07-26 10:45:45 +0000 master-replica-0 193/1000 [====>.........................] - ETA: 35:17 - rpn_cls: 3.7490 - rpn_regr: 0.1468 - detector_cls: 1.0909 - detector_regr: 0.5429
INFO 2019-07-26 10:45:46 +0000 master-replica-0 194/1000 [====>.........................] - ETA: 35:09 - rpn_cls: 3.7730 - rpn_regr: 0.1467 - detector_cls: 1.0900 - detector_regr: 0.5426
INFO 2019-07-26 10:45:48 +0000 master-replica-0 195/1000 [====>.........................] - ETA: 35:01 - rpn_cls: 3.7634 - rpn_regr: 0.1462 - detector_cls: 1.0932 - detector_regr: 0.5415
INFO 2019-07-26 10:45:50 +0000 master-replica-0 196/1000 [====>.........................] - ETA: 34:55 - rpn_cls: 3.7852 - rpn_regr: 0.1467 - detector_cls: 1.0909 - detector_regr: 0.5414
INFO 2019-07-26 10:45:51 +0000 master-replica-0 197/1000 [====>.........................] - ETA: 34:50 - rpn_cls: 3.8092 - rpn_regr: 0.1478 - detector_cls: 1.0874 - detector_regr: 0.5386
INFO 2019-07-26 10:45:57 +0000 master-replica-0 198/1000 [====>.........................] - ETA: 34:43 - rpn_cls: 3.7913 - rpn_regr: 0.1473 - detector_cls: 1.0875 - detector_regr: 0.5375
INFO 2019-07-26 10:45:59 +0000 master-replica-0 199/1000 [====>.........................] - ETA: 34:53 - rpn_cls: 3.8154 - rpn_regr: 0.1474 - detector_cls: 1.0864 - detector_regr: 0.5368
INFO 2019-07-26 10:46:00 +0000 master-replica-0 200/1000 [=====>........................] - ETA: 34:46 - rpn_cls: 3.7997 - rpn_regr: 0.1470 - detector_cls: 1.0860 - detector_regr: 0.5373
INFO 2019-07-26 10:46:01 +0000 master-replica-0 201/1000 [=====>........................] - ETA: 34:38 - rpn_cls: 3.8014 - rpn_regr: 0.1477 - detector_cls: 1.0833 - detector_regr: 0.5370
INFO 2019-07-26 10:46:03 +0000 master-replica-0 202/1000 [=====>........................] - ETA: 34:31 - rpn_cls: 3.8237 - rpn_regr: 0.1480 - detector_cls: 1.0800 - detector_regr: 0.5344
INFO 2019-07-26 10:46:06 +0000 master-replica-0 203/1000 [=====>........................] - ETA: 34:23 - rpn_cls: 3.8048 - rpn_regr: 0.1480 - detector_cls: 1.0803 - detector_regr: 0.5339
INFO 2019-07-26 10:46:09 +0000 master-replica-0 204/1000 [=====>........................] - ETA: 34:24 - rpn_cls: 3.8062 - rpn_regr: 0.1489 - detector_cls: 1.0831 - detector_regr: 0.5330
INFO 2019-07-26 10:46:13 +0000 master-replica-0 205/1000 [=====>........................] - ETA: 34:22 - rpn_cls: 3.8304 - rpn_regr: 0.1551 - detector_cls: 1.0789 - detector_regr: 0.5304
INFO 2019-07-26 10:46:14 +0000 master-replica-0 206/1000 [=====>........................] - ETA: 34:26 - rpn_cls: 3.8139 - rpn_regr: 0.1546 - detector_cls: 1.0790 - detector_regr: 0.5295
INFO 2019-07-26 10:46:16 +0000 master-replica-0 207/1000 [=====>........................] - ETA: 34:18 - rpn_cls: 3.7955 - rpn_regr: 0.1542 - detector_cls: 1.0786 - detector_regr: 0.5299
INFO 2019-07-26 10:46:22 +0000 master-replica-0 208/1000 [=====>........................] - ETA: 34:10 - rpn_cls: 3.7806 - rpn_regr: 0.1536 - detector_cls: 1.0775 - detector_regr: 0.5290
INFO 2019-07-26 10:46:23 +0000 master-replica-0 209/1000 [=====>........................] - ETA: 34:23 - rpn_cls: 3.7702 - rpn_regr: 0.1530 - detector_cls: 1.0811 - detector_regr: 0.5295
INFO 2019-07-26 10:46:31 +0000 master-replica-0 210/1000 [=====>........................] - ETA: 34:15 - rpn_cls: 3.7592 - rpn_regr: 0.1525 - detector_cls: 1.0818 - detector_regr: 0.5288
INFO 2019-07-26 10:46:32 +0000 master-replica-0 211/1000 [=====>........................] - ETA: 34:31 - rpn_cls: 3.7853 - rpn_regr: 0.1520 - detector_cls: 1.0800 - detector_regr: 0.5284
INFO 2019-07-26 10:46:34 +0000 master-replica-0 212/1000 [=====>........................] - ETA: 34:24 - rpn_cls: 3.8055 - rpn_regr: 0.1525 - detector_cls: 1.0762 - detector_regr: 0.5284
INFO 2019-07-26 10:46:35 +0000 master-replica-0 213/1000 [=====>........................] - ETA: 34:17 - rpn_cls: 3.8268 - rpn_regr: 0.1520 - detector_cls: 1.0737 - detector_regr: 0.5287
INFO 2019-07-26 10:46:41 +0000 master-replica-0 214/1000 [=====>........................] - ETA: 34:11 - rpn_cls: 3.8173 - rpn_regr: 0.1526 - detector_cls: 1.0729 - detector_regr: 0.5283
INFO 2019-07-26 10:46:42 +0000 master-replica-0 215/1000 [=====>........................] - ETA: 34:17 - rpn_cls: 3.8207 - rpn_regr: 0.1530 - detector_cls: 1.0718 - detector_regr: 0.5291
INFO 2019-07-26 10:46:47 +0000 master-replica-0 216/1000 [=====>........................] - ETA: 34:09 - rpn_cls: 3.8062 - rpn_regr: 0.1526 - detector_cls: 1.0710 - detector_regr: 0.5287
INFO 2019-07-26 10:46:51 +0000 master-replica-0 217/1000 [=====>........................] - ETA: 34:17 - rpn_cls: 3.7926 - rpn_regr: 0.1527 - detector_cls: 1.0683 - detector_regr: 0.5262
INFO 2019-07-26 10:46:59 +0000 master-replica-0 218/1000 [=====>........................] - ETA: 34:20 - rpn_cls: 3.7752 - rpn_regr: 0.1529 - detector_cls: 1.0676 - detector_regr: 0.5269
INFO 2019-07-26 10:47:00 +0000 master-replica-0 219/1000 [=====>........................] - ETA: 34:36 - rpn_cls: 3.7579 - rpn_regr: 0.1540 - detector_cls: 1.0640 - detector_regr: 0.5245
INFO 2019-07-26 10:47:02 +0000 master-replica-0 220/1000 [=====>........................] - ETA: 34:28 - rpn_cls: 3.7439 - rpn_regr: 0.1535 - detector_cls: 1.0627 - detector_regr: 0.5240
INFO 2019-07-26 10:47:03 +0000 master-replica-0 221/1000 [=====>........................] - ETA: 34:21 - rpn_cls: 3.7567 - rpn_regr: 0.1551 - detector_cls: 1.0594 - detector_regr: 0.5216
INFO 2019-07-26 10:47:05 +0000 master-replica-0 222/1000 [=====>........................] - ETA: 34:15 - rpn_cls: 3.7558 - rpn_regr: 0.1557 - detector_cls: 1.0596 - detector_regr: 0.5213
INFO 2019-07-26 10:47:07 +0000 master-replica-0 223/1000 [=====>........................] - ETA: 34:08 - rpn_cls: 3.7428 - rpn_regr: 0.1553 - detector_cls: 1.0612 - detector_regr: 0.5208
INFO 2019-07-26 10:47:14 +0000 master-replica-0 224/1000 [=====>........................] - ETA: 34:03 - rpn_cls: 3.7295 - rpn_regr: 0.1550 - detector_cls: 1.0597 - detector_regr: 0.5203
INFO 2019-07-26 10:47:15 +0000 master-replica-0 225/1000 [=====>........................] - ETA: 34:16 - rpn_cls: 3.7129 - rpn_regr: 0.1551 - detector_cls: 1.0568 - detector_regr: 0.5202
INFO 2019-07-26 10:47:17 +0000 master-replica-0 226/1000 [=====>........................] - ETA: 34:09 - rpn_cls: 3.7088 - rpn_regr: 0.1553 - detector_cls: 1.0567 - detector_regr: 0.5199
INFO 2019-07-26 10:47:18 +0000 master-replica-0 227/1000 [=====>........................] - ETA: 34:02 - rpn_cls: 3.7112 - rpn_regr: 0.1561 - detector_cls: 1.0530 - detector_regr: 0.5176
[... per-step Keras progress lines for steps 228/1000 through 571/1000 elided: between 10:47:19 and 10:55:29 UTC the ETA fell from 33:55 to 13:39 while the running losses drifted down from roughly rpn_cls 3.72, rpn_regr 0.156, detector_cls 1.05, detector_regr 0.518 to the values on the next line ...]
INFO 2019-07-26 10:55:31 +0000 master-replica-0 572/1000 [================>.............] - ETA: 13:37 - rpn_cls: 3.0039 - rpn_regr: 0.1482 - detector_cls: 0.9979 - detector_regr: 0.4574
INFO 2019-07-26 10:55:32 +0000 master-replica-0 573/1000 [================>.............] - ETA: 13:34 - rpn_cls: 2.9987 - rpn_regr: 0.1481 - detector_cls: 0.9977 - detector_regr: 0.4576
INFO 2019-07-26 10:55:33 +0000 master-replica-0 574/1000 [================>.............] - ETA: 13:32 - rpn_cls: 3.0025 - rpn_regr: 0.1481 - detector_cls: 0.9972 - detector_regr: 0.4575
INFO 2019-07-26 10:55:35 +0000 master-replica-0 575/1000 [================>.............] - ETA: 13:30 - rpn_cls: 3.0008 - rpn_regr: 0.1485 - detector_cls: 0.9976 - detector_regr: 0.4576
INFO 2019-07-26 10:55:36 +0000 master-replica-0 576/1000 [================>.............] - ETA: 13:28 - rpn_cls: 3.0005 - rpn_regr: 0.1483 - detector_cls: 0.9985 - detector_regr: 0.4577
INFO 2019-07-26 10:55:38 +0000 master-replica-0 577/1000 [================>.............] - ETA: 13:25 - rpn_cls: 3.0002 - rpn_regr: 0.1485 - detector_cls: 0.9989 - detector_regr: 0.4579
INFO 2019-07-26 10:55:40 +0000 master-replica-0 578/1000 [================>.............] - ETA: 13:23 - rpn_cls: 2.9950 - rpn_regr: 0.1486 - detector_cls: 0.9986 - detector_regr: 0.4581
INFO 2019-07-26 10:55:41 +0000 master-replica-0 579/1000 [================>.............] - ETA: 13:21 - rpn_cls: 2.9904 - rpn_regr: 0.1484 - detector_cls: 0.9990 - detector_regr: 0.4581
INFO 2019-07-26 10:55:42 +0000 master-replica-0 580/1000 [================>.............] - ETA: 13:19 - rpn_cls: 2.9866 - rpn_regr: 0.1483 - detector_cls: 0.9987 - detector_regr: 0.4580
INFO 2019-07-26 10:55:43 +0000 master-replica-0 581/1000 [================>.............] - ETA: 13:16 - rpn_cls: 2.9857 - rpn_regr: 0.1482 - detector_cls: 0.9991 - detector_regr: 0.4579
INFO 2019-07-26 10:55:44 +0000 master-replica-0 582/1000 [================>.............] - ETA: 13:14 - rpn_cls: 2.9806 - rpn_regr: 0.1482 - detector_cls: 0.9995 - detector_regr: 0.4579
INFO 2019-07-26 10:55:46 +0000 master-replica-0 583/1000 [================>.............] - ETA: 13:12 - rpn_cls: 2.9755 - rpn_regr: 0.1482 - detector_cls: 0.9991 - detector_regr: 0.4579
INFO 2019-07-26 10:55:48 +0000 master-replica-0 584/1000 [================>.............] - ETA: 13:09 - rpn_cls: 2.9704 - rpn_regr: 0.1483 - detector_cls: 0.9986 - detector_regr: 0.4580
INFO 2019-07-26 10:55:49 +0000 master-replica-0 585/1000 [================>.............] - ETA: 13:07 - rpn_cls: 2.9799 - rpn_regr: 0.1481 - detector_cls: 0.9978 - detector_regr: 0.4583
INFO 2019-07-26 10:55:51 +0000 master-replica-0 586/1000 [================>.............] - ETA: 13:05 - rpn_cls: 2.9770 - rpn_regr: 0.1480 - detector_cls: 0.9975 - detector_regr: 0.4583
INFO 2019-07-26 10:55:52 +0000 master-replica-0 587/1000 [================>.............] - ETA: 13:03 - rpn_cls: 2.9733 - rpn_regr: 0.1479 - detector_cls: 0.9980 - detector_regr: 0.4581
INFO 2019-07-26 10:55:54 +0000 master-replica-0 588/1000 [================>.............] - ETA: 13:01 - rpn_cls: 2.9682 - rpn_regr: 0.1478 - detector_cls: 0.9980 - detector_regr: 0.4580
INFO 2019-07-26 10:55:55 +0000 master-replica-0 589/1000 [================>.............] - ETA: 12:59 - rpn_cls: 2.9769 - rpn_regr: 0.1487 - detector_cls: 0.9972 - detector_regr: 0.4573
INFO 2019-07-26 10:55:57 +0000 master-replica-0 590/1000 [================>.............] - ETA: 12:57 - rpn_cls: 2.9719 - rpn_regr: 0.1487 - detector_cls: 0.9967 - detector_regr: 0.4572
INFO 2019-07-26 10:55:58 +0000 master-replica-0 591/1000 [================>.............] - ETA: 12:54 - rpn_cls: 2.9682 - rpn_regr: 0.1486 - detector_cls: 0.9961 - detector_regr: 0.4572
INFO 2019-07-26 10:56:00 +0000 master-replica-0 592/1000 [================>.............] - ETA: 12:52 - rpn_cls: 2.9651 - rpn_regr: 0.1484 - detector_cls: 0.9955 - detector_regr: 0.4574
INFO 2019-07-26 10:56:01 +0000 master-replica-0 593/1000 [================>.............] - ETA: 12:50 - rpn_cls: 2.9706 - rpn_regr: 0.1487 - detector_cls: 0.9949 - detector_regr: 0.4566
INFO 2019-07-26 10:56:02 +0000 master-replica-0 594/1000 [================>.............] - ETA: 12:48 - rpn_cls: 2.9683 - rpn_regr: 0.1488 - detector_cls: 0.9950 - detector_regr: 0.4567
INFO 2019-07-26 10:56:04 +0000 master-replica-0 595/1000 [================>.............] - ETA: 12:45 - rpn_cls: 2.9711 - rpn_regr: 0.1489 - detector_cls: 0.9945 - detector_regr: 0.4567
INFO 2019-07-26 10:56:05 +0000 master-replica-0 596/1000 [================>.............] - ETA: 12:43 - rpn_cls: 2.9680 - rpn_regr: 0.1488 - detector_cls: 0.9945 - detector_regr: 0.4566
INFO 2019-07-26 10:56:07 +0000 master-replica-0 597/1000 [================>.............] - ETA: 12:41 - rpn_cls: 2.9646 - rpn_regr: 0.1487 - detector_cls: 0.9955 - detector_regr: 0.4565
INFO 2019-07-26 10:56:08 +0000 master-replica-0 598/1000 [================>.............] - ETA: 12:39 - rpn_cls: 2.9596 - rpn_regr: 0.1486 - detector_cls: 0.9946 - detector_regr: 0.4563
INFO 2019-07-26 10:56:09 +0000 master-replica-0 599/1000 [================>.............] - ETA: 12:37 - rpn_cls: 2.9555 - rpn_regr: 0.1485 - detector_cls: 0.9951 - detector_regr: 0.4563
INFO 2019-07-26 10:56:11 +0000 master-replica-0 600/1000 [=================>............] - ETA: 12:34 - rpn_cls: 2.9506 - rpn_regr: 0.1484 - detector_cls: 0.9957 - detector_regr: 0.4563
INFO 2019-07-26 10:56:13 +0000 master-replica-0 601/1000 [=================>............] - ETA: 12:33 - rpn_cls: 2.9457 - rpn_regr: 0.1485 - detector_cls: 0.9948 - detector_regr: 0.4556
INFO 2019-07-26 10:56:15 +0000 master-replica-0 602/1000 [=================>............] - ETA: 12:31 - rpn_cls: 2.9408 - rpn_regr: 0.1489 - detector_cls: 0.9938 - detector_regr: 0.4548
INFO 2019-07-26 10:56:16 +0000 master-replica-0 603/1000 [=================>............] - ETA: 12:29 - rpn_cls: 2.9396 - rpn_regr: 0.1488 - detector_cls: 0.9953 - detector_regr: 0.4548
INFO 2019-07-26 10:56:17 +0000 master-replica-0 604/1000 [=================>............] - ETA: 12:26 - rpn_cls: 2.9482 - rpn_regr: 0.1488 - detector_cls: 0.9944 - detector_regr: 0.4540
INFO 2019-07-26 10:56:19 +0000 master-replica-0 605/1000 [=================>............] - ETA: 12:24 - rpn_cls: 2.9433 - rpn_regr: 0.1489 - detector_cls: 0.9937 - detector_regr: 0.4542
INFO 2019-07-26 10:56:20 +0000 master-replica-0 606/1000 [=================>............] - ETA: 12:22 - rpn_cls: 2.9395 - rpn_regr: 0.1488 - detector_cls: 0.9939 - detector_regr: 0.4541
INFO 2019-07-26 10:56:21 +0000 master-replica-0 607/1000 [=================>............] - ETA: 12:19 - rpn_cls: 2.9359 - rpn_regr: 0.1488 - detector_cls: 0.9934 - detector_regr: 0.4538
INFO 2019-07-26 10:56:22 +0000 master-replica-0 608/1000 [=================>............] - ETA: 12:17 - rpn_cls: 2.9311 - rpn_regr: 0.1487 - detector_cls: 0.9939 - detector_regr: 0.4537
INFO 2019-07-26 10:56:24 +0000 master-replica-0 609/1000 [=================>............] - ETA: 12:15 - rpn_cls: 2.9271 - rpn_regr: 0.1485 - detector_cls: 0.9940 - detector_regr: 0.4536
INFO 2019-07-26 10:56:26 +0000 master-replica-0 610/1000 [=================>............] - ETA: 12:13 - rpn_cls: 2.9241 - rpn_regr: 0.1484 - detector_cls: 0.9937 - detector_regr: 0.4537
INFO 2019-07-26 10:56:28 +0000 master-replica-0 611/1000 [=================>............] - ETA: 12:11 - rpn_cls: 2.9253 - rpn_regr: 0.1485 - detector_cls: 0.9932 - detector_regr: 0.4535
INFO 2019-07-26 10:56:29 +0000 master-replica-0 612/1000 [=================>............] - ETA: 12:09 - rpn_cls: 2.9345 - rpn_regr: 0.1506 - detector_cls: 0.9925 - detector_regr: 0.4528
INFO 2019-07-26 10:56:31 +0000 master-replica-0 613/1000 [=================>............] - ETA: 12:07 - rpn_cls: 2.9306 - rpn_regr: 0.1505 - detector_cls: 0.9922 - detector_regr: 0.4527
INFO 2019-07-26 10:56:32 +0000 master-replica-0 614/1000 [=================>............] - ETA: 12:05 - rpn_cls: 2.9263 - rpn_regr: 0.1503 - detector_cls: 0.9918 - detector_regr: 0.4526
INFO 2019-07-26 10:56:33 +0000 master-replica-0 615/1000 [=================>............] - ETA: 12:03 - rpn_cls: 2.9286 - rpn_regr: 0.1502 - detector_cls: 0.9904 - detector_regr: 0.4519
INFO 2019-07-26 10:56:34 +0000 master-replica-0 616/1000 [=================>............] - ETA: 12:00 - rpn_cls: 2.9253 - rpn_regr: 0.1501 - detector_cls: 0.9901 - detector_regr: 0.4519
INFO 2019-07-26 10:56:36 +0000 master-replica-0 617/1000 [=================>............] - ETA: 11:58 - rpn_cls: 2.9217 - rpn_regr: 0.1499 - detector_cls: 0.9906 - detector_regr: 0.4518
INFO 2019-07-26 10:56:37 +0000 master-replica-0 618/1000 [=================>............] - ETA: 11:56 - rpn_cls: 2.9189 - rpn_regr: 0.1498 - detector_cls: 0.9900 - detector_regr: 0.4520
INFO 2019-07-26 10:56:39 +0000 master-replica-0 619/1000 [=================>............] - ETA: 11:54 - rpn_cls: 2.9149 - rpn_regr: 0.1497 - detector_cls: 0.9912 - detector_regr: 0.4520
INFO 2019-07-26 10:56:40 +0000 master-replica-0 620/1000 [=================>............] - ETA: 11:51 - rpn_cls: 2.9119 - rpn_regr: 0.1495 - detector_cls: 0.9916 - detector_regr: 0.4520
INFO 2019-07-26 10:56:42 +0000 master-replica-0 621/1000 [=================>............] - ETA: 11:49 - rpn_cls: 2.9177 - rpn_regr: 0.1498 - detector_cls: 0.9905 - detector_regr: 0.4520
INFO 2019-07-26 10:56:43 +0000 master-replica-0 622/1000 [=================>............] - ETA: 11:47 - rpn_cls: 2.9130 - rpn_regr: 0.1498 - detector_cls: 0.9893 - detector_regr: 0.4513
INFO 2019-07-26 10:56:44 +0000 master-replica-0 623/1000 [=================>............] - ETA: 11:45 - rpn_cls: 2.9104 - rpn_regr: 0.1496 - detector_cls: 0.9895 - detector_regr: 0.4512
INFO 2019-07-26 10:56:46 +0000 master-replica-0 624/1000 [=================>............] - ETA: 11:43 - rpn_cls: 2.9058 - rpn_regr: 0.1495 - detector_cls: 0.9888 - detector_regr: 0.4511
INFO 2019-07-26 10:56:48 +0000 master-replica-0 625/1000 [=================>............] - ETA: 11:41 - rpn_cls: 2.9140 - rpn_regr: 0.1497 - detector_cls: 0.9881 - detector_regr: 0.4510
INFO 2019-07-26 10:56:49 +0000 master-replica-0 626/1000 [=================>............] - ETA: 11:39 - rpn_cls: 2.9109 - rpn_regr: 0.1496 - detector_cls: 0.9879 - detector_regr: 0.4510
INFO 2019-07-26 10:56:50 +0000 master-replica-0 627/1000 [=================>............] - ETA: 11:37 - rpn_cls: 2.9062 - rpn_regr: 0.1495 - detector_cls: 0.9875 - detector_regr: 0.4512
INFO 2019-07-26 10:56:51 +0000 master-replica-0 628/1000 [=================>............] - ETA: 11:34 - rpn_cls: 2.9034 - rpn_regr: 0.1494 - detector_cls: 0.9876 - detector_regr: 0.4513
INFO 2019-07-26 10:56:53 +0000 master-replica-0 629/1000 [=================>............] - ETA: 11:32 - rpn_cls: 2.9086 - rpn_regr: 0.1495 - detector_cls: 0.9869 - detector_regr: 0.4505
INFO 2019-07-26 10:56:54 +0000 master-replica-0 630/1000 [=================>............] - ETA: 11:30 - rpn_cls: 2.9050 - rpn_regr: 0.1493 - detector_cls: 0.9869 - detector_regr: 0.4506
INFO 2019-07-26 10:56:55 +0000 master-replica-0 631/1000 [=================>............] - ETA: 11:28 - rpn_cls: 2.9025 - rpn_regr: 0.1493 - detector_cls: 0.9874 - detector_regr: 0.4507
INFO 2019-07-26 10:56:57 +0000 master-replica-0 632/1000 [=================>............] - ETA: 11:26 - rpn_cls: 2.9108 - rpn_regr: 0.1492 - detector_cls: 0.9868 - detector_regr: 0.4507
INFO 2019-07-26 10:56:58 +0000 master-replica-0 633/1000 [=================>............] - ETA: 11:23 - rpn_cls: 2.9070 - rpn_regr: 0.1491 - detector_cls: 0.9871 - detector_regr: 0.4506
INFO 2019-07-26 10:56:59 +0000 master-replica-0 634/1000 [==================>...........] - ETA: 11:21 - rpn_cls: 2.9048 - rpn_regr: 0.1490 - detector_cls: 0.9877 - detector_regr: 0.4505
INFO 2019-07-26 10:57:01 +0000 master-replica-0 635/1000 [==================>...........] - ETA: 11:19 - rpn_cls: 2.9002 - rpn_regr: 0.1490 - detector_cls: 0.9879 - detector_regr: 0.4504
INFO 2019-07-26 10:57:02 +0000 master-replica-0 636/1000 [==================>...........] - ETA: 11:17 - rpn_cls: 2.8986 - rpn_regr: 0.1489 - detector_cls: 0.9882 - detector_regr: 0.4502
INFO 2019-07-26 10:57:04 +0000 master-replica-0 637/1000 [==================>...........] - ETA: 11:15 - rpn_cls: 2.9008 - rpn_regr: 0.1493 - detector_cls: 0.9871 - detector_regr: 0.4495
INFO 2019-07-26 10:57:05 +0000 master-replica-0 638/1000 [==================>...........] - ETA: 11:13 - rpn_cls: 2.8972 - rpn_regr: 0.1491 - detector_cls: 0.9881 - detector_regr: 0.4497
INFO 2019-07-26 10:57:07 +0000 master-replica-0 639/1000 [==================>...........] - ETA: 11:11 - rpn_cls: 2.8969 - rpn_regr: 0.1490 - detector_cls: 0.9883 - detector_regr: 0.4497
INFO 2019-07-26 10:57:08 +0000 master-replica-0 640/1000 [==================>...........] - ETA: 11:09 - rpn_cls: 2.8924 - rpn_regr: 0.1490 - detector_cls: 0.9885 - detector_regr: 0.4497
INFO 2019-07-26 10:57:10 +0000 master-replica-0 641/1000 [==================>...........] - ETA: 11:07 - rpn_cls: 2.8966 - rpn_regr: 0.1490 - detector_cls: 0.9886 - detector_regr: 0.4497
INFO 2019-07-26 10:57:11 +0000 master-replica-0 642/1000 [==================>...........] - ETA: 11:05 - rpn_cls: 2.9019 - rpn_regr: 0.1491 - detector_cls: 0.9891 - detector_regr: 0.4497
INFO 2019-07-26 10:57:13 +0000 master-replica-0 643/1000 [==================>...........] - ETA: 11:03 - rpn_cls: 2.9039 - rpn_regr: 0.1491 - detector_cls: 0.9883 - detector_regr: 0.4490
INFO 2019-07-26 10:57:14 +0000 master-replica-0 644/1000 [==================>...........] - ETA: 11:01 - rpn_cls: 2.9006 - rpn_regr: 0.1490 - detector_cls: 0.9883 - detector_regr: 0.4492
INFO 2019-07-26 10:57:16 +0000 master-replica-0 645/1000 [==================>...........] - ETA: 10:58 - rpn_cls: 2.8966 - rpn_regr: 0.1490 - detector_cls: 0.9882 - detector_regr: 0.4491
INFO 2019-07-26 10:57:17 +0000 master-replica-0 646/1000 [==================>...........] - ETA: 10:56 - rpn_cls: 2.8931 - rpn_regr: 0.1490 - detector_cls: 0.9880 - detector_regr: 0.4491
INFO 2019-07-26 10:57:18 +0000 master-replica-0 647/1000 [==================>...........] - ETA: 10:54 - rpn_cls: 2.8903 - rpn_regr: 0.1489 - detector_cls: 0.9880 - detector_regr: 0.4490
INFO 2019-07-26 10:57:20 +0000 master-replica-0 648/1000 [==================>...........] - ETA: 10:52 - rpn_cls: 2.8879 - rpn_regr: 0.1488 - detector_cls: 0.9881 - detector_regr: 0.4491
INFO 2019-07-26 10:57:22 +0000 master-replica-0 649/1000 [==================>...........] - ETA: 10:50 - rpn_cls: 2.8969 - rpn_regr: 0.1491 - detector_cls: 0.9872 - detector_regr: 0.4489
INFO 2019-07-26 10:57:23 +0000 master-replica-0 650/1000 [==================>...........] - ETA: 10:48 - rpn_cls: 2.8938 - rpn_regr: 0.1490 - detector_cls: 0.9876 - detector_regr: 0.4488
INFO 2019-07-26 10:57:24 +0000 master-replica-0 651/1000 [==================>...........] - ETA: 10:46 - rpn_cls: 2.8893 - rpn_regr: 0.1489 - detector_cls: 0.9873 - detector_regr: 0.4489
INFO 2019-07-26 10:57:26 +0000 master-replica-0 652/1000 [==================>...........] - ETA: 10:44 - rpn_cls: 2.8849 - rpn_regr: 0.1488 - detector_cls: 0.9872 - detector_regr: 0.4492
INFO 2019-07-26 10:57:27 +0000 master-replica-0 653/1000 [==================>...........] - ETA: 10:42 - rpn_cls: 2.8805 - rpn_regr: 0.1489 - detector_cls: 0.9874 - detector_regr: 0.4491
INFO 2019-07-26 10:57:28 +0000 master-replica-0 654/1000 [==================>...........] - ETA: 10:40 - rpn_cls: 2.8776 - rpn_regr: 0.1487 - detector_cls: 0.9875 - detector_regr: 0.4490
INFO 2019-07-26 10:57:29 +0000 master-replica-0 655/1000 [==================>...........] - ETA: 10:38 - rpn_cls: 2.8732 - rpn_regr: 0.1486 - detector_cls: 0.9872 - detector_regr: 0.4491
INFO 2019-07-26 10:57:32 +0000 master-replica-0 656/1000 [==================>...........] - ETA: 10:35 - rpn_cls: 2.8702 - rpn_regr: 0.1485 - detector_cls: 0.9867 - detector_regr: 0.4490
INFO 2019-07-26 10:57:34 +0000 master-replica-0 657/1000 [==================>...........] - ETA: 10:34 - rpn_cls: 2.8799 - rpn_regr: 0.1503 - detector_cls: 0.9857 - detector_regr: 0.4483
INFO 2019-07-26 10:57:35 +0000 master-replica-0 658/1000 [==================>...........] - ETA: 10:32 - rpn_cls: 2.8759 - rpn_regr: 0.1502 - detector_cls: 0.9860 - detector_regr: 0.4483
INFO 2019-07-26 10:57:37 +0000 master-replica-0 659/1000 [==================>...........] - ETA: 10:30 - rpn_cls: 2.8715 - rpn_regr: 0.1502 - detector_cls: 0.9856 - detector_regr: 0.4484
INFO 2019-07-26 10:57:38 +0000 master-replica-0 660/1000 [==================>...........] - ETA: 10:28 - rpn_cls: 2.8678 - rpn_regr: 0.1502 - detector_cls: 0.9850 - detector_regr: 0.4482
INFO 2019-07-26 10:57:39 +0000 master-replica-0 661/1000 [==================>...........] - ETA: 10:26 - rpn_cls: 2.8661 - rpn_regr: 0.1501 - detector_cls: 0.9849 - detector_regr: 0.4481
INFO 2019-07-26 10:57:41 +0000 master-replica-0 662/1000 [==================>...........] - ETA: 10:24 - rpn_cls: 2.8617 - rpn_regr: 0.1501 - detector_cls: 0.9842 - detector_regr: 0.4484
INFO 2019-07-26 10:57:42 +0000 master-replica-0 663/1000 [==================>...........] - ETA: 10:21 - rpn_cls: 2.8640 - rpn_regr: 0.1500 - detector_cls: 0.9840 - detector_regr: 0.4485
INFO 2019-07-26 10:57:44 +0000 master-replica-0 664/1000 [==================>...........] - ETA: 10:19 - rpn_cls: 2.8596 - rpn_regr: 0.1500 - detector_cls: 0.9838 - detector_regr: 0.4487
INFO 2019-07-26 10:57:45 +0000 master-replica-0 665/1000 [==================>...........] - ETA: 10:17 - rpn_cls: 2.8553 - rpn_regr: 0.1499 - detector_cls: 0.9830 - detector_regr: 0.4485
INFO 2019-07-26 10:57:47 +0000 master-replica-0 666/1000 [==================>...........] - ETA: 10:15 - rpn_cls: 2.8537 - rpn_regr: 0.1498 - detector_cls: 0.9834 - detector_regr: 0.4486
INFO 2019-07-26 10:57:48 +0000 master-replica-0 667/1000 [===================>..........] - ETA: 10:13 - rpn_cls: 2.8608 - rpn_regr: 0.1498 - detector_cls: 0.9847 - detector_regr: 0.4486
INFO 2019-07-26 10:57:49 +0000 master-replica-0 668/1000 [===================>..........] - ETA: 10:11 - rpn_cls: 2.8655 - rpn_regr: 0.1502 - detector_cls: 0.9850 - detector_regr: 0.4486
INFO 2019-07-26 10:57:51 +0000 master-replica-0 669/1000 [===================>..........] - ETA: 10:09 - rpn_cls: 2.8638 - rpn_regr: 0.1500 - detector_cls: 0.9848 - detector_regr: 0.4485
INFO 2019-07-26 10:57:52 +0000 master-replica-0 670/1000 [===================>..........] - ETA: 10:07 - rpn_cls: 2.8679 - rpn_regr: 0.1500 - detector_cls: 0.9852 - detector_regr: 0.4486
INFO 2019-07-26 10:57:53 +0000 master-replica-0 671/1000 [===================>..........] - ETA: 10:05 - rpn_cls: 2.8664 - rpn_regr: 0.1498 - detector_cls: 0.9848 - detector_regr: 0.4487
INFO 2019-07-26 10:57:55 +0000 master-replica-0 672/1000 [===================>..........] - ETA: 10:03 - rpn_cls: 2.8684 - rpn_regr: 0.1497 - detector_cls: 0.9837 - detector_regr: 0.4480
INFO 2019-07-26 10:57:56 +0000 master-replica-0 673/1000 [===================>..........] - ETA: 10:01 - rpn_cls: 2.8721 - rpn_regr: 0.1496 - detector_cls: 0.9832 - detector_regr: 0.4480
INFO 2019-07-26 10:57:57 +0000 master-replica-0 674/1000 [===================>..........] - ETA: 9:59 - rpn_cls: 2.8695 - rpn_regr: 0.1495 - detector_cls: 0.9832 - detector_regr: 0.4480
INFO 2019-07-26 10:57:59 +0000 master-replica-0 675/1000 [===================>..........] - ETA: 9:57 - rpn_cls: 2.8780 - rpn_regr: 0.1493 - detector_cls: 0.9826 - detector_regr: 0.4473
INFO 2019-07-26 10:58:00 +0000 master-replica-0 676/1000 [===================>..........] - ETA: 9:55 - rpn_cls: 2.8766 - rpn_regr: 0.1491 - detector_cls: 0.9821 - detector_regr: 0.4472
INFO 2019-07-26 10:58:02 +0000 master-replica-0 677/1000 [===================>..........] - ETA: 9:53 - rpn_cls: 2.8741 - rpn_regr: 0.1490 - detector_cls: 0.9819 - detector_regr: 0.4472
INFO 2019-07-26 10:58:03 +0000 master-replica-0 678/1000 [===================>..........] - ETA: 9:51 - rpn_cls: 2.8747 - rpn_regr: 0.1492 - detector_cls: 0.9822 - detector_regr: 0.4471
INFO 2019-07-26 10:58:04 +0000 master-replica-0 679/1000 [===================>..........] - ETA: 9:48 - rpn_cls: 2.8733 - rpn_regr: 0.1491 - detector_cls: 0.9820 - detector_regr: 0.4470
INFO 2019-07-26 10:58:06 +0000 master-replica-0 680/1000 [===================>..........] - ETA: 9:46 - rpn_cls: 2.8795 - rpn_regr: 0.1493 - detector_cls: 0.9813 - detector_regr: 0.4473
INFO 2019-07-26 10:58:07 +0000 master-replica-0 681/1000 [===================>..........] - ETA: 9:44 - rpn_cls: 2.8767 - rpn_regr: 0.1492 - detector_cls: 0.9812 - detector_regr: 0.4474
INFO 2019-07-26 10:58:08 +0000 master-replica-0 682/1000 [===================>..........] - ETA: 9:42 - rpn_cls: 2.8725 - rpn_regr: 0.1493 - detector_cls: 0.9807 - detector_regr: 0.4477
INFO 2019-07-26 10:58:10 +0000 master-replica-0 683/1000 [===================>..........] - ETA: 9:40 - rpn_cls: 2.8690 - rpn_regr: 0.1491 - detector_cls: 0.9803 - detector_regr: 0.4475
INFO 2019-07-26 10:58:12 +0000 master-replica-0 684/1000 [===================>..........] - ETA: 9:38 - rpn_cls: 2.8662 - rpn_regr: 0.1489 - detector_cls: 0.9811 - detector_regr: 0.4475
INFO 2019-07-26 10:58:13 +0000 master-replica-0 685/1000 [===================>..........] - ETA: 9:36 - rpn_cls: 2.8684 - rpn_regr: 0.1489 - detector_cls: 0.9814 - detector_regr: 0.4476
INFO 2019-07-26 10:58:15 +0000 master-replica-0 686/1000 [===================>..........] - ETA: 9:35 - rpn_cls: 2.8642 - rpn_regr: 0.1492 - detector_cls: 0.9809 - detector_regr: 0.4473
INFO 2019-07-26 10:58:16 +0000 master-replica-0 687/1000 [===================>..........] - ETA: 9:32 - rpn_cls: 2.8600 - rpn_regr: 0.1494 - detector_cls: 0.9808 - detector_regr: 0.4473
INFO 2019-07-26 10:58:18 +0000 master-replica-0 688/1000 [===================>..........] - ETA: 9:30 - rpn_cls: 2.8567 - rpn_regr: 0.1493 - detector_cls: 0.9809 - detector_regr: 0.4473
INFO 2019-07-26 10:58:19 +0000 master-replica-0 689/1000 [===================>..........] - ETA: 9:29 - rpn_cls: 2.8536 - rpn_regr: 0.1492 - detector_cls: 0.9802 - detector_regr: 0.4473
INFO 2019-07-26 10:58:20 +0000 master-replica-0 690/1000 [===================>..........] - ETA: 9:26 - rpn_cls: 2.8615 - rpn_regr: 0.1491 - detector_cls: 0.9801 - detector_regr: 0.4475
INFO 2019-07-26 10:58:22 +0000 master-replica-0 691/1000 [===================>..........] - ETA: 9:24 - rpn_cls: 2.8573 - rpn_regr: 0.1492 - detector_cls: 0.9796 - detector_regr: 0.4473
INFO 2019-07-26 10:58:26 +0000 master-replica-0 692/1000 [===================>..........] - ETA: 9:23 - rpn_cls: 2.8641 - rpn_regr: 0.1495 - detector_cls: 0.9788 - detector_regr: 0.4466
INFO 2019-07-26 10:58:27 +0000 master-replica-0 693/1000 [===================>..........] - ETA: 9:21 - rpn_cls: 2.8619 - rpn_regr: 0.1494 - detector_cls: 0.9790 - detector_regr: 0.4466
INFO 2019-07-26 10:58:28 +0000 master-replica-0 694/1000 [===================>..........] - ETA: 9:19 - rpn_cls: 2.8578 - rpn_regr: 0.1493 - detector_cls: 0.9791 - detector_regr: 0.4467
INFO 2019-07-26 10:58:29 +0000 master-replica-0 695/1000 [===================>..........] - ETA: 9:17 - rpn_cls: 2.8557 - rpn_regr: 0.1492 - detector_cls: 0.9786 - detector_regr: 0.4469
INFO 2019-07-26 10:58:31 +0000 master-replica-0 696/1000 [===================>..........] - ETA: 9:15 - rpn_cls: 2.8522 - rpn_regr: 0.1492 - detector_cls: 0.9792 - detector_regr: 0.4468
INFO 2019-07-26 10:58:32 +0000 master-replica-0 697/1000 [===================>..........] - ETA: 9:13 - rpn_cls: 2.8482 - rpn_regr: 0.1491 - detector_cls: 0.9802 - detector_regr: 0.4466
INFO 2019-07-26 10:58:34 +0000 master-replica-0 698/1000 [===================>..........] - ETA: 9:11 - rpn_cls: 2.8441 - rpn_regr: 0.1491 - detector_cls: 0.9793 - detector_regr: 0.4460
INFO 2019-07-26 10:58:35 +0000 master-replica-0 699/1000 [===================>..........] - ETA: 9:09 - rpn_cls: 2.8413 - rpn_regr: 0.1489 - detector_cls: 0.9793 - detector_regr: 0.4459
INFO 2019-07-26 10:58:36 +0000 master-replica-0 700/1000 [====================>.........] - ETA: 9:07 - rpn_cls: 2.8373 - rpn_regr: 0.1488 - detector_cls: 0.9788 - detector_regr: 0.4462
INFO 2019-07-26 10:58:38 +0000 master-replica-0 701/1000 [====================>.........] - ETA: 9:05 - rpn_cls: 2.8332 - rpn_regr: 0.1487 - detector_cls: 0.9784 - detector_regr: 0.4461
INFO 2019-07-26 10:58:40 +0000 master-replica-0 702/1000 [====================>.........] - ETA: 9:03 - rpn_cls: 2.8292 - rpn_regr: 0.1487 - detector_cls: 0.9781 - detector_regr: 0.4461
INFO 2019-07-26 10:58:41 +0000 master-replica-0 703/1000 [====================>.........] - ETA: 9:01 - rpn_cls: 2.8359 - rpn_regr: 0.1489 - detector_cls: 0.9773 - detector_regr: 0.4455
INFO 2019-07-26 10:58:42 +0000 master-replica-0 704/1000 [====================>.........] - ETA: 8:59 - rpn_cls: 2.8333 - rpn_regr: 0.1488 - detector_cls: 0.9768 - detector_regr: 0.4454
INFO 2019-07-26 10:58:44 +0000 master-replica-0 705/1000 [====================>.........] - ETA: 8:57 - rpn_cls: 2.8292 - rpn_regr: 0.1487 - detector_cls: 0.9767 - detector_regr: 0.4457
INFO 2019-07-26 10:58:45 +0000 master-replica-0 706/1000 [====================>.........] - ETA: 8:55 - rpn_cls: 2.8269 - rpn_regr: 0.1486 - detector_cls: 0.9764 - detector_regr: 0.4456
INFO 2019-07-26 10:58:47 +0000 master-replica-0 707/1000 [====================>.........] - ETA: 8:53 - rpn_cls: 2.8229 - rpn_regr: 0.1485 - detector_cls: 0.9758 - detector_regr: 0.4456
INFO 2019-07-26 10:58:49 +0000 master-replica-0 708/1000 [====================>.........] - ETA: 8:51 - rpn_cls: 2.8189 - rpn_regr: 0.1486 - detector_cls: 0.9750 - detector_regr: 0.4450
INFO 2019-07-26 10:58:50 +0000 master-replica-0 709/1000 [====================>.........] - ETA: 8:50 - rpn_cls: 2.8170 - rpn_regr: 0.1485 - detector_cls: 0.9742 - detector_regr: 0.4451
INFO 2019-07-26 10:58:51 +0000 master-replica-0 710/1000 [====================>.........] - ETA: 8:48 - rpn_cls: 2.8139 - rpn_regr: 0.1484 - detector_cls: 0.9742 - detector_regr: 0.4453
INFO 2019-07-26 10:58:53 +0000 master-replica-0 711/1000 [====================>.........] - ETA: 8:46 - rpn_cls: 2.8137 - rpn_regr: 0.1484 - detector_cls: 0.9738 - detector_regr: 0.4452
INFO 2019-07-26 10:58:54 +0000 master-replica-0 712/1000 [====================>.........] - ETA: 8:44 - rpn_cls: 2.8098 - rpn_regr: 0.1484 - detector_cls: 0.9742 - detector_regr: 0.4453
INFO 2019-07-26 10:58:55 +0000 master-replica-0 713/1000 [====================>.........] - ETA: 8:42 - rpn_cls: 2.8091 - rpn_regr: 0.1483 - detector_cls: 0.9746 - detector_regr: 0.4453
INFO 2019-07-26 10:58:58 +0000 master-replica-0 714/1000 [====================>.........] - ETA: 8:40 - rpn_cls: 2.8073 - rpn_regr: 0.1482 - detector_cls: 0.9744 - detector_regr: 0.4453
INFO 2019-07-26 10:59:00 +0000 master-replica-0 715/1000 [====================>.........] - ETA: 8:38 - rpn_cls: 2.8172 - rpn_regr: 0.1483 - detector_cls: 0.9737 - detector_regr: 0.4452
INFO 2019-07-26 10:59:01 +0000 master-replica-0 716/1000 [====================>.........] - ETA: 8:36 - rpn_cls: 2.8269 - rpn_regr: 0.1482 - detector_cls: 0.9730 - detector_regr: 0.4446
INFO 2019-07-26 10:59:02 +0000 master-replica-0 717/1000 [====================>.........] - ETA: 8:34 - rpn_cls: 2.8289 - rpn_regr: 0.1481 - detector_cls: 0.9720 - detector_regr: 0.4448
INFO 2019-07-26 10:59:04 +0000 master-replica-0 718/1000 [====================>.........] - ETA: 8:32 - rpn_cls: 2.8373 - rpn_regr: 0.1480 - detector_cls: 0.9712 - detector_regr: 0.4450
INFO 2019-07-26 10:59:05 +0000 master-replica-0 719/1000 [====================>.........] - ETA: 8:30 - rpn_cls: 2.8333 - rpn_regr: 0.1478 - detector_cls: 0.9712 - detector_regr: 0.4450
INFO 2019-07-26 10:59:06 +0000 master-replica-0 720/1000 [====================>.........] - ETA: 8:28 - rpn_cls: 2.8304 - rpn_regr: 0.1478 - detector_cls: 0.9712 - detector_regr: 0.4449
INFO 2019-07-26 10:59:08 +0000 master-replica-0 721/1000 [====================>.........] - ETA: 8:26 - rpn_cls: 2.8265 - rpn_regr: 0.1477 - detector_cls: 0.9704 - detector_regr: 0.4450
INFO 2019-07-26 10:59:09 +0000 master-replica-0 722/1000 [====================>.........] - ETA: 8:24 - rpn_cls: 2.8248 - rpn_regr: 0.1476 - detector_cls: 0.9702 - detector_regr: 0.4451
INFO 2019-07-26 10:59:11 +0000 master-replica-0 723/1000 [====================>.........] - ETA: 8:22 - rpn_cls: 2.8209 - rpn_regr: 0.1476 - detector_cls: 0.9698 - detector_regr: 0.4452
INFO 2019-07-26 10:59:12 +0000 master-replica-0 724/1000 [====================>.........] - ETA: 8:20 - rpn_cls: 2.8247 - rpn_regr: 0.1476 - detector_cls: 0.9709 - detector_regr: 0.4452
INFO 2019-07-26 10:59:13 +0000 master-replica-0 725/1000 [====================>.........] - ETA: 8:18 - rpn_cls: 2.8208 - rpn_regr: 0.1476 - detector_cls: 0.9707 - detector_regr: 0.4453
INFO 2019-07-26 10:59:15 +0000 master-replica-0 726/1000 [====================>.........] - ETA: 8:16 - rpn_cls: 2.8181 - rpn_regr: 0.1475 - detector_cls: 0.9708 - detector_regr: 0.4456
INFO 2019-07-26 10:59:16 +0000 master-replica-0 727/1000 [====================>.........] - ETA: 8:14 - rpn_cls: 2.8157 - rpn_regr: 0.1474 - detector_cls: 0.9716 - detector_regr: 0.4456
INFO 2019-07-26 10:59:17 +0000 master-replica-0 728/1000 [====================>.........] - ETA: 8:12 - rpn_cls: 2.8129 - rpn_regr: 0.1474 - detector_cls: 0.9733 - detector_regr: 0.4456
INFO 2019-07-26 10:59:19 +0000 master-replica-0 729/1000 [====================>.........] - ETA: 8:10 - rpn_cls: 2.8090 - rpn_regr: 0.1474 - detector_cls: 0.9741 - detector_regr: 0.4455
INFO 2019-07-26 10:59:21 +0000 master-replica-0 730/1000 [====================>.........] - ETA: 8:08 - rpn_cls: 2.8151 - rpn_regr: 0.1477 - detector_cls: 0.9749 - detector_regr: 0.4455
INFO 2019-07-26 10:59:22 +0000 master-replica-0 731/1000 [====================>.........] - ETA: 8:07 - rpn_cls: 2.8130 - rpn_regr: 0.1476 - detector_cls: 0.9760 - detector_regr: 0.4454
INFO 2019-07-26 10:59:24 +0000 master-replica-0 732/1000 [====================>.........] - ETA: 8:05 - rpn_cls: 2.8218 - rpn_regr: 0.1476 - detector_cls: 0.9758 - detector_regr: 0.4455
INFO 2019-07-26 10:59:26 +0000 master-replica-0 733/1000 [====================>.........] - ETA: 8:03 - rpn_cls: 2.8179 - rpn_regr: 0.1475 - detector_cls: 0.9752 - detector_regr: 0.4456
INFO 2019-07-26 10:59:27 +0000 master-replica-0 734/1000 [=====================>........] - ETA: 8:01 - rpn_cls: 2.8159 - rpn_regr: 0.1474 - detector_cls: 0.9749 - detector_regr: 0.4457
INFO 2019-07-26 10:59:29 +0000 master-replica-0 735/1000 [=====================>........] - ETA: 7:59 - rpn_cls: 2.8165 - rpn_regr: 0.1473 - detector_cls: 0.9760 - detector_regr: 0.4457
INFO 2019-07-26 10:59:31 +0000 master-replica-0 736/1000 [=====================>........] - ETA: 7:57 - rpn_cls: 2.8240 - rpn_regr: 0.1475 - detector_cls: 0.9755 - detector_regr: 0.4452
INFO 2019-07-26 10:59:32 +0000 master-replica-0 737/1000 [=====================>........] - ETA: 7:55 - rpn_cls: 2.8237 - rpn_regr: 0.1474 - detector_cls: 0.9764 - detector_regr: 0.4452
INFO 2019-07-26 10:59:34 +0000 master-replica-0 738/1000 [=====================>........] - ETA: 7:54 - rpn_cls: 2.8288 - rpn_regr: 0.1478 - detector_cls: 0.9757 - detector_regr: 0.4446
INFO 2019-07-26 10:59:35 +0000 master-replica-0 739/1000 [=====================>........] - ETA: 7:52 - rpn_cls: 2.8280 - rpn_regr: 0.1477 - detector_cls: 0.9756 - detector_regr: 0.4444
INFO 2019-07-26 10:59:37 +0000 master-replica-0 740/1000 [=====================>........] - ETA: 7:50 - rpn_cls: 2.8361 - rpn_regr: 0.1477 - detector_cls: 0.9756 - detector_regr: 0.4446
INFO 2019-07-26 10:59:38 +0000 master-replica-0 741/1000 [=====================>........] - ETA: 7:48 - rpn_cls: 2.8358 - rpn_regr: 0.1480 - detector_cls: 0.9749 - detector_regr: 0.4447
INFO 2019-07-26 10:59:40 +0000 master-replica-0 742/1000 [=====================>........] - ETA: 7:46 - rpn_cls: 2.8372 - rpn_regr: 0.1479 - detector_cls: 0.9753 - detector_regr: 0.4447
INFO 2019-07-26 10:59:41 +0000 master-replica-0 743/1000 [=====================>........] - ETA: 7:44 - rpn_cls: 2.8397 - rpn_regr: 0.1480 - detector_cls: 0.9757 - detector_regr: 0.4448
INFO 2019-07-26 10:59:42 +0000 master-replica-0 744/1000 [=====================>........] - ETA: 7:42 - rpn_cls: 2.8358 - rpn_regr: 0.1479 - detector_cls: 0.9756 - detector_regr: 0.4448
INFO 2019-07-26 10:59:44 +0000 master-replica-0 745/1000 [=====================>........] - ETA: 7:40 - rpn_cls: 2.8387 - rpn_regr: 0.1480 - detector_cls: 0.9757 - detector_regr: 0.4448
INFO 2019-07-26 10:59:45 +0000 master-replica-0 746/1000 [=====================>........] - ETA: 7:38 - rpn_cls: 2.8349 - rpn_regr: 0.1479 - detector_cls: 0.9755 - detector_regr: 0.4449
INFO 2019-07-26 10:59:46 +0000 master-replica-0 747/1000 [=====================>........] - ETA: 7:36 - rpn_cls: 2.8330 - rpn_regr: 0.1479 - detector_cls: 0.9758 - detector_regr: 0.4449
INFO 2019-07-26 10:59:48 +0000 master-replica-0 748/1000 [=====================>........] - ETA: 7:34 - rpn_cls: 2.8310 - rpn_regr: 0.1478 - detector_cls: 0.9762 - detector_regr: 0.4450
INFO 2019-07-26 10:59:49 +0000 master-replica-0 749/1000 [=====================>........] - ETA: 7:32 - rpn_cls: 2.8272 - rpn_regr: 0.1477 - detector_cls: 0.9765 - detector_regr: 0.4449
INFO 2019-07-26 10:59:50 +0000 master-replica-0 750/1000 [=====================>........] - ETA: 7:30 - rpn_cls: 2.8234 - rpn_regr: 0.1476 - detector_cls: 0.9767 - detector_regr: 0.4449
INFO 2019-07-26 10:59:52 +0000 master-replica-0 751/1000 [=====================>........] - ETA: 7:28 - rpn_cls: 2.8207 - rpn_regr: 0.1475 - detector_cls: 0.9766 - detector_regr: 0.4449
INFO 2019-07-26 10:59:53 +0000 master-replica-0 752/1000 [=====================>........] - ETA: 7:26 - rpn_cls: 2.8226 - rpn_regr: 0.1477 - detector_cls: 0.9758 - detector_regr: 0.4443
INFO 2019-07-26 10:59:54 +0000 master-replica-0 753/1000 [=====================>........] - ETA: 7:24 - rpn_cls: 2.8189 - rpn_regr: 0.1477 - detector_cls: 0.9753 - detector_regr: 0.4446
INFO 2019-07-26 10:59:55 +0000 master-replica-0 754/1000 [=====================>........] - ETA: 7:22 - rpn_cls: 2.8152 - rpn_regr: 0.1476 - detector_cls: 0.9751 - detector_regr: 0.4447
INFO 2019-07-26 10:59:57 +0000 master-replica-0 755/1000 [=====================>........] - ETA: 7:20 - rpn_cls: 2.8201 - rpn_regr: 0.1479 - detector_cls: 0.9753 - detector_regr: 0.4447
INFO 2019-07-26 10:59:58 +0000 master-replica-0 756/1000 [=====================>........] - ETA: 7:18 - rpn_cls: 2.8190 - rpn_regr: 0.1482 - detector_cls: 0.9757 - detector_regr: 0.4448
INFO 2019-07-26 11:00:00 +0000 master-replica-0 757/1000 [=====================>........] - ETA: 7:17 - rpn_cls: 2.8162 - rpn_regr: 0.1481 - detector_cls: 0.9758 - detector_regr: 0.4447
INFO 2019-07-26 11:00:01 +0000 master-replica-0 758/1000 [=====================>........] - ETA: 7:15 - rpn_cls: 2.8152 - rpn_regr: 0.1481 - detector_cls: 0.9760 - detector_regr: 0.4447
INFO 2019-07-26 11:00:03 +0000 master-replica-0 759/1000 [=====================>........] - ETA: 7:13 - rpn_cls: 2.8130 - rpn_regr: 0.1481 - detector_cls: 0.9758 - detector_regr: 0.4445
INFO 2019-07-26 11:00:04 +0000 master-replica-0 760/1000 [=====================>........] - ETA: 7:11 - rpn_cls: 2.8093 - rpn_regr: 0.1480 - detector_cls: 0.9752 - detector_regr: 0.4444
INFO 2019-07-26 11:00:06 +0000 master-replica-0 761/1000 [=====================>........] - ETA: 7:09 - rpn_cls: 2.8072 - rpn_regr: 0.1478 - detector_cls: 0.9752 - detector_regr: 0.4444
INFO 2019-07-26 11:00:08 +0000 master-replica-0 762/1000 [=====================>........] - ETA: 7:07 - rpn_cls: 2.8052 - rpn_regr: 0.1477 - detector_cls: 0.9758 - detector_regr: 0.4445
INFO 2019-07-26 11:00:09 +0000 master-replica-0 763/1000 [=====================>........] - ETA: 7:05 - rpn_cls: 2.8121 - rpn_regr: 0.1479 - detector_cls: 0.9751 - detector_regr: 0.4446
INFO 2019-07-26 11:00:10 +0000 master-replica-0 764/1000 [=====================>........] - ETA: 7:03 - rpn_cls: 2.8139 - rpn_regr: 0.1479 - detector_cls: 0.9745 - detector_regr: 0.4440
INFO 2019-07-26 11:00:12 +0000 master-replica-0 765/1000 [=====================>........] - ETA: 7:01 - rpn_cls: 2.8102 - rpn_regr: 0.1480 - detector_cls: 0.9746 - detector_regr: 0.4441
INFO 2019-07-26 11:00:13 +0000 master-replica-0 766/1000 [=====================>........] - ETA: 7:00 - rpn_cls: 2.8109 - rpn_regr: 0.1483 - detector_cls: 0.9737 - detector_regr: 0.4435
INFO 2019-07-26 11:00:15 +0000 master-replica-0 767/1000 [======================>.......] - ETA: 6:58 - rpn_cls: 2.8072 - rpn_regr: 0.1485 - detector_cls: 0.9741 - detector_regr: 0.4436
INFO 2019-07-26 11:00:17 +0000 master-replica-0 768/1000 [======================>.......] - ETA: 6:56 - rpn_cls: 2.8147 - rpn_regr: 0.1500 - detector_cls: 0.9737 - detector_regr: 0.4430
INFO 2019-07-26 11:00:18 +0000 master-replica-0 769/1000 [======================>.......] - ETA: 6:54 - rpn_cls: 2.8117 - rpn_regr: 0.1500 - detector_cls: 0.9738 - detector_regr: 0.4431
INFO 2019-07-26 11:00:19 +0000 master-replica-0 770/1000 [======================>.......] - ETA: 6:52 - rpn_cls: 2.8080 - rpn_regr: 0.1499 - detector_cls: 0.9736 - detector_regr: 0.4433
INFO 2019-07-26 11:00:21 +0000 master-replica-0 771/1000 [======================>.......] - ETA: 6:50 - rpn_cls: 2.8053 - rpn_regr: 0.1498 - detector_cls: 0.9739 - detector_regr: 0.4431
INFO 2019-07-26 11:00:22 +0000 master-replica-0 772/1000 [======================>.......] - ETA: 6:48 - rpn_cls: 2.8028 - rpn_regr: 0.1496 - detector_cls: 0.9738 - detector_regr: 0.4430
INFO 2019-07-26 11:00:23 +0000 master-replica-0 773/1000 [======================>.......] - ETA: 6:46 - rpn_cls: 2.8091 - rpn_regr: 0.1496 - detector_cls: 0.9736 - detector_regr: 0.4430
INFO 2019-07-26 11:00:24 +0000 master-replica-0 774/1000 [======================>.......] - ETA: 6:44 - rpn_cls: 2.8055 - rpn_regr: 0.1495 - detector_cls: 0.9735 - detector_regr: 0.4428
INFO 2019-07-26 11:00:26 +0000 master-replica-0 775/1000 [======================>.......] - ETA: 6:42 - rpn_cls: 2.8033 - rpn_regr: 0.1493 - detector_cls: 0.9736 - detector_regr: 0.4428
INFO 2019-07-26 11:00:27 +0000 master-replica-0 776/1000 [======================>.......] - ETA: 6:40 - rpn_cls: 2.8108 - rpn_regr: 0.1495 - detector_cls: 0.9727 - detector_regr: 0.4422
INFO 2019-07-26 11:00:29 +0000 master-replica-0 777/1000 [======================>.......] - ETA: 6:39 - rpn_cls: 2.8086 - rpn_regr: 0.1494 - detector_cls: 0.9727 - detector_regr: 0.4422
INFO 2019-07-26 11:00:30 +0000 master-replica-0 778/1000 [======================>.......] - ETA: 6:37 - rpn_cls: 2.8061 - rpn_regr: 0.1493 - detector_cls: 0.9725 - detector_regr: 0.4422
INFO 2019-07-26 11:00:31 +0000 master-replica-0 779/1000 [======================>.......] - ETA: 6:35 - rpn_cls: 2.8025 - rpn_regr: 0.1492 - detector_cls: 0.9727 - detector_regr: 0.4422
INFO 2019-07-26 11:00:33 +0000 master-replica-0 780/1000 [======================>.......] - ETA: 6:33 - rpn_cls: 2.7997 - rpn_regr: 0.1492 - detector_cls: 0.9731 - detector_regr: 0.4424
INFO 2019-07-26 11:00:34 +0000 master-replica-0 781/1000 [======================>.......] - ETA: 6:31 - rpn_cls: 2.8030 - rpn_regr: 0.1493 - detector_cls: 0.9734 - detector_regr: 0.4424
INFO 2019-07-26 11:00:36 +0000 master-replica-0 782/1000 [======================>.......] - ETA: 6:29 - rpn_cls: 2.8013 - rpn_regr: 0.1492 - detector_cls: 0.9731 - detector_regr: 0.4422
INFO 2019-07-26 11:00:37 +0000 master-replica-0 783/1000 [======================>.......] - ETA: 6:27 - rpn_cls: 2.8056 - rpn_regr: 0.1493 - detector_cls: 0.9725 - detector_regr: 0.4417
INFO 2019-07-26 11:00:38 +0000 master-replica-0 784/1000 [======================>.......] - ETA: 6:25 - rpn_cls: 2.8021 - rpn_regr: 0.1492 - detector_cls: 0.9722 - detector_regr: 0.4417
INFO 2019-07-26 11:00:40 +0000 master-replica-0 785/1000 [======================>.......] - ETA: 6:23 - rpn_cls: 2.7985 - rpn_regr: 0.1491 - detector_cls: 0.9727 - detector_regr: 0.4418
INFO 2019-07-26 11:00:41 +0000 master-replica-0 786/1000 [======================>.......] - ETA: 6:21 - rpn_cls: 2.7958 - rpn_regr: 0.1490 - detector_cls: 0.9735 - detector_regr: 0.4418
INFO 2019-07-26 11:00:42 +0000 master-replica-0 787/1000 [======================>.......] - ETA: 6:19 - rpn_cls: 2.7928 - rpn_regr: 0.1489 - detector_cls: 0.9731 - detector_regr: 0.4418
INFO 2019-07-26 11:00:44 +0000 master-replica-0 788/1000 [======================>.......] - ETA: 6:18 - rpn_cls: 2.7911 - rpn_regr: 0.1488 - detector_cls: 0.9728 - detector_regr: 0.4419
INFO 2019-07-26 11:00:45 +0000 master-replica-0 789/1000 [======================>.......] - ETA: 6:16 - rpn_cls: 2.7987 - rpn_regr: 0.1490 - detector_cls: 0.9723 - detector_regr: 0.4415
INFO 2019-07-26 11:00:47 +0000 master-replica-0 790/1000 [======================>.......] - ETA: 6:14 - rpn_cls: 2.7965 - rpn_regr: 0.1489 - detector_cls: 0.9729 - detector_regr: 0.4415
INFO 2019-07-26 11:00:49 +0000 master-replica-0 791/1000 [======================>.......] - ETA: 6:12 - rpn_cls: 2.7980 - rpn_regr: 0.1488 - detector_cls: 0.9730 - detector_regr: 0.4414
INFO 2019-07-26 11:00:50 +0000 master-replica-0 792/1000 [======================>.......] - ETA: 6:10 - rpn_cls: 2.7956 - rpn_regr: 0.1488 - detector_cls: 0.9728 - detector_regr: 0.4414
INFO 2019-07-26 11:00:51 +0000 master-replica-0 793/1000 [======================>.......] - ETA: 6:08 - rpn_cls: 2.7933 - rpn_regr: 0.1488 - detector_cls: 0.9725 - detector_regr: 0.4413
INFO 2019-07-26 11:00:53 +0000 master-replica-0 794/1000 [======================>.......] - ETA: 6:06 - rpn_cls: 2.7907 - rpn_regr: 0.1487 - detector_cls: 0.9734 - detector_regr: 0.4412
INFO 2019-07-26 11:00:54 +0000 master-replica-0 795/1000 [======================>.......] - ETA: 6:05 - rpn_cls: 2.7879 - rpn_regr: 0.1485 - detector_cls: 0.9737 - detector_regr: 0.4411
INFO 2019-07-26 11:00:55 +0000 master-replica-0 796/1000 [======================>.......] - ETA: 6:03 - rpn_cls: 2.7895 - rpn_regr: 0.1486 - detector_cls: 0.9735 - detector_regr: 0.4412
INFO 2019-07-26 11:00:57 +0000 master-replica-0 797/1000 [======================>.......] - ETA: 6:01 - rpn_cls: 2.7868 - rpn_regr: 0.1485 - detector_cls: 0.9735 - detector_regr: 0.4412
INFO 2019-07-26 11:00:58 +0000 master-replica-0 798/1000 [======================>.......] - ETA: 5:59 - rpn_cls: 2.7849 - rpn_regr: 0.1485 - detector_cls: 0.9734 - detector_regr: 0.4412
INFO 2019-07-26 11:00:59 +0000 master-replica-0 799/1000 [======================>.......] - ETA: 5:57 - rpn_cls: 2.7827 - rpn_regr: 0.1484 - detector_cls: 0.9731 - detector_regr: 0.4412
INFO 2019-07-26 11:01:01 +0000 master-replica-0 800/1000 [=======================>......] - ETA: 5:55 - rpn_cls: 2.7846 - rpn_regr: 0.1483 - detector_cls: 0.9722 - detector_regr: 0.4415
INFO 2019-07-26 11:01:02 +0000 master-replica-0 801/1000 [=======================>......] - ETA: 5:53 - rpn_cls: 2.7921 - rpn_regr: 0.1485 - detector_cls: 0.9713 - detector_regr: 0.4409
INFO 2019-07-26 11:01:03 +0000 master-replica-0 802/1000 [=======================>......] - ETA: 5:51 - rpn_cls: 2.7889 - rpn_regr: 0.1484 - detector_cls: 0.9711 - detector_regr: 0.4408
INFO 2019-07-26 11:01:05 +0000 master-replica-0 803/1000 [=======================>......] - ETA: 5:49 - rpn_cls: 2.7924 - rpn_regr: 0.1483 - detector_cls: 0.9708 - detector_regr: 0.4410
INFO 2019-07-26 11:01:06 +0000 master-replica-0 804/1000 [=======================>......] - ETA: 5:48 - rpn_cls: 2.7910 - rpn_regr: 0.1484 - detector_cls: 0.9713 - detector_regr: 0.4410
INFO 2019-07-26 11:01:08 +0000 master-replica-0 805/1000 [=======================>......] - ETA: 5:46 - rpn_cls: 2.7889 - rpn_regr: 0.1482 - detector_cls: 0.9713 - detector_regr: 0.4410
INFO 2019-07-26 11:01:09 +0000 master-replica-0 806/1000 [=======================>......] - ETA: 5:44 - rpn_cls: 2.7859 - rpn_regr: 0.1481 - detector_cls: 0.9724 - detector_regr: 0.4409
INFO 2019-07-26 11:01:10 +0000 master-replica-0 807/1000 [=======================>......] - ETA: 5:42 - rpn_cls: 2.7825 - rpn_regr: 0.1481 - detector_cls: 0.9720 - detector_regr: 0.4410
INFO 2019-07-26 11:01:12 +0000 master-replica-0 808/1000 [=======================>......] - ETA: 5:40 - rpn_cls: 2.7881 - rpn_regr: 0.1480 - detector_cls: 0.9715 - detector_regr: 0.4413
INFO 2019-07-26 11:01:13 +0000 master-replica-0 809/1000 [=======================>......] - ETA: 5:38 - rpn_cls: 2.7897 - rpn_regr: 0.1480 - detector_cls: 0.9711 - detector_regr: 0.4411
INFO 2019-07-26 11:01:14 +0000 master-replica-0 810/1000 [=======================>......] - ETA: 5:36 - rpn_cls: 2.7975 - rpn_regr: 0.1481 - detector_cls: 0.9708 - detector_regr: 0.4411
INFO 2019-07-26 11:01:16 +0000 master-replica-0 811/1000 [=======================>......] - ETA: 5:35 - rpn_cls: 2.7951 - rpn_regr: 0.1480 - detector_cls: 0.9712 - detector_regr: 0.4410
INFO 2019-07-26 11:01:17 +0000 master-replica-0 812/1000 [=======================>......] - ETA: 5:33 - rpn_cls: 2.8026 - rpn_regr: 0.1480 - detector_cls: 0.9705 - detector_regr: 0.4412
INFO 2019-07-26 11:01:19 +0000 master-replica-0 813/1000 [=======================>......] - ETA: 5:31 - rpn_cls: 2.8016 - rpn_regr: 0.1479 - detector_cls: 0.9701 - detector_regr: 0.4411
INFO 2019-07-26 11:01:20 +0000 master-replica-0 814/1000 [=======================>......] - ETA: 5:29 - rpn_cls: 2.7993 - rpn_regr: 0.1478 - detector_cls: 0.9697 - detector_regr: 0.4410
INFO 2019-07-26 11:01:21 +0000 master-replica-0 815/1000 [=======================>......] - ETA: 5:27 - rpn_cls: 2.7959 - rpn_regr: 0.1480 - detector_cls: 0.9691 - detector_regr: 0.4408
INFO 2019-07-26 11:01:22 +0000 master-replica-0 816/1000 [=======================>......] - ETA: 5:25 - rpn_cls: 2.7924 - rpn_regr: 0.1479 - detector_cls: 0.9688 - detector_regr: 0.4410
INFO 2019-07-26 11:01:23 +0000 master-replica-0 817/1000 [=======================>......] - ETA: 5:23 - rpn_cls: 2.7897 - rpn_regr: 0.1478 - detector_cls: 0.9688 - detector_regr: 0.4409
INFO 2019-07-26 11:01:25 +0000 master-replica-0 818/1000 [=======================>......] - ETA: 5:21 - rpn_cls: 2.7862 - rpn_regr: 0.1477 - detector_cls: 0.9684 - detector_regr: 0.4409
INFO 2019-07-26 11:01:26 +0000 master-replica-0 819/1000 [=======================>......] - ETA: 5:19 - rpn_cls: 2.7879 - rpn_regr: 0.1479 - detector_cls: 0.9680 - detector_regr: 0.4408
INFO 2019-07-26 11:01:28 +0000 master-replica-0 820/1000 [=======================>......] - ETA: 5:18 - rpn_cls: 2.7859 - rpn_regr: 0.1478 - detector_cls: 0.9683 - detector_regr: 0.4408
INFO 2019-07-26 11:01:29 +0000 master-replica-0 821/1000 [=======================>......] - ETA: 5:16 - rpn_cls: 2.7825 - rpn_regr: 0.1479 - detector_cls: 0.9686 - detector_regr: 0.4408
INFO 2019-07-26 11:01:30 +0000 master-replica-0 822/1000 [=======================>......] - ETA: 5:14 - rpn_cls: 2.7836 - rpn_regr: 0.1479 - detector_cls: 0.9684 - detector_regr: 0.4407
INFO 2019-07-26 11:01:32 +0000 master-replica-0 823/1000 [=======================>......] - ETA: 5:12 - rpn_cls: 2.7802 - rpn_regr: 0.1480 - detector_cls: 0.9679 - detector_regr: 0.4406
INFO 2019-07-26 11:01:33 +0000 master-replica-0 824/1000 [=======================>......] - ETA: 5:10 - rpn_cls: 2.7779 - rpn_regr: 0.1479 - detector_cls: 0.9678 - detector_regr: 0.4404
INFO 2019-07-26 11:01:34 +0000 master-replica-0 825/1000 [=======================>......] - ETA: 5:08 - rpn_cls: 2.7800 - rpn_regr: 0.1479 - detector_cls: 0.9674 - detector_regr: 0.4406
INFO 2019-07-26 11:01:36 +0000 master-replica-0 826/1000 [=======================>......] - ETA: 5:07 - rpn_cls: 2.7779 - rpn_regr: 0.1479 - detector_cls: 0.9669 - detector_regr: 0.4406
INFO 2019-07-26 11:01:37 +0000 master-replica-0 827/1000 [=======================>......] - ETA: 5:05 - rpn_cls: 2.7752 - rpn_regr: 0.1478 - detector_cls: 0.9668 - detector_regr: 0.4406
INFO 2019-07-26 11:01:38 +0000 master-replica-0 828/1000 [=======================>......] - ETA: 5:03 - rpn_cls: 2.7727 - rpn_regr: 0.1478 - detector_cls: 0.9675 - detector_regr: 0.4406
INFO 2019-07-26 11:01:40 +0000 master-replica-0 829/1000 [=======================>......] - ETA: 5:01 - rpn_cls: 2.7745 - rpn_regr: 0.1477 - detector_cls: 0.9668 - detector_regr: 0.4400
INFO 2019-07-26 11:01:42 +0000 master-replica-0 830/1000 [=======================>......] - ETA: 4:59 - rpn_cls: 2.7713 - rpn_regr: 0.1476 - detector_cls: 0.9667 - detector_regr: 0.4400
INFO 2019-07-26 11:01:43 +0000 master-replica-0 831/1000 [=======================>......] - ETA: 4:57 - rpn_cls: 2.7687 - rpn_regr: 0.1476 - detector_cls: 0.9662 - detector_regr: 0.4400
INFO 2019-07-26 11:01:45 +0000 master-replica-0 832/1000 [=======================>......] - ETA: 4:56 - rpn_cls: 2.7654 - rpn_regr: 0.1477 - detector_cls: 0.9652 - detector_regr: 0.4395
INFO 2019-07-26 11:01:46 +0000 master-replica-0 833/1000 [=======================>......] - ETA: 4:54 - rpn_cls: 2.7675 - rpn_regr: 0.1477 - detector_cls: 0.9648 - detector_regr: 0.4394
INFO 2019-07-26 11:01:47 +0000 master-replica-0 834/1000 [========================>.....] - ETA: 4:52 - rpn_cls: 2.7642 - rpn_regr: 0.1476 - detector_cls: 0.9646 - detector_regr: 0.4395
INFO 2019-07-26 11:01:49 +0000 master-replica-0 835/1000 [========================>.....] - ETA: 4:50 - rpn_cls: 2.7666 - rpn_regr: 0.1477 - detector_cls: 0.9641 - detector_regr: 0.4396
INFO 2019-07-26 11:01:50 +0000 master-replica-0 836/1000 [========================>.....] - ETA: 4:48 - rpn_cls: 2.7647 - rpn_regr: 0.1475 - detector_cls: 0.9645 - detector_regr: 0.4395
INFO 2019-07-26 11:01:52 +0000 master-replica-0 837/1000 [========================>.....] - ETA: 4:46 - rpn_cls: 2.7665 - rpn_regr: 0.1476 - detector_cls: 0.9637 - detector_regr: 0.4390
INFO 2019-07-26 11:01:53 +0000 master-replica-0 838/1000 [========================>.....] - ETA: 4:45 - rpn_cls: 2.7642 - rpn_regr: 0.1476 - detector_cls: 0.9634 - detector_regr: 0.4389
INFO 2019-07-26 11:01:54 +0000 master-replica-0 839/1000 [========================>.....] - ETA: 4:43 - rpn_cls: 2.7625 - rpn_regr: 0.1475 - detector_cls: 0.9637 - detector_regr: 0.4389
INFO 2019-07-26 11:01:56 +0000 master-replica-0 840/1000 [========================>.....] - ETA: 4:41 - rpn_cls: 2.7605 - rpn_regr: 0.1474 - detector_cls: 0.9642 - detector_regr: 0.4388
INFO 2019-07-26 11:01:57 +0000 master-replica-0 841/1000 [========================>.....] - ETA: 4:39 - rpn_cls: 2.7572 - rpn_regr: 0.1474 - detector_cls: 0.9636 - detector_regr: 0.4388
INFO 2019-07-26 11:01:58 +0000 master-replica-0 842/1000 [========================>.....] - ETA: 4:37 - rpn_cls: 2.7614 - rpn_regr: 0.1476 - detector_cls: 0.9631 - detector_regr: 0.4383
INFO 2019-07-26 11:02:00 +0000 master-replica-0 843/1000 [========================>.....] - ETA: 4:35 - rpn_cls: 2.7587 - rpn_regr: 0.1475 - detector_cls: 0.9637 - detector_regr: 0.4380
INFO 2019-07-26 11:02:01 +0000 master-replica-0 844/1000 [========================>.....] - ETA: 4:34 - rpn_cls: 2.7561 - rpn_regr: 0.1475 - detector_cls: 0.9634 - detector_regr: 0.4379
INFO 2019-07-26 11:02:02 +0000 master-replica-0 845/1000 [========================>.....] - ETA: 4:32 - rpn_cls: 2.7624 - rpn_regr: 0.1476 - detector_cls: 0.9634 - detector_regr: 0.4380
INFO 2019-07-26 11:02:04 +0000 master-replica-0 846/1000 [========================>.....] - ETA: 4:30 - rpn_cls: 2.7604 - rpn_regr: 0.1476 - detector_cls: 0.9639 - detector_regr: 0.4381
INFO 2019-07-26 11:02:05 +0000 master-replica-0 847/1000 [========================>.....] - ETA: 4:28 - rpn_cls: 2.7626 - rpn_regr: 0.1477 - detector_cls: 0.9638 - detector_regr: 0.4382
INFO 2019-07-26 11:02:08 +0000 master-replica-0 848/1000 [========================>.....] - ETA: 4:26 - rpn_cls: 2.7612 - rpn_regr: 0.1476 - detector_cls: 0.9639 - detector_regr: 0.4382
INFO 2019-07-26 11:02:09 +0000 master-replica-0 849/1000 [========================>.....] - ETA: 4:25 - rpn_cls: 2.7674 - rpn_regr: 0.1479 - detector_cls: 0.9630 - detector_regr: 0.4377
INFO 2019-07-26 11:02:11 +0000 master-replica-0 850/1000 [========================>.....] - ETA: 4:23 - rpn_cls: 2.7665 - rpn_regr: 0.1479 - detector_cls: 0.9636 - detector_regr: 0.4378
INFO 2019-07-26 11:02:12 +0000 master-replica-0 851/1000 [========================>.....] - ETA: 4:21 - rpn_cls: 2.7649 - rpn_regr: 0.1478 - detector_cls: 0.9641 - detector_regr: 0.4377
INFO 2019-07-26 11:02:15 +0000 master-replica-0 852/1000 [========================>.....] - ETA: 4:19 - rpn_cls: 2.7622 - rpn_regr: 0.1477 - detector_cls: 0.9640 - detector_regr: 0.4376
INFO 2019-07-26 11:02:17 +0000 master-replica-0 853/1000 [========================>.....] - ETA: 4:18 - rpn_cls: 2.7589 - rpn_regr: 0.1476 - detector_cls: 0.9633 - detector_regr: 0.4378
INFO 2019-07-26 11:02:18 +0000 master-replica-0 854/1000 [========================>.....] - ETA: 4:16 - rpn_cls: 2.7598 - rpn_regr: 0.1476 - detector_cls: 0.9625 - detector_regr: 0.4375
INFO 2019-07-26 11:02:20 +0000 master-replica-0 855/1000 [========================>.....] - ETA: 4:14 - rpn_cls: 2.7616 - rpn_regr: 0.1477 - detector_cls: 0.9620 - detector_regr: 0.4374
INFO 2019-07-26 11:02:21 +0000 master-replica-0 856/1000 [========================>.....] - ETA: 4:12 - rpn_cls: 2.7590 - rpn_regr: 0.1475 - detector_cls: 0.9624 - detector_regr: 0.4373
INFO 2019-07-26 11:02:23 +0000 master-replica-0 857/1000 [========================>.....] - ETA: 4:10 - rpn_cls: 2.7608 - rpn_regr: 0.1475 - detector_cls: 0.9619 - detector_regr: 0.4374
INFO 2019-07-26 11:02:24 +0000 master-replica-0 858/1000 [========================>.....] - ETA: 4:09 - rpn_cls: 2.7682 - rpn_regr: 0.1476 - detector_cls: 0.9610 - detector_regr: 0.4369
INFO 2019-07-26 11:02:26 +0000 master-replica-0 859/1000 [========================>.....] - ETA: 4:07 - rpn_cls: 2.7650 - rpn_regr: 0.1478 - detector_cls: 0.9603 - detector_regr: 0.4369
INFO 2019-07-26 11:02:27 +0000 master-replica-0 860/1000 [========================>.....] - ETA: 4:05 - rpn_cls: 2.7706 - rpn_regr: 0.1478 - detector_cls: 0.9605 - detector_regr: 0.4369
INFO 2019-07-26 11:02:29 +0000 master-replica-0 861/1000 [========================>.....] - ETA: 4:03 - rpn_cls: 2.7689 - rpn_regr: 0.1477 - detector_cls: 0.9609 - detector_regr: 0.4369
INFO 2019-07-26 11:02:30 +0000 master-replica-0 862/1000 [========================>.....] - ETA: 4:02 - rpn_cls: 2.7761 - rpn_regr: 0.1482 - detector_cls: 0.9602 - detector_regr: 0.4364
INFO 2019-07-26 11:02:32 +0000 master-replica-0 863/1000 [========================>.....] - ETA: 4:00 - rpn_cls: 2.7729 - rpn_regr: 0.1482 - detector_cls: 0.9603 - detector_regr: 0.4363
INFO 2019-07-26 11:02:33 +0000 master-replica-0 864/1000 [========================>.....] - ETA: 3:58 - rpn_cls: 2.7697 - rpn_regr: 0.1481 - detector_cls: 0.9602 - detector_regr: 0.4363
INFO 2019-07-26 11:02:34 +0000 master-replica-0 865/1000 [========================>.....] - ETA: 3:56 - rpn_cls: 2.7691 - rpn_regr: 0.1480 - detector_cls: 0.9606 - detector_regr: 0.4362
INFO 2019-07-26 11:02:36 +0000 master-replica-0 866/1000 [========================>.....] - ETA: 3:54 - rpn_cls: 2.7716 - rpn_regr: 0.1481 - detector_cls: 0.9604 - detector_regr: 0.4362
INFO 2019-07-26 11:02:37 +0000 master-replica-0 867/1000 [=========================>....] - ETA: 3:52 - rpn_cls: 2.7684 - rpn_regr: 0.1481 - detector_cls: 0.9598 - detector_regr: 0.4361
INFO 2019-07-26 11:02:39 +0000 master-replica-0 868/1000 [=========================>....] - ETA: 3:51 - rpn_cls: 2.7662 - rpn_regr: 0.1479 - detector_cls: 0.9600 - detector_regr: 0.4362
INFO 2019-07-26 11:02:40 +0000 master-replica-0 869/1000 [=========================>....] - ETA: 3:49 - rpn_cls: 2.7647 - rpn_regr: 0.1478 - detector_cls: 0.9601 - detector_regr: 0.4362
INFO 2019-07-26 11:02:42 +0000 master-replica-0 870/1000 [=========================>....] - ETA: 3:47 - rpn_cls: 2.7615 - rpn_regr: 0.1478 - detector_cls: 0.9601 - detector_regr: 0.4364
INFO 2019-07-26 11:02:43 +0000 master-replica-0 871/1000 [=========================>....] - ETA: 3:45 - rpn_cls: 2.7591 - rpn_regr: 0.1477 - detector_cls: 0.9604 - detector_regr: 0.4365
INFO 2019-07-26 11:02:44 +0000 master-replica-0 872/1000 [=========================>....] - ETA: 3:43 - rpn_cls: 2.7606 - rpn_regr: 0.1476 - detector_cls: 0.9602 - detector_regr: 0.4367
INFO 2019-07-26 11:02:46 +0000 master-replica-0 873/1000 [=========================>....] - ETA: 3:42 - rpn_cls: 2.7626 - rpn_regr: 0.1476 - detector_cls: 0.9598 - detector_regr: 0.4368
INFO 2019-07-26 11:02:48 +0000 master-replica-0 874/1000 [=========================>....] - ETA: 3:40 - rpn_cls: 2.7604 - rpn_regr: 0.1476 - detector_cls: 0.9594 - detector_regr: 0.4369
INFO 2019-07-26 11:02:49 +0000 master-replica-0 875/1000 [=========================>....] - ETA: 3:38 - rpn_cls: 2.7613 - rpn_regr: 0.1475 - detector_cls: 0.9588 - detector_regr: 0.4368
INFO 2019-07-26 11:02:50 +0000 master-replica-0 876/1000 [=========================>....] - ETA: 3:36 - rpn_cls: 2.7581 - rpn_regr: 0.1475 - detector_cls: 0.9592 - detector_regr: 0.4369
INFO 2019-07-26 11:02:52 +0000 master-replica-0 877/1000 [=========================>....] - ETA: 3:35 - rpn_cls: 2.7560 - rpn_regr: 0.1474 - detector_cls: 0.9593 - detector_regr: 0.4369
INFO 2019-07-26 11:02:53 +0000 master-replica-0 878/1000 [=========================>....] - ETA: 3:33 - rpn_cls: 2.7604 - rpn_regr: 0.1474 - detector_cls: 0.9597 - detector_regr: 0.4371
INFO 2019-07-26 11:02:55 +0000 master-replica-0 879/1000 [=========================>....] - ETA: 3:31 - rpn_cls: 2.7602 - rpn_regr: 0.1473 - detector_cls: 0.9599 - detector_regr: 0.4371
INFO 2019-07-26 11:02:56 +0000 master-replica-0 880/1000 [=========================>....] - ETA: 3:29 - rpn_cls: 2.7589 - rpn_regr: 0.1472 - detector_cls: 0.9603 - detector_regr: 0.4370
INFO 2019-07-26 11:02:58 +0000 master-replica-0 881/1000 [=========================>....] - ETA: 3:27 - rpn_cls: 2.7574 - rpn_regr: 0.1471 - detector_cls: 0.9599 - detector_regr: 0.4370
INFO 2019-07-26 11:02:59 +0000 master-replica-0 882/1000 [=========================>....] - ETA: 3:26 - rpn_cls: 2.7554 - rpn_regr: 0.1471 - detector_cls: 0.9596 - detector_regr: 0.4370
INFO 2019-07-26 11:03:01 +0000 master-replica-0 883/1000 [=========================>....] - ETA: 3:24 - rpn_cls: 2.7560 - rpn_regr: 0.1472 - detector_cls: 0.9595 - detector_regr: 0.4370
INFO 2019-07-26 11:03:02 +0000 master-replica-0 884/1000 [=========================>....] - ETA: 3:22 - rpn_cls: 2.7538 - rpn_regr: 0.1470 - detector_cls: 0.9598 - detector_regr: 0.4371
INFO 2019-07-26 11:03:03 +0000 master-replica-0 885/1000 [=========================>....] - ETA: 3:20 - rpn_cls: 2.7570 - rpn_regr: 0.1470 - detector_cls: 0.9593 - detector_regr: 0.4372
INFO 2019-07-26 11:03:05 +0000 master-replica-0 886/1000 [=========================>....] - ETA: 3:18 - rpn_cls: 2.7552 - rpn_regr: 0.1469 - detector_cls: 0.9594 - detector_regr: 0.4372
INFO 2019-07-26 11:03:06 +0000 master-replica-0 887/1000 [=========================>....] - ETA: 3:17 - rpn_cls: 2.7536 - rpn_regr: 0.1468 - detector_cls: 0.9590 - detector_regr: 0.4374
INFO 2019-07-26 11:03:08 +0000 master-replica-0 888/1000 [=========================>....] - ETA: 3:15 - rpn_cls: 2.7518 - rpn_regr: 0.1467 - detector_cls: 0.9595 - detector_regr: 0.4375
INFO 2019-07-26 11:03:09 +0000 master-replica-0 889/1000 [=========================>....] - ETA: 3:13 - rpn_cls: 2.7556 - rpn_regr: 0.1466 - detector_cls: 0.9593 - detector_regr: 0.4377
INFO 2019-07-26 11:03:10 +0000 master-replica-0 890/1000 [=========================>....] - ETA: 3:11 - rpn_cls: 2.7534 - rpn_regr: 0.1465 - detector_cls: 0.9596 - detector_regr: 0.4375
INFO 2019-07-26 11:03:12 +0000 master-replica-0 891/1000 [=========================>....] - ETA: 3:10 - rpn_cls: 2.7525 - rpn_regr: 0.1465 - detector_cls: 0.9595 - detector_regr: 0.4375
INFO 2019-07-26 11:03:13 +0000 master-replica-0 892/1000 [=========================>....] - ETA: 3:08 - rpn_cls: 2.7505 - rpn_regr: 0.1464 - detector_cls: 0.9591 - detector_regr: 0.4374
INFO 2019-07-26 11:03:15 +0000 master-replica-0 893/1000 [=========================>....] - ETA: 3:06 - rpn_cls: 2.7475 - rpn_regr: 0.1464 - detector_cls: 0.9593 - detector_regr: 0.4374
INFO 2019-07-26 11:03:16 +0000 master-replica-0 894/1000 [=========================>....] - ETA: 3:04 - rpn_cls: 2.7455 - rpn_regr: 0.1463 - detector_cls: 0.9592 - detector_regr: 0.4374
INFO 2019-07-26 11:03:17 +0000 master-replica-0 895/1000 [=========================>....] - ETA: 3:02 - rpn_cls: 2.7523 - rpn_regr: 0.1462 - detector_cls: 0.9587 - detector_regr: 0.4376
INFO 2019-07-26 11:03:19 +0000 master-replica-0 896/1000 [=========================>....] - ETA: 3:01 - rpn_cls: 2.7496 - rpn_regr: 0.1460 - detector_cls: 0.9588 - detector_regr: 0.4375
INFO 2019-07-26 11:03:20 +0000 master-replica-0 897/1000 [=========================>....] - ETA: 2:59 - rpn_cls: 2.7465 - rpn_regr: 0.1460 - detector_cls: 0.9592 - detector_regr: 0.4376
INFO 2019-07-26 11:03:22 +0000 master-replica-0 898/1000 [=========================>....] - ETA: 2:57 - rpn_cls: 2.7452 - rpn_regr: 0.1459 - detector_cls: 0.9592 - detector_regr: 0.4375
INFO 2019-07-26 11:03:23 +0000 master-replica-0 899/1000 [=========================>....] - ETA: 2:55 - rpn_cls: 2.7511 - rpn_regr: 0.1459 - detector_cls: 0.9587 - detector_regr: 0.4370
INFO 2019-07-26 11:03:26 +0000 master-replica-0 900/1000 [==========================>...] - ETA: 2:53 - rpn_cls: 2.7583 - rpn_regr: 0.1460 - detector_cls: 0.9582 - detector_regr: 0.4371
INFO 2019-07-26 11:03:27 +0000 master-replica-0 901/1000 [==========================>...] - ETA: 2:52 - rpn_cls: 2.7641 - rpn_regr: 0.1473 - detector_cls: 0.9574 - detector_regr: 0.4366
INFO 2019-07-26 11:03:28 +0000 master-replica-0 902/1000 [==========================>...] - ETA: 2:50 - rpn_cls: 2.7628 - rpn_regr: 0.1473 - detector_cls: 0.9576 - detector_regr: 0.4366
INFO 2019-07-26 11:03:30 +0000 master-replica-0 903/1000 [==========================>...] - ETA: 2:48 - rpn_cls: 2.7608 - rpn_regr: 0.1472 - detector_cls: 0.9575 - detector_regr: 0.4365
INFO 2019-07-26 11:03:31 +0000 master-replica-0 904/1000 [==========================>...] - ETA: 2:47 - rpn_cls: 2.7578 - rpn_regr: 0.1471 - detector_cls: 0.9575 - detector_regr: 0.4366
INFO 2019-07-26 11:03:33 +0000 master-replica-0 905/1000 [==========================>...] - ETA: 2:45 - rpn_cls: 2.7560 - rpn_regr: 0.1471 - detector_cls: 0.9579 - detector_regr: 0.4367
INFO 2019-07-26 11:03:34 +0000 master-replica-0 906/1000 [==========================>...] - ETA: 2:43 - rpn_cls: 2.7539 - rpn_regr: 0.1470 - detector_cls: 0.9575 - detector_regr: 0.4366
INFO 2019-07-26 11:03:35 +0000 master-replica-0 907/1000 [==========================>...] - ETA: 2:41 - rpn_cls: 2.7509 - rpn_regr: 0.1470 - detector_cls: 0.9573 - detector_regr: 0.4368
INFO 2019-07-26 11:03:37 +0000 master-replica-0 908/1000 [==========================>...] - ETA: 2:39 - rpn_cls: 2.7498 - rpn_regr: 0.1469 - detector_cls: 0.9587 - detector_regr: 0.4368
INFO 2019-07-26 11:03:38 +0000 master-replica-0 909/1000 [==========================>...] - ETA: 2:38 - rpn_cls: 2.7468 - rpn_regr: 0.1468 - detector_cls: 0.9583 - detector_regr: 0.4367
INFO 2019-07-26 11:03:39 +0000 master-replica-0 910/1000 [==========================>...] - ETA: 2:36 - rpn_cls: 2.7454 - rpn_regr: 0.1468 - detector_cls: 0.9584 - detector_regr: 0.4367
INFO 2019-07-26 11:03:41 +0000 master-replica-0 911/1000 [==========================>...] - ETA: 2:34 - rpn_cls: 2.7478 - rpn_regr: 0.1467 - detector_cls: 0.9581 - detector_regr: 0.4366
INFO 2019-07-26 11:03:42 +0000 master-replica-0 912/1000 [==========================>...] - ETA: 2:32 - rpn_cls: 2.7450 - rpn_regr: 0.1466 - detector_cls: 0.9579 - detector_regr: 0.4367
INFO 2019-07-26 11:03:43 +0000 master-replica-0 913/1000 [==========================>...] - ETA: 2:31 - rpn_cls: 2.7420 - rpn_regr: 0.1465 - detector_cls: 0.9584 - detector_regr: 0.4369
INFO 2019-07-26 11:03:44 +0000 master-replica-0 914/1000 [==========================>...] - ETA: 2:29 - rpn_cls: 2.7398 - rpn_regr: 0.1465 - detector_cls: 0.9586 - detector_regr: 0.4368
INFO 2019-07-26 11:03:46 +0000 master-replica-0 915/1000 [==========================>...] - ETA: 2:27 - rpn_cls: 2.7374 - rpn_regr: 0.1464 - detector_cls: 0.9586 - detector_regr: 0.4367
INFO 2019-07-26 11:03:47 +0000 master-replica-0 916/1000 [==========================>...] - ETA: 2:25 - rpn_cls: 2.7344 - rpn_regr: 0.1465 - detector_cls: 0.9586 - detector_regr: 0.4368
INFO 2019-07-26 11:03:48 +0000 master-replica-0 917/1000 [==========================>...] - ETA: 2:23 - rpn_cls: 2.7314 - rpn_regr: 0.1465 - detector_cls: 0.9581 - detector_regr: 0.4369
INFO 2019-07-26 11:03:50 +0000 master-replica-0 918/1000 [==========================>...] - ETA: 2:22 - rpn_cls: 2.7285 - rpn_regr: 0.1465 - detector_cls: 0.9578 - detector_regr: 0.4370
INFO 2019-07-26 11:03:51 +0000 master-replica-0 919/1000 [==========================>...] - ETA: 2:20 - rpn_cls: 2.7260 - rpn_regr: 0.1465 - detector_cls: 0.9576 - detector_regr: 0.4371
INFO 2019-07-26 11:03:52 +0000 master-replica-0 920/1000 [==========================>...] - ETA: 2:18 - rpn_cls: 2.7241 - rpn_regr: 0.1464 - detector_cls: 0.9573 - detector_regr: 0.4370
INFO 2019-07-26 11:03:53 +0000 master-replica-0 921/1000 [==========================>...] - ETA: 2:16 - rpn_cls: 2.7239 - rpn_regr: 0.1465 - detector_cls: 0.9579 - detector_regr: 0.4371
INFO 2019-07-26 11:03:55 +0000 master-replica-0 922/1000 [==========================>...] - ETA: 2:15 - rpn_cls: 2.7222 - rpn_regr: 0.1464 - detector_cls: 0.9581 - detector_regr: 0.4370
INFO 2019-07-26 11:03:56 +0000 master-replica-0 923/1000 [==========================>...] - ETA: 2:13 - rpn_cls: 2.7271 - rpn_regr: 0.1464 - detector_cls: 0.9588 - detector_regr: 0.4371
INFO 2019-07-26 11:03:58 +0000 master-replica-0 924/1000 [==========================>...] - ETA: 2:11 - rpn_cls: 2.7258 - rpn_regr: 0.1463 - detector_cls: 0.9593 - detector_regr: 0.4370
INFO 2019-07-26 11:03:59 +0000 master-replica-0 925/1000 [==========================>...] - ETA: 2:09 - rpn_cls: 2.7244 - rpn_regr: 0.1464 - detector_cls: 0.9589 - detector_regr: 0.4370
INFO 2019-07-26 11:04:00 +0000 master-replica-0 926/1000 [==========================>...] - ETA: 2:08 - rpn_cls: 2.7226 - rpn_regr: 0.1463 - detector_cls: 0.9592 - detector_regr: 0.4369
INFO 2019-07-26 11:04:02 +0000 master-replica-0 927/1000 [==========================>...] - ETA: 2:06 - rpn_cls: 2.7288 - rpn_regr: 0.1463 - detector_cls: 0.9587 - detector_regr: 0.4369
INFO 2019-07-26 11:04:03 +0000 master-replica-0 928/1000 [==========================>...] - ETA: 2:04 - rpn_cls: 2.7279 - rpn_regr: 0.1462 - detector_cls: 0.9584 - detector_regr: 0.4367
INFO 2019-07-26 11:04:04 +0000 master-replica-0 929/1000 [==========================>...] - ETA: 2:02 - rpn_cls: 2.7323 - rpn_regr: 0.1473 - detector_cls: 0.9580 - detector_regr: 0.4362
INFO 2019-07-26 11:04:06 +0000 master-replica-0 930/1000 [==========================>...] - ETA: 2:00 - rpn_cls: 2.7303 - rpn_regr: 0.1473 - detector_cls: 0.9584 - detector_regr: 0.4364
INFO 2019-07-26 11:04:07 +0000 master-replica-0 931/1000 [==========================>...] - ETA: 1:59 - rpn_cls: 2.7282 - rpn_regr: 0.1472 - detector_cls: 0.9589 - detector_regr: 0.4365
INFO 2019-07-26 11:04:09 +0000 master-replica-0 932/1000 [==========================>...] - ETA: 1:57 - rpn_cls: 2.7354 - rpn_regr: 0.1472 - detector_cls: 0.9585 - detector_regr: 0.4360
INFO 2019-07-26 11:04:10 +0000 master-replica-0 933/1000 [==========================>...] - ETA: 1:55 - rpn_cls: 2.7325 - rpn_regr: 0.1471 - detector_cls: 0.9586 - detector_regr: 0.4362
INFO 2019-07-26 11:04:11 +0000 master-replica-0 934/1000 [===========================>..] - ETA: 1:53 - rpn_cls: 2.7305 - rpn_regr: 0.1470 - detector_cls: 0.9586 - detector_regr: 0.4361
INFO 2019-07-26 11:04:13 +0000 master-replica-0 935/1000 [===========================>..] - ETA: 1:52 - rpn_cls: 2.7276 - rpn_regr: 0.1470 - detector_cls: 0.9587 - detector_regr: 0.4362
INFO 2019-07-26 11:04:14 +0000 master-replica-0 936/1000 [===========================>..] - ETA: 1:50 - rpn_cls: 2.7348 - rpn_regr: 0.1471 - detector_cls: 0.9582 - detector_regr: 0.4358
INFO 2019-07-26 11:04:16 +0000 master-replica-0 937/1000 [===========================>..] - ETA: 1:48 - rpn_cls: 2.7381 - rpn_regr: 0.1471 - detector_cls: 0.9583 - detector_regr: 0.4359
INFO 2019-07-26 11:04:17 +0000 master-replica-0 938/1000 [===========================>..] - ETA: 1:46 - rpn_cls: 2.7352 - rpn_regr: 0.1471 - detector_cls: 0.9584 - detector_regr: 0.4360
INFO 2019-07-26 11:04:18 +0000 master-replica-0 939/1000 [===========================>..] - ETA: 1:45 - rpn_cls: 2.7327 - rpn_regr: 0.1469 - detector_cls: 0.9581 - detector_regr: 0.4358
INFO 2019-07-26 11:04:20 +0000 master-replica-0 940/1000 [===========================>..] - ETA: 1:43 - rpn_cls: 2.7320 - rpn_regr: 0.1469 - detector_cls: 0.9582 - detector_regr: 0.4357
INFO 2019-07-26 11:04:21 +0000 master-replica-0 941/1000 [===========================>..] - ETA: 1:41 - rpn_cls: 2.7303 - rpn_regr: 0.1468 - detector_cls: 0.9581 - detector_regr: 0.4357
INFO 2019-07-26 11:04:23 +0000 master-replica-0 942/1000 [===========================>..] - ETA: 1:40 - rpn_cls: 2.7293 - rpn_regr: 0.1467 - detector_cls: 0.9582 - detector_regr: 0.4356
INFO 2019-07-26 11:04:24 +0000 master-replica-0 943/1000 [===========================>..] - ETA: 1:38 - rpn_cls: 2.7359 - rpn_regr: 0.1466 - detector_cls: 0.9578 - detector_regr: 0.4357
INFO 2019-07-26 11:04:25 +0000 master-replica-0 944/1000 [===========================>..] - ETA: 1:36 - rpn_cls: 2.7330 - rpn_regr: 0.1465 - detector_cls: 0.9574 - detector_regr: 0.4357
INFO 2019-07-26 11:04:26 +0000 master-replica-0 945/1000 [===========================>..] - ETA: 1:34 - rpn_cls: 2.7313 - rpn_regr: 0.1464 - detector_cls: 0.9576 - detector_regr: 0.4357
INFO 2019-07-26 11:04:29 +0000 master-replica-0 946/1000 [===========================>..] - ETA: 1:33 - rpn_cls: 2.7284 - rpn_regr: 0.1463 - detector_cls: 0.9579 - detector_regr: 0.4356
INFO 2019-07-26 11:04:30 +0000 master-replica-0 947/1000 [===========================>..] - ETA: 1:31 - rpn_cls: 2.7358 - rpn_regr: 0.1463 - detector_cls: 0.9575 - detector_regr: 0.4358
INFO 2019-07-26 11:04:32 +0000 master-replica-0 948/1000 [===========================>..] - ETA: 1:29 - rpn_cls: 2.7329 - rpn_regr: 0.1462 - detector_cls: 0.9578 - detector_regr: 0.4357
INFO 2019-07-26 11:04:34 +0000 master-replica-0 949/1000 [===========================>..] - ETA: 1:27 - rpn_cls: 2.7405 - rpn_regr: 0.1463 - detector_cls: 0.9574 - detector_regr: 0.4358
INFO 2019-07-26 11:04:35 +0000 master-replica-0 950/1000 [===========================>..] - ETA: 1:26 - rpn_cls: 2.7377 - rpn_regr: 0.1464 - detector_cls: 0.9567 - detector_regr: 0.4361
INFO 2019-07-26 11:04:37 +0000 master-replica-0 951/1000 [===========================>..] - ETA: 1:24 - rpn_cls: 2.7382 - rpn_regr: 0.1465 - detector_cls: 0.9567 - detector_regr: 0.4361
INFO 2019-07-26 11:04:38 +0000 master-replica-0 952/1000 [===========================>..] - ETA: 1:22 - rpn_cls: 2.7367 - rpn_regr: 0.1464 - detector_cls: 0.9564 - detector_regr: 0.4360
INFO 2019-07-26 11:04:39 +0000 master-replica-0 953/1000 [===========================>..] - ETA: 1:20 - rpn_cls: 2.7342 - rpn_regr: 0.1464 - detector_cls: 0.9565 - detector_regr: 0.4361
INFO 2019-07-26 11:04:41 +0000 master-replica-0 954/1000 [===========================>..] - ETA: 1:19 - rpn_cls: 2.7314 - rpn_regr: 0.1463 - detector_cls: 0.9569 - detector_regr: 0.4360
INFO 2019-07-26 11:04:42 +0000 master-replica-0 955/1000 [===========================>..] - ETA: 1:17 - rpn_cls: 2.7331 - rpn_regr: 0.1462 - detector_cls: 0.9567 - detector_regr: 0.4359
INFO 2019-07-26 11:04:43 +0000 master-replica-0 956/1000 [===========================>..] - ETA: 1:15 - rpn_cls: 2.7302 - rpn_regr: 0.1462 - detector_cls: 0.9567 - detector_regr: 0.4360
INFO 2019-07-26 11:04:45 +0000 master-replica-0 957/1000 [===========================>..] - ETA: 1:13 - rpn_cls: 2.7286 - rpn_regr: 0.1461 - detector_cls: 0.9563 - detector_regr: 0.4359
INFO 2019-07-26 11:04:46 +0000 master-replica-0 958/1000 [===========================>..] - ETA: 1:12 - rpn_cls: 2.7262 - rpn_regr: 0.1460 - detector_cls: 0.9569 - detector_regr: 0.4359
INFO 2019-07-26 11:04:48 +0000 master-replica-0 959/1000 [===========================>..] - ETA: 1:10 - rpn_cls: 2.7302 - rpn_regr: 0.1463 - detector_cls: 0.9562 - detector_regr: 0.4355
INFO 2019-07-26 11:04:49 +0000 master-replica-0 960/1000 [===========================>..] - ETA: 1:08 - rpn_cls: 2.7287 - rpn_regr: 0.1464 - detector_cls: 0.9565 - detector_regr: 0.4354
INFO 2019-07-26 11:04:51 +0000 master-replica-0 961/1000 [===========================>..] - ETA: 1:07 - rpn_cls: 2.7271 - rpn_regr: 0.1463 - detector_cls: 0.9562 - detector_regr: 0.4355
INFO 2019-07-26 11:04:52 +0000 master-replica-0 962/1000 [===========================>..] - ETA: 1:05 - rpn_cls: 2.7243 - rpn_regr: 0.1463 - detector_cls: 0.9559 - detector_regr: 0.4356
INFO 2019-07-26 11:04:53 +0000 master-replica-0 963/1000 [===========================>..] - ETA: 1:03 - rpn_cls: 2.7214 - rpn_regr: 0.1463 - detector_cls: 0.9556 - detector_regr: 0.4357
INFO 2019-07-26 11:04:54 +0000 master-replica-0 964/1000 [===========================>..] - ETA: 1:01 - rpn_cls: 2.7237 - rpn_regr: 0.1464 - detector_cls: 0.9555 - detector_regr: 0.4357
INFO 2019-07-26 11:04:55 +0000 master-replica-0 965/1000 [===========================>..] - ETA: 1:00 - rpn_cls: 2.7209 - rpn_regr: 0.1463 - detector_cls: 0.9553 - detector_regr: 0.4356
INFO 2019-07-26 11:04:57 +0000 master-replica-0 966/1000 [===========================>..] - ETA: 58s - rpn_cls: 2.7194 - rpn_regr: 0.1462 - detector_cls: 0.9549 - detector_regr: 0.4356
INFO 2019-07-26 11:04:58 +0000 master-replica-0 967/1000 [============================>.] - ETA: 56s - rpn_cls: 2.7186 - rpn_regr: 0.1461 - detector_cls: 0.9548 - detector_regr: 0.4357
INFO 2019-07-26 11:04:59 +0000 master-replica-0 968/1000 [============================>.] - ETA: 54s - rpn_cls: 2.7236 - rpn_regr: 0.1462 - detector_cls: 0.9541 - detector_regr: 0.4358
INFO 2019-07-26 11:05:00 +0000 master-replica-0 969/1000 [============================>.] - ETA: 53s - rpn_cls: 2.7208 - rpn_regr: 0.1462 - detector_cls: 0.9536 - detector_regr: 0.4357
INFO 2019-07-26 11:05:02 +0000 master-replica-0 970/1000 [============================>.] - ETA: 51s - rpn_cls: 2.7223 - rpn_regr: 0.1461 - detector_cls: 0.9531 - detector_regr: 0.4352
INFO 2019-07-26 11:05:03 +0000 master-replica-0 971/1000 [============================>.] - ETA: 49s - rpn_cls: 2.7202 - rpn_regr: 0.1460 - detector_cls: 0.9533 - detector_regr: 0.4352
INFO 2019-07-26 11:05:04 +0000 master-replica-0 972/1000 [============================>.] - ETA: 47s - rpn_cls: 2.7232 - rpn_regr: 0.1460 - detector_cls: 0.9535 - detector_regr: 0.4352
INFO 2019-07-26 11:05:06 +0000 master-replica-0 973/1000 [============================>.] - ETA: 46s - rpn_cls: 2.7215 - rpn_regr: 0.1460 - detector_cls: 0.9532 - detector_regr: 0.4353
INFO 2019-07-26 11:05:07 +0000 master-replica-0 974/1000 [============================>.] - ETA: 44s - rpn_cls: 2.7187 - rpn_regr: 0.1459 - detector_cls: 0.9531 - detector_regr: 0.4353
INFO 2019-07-26 11:05:09 +0000 master-replica-0 975/1000 [============================>.] - ETA: 42s - rpn_cls: 2.7172 - rpn_regr: 0.1458 - detector_cls: 0.9533 - detector_regr: 0.4353
INFO 2019-07-26 11:05:10 +0000 master-replica-0 976/1000 [============================>.] - ETA: 41s - rpn_cls: 2.7144 - rpn_regr: 0.1459 - detector_cls: 0.9530 - detector_regr: 0.4353
INFO 2019-07-26 11:05:11 +0000 master-replica-0 977/1000 [============================>.] - ETA: 39s - rpn_cls: 2.7120 - rpn_regr: 0.1459 - detector_cls: 0.9527 - detector_regr: 0.4352
INFO 2019-07-26 11:05:12 +0000 master-replica-0 978/1000 [============================>.] - ETA: 37s - rpn_cls: 2.7136 - rpn_regr: 0.1458 - detector_cls: 0.9519 - detector_regr: 0.4354
INFO 2019-07-26 11:05:14 +0000 master-replica-0 979/1000 [============================>.] - ETA: 35s - rpn_cls: 2.7114 - rpn_regr: 0.1457 - detector_cls: 0.9517 - detector_regr: 0.4354
INFO 2019-07-26 11:05:15 +0000 master-replica-0 980/1000 [============================>.] - ETA: 34s - rpn_cls: 2.7162 - rpn_regr: 0.1457 - detector_cls: 0.9524 - detector_regr: 0.4354
INFO 2019-07-26 11:05:16 +0000 master-replica-0 981/1000 [============================>.] - ETA: 32s - rpn_cls: 2.7134 - rpn_regr: 0.1457 - detector_cls: 0.9525 - detector_regr: 0.4354
INFO 2019-07-26 11:05:18 +0000 master-replica-0 982/1000 [============================>.] - ETA: 30s - rpn_cls: 2.7202 - rpn_regr: 0.1455 - detector_cls: 0.9521 - detector_regr: 0.4357
INFO 2019-07-26 11:05:19 +0000 master-replica-0 983/1000 [============================>.] - ETA: 29s - rpn_cls: 2.7195 - rpn_regr: 0.1458 - detector_cls: 0.9526 - detector_regr: 0.4357
INFO 2019-07-26 11:05:21 +0000 master-replica-0 984/1000 [============================>.] - ETA: 27s - rpn_cls: 2.7181 - rpn_regr: 0.1458 - detector_cls: 0.9528 - detector_regr: 0.4358
INFO 2019-07-26 11:05:22 +0000 master-replica-0 985/1000 [============================>.] - ETA: 25s - rpn_cls: 2.7173 - rpn_regr: 0.1457 - detector_cls: 0.9530 - detector_regr: 0.4358
INFO 2019-07-26 11:05:24 +0000 master-replica-0 986/1000 [============================>.] - ETA: 23s - rpn_cls: 2.7145 - rpn_regr: 0.1457 - detector_cls: 0.9526 - detector_regr: 0.4357
INFO 2019-07-26 11:05:25 +0000 master-replica-0 987/1000 [============================>.] - ETA: 22s - rpn_cls: 2.7127 - rpn_regr: 0.1456 - detector_cls: 0.9525 - detector_regr: 0.4355
INFO 2019-07-26 11:05:27 +0000 master-replica-0 988/1000 [============================>.] - ETA: 20s - rpn_cls: 2.7100 - rpn_regr: 0.1457 - detector_cls: 0.9522 - detector_regr: 0.4356
INFO 2019-07-26 11:05:29 +0000 master-replica-0 989/1000 [============================>.] - ETA: 18s - rpn_cls: 2.7082 - rpn_regr: 0.1456 - detector_cls: 0.9520 - detector_regr: 0.4355
INFO 2019-07-26 11:05:31 +0000 master-replica-0 990/1000 [============================>.] - ETA: 17s - rpn_cls: 2.7144 - rpn_regr: 0.1457 - detector_cls: 0.9513 - detector_regr: 0.4350
INFO 2019-07-26 11:05:32 +0000 master-replica-0 991/1000 [============================>.] - ETA: 15s - rpn_cls: 2.7119 - rpn_regr: 0.1457 - detector_cls: 0.9516 - detector_regr: 0.4349
INFO 2019-07-26 11:05:33 +0000 master-replica-0 992/1000 [============================>.] - ETA: 13s - rpn_cls: 2.7102 - rpn_regr: 0.1456 - detector_cls: 0.9518 - detector_regr: 0.4349
INFO 2019-07-26 11:05:35 +0000 master-replica-0 993/1000 [============================>.] - ETA: 11s - rpn_cls: 2.7086 - rpn_regr: 0.1455 - detector_cls: 0.9523 - detector_regr: 0.4350Average number of overlapping bounding boxes from RPN = 12.653 for 1000 previous iterations
INFO 2019-07-26 11:05:36 +0000 master-replica-0 994/1000 [============================>.] - ETA: 10s - rpn_cls: 2.7069 - rpn_regr: 0.1455 - detector_cls: 0.9528 - detector_regr: 0.4350
INFO 2019-07-26 11:05:38 +0000 master-replica-0 995/1000 [============================>.] - ETA: 8s - rpn_cls: 2.7056 - rpn_regr: 0.1454 - detector_cls: 0.9525 - detector_regr: 0.4350
INFO 2019-07-26 11:05:39 +0000 master-replica-0 996/1000 [============================>.] - ETA: 6s - rpn_cls: 2.7062 - rpn_regr: 0.1456 - detector_cls: 0.9519 - detector_regr: 0.4345
INFO 2019-07-26 11:05:41 +0000 master-replica-0 997/1000 [============================>.] - ETA: 5s - rpn_cls: 2.7077 - rpn_regr: 0.1456 - detector_cls: 0.9513 - detector_regr: 0.4350
INFO 2019-07-26 11:05:42 +0000 master-replica-0 998/1000 [============================>.] - ETA: 3s - rpn_cls: 2.7050 - rpn_regr: 0.1456 - detector_cls: 0.9511 - detector_regr: 0.4351
INFO 2019-07-26 11:05:43 +0000 master-replica-0 999/1000 [============================>.] - ETA: 1s - rpn_cls: 2.7039 - rpn_regr: 0.1455 - detector_cls: 0.9507 - detector_regr: 0.4349
INFO 2019-07-26 11:05:43 +0000 master-replica-0 1000/1000 [==============================] - 1706s 2s/step - rpn_cls: 2.7021 - rpn_regr: 0.1455 - detector_cls: 0.9504 - detector_regr: 0.4349
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Mean number of bounding boxes from RPN overlapping ground truth boxes: 12.632571996
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Classifier accuracy for bounding boxes from RPN: 0.6938125
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Loss RPN classifier: 2.7020942679
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Loss RPN regression: 0.145456151465
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Loss Detector classifier: 0.950382808502
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Loss Detector regression: 0.434891333551
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Elapsed time: 1706.23027396
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Total loss decreased from inf to 4.23282456142, saving weights
INFO 2019-07-26 11:06:06 +0000 master-replica-0 Training complete, exiting.
INFO 2019-07-26 11:06:07 +0000 master-replica-0 Module completed; cleaning up.
INFO 2019-07-26 11:06:07 +0000 master-replica-0 Clean up finished.
INFO 2019-07-26 11:06:07 +0000 master-replica-0 Task completed successfully.
endTime: '2019-07-26T11:09:37'
jobId: test_job_GcloudColab_3
startTime: '2019-07-26T10:33:44'
state: SUCCEEDED
###Markdown
Online predictions in AI Platform

Create model and version resources in AI Platform

To serve online predictions using the model you trained and exported previously, create a **Model Resource** in AI Platform and a **Version Resource** within it. The version resource is what actually uses your trained model to serve predictions. Multiple models and versions can be created in AI Platform to test and experiment with results. Explore more about [models and versions](https://cloud.google.com/ml-engine/docs/tensorflow/projects-models-versions-jobs).

* First, define a name and create the model resource. Also enable online prediction logging to stream logs containing the **stderr and stdout streams** from your prediction nodes; this can be useful for debugging during version creation and inference.
###Code
MODEL_NAME = "food_predictor"
! gcloud beta ai-platform models create $MODEL_NAME \
--regions $REGION --enable-console-logging
###Output
_____no_output_____
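###Markdown
As an optional sanity check (added here for illustration, not part of the original walkthrough), you can list the models in your project to confirm the model resource was created before moving on to version creation:
###Code
# Optional check: list the AI Platform models in the current project.
# The model name defined above should appear in this list.
! gcloud ai-platform models list
###Output
_____no_output_____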
###Markdown
Now, create the model version. In the previous step the training job exported its output to a timestamped directory in your Cloud Storage bucket; AI Platform uses this directory to create a model version.

* The code packaged during training is stored at **gs://$BUCKET_NAME/JOB_NAME/** from the previous steps.

Since the model saved as training output is in Keras' .hdf5 format, i.e. not TensorFlow's recommended SavedModel format, this model is versioned using a [Custom Prediction routine](https://cloud.google.com/ml-engine/docs/tensorflow/custom-prediction-routines), as explained in the GCP documentation for version creation.

* **First, clone the custom prediction implementation**
###Code
!git clone https://github.com/leoninekev/ml-engine-custom-prediction-routine.git
###Output
_____no_output_____
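###Markdown
The cloned repository provides the `predictor.MyPredictor` class referenced later when creating the version. For orientation only, the cell below is a minimal, illustrative sketch of what an AI Platform custom prediction routine generally looks like — it is **not** the code from the cloned repository, and the model file name used here (`model.hdf5`) is an assumption:
###Code
# Illustrative sketch of a custom prediction routine (hypothetical, for orientation only).
import os

import numpy as np
from keras.models import load_model


class MyPredictor(object):
    """Loads the exported Keras .hdf5 model and serves predictions."""

    def __init__(self, model):
        self._model = model

    def predict(self, instances, **kwargs):
        # 'instances' is the list of JSON-deserialized inputs sent to the version endpoint.
        inputs = np.asarray(instances)
        outputs = self._model.predict(inputs)
        return outputs.tolist()

    @classmethod
    def from_path(cls, model_dir):
        # 'model_dir' is the Cloud Storage directory passed as --origin, copied locally by AI Platform.
        model = load_model(os.path.join(model_dir, 'model.hdf5'))  # assumed file name
        return cls(model)
###Output
_____no_output_____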
###Markdown
Now proceed as follows:
* Navigate to the directory containing **setup.py**
* Package the code by running the following cell
* Copy the packaged .tar.gz file to a specific folder in the Cloud Storage bucket
###Code
! python setup.py sdist --formats=gztar
! gsutil cp dist/test_code_new_model_beta5-0.1.tar.gz gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v
###Output
_____no_output_____
###Markdown
* Define the following model versioning parameters
###Code
MODEL_NAME = "FoodPredictor_060619"
VERSION_NAME = 'v5_a'
REGION = 'asia-east1'
###Output
_____no_output_____
###Markdown
* Now submit a version creation job by running the following cell
###Code
! gcloud beta ai-platform versions create $VERSION_NAME --model $MODEL_NAME \
    --python-version 3.5 --runtime-version 1.5 --machine-type mls1-c4-m2 \
    --origin gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v5 \
    --package-uris gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v5/test_code_new_model_beta5-0.1.tar.gz \
    --prediction-class predictor.MyPredictor
###Output
_____no_output_____ |
examples/tutorials/7. Tuning a Pipeline.ipynb | ###Markdown
Tuning a Pipeline

This short guide shows how to tune a Pipeline using a [BTB](https://github.com/HDI-Project/BTB) Tuner. Note that some steps are not explained for simplicity; full details about them can be found in the previous parts of the tutorial.

Here we will:
1. Load a dataset and a pipeline
2. Explore the pipeline tunable hyperparameters
3. Write a scoring function
4. Build a BTB Tunable and a BTB Tuner
5. Write a tuning loop

Load dataset and the pipeline

The first step will be to load the dataset that we were using in previous tutorials.
###Code
from mlprimitives.datasets import load_dataset
dataset = load_dataset('census')
###Output
_____no_output_____
###Markdown
And load a suitable pipeline. Note how in this case we are using the variable name `template` instead of `pipeline`, because this will only be used as a template for the pipelines that we will create and evaluate during the later tuning loop.
###Code
from mlblocks import MLPipeline
template = MLPipeline('single_table.classification.categorical_encoder.xgboost')
###Output
_____no_output_____
###Markdown
Explore the pipeline tunable hyperparameters

Once we have loaded the pipeline, we can extract the hyperparameters that we will tune by calling the `get_tunable_hyperparameters` method. In this case we will call it using `flat=True` to obtain the hyperparameters in a format that is compatible with BTB.
###Code
tunable_hyperparameters = template.get_tunable_hyperparameters(flat=True)
tunable_hyperparameters
###Output
_____no_output_____
###Markdown
Write a scoring function

To tune the pipeline we will need to evaluate its performance multiple times with different hyperparameters. For this reason, we will start by writing a scoring function that expects only one input, the hyperparameters dictionary, and evaluates the performance of the pipeline using them. In this case, the evaluation will be done using 5-fold cross validation based on the `get_splits` method from the dataset.
###Code
import numpy as np
def cross_validate(hyperparameters=None):
scores = []
for X_train, X_test, y_train, y_test in dataset.get_splits(5):
pipeline = MLPipeline(template.to_dict()) # Make a copy of the template
if hyperparameters:
pipeline.set_hyperparameters(hyperparameters)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
scores.append(dataset.score(y_test, y_pred))
return np.mean(scores)
###Output
_____no_output_____
###Markdown
By calling this function without any arguments we obtain the score achieved with the default hyperparameters.
###Code
default_score = cross_validate()
default_score
###Output
_____no_output_____
###Markdown
Optionally, we can verify that by passing a hyperparameters dictionary the new hyperparameters will be used, resulting in a different score.
###Code
hyperparameters = {
('xgboost.XGBClassifier#1', 'max_depth'): 4
}
cross_validate(hyperparameters)
###Output
_____no_output_____
###Markdown
Create a BTB Tunable

The next step is to create the BTB Tunable instance that will be tuned by the BTB Tuner. For this we will use its `from_dict` method, passing our hyperparameters dict.
###Code
from btb.tuning import Tunable
tunable = Tunable.from_dict(tunable_hyperparameters)
###Output
_____no_output_____
###Markdown
Create the BTB Tuner

After creating the Tunable, we need to create a Tuner to tune it. In this case we will use the GPTuner, a meta-model based tuner that uses a Gaussian Process Regressor for the optimization.
###Code
from btb.tuning import GPTuner
tuner = GPTuner(tunable)
###Output
_____no_output_____
###Markdown
Optionally, since we already know the score obtained by the default arguments, and these have a high probability of being decent already, we will inform the tuner about their performance. In order to obtain the default hyperparameters used before, we can either call the template `get_hyperparameters(flat=True)` method or use `tunable.get_defaults()`.
###Code
defaults = tunable.get_defaults()
defaults
tuner.record(defaults, default_score)
###Output
_____no_output_____
###Markdown
Start the Tuning loop

Once we have the tuner ready we can start the tuning loop. During this loop we will:
1. Ask the tuner for a new hyperparameter proposal
2. Run the `cross_validate` function to evaluate these hyperparameters
3. Record the obtained score back to the tuner
4. If the obtained score is better than the previous one, store the proposal
###Code
best_score = default_score
best_proposal = defaults
for iteration in range(10):
print("scoring pipeline {}".format(iteration + 1))
proposal = tuner.propose()
score = cross_validate(proposal)
tuner.record(proposal, score)
if score > best_score:
print("New best found: {}".format(score))
best_score = score
best_proposal = proposal
###Output
scoring pipeline 1
scoring pipeline 2
New best found: 0.8722706212975673
scoring pipeline 3
scoring pipeline 4
scoring pipeline 5
scoring pipeline 6
scoring pipeline 7
scoring pipeline 8
scoring pipeline 9
scoring pipeline 10
###Markdown
After the loop has finished, the best proposal will be stored in the `best_proposal` variable, which can be used to generate a new pipeline instance.
###Code
best_proposal
best_pipeline = MLPipeline(template.to_dict())
best_pipeline.set_hyperparameters(best_proposal)
best_pipeline.fit(dataset.data, dataset.target)
###Output
_____no_output_____
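###Markdown
As a quick usage check (reusing the training data here purely to illustrate the API; use held-out data for a real evaluation), the fitted pipeline can now produce predictions and be scored with the dataset's scoring function:
###Code
# Illustrative check: predict with the tuned pipeline and score the result.
predictions = best_pipeline.predict(dataset.data)
dataset.score(dataset.target, predictions)
###Output
_____no_output_____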
###Markdown
Tuning a Pipeline

This short guide shows how to tune a Pipeline using a [BTB](https://github.com/MLBazaar/BTB) Tuner. Note that some steps are not explained for simplicity; full details about them can be found in the previous parts of the tutorial.

Here we will:
1. Load a dataset and a pipeline
2. Explore the pipeline tunable hyperparameters
3. Write a scoring function
4. Build a BTB Tunable and a BTB Tuner
5. Write a tuning loop

Load dataset and the pipeline

The first step will be to load the dataset that we were using in previous tutorials.
###Code
from mlprimitives.datasets import load_dataset
dataset = load_dataset('census')
###Output
_____no_output_____
###Markdown
And load a suitable pipeline. Note how in this case we are using the variable name `template` instead of `pipeline`, because this will only be used as a template for the pipelines that we will create and evaluate during the later tuning loop.
###Code
from mlblocks import MLPipeline
template = MLPipeline('single_table.classification.xgb')
###Output
_____no_output_____
###Markdown
Explore the pipeline tunable hyperparameters

Once we have loaded the pipeline, we can extract the hyperparameters that we will tune by calling the `get_tunable_hyperparameters` method. In this case we will call it using `flat=True` to obtain the hyperparameters in a format that is compatible with BTB.
###Code
tunable_hyperparameters = template.get_tunable_hyperparameters(flat=True)
tunable_hyperparameters
###Output
_____no_output_____
###Markdown
Write a scoring function

To tune the pipeline we will need to evaluate its performance multiple times with different hyperparameters. For this reason, we will start by writing a scoring function that expects only one input, the hyperparameters dictionary, and evaluates the performance of the pipeline using them. In this case, the evaluation will be done using 5-fold cross validation based on the `get_splits` method from the dataset.
###Code
import numpy as np
def cross_validate(hyperparameters=None):
scores = []
for X_train, X_test, y_train, y_test in dataset.get_splits(5):
pipeline = MLPipeline(template.to_dict()) # Make a copy of the template
if hyperparameters:
pipeline.set_hyperparameters(hyperparameters)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
scores.append(dataset.score(y_test, y_pred))
return np.mean(scores)
###Output
_____no_output_____
###Markdown
By calling this function without any arguments we obtain the score achieved with the default hyperparameters.
###Code
default_score = cross_validate()
default_score
###Output
_____no_output_____
###Markdown
Optionally, we can verify that by passing a hyperparameters dictionary the new hyperparameters will be used, resulting in a different score.
###Code
hyperparameters = {
('xgboost.XGBClassifier#1', 'max_depth'): 4
}
cross_validate(hyperparameters)
###Output
_____no_output_____
###Markdown
Create a BTB Tunable

The next step is to create the BTB Tunable instance that will be tuned by the BTB Tuner. For this we will use its `from_dict` method, passing our hyperparameters dict.
###Code
from btb.tuning import Tunable
tunable = Tunable.from_dict(tunable_hyperparameters)
###Output
_____no_output_____
###Markdown
Create the BTB Tuner

After creating the Tunable, we need to create a Tuner to tune it. In this case we will use the GPTuner, a meta-model based tuner that uses a Gaussian Process Regressor for the optimization.
###Code
from btb.tuning import GPTuner
tuner = GPTuner(tunable)
###Output
_____no_output_____
###Markdown
Optionally, since we already know the score obtained by the default arguments, and these have a high probability of being decent already, we will inform the tuner about their performance. In order to obtain the default hyperparameters used before, we can either call the template `get_hyperparameters(flat=True)` method or use `tunable.get_defaults()`.
###Code
defaults = tunable.get_defaults()
defaults
tuner.record(defaults, default_score)
###Output
_____no_output_____
###Markdown
Start the Tuning loop

Once we have the tuner ready we can start the tuning loop. During this loop we will:
1. Ask the tuner for a new hyperparameter proposal
2. Run the `cross_validate` function to evaluate these hyperparameters
3. Record the obtained score back to the tuner
4. If the obtained score is better than the previous one, store the proposal
###Code
best_score = default_score
best_proposal = defaults
for iteration in range(10):
print("scoring pipeline {}".format(iteration + 1))
proposal = tuner.propose()
score = cross_validate(proposal)
tuner.record(proposal, score)
if score > best_score:
print("New best found: {}".format(score))
best_score = score
best_proposal = proposal
###Output
scoring pipeline 1
scoring pipeline 2
scoring pipeline 3
scoring pipeline 4
New best found: 0.8642241881762839
scoring pipeline 5
scoring pipeline 6
scoring pipeline 7
New best found: 0.8644390957265209
scoring pipeline 8
New best found: 0.8679095503945804
scoring pipeline 9
scoring pipeline 10
###Markdown
After the loop has finished, the best proposal will be stored in the `best_proposal` variable, which can be used to generate a new pipeline instance.
###Code
best_proposal
best_pipeline = MLPipeline(template.to_dict())
best_pipeline.set_hyperparameters(best_proposal)
best_pipeline.fit(dataset.data, dataset.target)
###Output
_____no_output_____ |
Malware Detection/Malware_detection.ipynb | ###Markdown
Tree Classifier
###Code
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Removing non essential columns for training data
data_train = Data.drop(['Name','md5','legitimate'], axis=1).values
labels = Data['legitimate'].values
extratrees = ExtraTreesClassifier().fit(data_train,labels)
select = SelectFromModel(extratrees, prefit=True)
data_train_new = select.transform(data_train)
print(data_train.shape, data_train_new.shape)
# number of selected features for training
features = data_train_new.shape[1]
importances = extratrees.feature_importances_
indices = np.argsort(importances)[::-1]
# sorting the features according to its importance (influence on final result)
for i in range(features):
print("%d"%(i+1), Data.columns[2+indices[i]],importances[indices[i]])
from sklearn.ensemble import RandomForestClassifier
legit_train, legit_test, mal_train, mal_test = train_test_split(data_train_new, labels, test_size = 0.25)
# initialising a RandomForestClassifier model with 50 trees in the forest
randomf =RandomForestClassifier(n_estimators=50)
# training the model
randomf.fit(legit_train, mal_train)
# checking performance of the model
print("Score of algo :", randomf.score(legit_test, mal_test)*100)
from sklearn.metrics import confusion_matrix
result = randomf.predict(legit_test)
'''In the confusion matrix each row corresponds to the actual class and each column to the
predicted class: the diagonal entries count correct predictions for that class, while the
off-diagonal entries count misclassifications.'''
conf_mat = confusion_matrix(mal_test,result)
print(conf_mat)
###Output
[[24037 113]
[ 66 10296]]
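###Markdown
To complement the confusion matrix above, an optional per-class precision/recall report can be produced with scikit-learn. This cell is an added illustration and was not part of the original run:
###Code
# Optional: per-class precision, recall and F1 for the random forest predictions above.
from sklearn.metrics import classification_report
print(classification_report(mal_test, result))
###Output
_____no_output_____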
###Markdown
**Logistic Regression**
###Code
import pickle
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# Scaling the data to pass into the logistic regressor
sc = StandardScaler()
legit_train_scale = sc.fit_transform(legit_train)
legit_test_scale = sc.transform(legit_test)
# Training the Model
logclf = LogisticRegression(random_state = 0)
logclf.fit(legit_train_scale, mal_train)
# TRAIN MODEL MULTIPLE TIMES FOR BEST SCORE
best = 0
for _ in range(20):
legit_train, legit_test, mal_train, mal_test = train_test_split(data_train_new, labels, test_size = 0.25)
legit_train_scale = sc.fit_transform(legit_train)
legit_test_scale = sc.transform(legit_test)
logclf = LogisticRegression(random_state = 0)
logclf.fit(legit_train_scale, mal_train)
acc = logclf.score(legit_test_scale, mal_test)
#print("Accuracy: " + str(acc))
if acc > best:
best = acc
with open("malware_log_clf.pickle", "wb") as f:
pickle.dump(logclf, f)
#Checking final accuracy
pickle_in = open("malware_log_clf.pickle", "rb")
logclf = pickle.load(pickle_in)
logclf.score(legit_test_scale, mal_test)
###Output
_____no_output_____ |
sqlite-0.ipynb | ###Markdown
About these exercises

* this is Jupyter Notebook `sqlite-0.ipynb` - for creating the database.
* for an introduction to working with Jupyter Notebooks: [Inleiding Jupyter Notebook](Inleiding-Jupyter.ipynb)
* the complete series of SQLite exercises:
  * [SQLite 0 - init database](sqlite-0.ipynb) (to start with a clean slate)
  * [SQLite 1 - selection and projection](sqlite-1.ipynb)
  * [SQLite 2 - joins](sqlite-2.ipynb)
  * [SQLite 3 - CRUD](sqlite-3.ipynb)
  * [SQLite 4 - Schema](sqlite-4.ipynb)

Example

In these exercises we use an example database with three tables: `leden` (members), `inschrijvingen` (registrations), and `events`. This database comes from a web application, which you can find on glitch.com. REF There you can view the application, try it out, and make your own version ("remix") of it.

Creating the database

In the following exercises you run all kinds of queries against a database. For that you first need a database with contents; the commands below create this database. You only need to run these commands once: the database keeps existing, with your changes. You can also run these commands again later to start over from a well-defined state.

We create the table(s). We first remove any existing version of the table(s), so that we start from a well-defined state.

> Note: there are small differences in the notation of the constraints when creating a table; MySQL, for example, uses a different notation than Oracle.

First table: leden

The command consists of the following parts:
1. starting `sqlite` (the first two lines); after this we can enter SQL statements;
2. removing the `leden` table if it exists (`DROP TABLE`);
3. creating the `leden` table (`CREATE TABLE`);
4. filling the table from a csv file (these are not SQL statements);
5. an SQL `SELECT` statement to check that the table has indeed been loaded.
###Code
%%bash
sqlite3 example.db
DROP TABLE IF EXISTS leden;
CREATE TABLE leden(
lidnr INTEGER PRIMARY KEY,
voornaam VARCHAR(255) NOT NULL,
achternaam VARCHAR(255) NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE
);
.mode csv
.import leden.csv leden
SELECT lidnr, voornaam, achternaam, email
FROM leden;
###Output
_____no_output_____
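###Markdown
As an optional check (added here for illustration, not part of the original exercise), you can ask SQLite to show the schema of the table that was just created:
###Code
%%bash
sqlite3 example.db
.schema leden
###Output
_____no_output_____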
###Markdown
We have example contents for the table(s) in csv files. Such a csv file is easy to edit in a text editor. To import a csv file we use a special SQLite command.

Second table: events

The `events` table contains the events that members can register for. Each event has a date and a description. We follow the same pattern here:
###Code
%%bash
sqlite3 example.db
DROP TABLE IF EXISTS events;
CREATE TABLE events(
eventnr INTEGER,
datum VARCHAR(10) NOT NULL,
beschrijving VARCHAR(255),
PRIMARY KEY (eventnr),
CONSTRAINT name UNIQUE (datum, beschrijving)
);
.mode csv
.import events.csv events
SELECT eventnr, datum, beschrijving
FROM events;
###Output
_____no_output_____
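###Markdown
As a small optional demonstration (added for illustration, not part of the original exercise), the UNIQUE constraint on (datum, beschrijving) rejects duplicate events. The INSERT below copies an existing row and should therefore fail with a constraint error:
###Code
%%bash
sqlite3 example.db
INSERT INTO events (datum, beschrijving)
SELECT datum, beschrijving FROM (SELECT datum, beschrijving FROM events LIMIT 1);
###Output
_____no_output_____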
###Markdown
Third table: inschrijvingen

This table describes an N-M relationship between members and events. Besides the references (via *foreign keys*) to the other tables, it holds the data about the registration itself (meal choice).
###Code
%%bash
sqlite3 example.db
DROP TABLE IF EXISTS inschrijvingen;
CREATE TABLE inschrijvingen(
eventnr INTEGER,
lidnr INTEGER,
maaltijd VARCHAR(255),
PRIMARY KEY (lidnr, eventnr),
FOREIGN KEY (lidnr) REFERENCES leden (lidnr),
FOREIGN KEY (eventnr) REFERENCES events (eventnr)
);
.mode csv
.import inschrijvingen.csv inschrijvingen
SELECT eventnr, lidnr, maaltijd
FROM inschrijvingen;
###Output
_____no_output_____
###Markdown
Demonstration: all registrations

For an overview of all registrations together with the member and event data we use a join. This is a preview - in a later notebook we will work this out further.
###Code
%%bash
sqlite3 example.db
SELECT evt.datum
, evt.beschrijving
, lid.voornaam
, lid.achternaam
, lid.email
, ins.maaltijd
FROM leden lid, events evt, inschrijvingen ins
WHERE lid.lidnr=ins.lidnr AND evt.eventnr=ins.eventnr;
###Output
_____no_output_____ |
result-analysis/Driver Failure.ipynb | ###Markdown
Metrics analysis - Driver Failure

This notebook generates the chart for driver failures.
###Code
# selectivity per run. This is required to compute the input throughput
NOTEBOOK_PHASE = 3
if (NOTEBOOK_PHASE == 1):
inputfactor = 1
elif(NOTEBOOK_PHASE == 2):
inputfactor = 2
elif(NOTEBOOK_PHASE == 3):
inputfactor = 3.166667
print("Will use input factor: " + str(inputfactor))
# settings for saving plots
saveplots = True
dpiResolution = 200
import pyspark.sql.functions as F
import numpy as np
# Import to indent the plots in the notebook
%matplotlib notebook
%matplotlib inline
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.collections as collections
import seaborn as sns
from IPython.core.display import display, HTML
from PIL import Image
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
# Python imports
import pandas as pd
from collections import defaultdict
from datetime import timedelta
from datetime import datetime
import numpy as np
import pytz
import math
# SQL imports
from pyspark.sql.functions import *
from pyspark.sql import Window
from pyspark.sql import functions
from pyspark.sql.types import IntegerType, LongType, DoubleType, TimestampType, StringType
# settings to get plots in the right style
plt.style.use('ggplot')
plt.style.use('seaborn-deep')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['axes.labelweight'] = 'normal'
plt.rcParams['axes.labelcolor'] = 'black'
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'lightgrey'
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.titleweight'] = 'normal'
plt.rcParams['figure.edgecolor'] = 'lightgrey'
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams['legend.fontsize'] = 20
plt.rcParams['legend.edgecolor'] = 'lightgrey'
plt.rcParams['figure.titlesize'] = 20
plt.rcParams['figure.titleweight'] ='bold'
plt.rcParams['grid.color'] = 'grey'
plt.rcParams['grid.linestyle'] = ':'
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.left'] = False
plt.rcParams['axes.spines.bottom'] = True
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] ='out'
plt.rcParams["date.autoformatter.minute"] = "%H:%M"
plt.rcParams["font.family"] = "Times New Roman"
# check if Spark is running
spark
# List of which frameworks should be included in this comparison
# Only Spark frameworks because they only have a driver.
frameworks_that_could_be_in_the_data = ["SPARK", "STRUCTUREDSTREAMING"]
frameworks = ["SPARK_AMO", "SPARK_ALO", "STRUCTUREDSTREAMING_AMO", "STRUCTUREDSTREAMING_ALO"]
frameworksPrintable = {
"SPARK": "Spark Streaming",
"STRUCTUREDSTREAMING":"Structured Streaming"
}
stages=["ingest", "parse", "join", "aggregate", "window"]
dataPath = dict()
for i in frameworks_that_could_be_in_the_data:
dataPath[i + "_AMO"] = "./scalability-data/driver-failure/" + i + "/stage" + str(NOTEBOOK_PHASE) + "/5x-4cpu-20gb/AMO/*"
dataPath[i + "_ALO"] = "./scalability-data/driver-failure/" + i + "/stage" + str(NOTEBOOK_PHASE) + "/5x-4cpu-20gb/ALO/*"
print("The paths that will be read: ")
dataPath
###Output
The paths that will be read:
###Markdown
General Methods
###Code
def datetimeFromEpoch(epoch):
return datetime.utcfromtimestamp(epoch//1000).replace(microsecond=epoch%1000*1000)
datetimeFromEpochUDF = functions.udf(datetimeFromEpoch, TimestampType())
# method to save the image
def save_img_colored_and_grayscale(path_colored_img):
if saveplots:
plt.savefig(path_colored_img + '.png', dpi=dpiResolution, bbox_inches="tight", pad_inches = 0)
im = Image.open(path_colored_img + '.png').convert('L')
im.save(path_colored_img + '_grayscale.png', dpi=(300, 300))
###Output
_____no_output_____
###Markdown
Latency

Read in the latency data and transform it into the right format for plotting.
###Code
# Check if each framework had all containers running during the benchmark. To avoid including runs which had issues with some components.
containerCheck = defaultdict(dict)
for framework in frameworks:
try:
if framework == "KAFKASTREAMS":
requiredAmtContainers = 5
elif framework == "FLINK":
requiredAmtContainers = 6
else:
requiredAmtContainers = 7
containerCheckPhase = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/resources-per-container-timeseries.csv/*")
amtOfContainers = containerCheckPhase.select("containerName").distinct().count()
if amtOfContainers != requiredAmtContainers:
containerCheckPhase.select("containerName").distinct().show()
print("WARNING FOR " + framework + " volume: " + str(NOTEBOOK_PHASE) + " amount of containers: " + str(amtOfContainers))
else:
print("all checks passed for " + framework)
except:
print('framework ' + framework + " not in data")
###Output
_____no_output_____
###Markdown
Phases that are present in the data
###Code
latencyTimeseriesDataWithoutStartup = dict()
for framework in frameworks:
latencyTimeseriesDataPhase = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/latency-timeseries-data-without-startup.csv/*") \
.withColumn("time", datetimeFromEpochUDF(col("outputBucketTime")))
minTimeSec = int(latencyTimeseriesDataPhase.select("startTime").collect()[0][0]) / 60000
latencyTimeseriesDataWithoutStartup[framework] = latencyTimeseriesDataPhase.withColumn("timeSec", (col("outputBucketTime")/60000.0)-minTimeSec)
###Output
_____no_output_____
###Markdown
Throughput

Stage 0 throughput
###Code
throughputTimeseriesDataWithStartup = dict()
for framework in frameworks:
throughputTimeseriesDataWithStartupPhase = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/output-throughput-timeseries-second-buckets.csv/*") \
.withColumn("time", datetimeFromEpochUDF(col("outputBucketTime")))
minTimeSec = int(throughputTimeseriesDataWithStartupPhase.select("startTime").collect()[0][0]) / 60000
throughputTimeseriesDataWithStartup[framework] = throughputTimeseriesDataWithStartupPhase.withColumn("timeSec", (col("outputBucketTime")/60000.0)-minTimeSec)
inputThroughputTimeseriesDataWithStartup = dict()
for framework in frameworks:
inputThroughputTimeseriesDataWithStartupPhase = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/input-throughput-timeseries-second-buckets.csv/*") \
.withColumn("time", datetimeFromEpochUDF(col("inputBucketTime")))
minTimeSec = int(inputThroughputTimeseriesDataWithStartupPhase.select("startTime").collect()[0][0]) / 60000
inputThroughputTimeseriesDataWithStartup[framework] = inputThroughputTimeseriesDataWithStartupPhase.withColumn("timeSec", (col("inputBucketTime")/60000.0)-minTimeSec)
###Output
_____no_output_____
###Markdown
CPU
###Code
cpuTimeseries = dict()
for framework in frameworks:
try:
cpuTimeseries[framework] = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/cpu-per-container-timeseries.csv/*") \
.withColumn("timeParsed", datetimeFromEpochUDF(col("time")))
except:
print("didnt work for " + framework)
containersPandas = dict()
for framework in frameworks:
containersPandas[framework] = cpuTimeseries[framework].select("containerName").distinct().toPandas()
# You can use this to assign different colors to the different workers in the plot. We didn't use this here.
# Map label to RGB
color_map = dict()
for framework in frameworks:
#Assign different color to each container
rgb_values = sns.diverging_palette(255, 133, l=60, n=len(containersPandas[framework]), center="dark")
color_map[framework] = dict(zip(containersPandas[framework]['containerName'], rgb_values))
sns.palplot(sns.diverging_palette(255, 133, l=60, n=len(containersPandas), center="dark"))
# Map label to RGB
color_map = defaultdict(dict)
for framework in frameworks:
#Assign different color to each container:
rgb_values = sns.husl_palette(len(containersPandas[framework]), h=0.4, l=0.65, s=1)
color_map[framework] = dict(zip(containersPandas[framework]['containerName'], rgb_values))
cpuTimeseriesDataWithStartup = dict()
for framework in frameworks:
cpuOfPhasePerContainer = spark.read.option("header", "true").option("inferSchema", "true") \
.csv(dataPath[framework] + "/cpu-per-container-timeseries.csv/*") \
.withColumn("timeParsed", datetimeFromEpochUDF(col("time")))
minTimeSec = int(cpuOfPhasePerContainer.select("startTime").collect()[0][0]) / 60000
cpuTimeseriesDataWithStartup[framework] = cpuOfPhasePerContainer \
.withColumn("timeSec", (col("time")/60000.0)-minTimeSec)
cpuTimeseriesDataWithoutStartup = dict()
for framework in frameworks:
cpuTimeseriesDataPhase = cpuTimeseriesDataWithStartup[framework]
if len(cpuTimeseriesDataPhase.head(1)) > 0:
startTime = cpuTimeseriesDataPhase.agg(F.min("time")).collect()[0][0]
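        # Drop the first 120000 ms (2 minutes) of samples, i.e. the startup period of the run.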
cpuWithoutStartup = cpuTimeseriesDataPhase \
.filter(col("time")>startTime + 120000)
minTimeSec = int(cpuTimeseriesDataPhase.select("startTime").collect()[0][0]) / 60000
cpuTimeseriesDataWithoutStartup[framework] = cpuWithoutStartup \
.withColumn("timeSec", (col("time")/60000.0)-minTimeSec)
else:
print("No data for stage " + str(i))
containersPandasPerPhase = dict()
for framework in frameworks:
containersPandasPerPhase[framework] = cpuTimeseriesDataWithStartup[framework] \
.select("containerName").distinct().orderBy("containerName").toPandas()
###Output
_____no_output_____
###Markdown
Metric correlationsPlotting different metrics for a certain stage together. The function below generates a chart of the four metrics (latency, input throughput, output throughput, and CPU) for each framework.
###Code
def generateDriverFailureChart(colNum, containersPandas, latencyPandas, throughputPandas, inputThroughputPandas, cpuPandas, start, end):
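    # Draws one column (one framework) of the shared 4-row figure: latency percentiles,
    # input throughput, output throughput and per-container CPU usage. Note that it also
    # relies on the notebook-level `ax`, `framework`, `inputfactor` and
    # `containersPandasPerPhase` variables defined outside this function.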
minor_x_locator = AutoMinorLocator(3) # how many minor grid lines in between two major grid lines for x axis
pct01_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_01_second"], color="#a8a8a8", linestyle="--", label = "1p")
pct50_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_50_second"], color="#7e7e7e", linestyle="solid", label = "50p")
pct99_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_99_second"], color="#151515", linestyle="solid", label = "99p")
ax[0, colNum].xaxis.set_minor_locator(minor_x_locator)
minor_y_locator_1 = AutoMinorLocator(2) # how many minor grid lines in between two major grid lines for y axis
ax[0, colNum].yaxis.set_minor_locator(minor_y_locator_1)
ax[0, colNum].grid(which='minor', color='black')
ax[0, colNum].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + " s"))
tp_line2 = ax[1, colNum].scatter(inputThroughputPandas["timeSec"], inputThroughputPandas["inputMsgCount"].multiply(inputfactor), s=5, label = "input", color="#7e7e7e")
ax[1, colNum].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + "K"))
ax[1, colNum].xaxis.set_minor_locator(minor_x_locator)
ax[1, colNum].grid(which='minor', color='black')
tp_line1 = ax[2, colNum].scatter(throughputPandas["timeSec"], throughputPandas["outputMsgCount"], s=5, label = "output", color="#151515")
ax[2, colNum].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + "K"))
ax[2, colNum].xaxis.set_minor_locator(minor_x_locator)
ax[2, colNum].grid(which='minor', color='black')
cpuTimeseriesDataWithStartupPhase = cpuTimeseriesDataWithStartup[framework]\
.filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
for contNum, containerId in enumerate(containersPandasPerPhase[framework]['containerName']):
if "FLINK" in framework: label = "taskmanager-" + str(contNum)
else: label = containerId
data = cpuTimeseriesDataWithStartupPhase.loc[cpuTimeseriesDataWithStartupPhase['containerName'] == containerId]
cpu_worker_line, = ax[3, colNum].plot(data['timeSec'], data['cpuUsagePct'],
c="black", linestyle=":", label="cpu usage worker")
ax[3, colNum].set_ylim(ymin=0, ymax=110)
ax[3, colNum].xaxis.set_minor_locator(minor_x_locator)
minor_y_locator_3 = AutoMinorLocator(2) # how many minor grid lines in between two major grid lines for y axis
ax[3, colNum].yaxis.set_minor_locator(minor_y_locator_3)
ax[3, colNum].grid(which='minor', color='black')
ax[3, colNum].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: str(int(x)) + " %"))
if (colNum == 0):
ax[0, colNum].set_ylabel("latency")
ax[1, colNum].set_ylabel("input\nthroughput\nmsg/s")
ax[2, colNum].set_ylabel("output\nthroughput\nmsg/s")
ax[3, colNum].set_ylabel("CPU")
if (colNum == len(frameworks)-1):
ax[0, colNum].legend(loc = "upper right", ncol=4, bbox_to_anchor=(1, 1.45), framealpha=1.0, frameon=False)
ax[2, colNum].legend(loc = "upper right", ncol=3, bbox_to_anchor=(1, 1.45), framealpha=0.5, frameon=False)
ax[1, colNum].legend(loc = "upper right", ncol=3, bbox_to_anchor=(1, 1.45), framealpha=0.5, frameon=False)
ax[3, colNum].legend([cpu_worker_line], ["per worker"], loc = "upper right", bbox_to_anchor=(1, 1.45), frameon=False)
stageLatencyPandasShortSample = dict()
stageThroughputPandasShortSample = dict()
stageInputThroughputPandasShortSample = dict()
stageCpuPandasShortSample = dict()
start = 10
end = 15
for j, framework in enumerate(frameworks):
print(framework)
stageLatencyPandasShortSample[framework] = latencyTimeseriesDataWithoutStartup[framework].orderBy("timeSec") \
.filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
stageThroughputPandasShortSample[framework] = throughputTimeseriesDataWithStartup[framework].orderBy("timeSec") \
.filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
stageInputThroughputPandasShortSample[framework] = inputThroughputTimeseriesDataWithStartup[framework].orderBy("timeSec") \
.filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
stageCpuPandasShortSample[framework] = cpuTimeseriesDataWithoutStartup[framework].orderBy("timeSec") \
.filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
# frameworks=["FLINK", "KAFKASTREAMS", "SPARK", "STRUCTUREDSTREAMING"]
frameworksPrintable2 = {
"SPARK_AMO": "Spark Str. \n at-most-once",
"SPARK_ALO": "Spark Str. \n at-least-once",
"STRUCTUREDSTREAMING_AMO": "Structured Str. \n at-most-once",
"STRUCTUREDSTREAMING_ALO": "Structured Str. \n at-least-once"
}
f, ax = plt.subplots(4, 4,figsize=(9, 8), sharey='row', sharex='col')
pad = 5
for j, framework in enumerate(frameworks):
generateDriverFailureChart(j, containersPandas=containersPandas, \
latencyPandas=stageLatencyPandasShortSample[framework], \
throughputPandas=stageThroughputPandasShortSample[framework], \
inputThroughputPandas=stageInputThroughputPandasShortSample[framework], \
                               cpuPandas=stageCpuPandasShortSample[framework], start=start, end=end)
ax[0, j].annotate(frameworksPrintable2[framework], xy=(0.5, 1.25), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size='medium', ha='center', va='baseline')
plt.subplots_adjust(wspace=0.1, hspace=0.35)
save_img_colored_and_grayscale("./figures/driver-failure/overall/phase" + str(NOTEBOOK_PHASE) + "/driver_failure")
plt.show()
###Output
_____no_output_____ |
Mathematics/FractionMultiplication/fraction-multiplication.ipynb | ###Markdown

###Code
import uiButtons
%uiButtons
###Output
_____no_output_____
###Markdown
Visualizing Fraction Multiplication IntroductionAn important skill to have when it comes to fractions is knowing how to multiply them together.As we know, fractions are of the form $\frac{a}{b}$ with $a$ and $b$ integers and $b\neq 0$. You can think of $\frac{a}{b}$ as the number you get when you do $a\div b$. If we think of a fraction as a division problem then it makes sense that it works well with multiplication.Unlike addition, multiplying fractions is easy and straightforward. In this notebook we will look into two forms of fraction multiplication:- multiplying two fractions together $\bigg($e.g. $\dfrac{4}{7} \times \dfrac{2}{3}\bigg)$ - multiplying a fraction by an integer $\bigg($e.g. $\dfrac{4}{7} \times 3\bigg)$ ProcedureAs mentioned earlier, multiplying two fractions together is simple.Let's say we want to multiply the fractions $\dfrac{4}{7}$ and $\dfrac{2}{3}$.All we have to do is multiply the numerators (top numbers) together, then multiply the denominators (bottom numbers) together. Let's take a look: $$\frac{4}{7} \times \frac{2}{3}=\frac{4\times 2}{7\times 3}=\frac{8}{21}$$ Let's try another example. Take the fractions $\dfrac{3}{5}$ and $\dfrac{2}{3}$. To multiply them we multiply the numerators together and the denominators together: $$\frac{3\times 2}{5\times 3}=\frac{6}{15}$$ In this example, you might notice that the result is not in lowest terms: both 6 and 15 are divisible by 3, so we get $\dfrac{6}{15} = \dfrac25$. In a later notebook, we'll focus on mechanics like this. For now, we want to focus on a visual understanding of the problem.Now that we know how to multiply two fractions, let's think about what it actually means.Recall that a fraction simply represents a part of something. We can think of multiplying fractions together as taking a part of another part. In other words $\dfrac{1}{2}\times\dfrac{1}{2}$ is like saying $\dfrac{1}{2}$ of $\dfrac{1}{2}$ (one half **of** one half). If we have $\dfrac{1}{2}$ of a pizza and we want $\dfrac{1}{2}$ of that half what do we end up with?We get $\dfrac{1}{4}$ because $\dfrac{1}{2}\times\dfrac{1}{2}=\dfrac{1}{4}$.Watch the video below to help us further visualize this concept.
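(Optional aside for readers who know a little Python: the rule above can be checked with the standard-library `fractions` module. The snippet below is purely illustrative and is not used by the interactive widget later in this notebook.)
###Code
from fractions import Fraction

# Multiply numerators together and denominators together: 4/7 x 2/3 = 8/21
print(Fraction(4, 7) * Fraction(2, 3))  # prints 8/21

# Fraction automatically reduces to lowest terms: 3/5 x 2/3 = 6/15 = 2/5
print(Fraction(3, 5) * Fraction(2, 3))  # prints 2/5
###Output
_____no_output_____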
###Code
%%html
<div align="middle">
<iframe id="vid1" width="640" height="360" src="https://www.youtube.com/embed/hr_mTd-oJ-M" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
<p><a href="https://www.youtube.com/channel/UC4a-Gbdw7vOaccHmFo40b9g" target="_blank">Click here</a> for more videos by Khan Academy</p>
</div>
<script>
$(function() {
var reachable = false;
var myFrame = $('#vid1');
var videoSrc = myFrame.attr("src");
myFrame.attr("src", videoSrc)
.on('load', function(){reachable = true;});
setTimeout(function() {
if(!reachable) {
var ifrm = myFrame[0];
ifrm = (ifrm.contentWindow) ? ifrm.contentWindow : (ifrm.contentDocument.document) ? ifrm.contentDocument.document : ifrm.contentDocument;
ifrm.document.open();
ifrm.document.write('If the video does not start click <a href="' + videoSrc + '" target="_blank">here</a>');
ifrm.document.close();
}
}, 2000)
});
</script>
###Output
_____no_output_____
###Markdown
Interactive visualizationThe widget below allows you to visualize fraction multiplication as shown in the video. To begin, enter a fraction in the boxes below.
###Code
%%html
<script src="./d3/d3.min.js"></script>
<!-- <script src="https://d3js.org/d3.v3.min.js"></script> -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}
});
</script>
<script src="https://code.jquery.com/jquery-1.10.2.js"></script>
<style>
.fractionInput {
max-width: 40px;
}
.fractionBar {
width: 40px;
height: 3px;
background-color: #000000;
}
.ingredientsInput {
margin-left: 10px;
margin-right: 10px;
max-width: 40px;
/* float: right; */
}
#speech {
margin: 50px;
font-size: 150%;
}
li {
margin-bottom: 15px;
}
</style>
%%html
<div class="fractionInputs" style="margin:20px">
<h1 id="leftInputFractionText" style="float: left; display: none"></h1>
<div id="opperandInput" style="float: left; display: block">
<input type="text" class="fractionInput form-control form-control-sm" id="oppNumerator" placeholder="0" style="margin-bottom: -10px;">
<hr align="left" class="fractionBar">
<input type="text" class="fractionInput form-control form-control-sm" id="oppDenominator" placeholder="1" style="margin-top: -10px;">
</div>
<button type="button" id="continueBtn" class="btn btn-primary buttons" style="margin: 30px">Continue</button>
</div>
<div class="canvasDiv" style="clear: left">
<svg height="500" width="500" viewbox="0 0 500 500" mlns="http://www.w3.org/2000/svg" id="mainCanvas" style="float: left">
<rect id="mainBox" height="480" width="480" x="10" y="10" style="outline: solid #000000 3px; fill:#ffffff"></rect>
<rect id="leftOpperand" height="480" width="0" x="10" y="10"></rect>
<rect id="rightOpperand" height="0" width="480" x="10" y="10"></rect>
</svg>
</div>
<div>
<p id="speech">Enter a fraction inside the boxes provided then click continue.</p>
</div>
<div style="clear: left; margin-left: 10px">
<button type="button" id="resetFractionBoxBtn" class="btn btn-primary buttons">Reset</button>
</div>
###Output
_____no_output_____
###Markdown
Multiplying a fraction by an integerIn this section we will talk about multiplying a fraction like $\dfrac{4}{7}$ with an integer such as $3$. A good example of when this could be useful is when you need to double a recipe. Doing multiplication of this form is simply a special case of multiplying two fractions together since any integer, such as $3$ in this case, can be rewritten as $\dfrac{3}{1}$. On a calculator, try inputting any number divided by $1$, and you will always get back the original number. Let's demonstrate this with an example. To multiply the fraction $\dfrac{4}{7}$ and the integer $3$, remember that we can write $3$ as $\dfrac31$. We get$$\frac{4}{7}\times\frac{3}{1} = \frac{4\times 3}{7\times 1}= \frac{12}{7} $$*Note that $\dfrac{3}{1}$ is an improper fraction. Improper fractions follow all the same rules for multiplication as proper fractions.*The big takeaway from this is that the denominator does not change as it is simply multiplied by $1$. This means we did not change the _"whole"_, we only changed how many parts of the _"whole"_ we have (the numerator). In effect all we did was triple our fraction, since our constant was 3. Let's practice what we just learned with a recipe example. Below you will see the ingredient list for the famous **Fresh Tomato and Basil Pasta Salad** recipe. This recipe makes enough for 4 servings, but we would like to double the recipe in order to serve 8 people. Apply what we have learned so far to double the ingredients list for the **tomato and basil pasta salad** in order to make 8 servings. (Enter your answer in the provided boxes. Fractions should be written using the _forward slash_ key "/", e.g. 5/8. When you're done, click _check answer_ to see if you are correct!)
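(Another optional Python aside, not needed for the exercise below: the worked example $\dfrac{4}{7}\times 3$ can be checked with the same `fractions` module.)
###Code
from fractions import Fraction

# An integer c behaves like the fraction c/1, so only the numerator changes
print(Fraction(4, 7) * 3)               # prints 12/7
print(Fraction(4, 7) * Fraction(3, 1))  # same result: 12/7
###Output
_____no_output_____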
###Code
%%html
<div class="ingredientsList">
<h1>Fresh Tomato and Basil Pasta Salad</h1>
<img src="./images/pastaSalad.jpg" width=250 style="float: left; margin-right: 50px; box-shadow: 5px 6px 25px 3px grey">
<ul style="max-width: 700px; margin-bottom">
<li><label>3 medium ripe tomatoes, chopped --></label><input id="tomatoes" class="ingredientsInput"></input><label>tomatoes</label></li>
<li><label>1/3 cup thinly sliced fresh basil --></label><input id="basil" class="ingredientsInput"></input><label>cup</label></li>
<li><label>2 Tbsp. olive oil --></label><input id="olivOil" class="ingredientsInput"></input><label>Tbsp.</label></li>
<li><label>1 clove garlic, minced --></label><input id="garlic" class="ingredientsInput"></input><label>clove</label></li>
<li><label>1/2 tsp. salt --></label><input id="salt" class="ingredientsInput"></input><label>tsp.</label></li>
<li><label>1/4 tsp. pepper --></label><input id="pepper" class="ingredientsInput"></input><label>tsp.</label></li>
<li><label>8 oz. rotini pasta pasta, uncooked --></label><input id="pasta" class="ingredientsInput"></input><label>oz.</label></li>
<li><label>3/4 cup Parmesan Style Grated Topping --></label><input id="parmesan" class="ingredientsInput"></input><label>cup</label></li>
</ul>
<button type="button" id="checkAnswerBtn">Check Answers</button>
<button type="button" id="resetBtn">Reset</button>
</div>
<div>
<h2 id="answerStatus"></h2>
</div>
###Output
_____no_output_____
###Markdown
ConclusionThroughout this notebook we looked at how easy multiplying fractions together really is. We also looked at how to work with a fraction multiplied by a constant. Let's recap what we have learned:- When multiplying two fractions together we multiply the numerators together and the denominators together: $\dfrac{a}{b}\times\dfrac{c}{d}=\dfrac{a \times c}{b \times d} = \dfrac{ac}{bd}$- A constant can always be rewritten as the constant over 1: $c = \dfrac{c}{1}$- To multiply a fraction by a constant, multiply the numerator by the constant and keep the denominator the same: $\dfrac{a}{b}\times c=\dfrac{a\times c}{b}=\dfrac{ac}{b}$- Multiplying two fractions together is the same as saying _a part of a part_: $\dfrac{a}{b}\times\dfrac{c}{d}$ is like saying $\dfrac{a}{b}$ **of** $\dfrac{c}{d}$ (The equation $\dfrac{3}{5}\times\dfrac{1}{4}$ is the same as _three fifths **of** one quarter_)
###Code
%%html
<script>
var leftOpperand = {
id: 'leftOpperand',
numerator: Number(0),
denominator: Number(0),
colour: '#ff0066'
};
var rightOpperand = {
id: 'rightOpperand',
numerator: Number(0),
denominator: Number(0),
colour: '#0000ff'
};
var currentState = 0;
var getOpperandInput = function(numeratorInput, denominatorInput, opperand) {
opperand.numerator = document.getElementById(numeratorInput).value;
opperand.denominator = document.getElementById(denominatorInput).value;
}
var verticalDivide = function(xVal, lineNum) {
var i = xVal;
while(lineNum > 0){
addLine(Number(i + 10), Number(i + 10), 10, Number(d3.select('#mainBox').attr('height')) + 10);
i += xVal;
lineNum --;
}
};
var horizontalDivide = function(xVal, lineNum) {
var i = Number(xVal);
while(lineNum > 0){
addLine(10, Number(d3.select('#mainBox').attr('width')) + 10, Number(i + 10), Number(i +10));
i += xVal;
lineNum --;
}
};
var addLine = function (x1, x2, y1, y2) {
var dashed = '0,0';
var stroke = 2;
d3.select('#mainCanvas').append('line')
.attr('class', 'divLine ')
.attr('x1', x1)
.attr('x2', x2)
.attr('y1', y1)
.attr('y2', y2)
.style('stroke', 'black')
.style('stroke-width', stroke);
};
var fillBox = function(box, width, height, colour, opacity) {
d3.select('#' + box.id)
.style('fill', colour)
.style('opacity', opacity)
.transition().delay(function (d, i) {
return i * 300;
}).duration(500)
.attr('width', width)
.attr('height', height);
};
var changeOpacity = function(box, opacity) {
d3.select('#' + box.id).transition().delay(function (d, i) {
return i * 300;
}).duration(500)
.style('opacity', opacity);
d3.selectAll('.divLine').transition().delay(function (d, i) {
return i * 100;
}).duration(200)
.style('opacity', opacity);
};
var resetInputs = function() {
d3.select('#continueBtn').attr('disabled', null);
d3.selectAll('.divLine').remove();
d3.select('#leftOpperand').attr('width', 0);
d3.select('#rightOpperand').attr('height', 0);
d3.select('#leftInputFractionText').text('').style('display', 'none');
clearInput('oppNumerator');
clearInput('oppDenominator');
leftOpperand.numerator = Number(0);
leftOpperand.denominator = Number(0);
rightOpperand.numerator = Number(0);
rightOpperand.denominator = Number(0);
};
var isValid = function(numerator, denominator) {
if (numerator < 0 || numerator > 12) {
return false;
}
if (denominator <= 0 || denominator > 12) {
return false;
}
return (numerator < denominator);
};
var updateMathJax = function() {
MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
};
var showInputBox = function(inputId) {
d3.select('#' + inputId).style('display', 'block');
};
var hideInputBox = function(inputId) {
d3.select('#' + inputId).style('display', 'none');
};
var clearInput = function(inputId) {
document.getElementById(inputId).value = '';
}
var stateControler = function(state) {
currentState = state;
setSpeech(state);
switch(state) {
case 0 :
resetInputs();
showInputBox('opperandInput');
break;
case 1 :
getOpperandInput('oppNumerator', 'oppDenominator', leftOpperand);
d3.select('#leftInputFractionText')
.text('$\\frac{'+leftOpperand.numerator+'}{'+leftOpperand.denominator+'} \\times$')
.style('display', 'block');
updateMathJax();
verticalDivide(Number(d3.select('#mainBox').attr('width')/leftOpperand.denominator), Number(leftOpperand.denominator - 1));
hideInputBox('opperandInput');
break;
case 2 :
fillBox(leftOpperand, Number(d3.select('#mainBox').attr('width')/leftOpperand.denominator) * leftOpperand.numerator, Number(d3.select('#mainBox').attr('height')), leftOpperand.colour, 1);
clearInput('oppNumerator');
clearInput('oppDenominator');
showInputBox('opperandInput');
break;
case 3 :
getOpperandInput('oppNumerator', 'oppDenominator', rightOpperand);
d3.select('#leftInputFractionText')
.text('$\\frac{'+leftOpperand.numerator+'}{'+leftOpperand.denominator+'} \\times$' + '$\\frac{'+rightOpperand.numerator+'}{'+rightOpperand.denominator+'}$');
updateMathJax();
changeOpacity(leftOpperand, 0);
horizontalDivide(Number(d3.select('#mainBox').attr('height')/rightOpperand.denominator), Number(rightOpperand.denominator - 1));
hideInputBox('opperandInput');
break;
case 4 :
fillBox(rightOpperand, Number(d3.select('#mainBox').attr('width')), Number(d3.select('#mainBox').attr('height')/rightOpperand.denominator) * rightOpperand.numerator, rightOpperand.colour, 0.5);
break;
case 5 :
changeOpacity(leftOpperand, 1);
d3.select('#continueBtn').attr('disabled', true);
break;
default:
console.log('not a valid of state, returning to state 0');
stateControler(0);
}
};
var speech = [
"Enter a fraction in the boxes provided, then click continue.",
"Great! Now we see that the square has been divided into rectangles of equal size. The number of rectangles is given by the denominator. Click continue when ready.",
"Some of the equal parts have been filled in with pink. The numerator equals the number of pink rectangles. The ratio of the area in pink to the total area is our fraction. Enter another fraction to multiply then click continue.",
"Let’s focus on the second fraction. The first one is temporarily hidden for clarity. As before, the number of rectangles we see equals the denominator. Click continue when ready.",
"Now we have a blue section representing the numerator of the second fraction. Click continue to multiply these two fractions.",
"Awesome! The first fraction is back and overlaid with the second fraction. The number of rectangles in the purple section is the numerator of our answer. Notice that this is the product of the numerators. The total number of rectangles is the denominator of the product, and this is just the product of the two denominators!"
];
function setSpeech(state) {
d3.select('#speech').text(speech[state]);
};
document.getElementById('continueBtn').onclick = function() {
if(!isValid(Number(document.getElementById('oppNumerator').value), Number(document.getElementById('oppDenominator').value))){
        alert('Make sure your fractions are proper and the denominators are less than or equal to 12');
}
else {
stateControler(currentState + 1);
}
};
document.getElementById('resetFractionBoxBtn').onclick = function() {
console.log("hello");
resetInputs();
stateControler(0);
};
</script>
%%html
<script type="text/javascript">
var x = 2; //Recipie multiplyer
getInput('checkAnswerBtn').onclick = function() {
if(checkAnswers()) {
d3.select('#answerStatus').text('Correct!! Good job.');
} else {
d3.select('#answerStatus').text('Not quite, keep trying!');
}
};
getInput('resetBtn').onclick = function() {
var inputs = document.getElementsByClassName('ingredientsInput');
for(var i = 0; i < inputs.length; i++) {
inputs[i].value = '';
}
d3.selectAll('.ingredientsInput').style('background-color', '#ffffff');
d3.select('#answerStatus').text('');
};
function checkAnswers() {
var isCorrect = true;
if(!checkAnswer('tomatoes', x*3))
isCorrect = false;
if(!checkAnswer('basil', x*(1/3)))
isCorrect = false;
if(!checkAnswer('olivOil', x*2))
isCorrect = false;
if(!checkAnswer('garlic', x*1))
isCorrect = false;
if(!checkAnswer('salt', x*(1/2)))
isCorrect = false;
if(!checkAnswer('pepper', x*(1/4)))
isCorrect = false;
if(!checkAnswer('pasta', x*8))
isCorrect = false;
if(!checkAnswer('parmesan', x*(3/4)))
isCorrect = false;
return isCorrect;
};
function checkAnswer(id, ans) {
if(eval(getInput(id).value) === ans) {
return answerCorrect(id);
}
return answerIncorrect(id);
};
function answerCorrect(id) {
d3.select('#' + id).style('background-color', '#76D177');
return true;
}
function answerIncorrect(id) {
d3.select('#' + id).style('background-color', '#BB4646');
return false;
}
function getInput(id) {
return document.getElementById(id);
};
</script>
###Output
_____no_output_____ |
Statistics/Bivariate Data.ipynb | ###Markdown
Bivariate DataWelcome to the Bivariate Data section. Bivariate Data is simply a dataset that has two values instead of one. In this section, we'll go over ways we can visualize and describe relationships between random variables. You'll see the familiar scatter plot, the details of correlation and covariance, $r^2$ measures, some practical tips, and much more!It's a short but important one. Let's go. Relationships in dataThe general goal of the following techniques is to describe the relationships between two variables. For example, we want to know how house price is related to house size, or salary to experience, or year to GDP. This is such a crucial and fundamental goal of statistics because by finding a good relationship, we can make good predictions. In fact, one of the simplest methods of Machine Learning, linear regression, will model a linear relationship between an explanatory (i.e. input) variable and a result (i.e. output) variable. That way, all the machine has to do is plug in the value of the explanatory variable to get the result variable. We'll look at linear regression in more detail in another notebook. Our data: We're going to use one of Seaborn's built-in data sets called 'tips' that has a list of restaurant tips. This makes it easy for anyone to grab the data. But again, don't focus on the details of using Seaborn or Pandas, as here we're just focused on examining our data.What we want to know is if there is a relationship between the total bill and the size of the tip. You've eaten out before, what is your guess?
###Code
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
tips = sns.load_dataset('tips')
type(tips)
###Output
_____no_output_____
###Markdown
The `type` of our tips dataset is a pandas `DataFrame`. Think of a `DataFrame` as Python's version of an Excel table. Again, don't worry about it too much. There are plenty of resources out there on Pandas if you want to learn more. There are 7 variables in this dataset, but I'm going to scrub the table so we have the two pieces we want to examine: `total_bill` and `tip`.
###Code
tips.drop(labels=["sex", "smoker", "day", "time", "size"], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Basic statisticsNow we want to examine our dataset. In particular, we want to see the mean, standard deviation, range, and the distribution. Refer to the Descriptive Statistics notebook for more details on how we do this manually. For now, we'll use the `Series` class from Pandas to do all of it for us:
###Code
tips["total_bill"].describe()
tips["tip"].describe()
###Output
_____no_output_____
###Markdown
Take a couple minutes to examine those statistics. What do they tell you? Which mean?Think back to the previous lesson on Descriptive Statistics. We went over 3 different types of means. If we wanted to get the mean for the tip percent, which should we use? You might see the percent sign and jump straight for the geometric mean. After all, that's how we averaged stock returns. But remember that these are not returns but proportions -- in other words, a tip percent has no dependence on any previous value.You may also want to jump straight for the arithmetic mean by computing all of the tip percents and averaging them (in other words, $\frac{0.15 + 0.18 + 0.21}{3}=0.18$). This is a reasonable assumption, but is it the best mean?Well the answer depends on what data you are trying to describe! The arithmetic mean will give you the average tip percent, which answers the question "what tip percent can I expect any given customer to give?"But the harmonic mean answers a slightly different question: for every dollar I sell, how much tip can I expect to receive. (Remember, the harmonic mean is all about proportions). The difference is subtle, but important. Imagine if everyone with small bills tips 20%, but everyone with large bills tips 10%. The arithmetic mean of my tip percents will be around 15%. But because those with large bills contribute so much more to my total bill amount, their tips will drag down the proportion and make it closer to 10%. So the harmonic mean may be better for predicting how much tip you expect given a total revenue, whereas the arithmetic mean will be better at predicting what tip percent my next customer will make.For our example, let's take a look at these two values, starting with a tiny made-up illustration of how the two means can disagree:
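The numbers in this first cell are invented purely for illustration (one \$10 bill tipping 20% and one \$100 bill tipping 10%); the real dataset follows right after.
###Code
# Made-up two-bill illustration (numbers invented for demonstration only)
small_bill, small_tip = 10.00, 2.00    # 20% tip
large_bill, large_tip = 100.00, 10.00  # 10% tip

# Arithmetic mean of the tip percentages: what the next customer might tip
arithmetic = ((small_tip / small_bill) + (large_tip / large_bill)) / 2
# Harmonic-style mean: total tips per total dollars sold
per_dollar = (small_tip + large_tip) / (small_bill + large_bill)

print(arithmetic)  # 0.15
print(per_dollar)  # ~0.109, dragged toward the large bill's 10%
###Output
_____no_output_____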
###Code
def arithmetic_mean(tip, bills):
n = tip.count()
return sum(tip/bills)/n
arithmetic_mean(tips["tip"], tips["total_bill"])
def harmonic_mean(tip, bills):
return sum(tip)/sum(bills)
harmonic_mean(tips["tip"], tips["total_bill"])
###Output
_____no_output_____
###Markdown
Judging by these two numbers, we can expect any given person to tip about 16.08%. But at the end of the night, we can expect 15 cents for every dollar we sold. How is this so? This is an indication that the larger bills tend to tip less.We won't do any more analysis on tip percentage, but this was a valuable aside on knowing what you can glean by expressing your data in different ways. HistogramsBack to our regularly scheduled program, let's take a look at our two distributions: `tip` and `total_bill`
###Code
fig, axs = plt.subplots(1,2, figsize=(10,5))
hist_kws={"edgecolor":"k", "lw":1}
sns.distplot(tips["total_bill"], kde=False, ax=axs[0], axlabel="Total Bill (dollars)", hist_kws=hist_kws)
sns.distplot(tips["tip"], kde=False, ax=axs[1], color='r', axlabel="Total Tip (dollars)", hist_kws=hist_kws)
###Output
_____no_output_____
###Markdown
From the above, we can see that the distributions are roughly the same. It's not quite normal because of the right skew (that is, the tail is long and to the right). But even though these look similar, it does not say anything definitive about the relationship. For example, each tip could be completely independently matched up with the total bill, like in the extreme case of the \$10.00 max tip getting matched to the \$3.07 minimum bill (hey, it could happen!). For a better sense of this relationship, let's introduce the Scatter PlotAnother familiar graph, it will plot the `total_bill` against the `tip`. In this case, `total_bill` is the explanatory variable and the `tip` is the result variable. This means we are looking at the `total_bill` value to explain the `tip` value.Let's take a look at the scatter plot (or `lmplot` as seaborn calls it) and all will become clear:
###Code
sns.lmplot(x="total_bill", y="tip", data=tips, fit_reg=False)
###Output
_____no_output_____
###Markdown
Look at that! So much more useful than the histogram as we can now see the actual relationships. Each datapoint represents one total bill paired up with its related tip. Obviously there's a strong relationship here. Since we know the average tip to be 16.1%, and a bill of 0 should give us a tip of 0, we can quickly guess that a linear relationship should be described by the following equation:$$ y = 0 + 0.161x $$Let's plot this line on the graph and see how accurate it might be.
###Code
yvals = 0.161 * tips["total_bill"]
plt.plot(tips["total_bill"], yvals)
sns.regplot(tips["total_bill"], tips["tip"], fit_reg=False)
###Output
_____no_output_____
###Markdown
That looks pretty good, but of course mathematicians aren't excited by "looks pretty good." They need numbers to back that claim up. We'll get into finding the best line in the next notebook on linear regression, but for now rest assured that there's a clear positive relationship (meaning $y$ goes up as $x$ goes up) between the variables._Refer back to the section on "Which mean?" See how the higher bills tend to be below the blue line? This again is more confirmation that the larger bills tend to tip less. Of course in a small dataset, you can't call this a significant trend, but it does corroborate the discrepancy in our earlier analysis._ Covariance and Correlation CovarianceLet's now talk about how strong this positive relationship is. In other words, how sure can we be that if $x$ goes up $y$ will follow? Statisticians look at the way these values vary together with a term called _covariance_. When two variables tend to move together, their covariance is positive. If they move in opposite directions, it's negative. If they are completely independent, it's 0.The covariance for $N$ samples with random variables $X$ and $Y$ is given by the formula$$ Q = \frac{1}{N-1} \sum_{i=1}^{N} (x_{i}-\bar{x})(y_i-\bar{y}) $$In Python:
###Code
# A useful helper function will be to define the dot product between two vectors
# dot product is defined as <v1, v2> * <u1, u2> = v1*u1 + v2*u2
from operator import mul
def dot(v, u):
return sum(map(mul, v,u))
# Another useful helper function will be to take a list and output another list
# that contains the differences of the means
def diff_mean(values):
mean = values.mean() # use Panda's arithmetic mean function
deltas = []
for v in values:
deltas.append(v-mean)
return deltas
def covariance(x, y):
n = len(x)
return dot(diff_mean(x), diff_mean(y)) / (n-1)
covariance(tips["total_bill"], tips["tip"])
###Output
_____no_output_____
###Markdown
8.3235 is a value without a lot of context. Think about what the units of that are: dollars times dollars, i.e. dollars squared ($\$^2$). Do you ever pay for things in dollars squared? Or if we use two variables with totally different units, like salary (dollars) and years, we get units of dollar-years. This is really difficult to get a sense of scale or closeness of relationship. CorrelationInstead, statisticians will often use the correlation (or Pearson Correlation Coefficient to be formal) to report a number that everyone can understand. Correlation is a number between -1 and +1. If it's +1, there's a perfect positive linear relationship; if it's -1, a perfect negative one. A correlation of 0 means there's no _linear_ relationship. But keep in mind, a correlation of 0 doesn't mean the variables are unrelated -- they can still be related in a nonlinear way.Take this helpful image from Wikipedia:The correlation coefficient is almost always denoted by the variable $r$. The formula for a sample is:$$ r = \frac{\sum\nolimits_{i=1}^{n}(x_i-\bar{x})(y_i-\bar{y})}{\sqrt{\sum\nolimits_{i=1}^{n}(x_i-\bar{x})^2\sum\nolimits_{i=1}^{n}(y_i-\bar{y})^2}} $$ Before you freak out, we've seen all of these pieces before. This is just the covariance divided by the product of the standard deviations. Perhaps a simpler equation?$$ \rho_{X,Y} = \frac{cov(X,Y)}{\sigma_X\sigma_Y} $$Now in code:
###Code
def correlation(x, y):
std_x = x.std() # again, just use Panda's method
std_y = y.std()
if std_x > 0 and std_y > 0:
return covariance(x,y) / (std_x*std_y)
else:
return 0
correlation(tips["total_bill"], tips["tip"])
###Output
_____no_output_____ |
notebooks/Sofia/onlyFitandPlotfit.ipynb | ###Markdown
Select the folder containing the Master Sheet I shared with you.
###Code
askdirectory = filedialog.askdirectory() # show an "Open" dialog box and select the folder
path = Path(askdirectory)
data = pd.read_csv(path/('MasterSheet.csv'), encoding='utf-8') #read the Master Sheet
data
###Output
_____no_output_____
###Markdown
These are the column names in my data frame; since I'll use them over and over again, I'd rather just declare them once.
###Code
tubulin = '[Tubulin] ' r'$(\mu M)$'
tub = 'tub'
DCXconc = '[DCX] ' r'$(n M)$'
DCX = 'DCX'
Type = 'DCX Type'
Concentration = 'Concentration ' r'$(\mu M)$'
Length = 'Length ' r'$(\mu m)$'
Lifetime = 'Lifetime ' r'$(min)$'
GrowthRate = 'Growth Rate ' r'$(\mu m / min)$'
TimeToNucleate = 'Time to Nucleate ' r'$(min)$'
ShrinkageLength = 'Shrink Length ' r'$(\mu m)$'
ShrinkageLifetime = 'Shrink Lifetime ' r'$(min)$'
ShrinkageRate = 'Shrink Rate ' r'$(\mu m / min)$'
parameters = [GrowthRate,TimeToNucleate,Lifetime,ShrinkageRate]
###Output
_____no_output_____
###Markdown
Fitting Data First declare the functions you are going to fit to. Here x is the variable and the other inputs are the distribution's parameters.
###Code
def gaussian(x, mu, sig):
return (np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) )/(sig*np.sqrt(2*np.pi))
def exponential(x, scale):
return ((np.exp(-x/scale) )/(scale))
def gamma(x, shape, scale):
return (np.power(x,shape-1)*np.exp(-x/ scale))/(sp.special.gamma(shape) * np.power(scale,shape))
###Output
_____no_output_____
###Markdown
Then I make a function to extract a particular set of data and make a histogram. When matplotlib.pyplot makes a histogram, it saves the info on the bins used and the value of each bin.
###Code
def make_hist(data, parameter, tubconc, dcxtype, dcxconc): #Dataframe, what paramenter I'm plotting (e.g. GrowthRate), tubulin concentration, which dcx mutant, DCX concentration.
selectdata = data[(data[tubulin]==tubconc)&(data[Type]==dcxtype)&(data[DCXconc]==dcxconc)] #this is specific to how my dataframe is organized, it just filters the data I'm interested in
if parameter == GrowthRate : #The Growthrate histogram ranges from 0 to 1.5 while the other go up to 30
maxbin = 1.5
binsize = 0.05
else:
maxbin = 30
binsize = 1
    n, bins, patches = plt.hist(selectdata[parameter], bins=np.arange(0, maxbin + binsize, binsize), density=True); #extract the histogram info: n is the value of each bin, patches is image info that we don't need
plt.clf() #This is so the image of the histogram doesn't appear, we don't need it right now
return n, bins
###Output
_____no_output_____
###Markdown
Next is the 'Master' fitting function where I only have to give it a dataframe with all my data (the Master Sheet) and the parameter I want to plot (e.g. GrowthRate). Inside the function I loop over every tubulin concentration, DCX mutant and DCX concentration. Then it uses the previous function to get the histogram info. With this info it fits a curve with optimize. The optimize function outputs the fitting coefficients and the variance matrix. From the matrix you can get the error after doing some simple math. Finally I make a dataframe that contains the coefficients and error for each condition.
###Code
def equation_fit(data, parameter):
    if (parameter == GrowthRate) | (parameter == ShrinkageRate) : #Choose an equation given a parameter to fit
equation = gaussian
elif parameter == TimeToNucleate :
equation = exponential
elif parameter == Lifetime :
equation = gamma
results = pd.DataFrame(columns=[] , index=[]) #Declare an empty dataframe where we'll later put the results in.
for tubconc in data[tubulin].unique(): #Lopping over all of my conditions
for dcxtype in data[Type].unique():
for dcxconc in data[DCXconc].unique():
n, bins = make_hist(data, parameter, tubconc, dcxtype, dcxconc) #Make one histogram per condition
if np.isnan(np.sum(n)) == True: #If the condition doesn't exist, skip the loop (eg. DCX Type = None, [DCX] = 50nM)
continue
                if equation == gamma : #The optimize function starts with a set of parameters and iterates to minimize the error.
                    #The default starting parameter is 1, but the gamma function needs something other than the default to work.
coeff, var_matrix = sp.optimize.curve_fit(equation,bins[:-1],n,[2,1])
else :
coeff, var_matrix = sp.optimize.curve_fit(equation,bins[:-1],n) #Give optimize the function of interest, and the info we got from the histogram
variance = np.diagonal(var_matrix) #This is the math you have to do to extract the error from the output matrix
SE = np.sqrt(variance) #SE for Standard Error
#======Making a data frame========
results0 = pd.DataFrame(columns=[] , index=[]) #Declare a dataframe to put this loop's coefficients
for k in np.arange(0,len(coeff)):
header = [np.array([parameter]),np.array(['Coefficient '+ str(k)])]
r0 = pd.DataFrame([coeff[k],SE[k]], index=(['Value','SE']),columns= header)
results0 = pd.concat([results0, r0], axis=1, sort=False)
results0[tubulin] = tubconc #Adding the concentration info to thecoefficients we just saved
results0[Type] = dcxtype
results0[DCXconc] = dcxconc
results = pd.concat([results, results0], sort=False) #Concatenate to the big result dataframe
return results
###Output
_____no_output_____
###Markdown
Then just run the function and voilà, done. This is all you have to do to get the coefficients, which are also the means for the exponential and gaussian. For Lifetime, though, there are a couple more steps you have to do on the gamma coefficients to get the mean and mean error. I haven't included them right now to keep things simple, but if you're interested just ask :)
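For reference, a minimal sketch of those extra steps could look like the cell below. It assumes the shape/scale parameterization used in the `gamma` function above (mean = shape times scale), uses placeholder numbers instead of real fit results, and treats the two coefficient errors as independent (their covariance is ignored).
###Code
import numpy as np

# Sketch only: shape/scale and their standard errors would come from the 'Value' and
# 'SE' rows of the Lifetime fit for the condition of interest; placeholders used here.
shape, SE_shape = 2.0, 0.1
scale, SE_scale = 1.5, 0.1

gamma_mean = shape * scale  # mean of a gamma distribution in the shape/scale parameterization
# Simple error propagation, assuming the two coefficient errors are independent
gamma_mean_SE = np.sqrt((scale * SE_shape)**2 + (shape * SE_scale)**2)
print(gamma_mean, gamma_mean_SE)
###Output
_____no_output_____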
###Code
GrowthRateFit = equation_fit(data, GrowthRate);
TimeToNucleateFit = equation_fit(data, TimeToNucleate);
LifetimeFit = equation_fit(data, Lifetime);
ShrinkageRateFit = equation_fit(data, ShrinkageRate);
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:8: RuntimeWarning: divide by zero encountered in power
###Markdown
Concatenate the results from above
###Code
ResultFit = pd.concat([GrowthRateFit, TimeToNucleateFit,LifetimeFit,ShrinkageRateFit], axis=1, sort=False)
ResultFit = ResultFit.loc[:,~ResultFit.columns.duplicated()]
###Output
_____no_output_____
###Markdown
To plot the histogram with the fitted functions I use the following:
###Code
def plot_hist(data, tubconc, dcxtype, dcxconc) :
selectdata = data[(data[tubulin]==tubconc)&(data[Type]==dcxtype)&(data[DCXconc]==dcxconc)] #again select data from Master Sheet
fig, ax = plt.subplots(2,2,figsize=(15,15)) #declare figure
n = len(selectdata.dropna().index) #gets the how many microtubules you analyzed per histogram
c=0
for i in np.arange(len(ax)):
for j in np.arange(len(ax)):
parameter = parameters[c]
if parameter == GrowthRate : #same steps to make a histogram
maxbin = 1.5
binsize = 0.025
else:
maxbin = 30
binsize = 0.5
ax[i][j].hist(selectdata[parameter], bins=np.arange(0, maxbin + binsize, binsize), density=True);
ax[i][j].set_title(parameter)
ax[1][1].set_xlim(0,maxbin)
c += 1
    selectcoeff = ResultFit[ResultFit[tubulin]==tubconc] #filter dataframe for one [Tub]
x = np.arange(0, 1.5 + 0.025, 0.025)
# keep filtering the dataframe to obtain a specific coefficient
mu = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[0]]['Coefficient 0'].loc['Value']
sig = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[0]]['Coefficient 1'].loc['Value']
    ax[0][0].plot(x, gaussian(x, mu, sig)); #plot a curve with the equation and its coefficients you just got. Growth rate
x = np.arange(0, 30 + 0.5, 0.5)
scale = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[1]]['Coefficient 0'].loc['Value']
ax[0][1].plot(x, exponential(x, scale)); #same for nucleation
shape = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[2]]['Coefficient 0'].loc['Value']
scale = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[2]]['Coefficient 1'].loc['Value']
ax[1][0].plot(x, gamma(x, shape, scale));#lifetime
mu = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[3]]['Coefficient 0'].loc['Value']
sig = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)][parameters[3]]['Coefficient 1'].loc['Value']
    ax[1][1].plot(x, gaussian(x, mu, sig)); #shrinkage rate
return n
plot_hist(data, 6, 'P191R', 35)
###Output
_____no_output_____ |
Obtencao_dados_cripto.ipynb | ###Markdown
Imports:
###Code
import numpy as np
import datetime as dt
import time
import re
import requests
import matplotlib.pyplot as plt
from pandas import Series, DataFrame, read_json, to_datetime, to_numeric
#from pandas import Timedelta
###Output
_____no_output_____
###Markdown
Exemplos "originais" (via Telegram):
###Code
dados = read_json('https://api.binance.com/api/v3/klines?symbol=BTCUSDT&interval=15m&limit=340')
dados2 = read_json('https://api.binance.com/api/v3/klines?symbol=VETUSDT&interval=15m&startTime=1628125200000&endTime=1628989200000&limit=1000')
###Output
_____no_output_____
###Markdown
Interactive examples:
###Code
symbol = 'BTCUSDT'
interval='15m'
limit='340'
dados3 = read_json('https://api.binance.com/api/v3/klines?symbol={}&interval={}&limit={}'.format(
symbol, interval, limit)
)
dados3.head()
fig = dados3[4].plot(figsize = (16,8), grid=True, fontsize = 18, linewidth=2.0)
plt.title(f'{symbol} Close values over the time', fontdict = {'fontsize' : 25})
print(dados3[4].min())
print(dados3[4].max())
print((dados3[4].max() - dados3[4].min())/(dados3[4].max()))
symbol = 'VETUSDT'
interval='15m'
limit='1000'
data_inicio = '2021-08-04'
horario_inicio = '22:00'
data_fim = '2021-08-14'
horario_fim = '22:00'
startTime = int(dt.datetime.fromisoformat(data_inicio+'T'+horario_inicio).timestamp()) * 1000
endTime = int(dt.datetime.fromisoformat(data_fim+'T'+horario_fim).timestamp()) * 1000
dados4 = read_json('https://api.binance.com/api/v3/klines?symbol={}&interval={}&startTime={}&endTime={}&limit={}'.format(
symbol, interval, startTime, endTime, limit)
)
dados4.head()
###Output
_____no_output_____
###Markdown
Collecting data over a wide date range and exporting the results:
###Code
symbols = ['BTC','ETH','BNB']
#symbols = ['DOGE','CHZ','MATIC','XRP','BNB','XLM','THETA','VET','ETC','BTT','FIL','ADA','LTC','UNI','TRX','ENJ','DOT','EOS','ETH','BCH','ATOM','LINK','AXS','NEO','BTC','XTZ','MKR','AAVE','XMR','FTT','ALGO','SOL']
def gerar_csv_dados_cripto(symbol, interval, limit, data_inicio=None, horario_inicio=None, data_fim=None, horario_fim=None, log=True):
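    # Each delta below is roughly the time span covered by up to 1000 candles of the given
    # interval, so each chunked request stays within Binance's 1000-kline limit.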
if(interval == '1m'):
delta = dt.timedelta(hours=16)
elif(interval == '3m'):
delta = dt.timedelta(days=2)
elif(interval == '5m'):
delta = dt.timedelta(days=3)
elif(interval == '15m'):
delta = dt.timedelta(days=10)
elif(interval == '30m'):
delta = dt.timedelta(days=20, hours=20)
elif(interval == '1h'):
delta = dt.timedelta(days=40)
elif(interval == '2h'):
delta = dt.timedelta(days=80)
elif(interval == '4h'):
delta = dt.timedelta(days=160)
elif(interval == '6h'):
delta = dt.timedelta(days=240)
elif(interval == '8h'):
delta = dt.timedelta(days=320)
elif(interval == '12h'):
delta = dt.timedelta(days=480)
elif(interval == '1d'):
delta = dt.timedelta(days=1000)
elif(interval == '3d'):
delta = dt.timedelta(days=3000)
elif(interval == '1w'):
delta = dt.timedelta(weeks=1000)
elif(interval == '1M'):
delta = dt.timedelta(weeks=4000)
else:
raise ValueError('intervalError')
if(horario_inicio == None):
horario_inicio = '00:00:00'
if(horario_fim == None):
#horario_fim = dt.datetime.now()
#horario_fim = str(horario_fim.hour)+':'+str(horario_fim.minute)+':'+str(horario_fim.second)
horario_fim = '23:59:59'
if(limit > 1000):
limit = 1000
if(limit <= 0):
raise ValueError('limitError')
if(data_inicio == None):
path_dados = 'https://api.binance.com/api/v3/klines?symbol={}&interval={}&limit={}'.format(
symbol, interval, limit
)
dados_atual = read_json(path_dados).iloc[:, :5]
dados_atual.insert(loc=0, column='Date-Time', value = dados_atual[0].apply(lambda x : dt.datetime.fromtimestamp(x/1000)))
print('Gerando arquivo .csv...')
dados_atual.columns = ['Date-Time', 'Timestamp', 'Open', 'Max', 'Min', 'Close']
dados_atual.set_index('Date-Time', inplace=True)
dados_atual.to_csv(f'dados_{symbol}_{interval}.csv')
print('Procedimento realizado com sucesso!')
return
elif((data_inicio != None) and (data_fim != None)):
path_dados_base = 'https://api.binance.com/api/v3/klines?symbol={}&interval={}&startTime={}&endTime={}&limit={}'
limit = 1000
else:
data_fim = dt.datetime.now()
data_fim = str(data_fim.year)+'-'+str(data_fim.month)+'-'+str(data_fim.day)
path_dados_base = 'https://api.binance.com/api/v3/klines?symbol={}&interval={}&startTime={}&endTime={}&limit={}'
limit = 1000
data_atual = dt.datetime.fromisoformat(data_inicio+'T'+horario_inicio)
dados_list = list()
primeiro_laco = True
momento_final = dt.datetime.fromisoformat(data_fim+'T'+horario_fim)
momento_final_timestamp = int(dt.datetime.timestamp(momento_final)) * 1000
it_count = 1
while(data_atual < momento_final):
if(log):
print('Iteracao Atual:', it_count)
print('Data Atual:', data_atual)
startTime = int(data_atual.timestamp()) * 1000
aux_endTime = int( (data_atual + delta).timestamp()) * 1000
if(aux_endTime < momento_final_timestamp):
endTime = aux_endTime
else:
endTime = momento_final_timestamp
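        # Keep only the first five fields of each kline from the raw JSON text:
        # open time, open, high, low and close (named further below).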
dados_iter = re.findall(r'\[[\d]+,"[\d]+.[\d]+","[\d]+.[\d]+","[\d]+.[\d]+","[\d]+.[\d]+"', requests.get(
path_dados_base.format(symbol, interval, startTime, endTime, limit)).text
)
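        # After the first chunk, drop the first candle so the boundary candle shared with
        # the previous request is not duplicated.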
if(primeiro_laco):
dados_list.extend(dados_iter)
else:
dados_list.extend(dados_iter[1:])
data_atual += delta
it_count += 1
primeiro_laco = False
print('Gerando arquivo .csv...')
dados_completo = Series(dados_list).str.replace('[\["]', '', regex=True)
dados_completo = DataFrame([dados_completo[i].split(',') for i in range(len(dados_completo))])
dados_completo = dados_completo.apply(to_numeric)
dados_completo.insert(0, 'Date-Time', (dados_completo[0]/1000).apply(lambda x : dt.datetime.fromtimestamp(x)))
dados_completo.columns = ['Date-Time', 'Timestamp', 'Open', 'Max', 'Min', 'Close']
dados_completo.set_index('Date-Time', inplace=True)
dados_completo.to_csv(f'dados_{symbol}_{interval}.csv')
print('Procedimento realizado com sucesso!')
return
time_init = time.time()
gerar_csv_dados_cripto(symbol='BTCUSDT', interval='30m', limit=1000, data_inicio='2021-06-01', log=False)
print('Tempo do procedimento:', time.time() - time_init, 's')
###Output
Gerando arquivo .csv...
Procedimento realizado com sucesso!
Tempo do procedimento: 5.530316114425659 s
|
ingest_data/ingest-data-types/ingest_text_data.ipynb | ###Markdown
Ingest Text DataLabeled text data can be in a structured data format, such as reviews for sentiment analysis, news headlines for topic modeling, or documents for text classification. In these cases, you may have one column for the label, one column for the text, and sometimes other columns for attributes. You can treat this structured data like tabular data. Sometimes text data, especially raw text data comes as unstructured data and is often in .json or .txt format, and we will discuss how to ingest these types of data files into a SageMaker Notebook in this section. Set Up Notebook
###Code
%pip install -q 's3fs==0.4.2'
import pandas as pd
import json
import glob
import s3fs
import sagemaker
# Get SageMaker session & default S3 bucket
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket() # replace with your own bucket if you have one
s3 = sagemaker_session.boto_session.resource("s3")
prefix = "text_spam/spam"
prefix_json = "json_jeo"
filename = "SMSSpamCollection.txt"
filename_json = "JEOPARDY_QUESTIONS1.json"
###Output
_____no_output_____
###Markdown
Downloading data from Online Sources Text data (in structured .csv format): Twitter -- sentiment140 **Sentiment140** This is the sentiment140 dataset. It contains 1.6M tweets extracted using the Twitter API. The tweets have been annotated with sentiment (0 = negative, 4 = positive) and topics (hashtags used to retrieve tweets). The dataset contains the following columns:* `target`: the polarity of the tweet (0 = negative, 4 = positive)* `ids`: The id of the tweet (2087)* `date`: the date of the tweet (Sat May 16 23:58:44 UTC 2009)* `flag`: The query (lyx). If there is no query, then this value is NO_QUERY.* `user`: the user that tweeted (robotickilldozr)* `text`: the text of the tweet (Lyx is cool)[Second Twitter data](https://github.com/guyz/twitter-sentiment-dataset) is a Twitter data set collected as an extension to the Sanders Analytics Twitter sentiment corpus, originally designed for training and testing Twitter sentiment analysis algorithms. We will use this data to showcase how to aggregate two data sets if you want to enhance your current data set by adding more data to it.
###Code
# helper functions to upload data to s3
def write_to_s3(filename, bucket, prefix):
# put one file in a separate folder. This is helpful if you read and prepare data with Athena
key = "{}/{}".format(prefix, filename)
return s3.Bucket(bucket).upload_file(filename, key)
def upload_to_s3(bucket, prefix, filename):
url = "s3://{}/{}/{}".format(bucket, prefix, filename)
print("Writing to {}".format(url))
write_to_s3(filename, bucket, prefix)
# run this cell if you are in SageMaker Studio notebook
#!apt-get install unzip
# download first twitter dataset
!wget http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip -O sentimen140.zip
# Uncompressing
!unzip -o sentimen140.zip -d sentiment140
# upload the files to the S3 bucket
csv_files = glob.glob("sentiment140/*.csv")
for filename in csv_files:
upload_to_s3(bucket, "text_sentiment140", filename)
# download second twitter dataset
!wget https://raw.githubusercontent.com/zfz/twitter_corpus/master/full-corpus.csv
filename = "full-corpus.csv"
upload_to_s3(bucket, "text_twitter_sentiment_2", filename)
###Output
_____no_output_____
###Markdown
Text data (in .txt format): SMS Spam data [SMS Spam Data](https://archive.ics.uci.edu/ml/datasets/sms+spam+collection) was manually extracted from the Grumbletext Web site. This is a UK forum in which cell phone users make public claims about SMS spam messages, most of them without reporting the very spam message received. Each line in the text file has the correct class followed by the raw message. We will use this data to showcase how to ingest text data in .txt format.
###Code
!wget http://www.dt.fee.unicamp.br/~tiago/smsspamcollection/smsspamcollection.zip -O spam.zip
!unzip -o spam.zip -d spam
txt_files = glob.glob("spam/*.txt")
for filename in txt_files:
upload_to_s3(bucket, "text_spam", filename)
###Output
_____no_output_____
###Markdown
Text Data (in .json format): Jeopardy Question data[Jeopardy Question](https://j-archive.com/) was obtained by crawling the Jeopardy question archive website. It is an unordered list of questions where each question has the following key-value pairs:* `category` : the question category, e.g. "HISTORY"* `value`: dollar value of the question as string, e.g. "\$200"* `question`: text of question* `answer` : text of answer* `round`: one of "Jeopardy!","Double Jeopardy!","Final Jeopardy!" or "Tiebreaker"* `show_number` : string of show number, e.g '4680'* `air_date` : the show air date in format YYYY-MM-DD
###Code
# json file format
! wget 'https://docs.google.com/uc?export=download&id=0BwT5wj_P7BKXb2hfM3d2RHU1ckE' -O JEOPARDY_QUESTIONS1.json
# Uncompressing
filename = "JEOPARDY_QUESTIONS1.json"
upload_to_s3(bucket, "json_jeo", filename)
###Output
_____no_output_____
###Markdown
Ingest Data into Sagemaker Notebook Method 1: Copying data to the InstanceYou can use the AWS Command Line Interface (CLI) to copy your data from s3 to your SageMaker instance. This is a quick and easy approach when you are dealing with medium sized data files, or you are experimenting and doing exploratory analysis. The documentation can be found [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html).
###Code
# Specify file names
prefix = "text_spam/spam"
prefix_json = "json_jeo"
filename = "SMSSpamCollection.txt"
filename_json = "JEOPARDY_QUESTIONS1.json"
prefix_spam_2 = "text_spam/spam_2"
# copy data to your sagemaker instance using AWS CLI
!aws s3 cp s3://$bucket/$prefix_json/ text/$prefix_json/ --recursive
data_location = "text/{}/{}".format(prefix_json, filename_json)
with open(data_location) as f:
data = json.load(f)
print(data[0])
###Output
_____no_output_____
###Markdown
Method 2: Use AWS compatible Python PackagesWhen you are dealing with large data sets, or do not want to lose any data when you delete your Sagemaker Notebook Instance, you can use pre-built packages to access your files in S3 without copying files into your instance. These packages, such as `Pandas`, have implemented options to access data with a specified path string: while you will use `file://` on your local file system, you will use `s3://` instead to access the data through the AWS boto library. For `pandas`, any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. You can find additional documentation [here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html). For text data, most of the time you can read it as line-by-line files or use Pandas to read it as a DataFrame by specifying a delimiter.
###Code
data_s3_location = "s3://{}/{}/{}".format(bucket, prefix, filename) # S3 URL
s3_tabular_data = pd.read_csv(data_s3_location, sep="\t", header=None)
s3_tabular_data.head()
###Output
_____no_output_____
###Markdown
For JSON files, depending on the structure, you can also use `Pandas` `read_json` function to read it if it's a flat json file.
###Code
data_json_location = "s3://{}/{}/{}".format(bucket, prefix_json, filename_json)
s3_tabular_data_json = pd.read_json(data_json_location, orient="records")
s3_tabular_data_json.head()
###Output
_____no_output_____
###Markdown
Method 3: Use AWS Native methods s3fs [S3Fs](https://s3fs.readthedocs.io/en/latest/) is a Pythonic file interface to S3. It builds on top of botocore. The top-level class S3FileSystem holds connection information and allows typical file-system style operations like cp, mv, ls, du, glob, etc., as well as put/get of local files to/from S3.
###Code
fs = s3fs.S3FileSystem()
data_s3fs_location = "s3://{}/{}/".format(bucket, prefix)
# To List all files in your accessible bucket
fs.ls(data_s3fs_location)
# open it directly with s3fs
data_s3fs_location = "s3://{}/{}/{}".format(bucket, prefix, filename) # S3 URL
with fs.open(data_s3fs_location) as f:
print(pd.read_csv(f, sep="\t", nrows=2))
###Output
_____no_output_____
###Markdown
Aggregating datasets If you would like to enhance your data with more data collected for your use cases, you can always aggregate your newly-collected data with your current dataset. We will use two datasets -- Sentiment140 and Sanders Twitter Sentiment -- to show how to aggregate data together.
###Code
prefix_tw1 = "text_sentiment140/sentiment140"
filename_tw1 = "training.1600000.processed.noemoticon.csv"
prefix_added = "text_twitter_sentiment_2"
filename_added = "full-corpus.csv"
###Output
_____no_output_____
###Markdown
Let's read in our original data and take a look at its format and schema:
###Code
data_s3_location_base = "s3://{}/{}/{}".format(bucket, prefix_tw1, filename_tw1) # S3 URL
# we will showcase with a smaller subset of data for demonstration purpose
text_data = pd.read_csv(
data_s3_location_base, header=None, encoding="ISO-8859-1", low_memory=False, nrows=10000
)
text_data.columns = ["target", "tw_id", "date", "flag", "user", "text"]
###Output
_____no_output_____
###Markdown
We have 6 columns: `date`, `text`, `flag` (the topic for which the tweet was queried), `tw_id` (the tweet's id), `user` (the user account name), and `target` (0 = neg, 4 = pos).
###Code
text_data.head(1)
###Output
_____no_output_____
###Markdown
Let's read in and take a look at the data we want to add to our original data. We will start by checking the columns of both data sets. The new data set has 5 columns: `TweetDate`, which maps to `date`; `TweetText`, which maps to `text`; `Topic`, which maps to `flag`; `TweetId`, which maps to `tw_id`; and `Sentiment`, which maps to `target`. This new data set doesn't have a user account name column, so when we aggregate the two data sets we can add this column to the data set being added and fill it with `NULL` values. You can also remove this column from the original data if it does not provide much valuable information for your use case.
###Code
data_s3_location_added = "s3://{}/{}/{}".format(bucket, prefix_added, filename_added) # S3 URL
# we will showcase with a smaller subset of data for demonstration purpose
text_data_added = pd.read_csv(
data_s3_location_added, encoding="ISO-8859-1", low_memory=False, nrows=10000
)
text_data_added.head(1)
###Output
_____no_output_____
###Markdown
Add the missing `user` column to the new data set and fill it with empty values (standing in for `NULL`)
###Code
text_data_added["user"] = ""
###Output
_____no_output_____
###Markdown
Renaming the new data set columns to combine two data sets
###Code
text_data_added.columns = ["flag", "target", "tw_id", "date", "text", "user"]
text_data_added.head(1)
###Output
_____no_output_____
###Markdown
Change the `target` column to the same format as the `target` in the original data set. Note that the `target` column in the new data set is marked as "positive", "negative", "neutral", and "irrelevant", whereas the `target` in the original data set is marked as "0" and "4". So let's map "positive" to 4, "neutral" to 2, and "negative" to 0 in our new data set so that they are consistent. "irrelevant" marks tweets that are either not in English or spam; you can either remove these if they are not valuable for your use case (in our sentiment-analysis use case we will remove them, since this text does not provide any value for predicting sentiment) or map them to -1.
###Code
# remove tweets labeled as irrelevant
text_data_added = text_data_added[text_data_added["target"] != "irrelevant"]
# convert strings to number targets
target_map = {"positive": 4, "negative": 0, "neutral": 2}
text_data_added["target"] = text_data_added["target"].map(target_map)
###Output
_____no_output_____
###Markdown
Combine the two data sets and save as one new file
###Code
text_data_new = pd.concat([text_data, text_data_added])
filename = "sentiment_full.csv"
text_data_new.to_csv(filename, index=False)
upload_to_s3(bucket, "text_twitter_sentiment_full", filename)
###Output
_____no_output_____ |
_notebooks/2020-04-24-logistic-regression.ipynb | ###Markdown
"Logistic Regression from Scratch"> "Implementing logistic regression in Python using numpy library"- toc: true- badges: true- comments: true- categories: [ML, jupyter] IntroductionLogistic Regression is a statistical method to predict qualitative response using one or more independent variables. Instead of predicting the response directly, Logistic Regression models probability that the response belongs to a particular category. In logistic regression, we use logistic function. The logistic function will always producr an S shaped curve, so we always get a sensible prediction. $p(X)$ is sometimes also called sigmoid function.$$ \log\Big(\frac{p(X)}{1-p(X)}\Big) = w^TX + c $$$$z = w^TX + c $$$$ p(X) = \frac{1}{1 + e^{-z}} $$
###Code
#collapse-hide
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
#collapse-hide
def sigmoid(z):
    '''Sigmoid function: maps any real value to the range (0, 1)'''
return 1/(1+np.exp(-z))
x = np.linspace(-10,10,100)
y = sigmoid(x)
plt.plot(x,y)
plt.title("Sigmoid Function")
plt.show()
###Output
_____no_output_____
###Markdown
Estimate Regression Coefficients A method called **maximum likelihood** is used to estimate the unknown coefficients. The idea behind this method is to estimate coefficients such that the predicted probability $\hat{p}(x_i)$ of each observation corresponds as closely as possible to the observed value of the response variable for the same observation. In other words, we estimate coefficients such that $p(X)$ yields a number close to 1 for the observations with actual response value 1 and a number close to 0 for the observations with actual response value 0. The mathematical equation for the likelihood function is$$ \ell(W) = \prod_{i;y_i = 1} p(x_i) \prod_{i;y_i = 0} ( 1 - p(x_i)) $$The $W$ estimates are chosen to maximize the likelihood function. Taking the negative log of this likelihood and averaging over observations, we arrive at the cross-entropy loss for the binary classification case.\begin{equation}L(y,\hat{y}) = -\frac{1}{N} \sum_{i=1}^N \left[ y_i \log(\hat{y_i}) + (1-y_i)\log(1-\hat{y_i}) \right]\end{equation}
###Code
def cross_entropy_loss(w,x,y):
'''Cross entropy loss function'''
z = np.dot(x,w)
h = sigmoid(z)
total_loss = np.sum(-y*np.log(h) - (1-y)*np.log(1-h)).mean()
return total_loss
###Output
_____no_output_____
###Markdown
Use the chain rule to calculate the gradient of the loss\begin{equation}\frac{\partial{L}}{\partial{w_i}} = \frac{\partial{L}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{z}} \frac{\partial{z}}{\partial{w}}\end{equation}Examining each factor in turn:\begin{equation}\frac{\partial{L}}{\partial{\hat{y_i}}} = \frac{-y_i}{\hat{y_i}} + \frac{1-y_i}{1-\hat{y_i}} \end{equation}\begin{equation}\frac{\partial{L}}{\partial{\hat{y_i}}} = \frac{\hat{y_i}-y_i}{\hat{y_i}(1-\hat{y_i})} \end{equation}\begin{equation}\frac{\partial{\hat{y}}}{\partial{z}} = \hat{y_i}(1-\hat{y_i}) \end{equation}\begin{equation}\frac{\partial{z}}{\partial{w}} = x_i \end{equation}Multiplying all the individual terms, you get \begin{equation}\frac{\partial{L}}{\partial{w_i}} = (\hat{y_i}-y_i)x_i\end{equation}
###Code
def gradient(w,x,y):
'''Gradient for cross entropy loss'''
z = np.dot(x,w)
h = sigmoid(z)
gradient = np.dot(x.T,h - y)
return gradient
###Output
_____no_output_____
###Markdown
We are going to use an algorithm called **batch gradient descent** to find the optimal weights. $$ \Delta w_{i} = -\eta \frac{\partial L}{\partial w_{i}} $$$$ w_{i} := w_{i} + \Delta w_{i} $$
###Code
def update_weights(learning_rate = 0.01, n_iters = 1000, loss_threshold = 0.001):
    ## Initialize the weights with small random values
w = np.random.rand(x.shape[1])
for epoch in range(n_iters):
loss = cross_entropy_loss(w,x,y)
grad = gradient(w,x,y)
w = w - learning_rate * grad
if epoch % 1000 == 0:
print(f"Loss after iteration {epoch} is {round(loss,2)}")
if loss < loss_threshold:
break
return(w)
def accuracy(true, probs, threshold = 0.5):
predicted = (probs > threshold).astype(int)
return 100*(predicted == true).mean()
def predict(w,x):
return sigmoid(np.dot(x,w))
###Output
_____no_output_____
###Markdown
Let's load some binary classification data from `sklearn.datasets` and split it into train and test sets.
###Code
cancer_data = load_breast_cancer()
x, x_test, y, y_test = train_test_split(cancer_data.data,
cancer_data.target,
test_size=0.25,
random_state=0)
x = normalize(x)
###Output
_____no_output_____
###Markdown
Let's call the `update_weights` function to find the optimal weights that minimize the cross-entropy loss
###Code
w = update_weights(learning_rate = 0.05, n_iters = 10000, loss_threshold = 0.001)
###Output
Loss after iteration 0 is 281.55
Loss after iteration 1000 is 87.93
Loss after iteration 2000 is 83.59
Loss after iteration 3000 is 81.78
Loss after iteration 4000 is 80.63
Loss after iteration 5000 is 79.76
Loss after iteration 6000 is 79.06
Loss after iteration 7000 is 78.46
Loss after iteration 8000 is 77.94
Loss after iteration 9000 is 77.47
###Markdown
Let's use the model to generate predictions for the test data and see what accuracy we reach.
###Code
preds = predict(w,normalize(x_test))
acc = accuracy(y_test, preds)
print(f"Accuracy on test data is {round(acc,3)}")
###Output
Accuracy on test data is 93.706
|
Spacy_NER_model.ipynb | ###Markdown
Build a Spacy NER model
###Code
# Imports needed by this cell (added for completeness); TRAIN_DATA is assumed to be
# defined earlier as a list of (text, {"entities": [(start, end, label), ...]}) pairs.
import random
import spacy
from pathlib import Path
from tqdm import tqdm

# Define variables required for the model
model = None
output_dir=Path("/content/drive/MyDrive/Data extraction from financial documents/models/model")
n_iter=100
#load the model
if model is not None:
nlp = spacy.load(model)
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank('en')
print("Created blank 'en' model")
#set up the pipeline
if 'ner' not in nlp.pipe_names:
ner = nlp.create_pipe('ner')
nlp.add_pipe(ner, last=True)
else:
ner = nlp.get_pipe('ner')
#Train the recognizer by disabling the unnecessary pipeline except for NER
for _, annotations in TRAIN_DATA:
for ent in annotations.get('entities'):
ner.add_label(ent[2])
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes): # only train NER
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
for text, annotations in tqdm(TRAIN_DATA):
nlp.update(
[text],
[annotations],
drop=0.5,
sgd=optimizer,
losses=losses)
print(losses)
# Save model
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# Load the trained model
nlp = spacy.load(output_dir)
doc = nlp("Trad receiable 819.0")
for ent in doc.ents:
print(ent.label_, ent.text)
# Run predictions
path = "/content/drive/MyDrive/Data extraction from financial documents/"
import os
os.chdir(path)
!python predict.py
###Output
_____no_output_____ |
examples/.ipynb_checkpoints/Mutation Effect Prediction-checkpoint.ipynb | ###Markdown
We will walk through the basic functions of loading up a model and predicting the effects of mutations. Downloading pretrained parameters Please first download the pretrained parameters in the "Downloading pretrained parameters.ipynb" notebook. Loading the model
###Code
import sys  # needed for the path setup below

sys.path.insert(0, "../DeepSequence")
import model
import helper
import train
###Output
_____no_output_____
###Markdown
Mutation effect prediction Mutation effect prediction helper functions are always with respect to the focus sequence of the alignment. We can ask for a prediction of mutation effect individually. For reliable mutation effect prediction results, we recommend taking 500-2000 Monte Carlo samples from the model (with the N_pred_iterations parameter). We can predict the effects of single, double, triple mutants, etc. Mutations are organized as a list of tuples, where the tuples are (uniprot position, wt amino acid, mutant amino acid). PABP First let's load up a model. We don't have to calculate sequence weights here because we are not training a model and this can be slow on the CPU. In the "Explore model parameters.ipynb" notebook, the helper.py code was amended to prespecify a dataset used for the DataHelper class. However, we can pass in an alignment name and a few more parameters so we don't have to modify the helper.py file.
###Code
data_params = {"alignment_file":"datasets/PABP_YEAST_hmmerbit_plmc_n5_m30_f50_t0.2_r115-210_id100_b48.a2m"}
pabp_data_helper = helper.DataHelper(
alignment_file=data_params["alignment_file"],
working_dir=".",
calc_weights=False
)
model_params = {
"batch_size" : 100,
"encode_dim_zero" : 1500,
"encode_dim_one" : 1500,
"decode_dim_zero" : 100,
"decode_dim_one" : 500,
"n_patterns" : 4,
"n_latent" : 30,
"logit_p" : 0.001,
"sparsity" : "logit",
"encode_nonlin" : "relu",
"decode_nonlin" : "relu",
"final_decode_nonlin": "sigmoid",
"output_bias" : True,
"final_pwm_scale" : True,
"conv_pat" : True,
"d_c_size" : 40
}
pabp_vae_model = model.VariationalAutoencoder(pabp_data_helper,
batch_size = model_params["batch_size"],
encoder_architecture = [model_params["encode_dim_zero"],
model_params["encode_dim_one"]],
decoder_architecture = [model_params["decode_dim_zero"],
model_params["decode_dim_one"]],
n_latent = model_params["n_latent"],
n_patterns = model_params["n_patterns"],
convolve_patterns = model_params["conv_pat"],
conv_decoder_size = model_params["d_c_size"],
logit_p = model_params["logit_p"],
sparsity = model_params["sparsity"],
encode_nonlinearity_type = model_params["encode_nonlin"],
decode_nonlinearity_type = model_params["decode_nonlin"],
final_decode_nonlinearity = model_params["final_decode_nonlin"],
output_bias = model_params["output_bias"],
final_pwm_scale = model_params["final_pwm_scale"],
working_dir = ".")
print ("Model built")
###Output
Encoding sequences
Neff = 151528.0
Data Shape = (151528, 82, 20)
Model built
###Markdown
Load up the parameters of a pretrained model in the 'params' folder.
###Code
file_prefix = "PABP_YEAST"
pabp_vae_model.load_parameters(file_prefix=file_prefix)
print ("Parameters loaded")
print (pabp_data_helper.delta_elbo(pabp_vae_model,[(126,"G","A")], N_pred_iterations=500))
print (pabp_data_helper.delta_elbo(pabp_vae_model,[(126,"G","A"), (137,"I","P")], N_pred_iterations=500))
print (pabp_data_helper.delta_elbo(pabp_vae_model,[(126,"G","A"), (137,"I","P"), (155,"S","A")], N_pred_iterations=500))
###Output
-16.058655309
###Markdown
We can predict the effects of all single mutations. This and the function below are preferred because they can take advantage of speed-ups from minibatching the mutation data.
###Code
pabp_full_matr_mutant_name_list, pabp_full_matr_delta_elbos \
= pabp_data_helper.single_mutant_matrix(pabp_vae_model, N_pred_iterations=500)
print (pabp_full_matr_mutant_name_list[0], pabp_full_matr_delta_elbos[0])
###Output
('K123A', 0.5887526915685584)
###Markdown
We can also predict the effect of mutations from a file in batched mode.
###Code
pabp_custom_matr_mutant_name_list, pabp_custom_matr_delta_elbos \
= pabp_data_helper.custom_mutant_matrix("mutations/PABP_YEAST_Fields2013-singles.csv", \
pabp_vae_model, N_pred_iterations=500)
print (pabp_custom_matr_mutant_name_list[12], pabp_custom_matr_delta_elbos[12])
###Output
('N127D', -6.426795215037501)
###Markdown
Let's also make a quick function to calculate the spearman rho from a mutation file.
###Code
def generate_spearmanr(mutant_name_list, delta_elbo_list, mutation_filename, phenotype_name):
measurement_df = pd.read_csv(mutation_filename, sep=',')
mutant_list = measurement_df.mutant.tolist()
expr_values_ref_list = measurement_df[phenotype_name].tolist()
mutant_name_to_pred = {mutant_name_list[i]:delta_elbo_list[i] for i in range(len(delta_elbo_list))}
# If there are measurements
wt_list = []
preds_for_spearmanr = []
measurements_for_spearmanr = []
for i,mutant_name in enumerate(mutant_list):
expr_val = expr_values_ref_list[i]
# Make sure we have made a prediction for that mutant
if mutant_name in mutant_name_to_pred:
multi_mut_name_list = mutant_name.split(':')
# If there is no measurement for that mutant, pass over it
if np.isnan(expr_val):
pass
# If it was a codon change, add it to the wt vals to average
elif mutant_name[0] == mutant_name[-1] and len(multi_mut_name_list) == 1:
wt_list.append(expr_values_ref_list[i])
# If it is labeled as the wt sequence, add it to the average list
elif mutant_name == 'wt' or mutant_name == 'WT':
wt_list.append(expr_values_ref_list[i])
else:
measurements_for_spearmanr.append(expr_val)
preds_for_spearmanr.append(mutant_name_to_pred[mutant_name])
if wt_list != []:
        measurements_for_spearmanr.append(np.mean(wt_list))
preds_for_spearmanr.append(0.0)
num_data = len(measurements_for_spearmanr)
spearman_r, spearman_pval = spearmanr(measurements_for_spearmanr, preds_for_spearmanr)
print ("N: "+str(num_data)+", Spearmanr: "+str(spearman_r)+", p-val: "+str(spearman_pval))
generate_spearmanr(pabp_custom_matr_mutant_name_list, pabp_custom_matr_delta_elbos, \
"mutations/PABP_YEAST_Fields2013-singles.csv", "log")
###Output
N: 1188, Spearmanr: 0.6509305755221257, p-val: 4.0800344026520655e-144
###Markdown
PDZ
###Code
data_params = {"alignment_file":"datasets/DLG4_RAT_hmmerbit_plmc_n5_m30_f50_t0.2_r300-400_id100_b50.a2m"}
pdz_data_helper = helper.DataHelper(
alignment_file=data_params["alignment_file"],
working_dir=".",
calc_weights=False
)
pdz_vae_model = model.VariationalAutoencoder(pdz_data_helper,
batch_size = model_params["batch_size"],
encoder_architecture = [model_params["encode_dim_zero"],
model_params["encode_dim_one"]],
decoder_architecture = [model_params["decode_dim_zero"],
model_params["decode_dim_one"]],
n_latent = model_params["n_latent"],
n_patterns = model_params["n_patterns"],
convolve_patterns = model_params["conv_pat"],
conv_decoder_size = model_params["d_c_size"],
logit_p = model_params["logit_p"],
sparsity = model_params["sparsity"],
encode_nonlinearity_type = model_params["encode_nonlin"],
decode_nonlinearity_type = model_params["decode_nonlin"],
final_decode_nonlinearity = model_params["final_decode_nonlin"],
output_bias = model_params["output_bias"],
final_pwm_scale = model_params["final_pwm_scale"],
working_dir = ".")
print ("Model built")
file_prefix = "DLG4_RAT"
pdz_vae_model.load_parameters(file_prefix=file_prefix)
print ("Parameters loaded\n\n")
pdz_custom_matr_mutant_name_list, pdz_custom_matr_delta_elbos \
= pdz_data_helper.custom_mutant_matrix("mutations/DLG4_RAT_Ranganathan2012.csv", \
pdz_vae_model, N_pred_iterations=500)
generate_spearmanr(pdz_custom_matr_mutant_name_list, pdz_custom_matr_delta_elbos, \
"mutations/DLG4_RAT_Ranganathan2012.csv", "CRIPT")
###Output
Encoding sequences
Neff = 102246.0
Data Shape = (102246, 84, 20)
Model built
Parameters loaded
N: 1577, Spearmanr: 0.6199244929585085, p-val: 4.31636475994128e-168
###Markdown
B-lactamase Larger proteins with more mutations to predict can take much longer to run. For these, we recommend GPU-enabled computation.
###Code
data_params = {"dataset":"BLAT_ECOLX"}
blat_data_helper = helper.DataHelper(
dataset=data_params["dataset"],
working_dir=".",
calc_weights=False
)
blat_vae_model = model.VariationalAutoencoder(blat_data_helper,
batch_size = model_params["batch_size"],
encoder_architecture = [model_params["encode_dim_zero"],
model_params["encode_dim_one"]],
decoder_architecture = [model_params["decode_dim_zero"],
model_params["decode_dim_one"]],
n_latent = model_params["n_latent"],
n_patterns = model_params["n_patterns"],
convolve_patterns = model_params["conv_pat"],
conv_decoder_size = model_params["d_c_size"],
logit_p = model_params["logit_p"],
sparsity = model_params["sparsity"],
encode_nonlinearity_type = model_params["encode_nonlin"],
decode_nonlinearity_type = model_params["decode_nonlin"],
final_decode_nonlinearity = model_params["final_decode_nonlin"],
output_bias = model_params["output_bias"],
final_pwm_scale = model_params["final_pwm_scale"],
working_dir = ".")
print ("Model built")
file_prefix = "BLAT_ECOLX"
blat_vae_model.load_parameters(file_prefix=file_prefix)
print ("Parameters loaded\n\n")
blat_custom_matr_mutant_name_list, blat_custom_matr_delta_elbos \
= blat_data_helper.custom_mutant_matrix("mutations/BLAT_ECOLX_Ranganathan2015.csv", \
blat_vae_model, N_pred_iterations=500)
generate_spearmanr(blat_custom_matr_mutant_name_list, blat_custom_matr_delta_elbos, \
"mutations/BLAT_ECOLX_Ranganathan2015.csv", "2500")
###Output
Encoding sequences
Neff = 8355.0
Data Shape = (8355, 253, 20)
Model built
Parameters loaded
N: 4807, Spearmanr: 0.743886370415797, p-val: 0.0
|
notebooks/amqdn-0.2-baseline-grail-qa.ipynb | ###Markdown
Baseline - Grail QA Since we have labels already, it makes sense to consider what performance we can get with straight classification. Topic modeling is fine for large datasets where we don't have the luxury of labeled data, but let's see what a simpler classifier can do.
###Code
import pandas as pd
pd.options.display.max_colwidth = 0
from src.data.utils import *
train = pd.DataFrame(get_domains_and_questions('train', 'grail_qa'))
dev = pd.DataFrame(get_domains_and_questions('dev', 'grail_qa'))
domains = ['medicine', 'computer']
train = filter_domains(train, domains)
dev = filter_domains(dev, domains)
train.loc[train.domains =='medicine'].sample(5)
train.loc[train.domains =='computer'].sample(5)
print(f'---Train Distribution---\n{train.domains.value_counts()}')
print(f'---Dev Distribution---\n{dev.domains.value_counts()}')
###Output
---Train Distribution---
medicine 2002
computer 1923
Name: domains, dtype: int64
---Dev Distribution---
computer 190
medicine 178
Name: domains, dtype: int64
###Markdown
Fairly balanced dataset considering only these two `subdomains`. We'll see how that changes when we incorporate others.
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
xt = tfidf.fit_transform(train.questions)
xd = tfidf.transform(dev.questions)
import numpy as np
def transform_labels(labels):
labels[np.where(labels == 'medicine')] = 0.
labels[np.where(labels == 'computer')] = 1.
return labels.astype(np.float64)
yt = transform_labels(train.domains.values)
yd = transform_labels(dev.domains.values)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(xt, yt)
from sklearn.metrics import classification_report
print(classification_report(yd, clf.predict(xd)))
###Output
precision recall f1-score support
0.0 1.00 1.00 1.00 178
1.0 1.00 1.00 1.00 190
accuracy 1.00 368
macro avg 1.00 1.00 1.00 368
weighted avg 1.00 1.00 1.00 368
|
master/_downloads/9a20dadaa600955e1b6e24df368031ce/plot_compute_emd.ipynb | ###Markdown
Plot multiple EMD Shows how to compute multiple EMD and Sinkhorn with two different ground metrics and plot their values for different distributions.
###Code
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pylab as pl
import ot
from ot.datasets import make_1D_gauss as gauss
###Output
_____no_output_____
###Markdown
Generate data
###Code
n = 100 # nb bins
n_target = 50 # nb target distributions
# bin positions
x = np.arange(n, dtype=np.float64)
lst_m = np.linspace(20, 90, n_target)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
B = np.zeros((n, n_target))
for i, m in enumerate(lst_m):
B[:, i] = gauss(n, m=m, s=5)
# loss matrix and normalization
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'euclidean')
M /= M.max()
M2 = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'sqeuclidean')
M2 /= M2.max()
###Output
_____no_output_____
###Markdown
Plot data
###Code
pl.figure(1)
pl.subplot(2, 1, 1)
pl.plot(x, a, 'b', label='Source distribution')
pl.title('Source distribution')
pl.subplot(2, 1, 2)
pl.plot(x, B, label='Target distributions')
pl.title('Target distributions')
pl.tight_layout()
###Output
_____no_output_____
###Markdown
Compute EMD for the different losses
###Code
d_emd = ot.emd2(a, B, M) # direct computation of EMD
d_emd2 = ot.emd2(a, B, M2) # direct computation of EMD with loss M2
pl.figure(2)
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.title('EMD distances')
pl.legend()
###Output
_____no_output_____
###Markdown
Compute Sinkhorn for the different losses
###Code
reg = 1e-2
d_sinkhorn = ot.sinkhorn2(a, B, M, reg)
d_sinkhorn2 = ot.sinkhorn2(a, B, M2, reg)
pl.figure(2)
pl.clf()
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.plot(d_sinkhorn, '+', label='Euclidean Sinkhorn')
pl.plot(d_sinkhorn2, '+', label='Squared Euclidean Sinkhorn')
pl.title('EMD distances')
pl.legend()
pl.show()
###Output
_____no_output_____ |
notebooks/estudos_python/machine_learning/naive-bayes/classificacao_naive-bayes3.ipynb | ###Markdown
**NAIVE BAYES** Based on Bayes' theorem (probabilistic analysis). Most common applications: - Spam detection - Emotion detection in sentences - Document classification. Naive Bayes takes the table of predictor data and builds a table of probabilities, which is then used as the basis for classifying new data. Advantages: - Fast compared to more complex approaches (e.g. neural networks) - Simple - Able to work with high-dimensional data (many attributes) - Good predictions on small datasets (400 - 1000 records). Disadvantages: - Assumes that the predictor attributes are completely independent, which is not always true.
###Code
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.compose import make_column_transformer
# load the census dataset
base = pd.read_csv('../../res/census.csv')
base.shape
# split the data into predictors and class labels
previsores = base.iloc[:, 0:14].values
classificadores = base.iloc[:, 14].values
# make a copy of the original data for the additional tests below
previsores_escalonados=previsores.copy()
# apply corrections to the census data
# encode the categorical columns as discrete (integer) values
labelencoder_prev = LabelEncoder()
previsores[:, 1] = labelencoder_prev.fit_transform(previsores[:, 1])
previsores[:, 3] = labelencoder_prev.fit_transform(previsores[:, 3])
previsores[:, 5] = labelencoder_prev.fit_transform(previsores[:, 5])
previsores[:, 6] = labelencoder_prev.fit_transform(previsores[:, 6])
previsores[:, 7] = labelencoder_prev.fit_transform(previsores[:, 7])
previsores[:, 8] = labelencoder_prev.fit_transform(previsores[:, 8])
previsores[:, 9] = labelencoder_prev.fit_transform(previsores[:, 9])
previsores[:, 13] = labelencoder_prev.fit_transform(previsores[:, 13])
preprocess = make_column_transformer(( OneHotEncoder(categories='auto'), [1,3,5,6,7,8,9,13] ),remainder="passthrough")
previsores = preprocess.fit_transform(previsores).toarray()
# scale the non-discrete values in the copy of the predictors (this should not be done for every feature, at the risk of degrading the algorithm's accuracy)
# MinMaxScaler gave better results for this case
scaler = MinMaxScaler(feature_range=(0, 200))
#scaler = StandardScaler()
# encode the categorical columns of the copied data as discrete (integer) values
labelencoder_prev = LabelEncoder()
previsores_escalonados[:, 1] = labelencoder_prev.fit_transform(previsores_escalonados[:, 1])
previsores_escalonados[:, 3] = labelencoder_prev.fit_transform(previsores_escalonados[:, 3])
previsores_escalonados[:, 5] = labelencoder_prev.fit_transform(previsores_escalonados[:, 5])
previsores_escalonados[:, 6] = labelencoder_prev.fit_transform(previsores_escalonados[:, 6])
previsores_escalonados[:, 7] = labelencoder_prev.fit_transform(previsores_escalonados[:, 7])
previsores_escalonados[:, 8] = labelencoder_prev.fit_transform(previsores_escalonados[:, 8])
previsores_escalonados[:, 9] = labelencoder_prev.fit_transform(previsores_escalonados[:, 9])
previsores_escalonados[:, 13] = labelencoder_prev.fit_transform(previsores_escalonados[:, 13])
print("\nVisualizando estatisticas dos dados nao discretos antes do escalonamento\n")
for x in range(3):
print('coluna ',x,"\n")
print(previsores_escalonados[:,[4,10,12]][x].min())
print(previsores_escalonados[:,[4,10,12]][x].max())
print(previsores_escalonados[:,[4,10,12]][x].mean())
print(previsores_escalonados[:,[4,10,12]][x].var())
print("\n")
# scale the non-discrete columns of the copy of the original data (several approaches were tried to determine the best result)
#previsores_escalonados[:,[2,4,10,11,12]] = scaler.fit_transform(previsores_escalonados[:,[2,4,10,11,12]])
# tests with only a few scaled columns
previsores_escalonados[:,[12]] = scaler.fit_transform(previsores_escalonados[:,[12]])
previsores_escalonados[:,[10]] = scaler.fit_transform(previsores_escalonados[:,[10]])
previsores_escalonados[:,[4]] = scaler.fit_transform(previsores_escalonados[:,[4]])
print("\nVisualizando estatisticas dos dados nao discretos depois do escalonamento\n")
for x in range(3):
print('coluna ',x,"\n")
print(previsores_escalonados[:,[4,10,12]][x].min())
print(previsores_escalonados[:,[4,10,12]][x].max())
print(previsores_escalonados[:,[4,10,12]][x].mean())
print(previsores_escalonados[:,[4,10,12]][x].var())
print("\n")
# one-hot encode the copied data (for the discrete values)
preprocess = make_column_transformer(( OneHotEncoder(categories='auto'), [1,3,5,6,7,8,9,13] ),remainder="passthrough")
previsores_escalonados = preprocess.fit_transform(previsores_escalonados).toarray()
# split the scaled and unscaled predictors into training and test sets
previsores_treinamento, previsores_teste, classificadores_treinamento1, classificadores_teste1 = train_test_split(previsores, classificadores, test_size=0.15, random_state=0)
previsores_escalonados_treinamento, previsores_escalonados_teste, classificadores_treinamento, classificadores_teste = train_test_split(previsores_escalonados, classificadores, test_size=0.15, random_state=0)
# instantiate naive Bayes with scikit-learn
classificador = GaussianNB(priors=(.75,.25))
classificador.fit(previsores_escalonados_treinamento, classificadores_treinamento)
# run predictions on the (scaled copy) test data
previsoes_dados_escalonados = classificador.predict(previsores_escalonados_teste)
# fit with the original (unscaled) data
classificador.fit(previsores_treinamento, classificadores_treinamento1)
previsoes = classificador.predict(previsores_teste)
# evaluate this instance of the algorithm
# accuracy by itself does not mean much; other metrics need to be checked
precisao_escalonados = accuracy_score(classificadores_teste, previsoes_dados_escalonados)
precisao = accuracy_score(classificadores_teste1, previsoes)
# one of these metrics is the confusion matrix ... it shows the algorithm's performance for each class
matriz_escalonados = confusion_matrix(classificadores_teste, previsoes_dados_escalonados)
matriz = confusion_matrix(classificadores_teste1, previsoes)
# scikit-learn also has a utility that provides a more detailed report...
report_escalonados = classification_report(classificadores_teste, previsoes_dados_escalonados)
report = classification_report(classificadores_teste1, previsoes)
print("Precisão dados normais / escalonados :\n")
print(precisao,'/',precisao_escalonados)
print("\nMatriz de confusão dados normais / escalonados:\n")
print(matriz)
print("\n")
print(matriz_escalonados)
print("\nReport dados normais / escalonados:\n")
print (report)
print("\n")
print (report_escalonados)
###Output
Precisão dados normais / escalonados :
0.7950870010235415 / 0.7985670419651996
Matriz de confusão dados normais / escalonados:
[[3515 178]
[ 823 369]]
[[3565 128]
[ 856 336]]
Report dados normais / escalonados:
precision recall f1-score support
<=50K 0.81 0.95 0.88 3693
>50K 0.67 0.31 0.42 1192
accuracy 0.80 4885
macro avg 0.74 0.63 0.65 4885
weighted avg 0.78 0.80 0.77 4885
precision recall f1-score support
<=50K 0.81 0.97 0.88 3693
>50K 0.72 0.28 0.41 1192
accuracy 0.80 4885
macro avg 0.77 0.62 0.64 4885
weighted avg 0.79 0.80 0.76 4885
|
macros/Macros.ipynb | ###Markdown
Macros This notebook shows how to use *macro* commands in Jupyter. What is a *macro*? It is just a named code snippet. Similarly to functions, we can use macros to wrap frequently used code. For example, we can define a macro that will load all the libraries for us. Step 1: Define macro To save some code as a macro, we need to put that code in a cell and run it.
###Code
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook
import os
import sys
import os.path
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rc
from cycler import cycler
%matplotlib inline
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['#ff0000', '#0000ff', '#00ffff','#ffA300', '#00ff00',
'#ff00ff', '#990000', '#009999', '#999900', '#009900', '#009999'])
rc('font', size=16)
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=False)
rc('figure', figsize=(12, 10))
rc('axes', linewidth=.5)
rc('lines', linewidth=1.75)
print('The libraries have been loaded!')
###Output
The libraries have been loaded!
###Markdown
Now you need to remember the number inside the square brackets of `In []`. Then, to save the code from that cell, use the macro magic:```%macro __imp ```
###Code
%macro -q __imp 1
###Output
_____no_output_____
###Markdown
Now try it!
###Code
__imp
###Output
The libraries have been loaded!
###Markdown
Step 2: save macro So far we've only created a macro, but it will be lost when the kernel is restarted. We need to somehow store it, so that we can load it easily later. It can be done with the `%store` magic.
###Code
%store __imp
###Output
Stored '__imp' (Macro)
###Markdown
Now `__imp` is saved in a kind of global Jupyter memory. You can list all the stored variables like this:
###Code
%store
###Output
Stored variables and their in-db values:
__imp -> IPython.macro.Macro("import numpy as np\nimport pa
###Markdown
Now **restart the kernel** and get back to this cell without running the previous ones. To run the stored macro you need to retrieve the macro first with the following line:
###Code
%store -r __imp
###Output
_____no_output_____
###Markdown
And only then call the macro:
###Code
__imp
###Output
The libraries have been loaded!
###Markdown
Step 3: auto restore macro So you need to use as many as 2 cells! But, fortunately, Jupyter can load the stored variables (and macros) automatically. To enable it you need to update your IPython profile [config](http://ipython.readthedocs.io/en/stable/development/config.html). If you've never heard of it, then it is not yet created; otherwise you should know where it lives. On Coursera's notebooks we will create it here: `~/.ipython/profile_default/ipython_config.py` and tell IPython that we want it to automatically restore stored variables.```c.StoreMagics.autorestore = True```
###Code
!echo "c = get_config()\nc.StoreMagics.autorestore = True" > ~/.ipython/profile_default/ipython_config.py
!cat ~/.ipython/profile_default/ipython_config.py
###Output
c = get_config()
c.StoreMagics.autorestore = True
###Markdown
That's it! Now **restart your notebook (kernel)** and **define and store the macro** again (step 1 and the first code cell from step 2). And finally, to test it, **restart the kernel** again. Now you can immediately access the `__imp` macro, so all the libraries are loaded with a 5-character line of code.
###Code
__imp
###Output
The libraries have been loaded!
|
19-21-itertools/my_code/Itertools_prmutations_combinations.ipynb | ###Markdown
`combinations` just gives a list of combos - it doesn't care about order - that's where `permutations` comes in! `permutations` returns the results in every order they can appear in.
###Code
from itertools import combinations, permutations  # imports needed for this cell
friends = ["mike", "bob", "anna"]  # assumed example list; defined earlier in the full notebook

print(list(combinations(friends, 2)))
print(list(permutations(friends, 2)))
# mike, bob and bob, mike
###Output
_____no_output_____ |
module4-classification-metrics/Unit_2_Sprint_2_Module_4_LESSON.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 2, Module 4*--- Classification Metrics- get and interpret the **confusion matrix** for classification models- use classification metrics: **precision, recall**- understand the relationships between precision, recall, **thresholds, and predicted probabilities**, to help **make decisions and allocate budgets**- Get **ROC AUC** (Receiver Operating Characteristic, Area Under the Curve) SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries- category_encoders- ipywidgets- matplotlib- numpy- pandas- scikit-learn- seaborn
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
###Output
_____no_output_____
###Markdown
Get and interpret the confusion matrix for classification models Overview First, load the Tanzania Waterpumps data and fit a model. (This code isn't new, we've seen it all before.)
###Code
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
###Output
Validation Accuracy 0.8140409527789386
###Markdown
Follow Along Scikit-learn added a [**`plot_confusion_matrix`**](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html) function in version 0.22!
###Code
import sklearn
sklearn.__version__
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical')
###Output
_____no_output_____
###Markdown
How many correct predictions were made?
###Code
correct_predictions = 7005 + 332 + 4351
correct_predictions
###Output
_____no_output_____
###Markdown
How many total predictions were made?
###Code
total_predictions = 7005 + 171 + 622 + 555 + 332 + 156 + 1098 + 68 + 4351
total_predictions
###Output
_____no_output_____
###Markdown
What was the classification accuracy?
###Code
correct_predictions / total_predictions
accuracy_score(y_val, y_pred)
sum(y_pred == y_val) / len(y_pred)
###Output
_____no_output_____
###Markdown
Use classification metrics: precision, recall Overview [Scikit-Learn User Guide — Classification Report](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-report)
###Code
from sklearn.metrics import classification_report
print (classification_report(y_val, y_pred))
###Output
precision recall f1-score support
functional 0.81 0.90 0.85 7798
functional needs repair 0.58 0.32 0.41 1043
non functional 0.85 0.79 0.82 5517
accuracy 0.81 14358
macro avg 0.75 0.67 0.69 14358
weighted avg 0.81 0.81 0.81 14358
###Markdown
Wikipedia, [Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall)> Both precision and recall are based on an understanding and measure of relevance.> Suppose a computer program for recognizing dogs in photographs identifies 8 dogs in a picture containing 12 dogs and some cats. Of the 8 identified as dogs, 5 actually are dogs (true positives), while the rest are cats (false positives). The program's precision is 5/8 while its recall is 5/12.> High precision means that an algorithm returned substantially more relevant results than irrelevant ones, while high recall means that an algorithm returned most of the relevant results. Follow Along [We can get precision & recall from the confusion matrix](https://en.wikipedia.org/wiki/Precision_and_recallDefinition_(classification_context))
###Code
cm = plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical')
cm
# precision = true_positives / (true_positives + false_positives)
# recall = true_positives / (true_positives + false_negatives)
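# Worked version of the dog-photo example quoted above (an added illustration):
# 8 predicted dogs, 5 of them actually dogs (true positives), 12 dogs in the picture.
tp, fp, fn = 5, 3, 7
print('precision:', tp / (tp + fp))   # 5/8
print('recall:   ', tp / (tp + fn))   # 5/12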
###Output
_____no_output_____
###Markdown
How many correct predictions of "non functional"?
###Code
correct_predictions_nonfunctional = 4351
###Output
_____no_output_____
###Markdown
How many total predictions of "non functional"?
###Code
total_predictions_nonfunctional = 622 + 156 + 4351
###Output
_____no_output_____
###Markdown
What's the precision for "non functional"?
###Code
correct_predictions_nonfunctional / total_predictions_nonfunctional
print (classification_report(y_val, y_pred))
###Output
precision recall f1-score support
functional 0.81 0.90 0.85 7798
functional needs repair 0.58 0.32 0.41 1043
non functional 0.85 0.79 0.82 5517
accuracy 0.81 14358
macro avg 0.75 0.67 0.69 14358
weighted avg 0.81 0.81 0.81 14358
###Markdown
How many actual "non functional" waterpumps?
###Code
actual_nonfunctional = 1098 + 68 + 4351
###Output
_____no_output_____
###Markdown
What's the recall for "non functional"?
###Code
correct_predictions_nonfunctional / actual_nonfunctional
###Output
_____no_output_____
###Markdown
Understand the relationships between precision, recall, thresholds, and predicted probabilities, to help make decisions and allocate budgets Overview Imagine this scenario...Suppose there are over 14,000 waterpumps that you _do_ have some information about, but you _don't_ know whether they are currently functional, or functional but need repair, or non-functional.
###Code
len(test)
###Output
_____no_output_____
###Markdown
**You have the time and resources to go to just 2,000 waterpumps for proactive maintenance.** You want to predict which 2,000 are most likely non-functional or in need of repair, to help you triage and prioritize your waterpump inspections. You have historical inspection data for over 59,000 other waterpumps, which you'll use to fit your predictive model.
###Code
len(train) + len(val)
###Output
_____no_output_____
###Markdown
You have historical inspection data for over 59,000 other waterpumps, which you'll use to fit your predictive model. Based on this historical data, if you randomly chose waterpumps to inspect, then about 46% of the waterpumps would need repairs, and 54% would not need repairs.
###Code
y_train.value_counts(normalize=True)
2000 * 0.46
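# Under random inspection, about 46% of 2,000 visits (~920 pumps) would find a pump needing repair.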
###Output
_____no_output_____
###Markdown
**Can you do better than random at prioritizing inspections?** In this scenario, we should define our target differently. We want to identify which waterpumps are non-functional _or_ are functional but need repair:
###Code
y_train = y_train != 'functional'
y_val = y_val != 'functional'
y_train.value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
*We* already made our validation set the same size as our test set.
###Code
len(val) == len(test)
###Output
_____no_output_____
###Markdown
We can refit our model using the redefined target. Then make predictions for the validation set.
###Code
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
###Output
_____no_output_____
###Markdown
Follow Along Look at the confusion matrix:
###Code
plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical');
###Output
_____no_output_____
###Markdown
How many total predictions of "True" ("non functional" or "functional needs repair") ?
###Code
5032 + 977
###Output
_____no_output_____
###Markdown
We don't have "budget" to take action on all these predictions- But we can get predicted probabilities, to rank the predictions. - Then change the threshold, to change the number of positive predictions, based on our budget. Get predicted probabilities and plot the distribution
###Code
pipeline.predict_proba(X_val)
pipeline.predict(X_val)
#Predicted probabilites for the positive class
pipeline.predict_proba(X_val)[:, 1]
threshold = 0.925
sum(pipeline.predict_proba(X_val)[:, 1] > threshold)
###Output
_____no_output_____
###Markdown
Change the threshold
###Code
import seaborn as sns
y_pred_proba = pipeline.predict_proba(X_val)[:, 1]
ax = sns.distplot(y_pred_proba)
threshold = 0.9
ax.axvline(threshold, color='red')
###Output
_____no_output_____
###Markdown
Or, get exactly 2,000 positive predictions Identify the 2,000 waterpumps in the validation set with the highest predicted probabilities.
###Code
from ipywidgets import interact, fixed
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def my_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
def set_threshold(y_true, y_pred_proba, threshold=0.5):
y_pred = y_pred_proba > threshold
ax = sns.distplot(y_pred_proba)
ax.axvline(threshold, color='red')
plt.show()
print(classification_report(y_true, y_pred))
my_confusion_matrix(y_true, y_pred)
interact(set_threshold,
y_true=fixed(y_val),
y_pred_proba=fixed(y_pred_proba),
threshold=(0, 1, 0.02));
# accuracy 0.83
# accuracy 0.70
###Output
_____no_output_____
###Markdown
Most of these top 2,000 waterpumps will be relevant recommendations, meaning `y_val==True`, meaning the waterpump is non-functional or needs repairs.Some of these top 2,000 waterpumps will be irrelevant recommendations, meaning `y_val==False`, meaning the waterpump is functional and does not need repairs.Let's look at a random sample of 50 out of these top 2,000:
###Code
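# A sketch of one way to do this (added; assumes `y_pred_proba` and `y_val` from above):
# rank the validation rows by predicted probability, keep the top 2,000, then sample 50.
results = pd.DataFrame({'prob': y_pred_proba, 'actual': y_val.values})
top_2000 = results.sort_values(by='prob', ascending=False).head(2000)
top_2000.sample(n=50, random_state=42)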
###Output
_____no_output_____
###Markdown
So how many of our recommendations were relevant? ...
###Code
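# Count the relevant recommendations among the top 2,000
# (assumes `top_2000` from the sketch above; relevant means actual == True).
print('Relevant recommendations in the top 2,000:', top_2000['actual'].sum())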
###Output
_____no_output_____
###Markdown
What's the precision for this subset of 2,000 predictions?
###Code
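# Precision for this subset is precision@k with k = 2,000
# (assumes `top_2000` from the sketch above).
print('Precision @ 2,000:', top_2000['actual'].sum() / len(top_2000))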
###Output
_____no_output_____
###Markdown
In this scenario ... Accuracy _isn't_ the best metric!Instead, change the threshold, to change the number of positive predictions, based on the budget. (You have the time and resources to go to just 2,000 waterpumps for proactive maintenance.)Then, evaluate with the precision for "non functional"/"functional needs repair".This is conceptually like **Precision@K**, where k=2,000.Read more here: [Recall and Precision at k for Recommender Systems: Detailed Explanation with examples](https://medium.com/@m_n_malaeb/recall-and-precision-at-k-for-recommender-systems-618483226c54)> Precision at k is the proportion of recommended items in the top-k set that are relevant> Mathematically precision@k is defined as: `Precision@k = ( of recommended items @k that are relevant) / ( of recommended items @k)`> In the context of recommendation systems we are most likely interested in recommending top-N items to the user. So it makes more sense to compute precision and recall metrics in the first N items instead of all the items. Thus the notion of precision and recall at k where k is a user definable integer that is set by the user to match the top-N recommendations objective.We asked, can you do better than random at prioritizing inspections?If we had randomly chosen waterpumps to inspect, we estimate that only 920 waterpumps would be repaired after 2,000 maintenance visits. (46%)But using our predictive model, in the validation set, we succesfully identified over 1,900 waterpumps in need of repair!So we will use this predictive model with the dataset of over 14,000 waterpumps that we _do_ have some information about, but we _don't_ know whether they are currently functional, or functional but need repair, or non-functional.We will predict which 2,000 are most likely non-functional or in need of repair.We estimate that approximately 1,900 waterpumps will be repaired after these 2,000 maintenance visits.So we're confident that our predictive model will help triage and prioritize waterpump inspections. But ...This metric (~1,900 waterpumps repaired after 2,000 maintenance visits) is specific for _one_ classification problem and _one_ possible trade-off.Can we get an evaluation metric that is generic for _all_ classification problems and _all_ possible trade-offs?Yes — the most common such metric is **ROC AUC.** Get ROC AUC (Receiver Operating Characteristic, Area Under the Curve)[Wikipedia explains,](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) "A receiver operating characteristic curve, or ROC curve, is a graphical plot that illustrates the diagnostic ability of a binary classifier system as its discrimination threshold is varied. **The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings.**"ROC AUC is the area under the ROC curve. [It can be interpreted](https://stats.stackexchange.com/questions/132777/what-does-auc-stand-for-and-what-is-it) as "the expectation that a uniformly drawn random positive is ranked before a uniformly drawn random negative." ROC AUC measures **how well a classifier ranks predicted probabilities.** So, when you get your classifier’s ROC AUC score, you need to **use predicted probabilities, not discrete predictions.**ROC AUC ranges **from 0 to 1.** Higher is better. 
A naive majority class **baseline** will have an ROC AUC score of **0.5.** Scikit-Learn docs- [User Guide: Receiver operating characteristic (ROC)](https://scikit-learn.org/stable/modules/model_evaluation.htmlreceiver-operating-characteristic-roc)- [sklearn.metrics.roc_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html)- [sklearn.metrics.roc_auc_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html) More links- [ROC curves and Area Under the Curve explained](https://www.dataschool.io/roc-curves-and-auc-explained/)- [The philosophical argument for using ROC curves](https://lukeoakdenrayner.wordpress.com/2018/01/07/the-philosophical-argument-for-using-roc-curves/)
###Code
# "The ROC curve is created by plotting the true positive rate (TPR)
# against the false positive rate (FPR)
# at various threshold settings."
# Use scikit-learn to calculate TPR & FPR at various thresholds
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_val, y_pred_proba)
# See the results in a table
pd.DataFrame({
'False Positive Rate': fpr,
'True Positive Rate': tpr,
'Threshold': thresholds
})
# See the results on a plot.
# This is the "Receiver Operating Characteristic" curve
plt.scatter(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate');
# Use scikit-learn to calculate the area under the curve.
from sklearn.metrics import roc_auc_score
roc_auc_score(y_val, y_pred_proba)
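# Sanity check (added): a constant score for every row carries no ranking information,
# so it gives the naive-baseline ROC AUC of 0.5.
print('Naive baseline ROC AUC:', roc_auc_score(y_val, np.zeros(len(y_val))))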
###Output
_____no_output_____
###Markdown
**Recap:** ROC AUC measures how well a classifier ranks predicted probabilities. So, when you get your classifier’s ROC AUC score, you need to use predicted probabilities, not discrete predictions. Your code may look something like this:```pythonfrom sklearn.metrics import roc_auc_scorey_pred_proba = model.predict_proba(X_test_transformed)[:, -1] Probability for last classprint('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))```ROC AUC ranges from 0 to 1. Higher is better. A naive majority class baseline will have an ROC AUC score of 0.5.
###Code
from sklearn.metrics import roc_auc_score
y_pred_proba = model.predict_proba(X_test_transformed)[:, -1] # Probability for last class
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
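# Note (added): `model`, `X_test_transformed`, and `y_test` above are placeholders from
# the recap text. With this notebook's objects the equivalent would be, e.g.:
# y_pred_proba = pipeline.predict_proba(X_val)[:, -1]
# print('Validation ROC AUC:', roc_auc_score(y_val, y_pred_proba))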
###Output
_____no_output_____ |
scratch/fields/128x128 - demo.ipynb | ###Markdown
Model in STAX
###Code
n_summaries = 2
n_s = 5000
n_d = 5000
λ = 100.0
ϵ = 0.1
# define inception block layer
def InceptBlock2(filters, strides, do_5x5=True, do_3x3=True):
"""InceptNet convolutional striding block.
filters: tuple: (f1,f2,f3)
filters1: for conv1x1
filters2: for conv1x1,conv3x3
    filters3: for conv1x1,conv5x5"""
filters1, filters2, filters3 = filters
conv1x1 = stax.serial(stax.Conv(filters1, (1,1), strides, padding="SAME"))
filters4 = filters2
conv3x3 = stax.serial(stax.Conv(filters2, (1,1), strides=None, padding="SAME"),
stax.Conv(filters4, (3,3), strides, padding="SAME"))
filters5 = filters3
conv5x5 = stax.serial(stax.Conv(filters3, (1,1), strides=None, padding="SAME"),
stax.Conv(filters5, (5,5), strides, padding="SAME"))
maxpool = stax.serial(stax.MaxPool((3,3), padding="SAME"),
stax.Conv(filters4, (1,1), strides, padding="SAME"))
if do_3x3:
if do_5x5:
return stax.serial(
stax.FanOut(4), # should num=3 or 2 here ?
stax.parallel(conv1x1, conv3x3, conv5x5, maxpool),
stax.FanInConcat(),
stax.LeakyRelu)
else:
return stax.serial(
stax.FanOut(3), # should num=3 or 2 here ?
stax.parallel(conv1x1, conv3x3, maxpool),
stax.FanInConcat(),
stax.LeakyRelu)
else:
return stax.serial(
stax.FanOut(2), # should num=3 or 2 here ?
stax.parallel(conv1x1, maxpool),
stax.FanInConcat(),
stax.LeakyRelu)
def Reshape(newshape):
"""Layer function for a reshape layer."""
init_fun = lambda rng, input_shape: (newshape,())
apply_fun = lambda params, inputs, **kwargs: np.reshape(inputs,newshape)
return init_fun, apply_fun
rng,drop_rng = jax.random.split(rng)
fs = 64 #for 128x128 sims
model = stax.serial(
InceptBlock2((fs,fs,fs), strides=(4,4)),
InceptBlock2((fs,fs,fs), strides=(4,4)),
InceptBlock2((fs,fs,fs), strides=(4,4)),
InceptBlock2((fs,fs,fs), strides=(2,2), do_5x5=False, do_3x3=False),
stax.Conv(n_summaries, (1,1), strides=(1,1), padding="SAME"),
stax.Flatten,
Reshape((n_summaries,))
)
optimiser = optimizers.adam(step_size=1e-3)
###Output
_____no_output_____
###Markdown
Random seeds for IMNN
###Code
rng, initial_model_key = jax.random.split(rng)
rng, fitting_key = jax.random.split(rng)
###Output
_____no_output_____
###Markdown
Random seeds for ABC
###Code
rng, abc_key = jax.random.split(rng)
###Output
_____no_output_____
###Markdown
2D Gaussian Field Simulator in JAX Steps to creating $(N \times N)$ 2D Gaussian field for IMNN:1. Generate a $(N\times N)$ white noise field $\varphi$ such that $\langle \varphi_k \varphi_{-k} \rangle' = 1$2. Fourier Transform $\varphi$ to real space: $R_{\rm white}(\textbf{x}) \rightarrow R_{\rm white}(\textbf{k})$ - note: NumPy's DFT Fourier convention is: $$\phi_{ab}^{\textbf{k}} = \sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) \phi^{\textbf{x}}_{cd}}$$ $$\phi_{ab}^{\textbf{x}} = \frac{1}{N^2}\sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) \phi^{\textbf{k}}_{cd}}$$ 3. Scale white noise $R_{\rm white}(\textbf{k})$ by the chosen power spectrum evaluated over a field of $k$ values:$$ R_P(\textbf{k}) = P^{1/2}(k) R_{\rm white}(\textbf{k}) $$ - note: here we need to ensure that this array of amplitudes is Hermitian, e.g. $\phi^{* \textbf{k}}_{a(N/2 + b)} = \phi^{\textbf{k}}_{a(N/2 - b)}$. This is accomplished by choosing indexes $k_a = k_b = \frac{2\pi}{N} (0, \dots, N/2, -N/2+1, \dots, -1)$ and then evaluating the square root of the outer product of the meshgrid between the two: $k = \sqrt{k^2_a + k^2_b}$. We can then evaluate $P^{1/2}(k)$. 4. Fourier Transform $R_{P}(\textbf{k})$ to real space: $ R_P(\textbf{x}) = \int d^d \tilde{k} e^{i\textbf{k} \cdot \textbf{x}} R_p(\textbf{k}) $:$$R_{ab}^{\textbf{x}} = \frac{1}{N^2}\sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) R^{\textbf{k}}_{cd}}$$
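Before the JAX implementation below, here is a minimal plain-NumPy sketch of steps 1-4 (purely illustrative; the function and variable names here are assumptions and are not used elsewhere in this notebook):

```python
import numpy as onp  # plain NumPy, to avoid clashing with jax.numpy imported as np

def gaussian_field_sketch(N=128, A=1.0, B=0.5, seed=0):
    rng = onp.random.default_rng(seed)
    # steps 1-2: white noise in k-space; the FFT of real white noise is Hermitian by construction
    white_k = onp.fft.fftn(rng.normal(size=(N, N)))
    # build |k| with the index convention described above
    k1d = 2 * onp.pi / N * onp.concatenate((onp.arange(0, N//2 + 1), onp.arange(-N//2 + 1, 0)))
    ka, kb = onp.meshgrid(k1d, k1d, indexing="ij")
    k = onp.sqrt(ka**2 + kb**2)
    # step 3: scale by P^(1/2)(k) with P(k) = A k^-B, zeroing the singular k=0 mode
    sqrtP = onp.zeros_like(k)
    sqrtP[k > 0] = onp.sqrt(A * k[k > 0]**(-B))
    # step 4: inverse FFT back to real space (the imaginary part is numerical noise)
    return onp.real(onp.fft.ifftn(sqrtP * white_k))

field = gaussian_field_sketch()
print(field.shape)  # (128, 128)
```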
###Code
# SET 32-BiT floats for model !
θ_fid = np.array([1.0, 0.5], dtype=np.float32)
δθ = np.array([0.1, 0.1], dtype=np.float32)
n_params = 2
N = 128
dim = 2
L = 128
field_shape = (N,N)
input_shape = (1,1, N,N)
simulator_args = {"N": N, "L": L, "dim": dim, "shape": field_shape, 'vol_norm': False, "N_scale": True, "squeeze": False}
###Output
_____no_output_____
###Markdown
simulator class for a Powerbox in JaxAttributes:- simulator for Gaussian fields from input power spectrum- analytic Fisher information computation at a given $\theta$
###Code
rng,fg_key = jax.random.split(rng)
foregrounds = jax.random.normal(fg_key, (1000, 1,) + simulator_args['shape'])*0
def default_P(k, A, B):
return A*k**-B
class powerBoxJax:
def __init__(self, shape, pk=None, k=None):
if pk is None:
self.pk = default_P
else:
self.pk = pk
if k is None:
self.k = np.sqrt(np.sum(np.array(np.meshgrid(*(
(np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) * 2*np.pi / _shape)**2
for _shape in shape))), axis=0))
else:
self.k = k
self.shape = shape
self.N = shape[0]
def simulator(self, rng, θ, simulator_args=simulator_args, add_foregrounds=False):
def P(k, A=1, B=1):
return self.pk(k, A, B)
def fn(key, A, B):
shape = self.shape #simulator_args["shape"]
k = self.k
new_shape = ()
for _shape in shape:
if _shape % 2 == 0:
new_shape += (_shape+1,)
else:
new_shape += (_shape,)
key1,key2 = jax.random.split(key)
if add_foregrounds:
foreground = foregrounds[jax.random.randint(key2,
minval=0, maxval=1000, shape=())]
else:
foreground = 0.
# L is in length units, like Gpc
L = simulator_args['L']
dim = simulator_args['dim']
if np.isscalar(L):
L = [L]*int(dim)
else:
L = np.array(L)
V = np.prod(np.array(L))
scale = V**(1./dim)
Lk = ()
_N = 1
for i,_shape in enumerate(shape):
_N *= _shape
Lk += (_shape / L[i],) # 1 / dx
fft_norm = np.prod(np.array(Lk))
_dims = len(shape)
tpl = ()
for _d in range(_dims):
tpl += (_d,)
# POWERBOX IMPLEMENTATION
mag = jax.random.normal(key1, shape=tuple(N for N in new_shape))
# random phases
pha = 2 * np.pi * jax.random.uniform(key1, shape=tuple(N for N in new_shape))
# now make hermitian field (reality condition)
revidx = (slice(None, None, -1),) * len(mag.shape)
mag = (mag + mag[revidx]) / np.sqrt(2)
pha = (pha - pha[revidx]) / 2 + np.pi
dk = mag * (np.cos(pha) + 1j * np.sin(pha)) # output is complex
cutidx = (slice(None, -1),) * len(new_shape)
dk = dk[cutidx]
powers = np.concatenate((np.zeros(1),
np.sqrt(P(k.flatten()[1:], A=A, B=B)))).reshape(k.shape)
# normalize power by volume
if simulator_args['vol_norm']:
powers = powers/V
fourier_field = powers * dk
fourier_field = jax.ops.index_update(
fourier_field,
np.zeros(len(shape), dtype=int),
np.zeros((1,)))
field = np.real(np.fft.ifftn(fourier_field) * fft_norm * V)
if simulator_args["N_scale"]:
field *= scale
field = np.expand_dims(field + foreground, (0,))
if not simulator_args["squeeze"]:
field = np.expand_dims(field, (0,))
return np.array(field, dtype='float32')
shape = self.shape
A, B = θ
if A.shape == B.shape:
if len(A.shape) == 0:
return fn(rng, A, B)
else:
keys = jax.random.split(rng, num=A.shape[0] + 1)
rng = keys[0]
keys = keys[1:]
return jax.vmap(
                    lambda key, A, B: self.simulator(key, (A, B), simulator_args=simulator_args)
)(keys, A, B)
else:
if len(A.shape) > 0:
keys = jax.random.split(rng, num=A.shape[0] + 1)
rng = keys[0]
keys = keys[1:]
return jax.vmap(
                    lambda key, A: self.simulator(key, (A, B), simulator_args=simulator_args)
)(keys, A)
elif len(B.shape) > 0:
keys = jax.random.split(rng, num=B.shape[0])
return jax.vmap(
                    lambda key, B: self.simulator(key, (A, B), simulator_args=simulator_args)
)(keys, B)
def AnalyticFisher(self,
θ,
kvec=None,
N=None
):
"""
Code for computing the Analytic Fisher for a Gaussian
Field with power spectrum P(k) = Ak^-B
"""
A,B = θ
if N is None:
N = self.N
# we want all UNIQUE fourier modes
if kvec is None:
kvec = self.k[1:N//2, 1:N//2]
pk = lambda k : A*(k**-B) # P(k) = Ak^(-B)
p_a = lambda k : k**-B # deriv w.r.t. A
p_b = lambda k : -A*(k**-B)*np.log(k) # deriv w.r.t. B
powers = (pk(kvec.flatten()[:]))#np.concatenate((np.ones(1),
powera = (p_a(kvec.flatten()[:])) #np.concatenate((np.zeros(1),
powerb = (p_b(kvec.flatten()[:])) #np.concatenate((np.zeros(1),
Cinv = np.diag(1. / powers) # diagonal inv. covariance
Ca = np.diag(powera / 1.) # C_{,A}
Cb = np.diag(powerb / 1.) # C_{,B}
Faa = 0.5 * np.trace((Ca @ Cinv @ Ca @ Cinv))
Fab = 0.5 * np.trace((Ca @ Cinv @ Cb @ Cinv))
Fba = 0.5 * np.trace((Cb @ Cinv @ Ca @ Cinv))
Fbb = 0.5 * np.trace((Cb @ Cinv @ Cb @ Cinv))
return np.array([[Faa, Fab], [Fba, Fbb]])
###Output
_____no_output_____
###Markdown
analytic likelihood class- return the log-likelihood (computed on a grid) for power spectrum parameters for Gaussian Fields- takes in a PowerboxJax simulator object `PBJ` and fourier-transformed field target data `Δ_target`methods:- `get_likelihood`: return likelihood marginal on a grid- `plot_contours`: plot likelihood contours on a meshgrid- `plot_corner`: plot liklihood contours, target crosshairs, and image data on a 2x2 corner plot
###Code
class analyticFieldLikelihood:
def __init__(self,
PBJ,
field_shape,
Δ_target,
prior,
k=None,
pk=None,
gridsize=20,
tiling=2):
"""code for computing a gaussian field's likelihood for power spectrum parameters
PBJ : powerBox simulator object
field_shape : list. shape of field input
Δ : array-like. FFT of the real-space field
prior : array-like. range over which to compute the likelihood
k : array-like. fourier modes over which to compute P(k)
tiling : list or int. tiling=2 means likelihood will be computed as 2x2 grid
gridsize : how large to make the likelihood surface
"""
if k is None:
self.k = PBJ.k
if pk is None:
self.pk = PBJ.pk
self.field_shape = field_shape
self.gridsize = gridsize
if np.isscalar(tiling):
self.tiling = [tiling]*2
else:
self.tiling = tiling
#self.tilesize = gridsize // tiling
self.N = np.sqrt(np.prod(np.array(field_shape))) # should just be N for NxN grid
self.prior = prior
self.k = k
self.Δ = Δ_target
def Pk(self, k, A=1, B=0.5):
return self.pk(k, A, B)
def log_likelihood(self, k, A, B, Δ):
Δ = Δ.flatten()[:]
k = k
dlength = len(k.flatten())
def fn(_A, _B):
nrm = np.pad(np.ones(dlength-2)*2, (1,1), constant_values=1.)
nrm = jax.ops.index_update(
nrm, np.array([[0],[(dlength-2)]]), np.array([[1],[1]]))
nrm = 1
powers = self.Pk(k.flatten()[:], A=_A, B=_B)
# covariance is P(k)
C = powers * nrm
invC = 1./self.Pk(k.flatten()[:], A=_A, B=_B)
logdetC = np.sum(np.log(C))
pi2 = np.pi * 2.
m_half_size = -0.5 * len(Δ)
exponent = - 0.5 * np.sum(np.conj(Δ) * invC * Δ)
norm = -0.5 * logdetC + m_half_size*np.log(pi2)
return (exponent + norm)
return jax.vmap(fn)(A, B)
def get_likelihood(self, return_grid=False, shift=None):
A_start = self.prior[0][0]
A_end = self.prior[1][0]
B_start = self.prior[0][1]
B_end = self.prior[1][1]
region_size = [self.gridsize // self.tiling[i] for i in range(len(self.tiling))]
print("computing likelihood on a %dx%d grid \n \
in tiles of size %dx%d"%(self.gridsize, self.gridsize, region_size[0], region_size[1]))
def get_like_region(A0, A1, B0, B1, qsize):
A_range = np.linspace(A0, A1, qsize)
B_range = np.linspace(B0, B1, qsize)
A, B = np.meshgrid(A_range, B_range)
return (self.log_likelihood(self.k,
A.ravel(), B.ravel(), self.Δ).reshape(qsize,qsize))
A_incr = (A_end - A_start) / self.tiling[0]
B_incr = (B_end - B_start) / self.tiling[1]
# marks the ends of linspace
A_starts = [A_start + (i)*A_incr for i in range(self.tiling[0])]
A_ends = [A_start + (i+1)*A_incr for i in range(self.tiling[0])]
B_starts = [B_start + (i)*B_incr for i in range(self.tiling[1])]
B_ends = [B_start + (i+1)*B_incr for i in range(self.tiling[1])]
_like_cols = []
for _col in range(self.tiling[0]):
# slide horizontally in A
_like_row = []
for _row in range(self.tiling[1]):
# slide vertically in B
_like = get_like_region(A_starts[_row],
A_ends[_row],
B_starts[_col],
B_ends[_col],
region_size[0],
)
_like_row.append(_like)
_like_cols.append(np.concatenate(_like_row, axis=1))
_log_likelihood = np.real(np.concatenate(_like_cols, axis=0))
if shift is None:
shift = np.max(_log_likelihood)
#print('shift', shift)
print('loglike mean', np.mean(_log_likelihood))
#_log_likelihood = _log_likelihood - shift
if return_grid:
_A_range = np.linspace(self.prior[0,0], self.prior[1,0], self.gridsize)
            _B_range = np.linspace(self.prior[0,1], self.prior[1,1], self.gridsize)
return (_log_likelihood), _A_range, _B_range
return (_log_likelihood)
def plot_contours(self, ax=None,
θ_ref=None, shift=None,
xlabel='A', ylabel='B',
return_like=True):
_like, _A, _B = self.get_likelihood(return_grid=True, shift=shift)
_like = scipy.special.softmax(np.real(_like))
_A, _B = np.meshgrid(_A, _B)
if ax is None:
fig,ax = plt.subplots(figsize=(10,10))
mesh = ax.contourf(_A, _B, _like)
plt.colorbar(mesh, ax=ax)
if θ_ref is not None:
ax.scatter(θ_ref[0], θ_ref[1], zorder=10, marker='+', s=100, color='r')
ax.set_xlabel('A')
ax.set_ylabel('B')
if return_like:
return _like, ax
else:
return ax
def plot_corner(self, ax=None, θ_ref=None, label="Analytic likelihood",
image_data=None, return_like=False):
_like, _A_range, _B_range = self.get_likelihood(return_grid=True)
likelihoodA = scipy.special.softmax(np.real(_like)).sum(0) #np.real(likelihood).sum(0)
likelihoodA /= likelihoodA.sum() * (_A_range[1] - _A_range[0])
likelihoodB = scipy.special.softmax(np.real(_like)).sum(1) #np.real(likelihood).sum(1)
likelihoodB /= likelihoodB.sum() * (_B_range[1] - _B_range[0])
_like = scipy.special.softmax(np.real(_like))
sorted_marginal = np.sort(_like.flatten())[::-1]
cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
value = []
for level in [0.997, 0.95, 0.68]:
this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
if len(value) == 0:
value.append(this_value)
elif this_value <= value[-1]:
break
else:
value.append(this_value)
if ax is None:
fig,ax = plt.subplots(nrows=2, ncols=2)
ax[1,0].contour(_A_range, _B_range, _like, levels=value, colors='C2', alpha=0.7)
ax[0,0].plot(_A_range, likelihoodA, color='C2', label=None, alpha=0.7)
ax[1,1].plot(likelihoodB, _B_range, color='C2', label='loglike', alpha=0.7)
if image_data is not None:
ax[0,1].imshow(np.squeeze(image_data))
else:
ax[0,1].axis("off")
if θ_ref is not None:
ax[0,0].axvline(θ_ref[0], linestyle='--', c='k')
ax[1,0].axvline(θ_ref[0], linestyle='--', c='k')
ax[1,0].axhline(θ_ref[1], linestyle='--', c='k')
ax[1,1].axhline(θ_ref[1], linestyle='--', c='k', label=r'$\theta_{\rm ref}$')
ax[1,0].set_xlabel(r'$A$')
ax[1,0].set_ylabel(r'$B$')
if return_like:
return ax,_like
else:
return ax
PBJ = powerBoxJax(simulator_args['shape'])
simulator = PBJ.simulator
###Output
_____no_output_____
###Markdown
sim and gradient
###Code
def simulator_gradient(rng, θ, simulator_args=simulator_args):
return value_and_jacrev(simulator, argnums=1, allow_int=True, holomorphic=True)(rng, θ, simulator_args=simulator_args)
rng, key = jax.random.split(rng)
field_shape
# plot example simulation and derivative
deriv_args = {"N": N, "L": L, "dim": dim, "shape": field_shape, "vol_norm": True, "N_scale": True, "squeeze": False}
simulation, simulation_gradient = value_and_jacfwd(simulator, argnums=1)(rng, θ_fid, simulator_args=deriv_args)
plt.imshow(np.squeeze(simulation[0]), extent=(0,1,0,1))
plt.colorbar()
plt.title('example simulation')
plt.show()
plt.imshow(np.squeeze(simulation_gradient[0].T[0].T), extent=(0,1,0,1))
plt.title('gradient of simulation')
plt.colorbar()
plt.show()
def get_simulations(rng, n_s, θ, simulator_args=simulator_args):
def get_simulator(key):
return simulator(key, θ, simulator_args=simulator_args)
keys = jax.random.split(rng, num=n_s)
return jax.vmap(get_simulator)(np.array(keys))
def get_simulation_gradients(rng, n_s, n_d, θ, simulator_args=simulator_args):
def get_batch_gradient(key):
return simulator_gradient(key, θ, simulator_args=simulator_args)
keys = jax.random.split(rng, num=n_s)
return jax.vmap(get_batch_gradient)(np.array(keys)[:n_d])
###Output
_____no_output_____
###Markdown
known analytic Fisher information For a gaussian field, the likelihood is written$$ \mathcal{L}(\Delta | \theta) = \frac{1}{(2\pi)^{N_p / 2} \det(C)^{1/2}}\exp{\left(-\frac{1}{2} \Delta C^{-1} \Delta \right)}$$Where $\Delta \in \mathbb{R}^{N_p},\ N_p=N_k=V=N\times N$ is the Fourier Transform of the observed real-space field.This yields a Fisher information matrix of$$F_{\alpha \beta} = \langle -\frac{\partial^2 \ln \mathcal{L}}{\partial \lambda_\alpha \partial \lambda_\beta} \rangle= \frac{1}{2} {\rm Tr} (C_{, \alpha} C^{-1} C_{, \beta} C^{-1}) $$where the covariance is$$ C_{k_i, k_j} = P(k_i)\delta_{ij}$$The associated derivatives for a power law $P(k) = Ak^{-B}$ are$$\begin{align} C_{,A} &= \left( k^{-B} \right)\delta_{ij} \\ C_{,B} &= \left( -Ak^{-B}\ln k \right) \delta_{ij}\end{align} $$We notice that the Fisher information is *only* a function of the power spectrum parameters. It tells us the curvature of the chosen model (likelihood function) at a given $\theta$. The analytic Fisher information is the maximum amount of information we can expect the IMNN to extract from our simulations. <!-- Alternatively, we can explore a volume integral analytically from the definition of C :where the Fisher matrix is given by$$ F_{\alpha \beta} = \sum_k \frac{1}{(\delta P_k)^2} \frac{\partial P_k}{\partial \lambda_\alpha} \frac{\partial P_k}{\partial \lambda_\beta}$$and the error on $P_k$ is given (for a square, 2D box) as$$ \delta P_k = \sqrt{\frac{2}{k (\Delta k) V} } \left( P_k + \frac{1}{\bar{n}} \right) $$ --> <!-- For a gaussian field, the likelihood is written$$ \ln \mathcal{L}(\theta | \vec{d}) = \ln \mathcal{L}(\theta | \Delta) = \sqrt{\frac{1}{2\pi C}} \exp{\frac{-{\Delta}^2}{2C}}$$where $\vec{d} = \Delta$ is the overdensity field (in a cosmological context this is the measured temperature or galaxy count in a sky survey). Given that the power spectrum describes the correlations at different scales $k$, we can define the correlation via the power spectrum $C = P(k) = Ak^{-B}$ to compute the log-likelihood. The Fisher information matrix, given as $$ F_{\alpha \beta} = \langle - \frac{\partial^2 \ln \mathcal{L}}{\partial \theta_\alpha \partial \theta_\beta} \rangle $$can then be computed analytically for our likelihood:$$ F_{\alpha \beta} = \sum_k \frac{1}{(\delta P_k)^2} \frac{\partial P_k}{\partial \theta_\alpha} \frac{\partial P_k}{\partial \theta_\beta} $$where $\delta P_k = \sqrt{\frac{2}{4\pi \Delta k V k_{\rm tot}^2}} (P_k + \frac{1}{\bar{n}})$ is the error on $P_k$ with survey volume $V$, sampling interval $\Delta k$, and shot noise $1/\bar{n}$. Using the fact that $d\ln P_k = \frac{d P_k}{P_k}$, we can rewrite the sum as an integral:$$ F_{\alpha \beta} = 2 \pi \left( \frac{V_{\rm eff}}{\lambda^3} \right) \int_{k_{\rm min}}^{k_{\rm max}} d \ln k \frac{\partial \ln P_k}{\partial \theta_\alpha} \frac{\partial \ln P_k}{\partial \theta_\beta}$$Where $V_{\rm eff}$ and $\lambda^3$ are our effective windowed survey size and survey extent, respectively (set to 1 for now). Doing the integration explicitly, we obtain the Fisher matrix for parameters $\theta = (A, B)$:$$ F = 2 \pi \left( \frac{V_{\rm eff}}{\lambda^3} \right) \begin{bmatrix} \frac{1}{A^2} \ln (\frac{k_{\rm max}}{k_{\rm min}}) & \frac{1}{2A} ((\ln k_{\rm min})^2 - (\ln k_{\rm max})^2) \\ \frac{1}{2A} ((\ln k_{\rm min})^2 - (\ln k_{\rm max})^2) & \frac{1}{3}((\ln k_{\rm max})^3 - (\ln k_{\rm min})^3) \\\end{bmatrix}$$ --> For our fiducial model with our data vector of size $128^2$, our $\rm det|F|$ reads:
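Before evaluating the traces numerically below, a hedged cross-check (not part of the original analysis): because the covariance is diagonal, the Fisher elements collapse to closed forms involving only the number of modes and sums of $\ln k$, so for $P(k) = Ak^{-B}$ the information is independent of $B$. A minimal sketch in plain NumPy:

```python
import numpy as onp

def analytic_fisher_closed_form(k, A):
    """Closed-form Fisher for C = diag(A k^-B); note that B drops out for this power law."""
    lnk = onp.log(onp.asarray(k).flatten())
    n_k = lnk.size
    F_AA = n_k / (2 * A**2)        # 0.5 * Tr[(1/A)^2 I]
    F_AB = -lnk.sum() / (2 * A)    # 0.5 * Tr[(1/A) * (-ln k)]
    F_BB = (lnk**2).sum() / 2      # 0.5 * Tr[(ln k)^2]
    return onp.array([[F_AA, F_AB], [F_AB, F_BB]])
```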
###Code
N = simulator_args["N"]
shape = simulator_args["shape"]
kbin = np.sqrt(np.sum(np.array(np.meshgrid(*(
np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) *2* np.pi / _shape
for _shape in shape)))**2, axis=0))
f_expected = PBJ.AnalyticFisher(θ_fid, kvec=None)
print("analytic F(θ_fid): ", f_expected)
detf_expected = np.linalg.det(f_expected)
print("analytic det(F(θ_fid)): ", detf_expected)
# MAKE SIMULATION
N = simulator_args["N"]
shape = (N,N)
θ_sim = np.array([0.7, 0.8])
simulator_args = {"N": N, "L": L, "dim": dim, "shape": shape, "vol_norm": True, "N_scale": False, "squeeze": True}
simulator_args["shape"] = (N,N)
simkey,rng = jax.random.split(rng)
#sim = np.squeeze(target_data)#
sim = np.squeeze(simulator(simkey, θ_sim, simulator_args=simulator_args))
sim_fft = (np.fft.fft2(sim)) #/ (N**2)
# PLOT ANALYTIC POSTERIOR
# IGNORE FIRST FOURIER MODE -- no information here !
gridsize = 100 # for likelihood gridding
Δ = sim_fft[1:N//2, 1:N//2]
k = kbin[1:N//2, 1:N//2]
prior_range = np.array([[0.1, 0.1], [1.25, 1.25]])
AL = analyticFieldLikelihood(PBJ, shape, Δ, prior_range, k=k, gridsize=gridsize, tiling=[5,5])
#plt.style.use('default')
ax = AL.plot_corner(θ_ref=θ_sim, image_data=sim)
simulator_args
def sanity_check(gridsize=50, num=20):
likes = []
likeAs = []
likeBs = []
rng1 = jax.random.PRNGKey(13)
values = []
θ_target = np.array([0.8, 0.8], dtype=np.float32)
for t in range(num):
key, rng1 = jax.random.split(rng1)
targ = simulator(
key,
θ_target)
gridsize = 50 # for likelihood gridding
Δ = np.fft.fftn(np.squeeze(targ))[1:N//2, 1:N//2] / N
k = kbin[1:N//2, 1:N//2]
prior_range = np.array([[0.1, 0.1], [1.25, 1.25]])
AL = analyticFieldLikelihood(PBJ, shape, Δ, prior_range, k=k,
gridsize=gridsize, tiling=[5,5])
likelihood,A_range,B_range = AL.get_likelihood(shift=None, return_grid=True)
_A_range = A_range#*np.exp(shift)
_B_range = B_range#*np.exp(shift)
likelihoodA = scipy.special.softmax(np.real(likelihood)).sum(0) #np.real(likelihood).sum(0)
likelihoodA /= likelihoodA.sum() * (_A_range[1] - _A_range[0])
likelihoodB = scipy.special.softmax(np.real(likelihood)).sum(1) #np.real(likelihood).sum(1)
likelihoodB /= likelihoodB.sum() * (_B_range[1] - _B_range[0])
likelihood = scipy.special.softmax(np.real(likelihood))
sorted_marginal = np.sort(likelihood.flatten())[::-1]
cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
value = []
for level in [0.997, 0.95, 0.68]:
this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
if len(value) == 0:
value.append(this_value)
elif this_value <= value[-1]:
break
else:
value.append(this_value)
#fig, ax = plt.subplots(2, 2, figsize=(10, 10))
#likelihood /= likelihood.sum()
likes.append(likelihood)
likeAs.append(likelihoodA)
likeBs.append(likelihoodB)
values.append(value)
return likes,likeAs,likeBs,values
likes,likeAs,likeBs,values = sanity_check()
fig,ax = plt.subplots(nrows=2, ncols=2)
for l,like in enumerate(likes):
ax[1,0].contour(A_range, B_range, like, levels=value, colors='#FF8D33', alpha=0.5)
ax[0, 0].plot(A_range, likeAs[l], color='#FF8D33', label=None, alpha=0.5)
ax[0, 1].axis("off")
ax[1, 1].plot(likeBs[l], B_range, color='#FF8D33', label='loglike', alpha=0.5)
ax[1,0].scatter(θ_target[0], θ_target[1], marker='+', s=50, color='blue', zorder=20)
ax[0,0].axvline(θ_target[0], linestyle='--', c='k')
ax[1,0].axvline(θ_target[0], linestyle='--', c='k')
ax[1,0].axhline(θ_target[1], linestyle='--', c='k')
ax[1,1].axhline(θ_target[1], linestyle='--', c='k', label=r'$\theta_{\rm target}$')
ax[1,0].set_xlabel(r'$A$')
ax[1,0].set_ylabel(r'$B$')
###Output
computing likelihood on a 50x50 grid
in tiles of size 10x10
shift -204802937697.98566
loglike mean -859104641710.4711
###Markdown
Initialise IMNN
###Code
simulator_args["squeeze"] = False
simulator_args['vol_norm'] = True
simulator_args['N_scale'] = True # false
simulator_args['L'] = L
simulator_args
IMNN = SimulatorIMNN(
n_s=5000,
n_d=5000,
n_params=n_params,
n_summaries=n_summaries,
input_shape=input_shape,
θ_fid=θ_fid,
model=model,
optimiser=optimiser,
key_or_state=initial_model_key,
simulator=lambda rng, θ: simulator(rng, θ, simulator_args=simulator_args),
# devices=[jax.devices()[0]],
# n_per_device=1000
)
###Output
_____no_output_____
###Markdown
Fit
###Code
# SAVING IMNN ATTRIBUTES
import cloudpickle as pickle
import os
def save_weights(IMNN, folder_name='./model', weights='final'):
# create output directory
if not os.path.exists(folder_name):
os.mkdir(folder_name)
def pckl_me(obj, path):
with open(path, 'wb') as file_pi:
pickle.dump(obj, file_pi)
file_pi.close()
# save IMNN (optimiser) state:
savestate = jax.experimental.optimizers.unpack_optimizer_state(IMNN.state)
pckl_me(savestate, os.path.join(folder_name, 'IMNN_state'))
# save weights
if weights == 'final':
np.save(os.path.join(folder_name, 'final_w'), IMNN.final_w)
else:
np.save(os.path.join(folder_name, 'best_w'), IMNN.best_w)
# save initial weights
np.save(os.path.join(folder_name, 'initial_w'), IMNN.initial_w)
# save training history
pckl_me(IMNN.history, os.path.join(folder_name, 'history'))
# save important attributes as a dict
imnn_attributes = {
'n_s': IMNN.n_s,
'n_d': IMNN.n_d,
'input_shape': IMNN.input_shape,
'n_params' : IMNN.n_params,
'n_summaries': IMNN.n_summaries,
'θ_fid': IMNN.θ_fid,
'F': IMNN.F,
'validate': IMNN.validate,
'simulate': IMNN.simulate,
}
pckl_me(imnn_attributes, os.path.join(folder_name, 'IMNN_attributes'))
print('saved weights and attributes to the file ', folder_name)
def load_weights(IMNN, folder_name='./model', weights='final', load_attributes=True):
def unpckl_me(path):
file = open(path, 'rb')
return pickle.load(file)
# load and assign weights
if weights=='final':
weights = np.load(os.path.join(folder_name, 'final_w.npy'), allow_pickle=True)
IMNN.final_w = weights
else:
weights = np.load(os.path.join(folder_name, 'best_w.npy'), allow_pickle=True)
IMNN.best_w = weights
# re-pack and load the optimiser state
loadstate = unpckl_me(os.path.join(folder_name, 'IMNN_state'))
IMNN.state = jax.experimental.optimizers.pack_optimizer_state(loadstate)
# load history
IMNN.history = unpckl_me(os.path.join(folder_name, 'history'))
# load important attributes
if load_attributes:
        IMNN.initial_w = np.load(os.path.join(folder_name, 'initial_w.npy'), allow_pickle=True)
        attributes = unpckl_me(os.path.join(folder_name, 'IMNN_attributes'))
IMNN.θ_fid = attributes['θ_fid']
IMNN.n_s = attributes['n_s']
IMNN.n_d = attributes['n_d']
IMNN.input_shape = attributes['input_shape']
print('loaded IMNN with these attributes: ', attributes)
# # test save functions
# save_weights(IMNN, folder_name='./model')
# # test load functions
# # initialize a new imnn with different attributes and then load the old file
# # to overwrite them
# my_new_IMNN = SimIMNN(
# n_s=300,
# n_d=100,
# n_params=n_params,
# n_summaries=n_summaries,
# input_shape=input_shape,
# θ_fid=np.array([1.0,1.0]),
# key=initial_model_key,
# model=model,
# optimiser=optimiser,
# simulator=lambda rng, θ: simulator(rng, θ, simulator_args=simulator_args),
# )
# load_weights(my_new_IMNN, folder_name='./model', load_attributes=True)
# my_new_IMNN.set_F_statistics(rng, my_new_IMNN.best_w, my_new_IMNN.θ_fid, my_new_IMNN.n_s, my_new_IMNN.n_d, validate=True)
θ_fid
%%time
for i in range(1):
rng,fit_rng = jax.random.split(rng)
IMNN.fit(λ=10., ϵ=ϵ, rng=fit_rng, min_iterations=500) #for IMNN, IMNN_rng in zip(IMNNs, IMNN_rngs);
#save_weights(IMNN, folder_name='./big_incept128')
IMNNs = [IMNN]
latexify(fig_width=3.37)
plt.plot(IMNN.history['detF'][:])
plt.plot(np.ones(len(IMNN.history['detF'][:]))*detf_expected, c='k', linestyle='--')
plt.ylim(1e-2, 1e7)
plt.ylabel(r'$\det \textbf{F}$')
plt.xlabel('number of epochs')
plt.yscale('log')
plt.tight_layout()
plt.savefig('/mnt/home/tmakinen/repositories/field-plots/128x128-training.png', dpi=400)
np.linalg.det(IMNNs[0].F) #/ (detf_expected)
IMNNs[0].F
print('IMNN F:', IMNN.F)
print('IMNN det F:', np.linalg.det(IMNN.F))
print('IMNN F / analytic det F: ', (np.linalg.det(IMNN.F)) / detf_expected)
###Output
IMNN F: [[ 2485.2085 -1814.8022]
[-1814.8022 1678.63 ]]
IMNN det F: 878238.75
IMNN F / analytic det F: 0.9076835203931398
###Markdown
Data for ABC example
###Code
class uniform:
def __init__(self, low, high):
self.low = np.array(low)
self.high = np.array(high)
self.event_shape = [[] for i in range(self.low.shape[0])]
def sample(self, n=None, seed=None):
if n is None:
n = 1
keys = np.array(jax.random.split(
seed,
num=len(self.event_shape)))
return jax.vmap(
lambda key, low, high : jax.random.uniform(
key,
shape=(n,),
minval=low,
maxval=high))(
keys, self.low, self.high)
prior = uniform([0.6, 0.2], [1.25, 1.20])
simulator_args
simulator_args = {"N": N, "L": L, "dim": dim, "shape": shape, "N_scale": True, "vol_norm": True, "squeeze": True}
rng, key = jax.random.split(rng)
θ_target = np.array([0.9, 0.6])
target_data = simulator(
key,
θ_target,
simulator_args={**simulator_args, **{'squeeze':False}})
###Output
_____no_output_____
###Markdown
analytic likelihood calculation
###Code
target_data = np.load('./128x128-gauss/example-field_theta=%d_%d.npy'%(θ_target[0]*10, θ_target[1]*10))
target_data = np.expand_dims(np.expand_dims(np.expand_dims(target_data,0),0),0)
gridsize = 100 # for likelihood gridding
Δ = np.fft.fftn(np.squeeze(target_data))[1:N//2, 1:N//2] / N
k = kbin[1:N//2, 1:N//2]
prior_range = np.array([[0.1, 0.1], [1.25, 1.25]])
AL = analyticFieldLikelihood(PBJ, shape, Δ, prior_range, k=k, gridsize=gridsize, tiling=[5,5])
%%time
%matplotlib inline
ax,like = AL.plot_corner(θ_ref=θ_target, image_data=target_data, return_like=True)
latexify(fig_height=5.37)
plt.show()
np.save('./128x128-gauss/example-field_theta=%d_%d'%(θ_target[0]*10, θ_target[1]*10), np.squeeze(target_data))
###Output
_____no_output_____
###Markdown
Gaussian approximation
###Code
@jit #partial(jax.jit, static_argnums=0)
def get_estimate(d):
if len(d.shape) == 1:
return IMNN.θ_fid + np.einsum(
"ij,kj,kl,l->i",
IMNN.invF,
IMNN.dμ_dθ,
IMNN.invC,
IMNN.model(IMNN.best_w, d, rng=rng) - IMNN.μ)
else:
return IMNN.θ_fid + np.einsum(
"ij,kj,kl,ml->mi",
IMNN.invF,
IMNN.dμ_dθ,
IMNN.invC,
IMNN.model(IMNN.best_w, d, rng=rng) - IMNN.μ)
estimates = IMNN.get_estimate(target_data) #[i.get_estimate(target_data) for i in IMNNs];
estimates
GAs = [GaussianApproximation(IMNN.get_estimate(target_data), IMNN.invF, prior)]
#GaussianApproximation(get_estimate(target_data), np.linalg.inv(f_expected), prior)]
%matplotlib inline
for i, (GA, label) in enumerate(zip(GAs, ['sim IMNN'])):
if i == 0:
ax = GA.marginal_plot(
axis_labels=[r"$A$", r"$B$"], label='on-the-fly IMNN', colours="C{}".format(i)
)
else:
GA.marginal_plot(ax=ax, label='sim IMNN', colours="C{}".format(i), ncol=8)
###Output
_____no_output_____
###Markdown
ABC
###Code
{**simulator_args, **{'squeeze':False}}
ABC = ApproximateBayesianComputation(
target_data, prior,
lambda A,B : simulator(A,B, simulator_args={**simulator_args, **{'squeeze':False}}),
IMNN.get_estimate, F=IMNN.F, gridsize=50
)
%%time
rng,abc_key = jax.random.split(rng)
ABC(rng=abc_key,
n_samples=int(1e3),
min_accepted=15000,
max_iterations=50000,
ϵ=0.05,
smoothing=0.);
ABC.parameters.accepted[0].shape
ABC.parameters.accepted[0][0]
#np.save("accepted.npy", ABC.parameters.accepted)
#ax = ABC.scatter_summaries(points=ABC.summaries.rejected, colours='red')
ABC.scatter_summaries( colours='blue')
likelihood, A_range, B_range = AL.get_likelihood(return_grid=True)
likelihoodA = scipy.special.softmax(np.real(likelihood)).sum(0) #np.real(likelihood).sum(0)
likelihoodA /= likelihoodA.sum() * (A_range[1] - A_range[0])
likelihoodB = scipy.special.softmax(np.real(likelihood)).sum(1) #np.real(likelihood).sum(1)
likelihoodB /= likelihoodB.sum() * (B_range[1] - B_range[0])
likelihood = scipy.special.softmax(np.real(likelihood))
sorted_marginal = np.sort(likelihood.flatten())[::-1]
cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
value = []
for level in [0.997, 0.95, 0.68]:
this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
if len(value) == 0:
value.append(this_value)
elif this_value <= value[-1]:
break
else:
value.append(this_value)
%matplotlib inline
#plt.style.use('default')
new_colors = [ '#2c0342', '#286d87', '#4fb49d', '#9af486']
#fig = plt.figure(constrained_layout=True, figsize=(3.41*1., 3.41*1.))
#fig,ax = plt.subplots(nrows=2, ncols=2, figsize=(3.37*2, 3.37*2))
#ax = fig.subplots(nrows=2, ncols=2)
fig,ax = plt.subplots(nrows=2, ncols=2, figsize=(3.41*1., 3.41*1.),
gridspec_kw={'height_ratios': [1, 1], 'width_ratios':[1,1]})
latexify(fig_width=3.41, fig_height=3.41)
# just to fiddle with the label
ax[0,0].plot(0.3, 0., color=new_colors[0],
marker='o', label='ABC')
cmap_reversed = matplotlib.cm.get_cmap('viridis_r')
ax = GAs[0].marginal_plot(ax=ax, colours='#00c133', #new_colors[1],
axis_labels=[r"$A$", r"$B$"], label="Gaussian Approximation", ncol=1,
linestyle='dotted')
ax[0,0].legend(framealpha=0.)
ax[0,1].imshow(np.squeeze(target_data), cmap='viridis')
ax[0,0].axvline(θ_target[0], linestyle='--', c='k')
ax[1,0].axvline(θ_target[0], linestyle='--', c='k')
ax[1,0].axhline(θ_target[1], linestyle='--', c='k')
ax[1,1].axhline(θ_target[1], linestyle='--', c='k', label=r'$\theta_{\rm target}$')
ax[1,0].set_xlabel(r'$A$')
ax[1,0].set_ylabel(r'$B$')
ax[0,0].axvline(θ_fid[0], linestyle='--', c='k', alpha=0.4)
ax[1,0].axvline(θ_fid[0], linestyle='--', c='k', alpha=0.4)
ax[1,0].axhline(θ_fid[1], linestyle='--', c='k', alpha=0.4)
ax[1,1].axhline(θ_fid[1], linestyle='--', c='k', alpha=0.4, label=r'$\theta_{\rm fid}$')
ax[1,1].legend(framealpha=0.)
# add in the likelihood estimate
ax[0, 0].plot(A_range, likelihoodA, color='#FF8D33', label='Analytic Likelihood', linestyle='--')
#ax[0,0].legend(framealpha=0.)
ax[0, 1].axis("off")
ax[1, 0].contour(A_range, B_range, likelihood, levels=value, colors='#FF8D33', linestyles='--')
ax[1, 1].plot(likelihoodB, B_range, color='#FF8D33', label='Analytic Likelihood', linestyle='--')
# ax = ABC.scatter_plot(ax=ax,
# colours=new_colors[0],
# axis_labels=[r"$A$", r"$B$"],
# s=8,
# label='ABC', bbox_to_anchor=None)
# ABC scatter plots
ax[0,0].hist(ABC.parameters.accepted[0][:, 0], color=new_colors[0], histtype='step', density=True)
ax[1,0].scatter(ABC.parameters.accepted[0][:, 0], ABC.parameters.accepted[0][:, 1], s=8, alpha=0.6,
c=np.log(ABC.distances.accepted[0]), cmap='Purples', edgecolors=None, linewidths=0, marker='.')
ax[1,1].hist(ABC.parameters.accepted[0][:, 1], color=new_colors[0],
histtype='step', density=True, orientation='horizontal')
#ax[1,0].legend(framealpha=0.)
ax[0,0].set_xlim(0.55, 1.1)
#ax[0,0].set_ylim(0., 10.1)
ax[1,0].set_xlim(0.55, 1.1)
ax[1,0].set_ylim(0.3, 0.9)
ax[1,1].set_ylim(0.3, 0.9)
ax[0,0].legend(framealpha=0., bbox_to_anchor=(1.08, 1.5), frameon=False)
ax[1,1].set_yticks([])
ax[1,1].set_xticks([])
ax[0,0].set_xticks([])
ax[0,0].set_yticks([])
#ax[0,0].set_ylabel(r'$\mathit{P}(A|\textbf{x})$')
#ax[1,1].set_xlabel(r'$\mathit{P}(B|\textbf{x})$')
plt.subplots_adjust(wspace=0.1, hspace=0.1)
#plt.tight_layout()
plt.savefig('/mnt/home/tmakinen/repositories/field-plots/128x128-inference-contours-3sig-nolab-col.png', dpi=400, bbox_inches='tight')
#plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
# save all the contours
# save all analytic calculations
fname = './128x128-gauss/'
np.save(fname + 'target_field_1', np.squeeze(target_data))
np.save(fname + 'analytic_likelihood', likelihood)
np.save(fname + 'ranges', np.stack((A_range, B_range), axis=0))
# save ABC posterior
np.save("./128x128-gauss/IMNN_accepted.npy", ABC.parameters.accepted)
# Create figures in Python that handle LaTeX, and save images to files in my
# preferred formatting. I typically place this code in the root of each of my
# projects, and import using:
# from latexify import *
# which will also run the latexify() function on the import.
# Based on code from https://nipunbatra.github.io/blog/2014/latexify.html
import matplotlib
import matplotlib.pyplot as plt
from math import sqrt
#Back-end to use depends on the system
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
# matplotlib.use('pgf')
# from matplotlib.backends.backend_pgf import FigureCanvasPgf
# matplotlib.backend_bases.register_backend('ps', FigureCanvasPgf)
import seaborn as sns
sns.set_style("white")
#my preferred palette. From
#https://seaborn.pydata.org/tutorial/color_palettes.html: "The cubehelix color
#palette system makes sequential palettes with a linear increase or decrease in
#brightness and some variation in hue. This means that the information in your
#colormap will be preserved when converted to black and white (for printing) or
#when viewed by a colorblind individual."
# I typically set the number of colors (below, 8) to the distinct colors I need
# in a given plot, so as to use the full range.
sns.set_palette(sns.color_palette("cubehelix", 8))
# The following is the latexify function. It allows you to create 2 column or 1
# column figures. You may also wish to alter the height or width of the figure.
# The default settings are good for most cases. You may also change the
# parameters such as labelsize and fontsize based on your classfile.
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1, 2])
if fig_width is None:
fig_width = 6.9 if columns == 1 else 13.8 # width in inches #3.39
if fig_height is None:
golden_mean = (sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_height = fig_width * golden_mean # height in inches
MAX_HEIGHT_INCHES = 16.0
if fig_height > MAX_HEIGHT_INCHES:
print(("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches."))
fig_height = MAX_HEIGHT_INCHES
params = {
# 'backend': 'ps',
# 'pgf.rcfonts': False,
# 'pgf.preamble': ['\\usepackage{gensymb}', '\\usepackage[dvipsnames]{xcolor}'],
# "pgf.texsystem": "pdflatex",
# 'text.latex.preamble': ['\\usepackage{gensymb}', '\\usepackage[dvipsnames]{xcolor}'],
'text.latex.preamble': '\\usepackage{mathptmx}',
#values below are useful defaults. individual plot fontsizes are
#modified as necessary.
'axes.labelsize': 8, # fontsize for x and y labels
'axes.titlesize': 8,
'font.size': 8,
'legend.fontsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif',
'font.serif': 'Times',
'lines.linewidth': 1.5,
'lines.markersize':1,
'xtick.major.pad' : 2,
'ytick.major.pad' : 2,
'axes.xmargin' : .0, # x margin. See `axes.Axes.margins`
'axes.ymargin' : .0, # y margin See `axes.Axes.margins`
}
matplotlib.rcParams.update(params)
def saveimage(name, fig = plt, extension = 'pdf', folder = 'plots/'):
sns.despine()
#Minor ticks off by default in matplotlib
# plt.minorticks_off()
#grid being off is the default for seaborn white style, so not needed.
# plt.grid(False, axis = "x")
# plt.grid(False, axis = "y")
fig.savefig('{}{}.{}'.format(folder,name, extension), bbox_inches = 'tight')
latexify()
###Output
_____no_output_____ |
notebooks/analysing-text-statistics.ipynb | ###Markdown
 Analysing Text StatisticsLet's try out some statistical analysis of text, including [natural language processing](https://en.wikipedia.org/wiki/Natural_language_processing), using a [public domain](https://en.wikipedia.org/wiki/Public_domain) book from [Project Gutenberg](http://www.gutenberg.org).The example we'll use is the [most downloaded](http://www.gutenberg.org/browse/scores/top) book, [Pride and Prejudice by Jane Austen](http://www.gutenberg.org/ebooks/1342). Running this first code cell will import and display the contents of the book.Feel free to change the link in the following code cell if you'd like to explore another book, but make sure you are using the `Plain Text UTF-8` link.
###Code
gutenberg_text_link = 'http://gutenberg.org/files/1342/1342-0.txt'
import requests
r = requests.get(gutenberg_text_link) # get the online book file
r.encoding = 'utf-8' # specify the type of text encoding in the file
text = r.text.split('***')[2] # get the part after the header
text = text.replace("’","'").replace("“",'"').replace("”",'"') # replace any 'smart quotes'
book_title = r.text[r.text.index('Title:')+7:r.text.index('Author:')-4] # find the book title
print(text)
###Output
_____no_output_____
###Markdown
Making a DataFrame of ChaptersNow that we have the text of the book, let's split it into chapters. We'll use the Python [library](https://en.wikipedia.org/wiki/Library_(computing)) called [pandas](https://pandas.pydata.org) to create a [dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) that includes the text and length of each chapter (number of characters, including spaces and punctuation).
###Code
import pandas as pd
chapters = pd.DataFrame() # create an empty data frame that we will append to
for chapter in text.split('Chapter'):
if len(chapter)>500: # so that we are getting actual book chapters
chapter = chapter.replace('\r','').replace('\n','') # delete the 'new line' characters
chapters = chapters.append({'Chapter Text':chapter, 'Length':len(chapter)}, ignore_index=True)
chapters.index=chapters.index+1 # set the index to the chapter number
chapters
###Output
_____no_output_____
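###Markdown
Note: `DataFrame.append` (used in the cell above) has been deprecated and removed in newer pandas releases. A hedged equivalent of the same loop, collecting rows in a list and building the frame once, might look like this:
###Code
# assumes `text` from the cell above; builds the same chapters DataFrame without DataFrame.append
rows = []
for chapter in text.split('Chapter'):
    if len(chapter) > 500:                                      # so that we are getting actual book chapters
        chapter = chapter.replace('\r', '').replace('\n', '')   # delete the 'new line' characters
        rows.append({'Chapter Text': chapter, 'Length': len(chapter)})
chapters = pd.DataFrame(rows)
chapters.index = chapters.index + 1                             # set the index to the chapter number
chapters
###Output
_____no_output_____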
###Markdown
Visualizing Chapter LengthsFrom that dataframe we can create a bar graph of chapter lengths.
###Code
import plotly.express as px
px.bar(chapters, y='Length', title='Characters per Chapter in '+book_title).update_xaxes(title='Chapter')
###Output
_____no_output_____
###Markdown
Counting Words by TypeWe'll use the [spaCy](https://spacy.io) natural language processing library to identify the [parts of speech](https://spacy.io/api/annotationpos-tagging) in the text. For this example we'll just look at adjectives, verbs, nouns, and proper nouns, but you can add to the list on the first line in the code cell.This will take a while to run, and will result in a dataframe containing the number and percent of each of those parts of speech in each chapter.
###Code
word_types = ['ADJ', 'VERB', 'NOUN', 'PROPN'] # https://spacy.io/api/annotation#pos-tagging
# uncomment the following two installation lines if you get errors
#!pip install spacy --user
#!python -m spacy download en_core_web_sm
import en_core_web_sm
nlp = en_core_web_sm.load() # set up natural language processing
parts_of_speech_df = pd.DataFrame(columns=word_types) # create an empty dataframe with column names
for i in range(1,len(chapters)+1): # iterate through the chapters dataframe
parts_of_speech_list = [] # create an empty list
word_count = 0
for token in nlp(chapters['Chapter Text'][i]): # loop through each token in the chapter
word_count += 1 # increment the word counter
part_of_speech = token.pos_
if part_of_speech in word_types: # if it is in the list of types we made
parts_of_speech_list.append(part_of_speech)
word_type_count = {} # create an empty dictionary
for word_type in word_types:
word_type_count.update({word_type:parts_of_speech_list.count(word_type)}) # add to that dictionary
word_type_count.update({'Words':word_count})
parts_of_speech_df = parts_of_speech_df.append(word_type_count, ignore_index=True) # add to dataframe
parts_of_speech_df = parts_of_speech_df.astype(int) # convert values to integers
parts_of_speech_df.index = parts_of_speech_df.index+1 # set the dataframe index to Chapter number
parts_of_speech_df['Adjectives %'] = parts_of_speech_df['ADJ']/parts_of_speech_df['Words']*100
parts_of_speech_df['Verbs %'] = parts_of_speech_df['VERB']/parts_of_speech_df['Words']*100
parts_of_speech_df['Nouns %'] = parts_of_speech_df['NOUN']/parts_of_speech_df['Words']*100
parts_of_speech_df['Proper Nouns %'] = parts_of_speech_df['PROPN']/parts_of_speech_df['Words']*100
parts_of_speech_df
###Output
_____no_output_____
###Markdown
We can also plot the proportional occurrences of those parts of speech.
###Code
px.line(parts_of_speech_df, y=['Adjectives %', 'Verbs %', 'Nouns %', 'Proper Nouns %'],
title='Proportion of Word Types in '+book_title).update_xaxes(title='Chapter')
###Output
_____no_output_____
###Markdown
Most Common VerbsTo get an idea of the most common words in the text we can look at a part of speech, verbs for example, and count which are the most frequent.This will also take some time to run.
###Code
word_type = 'VERB'
from collections import Counter
words_df = pd.DataFrame()
for i in range(1,len(chapters)+1): # loop through the chapters dataframe
word_list = []
for token in nlp(chapters['Chapter Text'][i]):
if token.pos_ == word_type: # if the token is a VERB
word_list.append(token.lemma_.strip().lower())
words_df = words_df.append(Counter(word_list), ignore_index=True) # add the list to the dataframe
words_df.index = words_df.index+1 # set the dataframe index to Chapter number
words_df.sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
In our example text there are 1233 unique verbs. Let's look at the `10` most common verbs.
###Code
words_df.sum().sort_values(ascending=False).head(10)
###Output
_____no_output_____
###Markdown
We can also choose a verb and plot its frequency by chapter as a percent of the total number of words.
###Code
word = 'say'
words_df['%'] = words_df[word]/parts_of_speech_df['Words']*100 # calculate the percent of the words in each chapter
px.bar(words_df, y='%', title='Percent Frequency of the Word "'+word+'" by Chapter in '+book_title).update_xaxes(title='Chapter').update_yaxes(title='Percent Freqency')
###Output
_____no_output_____
###Markdown
Most Common NamesWe can also look at character names and how often they occur in each chapter. The spaCy library does a fairly good job of identifying names, but you may see some false positives (words that aren't actually character names).
###Code
names_df = pd.DataFrame()
for i in range(1,len(chapters)+1):
names_list = []
for token in nlp(chapters['Chapter Text'][i]):
#if token.pos_ == 'PROPN':
if token.ent_type_ == 'PERSON': # seems to be more reliable than part_of_speech == proper_noun
names_list.append(token.text)
names_df = names_df.append(Counter(names_list), ignore_index=True)
names_df
###Output
_____no_output_____
###Markdown
List of Character NamesWe can check out the list of words identified as names.
###Code
for name in names_df.columns:
print(name)
###Output
_____no_output_____
###Markdown
Cleaning DataIf you'd like to remove columns that may categorized incorrectly or are uncommon, we can drop columns with fewer than five occurrences.
###Code
for column in names_df.columns:
if names_df[column].sum() < 5: # if there are fewer than five occurences
names_df.drop(columns=column, inplace=True)
names_df
###Output
_____no_output_____
###Markdown
Visualization of Name FrequenciesLet's make a bar graph of the top `20` most frequently mentioned characters.
###Code
d = names_df.sum().sort_values(ascending=False).head(20)
px.bar(d, title='Character Name Frequencies in '+book_title).update_yaxes(title='Frequency').update_xaxes(title='Name').update(layout_showlegend=False)
###Output
_____no_output_____
###Markdown
Cumulative Name Frequencies over TimeSince we have the text divided into chapters, let's visualize the cumulative mentions of the top `3` character names throughout the book.
###Code
how_many_names = 3
main_character_names = names_df.sum().sort_values(ascending=False).head(how_many_names).index # list the top three
main_characters = names_df[main_character_names].fillna(value=0) # create a new dataframe for top three
main_characters.index = main_characters.index+1
title = 'Cumulative Character Mentions over Time in '+book_title
px.line(main_characters.cumsum(), title=title).update_yaxes(title='Total Mentions').update_xaxes(title='Chapter')
###Output
_____no_output_____
###Markdown
Sentiment Analysis[Sentiment analysis](https://en.wikipedia.org/wiki/Sentiment_analysis) is categorizing text based on its tone (negative, neutral, or positive).For this we will use the [vaderSentiment](https://github.com/cjhutto/vaderSentiment) library, then visualize the positive and negative sentiment by chapter.
###Code
#!pip install vaderSentiment --user
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
sentiment_df = pd.DataFrame()
for i in range(1,len(chapters)+1):
senti = analyzer.polarity_scores(chapters['Chapter Text'][i]) # analyze the sentiment of chapter
sentiment_df = sentiment_df.append(senti, ignore_index=True) # add to dataframe
sentiment_df['neg'] = -sentiment_df['neg'] # change the sign of the negative sentiment
sentiment_df.index = sentiment_df.index+1
px.bar(sentiment_df, y=['pos', 'neg'], title=book_title+' Sentiment Analysis by Chapter').update(layout_showlegend=False)
###Output
_____no_output_____
###Markdown
ReadabilityOne last library to introduce, [textstat](https://github.com/shivam5992/textstat) for checking the readability, complexity, and grade level of text. It includes a [number of functions](https://github.com/shivam5992/textstat#list-of-functions), but we'll only use a few of them. This will take a while to run.
###Code
#!pip install --user textstat
import textstat
textstats = pd.DataFrame()
for i in range(1,len(chapters)+1):
text = chapters['Chapter Text'][i]
textstats_data = {'Flesch-Kincaid Grade':textstat.flesch_kincaid_grade(text),
'Gunning Fog Index':textstat.gunning_fog(text),
'Linsear Write Formula':textstat.linsear_write_formula(text),
'Readability':textstat.text_standard(text, float_output=True)}
textstats = textstats.append(textstats_data, ignore_index=True)
textstats.index = textstats.index+1
textstats
###Output
_____no_output_____
###Markdown
Now that we have a dataframe of readability information, we can plot and describe the readability statistics.
###Code
px.line(textstats, y='Readability', title=book_title+' Readability by Chapter').update_xaxes(title='Chapter')
textstats.describe()
###Output
_____no_output_____ |
src/BagofVisualWords.ipynb | ###Markdown
###Code
import cv2
import numpy as np
import pickle as cPickle
from sklearn.cluster import MiniBatchKMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Let us first read the train and test files
###Code
train_images_filenames = cPickle.load(open('train_images_filenames.dat','rb'))
test_images_filenames = cPickle.load(open('test_images_filenames.dat','rb'))
train_labels = cPickle.load(open('train_labels.dat','rb'))
test_labels = cPickle.load(open('test_labels.dat','rb'))
train_images_filenames[12]
###Output
_____no_output_____
###Markdown
We create a SIFT object detector and descriptor
###Code
SIFTdetector = cv2.xfeatures2d.SIFT_create(nfeatures=300)
###Output
_____no_output_____
###Markdown
We compute the SIFT descriptors for all the train images and subsequently build a numpy array with all the descriptors stacked together
###Code
Train_descriptors = []
Train_label_per_descriptor = []
for filename,labels in zip(train_images_filenames,train_labels):
ima=cv2.imread(filename)
gray=cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
kpt,des=SIFTdetector.detectAndCompute(gray,None)
Train_descriptors.append(des)
Train_label_per_descriptor.append(labels)
D=np.vstack(Train_descriptors)
###Output
_____no_output_____
###Markdown
We now compute a k-means clustering on the descriptor space
###Code
k = 128
codebook = MiniBatchKMeans(n_clusters=k, verbose=False, batch_size=k * 20,compute_labels=False,reassignment_ratio=10**-4,random_state=42)
codebook.fit(D)
###Output
_____no_output_____
###Markdown
And, for each train image, we project each keypoint descriptor to its closest visual word. We represent each of the images with the frequency of each visual word.
###Code
visual_words=np.zeros((len(Train_descriptors),k),dtype=np.float32)
for i in range(len(Train_descriptors)):
words=codebook.predict(Train_descriptors[i])
visual_words[i,:]=np.bincount(words,minlength=k)
###Output
_____no_output_____
###Markdown
We build a k-nn classifier and train it with the train descriptors
###Code
knn = KNeighborsClassifier(n_neighbors=5,n_jobs=-1,metric='euclidean')
knn.fit(visual_words, train_labels)
###Output
_____no_output_____
###Markdown
We end up computing the test descriptors and compute the accuracy of the model
###Code
visual_words_test=np.zeros((len(test_images_filenames),k),dtype=np.float32)
for i in range(len(test_images_filenames)):
filename=test_images_filenames[i]
ima=cv2.imread(filename)
gray=cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
kpt,des=SIFTdetector.detectAndCompute(gray,None)
words=codebook.predict(des)
visual_words_test[i,:]=np.bincount(words,minlength=k)
accuracy = 100*knn.score(visual_words_test, test_labels)
print(accuracy)
###Output
_____no_output_____
###Markdown
Dimensionality reduction, with PCA and LDA
###Code
pca = PCA(n_components=64)
VWpca = pca.fit_transform(visual_words)
knnpca = KNeighborsClassifier(n_neighbors=5,n_jobs=-1,metric='euclidean')
knnpca.fit(VWpca, train_labels)
vwtestpca = pca.transform(visual_words_test)
accuracy = 100*knnpca.score(vwtestpca, test_labels)
print(accuracy)
lda = LinearDiscriminantAnalysis(n_components=64)
VWlda = lda.fit_transform(visual_words,train_labels)
knnlda = KNeighborsClassifier(n_neighbors=5,n_jobs=-1,metric='euclidean')
knnlda.fit(VWlda, train_labels)
vwtestlda = lda.transform(visual_words_test)
accuracy = 100*knnlda.score(vwtestlda, test_labels)
print(accuracy)
###Output
_____no_output_____ |
python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/03_03/Final/.ipynb_checkpoints/Date Arithmetic-checkpoint.ipynb | ###Markdown
Date Arithmetic

documentation: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#timeseries-offsets

| Type      | Description                                                        |
|-----------|--------------------------------------------------------------------|
| date      | Store calendar date (year, month, day) using a Gregorian Calendar  |
| datetime  | Store both date and time                                           |
| timedelta | Difference between two datetime values                             |

common date arithmetic operations
- calculate differences between dates
- generate sequences of dates and time spans
- convert time series to a particular frequency

Date, time, functions

documentation: http://pandas.pydata.org/pandas-docs/stable/api.html#top-level-dealing-with-datetimelike

| Function                                           | Description                                                                   |
|----------------------------------------------------|-------------------------------------------------------------------------------|
| to_datetime(*args, **kwargs)                       | Convert argument to datetime.                                                 |
| to_timedelta(*args, **kwargs)                      | Convert argument to timedelta                                                 |
| date_range([start, end, periods, freq, tz, ...])   | Return a fixed frequency datetime index, with day (calendar) as the default   |
| bdate_range([start, end, periods, freq, tz, ...])  | Return a fixed frequency datetime index, with business day as the default     |
| period_range([start, end, periods, freq, name])    | Return a fixed frequency period index, with day (calendar) as the default     |
| timedelta_range([start, end, periods, freq, ...])  | Return a fixed frequency timedelta index, with day as the default             |
| infer_freq(index[, warn])                          | Infer the most likely frequency given the input index.                        |
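A quick, hedged illustration of the helpers in the table above (dates chosen arbitrarily; expected outputs shown in comments):

```python
import pandas as pd

pd.to_datetime('2016-05-30')                      # Timestamp('2016-05-30 00:00:00')
pd.to_timedelta('2 days 6 hours')                 # Timedelta('2 days 06:00:00')
pd.date_range('2016-05-30', periods=4, freq='D')  # four consecutive calendar days
pd.bdate_range('2016-05-30', periods=4)           # four consecutive business days
pd.period_range('2016-05', periods=3, freq='M')   # three monthly periods
```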
###Code
import pandas as pd
import numpy as np
from datetime import datetime
###Output
_____no_output_____
###Markdown
now()
###Code
now = datetime.now()
now
now.year, now.month, now.day
###Output
_____no_output_____
###Markdown
deltasource: http://pandas.pydata.org/pandas-docs/stable/timedeltas.html
###Code
delta = now - datetime(2001, 1, 1)
delta
delta.days
###Output
_____no_output_____
###Markdown
Parsing Timedelta from string
###Code
pd.Timedelta('4 days 7 hours')
###Output
_____no_output_____
###Markdown
named keyword arguments
###Code
# note: these MUST be specified as keyword arguments
pd.Timedelta(days=1, seconds=1)
###Output
_____no_output_____
###Markdown
integers with a unit
###Code
pd.Timedelta(1, unit='d')
###Output
_____no_output_____
###Markdown
create a range of dates from Timedelta
###Code
us_memorial_day = datetime(2016, 5, 30)
print(us_memorial_day)
us_labor_day = datetime(2016, 9, 5)
print(us_labor_day)
us_summer_time = us_labor_day - us_memorial_day
print(us_summer_time)
type(us_summer_time)
us_summer_time_range = pd.date_range(us_memorial_day, periods=us_summer_time.days, freq='D')
###Output
_____no_output_____
###Markdown
summer_time time series with random data
###Code
us_summer_time_time_series = pd.Series(np.random.randn(us_summer_time.days), index=us_summer_time_range)
us_summer_time_time_series.tail()
###Output
_____no_output_____ |
Section 2: Elementary machine learning algorithms/linear_regression/05_03_Hyperparameters_and_Model_Validation.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* Hyperparameters and Model Validation In the previous section, we saw the basic recipe for applying a supervised machine learning model:1. Choose a class of model2. Choose model hyperparameters3. Fit the model to the training data4. Use the model to predict labels for new dataThe first two pieces of this—the choice of model and choice of hyperparameters—are perhaps the most important part of using these tools and techniques effectively.In order to make an informed choice, we need a way to *validate* that our model and our hyperparameters are a good fit to the data.While this may sound simple, there are some pitfalls that you must avoid to do this effectively. Thinking about Model ValidationIn principle, model validation is very simple: after choosing a model and its hyperparameters, we can estimate how effective it is by applying it to some of the training data and comparing the prediction to the known value.The following sections first show a naive approach to model validation and why itfails, before exploring the use of holdout sets and cross-validation for more robustmodel evaluation. Model validation the wrong wayLet's demonstrate the naive approach to validation using the Iris data, which we saw in the previous section.We will start by loading the data:
###Code
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
###Output
_____no_output_____
###Markdown
Next we choose a model and hyperparameters. Here we'll use a *k*-neighbors classifier with ``n_neighbors=1``.This is a very simple and intuitive model that says "the label of an unknown point is the same as the label of its closest training point:"
###Code
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=1)
###Output
_____no_output_____
###Markdown
Then we train the model, and use it to predict labels for data we already know:
###Code
model.fit(X, y)
y_model = model.predict(X)
###Output
_____no_output_____
###Markdown
Finally, we compute the fraction of correctly labeled points:
###Code
from sklearn.metrics import accuracy_score
accuracy_score(y, y_model)
###Output
_____no_output_____
###Markdown
We see an accuracy score of 1.0, which indicates that 100% of points were correctly labeled by our model!But is this truly measuring the expected accuracy? Have we really come upon a model that we expect to be correct 100% of the time?As you may have gathered, the answer is no.In fact, this approach contains a fundamental flaw: *it trains and evaluates the model on the same data*.Furthermore, the nearest neighbor model is an *instance-based* estimator that simply stores the training data, and predicts labels by comparing new data to these stored points: except in contrived cases, it will get 100% accuracy *every time!* Model validation the right way: Holdout setsSo what can be done?A better sense of a model's performance can be found using what's known as a *holdout set*: that is, we hold back some subset of the data from the training of the model, and then use this holdout set to check the model performance.This splitting can be done using the ``train_test_split`` utility in Scikit-Learn:
###Code
import sklearn
from sklearn.model_selection import train_test_split
# split the data with 50% in each set
X1, X2, y1, y2 = train_test_split(X, y, random_state=0,
train_size=0.5)
# fit the model on one set of data
model.fit(X1, y1)
# evaluate the model on the second set of data
y2_model = model.predict(X2)
accuracy_score(y2, y2_model)
###Output
_____no_output_____
###Markdown
We see here a more reasonable result: the nearest-neighbor classifier is about 90% accurate on this hold-out set.The hold-out set is similar to unknown data, because the model has not "seen" it before. Model validation via cross-validationOne disadvantage of using a holdout set for model validation is that we have lost a portion of our data to the model training.In the above case, half the dataset does not contribute to the training of the model!This is not optimal, and can cause problems – especially if the initial set of training data is small.One way to address this is to use *cross-validation*; that is, to do a sequence of fits where each subset of the data is used both as a training set and as a validation set.Visually, it might look something like this:[figure source in Appendix](/content/drive/MyDrive/figures/05.03-2-fold-CV.png)Here we do two validation trials, alternately using each half of the data as a holdout set.Using the split data from before, we could implement it like this:
###Code
y2_model = model.fit(X1, y1).predict(X2)
y1_model = model.fit(X2, y2).predict(X1)
accuracy_score(y1, y1_model), accuracy_score(y2, y2_model)
###Output
_____no_output_____
###Markdown
What comes out are two accuracy scores, which we could combine (by, say, taking the mean) to get a better measure of the global model performance.This particular form of cross-validation is a *two-fold cross-validation*—that is, one in which we have split the data into two sets and used each in turn as a validation set.We could expand on this idea to use even more trials, and more folds in the data—for example, here is a visual depiction of five-fold cross-validation:[figure source in Appendix](06.00-Figure-Code.ipynb5-Fold-Cross-Validation)Here we split the data into five groups, and use each of them in turn to evaluate the model fit on the other 4/5 of the data.This would be rather tedious to do by hand, and so we can use Scikit-Learn's ``cross_val_score`` convenience routine to do it succinctly:
###Code
from sklearn.model_selection import cross_val_score
cross_val_score(model, X, y, cv=5)
###Output
_____no_output_____
###Markdown
Repeating the validation across different subsets of the data gives us an even better idea of the performance of the algorithm.Scikit-Learn implements a number of cross-validation schemes that are useful in particular situations; these are implemented via iterators in the ``model_selection`` module.For example, we might wish to go to the extreme case in which our number of folds is equal to the number of data points: that is, we train on all points but one in each trial.This type of cross-validation is known as *leave-one-out* cross-validation, and can be used as follows:
###Code
from sklearn.model_selection import LeaveOneOut
scores = cross_val_score(model, X, y, cv=LeaveOneOut())
scores
###Output
_____no_output_____
###Markdown
Because we have 150 samples, the leave-one-out cross-validation yields scores for 150 trials, and each score indicates either a successful (1.0) or unsuccessful (0.0) prediction.Taking the mean of these gives an estimate of the accuracy:
###Code
scores.mean()
###Output
_____no_output_____
###Markdown
Other cross-validation schemes can be used similarly.For a description of what is available in Scikit-Learn, use IPython to explore the ``sklearn.model_selection`` submodule, or take a look at Scikit-Learn's online [cross-validation documentation](http://scikit-learn.org/stable/modules/cross_validation.html). Selecting the Best ModelNow that we've seen the basics of validation and cross-validation, we will go into a little more depth regarding model selection and selection of hyperparameters.These issues are some of the most important aspects of the practice of machine learning, and I find that this information is often glossed over in introductory machine learning tutorials.Of core importance is the following question: *if our estimator is underperforming, how should we move forward?*There are several possible answers:- Use a more complicated/more flexible model- Use a less complicated/less flexible model- Gather more training samples- Gather more data to add features to each sampleThe answer to this question is often counter-intuitive.In particular, sometimes using a more complicated model will give worse results, and adding more training samples may not improve your results!The ability to determine what steps will improve your model is what separates the successful machine learning practitioners from the unsuccessful. The Bias-variance trade-offFundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between *bias* and *variance*.Consider the following figure, which presents two regression fits to the same dataset:[figure source in Appendix](06.00-Figure-Code.ipynbBias-Variance-Tradeoff)It is clear that neither of these models is a particularly good fit to the data, but they fail in different ways.The model on the left attempts to find a straight-line fit through the data.Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well.Such a model is said to *underfit* the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high *bias*.The model on the right attempts to fit a high-order polynomial through the data.Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data.Such a model is said to *overfit* the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high *variance*. To look at this in another light, consider what happens if we use these two models to predict the y-value for some new data.In the following diagrams, the red/lighter points indicate data that is omitted from the training set:[figure source in Appendix](06.00-Figure-Code.ipynbBias-Variance-Tradeoff-Metrics)The score here is the $R^2$ score, or [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), which measures how well a model performs relative to a simple mean of the target values. 
$R^2=1$ indicates a perfect match, $R^2=0$ indicates the model does no better than simply taking the mean of the data, and negative values mean even worse models.From the scores associated with these two models, we can make an observation that holds more generally:- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set. If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:[figure source in Appendix](06.00-Figure-Code.ipynbValidation-Curve)The diagram shown here is often called a *validation curve*, and we see the following essential features:- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.The means of tuning the model complexity varies from model to model; when we discuss individual models in depth in later sections, we will see how each model allows for such tuning. Validation curves in Scikit-LearnLet's look at an example of using cross-validation to compute the validation curve for a class of models.Here we will use a *polynomial regression* model: this is a generalized linear model in which the degree of the polynomial is a tunable parameter.For example, a degree-1 polynomial fits a straight line to the data; for model parameters $a$ and $b$:$$y = ax + b$$A degree-3 polynomial fits a cubic curve to the data; for model parameters $a, b, c, d$:$$y = ax^3 + bx^2 + cx + d$$We can generalize this to any number of polynomial features.In Scikit-Learn, we can implement this with a simple linear regression combined with the polynomial preprocessor.We will use a *pipeline* to string these operations together (we will discuss polynomial features and pipelines more fully in [Feature Engineering](05.04-Feature-Engineering.ipynb)):
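Before defining that pipeline, here is a small numerical illustration of the $R^2$ score described above (a minimal sketch, assuming only NumPy and scikit-learn, with made-up target values):

```python
import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, 5.0, 7.0, 9.0])

# A perfect prediction gives R^2 = 1
print(r2_score(y_true, y_true))                                # 1.0

# Always predicting the mean of the targets gives R^2 = 0
print(r2_score(y_true, np.full_like(y_true, y_true.mean())))   # 0.0

# A prediction that does worse than the mean gives a negative R^2
print(r2_score(y_true, y_true[::-1]))                          # -3.0
```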
###Code
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
###Output
_____no_output_____
###Markdown
Now let's create some data to which we will fit our model:
###Code
import numpy as np
def make_data(N, err=1.0, rseed=1):
# randomly sample the data
rng = np.random.RandomState(rseed)
X = rng.rand(N, 1) ** 2
y = 10 - 1. / (X.ravel() + 0.1)
if err > 0:
y += err * rng.randn(N)
return X, y
X, y = make_data(40)
###Output
_____no_output_____
###Markdown
We can now visualize our data, along with polynomial fits of several degrees:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set() # plot formatting
X_test = np.linspace(-0.1, 1.1, 500)[:, None]
plt.scatter(X.ravel(), y, color='black')
axis = plt.axis()
for degree in [1, 3, 5]:
y_test = PolynomialRegression(degree).fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test, label='degree={0}'.format(degree))
plt.xlim(-0.1, 1.0)
plt.ylim(-2, 12)
plt.legend(loc='best');
###Output
_____no_output_____
###Markdown
The knob controlling model complexity in this case is the degree of the polynomial, which can be any non-negative integer.A useful question to answer is this: what degree of polynomial provides a suitable trade-off between bias (under-fitting) and variance (over-fitting)?We can make progress in this by visualizing the validation curve for this particular data and model; this can be done straightforwardly using the ``validation_curve`` convenience routine provided by Scikit-Learn.Given a model, data, parameter name, and a range to explore, this function will automatically compute both the training score and validation score across the range:
###Code
from sklearn.model_selection import learning_curve, validation_curve
degree = np.arange(0, 21)
train_score, val_score = validation_curve(PolynomialRegression(), X, y,
                                           param_name='polynomialfeatures__degree',
                                           param_range=degree, cv=5)
plt.plot(degree, np.median(train_score, 1), color='blue', label='training score')
plt.plot(degree, np.median(val_score, 1), color='red', label='validation score')
plt.legend(loc='best')
plt.ylim(0, 1)
plt.xlabel('degree')
plt.ylabel('score');
###Output
_____no_output_____
###Markdown
This shows precisely the qualitative behavior we expect: the training score is everywhere higher than the validation score; the training score is monotonically improving with increased model complexity; and the validation score reaches a maximum before dropping off as the model becomes over-fit.From the validation curve, we can read-off that the optimal trade-off between bias and variance is found for a third-order polynomial; we can compute and display this fit over the original data as follows:
###Code
plt.scatter(X.ravel(), y)
lim = plt.axis()
y_test = PolynomialRegression(3).fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test);
plt.axis(lim);
###Output
_____no_output_____
###Markdown
Notice that finding this optimal model did not actually require us to compute the training score, but examining the relationship between the training score and validation score can give us useful insight into the performance of the model. Learning CurvesOne important aspect of model complexity is that the optimal model will generally depend on the size of your training data.For example, let's generate a new dataset with a factor of five more points:
###Code
X2, y2 = make_data(200)
plt.scatter(X2.ravel(), y2);
###Output
_____no_output_____
###Markdown
We will duplicate the preceding code to plot the validation curve for this larger dataset; for reference let's over-plot the previous results as well:
###Code
degree = np.arange(21)
train_score2, val_score2 = validation_curve(PolynomialRegression(), X2, y2,
                                             param_name='polynomialfeatures__degree',
                                             param_range=degree, cv=7)
plt.plot(degree, np.median(train_score2, 1), color='blue', label='training score')
plt.plot(degree, np.median(val_score2, 1), color='red', label='validation score')
plt.plot(degree, np.median(train_score, 1), color='blue', alpha=0.3, linestyle='dashed')
plt.plot(degree, np.median(val_score, 1), color='red', alpha=0.3, linestyle='dashed')
plt.legend(loc='lower center')
plt.ylim(0, 1)
plt.xlabel('degree')
plt.ylabel('score');
###Output
_____no_output_____
###Markdown
The solid lines show the new results, while the fainter dashed lines show the results of the previous smaller dataset.It is clear from the validation curve that the larger dataset can support a much more complicated model: the peak here is probably around a degree of 6, but even a degree-20 model is not seriously over-fitting the data—the validation and training scores remain very close.Thus we see that the behavior of the validation curve has not one but two important inputs: the model complexity and the number of training points.It is often useful to explore the behavior of the model as a function of the number of training points, which we can do by using increasingly larger subsets of the data to fit our model.A plot of the training/validation score with respect to the size of the training set is known as a *learning curve.*The general behavior we would expect from a learning curve is this:- A model of a given complexity will *overfit* a small dataset: this means the training score will be relatively high, while the validation score will be relatively low.- A model of a given complexity will *underfit* a large dataset: this means that the training score will decrease, but the validation score will increase.- A model will never, except by chance, give a better score to the validation set than the training set: this means the curves should keep getting closer together but never cross.With these features in mind, we would expect a learning curve to look qualitatively like that shown in the following figure: [figure source in Appendix](06.00-Figure-Code.ipynbLearning-Curve) The notable feature of the learning curve is the convergence to a particular score as the number of training samples grows.In particular, once you have enough points that a particular model has converged, *adding more training data will not help you!*The only way to increase model performance in this case is to use another (often more complex) model. Learning curves in Scikit-LearnScikit-Learn offers a convenient utility for computing such learning curves from your models; here we will compute a learning curve for our original dataset with a second-order polynomial model and a ninth-order polynomial:
###Code
from sklearn.model_selection import learning_curve
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
for i, degree in enumerate([2, 9]):
N, train_lc, val_lc = learning_curve(PolynomialRegression(degree),
X, y, cv=7,
train_sizes=np.linspace(0.3, 1, 25))
ax[i].plot(N, np.mean(train_lc, 1), color='blue', label='training score')
ax[i].plot(N, np.mean(val_lc, 1), color='red', label='validation score')
ax[i].hlines(np.mean([train_lc[-1], val_lc[-1]]), N[0], N[-1],
color='gray', linestyle='dashed')
ax[i].set_ylim(0, 1)
ax[i].set_xlim(N[0], N[-1])
ax[i].set_xlabel('training size')
ax[i].set_ylabel('score')
ax[i].set_title('degree = {0}'.format(degree), size=14)
ax[i].legend(loc='best')
###Output
_____no_output_____
###Markdown
This is a valuable diagnostic, because it gives us a visual depiction of how our model responds to increasing training data.In particular, when your learning curve has already converged (i.e., when the training and validation curves are already close to each other) *adding more training data will not significantly improve the fit!*This situation is seen in the left panel, with the learning curve for the degree-2 model.The only way to increase the converged score is to use a different (usually more complicated) model.We see this in the right panel: by moving to a much more complicated model, we increase the score of convergence (indicated by the dashed line), but at the expense of higher model variance (indicated by the difference between the training and validation scores).If we were to add even more data points, the learning curve for the more complicated model would eventually converge.Plotting a learning curve for your particular choice of model and dataset can help you to make this type of decision about how to move forward in improving your analysis. Validation in Practice: Grid SearchThe preceding discussion is meant to give you some intuition into the trade-off between bias and variance, and its dependence on model complexity and training set size.In practice, models generally have more than one knob to turn, and thus plots of validation and learning curves change from lines to multi-dimensional surfaces.In these cases, such visualizations are difficult and we would rather simply find the particular model that maximizes the validation score.Scikit-Learn provides automated tools to do this in the grid search module.Here is an example of using grid search to find the optimal polynomial model.We will explore a three-dimensional grid of model features; namely the polynomial degree, the flag telling us whether to fit the intercept, and the flag telling us whether to normalize the problem.This can be set up using Scikit-Learn's ``GridSearchCV`` meta-estimator:
###Code
from sklearn.model_selection import GridSearchCV
param_grid = {'polynomialfeatures__degree': np.arange(21),
'linearregression__fit_intercept': [True, False],
'linearregression__normalize': [True, False]}
grid = GridSearchCV(PolynomialRegression(), param_grid, cv=7)
###Output
_____no_output_____
###Markdown
Notice that like a normal estimator, this has not yet been applied to any data.Calling the ``fit()`` method will fit the model at each grid point, keeping track of the scores along the way:
###Code
grid.fit(X, y);
###Output
_____no_output_____
###Markdown
Now that this is fit, we can ask for the best parameters as follows:
###Code
grid.best_params_
###Output
_____no_output_____
###Markdown
Finally, if we wish, we can use the best model and show the fit to our data using code from before:
###Code
model = grid.best_estimator_
plt.scatter(X.ravel(), y)
lim = plt.axis()
y_test = model.fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test);
plt.axis(lim);
###Output
_____no_output_____ |
02_usecases/factorization_machines_recommendations/13_Recommendation_Train_Model.ipynb | ###Markdown
Creating and Evaluating Solutions In this notebook, you will train several models using Amazon Personalize, specifically: 1. User Personalization - what items are most relevant to a specific user.1. Similar Items - given an item, what items are similar to it.1. Personalized Ranking - given a user and a collection of items, in what order are they most relevant. Outline1. [Introduction](intro)1. [Create solutions](solutions)1. [Evaluate solutions](eval)1. [Using evaluation metrics](use)1. [Storing useful variables](vars) Introduction To recap, for the most part, the algorithms in Amazon Personalize (called recipes) look to solve different tasks, explained here:1. **User Personalization** - New release that supports ALL HRNN workflows / user personalization needs; it is what we use here.1. **HRNN & HRNN-Metadata** - Recommends items based on previous user interactions with items.1. **HRNN-Coldstart** - Recommends new items for which interaction data is not yet available.1. **Personalized-Ranking** - Takes a collection of items and then orders them in probable order of interest using an HRNN-like approach.1. **SIMS (Similar Items)** - Given one item, recommends other items also interacted with by users.1. **Popularity-Count** - Recommends the most popular items, if HRNN or HRNN-Metadata do not have an answer - this is returned by default.No matter the use case, the algorithms all share a base of learning on user-item-interaction data which is defined by 3 core attributes:1. **UserID** - The user who interacted1. **ItemID** - The item the user interacted with1. **Timestamp** - The time at which the interaction occurredWe also support event types and event values defined by:1. **Event Type** - Categorical label of an event (browse, purchased, rated, etc).1. **Event Value** - A value corresponding to the event type that occurred. Generally speaking, we look for normalized values between 0 and 1 over the event types. For example, if there are three phases to complete a transaction (clicked, added-to-cart, and purchased), then there would be an event_value for each phase as 0.33, 0.66, and 1.0 respectively.The event type and event value fields are additional data which can be used to filter the data sent for training the personalization model. In this particular exercise we will not have an event type or event value. To run this notebook, you need to have run the previous notebooks, `01_Validating_and_Importing_User_Item_Interaction_Data` and `02_Validating_and_Importing_Item_Metadata.ipynb`, where you created a dataset and imported interaction and item metadata into Amazon Personalize. At the end of those notebooks, you saved some of the variable values, which you now need to load into this notebook.
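As a quick refresher on the interaction schema described above, a toy interactions table might look like the following (a hypothetical sketch with made-up IDs and timestamps; the real dataset was built and imported in the earlier notebooks):

```python
import pandas as pd

# Hypothetical illustration of the Interactions dataset schema (values are made up)
interactions_df = pd.DataFrame({
    'USER_ID':     [1, 1, 2],
    'ITEM_ID':     [101, 102, 101],
    'EVENT_TYPE':  ['clicked', 'purchased', 'clicked'],
    'EVENT_VALUE': [0.33, 1.0, 0.33],                     # normalized weight per event type
    'TIMESTAMP':   [1604817000, 1604817060, 1604817120],  # Unix epoch seconds
})
print(interactions_df)
```

The variables saved by those earlier notebooks are restored in the next cell.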
###Code
%store -r
###Output
_____no_output_____
###Markdown
Create solutions [Back to top](top)In this notebook, you will create solutions with the following recipes:1. User Personalization1. SIMS1. Personalized-RankingThe Popularity-Count recipe is the simplest solution available in Amazon Personalize and it should only be used as a fallback, so it will also not be covered in this notebook.Similar to the previous notebook, start by importing the relevant packages, and set up a connection to Amazon Personalize using the SDK.
###Code
import time
from time import sleep
import json
import boto3
# Configure the SDK to Personalize:
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
###Output
_____no_output_____
###Markdown
In Amazon Personalize, a specific variation of an algorithm is called a recipe. Different recipes are suitable for different situations. A trained model is called a solution, and each solution can have many versions that relate to a given volume of data when the model was trained.To start, we will list all the recipes that are supported. This will allow you to select one and use that to build your model.
###Code
personalize.list_recipes()
###Output
_____no_output_____
###Markdown
The output is just a JSON representation of all of the algorithms mentioned in the introduction.Next we will select specific recipes and build models with them. User PersonalizationThe User-Personalization (aws-user-personalization) recipe is optimized for all USER_PERSONALIZATION recommendation scenarios. When recommending items, it uses automatic item exploration.With automatic exploration, Amazon Personalize automatically tests different item recommendations, learns from how users interact with these recommended items, and boosts recommendations for items that drive better engagement and conversion. This improves item discovery and engagement when you have a fast-changing catalog, or when new items, such as news articles or promotions, are more relevant to users when fresh.You can balance how much to explore (where items with less interactions data or relevance are recommended more frequently) against how much to exploit (where recommendations are based on what we know or relevance). Amazon Personalize automatically adjusts future recommendations based on implicit user feedback.First, select the recipe by finding the ARN in the list of recipes above.
###Code
user_personalization_recipe_arn = "arn:aws:personalize:::recipe/aws-user-personalization"
###Output
_____no_output_____
###Markdown
Create the solutionFirst you create a solution using the recipe. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
user_personalization_create_solution_response = personalize.create_solution(
name = "personalize-poc-userpersonalization",
datasetGroupArn = dataset_group_arn,
recipeArn = user_personalization_recipe_arn
)
user_personalization_solution_arn = user_personalization_create_solution_response['solutionArn']
print(json.dumps(user_personalization_solution_arn, indent=2))
###Output
"arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-userpersonalization"
###Markdown
Create the solution versionOnce you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 90 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However the task would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
userpersonalization_create_solution_version_response = personalize.create_solution_version(
solutionArn = user_personalization_solution_arn
)
userpersonalization_solution_version_arn = userpersonalization_create_solution_version_response['solutionVersionArn']
print(json.dumps(user_personalization_create_solution_response, indent=2))
###Output
{
"solutionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-userpersonalization",
"ResponseMetadata": {
"RequestId": "20623f71-407c-46ab-80bd-3d6a9dacb797",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 06:46:11 GMT",
"x-amzn-requestid": "20623f71-407c-46ab-80bd-3d6a9dacb797",
"content-length": "105",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
SIMSSIMS is one of the oldest algorithms used within Amazon for recommendation systems. A core use case for it is when you have one item and you want to recommend items that have been interacted with in similar ways over your entire user base. This means the result is not personalized per user. Sometimes this leads to recommending mostly popular items, so there is a hyperparameter that can be tweaked which will reduce the popular items in your results. For our use case, using the Movielens data, let's assume we pick a particular movie. We can then use SIMS to recommend other movies based on the interaction behavior of the entire user base. The results are not personalized per user, but instead, differ depending on the movie we chose as our input.Just like last time, we start by selecting the recipe.
###Code
SIMS_recipe_arn = "arn:aws:personalize:::recipe/aws-sims"
###Output
_____no_output_____
###Markdown
Create the solutionAs with HRNN, start by creating the solution first. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
sims_create_solution_response = personalize.create_solution(
name = "personalize-poc-sims",
datasetGroupArn = dataset_group_arn,
recipeArn = SIMS_recipe_arn
)
sims_solution_arn = sims_create_solution_response['solutionArn']
print(json.dumps(sims_create_solution_response, indent=2))
###Output
{
"solutionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-sims",
"ResponseMetadata": {
"RequestId": "a27bfa79-c34c-4b85-890c-a3b2652843ad",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 06:46:11 GMT",
"x-amzn-requestid": "a27bfa79-c34c-4b85-890c-a3b2652843ad",
"content-length": "90",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
Create the solution versionOnce you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 35 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However the task would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
sims_create_solution_version_response = personalize.create_solution_version(
solutionArn = sims_solution_arn
)
sims_solution_version_arn = sims_create_solution_version_response['solutionVersionArn']
print(json.dumps(sims_create_solution_version_response, indent=2))
###Output
{
"solutionVersionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-sims/666f373e",
"ResponseMetadata": {
"RequestId": "b4a6c1c1-6223-4cc1-9153-3da6606191fd",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 06:46:12 GMT",
"x-amzn-requestid": "b4a6c1c1-6223-4cc1-9153-3da6606191fd",
"content-length": "106",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
Personalized RankingPersonalized Ranking is an interesting application of HRNN. Instead of just recommending what is most probable for the user in question, this algorithm takes in a user and a list of items as well. The items are then returned in the order of most probable relevance for the user. The use case here is ordering items within a category for which you do not have the item metadata needed to create a filter, or re-ordering a broad collection so that it is better arranged for a particular user.For our use case, using the MovieLens data, we could imagine that a VOD application may want to create a shelf of comic book movies, or movies by a specific director. We most likely already have these lists, based on the title metadata we have. We would use personalized ranking to re-order the list of movies for each user, based on their previous tagging history (a brief inference-time sketch is shown below for context). Just like last time, we start by selecting the recipe.
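For context, once a campaign has been deployed for this solution (in a later notebook), a re-ranking is requested at inference time by passing a user and a candidate item list, roughly as sketched below with hypothetical IDs and a hypothetical `rerank_campaign_arn`:

```python
# Illustration only: assumes a Personalized-Ranking campaign already exists
rerank_response = personalize_runtime.get_personalized_ranking(
    campaignArn = rerank_campaign_arn,                 # hypothetical; created in a later notebook
    userId = '123',
    inputList = ['movie_1', 'movie_2', 'movie_3']      # candidate items to re-order
)
print(rerank_response['personalizedRanking'])          # items re-ordered for this user
```

The recipe ARN for Personalized-Ranking is selected in the next cell.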
###Code
rerank_recipe_arn = "arn:aws:personalize:::recipe/aws-personalized-ranking"
###Output
_____no_output_____
###Markdown
Create the solutionAs with the previous solution, start by creating the solution first. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
rerank_create_solution_response = personalize.create_solution(
name = "personalize-poc-rerank",
datasetGroupArn = dataset_group_arn,
recipeArn = rerank_recipe_arn
)
rerank_solution_arn = rerank_create_solution_response['solutionArn']
print(json.dumps(rerank_create_solution_response, indent=2))
###Output
{
"solutionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-rerank",
"ResponseMetadata": {
"RequestId": "bcd3deaa-07e5-47ba-bbd3-abc41b61bb57",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 06:46:12 GMT",
"x-amzn-requestid": "bcd3deaa-07e5-47ba-bbd3-abc41b61bb57",
"content-length": "92",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
Create the solution versionOnce you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 35 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However the task would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
rerank_create_solution_version_response = personalize.create_solution_version(
solutionArn = rerank_solution_arn
)
rerank_solution_version_arn = rerank_create_solution_version_response['solutionVersionArn']
print(json.dumps(rerank_create_solution_version_response, indent=2))
###Output
{
"solutionVersionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-rerank/44c934a6",
"ResponseMetadata": {
"RequestId": "61ffe655-1749-49f6-9a67-3aea32515ba8",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 06:46:12 GMT",
"x-amzn-requestid": "61ffe655-1749-49f6-9a67-3aea32515ba8",
"content-length": "108",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
View solution creation statusAs promised, how to view the status updates in the console:* In another browser tab you should already have the AWS Console up from opening this notebook instance. * Switch to that tab and search at the top for the service `Personalize`, then go to that service page. * Click `View dataset groups`.* Click the name of your dataset group, most likely something with POC in the name.* Click `Solutions and recipes`.* You will now see a list of all of the solutions you created above, including a column with the status of the solution versions. Once it is `Active`, your solution is ready to be reviewed. It is also capable of being deployed.Or simply run the cell below to keep track of the solution version creation status.
###Code
%%time
in_progress_solution_versions = [
userpersonalization_solution_version_arn,
sims_solution_version_arn,
rerank_solution_version_arn
]
max_time = time.time() + 10*60*60 # 10 hours
while time.time() < max_time:
for solution_version_arn in in_progress_solution_versions:
version_response = personalize.describe_solution_version(
solutionVersionArn = solution_version_arn
)
status = version_response["solutionVersion"]["status"]
if status == "ACTIVE":
print("Build succeeded for {}".format(solution_version_arn))
in_progress_solution_versions.remove(solution_version_arn)
elif status == "CREATE FAILED":
print("Build failed for {}".format(solution_version_arn))
in_progress_solution_versions.remove(solution_version_arn)
if len(in_progress_solution_versions) <= 0:
break
else:
print("At least one solution build is still in progress")
time.sleep(60)
###Output
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
Build succeeded for arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-sims/666f373e
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
Build succeeded for arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-rerank/44c934a6
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
At least one solution build is still in progress
Build succeeded for arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-userpersonalization/d8407cb8
CPU times: user 456 ms, sys: 58.3 ms, total: 514 ms
Wall time: 1h 2min 8s
###Markdown
Hyperparameter tuningPersonalize offers the option of running hyperparameter tuning when creating a solution. Because of the additional computation required to perform hyperparameter tuning, this feature is turned off by default. Therefore, the solutions we created above, will simply use the default values of the hyperparameters for each recipe. For more information about hyperparameter tuning, see the [documentation](https://docs.aws.amazon.com/personalize/latest/dg/customizing-solution-config-hpo.html).If you have settled on the correct recipe to use, and are ready to run hyperparameter tuning, the following code shows how you would do so, using SIMS as an example.```pythonsims_create_solution_response = personalize.create_solution( name = "personalize-poc-sims-hpo", datasetGroupArn = dataset_group_arn, recipeArn = SIMS_recipe_arn, performHPO=True)sims_solution_arn = sims_create_solution_response['solutionArn']print(json.dumps(sims_create_solution_response, indent=2))```If you already know the values you want to use for a specific hyperparameter, you can also set this value when you create the solution. The code below shows how you could set the value for the `popularity_discount_factor` for the SIMS recipe.```pythonsims_create_solution_response = personalize.create_solution( name = "personalize-poc-sims-set-hp", datasetGroupArn = dataset_group_arn, recipeArn = SIMS_recipe_arn, solutionConfig = { 'algorithmHyperParameters': { 'popularity_discount_factor': '0.7' } })sims_solution_arn = sims_create_solution_response['solutionArn']print(json.dumps(sims_create_solution_response, indent=2))``` Evaluate solution versions [Back to top](top)It should not take more than an hour to train all the solutions from this notebook. While training is in progress, we recommend taking the time to read up on the various algorithms (recipes) and their behavior in detail. This is also a good time to consider alternatives to how the data was fed into the system and what kind of results you expect to see.When the solutions finish creating, the next step is to obtain the evaluation metrics. Personalize calculates these metrics based on a subset of the training data. The image below illustrates how Personalize splits the data. Given 10 users, with 10 interactions each (a circle represents an interaction), the interactions are ordered from oldest to newest based on the timestamp. Personalize uses all of the interaction data from 90% of the users (blue circles) to train the solution version, and the remaining 10% for evaluation. For each of the users in the remaining 10%, 90% of their interaction data (green circles) is used as input for the call to the trained model. The remaining 10% of their data (orange circle) is compared to the output produced by the model and used to calculate the evaluation metrics.We recommend reading [the documentation](https://docs.aws.amazon.com/personalize/latest/dg/working-with-training-metrics.html) to understand the metrics, but we have also copied parts of the documentation below for convenience.You need to understand the following terms regarding evaluation in Personalize:* *Relevant recommendation* refers to a recommendation that matches a value in the testing data for the particular user.* *Rank* refers to the position of a recommended item in the list of recommendations. 
Position 1 (the top of the list) is presumed to be the most relevant to the user.* *Query* refers to the internal equivalent of a GetRecommendations call.The metrics produced by Personalize are:* **coverage**: The proportion of unique recommended items from all queries out of the total number of unique items in the training data (includes both the Items and Interactions datasets).* **mean_reciprocal_rank_at_25**: The [mean of the reciprocal ranks](https://en.wikipedia.org/wiki/Mean_reciprocal_rank) of the first relevant recommendation out of the top 25 recommendations over all queries. This metric is appropriate if you're interested in the single highest ranked recommendation.* **normalized_discounted_cumulative_gain_at_K**: Discounted gain assumes that recommendations lower on a list of recommendations are less relevant than higher recommendations. Therefore, each recommendation is discounted (given a lower weight) by a factor dependent on its position. To produce the [cumulative discounted gain](https://en.wikipedia.org/wiki/Discounted_cumulative_gain) (DCG) at K, each relevant discounted recommendation in the top K recommendations is summed together. The normalized discounted cumulative gain (NDCG) is the DCG divided by the ideal DCG such that NDCG is between 0 - 1. (The ideal DCG is where the top K recommendations are sorted by relevance.) Amazon Personalize uses a weighting factor of 1/log(1 + position), where the top of the list is position 1. This metric rewards relevant items that appear near the top of the list, because the top of a list usually draws more attention.* **precision_at_K**: The number of relevant recommendations out of the top K recommendations divided by K. This metric rewards precise recommendation of the relevant items.Let's take a look at the evaluation metrics for each of the solutions produced in this notebook. *Please note, your results might differ from the results described in the text of this notebook, due to the quality of the Movielens dataset.* User Personalization metricsFirst, retrieve the evaluation metrics for the User Personalization solution version.
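To make the precision@K and reciprocal-rank definitions above concrete before looking at the real numbers, the short sketch below computes them for one hypothetical user. This is illustrative only; Personalize computes its metrics internally on the held-out data described above.

```python
# Toy example for a single user (not Personalize's internal implementation)
def precision_at_k(recommended, relevant, k):
    top_k = recommended[:k]
    return len(set(top_k) & set(relevant)) / k

def reciprocal_rank(recommended, relevant, k=25):
    for position, item in enumerate(recommended[:k], start=1):
        if item in relevant:
            return 1.0 / position
    return 0.0

recommended = ['item_7', 'item_3', 'item_9', 'item_1', 'item_5']  # model output, best first
relevant    = ['item_9', 'item_42']                               # held-out interactions

print(precision_at_k(recommended, relevant, k=5))  # 0.2 -> one relevant item in the top 5
print(reciprocal_rank(recommended, relevant))      # 0.333... -> first hit at position 3
```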
###Code
user_personalization_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = userpersonalization_solution_version_arn
)
print(json.dumps(user_personalization_solution_metrics_response, indent=2))
###Output
{
"solutionVersionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-userpersonalization/d8407cb8",
"metrics": {
"coverage": 0.0786,
"mean_reciprocal_rank_at_25": 0.2263,
"normalized_discounted_cumulative_gain_at_10": 0.2174,
"normalized_discounted_cumulative_gain_at_25": 0.2831,
"normalized_discounted_cumulative_gain_at_5": 0.1802,
"precision_at_10": 0.0464,
"precision_at_25": 0.0321,
"precision_at_5": 0.0571
},
"ResponseMetadata": {
"RequestId": "d88a71cd-8b6a-4096-9862-f58d6bbcb8bd",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 07:48:19 GMT",
"x-amzn-requestid": "d88a71cd-8b6a-4096-9862-f58d6bbcb8bd",
"content-length": "419",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
The normalized discounted cumulative gain above tells us that at 5 items, we have less than a (38% for full, 22% for small) chance of a recommendation being part of a user's interaction history (in the hold-out phase from training and validation). Around 13% of the recommended items are unique, and we have a precision of only (14% for full, 7.5% for small) in the top 5 recommended items. This is clearly not a great model, but keep in mind that we had to use rating data for our interactions because Movielens is an explicit dataset based on ratings. The timestamps also were from the time that the movie was rated, not watched, so the order is not the same as the order a viewer would watch movies. SIMS metricsNow, retrieve the evaluation metrics for the SIMS solution version.
###Code
sims_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = sims_solution_version_arn
)
print(json.dumps(sims_solution_metrics_response, indent=2))
###Output
{
"solutionVersionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-sims/666f373e",
"metrics": {
"coverage": 0.1843,
"mean_reciprocal_rank_at_25": 0.1777,
"normalized_discounted_cumulative_gain_at_10": 0.2299,
"normalized_discounted_cumulative_gain_at_25": 0.2962,
"normalized_discounted_cumulative_gain_at_5": 0.1954,
"precision_at_10": 0.0588,
"precision_at_25": 0.0418,
"precision_at_5": 0.0824
},
"ResponseMetadata": {
"RequestId": "48ac3d87-8481-41ec-8b9e-8725241f0ba0",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 07:48:20 GMT",
"x-amzn-requestid": "48ac3d87-8481-41ec-8b9e-8725241f0ba0",
"content-length": "404",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
In this example we are seeing a slightly elevated precision at 5 items, a little over (4.5% for full, 6.4% for small) this time. Effectively this is probably within the margin of error, but given that no effort was made to mask popularity, it may just be returning super popular results that a large volume of users have interacted with in some way. Personalized ranking metricsNow, retrieve the evaluation metrics for the personalized ranking solution version.
###Code
rerank_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = rerank_solution_version_arn
)
print(json.dumps(rerank_solution_metrics_response, indent=2))
###Output
{
"solutionVersionArn": "arn:aws:personalize:us-east-1:835319576252:solution/personalize-poc-rerank/44c934a6",
"metrics": {
"coverage": 0.0038,
"mean_reciprocal_rank_at_25": 0.0593,
"normalized_discounted_cumulative_gain_at_10": 0.0824,
"normalized_discounted_cumulative_gain_at_25": 0.0958,
"normalized_discounted_cumulative_gain_at_5": 0.0377,
"precision_at_10": 0.0189,
"precision_at_25": 0.0113,
"precision_at_5": 0.0075
},
"ResponseMetadata": {
"RequestId": "d81e90b9-d502-465f-a234-eeec9fa22341",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"content-type": "application/x-amz-json-1.1",
"date": "Sun, 08 Nov 2020 07:48:20 GMT",
"x-amzn-requestid": "d81e90b9-d502-465f-a234-eeec9fa22341",
"content-length": "406",
"connection": "keep-alive"
},
"RetryAttempts": 0
}
}
###Markdown
Just a quick comment on this one: here we again see a precision of only around (2.7% for full, 2.2% for small); as this recipe is based on User Personalization, that is to be expected. However, the sample items are not the same for validation, hence the low scores. Using evaluation metrics [Back to top](top)It is important to use evaluation metrics carefully. There are a number of factors to keep in mind.* If there is an existing recommendation system in place, this will have influenced the user's interaction history which you use to train your new solutions. This means the evaluation metrics are biased to favor the existing solution. If you work to push the evaluation metrics to match or exceed the existing solution, you may just be pushing the User Personalization to behave like the existing solution and might not end up with something better.* The HRNN Coldstart recipe is difficult to evaluate using the metrics produced by Amazon Personalize. The aim of the recipe is to recommend items which are new to your business. Therefore, these items will not appear in the existing user transaction data which is used to compute the evaluation metrics. As a result, HRNN Coldstart will never appear to perform better than the other recipes, when compared on the evaluation metrics alone. Note: The User Personalization recipe also includes improved cold start functionalityKeeping in mind these factors, the evaluation metrics produced by Personalize are generally useful for two cases:1. Comparing the performance of solution versions trained on the same recipe, but with different values for the hyperparameters and features (impression data etc)1. Comparing the performance of solution versions trained on different recipes (except HRNN Coldstart).Properly evaluating a recommendation system is always best done through A/B testing while measuring actual business outcomes. Since recommendations generated by a system usually influence the user behavior which it is based on, it is better to run small experiments and apply A/B testing for longer periods of time. Over time, the bias from the existing model will fade. Storing useful variables [Back to top](top)Before exiting this notebook, run the following cells to save the version ARNs for use in the next notebook.
###Code
%store userpersonalization_solution_version_arn
%store sims_solution_version_arn
%store rerank_solution_version_arn
%store user_personalization_solution_arn
%store sims_solution_arn
%store rerank_solution_arn
###Output
Stored 'userpersonalization_solution_version_arn' (str)
Stored 'sims_solution_version_arn' (str)
Stored 'rerank_solution_version_arn' (str)
Stored 'user_personalization_solution_arn' (str)
Stored 'sims_solution_arn' (str)
Stored 'rerank_solution_arn' (str)
###Markdown
Release Resources
###Code
%%javascript
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
###Output
_____no_output_____ |
Week 2/.ipynb_checkpoints/Logistic_Regression_with_a_Neural_Network_mindset_v6a-checkpoint.ipynb | ###Markdown
Logistic Regression with a Neural Network mindsetWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.**Instructions:**- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.**You will learn to:**- Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. UpdatesThis notebook has been updated over the past few months. The prior version was named "v5", and the current versionis now named '6a' If you were working on a previous version:* You can find your prior work by looking in the file directory for the older files (named by version name).* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.* Please copy your work from the older versions to the new version, in order to submit your work for grading. List of Updates* Forward propagation formula, indexing now starts at 1 instead of 0.* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".* Fixed grammar in the comments.* Y_prediction_test variable name is used consistently.* Plot's axis label now says "iterations (hundred)" instead of "iterations".* When testing the model, the test image is normalized by dividing by 255. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
###Code
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
###Output
_____no_output_____
###Markdown
2 - Overview of the Problem set **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.Let's get more familiar with the dataset. Load the data by running the following code.
###Code
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
###Output
_____no_output_____
###Markdown
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
###Code
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
###Output
y = [1], it's a 'cat' picture.
###Markdown
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image)Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
###Code
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
###Output
Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height/Width of each image: num_px = 64
Each image is of size: (64, 64, 3)
train_set_x shape: (209, 64, 64, 3)
train_set_y shape: (1, 209)
test_set_x shape: (50, 64, 64, 3)
test_set_y shape: (1, 50)
###Markdown
**Expected Output for m_train, m_test and num_px**: **m_train** 209 **m_test** 50 **num_px** 64 For convenience, you should now reshape images of shape (num_px, num_px, 3) into a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```python X_flatten = X.reshape(X.shape[0], -1).T  # X.T is the transpose of X ```
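As an optional extra sanity check, each column of the flattened array should reshape back into the original image it came from. A small sketch (to be run after the reshaping cell below):
```python
# optional round-trip check (run after the reshaping cell below):
# column 0 of train_set_x_flatten should rebuild the first original image exactly
assert np.array_equal(train_set_x_flatten[:, 0].reshape(num_px, num_px, 3),
                      train_set_x_orig[0])
```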
###Code
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
###Output
train_set_x_flatten shape: (12288, 209)
train_set_y shape: (1, 209)
test_set_x_flatten shape: (12288, 50)
test_set_y shape: (1, 50)
sanity check after reshaping: [17 31 56 22 33]
###Markdown
**Expected Output**: **train_set_x_flatten shape** (12288, 209) **train_set_y shape** (1, 209) **test_set_x_flatten shape** (12288, 50) **test_set_y shape** (1, 50) **sanity check after reshaping** [17 31 56 22 33] To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255. One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset.
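For reference, the generic "center and standardize" step described above would look roughly like the sketch below; it is not used here, since dividing by 255 works well enough for pixel data:
```python
# generic standardization sketch (for comparison only; the cell below simply divides by 255)
mu = train_set_x_flatten.mean()
sigma = train_set_x_flatten.std()
train_set_x_standardized = (train_set_x_flatten - mu) / sigma
```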
###Code
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
###Output
_____no_output_____
###Markdown
**What you need to remember:**Common steps for pre-processing a new dataset are:- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)- "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images.You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!****Mathematical expression of the algorithm**:For one example $x^{(i)}$:$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$The cost is then computed by summing over all training examples:$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$**Key steps**:In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm The main steps for building a Neural Network are:1. Define the model structure (such as number of input features) 2. Initialize the model's parameters3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent)You often build 1-3 separately and integrate them into one function we call `model()`. 4.1 - Helper functions**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
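To build intuition for the loss in equation (3), consider a cat example ($y^{(i)}=1$): a confident correct prediction is barely penalized, while a confident wrong one is penalized heavily:
$$y^{(i)}=1,\ a^{(i)}=0.9:\quad \mathcal{L} = -\log(0.9) \approx 0.105 \qquad\qquad y^{(i)}=1,\ a^{(i)}=0.1:\quad \mathcal{L} = -\log(0.1) \approx 2.303$$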
###Code
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1/(1+np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###Output
sigmoid([0, 2]) = [ 0.5 0.88079708]
###Markdown
**Expected Output**: **sigmoid([0, 2])** [ 0.5 0.88079708] 4.2 - Initializing parameters**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
###Code
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim,1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
###Output
w = [[ 0.]
[ 0.]]
b = 0
###Markdown
**Expected Output**: ** w ** [[ 0.] [ 0.]] ** b ** 0 For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagationNow that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.**Hints**:Forward Propagation:- You get X- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
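For readers wondering where equations (7) and (8) come from, here is a short chain-rule sketch using the sigmoid derivative:
$$\frac{\partial \mathcal{L}}{\partial a^{(i)}} = -\frac{y^{(i)}}{a^{(i)}} + \frac{1-y^{(i)}}{1-a^{(i)}}, \qquad \frac{\partial a^{(i)}}{\partial z^{(i)}} = a^{(i)}\left(1-a^{(i)}\right) \quad\Longrightarrow\quad \frac{\partial \mathcal{L}}{\partial z^{(i)}} = a^{(i)} - y^{(i)}$$
$$\frac{\partial J}{\partial w} = \frac{1}{m}\sum_{i=1}^{m} x^{(i)}\left(a^{(i)} - y^{(i)}\right) = \frac{1}{m}X(A-Y)^T, \qquad \frac{\partial J}{\partial b} = \frac{1}{m}\sum_{i=1}^{m} \left(a^{(i)} - y^{(i)}\right)$$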
###Code
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X) + b) # compute activation
cost = -1/m*(np.sum(np.multiply(Y,np.log(A))+np.multiply(1-Y,np.log(1-A))))# compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1/m)*(np.dot(X,(A-Y).T))
db = (1/m)*(np.sum(A-Y))
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
###Output
dw = [[ 0.99845601]
[ 2.39507239]]
db = 0.00145557813678
cost = 5.80154531939
###Markdown
**Expected Output**: ** dw ** [[ 0.99845601] [ 2.39507239]] ** db ** 0.00145557813678 ** cost ** 5.801545319394553 4.4 - Optimization- You have initialized your parameters.- You are also able to compute a cost function and its gradient.- Now, you want to update the parameters using gradient descent.**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
###Code
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads,cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate*dw
b = b - learning_rate*db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
###Output
w = [[ 0.19033591]
[ 0.12259159]]
b = 1.92535983008
dw = [[ 0.67752042]
[ 1.41625495]]
db = 0.219194504541
###Markdown
**Expected Output**: **w** [[ 0.19033591] [ 0.12259159]] **b** 1.92535983008 **dw** [[ 0.67752042] [ 1.41625495]] **db** 0.219194504541 **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions: 1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$ 2. Convert the entries of A into 0 (if activation <= 0.5) or 1 (if activation > 0.5), and store the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
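As the hint above mentions, the 0/1 conversion can also be vectorized. A possible sketch (not the graded solution, which keeps the explicit loop in the cell below):
```python
def predict_vectorized(w, b, X):
    # same logic as predict() below, but thresholding the whole activation row at once
    A = sigmoid(np.dot(w.T, X) + b)
    return (A > 0.5).astype(float)
```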
###Code
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T,X)+b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
if A[0,i] <= 0.5:
Y_prediction[0,i] = 0
else:
Y_prediction[0,i] = 1
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
###Output
predictions = [[ 1. 1. 0.]]
###Markdown
**Expected Output**: **predictions** [[ 1. 1. 0.]] **What to remember:**You've implemented several functions that:- Initialize (w,b)- Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent- Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.**Exercise:** Implement the model function. Use the following notation: - Y_prediction_test for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize()
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost = print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_train = predict(w,b,X_train)
Y_prediction_test = predict(w,b,X_test)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
###Output
_____no_output_____
###Markdown
Run the following cell to train your model.
###Code
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
###Output
train accuracy: 99.04306220095694 %
test accuracy: 70.0 %
###Markdown
**Expected Output**: **Cost after iteration 0 ** 0.693147 $\vdots$ $\vdots$ **Train Accuracy** 99.04306220095694 % **Test Accuracy** 70.0 % **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week! Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
###Code
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
###Output
y = 1, you predicted that it is a "cat" picture.
###Markdown
Let's also plot the cost function and the gradients.
###Code
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
###Output
_____no_output_____
###Markdown
**Interpretation**:You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate **Reminder**:In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
###Code
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
###Output
learning rate is: 0.01
train accuracy: 99.52153110047847 %
test accuracy: 68.0 %
-------------------------------------------------------
learning rate is: 0.001
train accuracy: 88.99521531100478 %
test accuracy: 64.0 %
-------------------------------------------------------
learning rate is: 0.0001
train accuracy: 68.42105263157895 %
test accuracy: 36.0 %
-------------------------------------------------------
###Markdown
**Interpretation**: - Different learning rates give different costs and thus different predictions results.- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.- In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
# note: ndimage.imread and scipy.misc.imresize were removed from newer SciPy releases,
# so Pillow (imported above as Image) is used to read and resize the picture instead
image = np.array(Image.open(fname))
image = image/255.
my_image = np.array(Image.open(fname).resize((num_px, num_px))).reshape((1, num_px*num_px*3)).T/255.
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
_____no_output_____ |
floodzone_pfirm_analysis.ipynb | ###Markdown
New York City's Flood Zone Exploratory Data AnalysisAuthor: Mark Bauer 1. IntroductionI've wanted to do this project for some time now, and I'm finally happy to share this open source project. This notebook demonstrates how to analyze FEMA's Preliminary Flood Insurance Rate Map (i.e. PFIRM), sometimes known as '*flood zone*.' The flood zone is for New York City (all five boroughs). Let's see what cool things we can discover about this dataset! Resources Before Getting Started NYC's Preliminary Flood Insurance Rate Map (PFIRM) data can be downloaded here: http://www.region2coastal.com/view-flood-maps-data/view-preliminary-flood-map-data/*Figure 1. Screenshot of website of the pfirm data* *Figure 2. Screenshot of data section for New York City* About the data: >The Digital Flood Insurance Rate Map (DFIRM) Database depicts flood risk information and supporting data used to develop the risk data. The primary risk classifications used are the 1-percent-annual-chance flood event, the 0.2-percent-annual-chance flood event, and areas of minimal flood risk. The DFIRM Database is derived from Flood Insurance Studies (FISs), previously published Flood Insurance Rate Maps (FIRMs), flood hazard analyses performed in support of the FISs and FIRMs, and new mapping data, where available. The FISs and FIRMs are published by the Federal Emergency Management Agency (FEMA). The file is georeferenced to earth's surface using the State Plane projection and coordinate system. The specifications for the horizontal control of DFIRM data files are consistent with those required for mapping at a scale of 1:12,000. General identification information about this dataset>Originator: Federal Emergency Management Agency Publication_Date: 20150130 Title: DIGITAL FLOOD INSURANCE RATE MAP DATABASE, CITY OF NEW YORK, NEW YORK Geospatial_Data_Presentation_Form: FEMA-DFIRM-Preliminary Publication_Information:>Publication_Place: Washington, DC Publisher: Federal Emergency Management Agency Online_Linkage: https://msc.fema.gov To read the full Preliminary Flood Insurance Study mentioned above: https://msc.fema.gov/portal/downloadProduct?productID=360497V000B Unfortunately, there is no data dictionary provided in the download. To learn more about each column, the [metadata](https://github.com/mebauer/nyc-floodzone-analysis/blob/master/pfirm_nyc/360497_PRELIM_metadata.txt) points to **Guidelines and Specifications for Flood Hazard Mapping Partners: Appendix L: Guidance for Preparing Draft Digital Data and DFIRM Database.** You can find this resource at the Homeland Security Digital Library located here: https://www.hsdl.org/?abstract&did=13285. I've also uploaded this resource for you with the title [data_dictionary.pdf](https://github.com/mebauer/nyc-floodzone-analysis/blob/master/data_dictionary.pdf). *Figure 3. Screenshot of data dictionary webpage* Libraries
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
import urllib.request
from zipfile import ZipFile
plt.rcParams['savefig.facecolor'] = 'white'
%matplotlib inline
print('printing packages and versions:\n')
%reload_ext watermark
%watermark -v -p numpy,pandas,seaborn,matplotlib.pyplot,geopandas
###Output
printing packages and versions:
CPython 3.7.1
IPython 7.20.0
numpy 1.19.2
pandas 1.2.1
seaborn 0.11.1
matplotlib.pyplot 3.3.2
geopandas 0.8.1
###Markdown
*Figure 4. Screenshot of data path* ***Uncomment the code below to download the files from the web.***
###Code
# # url path from the web
# url = 'https://msc.fema.gov/portal/downloadProduct?productID=360497_PRELIMDB'
# print('data url path:', url)
# # assigning file name as 'pfirm_nyc.zip'
# urllib.request.urlretrieve(url, 'pfirm_nyc.zip')
# listing files in our directory
print('listing the new downloaded file in our directory:')
%ls
###Output
listing the new downloaded file in our directory:
CODE_OF_CONDUCT.md floodzone_pfirm_analysis.ipynb
CONTRIBUTING.md [34mimgs[m[m/
LICENSE [34mpfirm_nyc[m[m/
README.md pfirm_nyc.zip
data_dictionary.pdf
###Markdown
***Uncomment the code below to unzip and extract the files from the zip file.***
###Code
# path = 'pfirm_nyc.zip'
# print('name of zip file:', path)
# # opening zip using 'with' keyword in read mode
# with ZipFile(path, 'r') as file:
# # extracing all items in our zipfile
# # naming our file 'pfirm_nyc'
# file.extractall('pfirm_nyc')
print('listing items after unzipping the file:\n')
%ls pfirm_nyc/
###Output
listing items after unzipping the file:
360497_PRELIM_metadata.txt s_fld_haz_ar.shx
360497_PRELIM_metadata.xml s_fld_haz_ln.dbf
L_PAN_REVIS.dbf s_fld_haz_ln.shp
L_POL_FHBM.dbf s_fld_haz_ln.shx
S_BASE_INDEX.prj s_gen_struct.dbf
S_BFE.prj s_gen_struct.shp
S_CBRS.prj s_gen_struct.shx
S_CST_TSCT_LN.dbf s_label_ld.dbf
S_CST_TSCT_LN.prj s_label_ld.prj
S_CST_TSCT_LN.shp s_label_ld.sbn
S_CST_TSCT_LN.shx s_label_ld.sbx
S_FIRM_PAN.prj s_label_ld.shp
S_FLD_HAZ_AR.prj s_label_ld.shx
S_FLD_HAZ_LN.prj s_label_pt.dbf
S_GEN_STRUCT.prj s_label_pt.prj
S_LiMWA.dbf s_label_pt.sbn
S_LiMWA.prj s_label_pt.sbx
S_LiMWA.shp s_label_pt.shp
S_LiMWA.shx s_label_pt.shx
S_PERM_BMK.prj s_perm_bmk.dbf
S_POL_AR.prj s_perm_bmk.shp
S_POL_LN.prj s_perm_bmk.shx
S_QUAD_INDEX.prj s_pol_ar.dbf
S_WTR_AR.prj s_pol_ar.shp
S_WTR_LN.prj s_pol_ar.shx
S_XS.prj s_pol_ln.dbf
l_comm_info.dbf s_pol_ln.shp
l_stn_start.dbf s_pol_ln.shx
s_base_index.dbf s_quad_index.dbf
s_base_index.shp s_quad_index.shp
s_base_index.shx s_quad_index.shx
s_bfe.dbf s_wtr_ar.dbf
s_bfe.shp s_wtr_ar.shp
s_bfe.shx s_wtr_ar.shx
s_cbrs.dbf s_wtr_ln.dbf
s_cbrs.shp s_wtr_ln.shp
s_cbrs.shx s_wtr_ln.shx
s_firm_pan.dbf s_xs.dbf
s_firm_pan.shp s_xs.shp
s_firm_pan.shx s_xs.shx
s_fld_haz_ar.dbf study_info.dbf
s_fld_haz_ar.shp
###Markdown
2. The filesFor this analysis, we are interested in the special flood hazard area shapefile - `s_fld_haz_ar.shp`. This contains information about the flood zone. A shapefile is geospatial vector data for geographic information system software and stores geometric location and associated attribute information. Data description from metadata:>Entity_Type_Label: s_fld_haz_ar Entity_Type_Definition: Location and attributes for flood insurance risk zones on the DFIRM.For more information about our columns and their descriptions, please find the [metadata files](https://github.com/mebauer/nyc-floodzone-analysis/blob/master/pfirm_nyc/360497_PRELIM_metadata.txt) located in the pfirm_nyc folder. 2.1 Inspecting the data
###Code
# reading in shape file
path = 'pfirm_nyc/s_fld_haz_ar.shp'
pfirm_df = gpd.read_file(path)
# previewing first five rows of data
pfirm_df.head()
# previewing last five rows of data
pfirm_df.tail()
print('dataframe shape:\n')
rows = pfirm_df.shape[0]
columns = pfirm_df.shape[1]
print('number of rows: {:,}.\nnumber of columns: {}.'.format(rows, columns))
###Output
dataframe shape:
number of rows: 3,985.
number of columns: 15.
###Markdown
In this dataset, each row is a flood zone geometry. The type of flood zone can be inspected under the `FLD_ZONE` column. Additionally, there are 15 columns that contain information about each flood zone row.
###Code
print('type of python object:\n\n{}'.format(type(pfirm_df)))
###Output
type of python object:
<class 'geopandas.geodataframe.GeoDataFrame'>
###Markdown
We read the shapefile into Python as a GeoDataFrame. This type is similar to a pandas dataframe, but incudes additional spatial information - mainly, our `geometry` column. The geometry column contains polygons that represent the physical space of each flood zone.
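If you want to confirm what kinds of geometries the column actually holds, geopandas exposes a `geom_type` accessor; a minimal check might look like this:
```python
# count the geometry types stored in the geometry column (e.g. Polygon vs. MultiPolygon)
pfirm_df.geom_type.value_counts()
```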
###Code
print('quickly inspecting our map.')
pfirm_df.plot()
# summary of the data
pfirm_df.info()
###Output
<class 'geopandas.geodataframe.GeoDataFrame'>
RangeIndex: 3985 entries, 0 to 3984
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 FLD_AR_ID 3985 non-null object
1 FLD_ZONE 3985 non-null object
2 FLOODWAY 47 non-null object
3 SFHA_TF 3985 non-null object
4 STATIC_BFE 3985 non-null float64
5 V_DATUM 1153 non-null object
6 DEPTH 3985 non-null float64
7 LEN_UNIT 1165 non-null object
8 VELOCITY 3985 non-null float64
9 VEL_UNIT 0 non-null object
10 AR_REVERT 0 non-null object
11 BFE_REVERT 3985 non-null float64
12 DEP_REVERT 3985 non-null float64
13 SOURCE_CIT 3985 non-null object
14 geometry 3985 non-null geometry
dtypes: float64(5), geometry(1), object(9)
memory usage: 467.1+ KB
###Markdown
We notice that there are two columns that are completely null - `VEL_UNIT` and `AR_REVERT`. We may be able to safely drop them in the future.
###Code
print('count of data types:')
pfirm_df.dtypes.value_counts()
print('summary statistics for numeric columns:')
pfirm_df.describe()
###Output
summary statistics for numeric columns:
###Markdown
There seem to be some odd values of -9999 in the numeric columns, possibly placeholders for null values. We will inspect these values later.
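A quick way to quantify that suspicion is to count how often the sentinel appears in each numeric column; a small sketch:
```python
# count occurrences of the -9999 placeholder in every numeric column
(pfirm_df.select_dtypes('number') == -9999).sum()
```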
###Code
print('summary statistics for string/object columns:')
pfirm_df.describe(include='object')
print('summary statistics for all column types:')
pfirm_df.drop(columns='geometry').describe(include='all').T
s1 = pfirm_df.isnull().sum().sort_values(ascending=False)
s2 = round(pfirm_df.isnull().sum().sort_values(ascending=False) / len(pfirm_df), 3)
print('null statistics of each column:')
pd.concat([s1.rename('count_null'), s2.rename('normalized')], axis=1)
print('total number of nulls in data: {:,}.'.format(pfirm_df.isnull().sum().sum()))
###Output
total number of nulls in data: 17,560.
###Markdown
Now that we have a better understanding of our data, let's come back to those 100% null value columns and see if we can safely drop them 2.2 Dropping unnecessary columns Definitions from our data dictionary:VEL_UNIT>Velocity Unit Lookup Identification. A code that provides a link to a valid unit of velocity from the D_Vel_Units table. This unit indicates the measurement system for the velocity of the flood hazard area. The value is shown in the legend where alluvial fans are present. This field is only populated if the VELOCITY field is populated.AR_REVERT>If the area is Zone AR, this field would hold the zone that the area would revert to if the AR zone were removed. This field is only populated if the corresponding area is Zone AR. Acceptable values for this field are listed in the D_Zone table.
###Code
# previewing columns that are 100% null
pfirm_df[['VEL_UNIT', 'AR_REVERT']].head()
# inspecting items within these columns
for col in ['VEL_UNIT', 'AR_REVERT']:
print(pfirm_df[col].value_counts())
###Output
Series([], Name: VEL_UNIT, dtype: int64)
Series([], Name: AR_REVERT, dtype: int64)
###Markdown
There are no values besides `None` in these columns, and we can safely drop these.
###Code
print('number of columns: {}'.format(pfirm_df.shape[1]))
# dropping 100% null columns
pfirm_df = pfirm_df.drop(columns=['VEL_UNIT', 'AR_REVERT'])
print('new number of columns: {}\n'.format(pfirm_df.shape[1]))
pfirm_df.head()
# let's preview our null counts after droppping those columns
s1 = pfirm_df.isnull().sum().sort_values(ascending=False)
s2 = round(pfirm_df.isnull().sum().sort_values(ascending=False) / len(pfirm_df), 2)
print('null statistics:')
pd.concat([s1.rename('count_null'), s2.rename('normalized')], axis=1)
print('reviewing items in FLOODWAY column that is almost 100% null')
pfirm_df['FLOODWAY'].value_counts(dropna=False)
###Output
reviewing items in FLOODWAY column that is almost 100% null
###Markdown
Although `FLOODWAY` is 99% null, it still has non-null values. Let's keep the column.
###Code
# plotting histograms of numeric columns
pfirm_df.hist(figsize=(12,12))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
There seem to be some outliers in these columns. Let's inspect them.
###Code
print("previewing items within each column. hopefully we'll \
find columns that are\n\
not 100% null but are still not useful for this analysis.\n")
for col in pfirm_df.columns:
# geometry type does not have a .value_counts() method
if col == 'geometry':
continue
print(col, 'value counts:')
print(pfirm_df[col].value_counts(dropna=False, normalize=True).head())
print()
###Output
previewing items within each column. hopefully we'll find columns that are
not 100% null but are still not useful for this analysis.
FLD_AR_ID value counts:
1200 0.000251
2221 0.000251
1976 0.000251
3194 0.000251
1957 0.000251
Name: FLD_AR_ID, dtype: float64
FLD_ZONE value counts:
0.2 PCT ANNUAL CHANCE FLOOD HAZARD 0.433124
AE 0.318444
X 0.132748
VE 0.105395
A 0.005270
Name: FLD_ZONE, dtype: float64
FLOODWAY value counts:
NaN 0.988206
FLOODWAY 0.011794
Name: FLOODWAY, dtype: float64
SFHA_TF value counts:
F 0.56788
T 0.43212
Name: SFHA_TF, dtype: float64
STATIC_BFE value counts:
-9999.0 0.710665
13.0 0.055458
14.0 0.038896
11.0 0.036888
12.0 0.036136
Name: STATIC_BFE, dtype: float64
V_DATUM value counts:
NaN 0.710665
NAVD88 0.289335
Name: V_DATUM, dtype: float64
DEPTH value counts:
-9999.0 0.996989
3.0 0.001757
1.0 0.000753
2.0 0.000502
Name: DEPTH, dtype: float64
LEN_UNIT value counts:
NaN 0.707654
FEET 0.292346
Name: LEN_UNIT, dtype: float64
VELOCITY value counts:
-9999.0 1.0
Name: VELOCITY, dtype: float64
BFE_REVERT value counts:
-9999.0 1.0
Name: BFE_REVERT, dtype: float64
DEP_REVERT value counts:
-9999.0 1.0
Name: DEP_REVERT, dtype: float64
SOURCE_CIT value counts:
STUDY4 0.737014
STUDY3 0.257716
STUDY5 0.005270
Name: SOURCE_CIT, dtype: float64
###Markdown
Reviewing possible columns to drop. After reviewing items within each column, there are a few columns that have only one value, as well as values that look off (e.g. -9999.0). Let's continue to inspect these columns.
###Code
pfirm_df.iloc[:, -5:-1].head()
# summary statistics of possible columns to drop
pfirm_df.iloc[:, -5:-1].describe()
###Output
_____no_output_____
###Markdown
These columns don't appear to be useful in this analysis, so I'll drop them.
###Code
print('number of initial columns: {}'.format(pfirm_df.shape[1]))
col_drop = pfirm_df.iloc[:, -5:-1].columns
print('columns to drop:', col_drop.to_list())
pfirm_df = pfirm_df.drop(columns=col_drop)
print('number of new columns: {}'.format(pfirm_df.shape[1]))
pfirm_df.head()
###Output
number of initial columns: 13
columns to drop: ['VELOCITY', 'BFE_REVERT', 'DEP_REVERT', 'SOURCE_CIT']
number of new columns: 9
###Markdown
Let's continue inspecting our data
###Code
# inspecting our data
pfirm_df.info()
s1 = pfirm_df.isnull().sum().sort_values(ascending=False)
s2 = round(pfirm_df.isnull().sum().sort_values(ascending=False) / len(pfirm_df), 2)
print('null statistics:')
pd.concat([s1.rename('count_null'), s2.rename('normalized')], axis=1)
###Output
null statistics:
###Markdown
We've inspected most of our columns, and even dropped some that didn't have any values stored in them. We did see weird values such as -9999, but for now, let's turn our attention to our geometry column. 2.3 Inspecting our geometry column Lastly, let's inspect our geometry column, which is a type provided by the shapefile (.shp) format.
###Code
print('reviewing geometry column:')
pfirm_df[['geometry']].head()
pfirm_df[['geometry']].info()
print("reviewing the geometry's coordinate referance system (CRS).")
pfirm_df.crs
###Output
reviewing the geometry's coordinate reference system (CRS).
###Markdown
From the `.crs` attribute above, our geometry's CRS is *NAD83 / New York Long Island (ftUS)* - code 2263.Our units are in feet.
###Code
# looking at the values in the V_DATUM column
pfirm_df['V_DATUM'].value_counts()
###Output
_____no_output_____
###Markdown
We could potentially drop this column and just record the vertical datum on the page, but this does provide useful information. Let's keep it.
###Code
v_datum = pfirm_df['V_DATUM'].value_counts().index[0]
print('the vertical datum for this geometry is: {}.'.format(v_datum))
###Output
the vertical datum for this geometry is: NAVD88.
###Markdown
There's a lot of useful information stored in our geometry column, but for now, let's continue and inspect our flood zone values. 3. Inspecting flood zones and static base flood elevation values Definitions from our [data dictionary](https://github.com/mebauer/nyc-floodzone-analysis/blob/master/data_dictionary.pdf):FLD_ZONE>Flood Zone Lookup Identification. This is a code that provides a link to a valid entry from the D_Zone table. This is the flood zone label/abbreviation for the area.STATIC_BFE>Static Base Flood Elevation. For areas of constant Base Flood Elevation (BFE), the BFE value is shown beneath the zone label rather than on a BFE line. In this situation the same BFE applies to the entire polygon. This is normally occurs in lakes or coastal zones. This field is only populated where a static BFE is shown on the FIRM.DEPTH>Depth Value for Zone AO Areas. This is shown beneath the zone label on the FIRM. This field is only populated if a depth is shown on the FIRM.SFHA_TF>Special Flood Hazard Area. If the area is within SFHA this field would be True. This field will be true for any area that is coded for any A or V zone flood areas. It should be false for any X or D zone flood areas. Enter “T” for true or “F” for false. Brief definition of FEMA's flood zones:>Flood hazard areas identified on the Flood Insurance Rate Map are identified as a Special Flood Hazard Area (SFHA). SFHA are defined as the area that will be inundated by the flood event having a 1-percent chance of being equaled or exceeded in any given year. The 1-percent annual chance flood is also referred to as the base flood or 100-year flood. SFHAs are labeled as Zone A, Zone AO, Zone AH, Zones A1-A30, Zone AE, Zone A99, Zone AR, Zone AR/AE, Zone AR/AO, Zone AR/A1-A30, Zone AR/A, Zone V, Zone VE, and Zones V1-V30. Moderate flood hazard areas, labeled Zone B or Zone X (shaded) are also shown on the FIRM, and are the areas between the limits of the base flood and the 0.2-percent-annual-chance (or 500-year) flood. The areas of minimal flood hazard, which are the areas outside the SFHA and higher than the elevation of the 0.2-percent-annual-chance flood, are labeled Zone C or Zone X (unshaded). Source: https://www.fema.gov/glossary/flood-zones
###Code
print('reviewing items in the FLD_ZONE column:')
pfirm_df['FLD_ZONE'].value_counts()
zone_vals = pfirm_df['FLD_ZONE'].unique()
print('the unique values in our flood zone column are:\n\n{}'.format(zone_vals))
###Output
the unique values in our flood zone column are:
['AE' 'X' '0.2 PCT ANNUAL CHANCE FLOOD HAZARD' 'VE' 'OPEN WATER' 'AO' 'A']
###Markdown
From a quick glance, we can probably drop the `OPEN WATER` value, as it appears to be just the outline of a water boundary. Additionally, zone 'X' doesn't tell us any information about flood zones.
###Code
zone_checks = ['OPEN WATER', 'X']
fig, axs = plt.subplots(2, 1, figsize=(10,10))
for zone, ax in zip(zone_checks, axs):
pfirm_df[pfirm_df['FLD_ZONE'].isin([zone])].plot(ax=ax)
ax.set_title('FLD_ZONE = {}.'.format(zone))
###Output
_____no_output_____
###Markdown
Let's look at some summary statistics.
###Code
print('reviewing summary statistics for base flood elevations by flood zone:')
pfirm_df.groupby(by=['FLD_ZONE'])['STATIC_BFE'].describe()
# zones with base flood elevations
bfe_df = pfirm_df.groupby(by=['FLD_ZONE'])['STATIC_BFE'].describe()
bfe_df[bfe_df['max'] > 0]
zones = bfe_df[bfe_df['max'] > 0].index.to_list()
print('zones {} are the only zones that have static base flood elevations.'.format(zones))
###Output
zones ['AE', 'VE'] are the only zones that have static base flood elevations.
###Markdown
The null placeholder of -9999 will be problematic in the future. We will want to replace these with nulls.
###Code
print('reviewing summary statistics for depth elevations by flood zone:')
pfirm_df.groupby(by=['FLD_ZONE'])['DEPTH'].describe()
# zones with base flood elevations
depths_df = pfirm_df.groupby(by=['FLD_ZONE'])['DEPTH'].describe()
depths_df[depths_df['max'] > 0]
depths = depths_df[depths_df['max'] > 0].index.to_list()
print('zone {} is the only zone that has a depth elevation.'.format(depths))
sfha_df = pfirm_df.groupby(by=['SFHA_TF', 'FLD_ZONE'])['STATIC_BFE'].describe()
sfha_df.sort_values(by='SFHA_TF', ascending=False)
###Output
_____no_output_____
###Markdown
Although zones A & AO don't have base flood elevations, they are inside the special flood hazard area (SFHA). Thus, we can not drop them. `T` stands for **True** and `F` stands for **False**. 3.1 Replacing -9999 values with null It seems to be that this dataset represents null values for certain numeric columns as -9999 (refer to screenshot below). Let's see what the values are if we exclude these values. *Figure 5. Screenshot of null description found in data dictionary*
###Code
pfirm_df.loc[pfirm_df['STATIC_BFE'] > -9999][['STATIC_BFE']].describe()
###Output
_____no_output_____
###Markdown
Those values look much better and are probably the real statistics of our static base flood elevation column. For this analysis, we can safely replace the -9999.0 values with nulls. Let's first track our replacement process.
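The next cells use `np.where` for the replacement; assuming -9999.0 only ever appears as the null placeholder in these two columns, an equivalent one-liner with `DataFrame.replace` would be:
```python
# equivalent replacement sketch (assumes -9999.0 is only ever used as the null placeholder)
pfirm_df[['STATIC_BFE', 'DEPTH']] = pfirm_df[['STATIC_BFE', 'DEPTH']].replace(-9999.0, np.nan)
```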
###Code
null_length = len(pfirm_df.loc[pfirm_df['STATIC_BFE'] <= -9999])
print('there are {:,} values with null placeholders of -9999 for STATIC_BFE that \
need to be replaced with nulls.'.format(null_length))
s1 = pfirm_df.isnull().sum().sort_values(ascending=False)
s2 = round(pfirm_df.isnull().sum().sort_values(ascending=False) / len(pfirm_df), 3)
print('null statistics:')
print('total nulls in dataframe: {:,}.'.format(s1.sum()))
before_replace_df = pd.concat([s1.rename('count_null'), s2.rename('normalized')], axis=1)
before_replace_df.head(len(before_replace_df))
print('replacing incorrectly inserted values (e.g. -9999) with nans.')
for col in ['STATIC_BFE', 'DEPTH']:
# if our column value is less than -1, replace with nan, else keep column value
pfirm_df[col] = np.where(pfirm_df[col] < -1, np.nan, pfirm_df[col])
pfirm_df.describe()
s1 = pfirm_df.isnull().sum().sort_values(ascending=False)
s2 = round(pfirm_df.isnull().sum().sort_values(ascending=False) / len(pfirm_df), 3)
print('null statistics:')
print('total nulls in dataframe: {:,}.'.format(s1.sum()))
after_replace_df = pd.concat([s1.rename('count_null'), s2.rename('normalized')], axis=1)
after_replace_df.head(len(after_replace_df))
pd.concat([before_replace_df,
after_replace_df.rename(columns={'count_null':'count_after',
'normalized':'normalized_after'})], axis=1)
###Output
_____no_output_____
###Markdown
You can see that both columns STATIC_BFE and DEPTH have a significant number of null values after replacing -9999 with null. 3.2 Reviewing summary statistics by flood zone after filling in nulls
###Code
print('reviewing summary statistics for static base flood elevations by flood zone:')
pfirm_df.groupby(by=['FLD_ZONE'])['STATIC_BFE'].describe()
sfha_df = pfirm_df.groupby(by=['SFHA_TF', 'FLD_ZONE'])['STATIC_BFE'].describe()
sfha_df.sort_values(by='SFHA_TF', ascending=False)
###Output
_____no_output_____
###Markdown
Performing summary statistics of static base flood elevations grouped by Special Flood Hazard Area (True/False) and Flood Zone. Zones AE and VE are the only ones with elevation values, and the remaining zones have null values.
###Code
print('distribution of numeric columns. this is perhaps the true distribution of these columns:')
hist = pfirm_df.hist(figsize=(12,6), bins=25)
for ax in hist.flatten():
ax.set_xlabel("ft.", fontsize=12)
ax.set_ylabel("Count", fontsize=12)
plt.tight_layout()
print('inspecting the distribution of numeric columns with box plots:')
fig, axs = plt.subplots(2, 1, figsize=(10,12))
for ax, col in zip(axs.flat, ['STATIC_BFE', 'DEPTH']):
sns.boxplot(x=col,
y="FLD_ZONE",
data=pfirm_df,
ax=ax)
ax.set_xlabel(col + ' (ft.)')
###Output
inspecting the distribution of numeric columns with box plots:
###Markdown
Flood zones VE and AE are the only zones that have a static base flood elevation (BFE) value. Flood zone AO is the only zone with a depth elevation, although these depths are very small. 3.3 Dropping unnecessary flood zone values There are two flood zones in particular, X and OPEN WATER, that may be good candidates to drop. We don't want to drop any flood zones in the special flood hazard area (AE, VE, A, and AO), and we also don't want to drop the 0.2 % annual chance flood zone (although outside of the special flood hazard area, this information is still quite useful). Let's see if we can safely drop the previously mentioned values.
###Code
# number of records in each flood zone
pfirm_df['FLD_ZONE'].value_counts()
# preview the dataframe where flood zones are open water and x
pfirm_df.loc[pfirm_df['FLD_ZONE'].isin(['OPEN WATER', 'X'])].head()
# summary statistics where flood zones are open water and x
pfirm_df.loc[pfirm_df['FLD_ZONE'].isin(['OPEN WATER', 'X'])].iloc[:, :-1].describe(include='all')
# preview count of records grouped by flood zones are open water and x
d1 = pfirm_df.loc[pfirm_df['FLD_ZONE'].isin(['OPEN WATER', 'X'])]
d1.groupby(by=['FLD_ZONE', 'SFHA_TF']).count()
print('number of records in total data frame: {:,}'.format(pfirm_df.shape[0]))
length = pfirm_df.loc[pfirm_df['FLD_ZONE'].isin(['OPEN WATER', 'X'])].shape[0]
print('number of records to drop where flood zone is open water and x: {:,}'.format(length))
pfirm_df = pfirm_df.loc[~pfirm_df['FLD_ZONE'].isin(['OPEN WATER', 'X'])]
print('number of records in dataframe after dropping open water and x zones: {:,}'.format(pfirm_df.shape[0]))
# our dropped flood zones are no longer included in dataframe
pfirm_df.groupby(by=['SFHA_TF', 'FLD_ZONE']).count()
# number of records in each flood zone
pfirm_df['FLD_ZONE'].value_counts()
# a quick preview of our map after dropping open water and x flood zones
pfirm_df.plot()
# summary statistics of data
pfirm_df.iloc[:, :-1].describe(include='all')
# summary of data
pfirm_df.info()
fig, ax = plt.subplots(figsize=(10, 6))
sns.histplot(pfirm_df,
x='STATIC_BFE',
hue='FLD_ZONE',
multiple="stack")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Distribution of static base flood elevations (ft.) by flood zone.
###Code
fig, ax = plt.subplots(figsize=(10, 6))
sns.histplot(pfirm_df,
x='STATIC_BFE',
hue='FLD_ZONE',
multiple="stack",
element='step')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Distribution of static base flood elevations (ft.) by flood zone where element = step.
###Code
# plotting static base flood elevations distribution by flood zone per plot
zones = pfirm_df['FLD_ZONE'].unique()
for zone in zones:
plt.figure()
plot = pfirm_df.loc[pfirm_df['FLD_ZONE'] == zone]
sns.histplot(plot, x='STATIC_BFE')
plt.title('FLD_ZONE = {}'.format(zone))
# previewing count of flood zones by within special flood hazard area
pfirm_df['SFHA_TF'].value_counts()
# summary statistics of static base flood elevations by special flood hazard area
pfirm_df.groupby(by=['SFHA_TF'])['STATIC_BFE'].describe()
###Output
_____no_output_____
###Markdown
4. Inspect the geometry of the data 4.1 Mapping the flood zone *Figure 6. Screenshot of borough boundaries page on nyc open data*Website link: https://data.cityofnewyork.us/City-Government/Borough-Boundaries/tqmj-j8zm
###Code
# importing borough boundaries for better aesthetics
path = 'https://data.cityofnewyork.us/api/geospatial/tqmj-j8zm?method=export&format=Shapefile'
borough_gdf = gpd.read_file(path)
borough_gdf.head()
# quick plot of boroughs
borough_gdf.plot()
plt.title('nyc boroughs')
# converting to the local crs of our floodzone dataframe
borough_gdf = borough_gdf.to_crs(epsg=2263)
print(borough_gdf.crs)
borough_gdf.plot()
plt.title('nyc boroughs')
fig, ax = plt.subplots(figsize=(10, 10))
pfirm_df.plot(ax=ax)
borough_gdf.plot(ax=ax,
facecolor='none',
edgecolor='black',
zorder=1)
plt.title('Preliminary Flood Insurance Rate Map of New York City', fontsize=15)
# preview active crs
pfirm_df.crs
# adding a column for our geometry converted to WGS84
pfirm_df['wgs84'] = pfirm_df.to_crs(epsg=4326)['geometry']
pfirm_df.head()
# make sure 2263 is still our active geometry
pfirm_df.crs
fig, ax = plt.subplots(figsize=(10, 10))
# plotting our map in WGS84
pfirm_df.set_geometry('wgs84').plot(ax=ax)
borough_gdf.to_crs(epsg=4326).plot(ax=ax,
facecolor='none',
edgecolor='black',
zorder=1)
plt.title('Preliminary Flood Insurance Rate Map of New York City', fontsize=15)
###Output
_____no_output_____
###Markdown
4.2 Analyzing the flood zone's area
###Code
# previewing geometry's area in square ft.
pfirm_df.area.head()
# converting square feet to square miles
sq_mi = pfirm_df.area / 5280**2
sq_mi.head()
# summary statistics of area in sq mi
sq_mi_df = sq_mi.to_frame().rename(columns={0:'area sq mi'})
round(sq_mi_df.describe(), 4)
sq_mi_df.hist(bins=25, figsize=(10,6))
plt.title("Distribution of Each Geometry's Area", fontsize=15)
plt.xlabel('sq mi')
plt.ylabel('count')
print('flood zone area statistics:\n')
print('area in square feet: {:,.0f}'.format(pfirm_df.area.sum()))
print('area in square miles: {:,.0f}'.format(pfirm_df.area.sum() / 5280**2))
# creating a dataframe of only flood zones in the special flood hazard area (sfha)
# 0.2 pct chance is excluded from the sfha
sfha_df = pfirm_df[~pfirm_df['FLD_ZONE'].isin(['0.2 PCT ANNUAL CHANCE FLOOD HAZARD'])]
sfha_df['FLD_ZONE'].value_counts()
print('area statistics only of special flood hazard area zones:\n')
print('area in square feet: {:,.0f}'.format(sfha_df.area.sum()))
print('area in square miles: {:,.0f}'.format(sfha_df.area.sum() / 5280**2))
# retrieving area calculations by flood zone
zones = pfirm_df['FLD_ZONE'].unique()
empty_dict = {}
for zone in zones:
zone_df = pfirm_df.loc[pfirm_df['FLD_ZONE'] == zone]
zone_series = zone_df.area.sum() / 5280**2
empty_dict.update({zone:zone_series})
empty_dict
s1 = pd.Series(empty_dict).rename('area (sq mi)')
s1 = s1.reset_index().rename(columns={'index': 'zone'})
s1 = s1.sort_values(by='area (sq mi)', ascending=False)
# plot of area by flood zone
fig, ax = plt.subplots(figsize=(8, 8))
sns.barplot(data=s1,
x='area (sq mi)',
y='zone')
plt.title('Area by Flood Zone in NYC', fontsize=15)
###Output
_____no_output_____ |
code/anomalies-detection.ipynb | ###Markdown
EDA
###Code
# imports required by the cells below; the aliases (dt, pd, np, plt, sns, web) match the
# calls used in this notebook, and `web` is assumed to come from pandas_datareader
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_datareader.data as web
symbols = ['GME', 'BB', 'NOK', 'AMC']
# year, month, and day.
start = dt.datetime(2014, 12, 1)
end = dt.datetime.now()
volume = []
closes = []
for symbol in symbols:
print(symbol)
vdata = web.DataReader(symbol, 'yahoo', start, end)
cdata = vdata[['Close']]
closes.append(cdata)
vdata = vdata[['Volume']]
volume.append(vdata)
volume = pd.concat(volume, axis = 1).dropna()
volume.columns = symbols
closes = pd.concat(closes, axis = 1).dropna()
closes.columns = symbols
volume.head()
volume.plot(title = "Trading volume",figsize=(12, 6))
plt.savefig("figures/trading_volume.pdf")
plt.show()
closes.plot(title = "Close price", figsize=(12, 6))
plt.savefig("figures/close_price.pdf")
plt.show()
print(volume.describe().to_latex())
print("Skewness: %f" % volume['GME'].skew())
print("Kurtosis: %f" % volume['GME'].kurt())
sns_plot = sns.distplot(volume['GME'])
plt.title("Distribution of trading volume GME")
sns.despine()
sns_plot.get_figure().savefig("figures/distribution_trading_volume_GME.pdf")
sns_plot = sns.distplot(closes['GME'])
plt.title("Distribution of closing price GME")
sns.despine()
sns_plot.get_figure().savefig("figures/distribution_closing_price_GME.pdf")
sns_plot = sns.regplot(x="GME", y="BB", data=volume)
plt.title("Regplot GME BB")
sns.despine();
sns_plot.get_figure().savefig("figures/regplot_GME_BB.pdf")
sns_plot = sns.regplot(x="GME", y="AMC", data=volume)
plt.title("Regplot GME AMC")
sns.despine();
sns_plot.get_figure().savefig("figures/regplot_GME_AMC.pdf")
sns_plot = sns.regplot(x="GME", y="NOK", data=volume)
plt.title("Regplot GME NOK")
sns.despine();
sns_plot.get_figure().savefig("figures/regplot_GME_NOK.pdf")
volume.plot.scatter('GME','BB')
plt.savefig("figures/GME_BB.pdf")
plt.show()
volume.plot.scatter('GME','NOK')
plt.savefig("figures/GME_NOK.pdf")
plt.show()
volume.plot.scatter('GME','AMC')
plt.savefig("figures/GME_AMC.pdf")
plt.show()
GME_BB_volume = volume.iloc[:, 0:2]
GME_BB_volume.head()
###Output
_____no_output_____
###Markdown
Models to use IsolationForest (Univariate Anomaly Detection)
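Before scoring a synthetic value grid as in the cells below, it can help to see which actual trading days the forest flags. A minimal sketch (assuming the `volume` DataFrame built above; the `contamination` value here is just a guess):
```python
from sklearn.ensemble import IsolationForest

# flag the trading days whose GME volume the forest labels as outliers
iso = IsolationForest(n_estimators=100, contamination=0.01, random_state=42)
labels = iso.fit_predict(volume[['GME']])  # -1 = outlier, 1 = inlier
print(volume.index[labels == -1])
```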
###Code
from sklearn.ensemble import IsolationForest
#GME volume
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(volume['GME'].values.reshape(-1, 1))
xx = np.linspace(volume['GME'].min(), volume['GME'].max(), len(volume)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('GME Anomaly score')
plt.xlabel('GME Trading volume')
plt.savefig("figures/if_trading_GME.pdf")
plt.show();
# GME closing price
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(closes['GME'].values.reshape(-1, 1))
xx = np.linspace(closes['GME'].min(), closes['GME'].max(), len(closes)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('GME Anomaly score')
plt.xlabel('GME Closing price')
plt.savefig("figures/if_closing_GME.pdf")
plt.show();
# Nokia volume
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(volume['NOK'].values.reshape(-1, 1))
xx = np.linspace(volume['NOK'].min(), volume['NOK'].max(), len(volume)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('NOK Anomaly score')
plt.xlabel('NOK Trading volume')
plt.savefig("figures/if_trading_NOK.pdf")
plt.show();
# Nokia closing price
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(closes['NOK'].values.reshape(-1, 1))
xx = np.linspace(closes['NOK'].min(), closes['NOK'].max(), len(closes)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('NOK Anomaly score')
plt.xlabel('NOK Closing price')
plt.savefig("figures/if_closing_NOK.pdf")
plt.show();
# AMC volume
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(volume['AMC'].values.reshape(-1, 1))
xx = np.linspace(volume['AMC'].min(), volume['AMC'].max(), len(volume)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('AMC Anomaly score')
plt.xlabel('AMC Trading volume')
plt.savefig("figures/if_trading_AMC.pdf")
plt.show();
# AMC closing price
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(closes['AMC'].values.reshape(-1, 1))
xx = np.linspace(closes['AMC'].min(), closes['AMC'].max(), len(closes)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('AMC Anomaly score')
plt.xlabel('AMC Closing price')
plt.savefig("figures/if_closing_AMC.pdf")
plt.show();
# BB volume
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(volume['BB'].values.reshape(-1, 1))
xx = np.linspace(volume['BB'].min(), volume['BB'].max(), len(volume)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('BB Anomaly score')
plt.xlabel('BB Trading volume')
plt.savefig("figures/if_trading_BB.pdf")
plt.show();
# BB closing price
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(closes['BB'].values.reshape(-1, 1))
xx = np.linspace(closes['BB'].min(), closes['BB'].max(), len(closes)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('BB Anomaly score')
plt.xlabel('BB Closing price')
plt.savefig("figures/if_closing_BB.pdf")
plt.show();
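# The eight blocks above repeat the same univariate IsolationForest procedure.
# A possible refactor (a sketch, not part of the original notebook) that wraps the
# shared logic; `series` is any single column of `volume` or `closes`.
def plot_isolation_forest(series, ylabel, xlabel, filename=None):
    forest = IsolationForest(n_estimators=100)
    forest.fit(series.values.reshape(-1, 1))
    grid = np.linspace(series.min(), series.max(), len(series)).reshape(-1, 1)
    score = forest.decision_function(grid)
    flag = forest.predict(grid)
    plt.figure(figsize=(10, 4))
    plt.plot(grid, score, label='anomaly score')
    plt.fill_between(grid.T[0], np.min(score), np.max(score),
                     where=flag == -1, color='r', alpha=.4, label='outlier region')
    plt.legend()
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    if filename is not None:
        plt.savefig(filename)
    plt.show()
# Example usage: plot_isolation_forest(volume['GME'], 'GME Anomaly score', 'GME Trading volume')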
###Output
_____no_output_____
###Markdown
PyOD
###Code
# Import all models
from pyod.models.abod import ABOD
from pyod.models.cblof import CBLOF
from pyod.models.hbos import HBOS
from pyod.models.iforest import IForest
from pyod.models.knn import KNN
from pyod.models.lof import LOF
from pyod.models.mcd import MCD
from pyod.models.ocsvm import OCSVM
from pyod.models.pca import PCA
from pyod.models.lscp import LSCP
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
GME_BB_volume[['GME','BB']] = scaler.fit_transform(GME_BB_volume[['GME','BB']])
GME_BB_volume[['GME','BB']].head()
GME_BB_volume[['GME','BB']].size
X1 = GME_BB_volume['GME'].values.reshape(-1,1)
X2 = GME_BB_volume['BB'].values.reshape(-1,1)
X = np.concatenate((X1,X2),axis=1)
# initialize a set of detectors for LSCP
detector_list = [LOF(n_neighbors=5), LOF(n_neighbors=10), LOF(n_neighbors=15),
LOF(n_neighbors=20), LOF(n_neighbors=25), LOF(n_neighbors=30),
LOF(n_neighbors=35), LOF(n_neighbors=40), LOF(n_neighbors=45),
LOF(n_neighbors=50)]
random_state = np.random.RandomState(42)
outliers_fraction = 0.01
# Define seven outlier detection tools to be compared
classifiers = {
'Angle-based Outlier Detector (ABOD)':
ABOD(contamination=outliers_fraction),
'Cluster-based Local Outlier Factor (CBLOF)':
CBLOF(contamination=outliers_fraction,
check_estimator=False, random_state=random_state),
    'Histogram-based Outlier Detection (HBOS)': HBOS(
contamination=outliers_fraction),
'Isolation Forest': IForest(contamination=outliers_fraction,
random_state=random_state),
'K Nearest Neighbors (KNN)': KNN(
contamination=outliers_fraction),
'Average KNN': KNN(method='mean',
contamination=outliers_fraction),
'Local Outlier Factor (LOF)':
LOF(n_neighbors=35, contamination=outliers_fraction),
'Minimum Covariance Determinant (MCD)': MCD(
contamination=outliers_fraction, random_state=random_state),
'One-class SVM (OCSVM)': OCSVM(contamination=outliers_fraction),
'Principal Component Analysis (PCA)': PCA(
contamination=outliers_fraction, random_state=random_state),
'Locally Selective Combination (LSCP)': LSCP(
detector_list, contamination=outliers_fraction,
random_state=random_state)
}
xx , yy = np.meshgrid(np.linspace(0,1 , 200), np.linspace(0, 1, 200))
for i, (clf_name, clf) in enumerate(classifiers.items()):
print(i + 1, 'fitting', clf_name)
clf.fit(X)
# predict raw anomaly score
scores_pred = clf.decision_function(X) * -1
    # predict whether each datapoint is an outlier (1) or an inlier (0)
y_pred = clf.predict(X)
n_inliers = len(y_pred) - np.count_nonzero(y_pred)
n_outliers = np.count_nonzero(y_pred == 1)
plt.figure(figsize=(25, 15))
# copy of dataframe
dfx = GME_BB_volume
dfx['outlier'] = y_pred.tolist()
# IX1 - inlier feature 1, IX2 - inlier feature 2
IX1 = np.array(dfx['GME'][dfx['outlier'] == 0]).reshape(-1,1)
IX2 = np.array(dfx['BB'][dfx['outlier'] == 0]).reshape(-1,1)
# OX1 - outlier feature 1, OX2 - outlier feature 2
OX1 = dfx['GME'][dfx['outlier'] == 1].values.reshape(-1,1)
OX2 = dfx['BB'][dfx['outlier'] == 1].values.reshape(-1,1)
print('OUTLIERS : ',n_outliers,'INLIERS : ',n_inliers, clf_name)
# threshold value to consider a datapoint inlier or outlier
threshold = stats.scoreatpercentile(scores_pred,100 * outliers_fraction)
# decision function calculates the raw anomaly score for every point
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) * -1
Z = Z.reshape(xx.shape)
subplot = plt.subplot(3, 4, i + 1)
    # fill contour with a blue colormap from the minimum anomaly score to the threshold value
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),cmap=plt.cm.Blues_r)
    # draw red contour line where anomaly score is equal to threshold
a = subplot.contour(xx, yy, Z, levels=[threshold],linewidths=2, colors='red')
# fill orange contour lines where range of anomaly score is from threshold to maximum anomaly score
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],colors='orange')
b = subplot.scatter(IX1,IX2, c='white',s=20, edgecolor='k')
c = subplot.scatter(OX1,OX2, c='black',s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b,c],
['learned decision function', 'inliers','outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s" % (i + 1, clf_name))
subplot.set_xlim((0, 1))
subplot.set_ylim((0, 1))
plt.show()
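# Optional follow-up (an addition, not in the original notebook): list the dates
# flagged as outliers by the last fitted detector, using the 'outlier' column
# written into dfx inside the loop above.
print(dfx[dfx['outlier'] == 1].index)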
###Output
1 fitting Angle-based Outlier Detector (ABOD)
OUTLIERS : 16 INLIERS : 1536 Angle-based Outlier Detector (ABOD)
2 fitting Cluster-based Local Outlier Factor (CBLOF)
|
examples/extreme_response_full_sea_state_example.ipynb | ###Markdown
Extreme Conditions Modeling - Full Sea State ApproachExtreme conditions modeling consists of identifying the expected extreme (e.g. 100-year) response of some quantity of interest, such as WEC motions or mooring loads. Three different methods of estimating extreme conditions were adapted from [WDRT](https://github.com/WEC-Sim/WDRT): full sea state approach, contour approach, and MLER design wave. This notebook presents the full sea state approach. The full sea state approach consists of the following steps: 1. Take $N$ samples to represent the sea state. Each sample represents a small area of the sea state and consists of a representative $(H_{s}, T_{e})_i$ pair and a weight $W_i$ associated with the probability of that sea state area. 2. For each sample $(H_{s}, T_{e})_i$ calculate the short-term (e.g. 3-hour) extreme for the quantity of interest (e.g. WEC motions or mooring tension).3. Integrate over the entire sea state to obtain the long-term extreme. This is a sum of the products of the weight of each sea state times the short-term extreme. See more details and equations in> [1] Coe, Ryan G., Carlos A. Michelén Ströfer, Aubrey Eckert-Gallup, and Cédric Sallaberry. 2018. “Full Long-Term Design Response Analysis of a Wave Energy Converter.” Renewable Energy 116: 356–66.**NOTE:** Prior to running this example it is recommended to become familiar with `environmental_contours_example.ipynb` and `short_term_extremes_example.ipynb` since some code blocks are adapted from those examples and used here without additional description. We start by importing the relevant modules, including the `waves.contours` submodule, which includes the sampling function, and `loads.extreme`, which includes the short-term extreme and full sea state integration functions.
###Code
from mhkit.wave import resource, contours, graphics
from mhkit.loads import extreme
import matplotlib.pyplot as plt
from mhkit.wave.io import ndbc
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Obtain and Process NDBC Buoy DataThe first step will be obtaining the environmental data and creating the contours. See `environmental_contours_example.ipynb` for more details and explanations of how this is being done in the following code block.
###Code
parameter = 'swden'
buoy_number = '46022'
ndbc_available_data = ndbc.available_data(parameter, buoy_number)
years_of_interest = ndbc_available_data[ndbc_available_data.year < 2013]
filenames = years_of_interest['filename']
ndbc_requested_data = ndbc.request_data(parameter, filenames)
ndbc_data = {}
for year in ndbc_requested_data:
year_data = ndbc_requested_data[year]
ndbc_data[year] = ndbc.to_datetime_index(parameter, year_data)
Hm0_list = []
Te_list = []
# Iterate over each year and append the results to the lists initialized above
for year in ndbc_data:
year_data = ndbc_data[year]
Hm0_list.append(resource.significant_wave_height(year_data.T))
Te_list.append(resource.energy_period(year_data.T))
# Concatenate list of Series into a single DataFrame
Te = pd.concat(Te_list, axis=0)
Hm0 = pd.concat(Hm0_list, axis=0)
Hm0_Te = pd.concat([Hm0, Te], axis=1)
# Drop any NaNs created from the calculation of Hm0 or Te
Hm0_Te.dropna(inplace=True)
# Sort the DateTime index
Hm0_Te.sort_index(inplace=True)
Hm0_Te_clean = Hm0_Te[Hm0_Te.Hm0 < 20]
Hm0 = Hm0_Te_clean.Hm0.values
Te = Hm0_Te_clean.Te.values
dt = (Hm0_Te_clean.index[2]-Hm0_Te_clean.index[1]).seconds
###Output
_____no_output_____
###Markdown
1. SamplingThe first step is sampling the sea state to get samples $(H_s, T_e)_i$ and associated weights. For this we will use the `waves.contours.samples_full_seastate` function. We will sample 20 points between each return level, for 10 levels ranging from 0.001 to 100 year return periods. For more details on the sampling approach see> [1] Coe, Ryan G., Carlos A. Michelén Ströfer, Aubrey Eckert-Gallup, and Cédric Sallaberry. 2018. “Full Long-Term Design Response Analysis of a Wave Energy Converter.” Renewable Energy 116: 356–66.> [2] Eckert-Gallup, Aubrey C., Cédric J. Sallaberry, Ann R. Dallman, and Vincent S. Neary. 2016. “Application of Principal Component Analysis (PCA) and Improved Joint Probability Distributions to the Inverse First-Order Reliability Method (I-FORM) for Predicting Extreme Sea States.” Ocean Engineering 112 (January): 307–19.
###Code
# return levels
levels = np.array([0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100])
# points per return level interval
npoints = 20
# Create samples
sample_hs, sample_te, sample_weights = contours.samples_full_seastate(
Hm0, Te, npoints, levels, dt)
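# Quick sanity check (an addition, not in the original notebook): the number of
# samples drawn and the total probability weight they carry. With 10 levels and
# 20 points per interval we expect 200 samples; the weights are probabilities of
# the sampled sea state areas, so their sum should be roughly the total probability
# covered by the sampled region.
print(len(sample_hs), len(sample_te), np.sum(sample_weights))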
###Output
_____no_output_____
###Markdown
We will now plot the samples alongside the contours. First we will create the different contours using `contours.environmental_contours`. See `environmental_contours_example.ipynb` for more details on using this function. There are 20 samples, randomly distributed, between each set of return levels.
###Code
# Create the contours
Te_contours = []
Hm0_contours = []
for period in levels:
copula = contours.environmental_contours(
Hm0, Te, dt, period, 'PCA', return_PCA=True)
Hm0_contours.append(copula['PCA_x1'])
Te_contours.append(copula['PCA_x2'])
# plot
fig, ax = plt.subplots(figsize=(8, 4))
labels = [f"{period}-year Contour" for period in levels]
ax = graphics.plot_environmental_contour(
sample_te, sample_hs, Te_contours, Hm0_contours,
data_label='Samples', contour_label=labels,
x_label='Energy Period, $Te$ [s]',
y_label='Sig. wave height, $Hm0$ [m]', ax=ax)
###Output
_____no_output_____
###Markdown
2. Short-Term Extreme DistributionsMany different methods for short-term extremes were adapted from WDRT, and a summary and examples can be found in `short_term_extremes_example.ipynb`. The response quantity of interest is typically related to the WEC itself, e.g. maximum heave displacement, PTO extension, or load on the mooring lines. This requires running a simulation (e.g. WEC-Sim) for each of the 200 sampled sea states $(H_s, T_e)_i$. For the sake of example we will consider the wave elevation as the quantity of interest (it can be thought of as a proxy for heave motion in this example). Wave elevation time-series for a specific sea state can be created quickly without running any external software. **NOTE:** The majority of the for loop below is simply creating the synthetic data (wave elevation time series). In a realistic case the variables `time` and `data` describing each time series would be obtained externally, e.g. through simulation software such as WEC-Sim or CFD. For this reason the details of creating the synthetic data are not presented here; instead, assume for each sea state there is time-series data available. The last lines of the for-loop create the short-term extreme distribution from the time-series using the `loads.extreme.short_term_extreme` function. The short-term period will be 3 hours and we will use 1-hour "simulations" and the Weibull tail-fitting method to estimate the 3-hour short-term extreme distributions for each of the 200 samples.For more details on short-term extreme distributions see `short_term_extremes_example.ipynb` and > [3] Michelén Ströfer, Carlos A., and Ryan Coe. 2015. “Comparison of Methods for Estimating Short-Term Extreme Response of Wave Energy Converters.” In OCEANS 2015 - MTS/IEEE Washington, 1–6. IEEE.
###Code
# create the short-term extreme distribution for each sample sea state
t_st = 3.0 * 60.0 * 60.0
gamma = 3.3
t_sim = 1.0 * 60.0 * 60.0
ste_all = []
i = 0
n = len(sample_hs)
for hs, te in zip(sample_hs, sample_te):
tp = te / (0.8255 + 0.03852*gamma - 0.005537*gamma**2 + 0.0003154*gamma**3)
i += 1
print(f"Sea state {i}/{n}. (Hs, Te) = ({hs} m, {te} s). Tp = {tp} s")
# time & frequency arrays
f0 = 1.0/t_sim
T_min = tp/10.0 # s
f_max = 1.0/T_min
Nf = int(f_max/f0)
time = np.linspace(0, t_sim, 2*Nf+1)
f = np.linspace(f0, f_max, Nf)
# spectrum
S = resource.jonswap_spectrum(f, tp, hs, gamma)
# 1-hour elevation time-series
data = resource.surface_elevation(S, time).values.squeeze()
# 3-hour extreme distribution
ste = extreme.short_term_extreme(time, data, t_st, 'peaks_weibull_tail_fit')
ste_all.append(ste)
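# Optional check (an addition, not in the original notebook): each entry of ste_all
# is a probability distribution object of the same kind used for the long-term
# result below, so it can be queried directly, e.g. the median 3-hour extreme for
# the first sampled sea state.
print(ste_all[0].ppf(0.5))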
###Output
Sea state 1/200. (Hs, Te) = (2.7476690431712387 m, 9.898396804782426 s). Tp = 10.953763434059923 s
Sea state 2/200. (Hs, Te) = (3.531570115440943 m, 11.072854690290276 s). Tp = 12.253441967345596 s
Sea state 3/200. (Hs, Te) = (3.5631169008261416 m, 10.529043453084103 s). Tp = 11.651649600094585 s
Sea state 4/200. (Hs, Te) = (3.3136880458617415 m, 9.477416257821945 s). Tp = 10.48789795981279 s
Sea state 5/200. (Hs, Te) = (3.172687890319087 m, 9.360633157328573 s). Tp = 10.35866345031299 s
Sea state 6/200. (Hs, Te) = (3.0630002153170452 m, 8.59765613746172 s). Tp = 9.514337854353085 s
Sea state 7/200. (Hs, Te) = (2.7069002802048345 m, 8.619396416083681 s). Tp = 9.538396080519615 s
Sea state 8/200. (Hs, Te) = (2.4473757676516734 m, 8.436380373858702 s). Tp = 9.335866875971888 s
Sea state 9/200. (Hs, Te) = (2.046394958916393 m, 7.376850430979894 s). Tp = 8.163369897472288 s
Sea state 10/200. (Hs, Te) = (2.293289736769183 m, 8.853033813999282 s). Tp = 9.796943887461476 s
Sea state 11/200. (Hs, Te) = (2.2020532526799967 m, 8.69872911280172 s). Tp = 9.626187225850904 s
Sea state 12/200. (Hs, Te) = (2.0876480479410366 m, 8.539262027183852 s). Tp = 9.449717766621593 s
Sea state 13/200. (Hs, Te) = (1.8902057494339104 m, 8.529332396785021 s). Tp = 9.438729439469068 s
Sea state 14/200. (Hs, Te) = (2.308096327590195 m, 9.112272664224243 s). Tp = 10.083822772424941 s
Sea state 15/200. (Hs, Te) = (1.9622893666119121 m, 9.151949676311485 s). Tp = 10.127730145785039 s
Sea state 16/200. (Hs, Te) = (2.305667961346008 m, 9.200023294046321 s). Tp = 10.18092937051529 s
Sea state 17/200. (Hs, Te) = (1.9701858425767147 m, 10.185262006644049 s). Tp = 11.27121419104896 s
Sea state 18/200. (Hs, Te) = (2.292470528080541 m, 9.616889178090641 s). Tp = 10.642241476668234 s
Sea state 19/200. (Hs, Te) = (2.6097974565855493 m, 10.607583411714911 s). Tp = 11.738563485638863 s
Sea state 20/200. (Hs, Te) = (2.7555697594812547 m, 10.323372655443912 s). Tp = 11.42405019111185 s
Sea state 21/200. (Hs, Te) = (3.682633905384873 m, 12.402475369156193 s). Tp = 13.724826744150525 s
Sea state 22/200. (Hs, Te) = (4.212665972583244 m, 12.185046415308522 s). Tp = 13.48421552486582 s
Sea state 23/200. (Hs, Te) = (4.05026799253212 m, 11.113905814149078 s). Tp = 12.298869960213526 s
Sea state 24/200. (Hs, Te) = (5.303297566743566 m, 10.639559673240361 s). Tp = 11.773949054753066 s
Sea state 25/200. (Hs, Te) = (4.924134073730911 m, 9.785109089934313 s). Tp = 10.828396988068192 s
Sea state 26/200. (Hs, Te) = (3.5985615413148957 m, 8.581846021873012 s). Tp = 9.496842064939978 s
Sea state 27/200. (Hs, Te) = (3.135645469719277 m, 7.8511911960291645 s). Tp = 8.688284853899408 s
Sea state 28/200. (Hs, Te) = (2.431810006062661 m, 7.116680274464138 s). Tp = 7.875460410382518 s
Sea state 29/200. (Hs, Te) = (2.0838410893707966 m, 6.298801092171736 s). Tp = 6.970378985868918 s
Sea state 30/200. (Hs, Te) = (1.6423455509793814 m, 6.523674804404985 s). Tp = 7.219228723348807 s
Sea state 31/200. (Hs, Te) = (1.4428843954801813 m, 7.031455517574027 s). Tp = 7.7811489936843605 s
Sea state 32/200. (Hs, Te) = (1.0660804027836115 m, 7.003369546884362 s). Tp = 7.750068498042693 s
Sea state 33/200. (Hs, Te) = (0.6706525629195497 m, 7.391577956604073 s). Tp = 8.179667671226758 s
Sea state 34/200. (Hs, Te) = (0.9453680651508931 m, 8.514256864842773 s). Tp = 9.422046554978298 s
Sea state 35/200. (Hs, Te) = (0.8606454052847035 m, 9.19445876440277 s). Tp = 10.1747715509674 s
Sea state 36/200. (Hs, Te) = (1.446046535282041 m, 9.890985020002242 s). Tp = 10.94556140511448 s
Sea state 37/200. (Hs, Te) = (1.814284454479686 m, 10.884111327310288 s). Tp = 12.044574795357422 s
Sea state 38/200. (Hs, Te) = (1.8369294990041127 m, 14.071484897068036 s). Tp = 15.571785994067179 s
Sea state 39/200. (Hs, Te) = (2.4279375035930384 m, 11.721321793811422 s). Tp = 12.971048604746592 s
Sea state 40/200. (Hs, Te) = (3.4366878484585097 m, 13.500507507743283 s). Tp = 14.939931020760929 s
Sea state 41/200. (Hs, Te) = (4.881749805564816 m, 15.102009111828629 s). Tp = 16.712184655000232 s
Sea state 42/200. (Hs, Te) = (5.929650654049805 m, 14.041374740462938 s). Tp = 15.538465493897355 s
Sea state 43/200. (Hs, Te) = (6.733647106251836 m, 12.756124450005649 s). Tp = 14.116181874349593 s
Sea state 44/200. (Hs, Te) = (5.823482465094092 m, 10.757719220728456 s). Tp = 11.904706767965251 s
Sea state 45/200. (Hs, Te) = (4.522240943762454 m, 8.448078861366799 s). Tp = 9.348812655700389 s
Sea state 46/200. (Hs, Te) = (3.9632342495762476 m, 7.763746279892604 s). Tp = 8.591516564674196 s
Sea state 47/200. (Hs, Te) = (3.48351682452572 m, 7.324368618974508 s). Tp = 8.105292476993444 s
Sea state 48/200. (Hs, Te) = (2.563456346797842 m, 6.056216781584966 s). Tp = 6.701930346822829 s
Sea state 49/200. (Hs, Te) = (1.995627635392273 m, 5.300300446310389 s). Tp = 5.865418245333961 s
Sea state 50/200. (Hs, Te) = (1.2482525200731613 m, 5.00092407195376 s). Tp = 5.534122375192174 s
Sea state 51/200. (Hs, Te) = (1.0112713778368576 m, 5.4894639018048546 s). Tp = 6.0747503000819165 s
Sea state 52/200. (Hs, Te) = (0.7225874968126063 m, 5.797180698365338 s). Tp = 6.415275847873832 s
Sea state 53/200. (Hs, Te) = (0.3412147422072267 m, 6.444682361990287 s). Tp = 7.131814110219623 s
Sea state 54/200. (Hs, Te) = (0.37629800539801495 m, 7.278127327491898 s). Tp = 8.054120941059342 s
Sea state 55/200. (Hs, Te) = (0.2593556848203602 m, 9.772429376897422 s). Tp = 10.814365364588443 s
Sea state 56/200. (Hs, Te) = (0.22439382449464995 m, 11.838773644910981 s). Tp = 13.101023167012615 s
Sea state 57/200. (Hs, Te) = (0.7231170863341507 m, 13.331652990941636 s). Tp = 14.75307324285046 s
Sea state 58/200. (Hs, Te) = (1.0944677391961881 m, 14.740489201708725 s). Tp = 16.31211950806222 s
Sea state 59/200. (Hs, Te) = (2.2544246940776325 m, 15.00832277152429 s). Tp = 16.608509481238684 s
Sea state 60/200. (Hs, Te) = (4.221697213400411 m, 15.06617516629942 s). Tp = 16.672530095784513 s
Sea state 61/200. (Hs, Te) = (0.42919942735341365 m, 14.389743813203152 s). Tp = 15.923977661756238 s
Sea state 62/200. (Hs, Te) = (0.8066477894744781 m, 14.929114433172016 s). Tp = 16.520855953356616 s
Sea state 63/200. (Hs, Te) = (2.18706772725983 m, 16.553280263601604 s). Tp = 18.31819027274999 s
Sea state 64/200. (Hs, Te) = (4.058781607387172 m, 17.104385307921103 s). Tp = 18.92805411250597 s
Sea state 65/200. (Hs, Te) = (5.280990480119309 m, 17.091896795314423 s). Tp = 18.914234075237836 s
Sea state 66/200. (Hs, Te) = (6.67877937615683 m, 15.499740904199125 s). Tp = 17.152322593471858 s
Sea state 67/200. (Hs, Te) = (6.927635065873248 m, 14.740354537612657 s). Tp = 16.31197048608619 s
Sea state 68/200. (Hs, Te) = (6.837998607790748 m, 12.158743171888053 s). Tp = 13.455107830795898 s
Sea state 69/200. (Hs, Te) = (6.515875893176208 m, 10.937669435895 s). Tp = 12.103843267109248 s
Sea state 70/200. (Hs, Te) = (5.304001413051129 m, 8.964849111642947 s). Tp = 9.92068092719158 s
Sea state 71/200. (Hs, Te) = (4.21625929476447 m, 7.632380539272374 s). Tp = 8.446144614602561 s
Sea state 72/200. (Hs, Te) = (3.90275894328102 m, 7.0180531016534715 s). Tp = 7.766317612771473 s
Sea state 73/200. (Hs, Te) = (3.092131576145537 m, 6.263479592630679 s). Tp = 6.931291509609928 s
Sea state 74/200. (Hs, Te) = (2.061691984692526 m, 5.2597539358137295 s). Tp = 5.8205486676825124 s
Sea state 75/200. (Hs, Te) = (1.7477973238002613 m, 4.952976645409529 s). Tp = 5.481062796151686 s
Sea state 76/200. (Hs, Te) = (1.188105496243663 m, 4.789426304957664 s). Tp = 5.300074725638678 s
Sea state 77/200. (Hs, Te) = (0.8909185568988089 m, 4.865417933018925 s). Tp = 5.384168577720861 s
Sea state 78/200. (Hs, Te) = (0.5868809150201082 m, 5.342702431380711 s). Tp = 5.912341128176003 s
Sea state 79/200. (Hs, Te) = (0.3249029161852639 m, 5.8652556732843655 s). Tp = 6.490608973606103 s
Sea state 80/200. (Hs, Te) = (0.19854447978643908 m, 6.682309275317205 s). Tp = 7.394776794529401 s
Sea state 81/200. (Hs, Te) = (1.1378533719939599 m, 16.29937669010103 s). Tp = 18.037215511479157 s
Sea state 82/200. (Hs, Te) = (1.7207753935819574 m, 18.18049776867239 s). Tp = 20.11890163619976 s
Sea state 83/200. (Hs, Te) = (3.045435393397856 m, 18.2722028817323 s). Tp = 20.220384344355757 s
Sea state 84/200. (Hs, Te) = (5.9410240832717705 m, 18.20434585099714 s). Tp = 20.14529240000659 s
Sea state 85/200. (Hs, Te) = (6.900385063891696 m, 16.965929376112204 s). Tp = 18.774836015374902 s
Sea state 86/200. (Hs, Te) = (7.5413039919231215 m, 16.07885944382636 s). Tp = 17.79318672616556 s
Sea state 87/200. (Hs, Te) = (8.33324739000868 m, 14.60855477544739 s). Tp = 16.16611823911159 s
Sea state 88/200. (Hs, Te) = (8.073493320987428 m, 13.666423534396822 s). Tp = 15.123537006833741 s
Sea state 89/200. (Hs, Te) = (7.742808676245161 m, 11.899449249557978 s). Tp = 13.168168001941908 s
Sea state 90/200. (Hs, Te) = (6.04122128102763 m, 9.145483011133436 s). Tp = 10.120574005051886 s
Sea state 91/200. (Hs, Te) = (5.307838900691543 m, 8.532751345737658 s). Tp = 9.44251291660773 s
Sea state 92/200. (Hs, Te) = (3.5538843391472863 m, 6.449345501593802 s). Tp = 7.1369744335613 s
Sea state 93/200. (Hs, Te) = (3.2727441707177842 m, 6.178574531557655 s). Tp = 6.837333874682727 s
Sea state 94/200. (Hs, Te) = (2.396435845611073 m, 5.2840531245047435 s). Tp = 5.847438634796415 s
Sea state 95/200. (Hs, Te) = (1.7925351881694949 m, 4.859872189983066 s). Tp = 5.378031547807942 s
Sea state 96/200. (Hs, Te) = (1.3712158702331128 m, 4.614584181856148 s). Tp = 5.106590943109626 s
Sea state 97/200. (Hs, Te) = (1.0020762104765706 m, 4.4051257091119895 s). Tp = 4.874800017270961 s
Sea state 98/200. (Hs, Te) = (0.6840846016235075 m, 4.6709868121298745 s). Tp = 5.169007219327068 s
Sea state 99/200. (Hs, Te) = (0.4400318176169047 m, 5.264806535859035 s). Tp = 5.826139975721048 s
Sea state 100/200. (Hs, Te) = (0.2950455607439133 m, 5.368907174671332 s). Tp = 5.941339820036593 s
Sea state 101/200. (Hs, Te) = (0.2538207006543258 m, 17.12463819459854 s). Tp = 18.950466361064674 s
Sea state 102/200. (Hs, Te) = (2.401827718374254 m, 19.74909028958335 s). Tp = 21.854737422273 s
Sea state 103/200. (Hs, Te) = (4.771083170894034 m, 20.076537695823838 s). Tp = 22.217097256475835 s
Sea state 104/200. (Hs, Te) = (6.672059784308026 m, 18.96224243886321 s). Tp = 20.98399588853062 s
Sea state 105/200. (Hs, Te) = (7.266218886285855 m, 18.517848875220015 s). Tp = 20.492221102798194 s
Sea state 106/200. (Hs, Te) = (8.704119700269523 m, 16.896830852172723 s). Tp = 18.698370209870912 s
Sea state 107/200. (Hs, Te) = (9.025791407684109 m, 15.924773336852542 s). Tp = 17.622671965285193 s
Sea state 108/200. (Hs, Te) = (8.335871487107436 m, 12.413322380618183 s). Tp = 13.736830263494564 s
Sea state 109/200. (Hs, Te) = (7.4617419694961065 m, 10.850802026111152 s). Tp = 12.00771405793852 s
Sea state 110/200. (Hs, Te) = (6.0882272377996465 m, 9.069127068362327 s). Tp = 10.036076995041613 s
Sea state 111/200. (Hs, Te) = (4.971882552682233 m, 7.629443455087086 s). Tp = 8.442894378631417 s
Sea state 112/200. (Hs, Te) = (3.9137147533928953 m, 6.396097294409882 s). Tp = 7.078048904883903 s
Sea state 113/200. (Hs, Te) = (3.06161156988711 m, 5.531224857642791 s). Tp = 6.120963807183185 s
Sea state 114/200. (Hs, Te) = (2.7343619821293204 m, 5.293654205328456 s). Tp = 5.85806338243266 s
Sea state 115/200. (Hs, Te) = (2.0584231261728334 m, 4.660229583463185 s). Tp = 5.157103055415991 s
Sea state 116/200. (Hs, Te) = (1.3721475288257854 m, 4.323265832156141 s). Tp = 4.784212243856746 s
Sea state 117/200. (Hs, Te) = (0.8599939026300656 m, 4.213959963822748 s). Tp = 4.663252188678921 s
Sea state 118/200. (Hs, Te) = (0.5419078120521271 m, 4.519931230768316 s). Tp = 5.001846098565626 s
Sea state 119/200. (Hs, Te) = (0.3868003926473669 m, 4.617757194340929 s). Tp = 5.110102262045115 s
Sea state 120/200. (Hs, Te) = (0.07629861562822371 m, 5.12934517057858 s). Tp = 5.676235725669164 s
Sea state 121/200. (Hs, Te) = (0.34633798237637237 m, 19.72005237767673 s). Tp = 21.822603489483953 s
Sea state 122/200. (Hs, Te) = (2.3276352644630043 m, 20.988388061461745 s). Tp = 23.22616906774459 s
Sea state 123/200. (Hs, Te) = (4.4025116313558925 m, 20.635956263518874 s). Tp = 22.836161007101737 s
Sea state 124/200. (Hs, Te) = (6.356747543266486 m, 20.26518087818985 s). Tp = 22.425853566597404 s
Sea state 125/200. (Hs, Te) = (8.350325561657009 m, 18.84467767246754 s). Tp = 20.853896371944614 s
Sea state 126/200. (Hs, Te) = (10.507095756225086 m, 16.726452821049758 s). Tp = 18.509826480609608 s
Sea state 127/200. (Hs, Te) = (10.051931328368415 m, 15.526444557776568 s). Tp = 17.181873389411972 s
Sea state 128/200. (Hs, Te) = (9.569671582265315 m, 13.484451946197973 s). Tp = 14.922163615954188 s
Sea state 129/200. (Hs, Te) = (7.670417919717313 m, 10.688834805882522 s). Tp = 11.828477899856887 s
Sea state 130/200. (Hs, Te) = (6.695773472447784 m, 9.28833992655812 s). Tp = 10.27866231847709 s
Sea state 131/200. (Hs, Te) = (5.2124365852315915 m, 7.762126130889369 s). Tp = 8.589723675455936 s
Sea state 132/200. (Hs, Te) = (4.332304023671216 m, 6.700456908860995 s). Tp = 7.414859327958517 s
Sea state 133/200. (Hs, Te) = (3.6845002818825954 m, 6.05544401874487 s). Tp = 6.701075192042921 s
Sea state 134/200. (Hs, Te) = (2.651930188462395 m, 4.882016850087267 s). Tp = 5.402537270592454 s
Sea state 135/200. (Hs, Te) = (1.761425029748757 m, 4.320829327277171 s). Tp = 4.781515958935407 s
Sea state 136/200. (Hs, Te) = (1.6715886684994976 m, 4.2515655747998595 s). Tp = 4.704867308234196 s
Sea state 137/200. (Hs, Te) = (0.982745350372394 m, 4.044748867057365 s). Tp = 4.475999812264762 s
Sea state 138/200. (Hs, Te) = (0.6319038698634393 m, 3.9844552342473887 s). Tp = 4.4092776749928495 s
Sea state 139/200. (Hs, Te) = (0.43826897160129663 m, 4.309987462686959 s). Tp = 4.769518135222388 s
Sea state 140/200. (Hs, Te) = (0.12296019879881004 m, 4.824883587782545 s). Tp = 5.339312462389205 s
Sea state 141/200. (Hs, Te) = (1.7008854788623395 m, 21.591483415345785 s). Tp = 23.893566421570075 s
Sea state 142/200. (Hs, Te) = (1.958403888846231 m, 22.399914218070567 s). Tp = 24.78819208070469 s
Sea state 143/200. (Hs, Te) = (6.654571320228031 m, 21.834806379665565 s). Tp = 24.1628324695775 s
Sea state 144/200. (Hs, Te) = (7.417015230376994 m, 22.004251917947876 s). Tp = 24.350344283652746 s
Sea state 145/200. (Hs, Te) = (10.015094697709129 m, 19.80808526300293 s). Tp = 21.920022437147782 s
Sea state 146/200. (Hs, Te) = (10.935487969680494 m, 18.194709889967456 s). Tp = 20.134629053238356 s
Sea state 147/200. (Hs, Te) = (10.740817257849123 m, 16.312190381294457 s). Tp = 18.051395397860563 s
Sea state 148/200. (Hs, Te) = (10.318979128904427 m, 13.810844960534954 s). Tp = 15.283356639035425 s
Sea state 149/200. (Hs, Te) = (8.926396179491384 m, 11.686930863038155 s). Tp = 12.932990914456232 s
Sea state 150/200. (Hs, Te) = (6.360592705852408 m, 8.544681430031973 s). Tp = 9.455714985961547 s
Sea state 151/200. (Hs, Te) = (5.079095551882201 m, 7.2036675682669244 s). Tp = 7.97172228560098 s
Sea state 152/200. (Hs, Te) = (4.139472783285571 m, 6.228329733752621 s). Tp = 6.89239397433383 s
Sea state 153/200. (Hs, Te) = (3.4483306755718233 m, 5.481984054027074 s). Tp = 6.0664729512650855 s
Sea state 154/200. (Hs, Te) = (2.8921184163756575 m, 4.935956882773301 s). Tp = 5.462228387176384 s
Sea state 155/200. (Hs, Te) = (2.1073029243263735 m, 4.3654137834767255 s). Tp = 4.830854007881896 s
Sea state 156/200. (Hs, Te) = (1.6474602839139243 m, 4.092507633924886 s). Tp = 4.528850616742159 s
Sea state 157/200. (Hs, Te) = (1.0572722867269018 m, 3.8421613140275808 s). Tp = 4.2518123832963495 s
Sea state 158/200. (Hs, Te) = (0.6406099130872815 m, 3.8713841750793403 s). Tp = 4.284150984500205 s
Sea state 159/200. (Hs, Te) = (0.4054145619344566 m, 4.049398568043829 s). Tp = 4.481145264164633 s
Sea state 160/200. (Hs, Te) = (0.04052982291891705 m, 4.528167327323754 s). Tp = 5.010960327371321 s
Sea state 161/200. (Hs, Te) = (0.9321525156202579 m, 23.34548077630066 s). Tp = 25.83457490352772 s
Sea state 162/200. (Hs, Te) = (3.967681748224935 m, 24.333091362041877 s). Tp = 26.92748448621447 s
Sea state 163/200. (Hs, Te) = (6.784028064639531 m, 23.110281041500112 s). Tp = 25.57429818341138 s
Sea state 164/200. (Hs, Te) = (9.44189240558023 m, 22.593599747148144 s). Tp = 25.002528352321065 s
Sea state 165/200. (Hs, Te) = (11.240531608432693 m, 19.575407738491727 s). Tp = 21.662536845270218 s
Sea state 166/200. (Hs, Te) = (12.004540248825101 m, 17.882918927035586 s). Tp = 19.789594951636836 s
Sea state 167/200. (Hs, Te) = (12.024225448635484 m, 16.671252728627874 s). Tp = 18.448740956776554 s
Sea state 168/200. (Hs, Te) = (11.357641718079881 m, 14.223019851712502 s). Tp = 15.739477598869739 s
Sea state 169/200. (Hs, Te) = (8.821707924875701 m, 11.009149800345794 s). Tp = 12.18294486485446 s
Sea state 170/200. (Hs, Te) = (7.106361798567036 m, 9.224265100430067 s). Tp = 10.207755837222862 s
Sea state 171/200. (Hs, Te) = (5.211295520287843 m, 7.015787616178823 s). Tp = 7.7638105813357745 s
Sea state 172/200. (Hs, Te) = (4.8195217091534825 m, 6.637986810245572 s). Tp = 7.3457286701932984 s
Sea state 173/200. (Hs, Te) = (3.3902610822019144 m, 5.164748265935532 s). Tp = 5.7154134974863275 s
Sea state 174/200. (Hs, Te) = (2.6095653202369826 m, 4.621018348933888 s). Tp = 5.113711120796454 s
Sea state 175/200. (Hs, Te) = (1.8697727603224317 m, 4.0452987797603335 s). Tp = 4.4766083566357855 s
Sea state 176/200. (Hs, Te) = (1.5454032632552606 m, 3.901895461469463 s). Tp = 4.317915382894982 s
Sea state 177/200. (Hs, Te) = (1.1162594384590447 m, 3.7807872241095453 s). Tp = 4.1838945906273315 s
Sea state 178/200. (Hs, Te) = (0.9028634682006197 m, 3.7222798357613645 s). Tp = 4.119149147122681 s
Sea state 179/200. (Hs, Te) = (0.32249025006568055 m, 3.8987959672401518 s). Tp = 4.314485420728108 s
Sea state 180/200. (Hs, Te) = (0.3250987448403442 m, 3.9671561761459007 s). Tp = 4.390134192082142 s
Sea state 181/200. (Hs, Te) = (1.0306696654071352 m, 23.938975875960722 s). Tp = 26.49134842444872 s
Sea state 182/200. (Hs, Te) = (2.603264286267793 m, 25.13838407710226 s). Tp = 27.81863747491678 s
Sea state 183/200. (Hs, Te) = (5.858754709021494 m, 25.063693808326345 s). Tp = 27.735983732989357 s
Sea state 184/200. (Hs, Te) = (10.155435864907151 m, 23.211633456341872 s). Tp = 25.686456788238267 s
Sea state 185/200. (Hs, Te) = (12.361550008947113 m, 20.139407734957956 s). Tp = 22.286670496400152 s
Sea state 186/200. (Hs, Te) = (12.732660337947125 m, 18.002340099850258 s). Tp = 19.92174880461209 s
Sea state 187/200. (Hs, Te) = (12.471212243449607 m, 16.22100142577672 s). Tp = 17.950483879941046 s
Sea state 188/200. (Hs, Te) = (11.30029974478813 m, 13.819219928253016 s). Tp = 15.292624545440956 s
Sea state 189/200. (Hs, Te) = (9.231594696755867 m, 11.08794558713383 s). Tp = 12.270141854942771 s
Sea state 190/200. (Hs, Te) = (7.4957968275525655 m, 9.146029757697058 s). Tp = 10.121179045709924 s
Sea state 191/200. (Hs, Te) = (6.174952841653434 m, 7.83613931929022 s). Tp = 8.67162814672867 s
Sea state 192/200. (Hs, Te) = (4.681623492075341 m, 6.339937017001209 s). Tp = 7.01590082118326 s
Sea state 193/200. (Hs, Te) = (3.900851734182826 m, 5.570871321782711 s). Tp = 6.164837375575169 s
Sea state 194/200. (Hs, Te) = (3.1189685428910567 m, 4.886108588584802 s). Tp = 5.407065270067518 s
Sea state 195/200. (Hs, Te) = (1.9193980219283122 m, 3.8820797075687836 s). Tp = 4.295986874190348 s
Sea state 196/200. (Hs, Te) = (1.5120152039662609 m, 3.6534201969813007 s). Tp = 4.042947696703235 s
Sea state 197/200. (Hs, Te) = (1.0490506632626337 m, 3.48025912982143 s). Tp = 3.8513242042259317 s
Sea state 198/200. (Hs, Te) = (0.5803416777834116 m, 3.454649334283839 s). Tp = 3.8229838934214713 s
Sea state 199/200. (Hs, Te) = (0.49309280206832695 m, 3.543799112476096 s). Tp = 3.9216388170193093 s
Sea state 200/200. (Hs, Te) = (0.08514972661072973 m, 3.7868424627988535 s). Tp = 4.190595438597723 s
###Markdown
3. Long-Term Extreme DistributionFinally we integrate the weighted short-term extreme distributions over the entire sea state space to obtain the long-term extreme distribution, assuming a 3-hour sea state coherence. For this we use the `loads.extreme.full_seastate_long_term_extreme` function. The integral reduces to a sum over the 200 bins of the weighted short-term extreme distributions.
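Concretely, with $W_i$ the sample weights and $F_{ST,i}$ the short-term extreme CDFs from the previous step, this is the discrete form of the integral described above: $$F_{LT}(x) \approx \sum_{i=1}^{200} W_i \, F_{ST,i}(x)$$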
###Code
lte = extreme.full_seastate_long_term_extreme(ste_all, sample_weights)
###Output
_____no_output_____
###Markdown
Similar to the short-term extreme functions, the output of the long-term extreme function is a probability distribution (`scipy.stats.rv_continuous`). This object provides common statistical functions (PDF, CDF, PPF, etc.) and metrics (expected value, median, etc.). Here, we will look at the survival function and the 100-year return level. The value of the survival function at a given return level (e.g. 100 years) (`s_t` in the code below) is $1/N$ where $N$ is the number of short-term periods in the return period. In this case $N$ is the number of 3-hour periods in a 100-year period, which gives `s_t`$\approx 3e-6$. The corresponding response, i.e. the 100-year wave elevation in this case, is given as the inverse cumulative function (ppf) of $1-$`s_t`. This gives a 100-year wave elevation of about 11 meters (the exact value varies slightly between runs because the synthetic wave elevation time series are generated with random phases).
###Code
t_st_hr = t_st/(60.0*60.0)
t_return_yr = 100.0
s_t = 1.0/(365.25*24*t_return_yr/t_st_hr)
x_t = lte.ppf(1-s_t)
print(f"100-year elevation: {x_t} m")
###Output
100-year elevation: 11.139864671857818 m
###Markdown
Finally we plot the survival function and show the 100-year return level (dashed grey lines). The 100-year value is about 11 m (where the vertical grey line intersects the x-axis)
###Code
x = np.linspace(0, 20, 1000)
fig, ax = plt.subplots()
# plot survival function
ax.semilogy(x, lte.sf(x))
# format plot
plt.grid(True, which="major", linestyle=":")
ax.tick_params(axis="both", which="major", direction="in")
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
plt.minorticks_off()
ax.set_xticks([0, 5, 10, 15, 20])
ax.set_yticks(1.0*10.0**(-1*np.arange(11)))
ax.set_xlabel("elevation [m]")
ax.set_ylabel("survival function (1-cdf)")
ax.set_xlim([0, x[-1]])
ylim = [1e-10, 1]
ax.set_ylim(ylim)
# 100-year return level
ax.plot([0, x[-1]], [s_t, s_t], '--', color="0.5", linewidth=1)
ax.plot([x_t, x_t], ylim, '--', color="0.5", linewidth=1)
###Output
_____no_output_____ |
3_10_Scene_Understanding/CarND-Object-Detection-Lab.ipynb | ###Markdown
CarND Object Detection LabLet's get started!
###Code
HTML("""
<video width="960" height="600" controls>
<source src="{0}" type="video/mp4">
</video>
""".format('result.mp4'))
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import time
from scipy.stats import norm
%matplotlib inline
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
MobileNets[*MobileNets*](https://arxiv.org/abs/1704.04861), as the name suggests, are neural networks constructed for the purpose of running very efficiently (high FPS, low memory footprint) on mobile and embedded devices. *MobileNets* achieve this with 3 techniques:1. Perform a depthwise convolution followed by a 1x1 convolution rather than a standard convolution. The 1x1 convolution is called a pointwise convolution if it's following a depthwise convolution. The combination of a depthwise convolution followed by a pointwise convolution is sometimes called a separable depthwise convolution.2. Use a "width multiplier" - reduces the size of the input/output channels, set to a value between 0 and 1.3. Use a "resolution multiplier" - reduces the size of the original input, set to a value between 0 and 1.These 3 techniques reduce the cumulative number of parameters and therefore the computation required. Of course, generally models with more parameters achieve a higher accuracy. *MobileNets* are no silver bullet: while they perform very well, larger models will outperform them. ** *MobileNets* are designed for mobile devices, NOT cloud GPUs**. The reason we're using them in this lab is that automotive hardware is closer to mobile or embedded devices than beefy cloud GPUs. Convolutions Vanilla ConvolutionBefore we get into the *MobileNet* convolution block let's take a step back and recall the computational cost of a vanilla convolution. There are $N$ kernels of size $D_k * D_k$. Each of these kernels goes over the entire input which is a $D_f * D_f * M$ sized feature map or tensor (if that makes more sense). The computational cost is:$$D_g * D_g * M * N * D_k * D_k$$Let $D_g * D_g$ be the size of the output feature map. Then a standard convolution takes in a $D_f * D_f * M$ input feature map and returns a $D_g * D_g * N$ feature map as output.(*Note*: In the MobileNets paper, you may notice the above equation for computational cost uses $D_f$ instead of $D_g$. In the paper, they assume the output and input are the same spatial dimensions due to stride of 1 and padding, so doing so does not make a difference, but you would want $D_g$ when the input and output dimensions differ.) Depthwise ConvolutionA depthwise convolution acts on each input channel separately with a different kernel. $M$ input channels implies there are $M$ $D_k * D_k$ kernels. Also notice this results in $N$ being set to 1. If this doesn't make sense, think about the shape a kernel would have to be to act upon an individual channel.Computation cost:$$D_g * D_g * M * D_k * D_k$$ Pointwise ConvolutionA pointwise convolution performs a 1x1 convolution; it's the same as a vanilla convolution except the kernel size is $1 * 1$.Computation cost:$$D_k * D_k * D_g * D_g * M * N =1 * 1 * D_g * D_g * M * N =D_g * D_g * M * N$$Thus the total computation cost for a separable depthwise convolution is:$$D_g * D_g * M * D_k * D_k + D_g * D_g * M * N$$which results in a $\frac{1}{N} + \frac{1}{D_k^2}$ reduction in computation:$$\frac {D_g * D_g * M * D_k * D_k + D_g * D_g * M * N} {D_g * D_g * M * N * D_k * D_k} = \frac {D_k^2 + N} {D_k^2*N} = \frac {1}{N} + \frac{1}{D_k^2}$$*MobileNets* use a 3x3 kernel, so assuming a large enough $N$, separable depthwise convnets are ~9x more computationally efficient than vanilla convolutions! Width MultiplierThe 2nd technique for reducing the computational cost is the "width multiplier" which is a hyperparameter inhabiting the range [0, 1] denoted here as $\alpha$.
$\alpha$ reduces the number of input and output channels proportionally:$$D_f * D_f * \alpha M * D_k * D_k + D_f * D_f * \alpha M * \alpha N$$ Resolution MultiplierThe 3rd technique for reducing the computational cost is the "resolution multiplier" which is a hyperparameter inhabiting the range [0, 1] denoted here as $\rho$. $\rho$ reduces the size of the input feature map:$$\rho D_f * \rho D_f * M * D_k * D_k + \rho D_f * \rho D_f * M * N$$ Combining the width and resolution multipliers results in a computational cost of:$$\rho D_f * \rho D_f * \alpha M * D_k * D_k + \rho D_f * \rho D_f * \alpha M * \alpha N$$Training *MobileNets* with different values of $\alpha$ and $\rho$ will result in different speed vs. accuracy tradeoffs. The folks at Google have run these experiments; the results are shown in the graphic below: MACs (M) represents the number of multiplication-add operations in the millions. Exercise 1 - Implement Separable Depthwise ConvolutionIn this exercise you'll implement a separable depthwise convolution block and compare the number of parameters to a standard convolution block. For this exercise we'll assume the width and resolution multipliers are set to 1.Docs:* [depthwise convolution](https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
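As a rough worked example using the numbers in the code cell below ($D_k = 3$, $M = 32$, $N = 512$): the separable depthwise block should need about $\frac{1}{512} + \frac{1}{9} \approx 0.11$ of the multiply-adds of the standard block, i.e. roughly a 9x saving. Because the parameter-count ratio follows the same formula, the comparison printed further below shows a similar ratio (the printed figure is slightly lower because it also counts the bias and batch-norm parameters).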
###Code
def vanilla_conv_block(x, kernel_size, output_channels):
"""
Vanilla Conv -> Batch Norm -> ReLU
"""
x = tf.layers.conv2d(
x, output_channels, kernel_size, (2, 2), padding='SAME')
x = tf.layers.batch_normalization(x)
return tf.nn.relu(x)
# TODO: implement MobileNet conv block
def mobilenet_conv_block(x, kernel_size, output_channels):
"""
Depthwise Conv -> Batch Norm -> ReLU -> Pointwise Conv -> Batch Norm -> ReLU
"""
input_shape = x.get_shape()
# print(input_shape)
channels = int(input_shape[3])
# print(channels)
# print(channels.__class__)
W = tf.Variable(tf.truncated_normal((kernel_size, kernel_size, channels, 1)))
# depthwise conv
x = tf.nn.depthwise_conv2d(x, W, (1, 2, 2, 1), padding='SAME')
x = tf.layers.batch_normalization(x)
x = tf.nn.relu(x)
# pointwise conv
x = tf.layers.conv2d(x, output_channels, (1, 1), padding='SAME')
x = tf.layers.batch_normalization(x)
return tf.nn.relu(x)
###Output
_____no_output_____
###Markdown
**[Sample solution](./exercise-solutions/e1.py)**Let's compare the number of parameters in each block.
###Code
# constants but you can change them so I guess they're not so constant :)
INPUT_CHANNELS = 32
OUTPUT_CHANNELS = 512
KERNEL_SIZE = 3
IMG_HEIGHT = 256
IMG_WIDTH = 256
with tf.Session(graph=tf.Graph()) as sess:
# input
x = tf.constant(np.random.randn(1, IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS), dtype=tf.float32)
with tf.variable_scope('vanilla'):
vanilla_conv = vanilla_conv_block(x, KERNEL_SIZE, OUTPUT_CHANNELS)
with tf.variable_scope('mobile'):
mobilenet_conv = mobilenet_conv_block(x, KERNEL_SIZE, OUTPUT_CHANNELS)
vanilla_params = [
(v.name, np.prod(v.get_shape().as_list()))
for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'vanilla')
]
mobile_params = [
(v.name, np.prod(v.get_shape().as_list()))
for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'mobile')
]
print("VANILLA CONV BLOCK")
total_vanilla_params = sum([p[1] for p in vanilla_params])
for p in vanilla_params:
print("Variable {0}: number of params = {1}".format(p[0], p[1]))
print("Total number of params =", total_vanilla_params)
print()
print("MOBILENET CONV BLOCK")
total_mobile_params = sum([p[1] for p in mobile_params])
for p in mobile_params:
print("Variable {0}: number of params = {1}".format(p[0], p[1]))
print("Total number of params =", total_mobile_params)
print()
print("{0:.3f}x parameter reduction".format(total_vanilla_params /
total_mobile_params))
###Output
VANILLA CONV BLOCK
Variable vanilla/conv2d/kernel:0: number of params = 147456
Variable vanilla/conv2d/bias:0: number of params = 512
Variable vanilla/batch_normalization/gamma:0: number of params = 512
Variable vanilla/batch_normalization/beta:0: number of params = 512
Total number of params = 148992
MOBILENET CONV BLOCK
Variable mobile/Variable:0: number of params = 288
Variable mobile/batch_normalization/gamma:0: number of params = 32
Variable mobile/batch_normalization/beta:0: number of params = 32
Variable mobile/conv2d/kernel:0: number of params = 16384
Variable mobile/conv2d/bias:0: number of params = 512
Variable mobile/batch_normalization_1/gamma:0: number of params = 512
Variable mobile/batch_normalization_1/beta:0: number of params = 512
Total number of params = 18272
8.154x parameter reduction
###Markdown
Your solution should show the majority of the parameters in the *MobileNet* block stem from the pointwise convolution. *MobileNet* SSDIn this section you'll use a pretrained *MobileNet* [SSD](https://arxiv.org/abs/1512.02325) model to perform object detection. You can download the *MobileNet* SSD and other models from the [TensorFlow detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) (*note*: we'll provide links to specific models further below). [Paper](https://arxiv.org/abs/1611.10012) comparing several object detection models.Alright, let's get into SSD! Single Shot Detection (SSD)Many previous works in object detection involve more than one training phase. For example, the [Faster-RCNN](https://arxiv.org/abs/1506.01497) architecture first trains a Region Proposal Network (RPN) which decides which regions of the image are worth drawing a box around. The RPN is then merged with a pretrained model for classification (it classifies the regions). The image below is an RPN: The SSD architecture is a single convolutional network which learns to predict bounding box locations and classify the locations in one pass. Put differently, SSD can be trained end to end while Faster-RCNN cannot. The SSD architecture consists of a base network followed by several convolutional layers: **NOTE:** In this lab the base network is a MobileNet (instead of VGG16). Detecting BoxesSSD operates on feature maps to predict bounding box locations. Recall a feature map is of size $D_f * D_f * M$. For each feature map location $k$ bounding boxes are predicted. Each bounding box carries with it the following information:* 4 corner bounding box **offset** locations $(cx, cy, w, h)$* $C$ class probabilities $(c_1, c_2, ..., c_p)$SSD **does not** predict the shape of the box, rather just where the box is. The $k$ bounding boxes each have a predetermined shape. This is illustrated in the figure below:The shapes are set prior to actual training. For example, in figure (c) in the above picture there are 4 boxes, meaning $k$ = 4. Exercise 2 - SSD Feature MapsIt would be a good exercise to read the SSD paper prior to answering the following questions.***Q: Why does SSD use several differently sized feature maps to predict detections?*** A: To be able to detect objects at different distances (and so at different apparent sizes) before they are classified. The current approach leaves us with thousands of bounding box candidates; clearly the vast majority of them are nonsensical. Exercise 3 - Filtering Bounding Boxes***Q: What are some ways in which we can filter nonsensical bounding boxes?*** A: By computing the intersection over union (IoU) of overlapping boxes and keeping only the highest-scoring box among heavily overlapping ones (non-maximum suppression), and by discarding boxes whose confidence score falls below a threshold. LossWith the final set of matched boxes we can compute the loss:$$L = \frac {1} {N} * ( L_{class} + L_{box})$$where $N$ is the total number of matched boxes, $L_{class}$ is a softmax loss for classification, and $L_{box}$ is an L1 smooth loss representing the error of the matched boxes with respect to the ground truth boxes. L1 smooth loss is a modification of L1 loss which is more robust to outliers. In the event $N$ is 0, the loss is set to 0. SSD Summary* Starts from a base model pretrained on ImageNet. * The base model is extended by several convolutional layers.* Each feature map is used to predict bounding boxes. 
Diversity in feature map size allows object detection at different resolutions.* Boxes are filtered by IoU metrics and hard negative mining.* Loss is a combination of classification (softmax) and detection (smooth L1).* Model can be trained end to end. Object Detection InferenceIn this part of the lab you'll detect objects using pretrained object detection models. You can download the latest pretrained models from the [model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md), although do note that you may need a newer version of TensorFlow (such as v1.8) in order to use the newest models.We are providing the download links for the below noted files to ensure compatibility between the included environment file and the models.[SSD_Mobilenet 11.6.17 version](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz)[RFCN_ResNet101 11.6.17 version](http://download.tensorflow.org/models/object_detection/rfcn_resnet101_coco_11_06_2017.tar.gz)[Faster_RCNN_Inception_ResNet 11.6.17 version](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017.tar.gz)Make sure to extract these files prior to continuing!
###Code
# Frozen inference graph files. NOTE: change the path to where you saved the models.
SSD_GRAPH_FILE = 'pretrained/ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb'
RFCN_GRAPH_FILE = 'pretrained/rfcn_resnet101_coco_11_06_2017/frozen_inference_graph.pb'
FASTER_RCNN_GRAPH_FILE = 'pretrained/faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017/frozen_inference_graph.pb'
###Output
_____no_output_____
###Markdown
Below are utility functions. The main purpose of these is to draw the bounding boxes back onto the original image.
###Code
# Colors (one for each class)
cmap = ImageColor.colormap
print("Number of colors =", len(cmap))
COLOR_LIST = sorted([c for c in cmap.keys()])
#
# Utility funcs
#
def filter_boxes(min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
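# Optional helpers (an addition, not part of the original lab): a simple IoU and
# non-maximum suppression sketch, one way to prune redundant boxes as discussed in
# Exercise 3. Boxes are assumed to be in [ymin, xmin, ymax, xmax] order.
def iou(box_a, box_b):
    """Intersection over union of two boxes."""
    ymin = max(box_a[0], box_b[0])
    xmin = max(box_a[1], box_b[1])
    ymax = min(box_a[2], box_b[2])
    xmax = min(box_a[3], box_b[3])
    intersection = max(0.0, ymax - ymin) * max(0.0, xmax - xmin)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0
def non_max_suppression(boxes, scores, iou_threshold=0.5):
    """Keep only the highest-scoring box among heavily overlapping boxes."""
    order = np.argsort(scores)[::-1]
    keep = []
    for i in order:
        if all(iou(boxes[i], boxes[j]) < iou_threshold for j in keep):
            keep.append(i)
    return keep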
def to_image_coords(boxes, height, width):
"""
The original box coordinate output is normalized, i.e [0, 1].
This converts it back to the original coordinate based on the image
size.
"""
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
def draw_boxes(image, boxes, classes, thickness=4):
"""Draw bounding boxes on the image"""
draw = ImageDraw.Draw(image)
for i in range(len(boxes)):
bot, left, top, right = boxes[i, ...]
class_id = int(classes[i])
color = COLOR_LIST[class_id]
draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color)
def load_graph(graph_file):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
###Output
Number of colors = 148
###Markdown
Below we load the graph and extract the relevant tensors using [`get_tensor_by_name`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). These tensors reflect the inputs and outputs of the graph, or at least the ones we care about for detecting objects.
###Code
# detection_graph = load_graph(SSD_GRAPH_FILE)
# detection_graph = load_graph(RFCN_GRAPH_FILE)
detection_graph = load_graph(FASTER_RCNN_GRAPH_FILE)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
###Output
_____no_output_____
###Markdown
Run detection and classification on a sample image.
###Code
# Load a sample image.
image = Image.open('./assets/sample1.jpg')
image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
with tf.Session(graph=detection_graph) as sess:
# Actual detection.
(boxes, scores, classes) = sess.run([detection_boxes, detection_scores, detection_classes],
feed_dict={image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.8
# Filter boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
# The current box coordinates are normalized to a range between 0 and 1.
# This converts the coordinates actual location on the image.
width, height = image.size
box_coords = to_image_coords(boxes, height, width)
# Each class will be represented by a differently colored box
draw_boxes(image, box_coords, classes)
plt.figure(figsize=(12, 8))
plt.imshow(image)
###Output
_____no_output_____
###Markdown
Timing DetectionThe model zoo comes with a variety of models, each with its own benefits and costs. Below you'll time some of these models. The general tradeoff is sacrificing model accuracy for a faster runtime (lower seconds per frame, SPF).
###Code
def time_detection(sess, img_height, img_width, runs=10):
image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
# warmup
gen_image = np.uint8(np.random.randn(1, img_height, img_width, 3))
sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: gen_image})
times = np.zeros(runs)
for i in range(runs):
t0 = time.time()
        # Time detection on the generated image so the requested img_height/img_width are used.
        sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: gen_image})
t1 = time.time()
times[i] = (t1 - t0) * 1000
return times
with tf.Session(graph=detection_graph) as sess:
times = time_detection(sess, 600, 1000, runs=10)
# Create a figure instance
fig = plt.figure(1, figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
plt.title("Object Detection Timings")
plt.ylabel("Time (ms)")
# Create the boxplot
plt.style.use('fivethirtyeight')
bp = ax.boxplot(times)
###Output
_____no_output_____
###Markdown
Exercise 4 - Model TradeoffsDownload a few models from the [model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) and compare the timings. Detection on a VideoFinally run your pipeline on [this short video](https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/advanced_deep_learning/driving.mp4).
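One possible approach to Exercise 4 is sketched in the next cell. It is only a sketch: it assumes the `SSD_GRAPH_FILE`, `RFCN_GRAPH_FILE`, and `FASTER_RCNN_GRAPH_FILE` paths defined earlier all point to frozen graphs already downloaded from the model zoo.
###Code
# Sketch for Exercise 4: compare inference timings across several frozen graphs.
# Assumes the *_GRAPH_FILE paths defined earlier point to downloaded models.
graph_files = {
    'SSD': SSD_GRAPH_FILE,
    'R-FCN': RFCN_GRAPH_FILE,
    'Faster R-CNN': FASTER_RCNN_GRAPH_FILE,
}
timings = {}
for name, path in graph_files.items():
    graph = load_graph(path)
    with tf.Session(graph=graph) as sess:
        timings[name] = time_detection(sess, 600, 1000, runs=10)
for name, t in timings.items():
    print('{}: median {:.1f} ms, min {:.1f} ms'.format(name, np.median(t), np.min(t)))
###Output
_____no_output_____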
###Code
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
HTML("""
<video width="960" height="600" controls>
<source src="{0}" type="video/mp4">
</video>
""".format('driving.mp4'))
###Output
_____no_output_____
###Markdown
Exercise 5 - Object Detection on a VideoRun an object detection pipeline on the above clip.
###Code
clip = VideoFileClip('driving.mp4')
# TODO: Complete this function.
# The input is a NumPy array.
# The output should also be a NumPy array.
def pipeline(img):
draw_img = Image.fromarray(img)
boxes, scores, classes = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: np.expand_dims(img, 0)})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.8
# Filter boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
# The current box coordinates are normalized to a range between 0 and 1.
    # This converts the coordinates to the actual locations on the image.
width, height = draw_img.size
box_coords = to_image_coords(boxes, height, width)
    # Each class will be represented by a differently colored box
draw_boxes(draw_img, box_coords, classes)
return np.array(draw_img)
###Output
_____no_output_____
###Markdown
**[Sample solution](./exercise-solutions/e5.py)**
###Code
with tf.Session(graph=detection_graph) as sess:
image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
new_clip = clip.fl_image(pipeline)
# write to file
new_clip.write_videofile('result.mp4')
HTML("""
<video width="960" height="600" controls>
<source src="{0}" type="video/mp4">
</video>
""".format('result.mp4'))
###Output
_____no_output_____ |
ICO Data Object Example.ipynb | ###Markdown
ICO Data object example
###Code
import ICO
import os
import pandas as pd
import time
###Output
_____no_output_____
###Markdown
The `Data` object is initialized with the path to the directory of .pickle files. On creation it reads in the pickle files, but does not transform the data.
###Code
data = ICO.Data(os.getcwd()+'/data/')
###Output
_____no_output_____
###Markdown
The `data` object can be accessed like a dictionary to retrieve the underlying `DataFrame`s. These are transformed into a normalized form on their first access (which might take a while).
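Under the hood, this kind of lazy, dict-like access can be implemented with `__getitem__` plus a cache. The sketch below is illustrative only -- the actual `ICO.Data` internals are not shown in this notebook, so the class and method names here are assumptions:
###Code
# Illustrative sketch of lazy, cached, dict-like access.
# Hypothetical class -- not the real ICO.Data implementation.
import os
import pandas as pd
class LazyData(object):
    def __init__(self, directory):
        self._directory = directory
        self._cache = {}  # name -> transformed DataFrame
    def __getitem__(self, name):
        # Load and normalize a DataFrame only the first time it is requested.
        if name not in self._cache:
            raw = pd.read_pickle(os.path.join(self._directory, name + '.pickle'))
            self._cache[name] = self._transform(raw)
        return self._cache[name]
    def _transform(self, df):
        # Placeholder for whatever normalization the real class applies.
        return df
###Output
_____no_output_____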
###Code
start = time.time()
data["all_encounter_data"]
print(time.time() - start)
data["all_encounter_data"].describe(include='all')
data["all_encounter_data"].columns.values
data['all_encounter_data'].shape[0]
data['all_encounter_data'].to_pickle('all_encounter_data_Dan_20170415.pickle')
start = time.time()
data["all_person_data"]
print(time.time() - start)
data["all_person_data"].describe(include='all')
data["all_person_data"].columns.values
data['all_person_data'].shape[0]
data['all_person_data'].to_pickle('all_person_data_Dan_20170415.pickle')
###Output
_____no_output_____ |
02_Python_Datatypes_examples/002_transpose a matrix.ipynb | ###Markdown
All the IPython Notebooks in this **Python Examples** series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/90_Python_Examples)** Python Program to Transpose a MatrixIn this example, you will learn to transpose a matrix (which is created by using a nested list).To understand this example, you should have the knowledge of the following **[Python programming](https://github.com/milaan9/01_Python_Introduction/blob/main/000_Intro_to_Python.ipynb)** topics:* **[Python for Loop](https://github.com/milaan9/03_Python_Flow_Control/blob/main/005_Python_for_Loop.ipynb)*** **[Python List](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List.ipynb)** In Python, we can implement a matrix as a nested list (list inside a list). We can treat each element as a row of the matrix.For example **`X = [[1, 2], [4, 5], [3, 6]]`** would represent a 3x2 matrix. The first row can be selected as **`X[0]`**. And, the element in the first-row first column can be selected as **`X[0][0]`**.Transpose of a matrix is the interchanging of rows and columns. It is denoted as **`X'`**. The element at **`ith`** row and **`jth`** column in **`X`** will be placed at **`jth`** row and **`ith`** column in **`X'`**. So if **`X`** is a 3x2 matrix, **`X'`** will be a 2x3 matrix.Here are a couple of ways to accomplish this in Python.
###Code
# Example 1: transpose a matrix using a nested loop
X = [[12,9],
[7 ,3],
[5 ,6]]
result = [[0,0,0],
[0,0,0]]
# iterate through rows
for i in range(len(X)):
# iterate through columns
for j in range(len(X[0])):
result[j][i] = X[i][j]
for r in result:
print(r)
'''
>>Output/Runtime Test Cases:
[12, 7, 5]
[9, 3, 6]
'''
###Output
[12, 7, 5]
[9, 3, 6]
###Markdown
**Explanation:** In this program we have used nested **`for`** loops to iterate through each row and each column. At each point we place the **`X[i][j]`** element into **`result[j][i]`**.
###Code
# Example 2: transpose a matrix using list comprehension
X = [[12,9],
[7 ,3],
[5 ,6]]
result = [[X[j][i] for j in range(len(X))] for i in range(len(X[0]))]
for r in result:
print(r)
'''
>>Output/Runtime Test Cases:
[12, 7, 5]
[9, 3, 6]
'''
###Output
[12, 7, 5]
[9, 3, 6]
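###Markdown
Beyond the two approaches above, Python's built-in **`zip`** function combined with argument unpacking gives a very compact transpose. This extra example is included here for illustration:
###Code
# Example 3 (additional): transpose a matrix using zip and argument unpacking
X = [[12,9],
     [7 ,3],
     [5 ,6]]
# zip(*X) groups the i-th element of every row, i.e. the columns of X
result = [list(row) for row in zip(*X)]
for r in result:
    print(r)
'''
>>Output/Runtime Test Cases:
[12, 7, 5]
[9, 3, 6]
'''
###Output
[12, 7, 5]
[9, 3, 6]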
|
notebooks/4-WMT.ipynb | ###Markdown
Example 4Let's use [IPython Notebook](http://ipython.org/notebook.html) to download model output from WMT and examine the results. Set up with `pylab` magic, plus other global imports:
###Code
%pylab inline
import os
###Output
_____no_output_____
###Markdown
Switch to the directory **examples/4-WMT** to use as our working directory:
###Code
os.chdir(os.path.join('..', 'examples', '4-WMT'))
os.getcwd()
###Output
_____no_output_____
###Markdown
The model output is stored as a **tar.gz** file on the CSDMS website. Here's the name and location of the results of the experiment **Multidim Parameter Study MP-2**:
###Code
run_id = '0b6296d2-ccdd-4717-b347-96be60bfe8e7'
download_file = run_id + '.tar.gz'
###Output
_____no_output_____
###Markdown
You can download and unpack the model output with shell commands. Or, here's a pure Python solution:
###Code
def wmt_download_and_unpack(download_file):
import requests
import tarfile
download_url = 'http://csdms.colorado.edu/pub/users/wmt/' + download_file
r = requests.get(download_url)
    with open(download_file, 'wb') as fp:
fp.write(r.content)
tar = tarfile.open(download_file)
tar.extractall()
tar.close()
###Output
_____no_output_____
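###Markdown
For reference, the shell-command route mentioned above would look roughly like the cell below. This is a sketch: it assumes `wget` and `tar` are available on the system running the notebook.
###Code
# Equivalent download-and-unpack using shell commands (assumes wget and tar exist).
# IPython expands {download_file} to the value of the Python variable.
!wget http://csdms.colorado.edu/pub/users/wmt/{download_file}
!tar xzf {download_file}
###Output
_____no_output_____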
###Markdown
Here's the call (it takes a few seconds):
###Code
wmt_download_and_unpack(download_file)
###Output
_____no_output_____
###Markdown
Change to the directory containing the unpacked output and get a listing:
###Code
os.chdir(run_id)
%ls
###Output
_____no_output_____
###Markdown
This is the standard WMT packaging for a model run. Change to the parameter study directory:
###Code
os.chdir('multidim_parameter_study')
###Output
_____no_output_____
###Markdown
Read the Dakota tabular data file:
###Code
data = numpy.loadtxt('dakota.dat', skiprows=1, unpack=True, usecols=[0,2,3,4])
data.shape
###Output
_____no_output_____
###Markdown
Reshape the variables from the data file to set up a surface plot:
###Code
m = len(set(data[1,]))
n = len(set(data[2,]))
T2 = data[1,].reshape(n,m)
P2 = data[2,].reshape(n,m)
Qs2 = data[3,].reshape(n,m)
###Output
_____no_output_____
###Markdown
Make a surface plot with `Axes3D.plot_surface`:
###Code
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(T2, P2, Qs2, rstride=2, cstride=2)
ax.view_init(elev=30, azim=-75)
ax.set_xlabel('$T \,(^\circ C)$')
ax.set_ylabel('$P \,(m)$')
ax.set_zlabel('$Q_s \,(kg \, s^{-1})$')
###Output
_____no_output_____ |