path | concatenated_notebook
---|---|
places/marin.ipynb | ###Markdown
Configuration_Initial steps to get the notebook ready to play nice with our repository. Do not delete this section._ Code formatting with [black](https://pypi.org/project/nb-black/).
###Code
%load_ext lab_black
import os
import pathlib
this_dir = pathlib.Path(os.path.abspath(""))
data_dir = this_dir / "data"
import pytz
import glob
import requests
import pandas as pd
import json
from datetime import datetime, date
from bs4 import BeautifulSoup
import regex as re
###Output
_____no_output_____
###Markdown
Download Retrieve the page
###Code
url = "https://utility.arcgis.com/usrsvcs/servers/9ccc4670c77442f7b12b198a904f4a51/rest/services/HHS/Covid/MapServer/0/query?f=json&returnGeometry=false&outFields=*&where=1=1"
r = requests.get(url)
data = r.json()
###Output
_____no_output_____
###Markdown
Parse
###Code
dict_list = []
for item in data["features"]:
d = dict(
county="Marin",
area=item["attributes"]["Name"],
confirmed_cases=item["attributes"]["CumulativePositives"],
)
dict_list.append(d)
df = pd.DataFrame(dict_list)
###Output
_____no_output_____
###Markdown
Get timestamp
###Code
headers = {"User-Agent": "Mozilla/5.0"}
url = "https://coronavirus.marinhhs.org/surveillance"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")
last_updated_sentence = soup.find("div", {"class": "last-updated"}).text
last_updated_sentence
date = re.search("[0-9]{2}.[0-9]{2}.2[0-9]{1}", last_updated_sentence).group()
df["county_date"] = pd.to_datetime(date).date()
###Output
_____no_output_____
###Markdown
Vet Ensure we're getting all 54 areas of Marin County
###Code
try:
assert not len(df) > 54
except AssertionError:
raise AssertionError("Marin County's scraper has more rows than before")
try:
assert not len(df) < 54
except AssertionError:
raise AssertionError("Marin's scraper is missing rows")
###Output
_____no_output_____
###Markdown
Export Set date
###Code
tz = pytz.timezone("America/Los_Angeles")
today = datetime.now(tz).date()
slug = "marin"
df.to_csv(data_dir / slug / f"{today}.csv", index=False)
###Output
_____no_output_____
###Markdown
Combine
###Code
csv_list = [
i
for i in glob.glob(str(data_dir / slug / "*.csv"))
if not str(i).endswith("timeseries.csv")
]
df_list = []
for csv in csv_list:
if "manual" in csv:
df = pd.read_csv(csv, parse_dates=["date"])
else:
file_date = csv.split("/")[-1].replace(".csv", "")
df = pd.read_csv(csv, parse_dates=["county_date"])
df["date"] = file_date
df_list.append(df)
df = pd.concat(df_list).sort_values(["date", "area"])
df.to_csv(data_dir / slug / "timeseries.csv", index=False)
###Output
_____no_output_____ |
notebooks/2.4-JS-ctakes-time-bow-tfidf.ipynb | ###Markdown
IntroductionImplementation of cTAKES BoW method with extracted relative time information (added to the BoW cTAKES orig. pairs (Polarity-CUI)), evaluated against the annotations from: > Gehrmann, Sebastian, et al. "Comparing deep learning and concept extraction based methods for patient phenotyping from clinical narratives." PloS one 13.2 (2018): e0192360. Import Packages
###Code
# imported packages
import multiprocessing
import collections
import itertools
import re
import os
# xml and xmi
from lxml import etree
# arrays and dataframes
import pandas
import numpy
# classifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import FunctionTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# plotting
import matplotlib
matplotlib.use('Agg') # server
try:
get_ipython
# jupyter notebook
%matplotlib inline
except:
pass
import matplotlib.pyplot as plt
# import custom modules
import context # set search path to one level up
from src import evaluation # method for evaluation of classifiers
###Output
_____no_output_____
###Markdown
Define variables and parameters
###Code
# variables and parameters
# filenames
input_directory = '../data/interim/cTAKES_output'
input_filename = '../data/raw/annotations.csv'
results_filename = '../reports/ctakes_time_bow_tfidf_results.csv'
plot_filename_1 = '../reports/ctakes_time_bow_tfidf_boxplot_1.png'
plot_filename_2 = '../reports/ctakes_time_bow_tfidf_boxplot_2.png'
# number of splits and repeats for cross validation
n_splits = 5
n_repeats = 10
# n_repeats = 1 # for testing
# number of workers
n_workers=multiprocessing.cpu_count()
# n_workers = 1 # for testing
# keep the conditions for which results are reported in the publication
conditions = [
# 'cohort',
'Obesity',
# 'Non.Adherence',
# 'Developmental.Delay.Retardation',
'Advanced.Heart.Disease',
'Advanced.Lung.Disease',
'Schizophrenia.and.other.Psychiatric.Disorders',
'Alcohol.Abuse',
'Other.Substance.Abuse',
'Chronic.Pain.Fibromyalgia',
'Chronic.Neurological.Dystrophies',
'Advanced.Cancer',
'Depression',
# 'Dementia',
# 'Unsure',
]
###Output
_____no_output_____
###Markdown
Load and prepare data Load and parse xmi data
###Code
%load_ext ipycache
%%cache --read 2.4-JS-ctakes-time-bow-tfidf_cache.pkl X
def ctakes_xmi_to_df(xmi_path):
records = []
tree = etree.parse(xmi_path)
root = tree.getroot()
mentions = []
for mention in root.iterfind('*[@{http://www.omg.org/XMI}id][@typeID][@polarity][@ontologyConceptArr][@event]'):
for concept in mention.attrib['ontologyConceptArr'].split(" "):
d = dict(mention.attrib)
d['ontologyConceptArr'] = concept
mentions.append(d)
mentions_df = pandas.DataFrame(mentions)
concepts = []
for concept in root.iterfind('*[@{http://www.omg.org/XMI}id][@cui][@tui]'):
concepts.append(dict(concept.attrib))
concepts_df = pandas.DataFrame(concepts)
events = []
for event in root.iterfind('*[@{http://www.omg.org/XMI}id][@properties]'):
events.append(dict(event.attrib))
events_df = pandas.DataFrame(events)
eventproperties = []
for eventpropertie in root.iterfind('*[@{http://www.omg.org/XMI}id][@docTimeRel]'):
eventproperties.append(dict(eventpropertie.attrib))
eventproperties_df = pandas.DataFrame(eventproperties)
merged_df = mentions_df\
.merge(right=concepts_df, left_on='ontologyConceptArr', right_on='{http://www.omg.org/XMI}id')\
.merge(right=events_df, left_on='event', right_on='{http://www.omg.org/XMI}id')\
.merge(right=eventproperties_df, left_on='properties', right_on='{http://www.omg.org/XMI}id')
# # unique cui and tui per event IDEA: consider keeping all
# merged_df = merged_df.drop_duplicates(subset=['event', 'cui', 'tui'])
# merge the doctimerel with polarity of the *mention and the cui
merged_df['doctimerelpolaritycui'] = merged_df['docTimeRel'] + merged_df['polarity_x'] + merged_df['cui']
# merge the polarity of the *mention and the cui
merged_df['polaritycui'] = merged_df['polarity_x'] + merged_df['cui']
# return as a string of cui's separated by space
return ' '.join(list(merged_df['doctimerelpolaritycui']) + list(merged_df['polaritycui']))
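# Illustration only (hypothetical values): a mention extracted by cTAKES with
# docTimeRel="BEFORE", polarity="-1" and cui="C0011849" would contribute the two
# bag-of-words tokens "BEFORE-1C0011849" and "-1C0011849" to the document string.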
X = []
# key function for sorting the files according to the integer of the filename
def key_fn(x):
i = x.split(".")[0]
if i != "":
return int(i)
return None
for f in sorted(os.listdir(input_directory), key=key_fn): # for each file in the input directory
if f.endswith(".xmi"):
fpath = os.path.join(input_directory, f)
# parse file and append as a dataframe to x_df
try:
X.append(ctakes_xmi_to_df(fpath))
except Exception as e:
            print(e)
X.append('NaN')
X = numpy.array(X)
###Output
_____no_output_____
###Markdown
Load annotations and classification data
###Code
# read and parse csv file
data = pandas.read_csv(input_filename)
# data = data[0:100] # for testing
# X = X[0:100] # for testing
data.head()
# groups: the subject ids
# used in order to ensure that
# "patients’ notes stay within the set, so that all discharge notes in the
# test set are from patients not previously seen by the model." Gehrmann17.
groups_df = data.filter(items=['subject.id'])
groups = groups_df.as_matrix()
# y: the annotated classes
y_df = data.filter(items=conditions) # filter the conditions
y = y_df.as_matrix()
print(X.shape, groups.shape, y.shape)
###Output
_____no_output_____
###Markdown
Define classifiers
###Code
# dictionary of classifiers (sklearn estimators)
classifiers = collections.OrderedDict()
def tokenizer(text):
pattern = r'[\s]+' # match any sequence of whitespace characters
repl = r' ' # replace with space
temp_text = re.sub(pattern, repl, text)
return temp_text.lower().split(' ') # lower-case and split on space
prediction_models = [
('logistic_regression', LogisticRegression(random_state=0)),
("random_forest", RandomForestClassifier(random_state=0)),
("naive_bayes", MultinomialNB()),
("svm_linear", SVC(kernel="linear", random_state=0, probability=True)),
("gradient_boosting", GradientBoostingClassifier(random_state=0)),
]
# BoW
representation_models = [('ctakes_time_bow_tfidf', TfidfVectorizer(tokenizer=tokenizer))] # IDEA: Use Tfidf on normal BoW model as well?
# cross product of representation models and prediction models
# save to classifiers as pipelines of rep. model into pred. model
for rep_model, pred_model in itertools.product(representation_models, prediction_models):
classifiers.update({ # add this classifier to classifiers dictionary
'{rep_model}_{pred_model}'.format(rep_model=rep_model[0], pred_model=pred_model[0]): # classifier name
Pipeline([rep_model, pred_model]), # concatenate representation model with prediction model in a pipeline
})
###Output
_____no_output_____
###Markdown
Run and evaluate
###Code
results = evaluation.run_evaluation(X=X,
y=y,
groups=groups,
conditions=conditions,
classifiers=classifiers,
n_splits=n_splits,
n_repeats=n_repeats,
n_workers=n_workers)
###Output
_____no_output_____
###Markdown
Save and plot results
###Code
# save results
results_df = pandas.DataFrame(results)
results_df.to_csv(results_filename)
results_df.head()
## load results for plotting
# import pandas
# results = pandas.read_csv('output/results.csv')
# plot and save
axs = results_df.groupby('name').boxplot(column='AUROC', by='condition', rot=90, figsize=(10,10))
for ax in axs:
ax.set_ylim(0,1)
plt.savefig(plot_filename_1)
# plot and save
axs = results_df.groupby('condition').boxplot(column='AUROC', by='name', rot=90, figsize=(10,10))
for ax in axs:
ax.set_ylim(0,1)
plt.savefig(plot_filename_2)
###Output
_____no_output_____ |
combine_csv_files.ipynb | ###Markdown
Script to concatenate all .csv files in a folder into one .csv file. Jeff used this for RD orders
###Code
import os
import csv
path = " " # insert path
directories = [dirs for dirs in os.listdir(path) if os.path.isdir(os.path.join(path, dirs))]
print(len(directories))
for dirs in directories:
print(dirs)
# output file
new_file = 'combined_file.csv'
# now add all folders in a file directory
for dirs in directories:
    dir_path = os.path.join(path, dirs)
    for filename in os.listdir(dir_path):
        if filename.endswith('.csv'):
            # read each source .csv and append its rows to the combined file
            with open(os.path.join(dir_path, filename)) as csvfile, \
                 open(os.path.join('./', new_file), 'a') as newfile:
                file_reader = csv.reader(csvfile, delimiter=',')
                file_writer = csv.writer(newfile)
                for row in file_reader:
                    file_writer.writerow(row)
###Output
_____no_output_____ |
notebooks/word2vec/build_w2v.ipynb | ###Markdown
Dataset
###Code
#path
#dataset_path = "/Users/Alessandro/Dev/repos/ReSt/dataset/haspeede2/preprocessed/reference/reference_tweets.csv"
dataset_path = root_project + 'dataset/haspeede2/preprocessed/dev/dev.csv'
w2v_bin_path = root_project + 'results/model/word2vec/twitter128.bin'
#w2v_bin_path = root_project + 'results/model/word2vec/tweets_2019_Word2Vect.bin'
dataset = load_csv_to_dict(dataset_path)
senteces = dataset["tokens"]
senteces[29]
type(dataset)
sentece_i = 53
print("Examples sentence: {}".format(dataset["text"][sentece_i]))
print("To tokens: {}".format(dataset["tokens"][sentece_i]))
###Output
Examples sentence: +++#Siria🇸🇾 Evacuati civili dalla città terroristi #IS avanzano dopo violenti scontri con Esercito #siriano a #Palmira ma non è finita+++
To tokens: ['+', '+', '<', 'Siria', '>', 'bandiera', 'siria', 'Evacuati', 'civili', 'da', 'la', 'città', 'terroristi', '<', 'Is', '>', 'avanzano', 'dopo', 'violenti', 'scontri', 'con', 'Esercito', '<', 'siriano', '>', 'a', '<', 'Palmira', '>', 'ma', 'non', 'è', 'finita', '+', '+']
###Markdown
useful information
###Code
#data
n_sentences = len(senteces)
unique_words = set([word for words in senteces for word in words])
unique_words_freq = dict(Counter(i for sub in senteces for i in set(sub)))
n_unique_words = len(unique_words)
#print data
print(" - #sentences: {}".format(n_sentences))
print(" - Unique word on the datset: {}".format(n_unique_words))
###Output
- #sentences: 6839
- Unique word on the datset: 20594
###Markdown
W2V
###Code
token_setences = dataset["tokens"]
#w2v_model = Word2Vec.load(w2v_bin_path)
wv = KeyedVectors.load_word2vec_format(datapath(w2v_bin_path), binary=True)
wv["africani"]
#len(w2v_model.wv.vocab.keys())
len(wv.vocab.keys())
wv.vectors.shape
know_words = []
unknow_words = []
for word in unique_words:
if word in wv.vocab.keys():
know_words.append(word)
else:
unknow_words.append(word)
print("know words: {}".format(len(know_words)))
print("unknow words: {}".format(len(unknow_words)))
unknow_words_freq = {word: unique_words_freq[word] for word in unknow_words}
unknow_words_freq_sorted = sorted(unknow_words_freq.items(),key=operator.itemgetter(1),reverse=True)
unknow_words_freq_sorted[:111]
###Output
_____no_output_____
###Markdown
build keras embedding matrix
###Code
def get_index_key_association(wv):
key_to_index = {"<UNK>": 0}
index_to_key = {0: "<UNK>"}
for idx, word in enumerate(sorted(wv.vocab)):
key_to_index[word] = idx+1 # which row in `weights` corresponds to which word?
index_to_key[idx+1] = word # which row in `weights` corresponds to which word?
return index_to_key, key_to_index
def build_keras_embedding_matrix(wv, index_to_key=None):
print('Vocab_size is {}'.format(len(wv.vocab)))
vec_size = wv.vector_size
vocab_size = len(wv.vocab) + 1 # plus the unknown word
if index_to_key is None:
index_to_key, _ = get_index_key_association(wv)
# Create the embedding matrix where words are indexed alphabetically
embedding_matrix = np.zeros(shape=(vocab_size, vec_size))
for idx in index_to_key:
        # skip index 0: it is reserved for unknown words and its row stays all-zeros
if idx != 0:
embedding_matrix[idx] = wv.get_vector(index_to_key[idx])
print('Embedding_matrix with unk word loaded')
print('Shape {}'.format(embedding_matrix.shape))
return embedding_matrix, vocab_size
index_to_key, key_to_index = get_index_key_association(wv)
embedding_matrix, vocab_size = build_keras_embedding_matrix(wv, index_to_key)
###Output
Vocab_size is 1170776
Embedding_matrix loaded
Shape (1170777, 128)
|
Recommendations/Movie_Recommendations.ipynb | ###Markdown
Movie RecommendationsRecommendations are a common machine learning task widely used by many leading companies, such as Netflix, Amazon, and YouTube. If you have used any of these online services, you are familiar with recommendations that are often prefixed with "You might also like.." or "Recommended items other customers buy...".There are many ways to generate recommendations. It could be done based on simple criteria, such as movie genre, e.g. comedies or action adventure. More sophisticated recommendations might consider many more factors, such as the director, when the movie was produced and so on.In this example, we will use a common, straightforward method known as [collaborative filtering](https://en.wikipedia.org/wiki/Collaborative_filtering). This method is based on the idea that many customers have similar likes and dislikes. It also considers similarities between products. It's a simple, yet effective technique that depends only on user preferences, such as product ratings. If you have a sufficiently large dataset of ratings from your customers, then this approach is a good place to start.
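Before building the Keras model below, here is a minimal NumPy sketch (not part of the original notebook) of the idea behind collaborative filtering with learned features: each user and each movie is represented by a small feature vector, and a rating is predicted from their dot product. The feature values below are invented purely for illustration; in the model they are learned from the ratings.

```python
import numpy as np

# Toy latent features for 3 users and 4 movies (2 features each); values are made up.
user_features = np.array([[0.9, 0.1],
                          [0.2, 0.8],
                          [0.5, 0.5]])
movie_features = np.array([[1.0, 0.0],
                           [0.0, 1.0],
                           [0.7, 0.3],
                           [0.4, 0.6]])

# Each predicted rating is the dot product of a user vector with a movie vector.
predicted = user_features @ movie_features.T   # shape (3 users, 4 movies)
print(predicted.round(2))
```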
###Code
%tensorflow_version 2.x
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Input, Embedding, Flatten, Dot, Dense, Add, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
Load dataIn this example, we will make movie recommendations given about 100,000 ratings from roughly 600 users across nearly 10,000 movies.The data set is freely available on the [MovieLens website](https://grouplens.org/datasets/movielens/).
###Code
!wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
!unzip ml-latest-small.zip
movies = pd.read_csv('ml-latest-small/movies.csv')
movies.head()
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
###Output
_____no_output_____
###Markdown
Join Ratings with MoviesThe ratings don't contain movie titles, so let's join or merge these two sets for convenience.
###Code
ratings = ratings.merge(movies, on='movieId').drop(['genres','timestamp'],axis=1)
ratings.head()
###Output
_____no_output_____
###Markdown
Generate Sequential Identifiers`userId` and `movieId` are not sequential, which causes problems for our model. To compensate, we can use the [LabelEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class from [scikit-learn](https://scikit-learn.org/) to generate sequential identifiers for users and movies. The original identifiers are still available, so we can always join back to the original data set if needed.
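As a quick illustration (the identifiers below are made up, not taken from the MovieLens files), `LabelEncoder` maps arbitrary, non-sequential ids to consecutive integers starting at 0, which is the form the embedding layers later expect:

```python
from sklearn.preprocessing import LabelEncoder

enc = LabelEncoder()
print(enc.fit_transform([3, 31, 3, 1000, 31]))  # [0 1 0 2 1]
print(enc.classes_)                             # [   3   31 1000]
```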
###Code
user_enc = LabelEncoder()
ratings['userSeq'] = user_enc.fit_transform(ratings['userId'].values)
item_enc = LabelEncoder()
ratings['movieSeq'] = item_enc.fit_transform(ratings['movieId'].values)
ratings.head()
###Output
_____no_output_____
###Markdown
Train/Test SplitThis case is a bit unusual because we need ratings for every movie from every user to train an accurate model. If we used a traditional split, some movies might be left out, which will cause problems during prediction.For this reason, we will use all of the data for training and a subset for model validation only.
###Code
train_unused, test = train_test_split(ratings, test_size=0.20, random_state=0)
# All data is used for training
train = ratings
numUsers = len(train.userSeq.unique())
numMovies = len(train.movieSeq.unique())
print((numUsers, numMovies))
print((len(train), len(test)))
###Output
(610, 9724)
(100836, 20168)
###Markdown
Recommendation ModelCollaborative filtering tries to minimize the error between a predicted value and ground truth. This is similar to many supervised machine learning problems. The model learns a set of features that similar movies share. The number of features could be as simple as the genre or more complex. The `numFeatures` variable below is a hyperparameter that can be tuned to optimize performance.This model uses the [Keras functional API](https://keras.io/getting-started/functional-api-guide/) rather than adding layers to a Sequential model. This is necessary because we have two sets of inputs, userSeq and movieSeq.
###Code
numFeatures = 50
dropout = 0.0
user_input = Input(shape=(1,))
user_emb = Embedding(numUsers, numFeatures)(user_input)
flat_user = Flatten()(user_emb)
user_dropout = Dropout(dropout)(flat_user)
movie_input = Input(shape=(1,))
movie_emb = Embedding(numMovies, numFeatures)(movie_input)
flat_movie = Flatten()(movie_emb)
movie_dropout = Dropout(dropout)(flat_movie)
dotProduct = Dot(axes=1)([user_dropout, movie_dropout])
user_bias = Embedding(numUsers, 1)(user_input)
movie_bias = Embedding(numMovies, 1)(movie_input)
sum = Add()([dotProduct, user_bias, movie_bias])
flat_sum = Flatten()(sum)
output = Dropout(dropout)(flat_sum)
model = Model([user_input, movie_input], output)
model.summary()
###Output
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
input_2 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
embedding (Embedding) (None, 1, 50) 30500 input_1[0][0]
__________________________________________________________________________________________________
embedding_1 (Embedding) (None, 1, 50) 486200 input_2[0][0]
__________________________________________________________________________________________________
flatten (Flatten) (None, 50) 0 embedding[0][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 50) 0 embedding_1[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 50) 0 flatten[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, 50) 0 flatten_1[0][0]
__________________________________________________________________________________________________
dot (Dot) (None, 1) 0 dropout[0][0]
dropout_1[0][0]
__________________________________________________________________________________________________
embedding_2 (Embedding) (None, 1, 1) 610 input_1[0][0]
__________________________________________________________________________________________________
embedding_3 (Embedding) (None, 1, 1) 9724 input_2[0][0]
__________________________________________________________________________________________________
add (Add) (None, 1, 1) 0 dot[0][0]
embedding_2[0][0]
embedding_3[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 1) 0 add[0][0]
__________________________________________________________________________________________________
dropout_2 (Dropout) (None, 1) 0 flatten_2[0][0]
==================================================================================================
Total params: 527,034
Trainable params: 527,034
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Model Training
###Code
model.compile(loss='mean_squared_error', optimizer=Adam())
history = model.fit([train.userSeq, train.movieSeq], train.rating,
batch_size=32, epochs=10, verbose=1,
validation_data=([test.userSeq, test.movieSeq], test.rating))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
Epoch 1/10
3152/3152 [==============================] - 21s 7ms/step - loss: 6.1680 - val_loss: 1.4478
Epoch 2/10
3152/3152 [==============================] - 21s 7ms/step - loss: 1.1436 - val_loss: 0.8063
Epoch 3/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.8166 - val_loss: 0.6531
Epoch 4/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.6842 - val_loss: 0.5524
Epoch 5/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.5825 - val_loss: 0.4693
Epoch 6/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.4921 - val_loss: 0.3900
Epoch 7/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.4097 - val_loss: 0.3163
Epoch 8/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.3364 - val_loss: 0.2574
Epoch 9/10
3152/3152 [==============================] - 21s 7ms/step - loss: 0.2741 - val_loss: 0.2068
Epoch 10/10
3152/3152 [==============================] - 20s 6ms/step - loss: 0.2233 - val_loss: 0.1665
###Markdown
Notice the validation loss is slightly lower than the training loss. If the model was overfitting, then the opposite would be true, so this is a peculiar case.There are a few reasons this can happen:1. A Keras artifact explained in the [Keras FAQ](https://keras.io/getting-started/faq/why-is-the-training-loss-much-higher-than-the-testing-loss). Keras computes training loss as the average during training time, which can change quite a bit during one epoch. Validation is computed at the end of an epoch when the model loss is probably lower.2. The test set is not representative of the training set. In some cases, the test set might be easier to predict than the training set. This could happen if a very small test set is used. Make PredictionsWe can make predictions for a given user by creating a numpy array of all movies and a numpy array of the same dimension filled with just the one user we are interested in. The model will predict ratings for the specified user given all movies in the full data set.We can then sort the data set by predicted rating descending to get the best recommendations first.
###Code
# The user for whom we want to make recommendations
userNumber = 0
uniqueMovies = ratings.drop_duplicates(subset=['movieSeq'])
movie_vector = uniqueMovies.movieSeq.values
user_vector = np.ones((len(uniqueMovies),)) * userNumber
predictions = model.predict([user_vector, movie_vector])
predictedDF = uniqueMovies.copy()
predictedDF['Predictions'] = predictions
predictedDF.sort_values(by='Predictions', ascending=False).head(5)
###Output
_____no_output_____
###Markdown
Error AnalysisLet's look at some movies where the ground truth did not compare well with predictions.
###Code
oneUser = predictedDF[predictedDF.userSeq == userNumber].copy()
oneUser['Error'] = (oneUser.rating - oneUser.Predictions)**2
oneUser.sort_values(by='Error', ascending=False).head(5)
ratings[ratings.movieSeq == 919].sort_values(by='rating', ascending=True)
###Output
_____no_output_____
###Markdown
Movie RecommendationsRecommendations are a common machine learning task widely used by many leading companies, such as Netflix, Amazon, and YouTube. If you have used any of these online services, you are familiar with recommendations that are often prefixed with "You might also like.." or "Recommended items other customers buy...".There are many ways to generate recommendations. It could be done based on simple criteria, such as movie genre, e.g. comedies or action adventure. More sophisticated recommendations might consider many more factors, such as the director, when the movie was produced and so on.In this example, we will use a common, straightforward method known as [collaborative filtering](https://en.wikipedia.org/wiki/Collaborative_filtering). This method is based on the idea that many customers have similar likes and dislikes. It also considers similarities between products. It's a simple, yet effective technique that depends only on user preferences, such as product ratings. If you have a sufficiently large dataset of ratings from your customers, then this approach is a good place to start.
###Code
%tensorflow_version 2.x
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Input, Embedding, Flatten, Dot, Dense, Add, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
Load dataIn this example, we will make movie recommendations given about 100,000 ratings from roughly 600 users across nearly 10,000 movies.The data set is freely available on the [MovieLens website](https://grouplens.org/datasets/movielens/).
###Code
!wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
!unzip ml-latest-small.zip
movies = pd.read_csv('ml-latest-small/movies.csv')
movies.head()
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
###Output
_____no_output_____
###Markdown
Join Ratings with MoviesThe ratings don't contain movie titles, so let's join or merge these two sets for convenience.
###Code
ratings = ratings.merge(movies, on='movieId').drop(['genres','timestamp'],axis=1)
ratings.head()
###Output
_____no_output_____
###Markdown
Generate Sequential Identifiers`userId` and `movieId` are not sequential, which causes problems for our model. To compensate, we can use the [LabelEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class from [scikit-learn](https://scikit-learn.org/) to generate sequential identifiers for users and movies. The original identifiers are still available, so we can always join back to the original data set if needed.
###Code
user_enc = LabelEncoder()
ratings['userSeq'] = user_enc.fit_transform(ratings['userId'].values)
item_enc = LabelEncoder()
ratings['movieSeq'] = item_enc.fit_transform(ratings['movieId'].values)
ratings.head()
###Output
_____no_output_____
###Markdown
Train/Test SplitThis case is a bit unusual because we need ratings for every movie from every user to train an accurate model. If we used a traditional split, some movies might be left out, which will cause problems during prediction.For this reason, we will use all of the data for training and a subset for model validation only.
###Code
train_unused, test = train_test_split(ratings, test_size=0.20, random_state=0)
# All data is used for training
train = ratings
numUsers = len(train.userSeq.unique())
numMovies = len(train.movieSeq.unique())
print((numUsers, numMovies))
print((len(train), len(test)))
###Output
(610, 9724)
(100836, 20168)
###Markdown
Recommendation ModelCollaborative filtering tries to minimize the error between a predicted value and ground truth. This is similar to many supervised machine learning problems. The model learns a set of features that similar movies share. The number of features could be as simple as the genre or more complex. The `numFeatures` variable below is a hyperparameter that can be tuned to optimize performance.This model uses the [Keras functional API](https://keras.io/getting-started/functional-api-guide/) rather than adding layers to a Sequential model. This is necessary because we have two sets of inputs, userSeq and movieSeq.
###Code
numFeatures = 50
dropout = 0.0
user_input = Input(shape=(1,))
user_emb = Embedding(numUsers, numFeatures)(user_input)
flat_user = Flatten()(user_emb)
user_dropout = Dropout(dropout)(flat_user)
movie_input = Input(shape=(1,))
movie_emb = Embedding(numMovies, numFeatures)(movie_input)
flat_movie = Flatten()(movie_emb)
movie_dropout = Dropout(dropout)(flat_movie)
dotProduct = Dot(axes=1)([user_dropout, movie_dropout])
user_bias = Embedding(numUsers, 1)(user_input)
movie_bias = Embedding(numMovies, 1)(movie_input)
sum = Add()([dotProduct, user_bias, movie_bias])
flat_sum = Flatten()(sum)
output = Dropout(dropout)(flat_sum)
model = Model([user_input, movie_input], output)
model.summary()
###Output
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_3 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
input_4 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
embedding_4 (Embedding) (None, 1, 50) 30500 input_3[0][0]
__________________________________________________________________________________________________
embedding_5 (Embedding) (None, 1, 50) 486200 input_4[0][0]
__________________________________________________________________________________________________
flatten_3 (Flatten) (None, 50) 0 embedding_4[0][0]
__________________________________________________________________________________________________
flatten_4 (Flatten) (None, 50) 0 embedding_5[0][0]
__________________________________________________________________________________________________
dropout_3 (Dropout) (None, 50) 0 flatten_3[0][0]
__________________________________________________________________________________________________
dropout_4 (Dropout) (None, 50) 0 flatten_4[0][0]
__________________________________________________________________________________________________
dot_1 (Dot) (None, 1) 0 dropout_3[0][0]
dropout_4[0][0]
__________________________________________________________________________________________________
embedding_6 (Embedding) (None, 1, 1) 610 input_3[0][0]
__________________________________________________________________________________________________
embedding_7 (Embedding) (None, 1, 1) 9724 input_4[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 1, 1) 0 dot_1[0][0]
embedding_6[0][0]
embedding_7[0][0]
__________________________________________________________________________________________________
flatten_5 (Flatten) (None, 1) 0 add_1[0][0]
__________________________________________________________________________________________________
dropout_5 (Dropout) (None, 1) 0 flatten_5[0][0]
==================================================================================================
Total params: 527,034
Trainable params: 527,034
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Model Training
###Code
model.compile(loss='mean_squared_error', optimizer=Adam())
history = model.fit([train.userSeq, train.movieSeq], train.rating,
batch_size=32, epochs=10, verbose=1,
validation_data=([test.userSeq, test.movieSeq], test.rating))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
Train on 100836 samples, validate on 20168 samples
Epoch 1/10
100836/100836 [==============================] - 25s 250us/sample - loss: 6.1631 - val_loss: 1.4479
Epoch 2/10
100836/100836 [==============================] - 24s 243us/sample - loss: 1.1436 - val_loss: 0.8050
Epoch 3/10
100836/100836 [==============================] - 24s 240us/sample - loss: 0.8126 - val_loss: 0.6449
Epoch 4/10
100836/100836 [==============================] - 24s 239us/sample - loss: 0.6762 - val_loss: 0.5411
Epoch 5/10
100836/100836 [==============================] - 24s 238us/sample - loss: 0.5714 - val_loss: 0.4535
Epoch 6/10
100836/100836 [==============================] - 24s 236us/sample - loss: 0.4810 - val_loss: 0.3776
Epoch 7/10
100836/100836 [==============================] - 24s 239us/sample - loss: 0.4016 - val_loss: 0.3104
Epoch 8/10
100836/100836 [==============================] - 24s 238us/sample - loss: 0.3331 - val_loss: 0.2550
Epoch 9/10
100836/100836 [==============================] - 24s 237us/sample - loss: 0.2743 - val_loss: 0.2072
Epoch 10/10
100836/100836 [==============================] - 24s 239us/sample - loss: 0.2250 - val_loss: 0.1669
###Markdown
Notice the validation loss is slightly lower than the training loss. If the model was overfitting, then the opposite would be true, so this is a peculiar case.There are a few reasons this can happen:1. A Keras artifact explained in the [Keras FAQ](https://keras.io/getting-started/faq/why-is-the-training-loss-much-higher-than-the-testing-loss). Keras computes training loss as the average during training time, which can change quite a bit during one epoch. Validation is computed at the end of an epoch when the model loss is probably lower.2. The test set is not representative of the training set. In some cases, the test set might be easier to predict than the training set. This could happen if a very small test set is used. Make PredictionsWe can make predictions for a given user by creating a numpy array of all movies and a numpy array of the same dimension filled with just the one user we are interested in. The model will predict ratings for the specified user given all movies in the full data set.We can then sort the data set by predicted rating descending to get the best recommendations first.
###Code
# The user for whom we want to make recommendations
userNumber = 0
uniqueMovies = ratings.drop_duplicates(subset=['movieSeq'])
movie_vector = uniqueMovies.movieSeq.values
user_vector = np.ones((len(uniqueMovies),)) * userNumber
predictions = model.predict([user_vector, movie_vector])
predicted_values = [a[0] for a in predictions]
predictedDF = uniqueMovies.copy()
# assign a plain list so values are matched by position, not re-aligned on the index
predictedDF['Predictions'] = predicted_values
predictedDF.sort_values(by='Predictions', ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Error AnalysisLet's look at some movies where the ground truth did not compare well with predictions.
###Code
oneUser = predictedDF[predictedDF.userSeq == userNumber].copy()
oneUser['Error'] = (oneUser.rating - oneUser.Predictions)**2
oneUser.sort_values(by='Error', ascending=False).head(5)
ratings[ratings.movieSeq == 520].sort_values(by='rating', ascending=True)
###Output
_____no_output_____ |
DSA/backtracking/subsets.ipynb | ###Markdown
Given a set of distinct integers, nums, return all possible subsets (the power set).Note: The solution set must not contain duplicate subsets.Example: Input: nums = [1,2,3] Output: [ [3], [1], [2], [1,2,3], [1,3], [2,3], [1,2], [] ]
###Code
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
self.dfs(nums, 0, [], res)
return res
    def dfs(self, nums, index, path, res):
        # every partial path built so far is itself a valid subset, so record it
        res.append(path)
        # extending with nums[i] and recursing from i+1 ensures each element is
        # used at most once and no duplicate subsets are generated
        for i in range(index, len(nums)):
            self.dfs(nums, i+1, path+[nums[i]], res)
# test
nums = [1,2,3]
print(Solution().subsets(nums))
###Output
[[], [1], [1, 2], [1, 2, 3], [1, 3], [2], [2, 3], [3]]
|
notebooks/dev_summit_2020/Step 4 - Optimally Creating and Assigning Work Orders Based on Routes.ipynb | ###Markdown
Optimally Creating and Assigning Work Orders Based on RoutesSuppose our organization needs to perform restaurant/brewery inspections in the Greater Portland, Maine area. Let's assume that there are around 25 breweries that need to be inspected and that there are 5 workers that are available to do the inspections. As the supervisor of these workers I'm going to develop a Python Script (well, Jupyter Notebook in this case) that will optimally create distinct routes for my workers, create assignments at the brewery locations, and then assign the assignment to the correct worker. Import ArcGIS API for PythonLet's import some libraries and connect to our organization
###Code
import pandas as pd
import arcgis
from arcgis.gis import GIS
from arcgis.apps import workforce
pd.options.display.max_columns = None
gis = GIS("https://arcgis.com", "workforce_scripts")
project = workforce.Project(gis.content.search("type:'Workforce Project' Maine Brewery Inspections")[0])
project.assignments_item.layers[0].delete_features(where="1=1")
###Output
_____no_output_____
###Markdown
View the breweries that need to be inspected
###Code
breweries = gis.content.search("type:'Feature Service' owner:workforce_scripts Maine Breweries")[0].layers[0]
breweries.filter = "location in ('Portland','South Portland','Gorham','Biddeford','Scarborough', 'Topsham','Freeport')"
webmap = gis.map("Portland, ME", zoomlevel=10)
webmap.add_layer(breweries)
webmap
breweries_df = breweries.query(where=breweries.filter, out_fields="objectid,name,location,url", as_df=True)
breweries_df
###Output
_____no_output_____
###Markdown
Create a route for each workerNow that we know what we're working with, let's use the Plan Routes tool to generate the most optimal routes for each of the workers. First we need to define where the workers will start their routes. Each worker will begin from the main office located at 100 Commercial Street, Portland Maine. We'll use the geocoding module to get an exact location for this address.
###Code
from arcgis.geocoding import geocode
start_location = geocode("100 Commercial Street, Portland, ME", out_sr={"wkid": 102100})[0]["location"]
start_location["spatialReference"] = {"wkid": 102100}
start_location
###Output
_____no_output_____
###Markdown
Next we need to create a feature at this location
###Code
feature = arcgis.features.Feature(
attributes={
"ObjectID": 1,
"Name": "Office"
},
geometry=start_location
)
###Output
_____no_output_____
###Markdown
Next, we'll create a Feature Set from the feature. Then we'll create a Feature Collection from the Feature Set. Finally, we'll format the layer so that it conforms to the expected input format defined [here](https://doc.arcgis.com/en/arcgis-online/analyze/plan-routes.htm).
###Code
feature_set = arcgis.features.FeatureSet([feature])
feature_collection = arcgis.features.FeatureCollection.from_featureset(feature_set)
start_layer = {"layerDefinition": feature_collection.properties["layers"][0]["layerDefinition"], "featureSet": feature_set.value}
###Output
_____no_output_____
###Markdown
Then we'll run the Plan Routes tool using the breweries layer as list of stops to route to. We'll set the number of routes equal to the number of workers. We'll also set the start time and start location as well as few other parameters.
###Code
from datetime import datetime
workers = project.workers.search()
results = arcgis.features.analysis.plan_routes(breweries, # Feature Layer of Stops
len(workers), # Number of routes to generate
5, # Maximum stops per route
datetime.now(), # Start time of route
start_layer, # The dictionary we created to represent the start location
stop_service_time=60, # How much time in minutes to spend at each stop
max_route_time=480, # The maximum time for the worker to complete the route
)
results
###Output
_____no_output_____
###Markdown
Let's see what the routes look like
###Code
webmap = gis.map("Portland, ME", zoomlevel=10)
webmap.add_layer(results["routes_layer"])
webmap.add_layer(results["assigned_stops_layer"])
webmap
###Output
_____no_output_____
###Markdown
Let's look at what data is in each route
###Code
routes = results['routes_layer'].query().sdf
routes
###Output
_____no_output_____
###Markdown
You can see that each route has a name, total time, and total distance among other things. Let's see what information is provided in an assigned stop.
###Code
stops = results['assigned_stops_layer'].query().sdf
stops
###Output
_____no_output_____
###Markdown
You can see each row in the above table contains the attributes of each Brewery along with information about which route it is on. You'll also notice that there are several additional stops not related to a brewery. These are the starting and ending locations of each route. Create Assignment and Assign To WorkerFor each route that was generated we will select a random worker to complete that route. Then we'll find the breweries that were assigned to that route and create an Inspection Assignment for each one. Notice that when the assignment is created we are also assigning it to a worker.An important thing to note is that we are setting the due date of the assignment to the departure date of the stop. This means that a mobile worker will be able to sort their "To Do" list by due date and see the assignments in the correct order (according to the route).
###Code
import random
assignments_to_add = []
for _, row in routes.iterrows():
worker = random.choice(workers)
workers.remove(worker)
route_stops = stops.loc[(stops['RouteName'] == row["RouteName"]) & stops['globalid'].notnull()]
for _, stop in route_stops.iterrows():
assignments_to_add.append(workforce.Assignment(
project,
assignment_type="Inspection",
location=stop["name"],
status="assigned",
worker=worker,
assigned_date=datetime.now(),
due_date=stop["DepartTime"],
geometry=stop["SHAPE"]
))
assignments = project.assignments.batch_add(assignments_to_add)
###Output
_____no_output_____
###Markdown
Let's check to verify the assignments were created and are assigned
###Code
webmap.add_layer(project.assignments_layer)
webmap
###Output
_____no_output_____ |
Python for Finance - Code Files/103 Monte Carlo - Predicting Stock Prices - Part I/Online Financial Data (APIs)/Python 3 APIs/MC Predicting Stock Prices - Part I - Solution_IEX.ipynb | ###Markdown
Monte Carlo - Forecasting Stock Prices - Part I *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).* Download the data for Microsoft (‘MSFT’) from IEX for the period ‘2015-1-1’ until today.
###Code
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
%matplotlib inline
ticker = 'MSFT'
data = pd.DataFrame()
data[ticker] = wb.DataReader(ticker, data_source='iex', start='2015-1-1')['close']
###Output
5y
###Markdown
Use the .pct_change() method to obtain the log returns of Microsoft for the designated period.
###Code
log_returns = np.log(1 + data.pct_change())
log_returns.tail()
data.plot(figsize=(10, 6));
log_returns.plot(figsize = (10, 6))
###Output
_____no_output_____
###Markdown
Assign the mean value of the log returns to a variable, called “U”, and their variance to a variable, called “var”.
###Code
u = log_returns.mean()
u
var = log_returns.var()
var
###Output
_____no_output_____
###Markdown
Calculate the drift, using the following formula: $$drift = u - \frac{1}{2} \cdot var$$
###Code
drift = u - (0.5 * var)
drift
###Output
_____no_output_____
###Markdown
Store the standard deviation of the log returns in a variable, called “stdev”.
###Code
stdev = log_returns.std()
stdev
###Output
_____no_output_____ |
LabExercise_6/Lab6_GroundedSource.ipynb | ###Markdown
Understanding grounded source EM The objective of this lab exercise is to help students develop a better understanding of the physics of the grounded source EM, with the help of the interactive apps that allow students to adjust model and survey parameters and simulate EM fields. We are going to look at three models that were discussed in class in order to build an understanding of the grounded source EM. - Halfspace (0.01 S/m)- Conductive block in halfspace (1 S/m)- Resistive block in halfspace (10$^{-4}$ S/m).We will also use a canonical layered Earth model to simulate a marine CSEM survey. After finishing this exercise, students will understand * How the currents distribute in a homogeneous halfspace;* How a resistor and a conductor change the current distribution;* How the resistivity of the hydrocarbon reservoir affects the electric field.Author: Jiajia Sun at University of Houston, Nov 2nd, 2018.
###Code
!pip install em_examples
from ipywidgets import interact, interactive, FloatSlider, IntSlider, ToggleButtons
from em_examples.TDEMGroundedSource import *
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Grounded source EM with a halfspace Survey Setup:
###Code
Q = interact(choose_model,
model=ToggleButtons(
options=["halfspace", "conductor", "resistor"], value="halfspace"
)
)
import matplotlib
matplotlib.rcParams['font.size']=16
options = load_or_run_results(
re_run=False,
fname=choose_model(Q.widget.kwargs['model']),
sigma_block=0.01,
sigma_halfspace=0.01
)
tdem = PlotTDEM(**options)
interact(
tdem.show_3d_survey_geometry,
elev=FloatSlider(min=-180, max=180, step=10, value=30),
azim=FloatSlider(min=-180, max=180, step=10, value=-45),
)
interact(
tdem.plot_input_currents,
    itime=IntSlider(min=15, max=50, step=1, value=15, continuous_update=False),
scale=ToggleButtons(
options=["linear", "log"], value="linear"
),
)
interact(
tdem.plot_electric_currents,
    itime=IntSlider(min=15, max=50, step=1, value=15, continuous_update=False)
)
###Output
_____no_output_____
###Markdown
**Task 1**: Set itime to 15, i.e., at time 0.00 ms (immediately after the current in the electrical dipole is interrupted), describe the spatial pattern of the induced currents in the x-z plane (i.e., the right panel). **(10 points)** **HINT:** The spatial pattern includes the directions, the shapes, and the magnitudes of the induced currents. **(answer to Task 1:) ** **Task 2**: As you increase the time (by adjusting the itime slider), summarize what you observe about the current density in x-z plane (i.e. the right panel). **(20 points)** **HINT:** Please summarize your observations from the following aspects. First, how does the maximum current density value change when you increase the time? Secondly, how does the location of the peak of the current density change? Thirdly, how does the direction of the currents change? **(answer to Task 2:)** Grounded source EM with a conductor
###Code
Q = interact(choose_model,
model=ToggleButtons(
options=["halfspace", "conductor", "resistor"], value="conductor"
)
)
import matplotlib
matplotlib.rcParams['font.size']=16
options = load_or_run_results(
re_run=False,
fname=choose_model(Q.widget.kwargs['model']),
sigma_block=0.01,
sigma_halfspace=0.01
)
tdem = PlotTDEM(**options)
interact(
tdem.plot_electric_currents,
    itime=IntSlider(min=15, max=50, step=1, value=15, continuous_update=False)
)
###Output
_____no_output_____
###Markdown
**Task 3**: Set itime to 15, i.e., at time 0.00 ms (immediately after the current in the electrical dipole is interrupted). How is the induced current in the case of a conductor different from what you have observed above for a homogeneous halfspace? **(10 points)** **(answer to Task 3:)** **Task 4**: Set itime to 42, i.e., at time 8.10 ms. How is the induced current different from what you observed at time 0.00 ms? **(10 points)** **(answer to Task 4:) ** Grounded source EM with a resistor
###Code
Q = interact(choose_model,
model=ToggleButtons(
options=["halfspace", "conductor", "resistor"], value="resistor"
)
)
import matplotlib
matplotlib.rcParams['font.size']=16
options = load_or_run_results(
re_run=False,
fname=choose_model(Q.widget.kwargs['model']),
sigma_block=0.01,
sigma_halfspace=0.01
)
tdem = PlotTDEM(**options)
interact(
tdem.plot_electric_currents,
    itime=IntSlider(min=15, max=50, step=1, value=15, continuous_update=False)
)
###Output
_____no_output_____
###Markdown
**Task 5**: Set itime to 16, i.e., at time 0.02 ms. Summarize your observations of the induced current in the x-y and x-z plane. **(15 points)** **(answer to Task 5:) ** **Task 6**: Now keep increasing the itime index. How does the induced current in the x-z plane change with time? **(15 points)** **(answer to Task 6:) ** Marine controlled source EM (CSEM)
###Code
from em_examples.MarineCSEM1D import show_canonical_model, FieldsApp, DataApp
from IPython.display import HTML
from empymod import utils
###Output
_____no_output_____
###Markdown
Canonical modelWe consider a canonical resistivity model, which includes a thin resistive layer (corresponding to a reservoir containing a significant amount of hydrocarbon). Five layers having different resistivity values are considered: - air: perfect insulator ($\rho_0 \sim 10^{8}\ \Omega$m)- seawater: conductor ($\rho_1 \sim 0.3\ \Omega$m)- sea sediment (upper): conductor ($\rho_2 \sim 1\ \Omega$m)- reservoir: resistor ($\rho_3 \sim 100\ \Omega$m)- sea sediment (lower): conductor ($\rho_4 \sim 1\ \Omega$m)Conductive sea sediment can have anisotropy, and often vertical resistivity ($\rho_v$) is greater than horizontal resistivity ($\rho_h$); e.g. $\rho_v/\rho_h \simeq 2$. However, the hydrocarbon reservoir is often assumed to be isotropic.
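As a complement to the interactive app below, here is a minimal 1D frequency-domain sketch of this canonical model using `empymod`. The geometry — 1 km of seawater, a 100 m thick reservoir at 2 km depth, a 1 Hz source 50 m above the seafloor, and seafloor receivers at 2-10 km offset — is an assumption chosen only for illustration, not necessarily the exact setup used by the app.

```python
import numpy as np
import empymod

offsets = np.linspace(2000, 10000, 81)   # inline receiver offsets (m), assumed for illustration
common = dict(
    src=[0, 0, 950],                     # x, y, z of the electric dipole source (m)
    rec=[offsets, offsets * 0, 1000],    # receivers on the assumed seafloor
    depth=[0, 1000, 2000, 2100],         # air/sea, sea/sediment, reservoir top, reservoir bottom
    freqtime=1,                          # frequency (Hz)
    verb=0,
)
e_with = empymod.dipole(res=[1e8, 0.3, 1, 100, 1], **common)   # with the resistive reservoir
e_without = empymod.dipole(res=[1e8, 0.3, 1, 1, 1], **common)  # same model without it
print(np.abs(e_with[-1]) / np.abs(e_without[-1]))              # reservoir anomaly at the far offset
```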
###Code
show_canonical_model()
DataApp()
###Output
_____no_output_____ |
notebooks/The_network_of_a_party.ipynb | ###Markdown
In this notebook, we will create a network for some parties and analyze the patterns obtained. We may focus on four parties: PSL (party of the president), PT (party of the previous presidents and the main opposition), NOVO (right-wing and liberal, as they say) and PDT (floating between the center and center-left).
###Code
!pip install nxviz
!pip install unidecode
# Import necessary modules
import pandas as pd
import matplotlib.pyplot as plt
# Building the graph
import requests
from sklearn.feature_extraction.text import CountVectorizer
from itertools import combinations
import networkx as nx
from nxviz import CircosPlot
from unidecode import unidecode
dataset = pd.read_csv('speeches.csv')
dataset.loc[:, 'speech'] = dataset.speech.str.replace('\r', '')
dataset.loc[:, 'speech'] = dataset.speech.str.replace('\n', '')
dataset.loc[:, 'speech'] = dataset.speech.str.replace('-', '')
###Output
_____no_output_____
###Markdown
GraphsThe graphs will be created by following the steps of the method explained in this [paper](https://scholar.google.com.br/scholar?q=Identifying+the+Pathways+for+Meaning+Circulation+using+Text+Network+Analysis&hl=pt-BR&as_sdt=0&as_vis=1&oi=scholart).We will use the 2-gram and 5-gram to generate the graph. That is, given a phrase, we extract the words in groups of 2 and 5 words. For example:```Some are born to sweet delight.```When applying the method to the phrase above, we may get the following tokens:```[ 'Some are', 'are born', 'born to', 'to sweet', 'sweet delight', 'Some are born to sweet', 'are born to sweet delight' ]```In this study, each token will have a number attached to it representing the frequency of the token in the text. Given a token, each word represents a node in the graph. For the 2-gram, the frequency is also the weight of the edge between the words. In the example above, we will have an edge `(Some, are)` with weight 1.When processing the 5-gram, we have to form combinations of length 2, and then repeat the process used for the 2-gram tokens. In any case, if the edge already exists, the weight of the edge will be increased. Continuing the example above, the first 5-gram token will have combinations as below:```(Some, are), (are, born), (born, to), (to, sweet), (Some, born), (Some, to), (Some, sweet)...```and so on.The first pair already exists in the graph, since it was already obtained in a 2-gram token. Thus, its weight will be updated to 2.This whole process will be repeated until the final graph is built.
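A minimal sketch (illustrative only, applied to the example phrase above) of how the n-gram counts can be turned into weighted edges; the real implementation used for the speeches follows in the next cells, so parameter choices here (e.g. `lowercase=False`) are just to keep the toy output readable.

```python
from itertools import combinations
from sklearn.feature_extraction.text import CountVectorizer
import networkx as nx

phrase = ["Some are born to sweet delight"]
vectorizer = CountVectorizer(ngram_range=(2, 5), lowercase=False)
counts = vectorizer.fit_transform(phrase)
vocabulary = {token: counts[0, idx] for token, idx in vectorizer.vocabulary_.items()}

G = nx.Graph()
for token, frequency in vocabulary.items():
    words = token.split()
    if len(words) not in (2, 5):          # keep only the 2-grams and 5-grams
        continue
    for w1, w2 in combinations(set(words), 2):
        if G.has_edge(w1, w2):
            G[w1][w2]["weight"] += frequency
        else:
            G.add_edge(w1, w2, weight=frequency)

print(G.get_edge_data("Some", "are"))     # {'weight': 2}, as in the walkthrough above
```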
###Code
def is_important(word):
if len(word) < 2:
return False
ends = ['indo', 'ando', 'ondo', 'r', 'em',
'amos', 'imos', 'ente', 'emos','ou','dei',
'iam', 'cido', 'mos', 'am']
for end in ends:
if word.endswith(end):
return False
return True
def norm(word):
exceptions = ['pais', 'pessoas', 'dados', 'companhias', 'juntos']
if word in exceptions:
return word
ends = ['es', 'as']
for end in ends:
if word.endswith(end):
return word[:-2]
if word.endswith('is'):
return word[:-2] + 'l'
if word.endswith('s'):
return word[:-1]
return word
def generate_graph(vocabulary):
"""
"""
# Create a undirected graph
G = nx.Graph()
# Iterate over each item of the vocabulary
for phrase, frequency in vocabulary.items():
# Get words in the phrase
words = phrase.split()
# Using only tokens of length 2 or 5
if len(words) not in [2,5]:
continue
words_norm = [norm(word) for word in words if is_important(word) ]
# Extract unique words in the phrase
words_unique = list(set(words_norm))
# Create a node if it does not exists already
G.add_nodes_from(words_unique)
# Form combinations of 2 from the words
# which will be a edge
pair = combinations(words_unique, 2)
for word1, word2 in pair:
edge = (word1, word2)
# Increments weight of edge
# if it already exists
# Otherwise, create a new edge
if edge in G.edges:
G.edges[word1, word2]['weight'] += frequency
else:
G.add_weighted_edges_from([(word1, word2, frequency)])
return G
###Output
_____no_output_____
###Markdown
PSLThe first party analysed is the one which our president represents.
###Code
psl = dataset.query('party == "PSL"')
###Output
_____no_output_____
###Markdown
We will use the [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) in order to calculate the frequency with which the words, or groups of words, appear in the speeches.It's important to define some stop words, that is, words that are irrelevant because they are repeated too many times (e.g., articles).
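A small self-contained illustration (the sentence and stop-word list below are made up for demonstration, not taken from the speeches) of how passing `stop_words` changes the vocabulary that `CountVectorizer` counts:

```python
from sklearn.feature_extraction.text import CountVectorizer

demo = ["a cerveja do povo e a cerveja da cidade"]
with_stops = CountVectorizer().fit(demo)
no_stops = CountVectorizer(stop_words=["a", "do", "da", "e"]).fit(demo)
print(sorted(with_stops.vocabulary_))  # ['cerveja', 'cidade', 'da', 'do', 'povo']
print(sorted(no_stops.vocabulary_))    # ['cerveja', 'cidade', 'povo']
```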
###Code
stop_words_pt = requests.get('https://gist.githubusercontent.com/alopes/5358189/'+
'raw/2107d809cca6b83ce3d8e04dbd9463283025284f/stopwords.txt')
TOKENS_ALPHANUMERIC = '[A-Za-z]+(?=\\s+)'
# Ignore irrelevant words
STOP_WORDS = [unidecode(word.strip()) for word in stop_words_pt.text.split('\n')]
STOP_WORDS += ['neste', 'nesta', 'aqui', 'vou', 'nele', 'mesma', 'faz',
'zero', 'dois', 'duas', 'ir', 'mil', 'vai', 'aa', 'porque', 'pois',
'gostaria', 'cumprimentar', 'quero', 'dizer', 'vez', 'sobre', 'cada',
'deste', 'desta', 'ainda', 'vamos', 'pode', 'vem', 'deixar', 'vao',
'fazer', 'sendo', 'todo', 'todos', 'grande', 'presidente', 'quer',
'qualquer', 'dia', 'deputado', 'deputados', 'deputadas', 'venho',
'ver', 'tudo', 'tao', 'querem', 'correnco', 'corresponda', 'forma',
'fez', 'dar', 'apenas', 'traz', 'varios', 'vim', 'alem', 'sido',
'demos', 'todas', 'dermos', 'vemos', 'vale', 'torno', 'faco', 'espera',
'expressar', 'tentamos', 'pegar', 'queremos', 'usaremos', 'senhores',
'senhoras', 'senhor', 'senhora', 'fazendo', 'veio', 'vi', 'durante',
'ali', 'aqui', 'queria', 'ouvi', 'falando', 'entao', 'parece', 'assistam',
'presenciei', 'falar', 'algumas', 'sei', 'usar', 'fiz', 'usei', 'quiser',
'garantir', 'devida', 'contemplar', 'adianta', 'pensarmos', 'alguns',
'muitas', 'muitos', 'implica', 'fizeram', 'frisar', 'diz', 'poucas',
'usam', 'acho', 'combinamos', 'reiteradamente', 'deferido', 'outro',
'precisamos', 'importante', 'interessante', 'amplie', 'elencar',
'trago', 'outros', 'outras', 'outra', 'parte', 'encaminhado', 'integra',
'vezes', 'seis', 'partir', 'cria', 'atraves', 'anos', 'meses', 'oitava',
'chegou', 'posso', 'referente', 'detinado', 'nenhuma', 'nenhum', 'iv',
'doze', 'medias', 'ultimos', 'esquece', 'colocar', 'unica', 'ano',
'aplicando', 'fica', 'fale', 'concedo', 'fala', 'passaram', 'comum',
'menos', 'mais', 'jamais','sempre', 'querendo', 'ai', 'mexe', 'alguma',
'saber', 'der', 'peco', 'cuide', 'peco', 'estar', 'trazer', 'sabe',
'tirou', 'cumprimento', 'passam', 'facamos', 'fazem', 'quatro',
'muita', 'certeza', 'la', 'quase', 'disse', 'maior', 'feito', 'deve',
'inspecionados', 'inicio', 'citando', 'poder', 'ficar', 'aplicase',
'inicialmente', 'solicito', 'dessa', 'precisa', 'cabe', 'possui',
'terceiro', 'mencionou', 'altura', 'podiam', 'certa', 'bem', 'toda',
'exija', 'trata', 'coisa', 'simples', 'criaram', 'medida', 'momento',
'tentando', 'agradeco', 'pronunciamento', 'inventaram', 'votarmos',
'votar', 'votaram', 'votamos', 'sustarmos', 'criou', 'falei', 'preciso',
'convencam', 'atingiu', 'volta', 'questao', 'chegar', 'destacar',
'causou','prezadas', 'prezados', 'desculpemm', 'encerramento',
'prezado','parece' 'confirmando','excelentissimo', 'escutado',
'orientando','correndo','haver','respeitassem','ora','reconhecemos',
'cumprimentando','informar','orientar','suprimir','profunda',
'destacar','considera','comeca','focar', 'quiserem','encaminhamento',
'dentro', 'obrigar', 'discutida', 'reais', 'gastamse', 'tanta',
'tanto', 'tantas', 'tantos', 'ajudar', 'avanca','messes',
'dispensado', 'chegar', 'previsto', 'preciso', 'convencam', 'duvida',
'agora', 'tomam','tirar', 'unico', 'faca', 'primeiro', 'podemos',
'contra', 'acabar', 'coloca', 'algo', 'uns', 'carregam', 'surgiu',
'rever', 'retiralo', 'ressalto', 'importancia', 'aproveito',
'oportunidade', 'comungo', 'significa', 'parabenizar','hoje',
'conheca', 'invertendo', 'confirmando', 'desenvolveu', 'aprofundar',
'conduz', 'desculpeme', 'excelentissimos', 'roda', 'descaracteriza',
'concedem', 'cresca', 'favoravelmente', 'instalamos', 'autorize',
'determina', 'assim', 'dias', 'onde', 'quando', 'tira', 'pensar',
'implicara', 'horas', 'acredito', 'ninguem', 'procuraria', 'acima',
'deverao', 'falo', 'nada', 'fundamental', 'totalmente', 'nessa',
'fazermos', 'pensar', 'ganhar', 'comete', 'sofre', 'nesse', 'neste',
'existe', 'fere', 'passou', 'tres', 'obstruindo', 'rediscutir',
'assunto', 'assuntos', 'entendo', 'preservar', 'tarde', 'meios',
'desse', 'simplesmente','antes', 'longe', 'perto','aproximadamente',
'mal', 'melhor', 'pior', 'falamos', 'bastasse', 'mostrar', 'meio',
'alguem', 'inclusive', 'colega', 'boa', 'bom', 'nobre', 'primeira',
'primeiro', 'milhoes', 'deputada', 'deputadas', 'ficaria', 'estara',
'desses', 'dessas', 'junto', 'fim', 'semana', 'orientamos', 'claro',
'claros', 'orienta','pouco', 'colegas']
vec_alphanumeric = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC,decode_error='replace' ,
stop_words=STOP_WORDS, ngram_range=(2,5),
encoding='latin1', strip_accents='unicode')
# Fit to the data
X = vec_alphanumeric.fit_transform(psl.speech)
###Output
_____no_output_____
###Markdown
 Below is the number of features (or tokens) obtained from all the speeches.
###Code
len(vec_alphanumeric.get_feature_names())
vec_alphanumeric.vocabulary_
vec_alphanumeric.get_feature_names()[:5]
###Output
_____no_output_____
###Markdown
Creating the graphNow, we can use the method defined in the previous section to generate the graph.
###Code
G = generate_graph(vec_alphanumeric.vocabulary_)
len(G.nodes())
len(G.edges())
nx.write_graphml_lxml(G, "psl.graphml")
###Output
_____no_output_____
###Markdown
 Extracting information from the graphA connected component is a subgraph in which there is a path between any two vertices. One graph may have many connected components, such as the one below:> *Image from Wikipedia*In the context of social networks, this can be used to find groups whose members are connected to each other.
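 A minimal sketch (toy graph added for illustration, not the speech data) of how `connected_components` separates disconnected groups:
```python
import networkx as nx

# Two obviously separate groups of nodes.
H = nx.Graph([("a", "b"), ("b", "c"), ("x", "y")])
for component in nx.connected_components(H):
    print(component)  # prints {'a', 'b', 'c'} and {'x', 'y'}
```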
###Code
components = list(nx.connected_components(G))
print("There are %i components" % len(components))
###Output
There are 1 components
###Markdown
 As there is only one component, we can't extract more interesting information from it. CentralityThe centrality indicators give us a notion of the most important nodes in the graph. One simple indicator is degree centrality, which can be calculated from the degree of each node.
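 For instance (toy graph added for illustration), `degree_centrality` normalizes each node's degree by `n - 1`:
```python
import networkx as nx

H = nx.path_graph(4)            # 0 - 1 - 2 - 3
print(nx.degree_centrality(H))  # end nodes: 1/3, middle nodes: 2/3
```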
###Code
# Plot the degree centrality distribution of the speech graph
plt.hist(list(nx.degree_centrality(G).values()))
plt.show()
# Compute the degree centralities of G
deg_cent = nx.degree_centrality(G)
# Compute the maximum degree centrality
max_dc = max(deg_cent.values())
most_central_words = [n for n, dc in deg_cent.items() if dc == max_dc]
# Print the most central word(s)
print(most_central_words)
###Output
['estado']
###Markdown
 CliqueA clique is a subset of nodes that are fully connected. This concept is widely used in the study of social networks, since in that context a clique can represent a group of people who all know each other.When analysing the speeches, we can use the largest maximal clique to find the biggest group of words that appear in the same context.
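 A small sketch (toy graph added for illustration) of how `find_cliques` returns the maximal cliques:
```python
import networkx as nx

# A triangle (a, b, c) plus a pendant node d attached to c.
H = nx.Graph([("a", "b"), ("b", "c"), ("a", "c"), ("c", "d")])
print(list(nx.find_cliques(H)))  # maximal cliques: ['a', 'b', 'c'] and ['c', 'd']
```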
###Code
cliques = nx.find_cliques(G)
len(list(cliques))
largest_clique = sorted(nx.find_cliques(G), key=lambda x:len(x))[-1]
G_lc = G.subgraph(largest_clique)
for n in G_lc.nodes():
G_lc.node[n]['degree centrality'] = deg_cent[n]
# Create the CircosPlot object
c = CircosPlot(G_lc, node_labels=True, node_grouping='degree centrality',
node_order='degree centrality')
# Draw the CircosPlot to the screen
c.draw()
plt.show()
from nxviz import ArcPlot
G_lmc = G_lc.copy()
# Go out 1 degree of separation
for node in list(G_lmc.nodes()):
if(deg_cent[node] == max_dc):
G_lmc.add_nodes_from(G.neighbors(node))
G_lmc.add_edges_from(zip([node]*len(list(G.neighbors(node))), G.neighbors(node)))
# Record each node's degree centrality score
for n in G_lmc.nodes():
G_lmc.node[n]['degree centrality'] = deg_cent[n]
# Create the ArcPlot object: a
a = ArcPlot(G_lmc, node_order='degree centrality', node_labels=True)
# Draw the ArcPlot to the screen
a.draw()
plt.show()
###Output
_____no_output_____
###Markdown
PT
###Code
pt = dataset.query('party == "PT"')
vec_pt = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC,decode_error='replace' ,
stop_words=STOP_WORDS, ngram_range=(2,5),
encoding='latin1', strip_accents='unicode')
X = vec_pt.fit_transform(pt.speech)
G = generate_graph(vec_pt.vocabulary_)
n_size = len(G.nodes())
e_size = len(G.edges())
print("There are %i nodes and %i edges" % (n_size, e_size))
nx.write_graphml_lxml(G, "pt.graphml")
# Compute the degree centralities of G
deg_cent = nx.degree_centrality(G)
# Compute the maximum degree centrality
max_dc = max(deg_cent.values())
most_central_words = [n for n, dc in deg_cent.items() if dc == max_dc]
# Print the most central word(s)
print(most_central_words)
###Output
['governo']
###Markdown
Clique
###Code
largest_clique = sorted(nx.find_cliques(G), key=lambda x:len(x))[-1]
G_lc = G.subgraph(largest_clique)
for n in G_lc.nodes():
G_lc.node[n]['degree centrality'] = deg_cent[n]
# Create the CircosPlot object
c = CircosPlot(G_lc, node_labels=True, node_grouping='degree centrality',
node_order='degree centrality')
# Draw the CircosPlot to the screen
c.draw()
plt.show()
###Output
_____no_output_____
###Markdown
NOVO
###Code
novo = dataset.query('party == "NOVO"')
vec_novo = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC,decode_error='replace' ,
stop_words=STOP_WORDS, ngram_range=(2,5),
encoding='latin1', strip_accents='unicode')
X = vec_novo.fit_transform(novo.speech)
G = generate_graph(vec_novo.vocabulary_)
n_size = len(G.nodes())
e_size = len(G.edges())
print("There are %i nodes and %i edges" % (n_size, e_size))
nx.write_graphml_lxml(G, "novo.graphml")
###Output
There are 1937 nodes and 13443 edges
###Markdown
Centrality
###Code
# Compute the degree centralities of G
deg_cent = nx.degree_centrality(G)
# Compute the maximum degree centrality
max_dc = max(deg_cent.values())
most_central_words = [n for n, dc in deg_cent.items() if dc == max_dc]
# Print the most central word(s)
print(most_central_words)
deg_cent.values()
# Plot the degree centrality distribution of the speech graph
plt.hist(list(nx.degree_centrality(G).values()))
plt.show()
# Plot the betweenness centrality distribution of the graph
# plt.hist(list(nx.betweenness_centrality(G).values()))
# plt.show()
# Compute the betweenness centralities of G
bet_cent = nx.betweenness_centrality(G)
# Compute the maximum betweenness centrality
max_bc = max(bet_cent.values())
most_central_words = [n for n, bc in bet_cent.items() if bc == max_bc]
# Print the most central word(s) by betweenness
print(most_central_words)
sorted_bc = sorted(bet_cent.values())
top_ten_bc = sorted_bc[:10]
top_nodes = [n for n, bc in bet_cent.items() if (bc==max_bc) or (bc == top_ten_bc[1])]
G_bc = G.subgraph(top_nodes)
# Create the CircosPlot object
c = CircosPlot(G_bc, node_labels=True)
# Draw the CircosPlot to the screen
c.draw()
plt.show()
###Output
_____no_output_____
###Markdown
Clique
###Code
largest_clique = sorted(nx.find_cliques(G), key=lambda x:len(x))[-1]
G_lc = G.subgraph(largest_clique)
for n in G_lc.nodes():
G_lc.node[n]['degree centrality'] = deg_cent[n]
# Create the CircosPlot object
c = CircosPlot(G_lc, node_labels=True, node_grouping='degree centrality',
node_order='degree centrality')
# Draw the CircosPlot to the screen
c.draw()
plt.show()
from nxviz import ArcPlot
i = 0
G_lmc = G_lc.copy()
# Go out 1 degree of separation
for node in list(G_lmc.nodes()):
if((deg_cent[node] > 0.09) and (i < 10)):
i+=1
G_lmc.add_nodes_from(G.neighbors(node))
G_lmc.add_edges_from(zip([node]*len(list(G.neighbors(node))), G.neighbors(node)))
# Record each node's degree centrality score
for n in G_lmc.nodes():
G_lmc.node[n]['degree centrality'] = deg_cent[n]
# Create the ArcPlot object: a
a = ArcPlot(G_lmc, node_order='degree centrality', node_labels=True)
# Draw the ArcPlot to the screen
a.draw()
plt.show()
###Output
_____no_output_____
###Markdown
PDT
###Code
pdt = dataset.query('party == "PDT"')
vec_pdt = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC,decode_error='replace' ,
stop_words=STOP_WORDS, ngram_range=(2,5),
encoding='latin1', strip_accents='unicode')
X = vec_pdt.fit_transform(pdt.speech)
G = generate_graph(vec_pdt.vocabulary_)
n_size = len(G.nodes())
e_size = len(G.edges())
print("There are %i nodes and %i edges" % (n_size, e_size))
nx.write_graphml_lxml(G, "pdt.graphml")
###Output
There are 4049 nodes and 34459 edges
###Markdown
Centrality
###Code
# Compute the degree centralities of G
deg_cent = nx.degree_centrality(G)
# Compute the maximum degree centrality
max_dc = max(deg_cent.values())
most_central_words = [n for n, dc in deg_cent.items() if dc == max_dc]
# Print the most central word(s)
print(most_central_words)
###Output
['governo']
###Markdown
Clique
###Code
largest_clique = sorted(nx.find_cliques(G), key=lambda x:len(x))[-1]
G_lc = G.subgraph(largest_clique)
for n in G_lc.nodes():
G_lc.node[n]['degree centrality'] = deg_cent[n]
# Create the CircosPlot object
c = CircosPlot(G_lc, node_labels=True, node_grouping='degree centrality',
node_order='degree centrality')
# Draw the CircosPlot to the screen
c.draw()
plt.show()
###Output
_____no_output_____ |
site/ko/guide/keras/train_and_evaluate.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Training and evaluation with the built-in methods TensorFlow.org에서 보기 Google Colab에서 실행 GitHub에서 소스 보기 노트북 다운로드 설정
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
###Output
_____no_output_____
###Markdown
시작하기이 안내서는 훈련 및 유효성 검증을 위해 내장 API를 사용할 때의 훈련, 평가 및 예측 (추론) 모델 (예 : `model.fit()` , `model.evaluate()` , `model.predict()` )에 대해 설명합니다.고유한 훈련 단계 함수를 지정하면서 `fit()`을 사용하려면 " `fit()`에서 이루어지는 작업 사용자 정의하기" 가이드를 참조하세요.고유한 훈련 및 평가 루프를 처음부터 작성하려면 ["처음부터 훈련 루프 작성"](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch/) 안내서를 참조하십시오.일반적으로, 내장 루프를 사용하든 직접 작성하든 관계없이 모델 훈련 및 유효성 검사는 모든 종류의 Keras 모델(순차 모델, Functional API로 작성된 모델 및 모델 하위 클래스화를 통해 처음부터 작성된 모델)에서 완전히 동일하게 작동합니다.이 가이드는 분산 교육에 대해서는 다루지 않습니다. 분산 교육에 대해서는 [멀티 GPU 및 분산 교육 안내서를](https://keras.io/guides/distributed_training/) 참조하십시오. API 개요 : 첫 번째 엔드 투 엔드 예제데이터를 모델의 내장 훈련 루프로 전달할 때는 **NumPy 배열**(데이터가 작고 메모리에 맞는 경우) 또는 **`tf.data Dataset` 객체**를 사용해야 합니다. 다음 몇 단락에서는 옵티마이저, 손실 및 메트릭을 사용하는 방법을 보여주기 위해 MNIST 데이터세트를 NumPy 배열로 사용하겠습니다.다음 모델을 고려해 보겠습니다 (여기서는 Functional API를 사용하여 빌드하지만 Sequential 모델 또는 하위 클래스 모델 일 수도 있음).
###Code
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
###Output
_____no_output_____
###Markdown
일반적인 엔드 투 엔드 워크 플로는 다음과 같이 구성되어 있습니다.- 학습- 원래 교육 데이터에서 생성 된 홀드 아웃 세트에 대한 유효성 검사- 테스트 데이터에 대한 평가이 예에서는 MNIST 데이터를 사용합니다.
###Code
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
###Output
_____no_output_____
###Markdown
훈련 구성(최적화 프로그램, 손실, 메트릭)을 지정합니다.
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
###Output
_____no_output_____
###Markdown
`fit()`를 호출하여 데이터를 "batch_size" 크기의 "배치"로 분할하고 지정된 수의 "epoch"에 대해 전체 데이터세트를 반복 처리하여 모델을 훈련시킵니다.
###Code
print("Fit model on training data")
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)
###Output
_____no_output_____
###Markdown
반환되는 "이력" 객체는 훈련 중 손실 값과 메트릭 값에 대한 레코드를 유지합니다.
###Code
history.history
###Output
_____no_output_____
###Markdown
`evaluate()`를 통해 테스트 데이터에 대해 모델을 평가합니다.
###Code
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)
###Output
_____no_output_____
###Markdown
이제이 워크 플로의 각 부분을 자세히 검토하겠습니다. `compile()` 메소드 : 손실, 메트릭 및 최적화 프로그램 지정`fit()` 으로 모델을 학습하려면 손실 함수, 최적화 프로그램 및 선택적으로 모니터링 할 일부 메트릭을 지정해야합니다.이것을 `compile()` 메소드의 인수로 모델에 전달합니다.
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
###Output
_____no_output_____
###Markdown
`metrics` 인수는 목록이어야합니다. 모델에는 여러 개의 메트릭이있을 수 있습니다.모델에 여러 개의 출력이있는 경우 각 출력에 대해 서로 다른 손실 및 메트릭을 지정하고 모델의 총 손실에 대한 각 출력의 기여도를 조정할 수 있습니다. 이에 대한 자세한 내용은 **"다중 입력, 다중 출력 모델로 데이터 전달"** 섹션에서 확인할 수 있습니다.기본 설정에 만족하면 대부분의 경우 최적화, 손실 및 메트릭을 문자열 식별자를 통해 바로 가기로 지정할 수 있습니다.
###Code
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
###Output
_____no_output_____
###Markdown
나중에 재사용하기 위해 모델 정의와 컴파일 단계를 함수에 넣겠습니다. 이 안내서의 여러 예에서 여러 번 호출합니다.
###Code
def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
###Output
_____no_output_____
###Markdown
많은 내장 옵티 마이저, 손실 및 메트릭을 사용할 수 있습니다일반적으로 고유한 손실, 메트릭 또는 최적화 프로그램을 처음부터 새로 만들 필요가 없는데, Keras API에 필요한 것들이 이미 들어 있을 개연성이 높기 때문입니다.옵티마이저- `SGD()` (모멘텀이 있거나 없음)- `RMSprop()`- `Adam()`- 기타손실:- `MeanSquaredError()`- `KLDivergence()`- `CosineSimilarity()`- 기타메트릭- `AUC()`- `Precision()`- `Recall()`- 기타 관례 손실Keras로 커스텀 손실을 제공하는 두 가지 방법이 있습니다. 첫 번째 예는 입력 `y_true` 및 `y_pred` 를 받아들이는 함수를 만듭니다. 다음 예는 실제 데이터와 예측 간의 평균 제곱 오차를 계산하는 손실 함수를 보여줍니다.
###Code
def custom_mean_squared_error(y_true, y_pred):
return tf.math.reduce_mean(tf.square(y_true - y_pred))
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)
# We need to one-hot encode the labels to use MSE
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
`y_true` 및 `y_pred` 이외의 매개 변수를 사용하는 손실 함수가 필요한 경우 `tf.keras.losses.Loss` 클래스를 서브 클래스 화하고 다음 두 메소드를 구현할 수 있습니다.- `__init__(self)` : 손실 함수 호출 중에 전달할 매개 변수를 승인합니다.- `call(self, y_true, y_pred)` : 목표 (y_true)와 모델 예측 (y_pred)을 사용하여 모델의 손실을 계산평균 제곱 오차를 사용하려고하지만 예측 값을 0.5에서 멀어지게하는 용어가 추가되었다고 가정 해 보겠습니다 (우리는 범주 형 목표가 원-핫 인코딩되고 0과 1 사이의 값을 취하는 것으로 가정). 이렇게하면 모델이 너무 자신감이없는 인센티브가 생겨 과적 합을 줄이는 데 도움이 될 수 있습니다 (시도 할 때까지 작동하는지 알 수 없음).방법은 다음과 같습니다.
###Code
class CustomMSE(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
맞춤 측정 항목API의 일부가 아닌 메트릭이 필요한 경우 `tf.keras.metrics.Metric` 클래스를 서브 클래 싱하여 사용자 지정 메트릭을 쉽게 만들 수 있습니다. 4 가지 방법을 구현해야합니다.- `__init__(self)` . 여기서 메트릭에 대한 상태 변수를 만듭니다.- `update_state(self, y_true, y_pred, sample_weight=None)` 대상 y_true 및 모델 예측 y_pred를 사용하여 상태 변수를 업데이트합니다.- `result(self)` : 상태 변수를 사용하여 최종 결과를 계산합니다.- `reset_states(self)` : 메트릭의 상태를 다시 초기화합니다.경우에 따라 결과 계산이 매우 비싸고 주기적으로 만 수행되기 때문에 상태 업데이트와 결과 계산은 각각 `update_state()` 와 `result()` 에서 별도로 유지됩니다.다음은 `CategoricalTruePositives` 메트릭을 구현하는 방법을 보여주는 간단한 예제입니다.이 메트릭은 주어진 클래스에 속하는 것으로 올바르게 분류 된 샘플 수를 계산합니다.
###Code
class CategoricalTruePositives(keras.metrics.Metric):
def __init__(self, name="categorical_true_positives", **kwargs):
super(CategoricalTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="ctp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
values = tf.cast(y_true, "int32") == tf.cast(y_pred, "int32")
values = tf.cast(values, "float32")
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, "float32")
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.0)
model = get_uncompiled_model()
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)
###Output
_____no_output_____
###Markdown
표준 서명에 맞지 않는 손실 및 메트릭 처리하기거의 대부분의 손실과 메트릭은 `y_true` 및 `y_pred`에서 계산할 수 있습니다(여기서 `y_pred`가 모델의 출력). 그러나 모두가 그런 것은 아닙니다. 예를 들어, 정규화 손실은 레이어의 활성화만 요구할 수 있으며(이 경우 대상이 없음) 이 활성화는 모델 출력이 아닐 수 있습니다.이러한 경우 사용자 정의 레이어의 호출 메서드 내에서 `self.add_loss(loss_value)`를 호출할 수 있습니다. 이러한 방식으로 추가된 손실은 훈련 중 "주요" 손실(`compile()`로 전달되는 손실)에 추가됩니다. 다음은 활동 정규화를 추가하는 간단한 예입니다. 참고로 활동 정규화는 모든 Keras 레이어에 내장되어 있으며 이 레이어는 구체적인 예를 제공하기 위한 것입니다.
###Code
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
`add_metric()` 사용하여 메트릭 값 로깅에 대해 동일한 작업을 수행 할 수 있습니다.
###Code
class MetricLoggingLayer(layers.Layer):
def call(self, inputs):
# The `aggregation` argument defines
# how to aggregate the per-batch values
# over each epoch:
# in this case we simply average them.
self.add_metric(
keras.backend.std(inputs), name="std_of_activation", aggregation="mean"
)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert std logging as a layer.
x = MetricLoggingLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
[Functional API](https://www.tensorflow.org/guide/keras/functional/) 에서 `model.add_loss(loss_tensor)` 또는 `model.add_metric(metric_tensor, name, aggregation)` 호출 할 수도 있습니다.다음은 간단한 예입니다.
###Code
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x2 = layers.Dense(64, activation="relu", name="dense_2")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
model.add_loss(tf.reduce_sum(x1) * 0.1)
model.add_metric(keras.backend.std(x1), name="std_of_activation", aggregation="mean")
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
`add_loss()` 를 통해 손실을 전달하면 모델에는 이미 손실이 있으므로 손실 함수없이 `compile()` 을 호출 할 수 있습니다.다음 `LogisticEndpoint` 레이어를 생각해 보겠습니다. 이 레이어는 입력으로 targets 및 logits를 받아들이고 `add_loss()`를 통해 교차 엔트로피 손실을 추적합니다. 또한 `add_metric()`를 통해 분류 정확도도 추적합니다.
###Code
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
다음과 같이 `loss` 인수없이 컴파일 된 두 개의 입력 (입력 데이터 및 대상)이있는 모델에서 사용할 수 있습니다.
###Code
import numpy as np
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(logits, targets)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam") # No loss argument!
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
###Output
_____no_output_____
###Markdown
다중 입력 모델 교육에 대한 자세한 내용은 **다중 입력, 다중 출력 모델로 데이터 전달** 섹션을 참조하십시오. 유효성 검사 홀드아웃 세트를 자동으로 분리하기본 첫 번째 엔드 투 엔드 예제에서, 우리는 `validation_data` 인수를 사용하여 NumPy 배열의 튜플 `(x_val, y_val)` 을 모델에 전달하여 각 에포크의 끝에서 유효성 검증 손실 및 유효성 검증 메트릭을 평가합니다.또 다른 옵션: 인수 `validation_split`를 사용하여 유효성 검사 목적으로 훈련 데이터의 일부를 자동으로 예약할 수 있습니다. 인수 값은 유효성 검사를 위해 예약할 데이터 비율을 나타내므로 0보다 크고 1보다 작은 값으로 설정해야 합니다. 예를 들어, `validation_split=0.2`는 "유효성 검사를 위해 데이터의 20%를 사용"한다는 의미이고`validation_split=0.6`은 "유효성 검사를 위해 데이터의 60%를 사용"한다는 의미입니다.유효성을 계산하는 방법은 셔플 링 전에 맞춤 호출로 수신 한 배열의 마지막 x % 샘플을 가져 오는 것입니다.NumPy 데이터를 학습 할 때 `validation_split` 만 사용할 수 있습니다.
###Code
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)
###Output
_____no_output_____
###Markdown
tf.data 데이터 세트의 교육 및 평가앞서 몇 단락에 걸쳐 손실, 메트릭 및 옵티마이저를 처리하는 방법을 살펴보았으며, 데이터가 NumPy 배열로 전달될 때 fit에서 `validation_data` 및 `validation_split` 인수를 사용하는 방법도 알아보았습니다.이제 데이터가 `tf.data.Dataset` 객체의 형태로 제공되는 경우를 살펴 보겠습니다.`tf.data` API는 빠르고 확장 가능한 방식으로 데이터를 로드하고 사전 처리하기 위한 TensorFlow 2.0의 유틸리티 세트입니다.`Datasets` 생성에 대한 자세한 설명은 [tf.data 설명서](https://www.tensorflow.org/guide/data)를 참조하세요.`Dataset` 인스턴스를 메서드 `fit()`, `evaluate()` 및 `predict()`로 직접 전달할 수 있습니다.
###Code
model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))
###Output
_____no_output_____
###Markdown
데이터세트는 각 epoch의 끝에서 재설정되므로 다음 epoch에서 재사용할 수 있습니다.이 데이터세트의 특정 배치 수에 대해서만 훈련을 실행하려면 다음 epoch로 이동하기 전에 이 데이터세트를 사용하여 모델이 실행해야 하는 훈련 단계의 수를 지정하는 `steps_per_epoch` 인수를 전달할 수 있습니다.이렇게 하면 각 epoch가 끝날 때 데이터세트가 재설정되지 않고 다음 배치를 계속 가져오게 됩니다. 무한 반복되는 데이터세트가 아니라면 결국 데이터세트의 데이터가 고갈됩니다.
###Code
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use the 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)
###Output
_____no_output_____
###Markdown
유효성 검사 데이터 집합 사용`fit()` 에서 `Dataset` 인스턴스를 `validation_data` 인수로 전달할 수 있습니다.
###Code
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=1, validation_data=val_dataset)
###Output
_____no_output_____
###Markdown
각 시대가 끝날 때 모델은 유효성 검사 데이터 집합을 반복하고 유효성 검사 손실 및 유효성 검사 메트릭을 계산합니다.이 데이터세트의 특정 배치 수에 대해서만 유효성 검사를 실행하려면 유효성 검사를 중단하고 다음 epoch로 넘어가기 전에 유효성 검사 데이터세트에서 모델이 실행해야 하는 유효성 검사 단계의 수를 지정하는 `validation_steps` 인수를 전달할 수 있습니다.
###Code
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(
train_dataset,
epochs=1,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset,
validation_steps=10,
)
###Output
_____no_output_____
###Markdown
유효성 검사 데이터 세트는 사용 후마다 재설정되므로 항상 에포크에서 에포크까지 동일한 샘플을 평가하게됩니다.인수 `validation_split`(훈련 데이터로부터 홀드아웃 세트 생성)는 `Dataset` 객체로 훈련할 때는 지원되지 않는데, 이를 위해서는 데이터세트 샘플을 인덱싱할 수 있어야 하지만 `Dataset` API에서는 일반적으로 이것이 불가능하기 때문입니다. 지원되는 다른 입력 형식NumPy 배열, 즉시 실행 텐서 및 TensorFlow `Datasets` 외에도 Pandas 데이터프레임을 사용하거나 데이터 및 레이블의 배치를 생성하는 Python 생성기에서 Keras 모델을 훈련할 수 있습니다.특히, `keras.utils.Sequence` 클래스는 멀티스레딩을 인식하고 셔플이 가능한 Python 데이터 생성기를 빌드하기 위한 간단한 인터페이스를 제공합니다.일반적으로 다음을 사용하는 것이 좋습니다.- 데이터가 작고 메모리에 맞는 경우 NumPy 입력 데이터- 큰 데이터세트가 있고 분산 훈련을 수행해야 하는 경우 `Dataset` 객체- 큰 데이터세트가 있고 TensorFlow에서 수행할 수 없는 많은 사용자 정의 Python 측 처리를 수행해야 하는 경우(예: 데이터 로드 또는 사전 처리를 위해 외부 라이브러리에 의존하는 경우) `Sequence` 객체 `keras.utils.Sequence` 객체를 입력으로 사용하기`keras.utils.Sequence`는 두 가지 중요한 속성을 가진 Python 생성기를 얻기 위해 하위 클래스화를 수행할 수 있는 유틸리티입니다.- 멀티 프로세싱과 잘 작동합니다.- 셔플할 수 있습니다(예: `fit()`에서 `shuffle=True`를 전달하는 경우).`Sequence` 는 두 가지 방법을 구현해야합니다.- `__getitem__`- `__len__``__getitem__` 메소드는 완전한 배치를 리턴해야합니다. 신기원 사이의 데이터 세트를 수정하려면 `on_epoch_end` 구현할 수 있습니다.간단한 예를 들자면 다음과 같습니다.```pythonfrom skimage.io import imreadfrom skimage.transform import resizeimport numpy as np Here, `filenames` is list of path to the images and `labels` are the associated labels.class CIFAR10Sequence(Sequence): def __init__(self, filenames, labels, batch_size): self.filenames, self.labels = filenames, labels self.batch_size = batch_size def __len__(self): return int(np.ceil(len(self.filenames) / float(self.batch_size))) def __getitem__(self, idx): batch_x = self.filenames[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(filename), (200, 200)) for filename in batch_x]), np.array(batch_y)sequence = CIFAR10Sequence(filenames, labels, batch_size)model.fit(sequence, epochs=10)``` 샘플 가중치 및 클래스 가중치 사용기본 설정을 사용하면 샘플의 무게가 데이터 세트의 빈도에 따라 결정됩니다. 샘플 빈도와 관계없이 데이터에 가중치를 부여하는 방법에는 두 가지가 있습니다.- 클래스 가중치- 샘플 무게 클래스 가중치이 가중치는 `Model.fit()`에 대한 `class_weight` 인수로 사전을 전달하여 설정합니다. 이 사전은 클래스 인덱스를 이 클래스에 속한 샘플에 사용해야 하는 가중치에 매핑합니다.이 방법은 샘플링을 다시 수행하지 않고 클래스의 균형을 맞추거나 특정 클래스에 더 중요한 모델을 훈련시키는 데 사용할 수 있습니다.예를 들어, 데이터에서 클래스 "0"이 클래스 "1"로 표시된 것의 절반인 경우 `Model.fit(..., class_weight={0: 1., 1: 0.5})`을 사용할 수 있습니다. 다음은 클래스 5(MNIST 데이터세트에서 숫자 "5")의 올바른 분류에 더 많은 중요성을 두도록 클래스 가중치 또는 샘플 가중치를 사용하는 NumPy 예입니다.
###Code
import numpy as np
class_weight = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.0,
6: 1.0,
7: 1.0,
8: 1.0,
9: 1.0,
}
print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
샘플 무게세밀한 제어를 위해 또는 분류기를 작성하지 않는 경우 "샘플 가중치"를 사용할 수 있습니다.- NumPy 데이터에서 학습하는 경우 : `sample_weight` 인수를 `Model.fit()` .- `tf.data` 또는 다른 종류의 반복자에서 훈련 할 때 : Yield `(input_batch, label_batch, sample_weight_batch)` 튜플."샘플 가중치"배열은 배치에서 각 샘플이 총 손실을 계산하는 데 필요한 가중치를 지정하는 숫자 배열입니다. 불균형 분류 문제 (거의 보이지 않는 클래스에 더 많은 가중치를 부여하는 아이디어)에 일반적으로 사용됩니다.사용 된 가중치가 1과 0 인 경우, 어레이는 손실 함수에 대한 *마스크* 로 사용될 수 있습니다 (전체 손실에 대한 특정 샘플의 기여를 완전히 버림).
###Code
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)
###Output
_____no_output_____
###Markdown
일치하는 `Dataset` 예는 다음과 같습니다.
###Code
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=1)
###Output
_____no_output_____
###Markdown
다중 입력, 다중 출력 모델로 데이터 전달이전 예에서는 단일 입력(형상 `(764,)`의 텐서)과 단일 출력(형상 `(10,)`의 예측 텐서)이 있는 모델을 고려했습니다. 그렇다면 입력 또는 출력이 여러 개인 모델은 어떨까요?shape `(32, 32, 3)` ( `(height, width, channels)` 입력과 shape `(None, 10)` 의 시계열 입력 `(timesteps, features)` 하십시오. 우리의 모델은이 입력들의 조합으로부터 계산 된 두 개의 출력을 가질 것입니다 : "점수"(모양 `(1,)` )와 5 개의 클래스 (모양 `(5,)` )에 대한 확률 분포.
###Code
image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)
model = keras.Model(
inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)
###Output
_____no_output_____
###Markdown
이 모델을 플로팅하여 여기서 수행중인 작업을 명확하게 확인할 수 있습니다 (플롯에 표시된 셰이프는 샘플 별 셰이프가 아니라 배치 셰이프 임).
###Code
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
###Output
_____no_output_____
###Markdown
컴파일 타임에 손실 함수를 목록으로 전달하여 출력마다 다른 손실을 지정할 수 있습니다.
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)
###Output
_____no_output_____
###Markdown
모델에 단일 손실 함수만 전달하는 경우, 모든 출력에 동일한 손실 함수가 적용됩니다(여기서는 적합하지 않음).메트릭의 경우도 마찬가지입니다.
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
metrics=[
[
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
[keras.metrics.CategoricalAccuracy()],
],
)
###Output
_____no_output_____
###Markdown
출력 레이어에 이름을 지정 했으므로 dict를 통해 출력 당 손실 및 메트릭을 지정할 수도 있습니다.
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
)
###Output
_____no_output_____
###Markdown
출력이 두 개 이상인 경우 명시적 이름과 사전을 사용하는 것이 좋습니다.`loss_weights` 인수를 사용하여 출력별 손실에 서로 다른 가중치를 부여할 수 있습니다(예를 들어, 클래스 손실에 2x의 중요도를 부여하여 이 예에서 "score" 손실에 우선권을 줄 수 있음).
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
loss_weights={"score_output": 2.0, "class_output": 1.0},
)
###Output
_____no_output_____
###Markdown
이러한 출력이 예측 용이지만 훈련 용이 아닌 경우 특정 출력에 대한 손실을 계산하지 않도록 선택할 수도 있습니다.
###Code
# List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()],
)
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={"class_output": keras.losses.CategoricalCrossentropy()},
)
###Output
_____no_output_____
###Markdown
적합하게 다중 입력 또는 다중 출력 모델에 데이터를 전달하는 것은 컴파일에서 손실 함수를 지정하는 것과 유사한 방식으로 작동합니다. **NumPy 배열 목록을** 전달할 수 있습니다 (손실 함수를 수신 한 출력에 1 : 1 매핑). **출력 이름을 NumPy 배열에 매핑합니다** .
###Code
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)
# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1)
# Alternatively, fit on dicts
model.fit(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
batch_size=32,
epochs=1,
)
###Output
_____no_output_____
###Markdown
`Dataset` 사용 사례는 다음과 같습니다. NumPy 배열에서 수행 한 것과 유사하게 `Dataset` 은 튜플 튜플을 반환해야합니다.
###Code
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
)
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=1)
###Output
_____no_output_____
###Markdown
콜백 사용하기Keras의 콜백은 훈련 중 다른 시점(epoch의 시작, 배치의 끝, epoch의 끝 등)에서 호출되며 다음과 같은 동작을 구현하는 데 사용할 수 있는 객체입니다.- 훈련 중 서로 다른 시점에서 유효성 검사 수행(내장된 epoch당 유효성 검사에서 더욱 확장)- 정기적으로 또는 특정 정확도 임계값을 초과할 때 모델 검사점 설정- 훈련이 정체 된 것처럼 보일 때 모델의 학습 속도 변경- 훈련이 정체 된 것처럼 보일 때 최상위 레이어의 미세 조정- 교육이 종료되거나 특정 성능 임계 값을 초과 한 경우 전자 메일 또는 인스턴트 메시지 알림 보내기- 기타콜백은 `fit()` 에 대한 호출에 목록으로 전달 될 수 있습니다.
###Code
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
###Output
_____no_output_____
###Markdown
많은 내장 콜백을 사용할 수 있습니다- `ModelCheckpoint` : 주기적으로 모델을 저장합니다.- `EarlyStopping`: 훈련이 더 이상 유효성 검사 메트릭을 개선하지 못하는 경우 훈련을 중단합니다.- `TensorBoard` : 시각화 할 수 있습니다 정기적으로 쓰기 모델 로그 [TensorBoard](https://www.tensorflow.org/tensorboard) (섹션 "시각화"에서 자세한 내용).- `CSVLogger` : 손실 및 메트릭 데이터를 CSV 파일로 스트리밍합니다.- 기타전체 목록은 [콜백 설명서](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/) 를 참조하십시오. 자신의 콜백 작성기본 클래스 `keras.callbacks.Callback` 을 확장하여 사용자 정의 콜백을 작성할 수 있습니다. 콜백은 클래스 속성 `self.model` 통해 연관된 모델에 액세스 할 수 있습니다.[사용자 정의 콜백을 작성하기 위한 전체 가이드](https://www.tensorflow.org/guide/keras/custom_callback/)를 꼭 읽어보세요. 다음은 훈련 중 배치별 손실 값 목록을 저장하는 간단한 예입니다.다음은 훈련 중 배치 별 손실 값 목록을 저장하는 간단한 예입니다.
###Code
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))
###Output
_____no_output_____
###Markdown
모델 검사점 설정하기상대적으로 큰 데이터세트에 대한 모델을 훈련시킬 때는 모델의 검사점을 빈번하게 저장하는 것이 중요합니다.이를 수행하는 가장 쉬운 방법은 `ModelCheckpoint` 콜백을 사용하는 것입니다.
###Code
model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath="mymodel_{epoch}",
save_best_only=True, # Only save a model if `val_loss` has improved.
monitor="val_loss",
verbose=1,
)
]
model.fit(
x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2
)
###Output
_____no_output_____
###Markdown
`ModelCheckpoint` 콜백을 사용하여 내결함성을 구현할 수 있습니다. 훈련이 무작위로 중단 된 경우 모델의 마지막 저장된 상태에서 훈련을 다시 시작할 수있는 기능. 기본 예는 다음과 같습니다.
###Code
import os
# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
model = make_or_restore_model()
callbacks = [
# This callback saves a SavedModel every 100 batches.
# We include the training loss in the saved model name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/ckpt-loss={loss:.2f}", save_freq=100
)
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
###Output
_____no_output_____
###Markdown
또한 모델 저장 및 복원을 위해 자체 콜백을 작성하십시오.직렬화 및 저장에 대한 전체 안내서는 [모델 저장 및 직렬화 안내서를](https://www.tensorflow.org/guide/keras/save_and_serialize/) 참조하십시오. 학습 속도 일정 사용하기딥 러닝 모델을 훈련 할 때 일반적인 패턴은 훈련이 진행됨에 따라 점차적으로 학습을 줄이는 것입니다. 이것을 일반적으로 "학습률 감소"라고합니다.학습 붕괴 스케줄은 정적 인 (현재 에포크 또는 현재 배치 인덱스의 함수로서 미리 고정됨) 또는 동적 (모델의 현재 행동, 특히 검증 손실에 대응) 일 수있다. 옵티마이저로 일정 전달하기옵티 마이저에서 schedule 객체를 `learning_rate` 인수로 전달하여 정적 학습 속도 감소 스케줄을 쉽게 사용할 수 있습니다.
###Code
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
###Output
_____no_output_____
###Markdown
`ExponentialDecay` , `PiecewiseConstantDecay` , `PolynomialDecay` 및 `InverseTimeDecay` 와 같은 몇 가지 기본 제공 일정을 사용할 수 있습니다. 콜백을 사용하여 동적 학습 속도 일정 구현옵티마이저가 유효성 검사 메트릭에 액세스할 수 없으므로 이러한 일정 객체로는 동적 학습률 일정(예: 유효성 검사 손실이 더 이상 개선되지 않을 때 학습률 감소)을 달성할 수 없습니다.그러나 콜백은 유효성 검사 메트릭을 포함해 모든 메트릭에 액세스할 수 있습니다! 따라서 옵티마이저에서 현재 학습률을 수정하는 콜백을 사용하여 이 패턴을 달성할 수 있습니다. 실제로 이 부분이`ReduceLROnPlateau` 콜백으로 내장되어 있습니다. 훈련 중 손실 및 메트릭 시각화하기교육 중에 모델을 주시하는 가장 좋은 방법은 로컬에서 실행할 수있는 브라우저 기반 응용 프로그램 인 [TensorBoard](https://www.tensorflow.org/tensorboard) 를 사용하는 것입니다.- 교육 및 평가를위한 손실 및 지표의 라이브 플롯- (옵션) 레이어 활성화 히스토그램 시각화- (옵션) `Embedding` 레이어에서 학습한 포함된 공간의 3D 시각화pip와 함께 TensorFlow를 설치한 경우, 명령줄에서 TensorBoard를 시작할 수 있습니다.```tensorboard --logdir=/full_path_to_your_logs``` TensorBoard 콜백 사용하기TensorBoard를 Keras 모델 및 fit 메서드와 함께 사용하는 가장 쉬운 방법은 `TensorBoard` 콜백입니다.가장 간단한 경우로, 콜백에서 로그를 작성할 위치만 지정하면 바로 쓸 수 있습니다.
###Code
keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_logs",
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
update_freq="epoch",
) # How often to write logs (default: once per epoch)
###Output
_____no_output_____ |
code/draft-notebooks/data-exploration.ipynb | ###Markdown
Importing Libraries
###Code
import pandas as pd
import numpy as np
import seaborn as sns
###Output
_____no_output_____
###Markdown
Reading Data
###Code
patient_info = pd.read_csv('../../data/PatientInfo.csv')
patient_info.head()
patient_info.describe()
patient_info.hist();
sns.heatmap(patient_info.corr(), annot=True, fmt=".2f");
sns.pairplot(patient_info.select_dtypes(include=['float']).dropna());
###Output
_____no_output_____ |
notebooks/losses_evaluation/Dstripes/basic/studentT/convolutional/tVAE/Dstripest05VAE_Convolutional_reconst_1ellwlb_01ssim.ipynb | ###Markdown
Settings
###Code
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
sys.path.append('..'+sep_local+'..')
print(sep_local)
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Dataset loading
###Code
dataset_name='Dstripes'
import tensorflow as tf
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
_outputs_shape
###Output
_____no_output_____
###Markdown
Model's Layers definition
###Code
units=20
c=50
menc_lays = [
tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latents_dim)
]
venc_lays = [
tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latents_dim)
]
dec_lays = [
tf.keras.layers.Dense(units=units*c*c, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(c , c, units)),
tf.keras.layers.Conv2DTranspose(filters=units, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
tf.keras.layers.Conv2DTranspose(filters=units*3, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=(1, 1), padding="SAME")
]
###Output
_____no_output_____
###Markdown
Model definition
###Code
model_name = dataset_name+'VAE_Convolutional_reconst_1ell_01ssmi'
experiments_dir='experiments'+sep_local+model_name
from training.autoencoding_basic.autoencoders.tVAE import tVAE as AE
inputs_shape=image_size
variables_params = \
[
{
'name': 'inference_mean',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': menc_lays
}
,
{
'name': 'inference_logvariance',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': venc_lays
}
,
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
#to restore trained model, set filepath=_restore
ae = AE(
name=model_name,
df=0.5,
latents_dim=latents_dim,
batch_size=batch_size,
variables_params=variables_params,
filepath=None
)
from evaluation.quantitive_metrics.structural_similarity import prepare_ssim_multiscale
from statistical.losses_utilities import similarity_to_distance
from statistical.ae_losses import expected_loglikelihood_with_lower_bound as ellwlb
ae.compile(loss={'x_logits': lambda x_true, x_logits: ellwlb(x_true, x_logits)+ 0.1*similarity_to_distance(prepare_ssim_multiscale([ae.batch_size]+ae.get_inputs_shape()))(x_true, x_logits)})
###Output
_____no_output_____
###Markdown
Callbacks
###Code
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
###Output
_____no_output_____
###Markdown
Model Training
###Code
from training.callbacks.disentangle_supervied import DisentanglementSuperviedMetrics
from training.callbacks.disentangle_unsupervied import DisentanglementUnsuperviedMetrics
gts_mertics = DisentanglementSuperviedMetrics(
ground_truth_data=eval_dataset,
representation_fn=lambda x: ae.encode(x),
random_state=np.random.RandomState(0),
file_Name=gts_csv,
num_train=10000,
num_test=100,
batch_size=batch_size,
continuous_factors=False,
gt_freq=10
)
gtu_mertics = DisentanglementUnsuperviedMetrics(
ground_truth_data=eval_dataset,
representation_fn=lambda x: ae.encode(x),
random_state=np.random.RandomState(0),
file_Name=gtu_csv,
num_train=20000,
num_test=500,
batch_size=batch_size,
gt_freq=10
)
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=int(1e4),
epochs=int(1e6),
verbose=2,
callbacks=[ es, ms, csv_log, sg, gts_mertics, gtu_mertics],
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=int(1e4)
)
###Output
_____no_output_____
###Markdown
Model Evaluation inception_score
###Code
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
###Output
_____no_output_____
###Markdown
Frechet_inception_distance
###Code
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
###Output
_____no_output_____
###Markdown
perceptual_path_length_score
###Code
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
###Output
_____no_output_____
###Markdown
precision score
###Code
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
###Output
_____no_output_____
###Markdown
recall score
###Code
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
###Output
_____no_output_____
###Markdown
Image Generation image reconstruction Training dataset
###Code
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
with Randomness
###Code
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
Complete Randomness
###Code
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
from training.generators.image_generation_testing import interpolate_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
###Output
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
|
models/ANN/.ipynb_checkpoints/ANNtfidf-checkpoint.ipynb | ###Markdown
 This module trains an artificial neural network (ANN) on TF-IDF features extracted from the statements
###Code
#make necessary imports
import numpy as np
import pandas as pd
import itertools
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
#Read the data
df=pd.read_csv('../../datasets/liar_tweaked/trainvectordata.csv')
testdf=pd.read_csv('../../datasets/liar_tweaked/testvectordata.csv')
validdf=pd.read_csv('../../datasets/liar_tweaked/validvectordata.csv')
x_train,y_train=df['statement'],df['label']
x_test,y_test=testdf['statement'],testdf['label']
x_valid,y_valid=validdf['statement'],validdf['label']
#tfidf
tfidf_vectorizer=TfidfVectorizer(stop_words='english', max_df=0.7)
tfidf_train=tfidf_vectorizer.fit_transform(df['statement'])
tfidf_test=tfidf_vectorizer.transform(testdf['statement'])
tfidf_valid=tfidf_vectorizer.transform(validdf['statement'])
tfidf_train
#building the classifier
def build_classifier():
clf=Sequential()
clf.add(Dense(output_dim=500,init='uniform',activation='relu',input_dim=11915))
clf.add(Dense(output_dim=100,init='uniform',activation='relu'))
clf.add(Dense(output_dim=50,init='uniform',activation='relu'))
clf.add(Dense(output_dim=20,init='uniform',activation='relu'))
clf.add(Dense(output_dim=10,init='uniform',activation='relu'))
clf.add(Dense(output_dim=5,init='uniform',activation='relu'))
clf.add(Dense(output_dim=1,init='uniform',activation='sigmoid'))
clf.compile(optimizer='adam', loss='binary_crossentropy',metrics=['accuracy'])
return clf
#make necessary imports
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
#build ANN, use k fold cross validation
clf=KerasClassifier(build_fn=build_classifier, batch_size=10, nb_epoch=100)
accuracies=cross_val_score(estimator=clf, X=tfidf_train,y=df['label'],cv=10,n_jobs=-1)
#see accuracies
accuracies
#fit on training data and check accuracies on both test and valid data
clf.fit(tfidf_train,y_train, batch_size=10, nb_epoch=10)
y_test_pred = clf.predict(tfidf_test)
print('algorithm - test dataset accuracy - valid dataset accuracy')
print('ANNTFIDF - ' ,round(accuracy_score(y_test, y_test_pred),4), ' - ', end='')
y_test_pred = clf.predict(tfidf_valid)
print(round(accuracy_score(y_valid, y_test_pred),4))
###Output
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:4: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", input_dim=11915, units=500, kernel_initializer="uniform")`
after removing the cwd from sys.path.
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=100, kernel_initializer="uniform")`
"""
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=50, kernel_initializer="uniform")`
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:7: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=20, kernel_initializer="uniform")`
import sys
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=10, kernel_initializer="uniform")`
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:9: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=5, kernel_initializer="uniform")`
if __name__ == '__main__':
/Users/lovedeepsingh/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="sigmoid", units=1, kernel_initializer="uniform")`
# Remove the CWD from sys.path while we load stuff.
|
additional_content/analyzing_the_data/analyzing_narval.ipynb | ###Markdown
Coarse-graining NARVAL Data **For Figure 1 of the paper**
###Code
import os
import sys
import xarray as xr
import numpy as np
import pandas as pd
import importlib
import matplotlib
import matplotlib.pyplot as plt
# For psyplot
import psyplot.project as psy
import matplotlib as mpl
# %matplotlib inline
# %config InlineBackend.close_figures = False
psy.rcParams['plotter.maps.xgrid'] = False
psy.rcParams['plotter.maps.ygrid'] = False
mpl.rcParams['figure.figsize'] = [10., 8.]
path = '/pf/b/b309170/my_work/NARVAL/'
file_cg = 'for_paraview/clc_R02B04_NARVALII_2016072800_cloud_DOM01_0017_boxed_scaled.nc'
file_orig = 'for_paraview/dei4_NARVALII_2016072800_cloud_DOM01_ML_0017_clc_scaled.nc'
###Output
_____no_output_____
###Markdown
Question 1: Does the coarse-graining look right?
Of horizontal coarse-graining (psyplot): if you get the error 'ValueError: Can only plot 2-dimensional data!', you need to apply `cdo setgrid` to the file first.
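A minimal sketch of how that could be done from within the notebook (the grid-description file and NetCDF file names below are only placeholders, not files from this project):
###Code
# Hypothetical helper: attach a horizontal grid description to a NetCDF file with CDO
# so that psyplot can plot the field. File names here are assumptions.
import subprocess

def set_grid(infile, outfile, griddes='icon_grid_description.txt'):
    # cdo setgrid,<grid-description> <infile> <outfile>
    subprocess.run(['cdo', f'setgrid,{griddes}', infile, outfile], check=True)

# set_grid('clc_field.nc', 'clc_field_with_grid.nc')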
###Code
# # Note that the cloud cover scheme used was a 0-1 cloud cover scheme.
# maps = psy.plot.mapplot(os.path.join(path, file_orig), dims = {'name': 'ccl', 'height': 40},
# projection='robin', cmap='Blues_r', title='Cloud cover on 20041105 at 15:00 (on layer 40)')
# plt.savefig('original_cloud_cover_snapshot.pdf')
# Note that the cloud cover scheme used was a 0-1 cloud cover scheme.
maps = psy.plot.mapplot(os.path.join(path, file_orig), dims = {'name': 'clc', 'height': 40}, cticksize=34,
projection='robin', cmap='Blues_r')
# maps.update(lonlatbox=[-180, 180, -90, 90])
plt.savefig('original_cloud_cover_snapshot_untitled_narval.pdf')
# I had to cdo sellonlatbox first. Zooming in via lonlatbox in maps update did not work!
maps = psy.plot.mapplot(os.path.join(path, file_cg), dims = {'name': 'clc', 'height': 40}, cticksize=34,
projection='robin', cmap='Blues_r', bounds=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
# maps.update(lonlatbox=[-68, 15, -10, 20]) #[lon.min(), lon.max(), lat.min(), lat.max()]
# maps.update(lonlatbox=[-180, 180, -90, 90])
# plt.savefig('horizontally_coarse_grained_cloud_cover_untitled_narval.pdf')
# plt.savefig('test_2.pdf', bbox_inches=Bbox([[4, 4], [6, 6]]))
###Output
_____no_output_____
###Markdown
Of vertical coarse-graining:
###Code
## Load original data
# Load clc profile
DS = xr.open_dataset('/pf/b/b309170/my_work/NARVAL/data/clc/clc_R02B04_NARVALII_2016072800_cloud_DOM01_0017.nc')
da = DS.clc.values
print(da.shape)
# Extract all nan_fields
nan_fields = np.where(~np.isnan(da[0, -1, :]))[0]
# # Some arbitrary horizontal field
rand_field = np.random.randint(len(nan_fields))
rand_field = nan_fields[rand_field]
# rand_field=10084 # To reconstruct the profile from the paper
print(rand_field)
cl_hr = da[0, :, rand_field]
# Load zg profile
DS = xr.open_dataset('/pf/b/b309170/my_work/NARVAL/data/z_ifc/zf_R02B04_NARVALI_fg_DOM01.nc')
da = DS.zf.values
zg_hr = da[:, rand_field]
# zg_hr = zg_hr[-91:] # Need the 91 earth-bound layers
## Load vertically coarse-grained data
# Load clc profile
DS = xr.open_dataset('/pf/b/b309170/my_work/NARVAL/data_var_vertinterp/clc/int_var_clc_R02B04_NARVALII_2016072800_cloud_DOM01_0017.nc')
da = DS.clc.values
not_nan = ~np.isnan(da[0,:,rand_field])
cl_lr = da[0, not_nan, rand_field]
# Load zg profile
DS = xr.open_dataset('/pf/b/b309170/my_work/NARVAL/data_var_vertinterp/zg/zg_icon-a_capped.nc')
da = DS.zg.values
zg_lr = da[not_nan, rand_field]
# Increase the general font size
size_plot_elements = 16
matplotlib.rcParams['legend.fontsize'] = size_plot_elements
matplotlib.rcParams['axes.labelsize'] = size_plot_elements # For an axes xlabel and ylabel
matplotlib.rcParams['xtick.labelsize'] = size_plot_elements
matplotlib.rcParams['ytick.labelsize'] = size_plot_elements
fig = plt.figure(figsize=(2,4))
# # Units in kilometers
# zg_hr = zg_hr/1000
# zg_lr = zg_lr/1000
# ax = fig.add_subplot(211, title='High-res vertical cloud cover profile', ylim=(0, np.max(zg_lr)), xlim=(-0.05,1),
# xlabel='Cloud Cover Fraction', ylabel='Mean height of a vertical layer in km')
ax = fig.add_subplot(111, ylim=(0, np.max(zg_lr[4:])), xlim=(-0.05,1), ylabel='z [km]', xticks=[0,0.5,1])
ax.plot(cl_hr/100, zg_hr)
ax.plot(cl_hr/100, zg_hr, 'b.')
plt.savefig('vertical_coarse-graining_narval_example_v2_1.pdf', bbox_inches='tight')
fig = plt.figure(figsize=(2,4))
# ax_2 = fig.add_subplot(212, title='Low-res vertical cloud cover profile', ylim=(0, np.max(zg_lr)), xlim=(-0.05,1),
# xlabel='Cloud Cover Fraction', ylabel='Mean height of a vertical layer in km')
ax_2 = fig.add_subplot(111, ylim=(0, np.max(zg_lr[4:])), xlim=(-0.05,1),
xlabel='Cloud Fraction', ylabel='z [km]', xticks=[0,0.5,1])
ax_2.plot(cl_lr/100, zg_lr)
ax_2.plot(cl_lr/100, zg_lr, 'b.')
plt.savefig('vertical_coarse-graining_narval_example_v2_2.pdf', bbox_inches='tight')
fig = plt.figure(figsize=(10,7))
ax = fig.add_subplot(121, title='High-res vertical cloud cover profile')
ax.plot(cl_hr/100, zg_hr)
ax.plot(cl_hr/100, zg_hr, 'b.')
ax_2 = fig.add_subplot(122, title='Low-res vertical cloud cover profile')
ax_2.plot(cl_lr/100, zg_lr)
ax_2.plot(cl_lr/100, zg_lr, 'b.')
###Output
_____no_output_____ |
0-initial-language-model-with-sp-small.ipynb | ###Markdown
With SentencePiece tokenizer Initial setup
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai import *
from fastai.text import *
bs = 512
data_path = Config.data_path()
lang = 'nl'
name = f'{lang}wiki_sp' # Use a different directory.
path = data_path/name
path.mkdir(exist_ok=True, parents=True)
lm_fns = [f'{lang}_wt', f'{lang}_wt_vocab']
###Output
_____no_output_____
###Markdown
Download wikipedia data
###Code
# from nlputils import split_wiki,get_wiki
# get_wiki(path, lang)
# path.ls()
###Output
_____no_output_____
###Markdown
Split in separate files
###Code
dest = path/'docs_small'
dest.ls()[:5]
###Output
_____no_output_____
###Markdown
Create databunch for language model
###Code
# This takes about 45 minutes:
data = (TextList.from_folder(dest, processor=[OpenFileProcessor(), SPProcessor()])
.split_by_rand_pct(0.1, seed=42)
.label_for_lm()
.databunch(bs=bs, num_workers=1, bptt=70))
data.save(f'{lang}_databunch_sp') # Different databunch
len(data.vocab.itos),len(data.train_ds)
# data.train_ds[:5]
data.vocab.itos[:1000]
data.show_batch()
###Output
_____no_output_____
###Markdown
Train language model
###Code
data = load_data(dest, f'{lang}_databunch_sp', bs=bs, num_workers=1)
# data.train_ds[:1]
config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, qrnn=False, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
learn = language_model_learner(data, AWD_LSTM, config=config, drop_mult=1.0, pretrained=False)
# learn = language_model_learner(data, AWD_LSTM, config=config, drop_mult=1.5, pretrained=False)
# learn = language_model_learner(data, AWD_LSTM, drop_mult=1.0, pretrained=False)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# lr = 3e-3
lr = 1e-2
# lr = 2e-2
# learn.fit_one_cycle(1, lr, moms=(0.8, 0.7))
# Previous run (lr = 5e-3)
# learn.fit_one_cycle(5, lr, moms=(0.8, 0.7))
learn.fit_one_cycle(10, lr, moms=(0.8, 0.7))
# learn.fit_one_cycle(1, lr, moms=(0.8, 0.7))
mdl_path = path/'models'
mdl_path.mkdir(exist_ok=True)
# learn.to_fp32().save(mdl_path/lm_fns[0], with_opt=False)
learn.save(mdl_path/lm_fns[0], with_opt=False)
learn.data.vocab.save(mdl_path/(lm_fns[1] + '.pkl'))
TEXT = '''Het beleg van Utrecht
Het beleg van Utrecht'''
N_WORDS = 200
N_SENTENCES = 2
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.85) for _ in range(N_SENTENCES)))
# learn = language_model_learner(data, AWD_LSTM, drop_mult=1.,
# path = path,
# pretrained_fnames=lm_fns)
learn.export()
TEXT = '''Jan Wolkers
Jan Wolkers'''
N_WORDS = 500
N_SENTENCES = 1
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.65) for _ in range(N_SENTENCES)))
###Output
Jan Wolkers
Jan Wolkers , ▁heer ▁van ▁xxmaj ▁ stad ▁of ▁xxmaj ▁ eg mont ▁van ▁xxmaj ▁ eg mont ▁van ▁der ▁xxmaj ▁congregati e ▁( rotterdam , ▁13 ▁maart ▁11 49 ▁ - ▁xxmaj ▁paramaribo , ▁27 ▁oktober ▁1917 ) ▁was ▁een ▁xxmaj ▁ bulgaars ▁ diskjockey , ▁die ▁bekend ▁was ▁als ▁xxmaj ▁ fellow s ▁xxmaj ▁centrum . ▁xxmaj ▁hij ▁groeide ▁op ▁in ▁xxmaj ▁ missouri , ▁xxmaj ▁italië . ▁xxmaj ▁hij ▁trouwde ▁met ▁xxmaj ▁frank ▁xxmaj ▁ z elaer ▁en ▁xxmaj ▁anna ▁xxmaj ▁ aya nne , ▁die ▁hij ▁was . ▁xxmaj ▁in ▁2008 ▁was ▁hij ▁deel ▁van ▁het ▁xxmaj ▁nederlands ▁xxmaj ▁parlement . ▁xxmaj ▁in ▁zijn ▁studie ▁begon ▁hij ▁in ▁xxmaj ▁de n ▁xxmaj ▁haag . ▁xxmaj ▁hij ▁stierf ▁in ▁1921 . ▁xxmaj ▁in ▁1768 ▁werd ▁hij ▁lid ▁van ▁xxmaj ▁ kruisweg . ▁xxmaj ▁zijn ▁vader ▁was ▁xxmaj ▁anna ▁xxmaj ▁ s ig s ▁en ▁xxmaj ▁ agnese ▁xxmaj ▁ gen s s á . ▁xxmaj ▁hij ▁trad ▁op ▁als ▁de ▁vrouw ▁van ▁xxmaj ▁anna ▁van ▁xxmaj ▁ eg mont . ▁xxmaj ▁hij ▁werd ▁geboren ▁in ▁xxmaj ▁ t pen au la , ▁xxmaj ▁ petersburg ▁in ▁xxmaj ▁amsterdam . ▁xxmaj ▁in ▁zijn ▁ 1530 ▁begon ▁hij ▁samen ▁met ▁zijn ▁vader ▁aan ▁de ▁xxmaj ▁orde ▁van ▁xxmaj ▁ eg mont s . ▁xxmaj ▁in ▁zijn ▁eerste ▁huwelijk ▁met ▁xxmaj ▁sir ▁xxmaj ▁vi em ▁xxmaj ▁ le ▁xxmaj ▁ ce en o , ▁die ▁in ▁de ▁meer rlaatste ▁in ▁de ▁xxmaj ▁jordaan se ▁filosofie ▁werd ▁ontdekt , ▁werd ▁hij ▁naar ▁xxmaj ▁rome ▁gestuurd ▁en ▁werd ▁hij ▁begraven ▁in ▁de ▁xxmaj ▁orde ▁van ▁xxmaj ▁afgevaardigden . ▁xxmaj ▁als ▁reactie ▁voor ▁de ▁dood ▁van ▁de ▁xxmaj ▁ kortrijkse ▁koning ▁xxmaj ▁humbert ▁xxup ▁ii ▁kreeg ▁ze ▁een ▁eigen ▁liefde ▁en ▁was ▁hij ▁ook ▁betrokken ▁in ▁de ▁vele ▁kerken ▁die ▁zij ▁toch ▁hadden ▁vertoond . ▁xxmaj ▁in ▁tegenstelling ▁tot ▁zijn ▁broer ▁" de ▁macht ▁van ▁xxmaj ▁ eg mont " ▁werd ▁zij ▁lid ▁van ▁de ▁" ballista - expeditie " ▁die ▁op ▁20 ▁december ▁1944 ▁in ▁xxmaj ▁nederland ▁werd ▁verheven . ▁xxmaj ▁in ▁de ▁jaren ▁90 ▁begon ▁xxmaj ▁pieck ▁zich ▁terug ▁als ▁patroon inspecteur ▁van ▁xxmaj ▁ kruisweg . ▁xxmaj ▁hij ▁was ▁vooral ▁bekend ▁van ▁de ▁xxmaj ▁franse ▁kunstschilder ▁xxmaj ▁william ▁xxmaj ▁ wy hl ▁die ▁ook ▁in ▁de ▁xxmaj ▁nederlandse ▁literatuur ▁werkte , ▁maar ▁ook ▁de ▁ verantwoordelijk heid ▁voor ▁de ▁xxmaj ▁zon ▁in ▁een ▁periode ▁van ▁meer ▁dan ▁100.000 ▁pond . ▁xxmaj ▁hij ▁was ▁de ▁zoon ▁van ▁xxmaj ▁maria ▁xxmaj ▁johannes ▁xxmaj ▁ le ku mel ▁en ▁hij ▁is ▁een ▁van ▁de ▁meest ▁succesvolle ▁xxmaj ▁amerikaanse ▁schilders ▁die ▁in ▁het ▁xxmaj ▁engels ▁werkte . ▁xxmaj ▁hij ▁studeerde ▁samen ▁met ▁xxmaj ▁karl ▁xxmaj ▁ e ck art , ▁die ▁in ▁zijn ▁naam ▁van ▁xxmaj ▁ th ▁xxmaj ▁ wy y ▁in ▁1949 ▁een ▁relatie ▁met ▁xxmaj ▁ e din ▁xxmaj ▁law , ▁een ▁xxmaj ▁vlaamse ▁familie ▁was . ▁xxmaj ▁in ▁de ▁negentiende ▁eeuw ▁ontstonden ▁er ▁ ongeveer ▁twee ▁eeuwen ▁in ▁het ▁xxmaj ▁verenigd ▁xxmaj ▁koninkrijk , ▁maar ▁ook ▁in ▁xxmaj ▁polen , ▁xxmaj ▁frankrijk
|
notebooks/drafts_and_sketches/spacy_training-NB version.ipynb | ###Markdown
The plot below shows the distribution of words among each represented author.
###Code
fig = plt.figure(figsize=(8,4))
sns.set_theme(palette = 'rocket')
sns.barplot(x = df['author'].unique(), y= df['author'].value_counts())
plt.grid(color='w', linestyle='-', linewidth=1, zorder = 0)
plt.xlabel('Author')
plt.ylabel('Number of Lines')
plt.title('Comparison of Lines by Author')
plt.show()
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.33, random_state=42)
print('Text sample:', train['text'].iloc[0])
print('Author of this text:', train['author'].iloc[0])
print('Training Data Shape:', train.shape)
print('Testing Data Shape:', test.shape)
import spacy
import string
from nltk.corpus import stopwords as nltk_stopwords
nlp = spacy.load('en_core_web_sm')
stopwords = set(nltk_stopwords.words('english'))
punctuations = string.punctuation
def cleanup_text(docs, logging=True):
texts = []
counter = 1
for doc in docs:
if counter % 1000 == 0 and logging:
print("Processed %d out of %d documents." % (counter, len(docs)))
counter += 1
doc = nlp(doc, disable=['parser', 'ner'])
tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != '-PRON-']
tokens = [tok for tok in tokens if tok not in stopwords and tok not in punctuations]
tokens = ' '.join(tokens)
texts.append(tokens)
return pd.Series(texts)
EAP_text = [text for text in train[train['author'] == 'EAP']['text']]
HPL_text = [text for text in train[train['author'] == 'HPL']['text']]
MWS_text = [text for text in train[train['author'] == 'MWS']['text']]
EAP_clean = cleanup_text(EAP_text)
EAP_clean = ' '.join(EAP_clean).split()
HPL_clean = cleanup_text(HPL_text)
HPL_clean = ' '.join(HPL_clean).split()
MWS_clean = cleanup_text(MWS_text)
MWS_clean = ' '.join(MWS_clean).split()
from collections import Counter
EAP_counts = Counter(EAP_clean)
HPL_counts = Counter(HPL_clean)
MWS_counts = Counter(MWS_clean)
EAP_common_words = [word[0] for word in EAP_counts.most_common(20)]
EAP_common_counts = [word[1] for word in EAP_counts.most_common(20)]
fig = plt.figure(figsize=(18,6))
sns.barplot(x=EAP_common_words, y=EAP_common_counts, palette = 'rocket')
plt.title('Most Common Words used in short stories written by Edgar Allan Poe')
plt.show()
HPL_common_words = [word[0] for word in HPL_counts.most_common(20)]
HPL_common_counts = [word[1] for word in HPL_counts.most_common(20)]
fig = plt.figure(figsize=(18,6))
sns.barplot(x=HPL_common_words, y=HPL_common_counts, palette='rocket')
plt.title('Most Common Words used in the short stories of H.P. Lovecraft')
plt.show()
#plt.savefig(f'images/{author}_words.png')
MWS_common_words = [word[0] for word in MWS_counts.most_common(20)]
MWS_common_counts = [word[1] for word in MWS_counts.most_common(20)]
fig = plt.figure(figsize=(18,6))
sns.barplot(x=MWS_common_words, y=MWS_common_counts, palette = 'rocket')
plt.title('Most Common Words used in the short stories of Mary Wallstonecraft Shelley')
plt.show()
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.metrics import accuracy_score
from nltk.corpus import stopwords
import string
import re
import spacy
spacy.load('en')
from spacy.lang.en import English
parser = English()
STOPLIST = set(stopwords.words('english') + list(ENGLISH_STOP_WORDS))
SYMBOLS = " ".join(string.punctuation).split(" ") + ["-", "...", "”", "”"]
class CleanTextTransformer(TransformerMixin):
def transform(self, X, **transform_params):
return [cleanText(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {}
def cleanText(text):
text = text.strip().replace("\n", " ").replace("\r", " ")
text = text.lower()
return text
def tokenizeText(sample):
tokens = parser(sample)
lemmas = []
for tok in tokens:
lemmas.append(tok.lemma_.lower().strip() if tok.lemma_ != "-PRON-" else tok.lower_)
tokens = lemmas
tokens = [tok for tok in tokens if tok not in STOPLIST]
tokens = [tok for tok in tokens if tok not in SYMBOLS]
return tokens
from sklearn.naive_bayes import MultinomialNB
def printNMostInformative(vectorizer, clf, N):
    # For each class of the fitted MultinomialNB, show the N features with the
    # highest log-probability (the most informative words for that author).
    feature_names = vectorizer.get_feature_names()
    topClass1 = sorted(zip(clf.feature_log_prob_[0], feature_names), reverse=True)[:N]
    topClass2 = sorted(zip(clf.feature_log_prob_[1], feature_names), reverse=True)[:N]
    topClass3 = sorted(zip(clf.feature_log_prob_[2], feature_names), reverse=True)[:N]
    print("Class 1 best: ")
    for feat in topClass1:
        print(feat)
    print("Class 2 best: ")
    for feat in topClass2:
        print(feat)
    print("Class 3 best: ")
    for feat in topClass3:
        print(feat)
vectorizer = CountVectorizer(tokenizer=tokenizeText, ngram_range=(1,1))
clf = MultinomialNB()
pipe = Pipeline([('cleanText', CleanTextTransformer()), ('vectorizer', vectorizer), ('clf', clf)])
# data
train1 = train['text'].tolist()
labelsTrain1 = train['author'].tolist()
test1 = test['text'].tolist()
labelsTest1 = test['author'].tolist()
# train
pipe.fit(train1, labelsTrain1)
# test
preds = pipe.predict(test1)
print("accuracy:", accuracy_score(labelsTest1, preds))
print("Top 20 features used to predict: ")
printNMostInformative(vectorizer, clf, 20)
pipe = Pipeline([('cleanText', CleanTextTransformer()), ('vectorizer', vectorizer)])
transform = pipe.fit_transform(train1, labelsTrain1)
vocab = vectorizer.get_feature_names()
for i in range(len(train1)):
s = ""
indexIntoVocab = transform.indices[transform.indptr[i]:transform.indptr[i+1]]
numOccurences = transform.data[transform.indptr[i]:transform.indptr[i+1]]
for idx, num in zip(indexIntoVocab, numOccurences):
s += str((vocab[idx], num))
from sklearn import metrics
print(metrics.classification_report(labelsTest1, preds,
target_names=df['author'].unique()))
###Output
precision recall f1-score support
EAP 0.82 0.79 0.81 2587
HPL 0.82 0.79 0.81 1852
MWS 0.78 0.84 0.81 2023
accuracy 0.81 6462
macro avg 0.81 0.81 0.81 6462
weighted avg 0.81 0.81 0.81 6462
|
classification_for_image_tagging/flower_fruit/classify_images.ipynb | ###Markdown
Run images through flower/fruit classification pipeline---*Last Updated 29 September 2020* 1) Run images through Model 7 and 11 and save results to tsv in batches of 5,000 images at a time. 2) Post-process results from image batches to filter predictions using confidence values chosen in [det_conf_threshold.ipynb](https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/classification_for_image_tagging/flower_fruit/det_conf_threshold.ipynb) and save results to tsv. 3) Display filtered classification results on images and adjust confidence thresholds as needed. Imports---
###Code
# Mount google drive to import/export files
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# For working with data and plotting graphs
import itertools
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# For image classification and training
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Run images through model(s) for classification of flowers/fruits--- Use model(s) and confidence threshold(s) selected in det_conf_threshold.ipynb Define functions & variables
###Code
import csv
# Load trained model from path
TRAIN_SESS_NUM = "07"
saved_model_path = '/content/drive/My Drive/summer20/classification/flower_fruit/saved_models/' + TRAIN_SESS_NUM
flower_model = tf.keras.models.load_model(saved_model_path)
TRAIN_SESS_NUM = "11"
saved_model_path = '/content/drive/My Drive/summer20/classification/flower_fruit/saved_models/' + TRAIN_SESS_NUM
null_model = tf.keras.models.load_model(saved_model_path)
label_names = ['Flower', 'Fruit', 'Null']
# Load in image from URL
# Modified from https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb#scrollTo=JhVecdzJTsKE
def image_from_url(url, fn):
file = tf.keras.utils.get_file(fn, url) # Filename doesn't matter
disp_img = tf.keras.preprocessing.image.load_img(file)
img = tf.keras.preprocessing.image.load_img(file, target_size=[224, 224])
x = tf.keras.preprocessing.image.img_to_array(img)
x = tf.keras.applications.mobilenet_v2.preprocess_input(
x[tf.newaxis,...])
return x, disp_img
# Read in EOL image bundle dataframe
# TO DO: Type in image bundle address using form field to right
bundle = 'https://editors.eol.org/other_files/bundle_images/files/images_for_Angiosperms_20K_breakdown_000031.txt' #@param {type:"string"}
df = pd.read_csv(bundle, sep='\t', header=0)
df.head()
###Output
_____no_output_____
###Markdown
Run 20K image bundle through classification pipeline
###Code
# Write header row of output crops file
# TO DO: Change file name for each bundle/run abcd if doing 4 batches using dropdown form to right
tags_file = "angiosperm_tags_flowfru_20k_d" #@param ["angiosperm_tags_flowfru_20k_a", "angiosperm_tags_flowfru_20k_b", "angiosperm_tags_flowfru_20k_c", "angiosperm_tags_flowfru_20k_d"]
tags_fpath = "/content/drive/My Drive/summer20/classification/flower_fruit/results/" + tags_file + ".tsv"
with open(tags_fpath, 'a') as out_file:
tsv_writer = csv.writer(out_file, delimiter='\t')
tsv_writer.writerow(["eolMediaURL", "identifier", \
"dataObjectVersionID", "ancestry", \
"tag7", "tag7_conf", "tag11", "tag11_conf"])
# Set number of seconds to timeout if image url taking too long to open
import socket
socket.setdefaulttimeout(10)
import time
# TO DO: Set start and end rows to run inference for from EOL image bundle using form field to right
# If running in 4 batches of 5000 images, use values in dropdown menu
start = 15000 #@param ["0", "5000", "10000", "15000"] {type:"raw"}
end = 20000 #@param ["5000", "10000", "15000", "20000"] {type:"raw"}
# Loop through EOL image bundle to classify images and generate tags
for i, row in df.iloc[start:end].iterrows():
try:
# Get url from image bundle
url = df['eolMediaURL'][i]
# Read in image from url
fn = str(i) + '.jpg'
img, disp_img = image_from_url(url, fn)
# Record inference time
start_time = time.time()
# Detection and draw boxes on image
# For flowers/fruits (reproductive structures)
predictions = flower_model.predict(img, batch_size=1)
label_num = np.argmax(predictions)
f_conf = predictions[0][label_num]
f_class = label_names[label_num]
# For null (no reproductive structures)
predictions = null_model.predict(img, batch_size=1)
label_num = np.argmax(predictions)
n_conf = predictions[0][label_num]
n_class = label_names[label_num]
end_time = time.time()
# Display progress message after each image
print('Inference complete for {} of {} images'.format(i, (end-start)))
# Optional: Show classification results for images
# Only use to view predictions on <50 images at a time
#_, ax = plt.subplots(figsize=(10, 10))
#ax.imshow(disp_img)
#plt.axis('off')
#plt.title("{}) Mod 7 Prediction: {}, Confidence: {}%, \
#\n Mod 11 Prediction: {}, Confidence: {}%, Inference Time: {}".format(i, \
#f_class, f_conf, n_class, n_conf,format(end_time-start_time, '.3f')))
# Export tagging results to tsv
# Define variables for export
identifier = df['identifier'][i]
dataObjectVersionID = df['dataObjectVersionID'][i]
ancestry = df['ancestry'][i]
with open(tags_fpath, 'a') as out_file:
tsv_writer = csv.writer(out_file, delimiter='\t')
tsv_writer.writerow([url, identifier, dataObjectVersionID, ancestry, \
f_class, f_conf, n_class, n_conf])
except:
print('Check if URL from {} is valid'.format(url))
###Output
_____no_output_____
###Markdown
Post-process classification predictions using confidence threshold values for models 7 and 11 chosen in det_conf_threshold.ipynb---
###Code
# Combine exported model predictions and confidence values from above to one dataframe
base = '/content/drive/My Drive/summer20/classification/flower_fruit/results/angiosperm_tags_flowfru_20k_'
exts = ['a.tsv', 'b.tsv', 'c.tsv', 'd.tsv']
all_filenames = [base + e for e in exts]
df = pd.concat([pd.read_csv(f, sep='\t', header=0, na_filter = False) for f in all_filenames], ignore_index=True)
# Filter predictions using determined confidence value thresholds
# Make column for "reproductive structures present?" tag
df['reprod'] = np.nan
# Adjust final tag based on Model 7 and 11 predictions and confidence values
for i, row in df.iterrows():
# If Model 7 predicts flower with >1.6 confidence
if df['tag7'][i]=="Flower" and df['tag7_conf'][i]>1.6:
    # And Model 11 predicts null with >= 1.5 confidence
if df['tag11'][i]=="Null" and df['tag11_conf'][i]>=1.5:
# Reproductive structures present -> YES
df['reprod'][i] = "Y"
    # And Model 11 predicts null with < 1.5 confidence
elif df['tag11'][i]=="Null" and df['tag11_conf'][i]<1.5:
# Reproductive structures present -> NO
df['reprod'][i] = "N"
# And Model 11 predicts fruit or flower with any confidence
else:
      # Reproductive structures present -> YES
df['reprod'][i] = "Y"
# If Model 7 predicts flower with <= 1.6 confidence
elif df['tag7'][i]=="Flower" and df['tag7_conf'][i]<=1.6:
# Reproductive structures present -> Maybe
df['reprod'][i] = "M"
# If Model 7 predicts fruit or null with any confidence
else:
# Reproductive structures present -> NO
df['reprod'][i] = "N"
# Make all tags for grasses -> N (Poaceae, especially bamboo had bad classification results on manual inspection)
taxon = "Poaceae"
df['reprod'].loc[df.ancestry.str.contains(taxon, case=False, na=False)] = "N"
# Write results to tsv
df.to_csv("/content/drive/My Drive/summer20/classification/flower_fruit/results/angiosperm_tags_flowfru_20k_finaltags.tsv", sep='\t', index=False)
# Inspect results
print(df.head(10))
print("Number of positive identified reproductive structures: {}".format(len(df[df['reprod']=="Y"])))
print("Number of possible identified reproductive structures: {}".format(len(df[df['reprod']=="M"])))
print("Number of negative identified reproductive structures: {}".format(len(df[df['reprod']=="N"])))
###Output
_____no_output_____
###Markdown
Display final classification results on images---
###Code
# Set number of seconds to timeout if image url taking too long to open
import socket
socket.setdefaulttimeout(10)
# TO DO: Update file path to finaltags.tsv file
path = "/content/drive/My Drive/summer20/classification/flower_fruit/results/"
f = "angiosperm_tags_flowfru_20k_finaltags.tsv" #@param
fpath = path + f
df = pd.read_csv(fpath, sep='\t', header=0, na_filter = False)
# Function to load in image from URL
# Modified from https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb#scrollTo=JhVecdzJTsKE
def image_from_url(url, fn):
file = tf.keras.utils.get_file(fn, url) # Filename doesn't matter
disp_img = tf.keras.preprocessing.image.load_img(file)
img = tf.keras.preprocessing.image.load_img(file, target_size=[224, 224])
x = tf.keras.preprocessing.image.img_to_array(img)
x = tf.keras.applications.mobilenet_v2.preprocess_input(
x[tf.newaxis,...])
return x, disp_img
# TO DO: Set start and end rows to run inference for from EOL image bundle using form field to right
# If running in 4 batches of 5000 images, use values in dropdown menu
start = 0#@param {type:"raw"}
end = 50 #@param {type:"raw"}
# Loop through EOL image bundle to classify images and generate tags
for i, row in df.iloc[start:end].iterrows():
try:
# Get url from image bundle
url = df['eolMediaURL'][i]
# Read in image from url
fn = str(i) + '.jpg'
img, disp_img = image_from_url(url, fn)
# Record inference time
pred = df['reprod'][i]
# Display progress message after each image is loaded
print('Successfully loaded {} of {} images'.format(i+1, (end-start)))
# Show classification results for images
# Only use to view predictions on <50 images at a time
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(disp_img)
plt.axis('off')
plt.title("{}) Combined Mod 7 & 11 Prediction: {}".format(i+1, pred))
except:
print('Check if URL from {} is valid'.format(url))
###Output
_____no_output_____ |
Model-Study/mlModelsEssemble.ipynb | ###Markdown
Comparing the Classifiers
###Code
random_forest_clf = RandomForestClassifier(bootstrap=False, class_weight=None,
criterion='entropy', max_depth=7, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=525, n_jobs=-1, oob_score=False, random_state=42,
verbose=0, warm_start=True)
svc_clf = SVC(C=3.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=6, gamma=0.1,
kernel='poly', max_iter=-1, probability=True, random_state=None,
shrinking=True, tol=0.001, verbose=False)
mlp_clf = MLPClassifier(activation='relu', alpha=1.8, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False,
epsilon=1e-08, hidden_layer_sizes=(50, 100),
learning_rate='constant', learning_rate_init=0.001,
max_iter=1000, momentum=0.9, nesterovs_momentum=True,
power_t=0.5, random_state=42, shuffle=True, solver='lbfgs',
tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
estimators = [random_forest_clf, svc_clf, mlp_clf]
for estimator in estimators:
print("Training the", estimator)
estimator.fit(X_train_scaled, y_train)
[estimator.score(X_test_scaled, y_test) for estimator in estimators]
###Output
_____no_output_____
###Markdown
Voting Classifier
###Code
named_estimators = [
("random_forest_clf", random_forest_clf),
("svc_clf", svc_clf),
("mlp_clf", mlp_clf),
]
voting_clf = VotingClassifier(named_estimators, n_jobs=-1)
print(voting_clf.voting)
voting_clf.fit(X_train_scaled, y_train)
voting_clf.score(X_test_scaled, y_test)
[estimator.score(X_test_scaled, y_test) for estimator in voting_clf.estimators_]
voting_clf.set_params(random_forest_clf=None)
voting_clf.estimators
#del voting_clf.estimators_[0]
voting_clf.score(X_test_scaled, y_test)
[estimator.score(X_test_scaled, y_test) for estimator in voting_clf.estimators_]
voting_clf.voting = "soft"
print(voting_clf.voting)
voting_clf.score(X_test_scaled, y_test)
[estimator.score(X_test_scaled, y_test) for estimator in voting_clf.estimators_]
###Output
_____no_output_____
###Markdown
Evaluating the Ensemble With Cross-Validation
###Code
# y_pred_prob = voting_clf.predict_proba(X_test_scaled)[:,1]
y_scores = cross_val_predict(voting_clf, X_train_scaled, y_train, cv=3, method='predict_proba')
y_train_pred = cross_val_predict(voting_clf, X_train_scaled, y_train, cv=3)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
y_scores = y_scores[:, 1]
precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
plt.xlabel("Threshold")
plt.legend(loc="upper left")
plt.ylim([0, 1])
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_train, y_scores)
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
roc_auc_score(y_train, y_scores)
#print(confusion_matrix(y_test,y_pred))
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_train, y_train_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Sem Perda','Perda'],
title='Confusion matrix, without normalization')
print(classification_report(y_train, y_train_pred))
###Output
precision recall f1-score support
0 0.83 0.87 0.85 46
1 0.45 0.38 0.42 13
avg / total 0.75 0.76 0.76 59
###Markdown
Predicting the Classes in Test Set
###Code
y_pred = voting_clf.predict(X_test_scaled)
y_pred_prob = voting_clf.predict_proba(X_test_scaled)[:,1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
roc_auc_score(y_test, y_pred_prob)
#print(confusion_matrix(y_test,y_pred))
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Sem Perda','Perda'],
title='Confusion matrix, without normalization')
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0 0.83 0.87 0.85 46
1 0.45 0.38 0.42 13
avg / total 0.75 0.76 0.76 59
|
notebooks/02-GenerativeModels.ipynb | ###Markdown
Generative models

Most **classification algorithms** fall into one of two categories:
- discriminative classifiers
- generative classifiers

**Discriminative classifiers** model the target variable, y, as a direct function of the predictor variables, x. Example: logistic regression uses the model $P(y=1\mid \mathbf{x}) = \frac{1}{1+e^{-\boldsymbol{\beta}^\top \mathbf{x}}}$, where $\boldsymbol{\beta}$ is a length-D vector of coefficients and $\mathbf{x}$ is a length-D vector of predictors.

**Generative classifiers** instead view the predictors as being generated according to their class, i.e. they see x as a function of y, rather than the other way around. They then use Bayes' rule to get from p(x|y = k) to P(y = k|x), as explained below.

Generative models can be broken down into the three following steps. Suppose we have a classification task with K unordered classes, represented by k = 1, 2, …, K.
- Estimate the prior probability that a target belongs to any given class. I.e., estimate P(y = k) for k = 1, 2, …, K.
- Estimate the density of the predictors conditional on the target belonging to each class. I.e., estimate p(x|y = k) for k = 1, 2, …, K.
- Calculate the posterior probability that the target belongs to any given class. I.e., calculate P(y = k|x), which is proportional to p(x|y = k)P(y = k) by Bayes' rule.

We then classify an observation as belonging to the class k for which $p(\mathbf{x}\mid y=k)\,P(y=k)$ is greatest.

Class Priors estimation for Generative classifiers

Let $I_{nk}$ be an indicator which equals 1 if $y_n = k$ and 0 otherwise. Our estimate of P(y = k) is just the sample fraction of the observations from class k: $\hat{P}(y=k) = \frac{1}{N}\sum_{n=1}^{N} I_{nk}$.

Data Likelihood

The next step is to model the conditional distribution of x given y so that we can estimate this distribution's parameters. This of course depends on the family of distributions we choose to model x. Three common approaches are detailed below.
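As a rough end-to-end sketch of these three steps (illustrative only, assuming Gaussian class-conditional densities; the scikit-learn estimators used below implement the same idea more robustly):
###Code
# Sketch of the three generative-modelling steps with Gaussian class-conditional densities.
import numpy as np
from scipy.stats import multivariate_normal

def fit_generative(X, y):
    classes = np.unique(y)
    priors = {k: np.mean(y == k) for k in classes}                  # step 1: estimate P(y = k)
    densities = {k: multivariate_normal(X[y == k].mean(axis=0),     # step 2: estimate p(x | y = k)
                                        np.cov(X[y == k], rowvar=False))
                 for k in classes}
    return classes, priors, densities

def predict_generative(x, classes, priors, densities):
    # step 3: pick the class k maximising p(x | y = k) * P(y = k)
    scores = [densities[k].pdf(x) * priors[k] for k in classes]
    return classes[int(np.argmax(scores))]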
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
data = load_iris()
X, y = data.data, data.target
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1)
###Output
_____no_output_____
###Markdown
Linear Discriminant Analysis (LDA)
In LDA, we assume $\mathbf{x} \mid y = k \sim \mathcal{N}(\boldsymbol{\mu}_k, \boldsymbol{\Sigma})$ for k = 1, 2, …, K. Note that each class has the same covariance matrix $\boldsymbol{\Sigma}$ but a unique mean vector $\boldsymbol{\mu}_k$.
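A small illustrative estimate of these parameters on the training split (one mean per class, one pooled covariance shared by all classes); the `LinearDiscriminantAnalysis` estimator below does this internally:
###Code
# Illustrative LDA parameter estimates: per-class means and a pooled covariance matrix.
import numpy as np

classes = np.unique(Y_train)
means = {k: X_train[Y_train == k].mean(axis=0) for k in classes}
centered = np.vstack([X_train[Y_train == k] - means[k] for k in classes])
pooled_cov = centered.T @ centered / (len(X_train) - len(classes))  # shared covariance estimate
print(pooled_cov.shape)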
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
clf = LinearDiscriminantAnalysis()
clf.fit(X_train, Y_train)
y_pred = clf.predict(X_train)
print("Train Accuracy ", np.mean(y_pred == Y_train))
colors = {0: 'r',1: 'g',2: 'b'}
fig, ax = plt.subplots(1, 2)
for x, y in zip(X_train, y_pred):
ax[0].scatter(x[0],x[1],c=colors[y])
ax[0].set_xlabel("predicted")
for x, y in zip(X_train, Y_train):
ax[1].scatter(x[0],x[1],c=colors[y])
ax[1].set_xlabel("real")
plt.show()
###Output
Train Accuracy 0.9777777777777777
###Markdown
Quadratic Discriminant Analysis (QDA)
QDA looks very similar to LDA but assumes each class has its own covariance matrix, i.e. $\mathbf{x} \mid y = k \sim \mathcal{N}(\boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k)$ for k = 1, 2, …, K.
###Code
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
clf = QuadraticDiscriminantAnalysis()
clf.fit(X_train, Y_train)
y_pred = clf.predict(X_train)
print("Train Accuracy ", np.mean(y_pred == Y_train))
colors = {0: 'r',1: 'g',2: 'b'}
fig, ax = plt.subplots(1, 2)
for x, y in zip(X_train, y_pred):
ax[0].scatter(x[0],x[1],c=colors[y])
ax[0].set_xlabel("predicted")
for x, y in zip(X_train, Y_train):
ax[1].scatter(x[0],x[1],c=colors[y])
ax[1].set_xlabel("real")
plt.show()
###Output
Train Accuracy 0.9851851851851852
###Markdown
Naive Bayes
Naive Bayes assumes the random variables within x are independent conditional on the class of the observation. That is, if x is D-dimensional, the class-conditional density factorises as $p(\mathbf{x} \mid y = k) = \prod_{d=1}^{D} p(x_d \mid y = k)$.
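A tiny sketch of that factorisation with one univariate Gaussian per feature (illustrative; `GaussianNB` below fits essentially these per-class means and variances):
###Code
# Factorised likelihood: product of one univariate density per feature.
import numpy as np
from scipy.stats import norm

k = 0                                      # pick one class
mu = X_train[Y_train == k].mean(axis=0)    # per-feature means for class k
sd = X_train[Y_train == k].std(axis=0)     # per-feature standard deviations for class k
x = X_test[0]
likelihood = np.prod(norm.pdf(x, mu, sd))  # p(x | y = k) under the naive independence assumption
print(likelihood)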
###Code
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train, Y_train)
y_pred = clf.predict(X_train)
print("Train Accuracy ", np.mean(y_pred == Y_train))
colors = {0: 'r',1: 'g',2: 'b'}
fig, ax = plt.subplots(1, 2)
for x, y in zip(X_train, y_pred):
ax[0].scatter(x[0],x[1],c=colors[y])
ax[0].set_xlabel("predicted")
for x, y in zip(X_train, Y_train):
ax[1].scatter(x[0],x[1],c=colors[y])
ax[1].set_xlabel("real")
plt.show()
###Output
Train Accuracy 0.9555555555555556
|
resources/boyle example.ipynb | ###Markdown
Boyle usage Creating new environment and sample usage
###Code
Boyle.list()
Boyle.mk("my_new_env")
Boyle.activate("my_new_env")
Boyle.install({:number, "~> 0.5.7"})
Boyle.list()
Boyle.active_env_name()
###Output
_____no_output_____
###Markdown
Number library usage
###Code
Number.Currency.number_to_currency(2034.46)
Number.Phone.number_to_phone(1112223333, area_code: true, country_code: 1)
Number.Percentage.number_to_percentage(100, precision: 0)
Number.Human.number_to_human(1234)
Number.Delimit.number_to_delimited(12345678)
###Output
_____no_output_____
###Markdown
Deactivating virtual environment
###Code
Boyle.deactivate()
Boyle.active_env_name()
# Number library won't work if you deactivate the environment
Number.Currency.number_to_currency(2034.46)
Boyle.activate("my_new_env")
# and if you activate it will work again
Number.Currency.number_to_currency(2034.46)
Boyle.freeze()
Boyle.list()
Boyle.rm("my_new_env")
###Output
_____no_output_____ |
Classification/Classification-pytorch.ipynb | ###Markdown
EDA
###Code
data.Exited.value_counts().plot(kind='pie', autopct='%1.0f%%',explode=(0.05, 0.05))
sns.countplot(x='Geography', data=data)
sns.countplot(x='Exited',hue='Geography', data=data)
###Output
_____no_output_____
###Markdown
Pre-processing
###Code
data.columns
numerical_columns = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary']
categorical_columns = ['Geography', 'Gender', 'HasCrCard', 'IsActiveMember']
output = ['Exited']
for cat in categorical_columns:
data[cat] = data[cat].astype('category')
data.dtypes
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
for col in categorical_columns:
data[col] = encoder.fit_transform(data[col])
data.head()
###Output
_____no_output_____
###Markdown
Normalise
###Code
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
for col in [numerical_columns,categorical_columns, output]:
data[col] = scaler.fit_transform(data[col])
data.head()
x = data.loc[:, :"EstimatedSalary"]
y = data['Exited']
x.head()
y.head()
print(x.shape, y.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3)
print("x_train dim:",x_train.shape, "\ty_train dim:", y_train.shape)
print("x_test dim:",x_test.shape, "\ty_test dim:", y_test.shape)
###Output
x_train dim: (7000, 10) y_train dim: (7000,)
x_test dim: (3000, 10) y_test dim: (3000,)
###Markdown
Convert to tensor
###Code
x_train_tensor = torch.tensor(x_train.values, dtype=torch.float)
y_train_tensor = torch.tensor(y_train.values, dtype=torch.long)
x_test_tensor = torch.tensor(x_test.values, dtype=torch.float)
y_test_tensor = torch.tensor(y_test.values, dtype=torch.long)
print("x_train dim:",x_train_tensor.shape, "\ty_train dim:", y_train_tensor.shape)
print("x_test dim:",x_test_tensor.shape, "\ty_test dim:", y_test_tensor.shape)
###Output
x_train dim: torch.Size([7000, 10]) y_train dim: torch.Size([7000])
x_test dim: torch.Size([3000, 10]) y_test dim: torch.Size([3000])
###Markdown
Model
###Code
class Network(nn.Module):
def __init__(self, n_input, h, n_output):
super().__init__()
self.layer = nn.Linear(n_input, h)
self.output = nn.Linear(h, n_output)
def forward(self, x):
x = self.layer(x)
x = self.output(x)
x = torch.sigmoid(x)
return x
n_input, n_output = x_train_tensor.shape[1], 2
h = 100
model = Network(n_input, h, n_output)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
losses = []
epochs = 500
for e in range(1, epochs+1):
y_pred = model(x_train_tensor)
loss = criterion(y_pred, y_train_tensor)
losses.append(loss)
if e%50 ==0:
print(f"epochs: {e} ===> loss:{loss}")
if torch.isnan(loss):
break
optimizer.zero_grad()
loss.backward()
optimizer.step()
plt.plot(range(len(losses)), losses)
plt.xlabel("# epochs")
plt.ylabel("loss")
plt.show()
with torch.no_grad():
y_val = model(x_test_tensor)
loss = criterion(y_val, y_test_tensor)
print("Test loss: ", loss)
y_val = np.argmax(y_val, axis=1)
y_val
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
print(confusion_matrix(y_test_tensor, y_val))
print(classification_report(y_test_tensor, y_val))
accuracy = accuracy_score(y_test_tensor, y_val)*100
print(f'Accuracy: {accuracy:.2f}')
###Output
[[2354 54]
[ 414 178]]
precision recall f1-score support
0 0.85 0.98 0.91 2408
1 0.77 0.30 0.43 592
accuracy 0.84 3000
macro avg 0.81 0.64 0.67 3000
weighted avg 0.83 0.84 0.82 3000
Accuracy: 84.40
|
Notebooks/Performance-Evaluation/FE_AutoEncoder.ipynb | ###Markdown
Import libraries
###Code
from google.colab import drive
from pathlib import Path
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import time
import os
import csv
import concurrent.futures
###Output
_____no_output_____
###Markdown
Utility functions Create annot and load descriptors
###Code
def create_annot(path):
image_list = list(Path(path).glob('*/*.jpg'))
# the identity name is in the path (the name of the parent directory)
names_list = [i.parent.name for i in image_list] # get the identity of each image
# keep info in a pandas DataFrame
annot = pd.DataFrame({'identity': names_list, 'image_path': image_list})
return annot
def concatenate_annots(list_of_paths):
concat_annot = pd.DataFrame()
with concurrent.futures.ThreadPoolExecutor() as executor:
annots = [executor.submit(create_annot, path) for path in list_of_paths]
for annot in annots:
new_annot = annot.result()
concat_annot = concat_annot.append(new_annot, ignore_index = True)
return concat_annot
def load_descriptors(path):
with open(path, 'rb') as file:
return np.load(file)
def concatenate_descriptors(list_of_paths):
concat_descriptors = None
with concurrent.futures.ThreadPoolExecutor() as executor:
descriptors = [executor.submit(load_descriptors, path) for path in list_of_paths]
for descriptor in descriptors:
new_descriptor = descriptor.result()
if concat_descriptors is None:
concat_descriptors = new_descriptor
else:
concat_descriptors = np.concatenate([concat_descriptors, new_descriptor])
return concat_descriptors
###Output
_____no_output_____
###Markdown
Create pivots
###Code
def generate_pivots(descriptors, n, strategy="rnd"):
    if strategy == "kMED":
        import sklearn_extra.cluster  # optional dependency, needed only for the kMED strategy
        kmedoids = sklearn_extra.cluster.KMedoids(n_clusters=n).fit(descriptors)
        return kmedoids.cluster_centers_
if strategy != "rnd":
print(strategy, "was not implemented. Random pivots were returned")
pivots_id = np.random.choice(np.arange(len(descriptors)), size=n)
return descriptors[pivots_id]
def generate_list_of_pivots(descriptors, t, n, strategy="rnd"):
list_of_pivots = []
with concurrent.futures.ThreadPoolExecutor() as executor:
pivots = [executor.submit(generate_pivots, descriptors, n, strategy) for i in range(t)]
for pivot in concurrent.futures.as_completed(pivots):
new_pivot = pivot.result()
list_of_pivots.append(new_pivot)
return list_of_pivots
###Output
_____no_output_____
###Markdown
Save test results
###Code
def save_results(dir, file_name, results):
with open(os.path.join(dir, file_name +".csv"), 'w') as f:
writer = csv.writer(f)
# write the header
writer.writerow(["CLASS", "AP", "QUERY TIME"])
# write the data
for r in results:
writer.writerow(r)
###Output
_____no_output_____
###Markdown
Test Performance
###Code
drive.mount('/content/drive', force_remount=True)
###Output
Mounted at /content/drive
###Markdown
Create annot and load descriptors for the database
###Code
db_annot = concatenate_annots(['/content/drive/MyDrive/CV_Birds/train', '/content/drive/MyDrive/CV_Birds/mirflickr25k'])
db_annot
db_descriptors = concatenate_descriptors(['/content/drive/MyDrive/CV_Birds/features/training/AutoEncoder/512to128withPace64_feature_extraction.npy','/content/drive/MyDrive/CV_Birds/features/distractor/AutoEncoder/512to128withPace64_feature_extraction.npy'])
db_descriptors.shape
###Output
_____no_output_____
###Markdown
Create annot and load descriptors for the test set
###Code
query_annot = create_annot('/content/drive/MyDrive/CV_Birds/test')
query_annot
query_descriptors = load_descriptors('/content/drive/MyDrive/CV_Birds/features/test/AutoEncoder/512to128withPace64_feature_extraction.npy')
query_descriptors.shape
###Output
_____no_output_____
###Markdown
To run our tests we select only the first image of each species within the test set. Please note that within the test set we have 5 images per species.
###Code
queries_indexes = [x for x in range(325*5) if x%5 == 0]
###Output
_____no_output_____
###Markdown
Create PP-Index
###Code
def get_descriptor_from_id(id_object):
return db_descriptors[id_object]
%cd "/content/drive/MyDrive/CV_Birds/Notebooks/PP-Index"
%run PPIndex.ipynb
# generate pivots
pivots = generate_pivots(db_descriptors, 40, "rnd")
# cosine tree
cosine_tree = PrefixTree(pivots, length=3, distance_metric='cosine', base_directory="/content/cosine", tree_file='tree_structure')
if cosine_tree.is_empty():
cosine_tree.insert_objects_into_tree(range(len(db_descriptors)))
cosine_tree.save()
!cp /content/cosine/tree* /content/drive/MyDrive/CV_Birds/indexes/feature_extraction/tree/cosine/
# euclidean tree
euclidean_tree = PrefixTree(pivots, length=3, distance_metric='euclidean', base_directory="/content/euclidean", tree_file='tree_structure')
if euclidean_tree.is_empty():
euclidean_tree.insert_objects_into_tree(range(len(db_descriptors)))
euclidean_tree.save()
!cp /content/euclidean/tree* /content/drive/MyDrive/CV_Birds/indexes/feature_extraction/tree/euclidean/
###Output
_____no_output_____
###Markdown
Compute mAP
###Code
birds_db = db_annot.loc[db_annot['identity'] != 'mirflickr']
counts = birds_db.groupby('identity').count()
print("Minimum number of images per species:", int(counts.min()))
print("Maximum number of images per species:", int(counts.max()))
print("Average number of images:", float(counts.sum()/325))
###Output
Minimum number of images per species: 116
Maximum number of images per species: 249
Average number of images: 145.63692307692307
###Markdown
Since at most we have 249 images per species, we use $n=250$.
###Code
n = 250
###Output
_____no_output_____
###Markdown
The formula for Average Precision is the following:
> $AP@n=\frac{1}{GTP}\sum_{k=1}^{n}P@k \times rel@k$
where $GTP$ refers to the total number of ground truth positives, $n$ refers to the total number of images we are interested in, $P@k$ refers to the precision@k and $rel@k$ is a relevance function. The relevance function is an indicator function which equals 1 if the document at rank $k$ is relevant and equals 0 otherwise.
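A tiny worked example of this formula with made-up relevance flags (purely illustrative, not real retrieval results):
###Code
# Toy AP computation: GTP = 3 and the first five retrieved items are
# relevant, not relevant, relevant, relevant, not relevant.
rel = [1, 0, 1, 1, 0]
GTP = 3
hits, ap = 0, 0.0
for k, r in enumerate(rel, start=1):
    if r:
        hits += 1
        ap += hits / k   # P@k is only accumulated at relevant ranks
ap /= GTP
print(ap)  # (1/1 + 2/3 + 3/4) / 3 ≈ 0.806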
###Code
def compute_ap(query_index, retrieved_ids):
query_identity = query_annot['identity'][query_index]
print(query_index//5, query_identity)
GTP = len(db_annot.loc[db_annot['identity'] == query_identity])
relevant = 0
precision_summation = 0
for k, id in enumerate(retrieved_ids):
if db_annot['identity'][id] == query_identity: # relevant result
relevant = relevant + 1
precision_at_k = relevant/(k+1)
precision_summation = precision_summation + precision_at_k
return (query_identity, precision_summation/GTP)
###Output
_____no_output_____
###Markdown
For each query, $Q$, we can calculate a corresponding $AP$. Then, the $mAP$ is simply the mean of all the queries that were made.> $mAP = \frac{1}{N}\sum_{i=1}^{N}AP_i$In our case, $N=325$ (one query per species) Simple tree Cosine
###Code
def cosine_tree_queries(query_index, n):
start_time = time.time()
ids, distances = cosine_tree.find_nearest_neighbors(query_descriptors[query_index], n)
end_time = time.time()
ids = ids.tolist()
return compute_ap(query_index, ids) + (end_time - start_time,)
aps = []
for query_index in queries_indexes:
aps.append(cosine_tree_queries(query_index, n))
aps
ap_at_n = np.array([ap[1] for ap in aps])
query_time = np.array(([ap[2] for ap in aps]))
mAP_at_n = np.mean(ap_at_n, axis=0)
avg_query_time = np.mean(query_time, axis=0)
print("mAP:", mAP_at_n)
print("avg. query time: ", avg_query_time)
save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FE_tree_cosine_results', aps)
###Output
_____no_output_____
###Markdown
Euclidean
###Code
def euclidean_tree_queries(query_index, n):
start_time = time.time()
ids, distances = euclidean_tree.find_nearest_neighbors(query_descriptors[query_index], n)
end_time = time.time()
ids = ids.tolist()
return compute_ap(query_index, ids) + (end_time - start_time,)
aps = []
for query_index in queries_indexes:
aps.append(euclidean_tree_queries(query_index, n))
aps
ap_at_n = np.array([ap[1] for ap in aps])
query_time = np.array(([ap[2] for ap in aps]))
mAP_at_n = np.mean(ap_at_n, axis=0)
avg_query_time = np.mean(query_time, axis=0)
print("mAP:", mAP_at_n)
print("avg. query time: ", avg_query_time)
save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FE_tree_euclidean_results', aps)
###Output
_____no_output_____
###Markdown
Tree with query perturbation Cosine
###Code
def cosine_pert_tree_queries(query_index, n):
start_time = time.time()
ids, distances = cosine_tree.find_nearest_neighbors_with_query_perturbation(query_descriptors[query_index], n, perturbations=3)
end_time = time.time()
ids = ids.tolist()
return compute_ap(query_index, ids) + (end_time - start_time,)
aps = []
for query_index in queries_indexes:
aps.append(cosine_pert_tree_queries(query_index, n))
aps
ap_at_n = np.array([ap[1] for ap in aps])
query_time = np.array(([ap[2] for ap in aps]))
mAP_at_n = np.mean(ap_at_n, axis=0)
avg_query_time = np.mean(query_time, axis=0)
print("mAP:", mAP_at_n)
print("avg. query time: ", avg_query_time)
save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FE_pert_tree_cosine_results', aps)
###Output
_____no_output_____
###Markdown
Euclidean
###Code
def euclidean_pert_tree_queries(query_index, n):
start_time = time.time()
ids, distances = euclidean_tree.find_nearest_neighbors_with_query_perturbation(query_descriptors[query_index], n, perturbations=3)
end_time = time.time()
ids = ids.tolist()
return compute_ap(query_index, ids) + (end_time - start_time,)
aps = []
for query_index in queries_indexes:
aps.append(euclidean_pert_tree_queries(query_index, n))
aps
ap_at_n = np.array([ap[1] for ap in aps])
query_time = np.array(([ap[2] for ap in aps]))
mAP_at_n = np.mean(ap_at_n, axis=0)
avg_query_time = np.mean(query_time, axis=0)
print("mAP:", mAP_at_n)
print("avg. query time: ", avg_query_time)
save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/AutoEncoder', 'AE_FE_pert_tree_euclidean_results', aps)
###Output
_____no_output_____ |
notebook/procs-pandas-plot-area.ipynb | ###Markdown
Area plots can be created with Series.plot.area() and DataFrame.plot.area(). By default, area plots are stacked. To produce a stacked area plot, each column must contain either all positive or all negative values. When the input data contains NaN, it is automatically filled with 0. If you want to drop missing values or fill them with other values, use dataframe.dropna() or dataframe.fillna() before calling plot.
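A short illustrative snippet of the NaN handling described above (the values are arbitrary):
###Code
# NaNs are filled with 0 by default when plotting; dropna()/fillna() give explicit control.
import numpy as np
import pandas as pd
df_nan = pd.DataFrame({'a': [1, np.nan, 3], 'b': [2, 2, np.nan]})
df_nan.plot.area()                          # NaN treated as 0
df_nan.fillna(df_nan.mean()).plot.area()    # or fill with another value before plotting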
###Code
df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
df.plot.area()
# Note: build a DataFrame of random values and plot the four columns as a stacked area chart
# To produce an unstacked plot, pass the argument stacked=False.
# Unless otherwise specified, the alpha (transparency) value is then set to 0.5, i.e. semi-transparent:
df.plot.area(stacked=False)
###Output
_____no_output_____
###Markdown
Scatter plots can be drawn with the DataFrame.plot.scatter() method. A scatter plot requires numeric columns for the x and y axes. These can be specified with the x and y keywords.
###Code
df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
df.plot.scatter(x='a', y='b')
# Plot column 'a' as the x-axis data and column 'b' as the y-axis data
ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1')
df.plot.scatter(x='c', y='d', color='RED', label='Group 2', ax=ax)
# Note: to draw multiple column groups on a single axes, call the plot method repeatedly,
# passing the target axes; specifying the color and label keywords is recommended to distinguish each group.
df.plot.scatter(x='a', y='b', c='c', s=50)
# Note: the keyword c can be given as a column name to provide a color for each point
# You can pass other keywords supported by matplotlib scatter.
# The example below shows a bubble chart that uses a DataFrame column for the bubble sizes:
df.plot.scatter(x='a', y='b', s=df['c']*200)
# Note: column 'c' (scaled) is used as the bubble (marker) size
###Output
_____no_output_____ |
Feature Engineering/Feature_Engineering_Part_5.ipynb | ###Markdown
Probability Ratio Encoding
1. Probability of Survived based on Cabin --- **Categorical Feature**
2. Probability of Not Survived --- **1 - prob(Survived)**
3. **prob(Survived) / prob(Not Survived)**
4. Dictionary to map Cabin to the probability ratio.
5. Replace the categorical feature with it.
###Code
import pandas as pd
df=pd.read_csv('titanic.csv',usecols=['Cabin','Survived'])
df.head()
###Output
_____no_output_____
###Markdown
Replacing NaN values with 'Missing'
###Code
df['Cabin'].fillna('Missing',inplace=True)
df.head()
df['Cabin'].unique()
df['Cabin']=df['Cabin'].astype(str).str[0]
df.head()
df.Cabin.unique()
prob_df=df.groupby(['Cabin'])['Survived'].mean()
prob_df = pd.DataFrame(prob_df)
prob_df
prob_df['Died']= 1 - prob_df['Survived']
prob_df.head()
prob_df['Probability_ratio']=prob_df['Survived']/prob_df['Died']
prob_df.head()
probability_encoded=prob_df['Probability_ratio'].to_dict()
probability_encoded
df['Cabin_encoded']=df['Cabin'].map(probability_encoded)
df.head(15)
###Output
_____no_output_____
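###Markdown
The steps above can be wrapped into a small reusable helper (a sketch, not part of the original walkthrough; the `eps` constant is an assumption to avoid division by zero when a category has no negative cases):
###Code
# Sketch of a reusable probability-ratio encoder for a binary target column.
def probability_ratio_encode(data, column, target, eps=1e-6):
    prob = data.groupby(column)[target].mean()
    ratio = prob / (1 - prob + eps)       # P(target=1) / P(target=0) per category
    return data[column].map(ratio.to_dict())

# Example: df['Cabin_encoded'] = probability_ratio_encode(df, 'Cabin', 'Survived')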
###Markdown
Transformation of the Features
1. Why is transformation of features required?
   * Linear Regression --- Gradient Descent --- **Global Minima**
   * Algorithms like KNN, K-Means, Hierarchical Clustering --- **Euclidean Distance**
2. Every point has some vector and direction.
3. Deep Learning Techniques (Standardization, Scaling --- 0-255 pixels)
   * ANN ---> Global Minima, Gradient Descent
   * CNN
   * RNN
Types Of Transformation
1. Normalization and Standardization
2. Scaling to Minimum and Maximum values
3. Scaling to Median and Quantiles
4. Gaussian Transformation
   * Logarithmic Transformation
   * Reciprocal Transformation
   * Square Root Transformation
   * Exponential Transformation
   * Box-Cox Transformation
Standardization
* We try to bring all the variables or features to a similar scale.
* Standardization means centering the variable at zero.
* **Z = (x - x_mean) / std**
* Mean = 0, Standard Deviation = 1.
###Code
import pandas as pd
df=pd.read_csv('titanic.csv', usecols=['Pclass','Age','Fare','Survived'])
df.head()
df.isnull().sum()
df['Age'].fillna(df.Age.median(),inplace=True)
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Standardization: we use the StandardScaler from the sklearn library.
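As a quick illustration of the formula before using the scaler, the same Z values can be computed by hand (note that pandas' `.std()` uses ddof=1, so the result differs negligibly from StandardScaler's population standard deviation):
###Code
# Manual standardization of one column, matching Z = (x - x_mean) / std.
age_z = (df['Age'] - df['Age'].mean()) / df['Age'].std()
print(round(age_z.mean(), 6), round(age_z.std(), 6))  # approximately 0 and 1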
###Code
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
# fit vs fit_transform
df_scaled=scaler.fit_transform(df)
df_scaled
pd.DataFrame(df_scaled)
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(df_scaled[:,1],bins=20)
plt.xlabel('Pclass_Scaled')
plt.ylabel('Number of Points')
plt.hist(df_scaled[:,2],bins=20)
plt.xlabel('Age_Scaled')
plt.ylabel('Number of Points')
plt.hist(df_scaled[:,2],bins=20)
plt.xlabel('Fare_Scaled')
plt.ylabel('Number of Points')
###Output
_____no_output_____
###Markdown
* **If there are outliers, they will affect the standardization.**
###Code
plt.hist(df['Fare'],bins=20)
plt.xlabel('Fare_Not_Scaled')
plt.ylabel('Number of Points')
###Output
_____no_output_____
###Markdown
Min-Max Scaling (CNN) ---> Deep Learning Techniques
* Min-Max Scaling scales the values between 0 and 1.
* X_scaled = (X - X.min) / (X.max - X.min)
###Code
from sklearn.preprocessing import MinMaxScaler
min_max=MinMaxScaler()
df_minmax=pd.DataFrame(min_max.fit_transform(df),columns=df.columns)
df_minmax.head()
plt.hist(df_minmax['Pclass'],bins=20)
plt.hist(df_minmax['Fare'],bins=20)
plt.hist(df_minmax['Age'],bins=20)
###Output
_____no_output_____
###Markdown
Robust Scaler
1. It is used to scale the feature to the median and quantiles.
2. Scaling using the median and quantiles consists of subtracting the median from all the observations, and then dividing by the interquantile difference.
3. The interquantile difference is the difference between the 75th and 25th quantile:
   * **IQR = 75th quantile - 25th quantile**
4. X_scaled = (X - X.median) / IQR
5. 0,1,2,3,4,5,6,7,8,9,10
   * **9** ---> 90th percentile ---> 90% of all values in this group are less than 9.
   * **1** ---> 10th percentile ---> 10% of all values in this group are less than 1.
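The same scaling can be written out by hand for a single column (illustrative; `RobustScaler` below applies it to every column):
###Code
# Manual robust scaling of 'Fare': subtract the median, divide by the IQR.
q1, med, q3 = df['Fare'].quantile([0.25, 0.5, 0.75])
fare_robust = (df['Fare'] - med) / (q3 - q1)
fare_robust.head()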
###Code
from sklearn.preprocessing import RobustScaler
scaler=RobustScaler()
df_robust_scaler=pd.DataFrame(scaler.fit_transform(df),columns=df.columns)
df_robust_scaler.head()
plt.hist(df_robust_scaler['Fare'],bins=20)
plt.hist(df_robust_scaler['Age'],bins=20)
plt.hist(df_robust_scaler['Pclass'],bins=20)
###Output
_____no_output_____
###Markdown
Gaussian Transformation
* Some machine learning algorithms like linear regression and logistic regression assume that the features are normally distributed ---> **accuracy/performance increases when the data is normally distributed**
  * Logarithmic Transformation
  * Reciprocal Transformation
  * Square Root Transformation
  * Exponential Transformation
  * Box-Cox Transformation
###Code
df=pd.read_csv('titanic.csv',usecols=['Age','Fare','Survived'])
df.head()
###Output
_____no_output_____
###Markdown
Filling the missing "NAN" values with Median Values
###Code
df['Age']=df['Age'].fillna(df['Age'].median())
df.isnull().sum()
import scipy.stats as stat
import pylab
###Output
_____no_output_____
###Markdown
If you want to check whether a feature is Gaussian (normally) distributed, we use a ---> ***Q-Q plot***
###Code
def plot_data(df, feature):
plt.figure(figsize=(10, 6))
plt.subplot(1, 2, 1) # ----> 1 row, 2 column and 1st index.
df[feature].hist()
plt.subplot(1, 2, 2) # ----> 1 row, 2 column and 2nd index.
stat.probplot(df[feature], dist='norm', plot=pylab)
plt.show()
plot_data(df, 'Age')
###Output
_____no_output_____
###Markdown
* If all the points fall on the red line, then we can say that the feature is normally distributed.
###Code
plot_data(df, 'Fare')
###Output
_____no_output_____
###Markdown
Logarithmic Transformation * **Logarithmic transformation works best when your data is right-skewed (positively skewed).**
###Code
import numpy as np
df['Age_log'] = np.log(df['Age'])
plot_data(df, 'Age_log')
###Output
_____no_output_____
###Markdown
Reciprocal Transformation
###Code
df['Age_reciprocal']=1/df.Age
plot_data(df,'Age_reciprocal')
###Output
_____no_output_____
###Markdown
Square Root Transformation
###Code
df['Age_square']=df.Age**(1/2)
plot_data(df,'Age_square')
###Output
_____no_output_____
###Markdown
Exponential Transformation
###Code
df['Age_exponential']=df.Age**(1/1.2)
plot_data(df,'Age_exponential')
###Output
_____no_output_____
###Markdown
Box-Cox Transformation * The Box-Cox transformation is defined as: * **T(Y) = (Y^λ − 1)/λ*** where Y is the response variable and λ is the transformation parameter. λ is typically searched over a range such as -5 to 5; all candidate values of λ are considered and the optimal value for the given variable is selected.
###Code
stat.boxcox(df['Age'])
df['Age_Boxcox'],parameters=stat.boxcox(df['Age'])
parameters
plot_data(df,'Age_Boxcox')
###Output
_____no_output_____
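###Markdown
As a quick check of the formula above (an added sketch), the Box-Cox output for the fitted λ should match (Y**λ - 1)/λ computed by hand (valid for λ ≠ 0).
###Code
# compare scipy's result with the formula using the fitted lambda (`parameters`)
manual = (df['Age']**parameters - 1) / parameters
print(np.allclose(df['Age_Boxcox'], manual))
###Output
_____no_output_____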
###Markdown
'Fare' Variable Plots
###Code
plot_data(df,'Fare')
###Output
_____no_output_____
###Markdown
Logarithmic Transformation of (x+1)
###Code
df['Fare_log']=np.log1p(df['Fare'])
plot_data(df,'Fare_log')
df['Fare_Boxcox'],parameters=stat.boxcox(df['Fare']+1)
plot_data(df,'Fare_Boxcox')
###Output
_____no_output_____ |
UniFiCourseSpring2020/gotchas.ipynb | ###Markdown
<img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg" alt="UniFI logo" style="float: left; width: 20%; height: 20%;"> Massimo Nocentini, PhD.May 27, 2020: initAbstractSome tips, tricks and gotchas, in particular.
###Code
__AUTHORS__ = {'am': ("Andrea Marino",
"[email protected]",),
'mn': ("Massimo Nocentini",
"[email protected]",
"https://github.com/massimo-nocentini/",)}
__KEYWORDS__ = ['Python', 'Jupyter', 'gotchas', 'keynote',]
###Output
_____no_output_____
###Markdown
Tips, tricks and gotchasThis lecture addresses some gotchas that could arise in daily programming; moreover, at the beginning we will introduce some helpful objects that could make coding easier. First of all, some imports as usual:
###Code
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 8)
###Output
_____no_output_____
###Markdown
A grouping pattern, avoiding *quadratic* time Assume we have two lists that have to be related in some way, namely using a predicate $P$. In the following example we want to build a list of all pairs (boy, girl) such that their names start with the same letter. Here is the input:
###Code
girls = ['alice', 'allie', 'bernice', 'brenda', 'clarice', 'cilly']
boys = ['chris', 'christopher', 'arald', 'arnold', 'bob']
###Output
_____no_output_____
###Markdown
the bad way, quadratic time:
###Code
[(b, g) for b in boys for g in girls if b[0] == g[0]]
###Output
_____no_output_____
###Markdown
there is a better approach avoiding quadratic time, toward [`defaultdict`][dd]:[dd]:https://docs.python.org/3/library/collections.htmldefaultdict-objects
###Code
letterGirls = {}
for girl in girls:
letterGirls.setdefault(girl[0], []).append(girl)
[(b, g) for b in boys for g in letterGirls[b[0]]]
###Output
_____no_output_____
###Markdown
However there is an even better solution, as pointed out in the [example][e] subsection of the previous link: use `defaultdict` instead of repeatedly calling the `setdefault` method for each new key. From the official documentation:[e]:https://docs.python.org/3/library/collections.htmldefaultdict-examples
###Code
>>> s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
>>> d = defaultdict(list)
>>> for k, v in s:
... d[k].append(v)
...
>>> list(d.items())
[('blue', [2, 4]), ('red', [1]), ('yellow', [1, 3])]
###Output
_____no_output_____
###Markdown
The *Bunch* pattern A very good book on algorithms implemented in Python is the one by Magnus Hetland, https://www.apress.com/gp/book/9781484200568, with the companion Github repository https://github.com/apress/python-algorithms-14. Hetland, pag. 34, proposes the following pattern to build a container of properties in order to avoid a vanilla dict (adjusting from item 4.18 of Alex Martelli's [*Python Cookbook*][cb]):[cb]:http://shop.oreilly.com/product/9780596007973.do
###Code
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
>>> T = Bunch
>>> t = T(left=T(left="a", right="b"), right=T(left="c"))
>>> t.left
>>> t.left.right
>>> t['left']['right']
>>> "left" in t.right
"right" in t.right
###Output
_____no_output_____
###Markdown
However, inheriting from `dict` is discouraged by Alex:>A further tempting but not fully sound alternative is to have the Bunch class inherit from `dict`, and set attribute access special methods equal to the item access special methods, as follows: class DictBunch(dict): __getattr__ = dict.__getitem__ __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__>One problem with this approach is that, with this definition, an instance x of `DictBunch` has many attributes it doesn't really have, because it inherits all the attributes (methods, actually, but there's no significant difference in this context) of `dict`. So, you can't meaningfully check `hasattr(x, someattr)`, as you could with the classes `Bunch` and `EvenSimplerBunch` (which sets the dictionary directly, without using `update`) previously shown, unless you can somehow rule out the value of someattr being any of several common words such as `keys`, `pop`, and `get`. Python's distinction between attributes and items is really a wellspring of clarity and simplicity. Unfortunately, many newcomers to Python wrongly believe that it would be better to confuse items with attributes, generally because of previous experience with JavaScript and other such languages, in which attributes and items are regularly confused. But educating newcomers is a much better idea than promoting item/attribute confusion. Alex's original definition reads as follows:
###Code
class Bunch(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
###Output
_____no_output_____
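###Markdown
To see Alex's point about `hasattr`, here is a small sketch (added for illustration) using the `DictBunch` variant quoted above: every `dict` method shows up as an "attribute" of the instance.
###Code
class DictBunch(dict):
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

x = DictBunch(left='a')
print(hasattr(x, 'left'))                                        # True, as expected
print(hasattr(x, 'keys'), hasattr(x, 'pop'), hasattr(x, 'get'))  # all True, inherited from dict
###Output
_____no_output_____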
###Markdown
It is interesting to observe that the Bunch idiom has been merged into the *standard library*, starting from Python **3.3**, under the name [`SimpleNamespace`][sn]:[sn]:https://docs.python.org/3/library/types.htmltypes.SimpleNamespace
###Code
from types import SimpleNamespace
x, y = 32, 64
point = SimpleNamespace(datum=y, squared=y*y, coord=x)
point
point.datum, point.squared, point.coord
[i for i in point]
###Output
_____no_output_____
###Markdown
If you need `point` to be iterable, use the structured object [`namedtuple`][nt] instead.[nt]:https://docs.python.org/3/library/collections.htmlcollections.namedtuple Python's `list.append` isn't Lisp's `cons` Python `list` objects behave like `stack` objects, such that it is *cheap* to `append` and `pop` at the *top*, which is the *right* end. On the other hand, Lisp `pair` objects allow us to *easily* `cons` on the *beginning*, the very *opposite* direction.
###Code
def fast_countdown(count):
nums = []
for i in range(count):
nums.append(i)
nums.reverse()
return nums
def slow_countdown(count):
nums = []
for i in range(count):
nums.insert(0, i)
return nums
def printer(lst, chunk=10):
print("{}...{}".format(" ".join(map(str, lst[:chunk])),
" ".join(map(str, lst[-chunk:]))))
%timeit nums = fast_countdown(10**5)
%timeit nums = slow_countdown(10**5)
###Output
1.61 s ± 13.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Citing Hetland, pag 11:> Python lists aren't really lists in the traditional computer science sense of the word, and that explains the puzzle of why append is so much more efficient than insert. A classical list - a so-called linked list - is implemented as a series of nodes, each (except for the last) keeping a reference to the next. The underlying implementation of Python's list type is a bit different. Instead of several separate nodes referencing each other, a list is basically a single, contiguous slab of memory - what is usually known as an array. This leads to some important differences from linked lists. For example, while iterating over the contents of the list is equally efficient for both kinds (except for some overhead in the linked list), directly accessing an element at a given index is much more efficient in an array. This is because the position of the element can be calculated, and the right memory location can be accessed directly. In a linked list, however, one would have to traverse the list from the beginning. The difference we've been bumping up against, though, has to do with insertion. In a linked list, once you know where you want to insert something, insertion is cheap; it takes roughly the same amount of time, no matter how many elements the list contains. That's not the case with arrays: An insertion would have to move all elements that are to the right of the insertion point, possibly even moving all the elements to a larger array, if needed. A specific solution for appending is to use what's often called a dynamic array, or vector. The idea is to allocate an array that is too big and then to reallocate it in linear time whenever it overflows. It might seem that this makes the append just as bad as the insert. In both cases, we risk having to move a large number of elements. The main difference is that it happens less often with the append. In fact, if we can ensure that we always move to an array that is bigger than the last by a fixed percentage (say 20 percent or even 100 percent), the average cost, amortized over many appends, is constant. enhance with `deque` objects `deque` implements *FIFO* queues: they are as cheap to append to the right as a normal `list`, but they also allow us to *cheaply* insert at the *front*.
###Code
from collections import deque
def enhanced_slow_countdown(count):
nums = deque()
for i in range(count):
nums.appendleft(i)
return nums
%timeit nums = enhanced_slow_countdown(10**5)
###Output
5.19 ms ± 159 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Hidden squares: concerning `list`s and `set`s
###Code
from random import randrange
max_value = 10000
checks = 1000
L = [randrange(max_value) for i in range(checks)]
%timeit [randrange(max_value) in L for _ in range(checks)]
S = set(L) # convert the list to a set object.
%timeit [randrange(max_value) in S for _ in range(checks)]
###Output
439 µs ± 31.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Hetland's words, pag. 35:>They're both pretty fast, and it might seem pointless to create a set from the list—unnecessary work, right? Well, it depends. If you're going to do many membership checks, it might pay off, because membership checks are linear for lists and constant for sets. What if, for example, you were to gradually add values to a collection and for each step check whether the value was already added? [...] Using a list would give you quadratic running time, whereas using a set would be linear. That's a huge difference. **The lesson is that it's important to pick the right built-in data structure for the job.**
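###Markdown
A small sketch (added for illustration) of the "gradually add and check" scenario from the quote above: the list version does a linear scan per check, the set version a constant-time lookup.
###Code
def add_unique_list(values):
    seen = []
    for v in values:
        if v not in seen:   # O(len(seen)) scan -> quadratic overall
            seen.append(v)
    return seen

def add_unique_set(values):
    seen = set()
    for v in values:
        if v not in seen:   # O(1) average lookup -> linear overall
            seen.add(v)
    return seen

values = [randrange(max_value) for _ in range(10**4)]
%timeit add_unique_list(values)
%timeit add_unique_set(values)
###Output
_____no_output_____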
###Code
lists = [[1, 2], [3, 4, 5], [6]]
sum(lists, [])
###Output
_____no_output_____
###Markdown
Hetland, pag.36:>This works, and it even looks rather elegant, but it really isn't. You see, under the covers, the sum function doesn't know all too much about what you're summing, and it has to do one addition after another. That way, you're right back at the quadratic running time of the += example for strings. Here's a better way: Just try timing both versions. As long as `lists` is pretty short, there won't be much difference, but it shouldn't take long before the sum version is thoroughly beaten.
###Code
res = []
for lst in lists:
res.extend(lst)
res
###Output
_____no_output_____
###Markdown
try to do that with more populated lists... concerning `string`s
###Code
def string_producer(length):
return ''.join([chr(randrange(ord('a'), ord('z'))) for _ in range(length)])
%%timeit
s = ""
for chunk in string_producer(10**5):
s += chunk
###Output
74.4 ms ± 5.29 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
maybe some optimization is performed because `s` is a `string` object.
###Code
%%timeit
chunks = []
for chunk in string_producer(10**5):
chunks.append(chunk)
s = ''.join(chunks)
###Output
61.5 ms ± 1.27 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
a better approach using constant `append` to the top
###Code
%timeit s = ''.join(string_producer(10**5))
###Output
60.1 ms ± 2.26 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
maybe a little better since it doesn't loop with `for` explicitly. Counting Max permutation>Eight persons with very particular tastes have bought tickets to the movies. Some of them are happy withtheir seats, but most of them are not. Let’s say each of them has a favorite seat, and you want to find a way to let them switch seats to make as many people as possible happy with the result. However, all of them refuse to move to another seat if they can’t get their favorite.The following function `max_perm` computes the maximum permutation that can be applied given a desired one; namely,it produces a new permutation that moves as many elements as it can, in order to ensure the `one-to-one` property -- no one in the set points outside it, and each seat (in the set) is pointedto exactly once. It can be seen as a function that *fixes* a given permutation according to the required behavior.
###Code
def perm_isomorphism(M, domain):
iso = dict(enumerate(domain))
return [iso[M[i]] for i in range(len(M))]
def fix_perm(M, fix):
return [M[i] if i in fix else i for i in range(len(M))]
###Output
_____no_output_____
###Markdown
The following is a naive implementation, recursive but in $\mathcal{O}(n^{2})$, where $n$ is the permutation length.
###Code
def naive_max_perm(M, A=None):
'''
Fix a permutation such that it is one-to-one and maximal, recursively.
consumes:
M - a permutation as a list of integers
A - a set of positions allowed to move
produces:
a set `fix` such that makes M maximal, ensuring to be one-to-one
'''
if A is None: A = set(range(len(M))) # init to handle first invocation, all elems can move
if len(A) == 1: return A # recursion base, unary perm can move, trivial
B = set(M[i] for i in A) # b in B iff b is desired by someone
C = A - B # c in C iff c isn't desired, so discard it
return naive_max_perm(M, A - C) if C else A # recur with desired position only
I = range(8) # the identity permutation
letters = "abcdefgh"
perm_isomorphism(I, letters)
M = [2, 2, 0, 5, 3, 5, 7, 4]
perm_isomorphism(M, letters)
fix = naive_max_perm(M)
max_M = fix_perm(M, fix)
perm_isomorphism(max_M, letters)
###Output
_____no_output_____
###Markdown
Hetland, pag. 78:>The function `naive_max_perm` receives a set `A` of remaining people and creates a set `B` of seats that are pointed to. If it finds an element in `A` that is not in `B`, it removes the element and solves the remaining problem recursively. Let's use the implementation on our example, M = `[2, 2, 0, 5, 3, 5, 7, 4]`:
###Code
naive_max_perm(M)
###Output
_____no_output_____
###Markdown
>So, a, c, and f can take part in the permutation. The others will have to sit in nonfavorite seats. The implementation isn't too bad. The handy set type lets us manipulate sets with ready-made high-level operations, rather than having to implement them ourselves. There are some problems, though. For one thing, we might want an iterative solution. [...] A worse problem, though, is that the algorithm is quadratic! (Exercise 4-10 asks you to show this.) The most wasteful operation is the repeated creation of the set B. If we could just keep track of which chairs are no longer pointed to, we could eliminate this operation entirely. One way of doing this would be to keep a count for each element. We could decrement the count for chair x when a person pointing to x is eliminated, and if x ever got a count of zero, both person and chair x would be out of the game.>This idea of reference counting can be useful in general. It is, for example, a basic component in many systems for garbage collection (a form of memory management that automatically deallocates objects that are no longer useful). You'll see this technique again in the discussion of topological sorting. >There may be more than one element to be eliminated at any one time, but we can just put any new ones we come across into a "to-do" list and deal with them later. If we needed to make sure the elements were eliminated in the order in which we discover that they're no longer useful, we would need to use a first-in, first-out queue such as the deque class (discussed in Chapter 5). We don't really care, so we could use a set, for example, but just appending to and popping from a list will probably give us quite a bit less overhead. But feel free to experiment, of course.
###Code
def max_perm(M):
n = len(M) # How many elements?
A = set(range(n)) # A = {0, 1, ... , n-1}
count = Counter(M) # desired positions by frequencies
Q = deque([i for i in A if not count[i]]) # useless elements
while Q: # While useless elts. left...
i = Q.pop() # get one of them
A.remove(i) # remove it from the maximal permutation
j = M[i] # get its desired position
count[j] -= 1 # and release it for someone else
if not count[j]: # if such position isn't desired anymore
Q.appendleft(j) # enqueue such position in order to discard it
return A
fix = max_perm(M)
max_M = fix_perm(M, fix)
perm_isomorphism(max_M, letters)
###Output
_____no_output_____
###Markdown
Counting Sort Hetland, pag 85:>By default, I'm just sorting objects based on their values. By supplying a key function, you can sort by anything you'd like. Note that the keys must be integers in a limited range. If this range is $0\ldots k-1$, running time is then $\mathcal{O}(n + k)$. (Note that although the common implementation simply counts the elements and then figures out where to put them in `B`, Python makes it easy to just build value lists for each key and then concatenate them.) If several values have the same key, they'll end up in the original order with respect to each other. Sorting algorithms with this property are called *stable*.
###Code
def counting_sort(A, key=None, sort_boundary=None):
'''
Sorts the given collection A in linear time, assuming their elements are hashable.
This implementation implements a vanilla counting sort, working in linear time respect
iterable length and spacing between objects. It works best if elements are evenly, namely
*uniformly* distributed in the domain; on contrast, if they are sparse and concentrated
near accumulation points, traversing distances between them is time consuming.
If `sort_boundary` is instantiated to a float within [0,1], then the domain is ordered
using a classic loglinear algorithm before building the result.
'''
if key is None: key = lambda x: x
B, C = [], defaultdict(list)
for x in A:
C[key(x)].append(x)
domain = sorted(C) if sort_boundary and len(C) <= len(A)*sort_boundary \
else range(min(C), max(C)+1)
for k in domain:
B.extend(C[k])
return B
A = [randrange(50) for i in range(2*10**3)]
assert sorted(A) == counting_sort(A)
n, bins, patches = plt.hist(A, 10, facecolor='green', alpha=0.5)
plt.xlabel('elements'); plt.ylabel('frequencies'); plt.grid(True)
plt.show()
%timeit counting_sort(A)
%timeit counting_sort(A, sort_boundary=1)
B = ([randrange(50) for i in range(10**3)] +
[10**4 + randrange(50) for i in range(10**3)])
n, bins, patches = plt.hist(B, 100, facecolor='green', alpha=0.5)
plt.xlabel('elements'); plt.ylabel('frequencies'); plt.grid(True)
plt.show()
assert sorted(B) == counting_sort(B)
%timeit counting_sort(B)
%timeit counting_sort(B, sort_boundary=1/8)
###Output
247 µs ± 20.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
|
notebooks/glider/glider_test.ipynb | ###Markdown
Glider
###Code
import iris
iris.FUTURE.netcdf_promote = True
url = ('http://tds.marine.rutgers.edu:8080/thredds/dodsC/'
'cool/glider/mab/Gridded/20130911T000000_20130920T000000_gp2013_modena.nc')
glider = iris.load(url)
lon = glider.extract_strict('Longitude').data
lat = glider.extract_strict('Latitude').data
glider = glider.extract_strict('Temperature')
depth = glider.coord('depth').points
import numpy as np
import numpy.ma as ma
import seawater as sw
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from utilities import time_coord
%matplotlib inline
def plot_glider(cube, mask_topo=False, track_inset=False, **kw):
"""Plot glider cube."""
cmap = kw.pop('cmap', plt.cm.rainbow)
data = ma.masked_invalid(cube.data.squeeze())
t = time_coord(cube)
#t = t.units.num2date(t.points.squeeze())
dist, pha = sw.dist(lat, lon, units='km')
dist = np.r_[0, np.cumsum(dist)]
dist, z = np.broadcast_arrays(dist[..., None], depth)
try:
z_range = cube.coord(axis='Z').attributes['actual_range']
except KeyError:
z_range = z.min(), z.max()
try:
data_range = cube.attributes['actual_range']
except KeyError:
data_range = data.min(), data.max()
condition = np.logical_and(data >= data_range[0], data <= data_range[1])
data = ma.masked_where(~condition, data)
condition = np.logical_and(z >= z_range[0], z <= z_range[1])
z = ma.masked_where(~condition, z)
fig, ax = plt.subplots(figsize=(9, 3.75))
cs = ax.pcolor(dist, z, data, cmap=cmap, snap=True, **kw)
if mask_topo:
h = z.max(axis=1)
x = dist[:, 0]
ax.plot(x, h, color='black', linewidth='0.5', zorder=3)
ax.fill_between(x, h, y2=h.max(), color='0.9', zorder=3)
#ax.set_title('Glider track from {} to {}'.format(t[0], t[-1]))
fig.tight_layout()
if track_inset:
axin = inset_axes(ax, width="25%", height="30%", loc=4)
axin.plot(lon, lat, 'k.')
start, end = (lon[0], lat[0]), (lon[-1], lat[-1])
kw = dict(marker='o', linestyle='none')
axin.plot(*start, color='g', **kw)
axin.plot(*end, color='r', **kw)
axin.axis('off')
return fig, ax, cs
###Output
_____no_output_____
###Markdown
Models
###Code
from utilities import CF_names, quick_load_cubes
models = dict(useast=('http://ecowatch.ncddc.noaa.gov/thredds/dodsC/'
'ncom_us_east_agg/US_East_Apr_05_2013_to_Current_best.ncd'),
hycom=('http://ecowatch.ncddc.noaa.gov/thredds/dodsC/'
'hycom/hycom_reg1_agg/HYCOM_Region_1_Aggregation_best.ncd'),
sabgom=('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/'
'fmrc/sabgom/SABGOM_Forecast_Model_Run_Collection_best.ncd'),
coawst=('http://geoport.whoi.edu/thredds/dodsC/'
'coawst_4/use/fmrc/coawst_4_use_best.ncd'))
name_list = CF_names['sea_water_temperature']
coawst = quick_load_cubes(models['coawst'], name_list, strict=True)
useast = quick_load_cubes(models['useast'], name_list, strict=True)
hycom = quick_load_cubes(models['hycom'], name_list, strict=True)
from datetime import datetime
from utilities import proc_cube
# Glider info.
start = glider.coord(axis='T').attributes['minimum']
stop = glider.coord(axis='T').attributes['maximum']
start = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
stop = datetime.strptime(stop, '%Y-%m-%d %H:%M:%S')
bbox = lon.min(), lat.min(), lon.max(), lat.max()
# Subsetting the cube to the glider limits.
coawst = proc_cube(coawst, bbox=bbox, time=(start, stop), units=glider.units)
useast = proc_cube(useast, bbox=bbox, time=(start, stop), units=glider.units)
hycom = proc_cube(hycom, bbox=bbox, time=(start, stop), units=glider.units)
coawst, useast, hycom
for aux in coawst.aux_factories:
coawst.remove_aux_factory(aux)
from iris.analysis import trajectory
sample_points = [('latitude', lat),
('longitude', lon),
('time', glider.coord(axis='T').points)]
depth = glider.coord('depth').points
fig, ax, cs = plot_glider(glider, mask_topo=False, track_inset=True)
iuseast = trajectory.interpolate(useast, sample_points)
iuseast.transpose()
depth = -iuseast.coord(axis='Z').points
fig, ax, cs = plot_glider(iuseast, mask_topo=False, track_inset=True)
ax.set_ylim(-120, 0)
t = ax.set_title("USEAST")
ihycom = trajectory.interpolate(hycom, sample_points)
ihycom.transpose()
depth = -ihycom.coord(axis='Z').points
fig, ax, cs = plot_glider(ihycom, mask_topo=False, track_inset=True)
ax.set_ylim(-120, 0)
t = ax.set_title("HYCOM")
icoawst = trajectory.interpolate( coawst, sample_points)
icoawst.transpose()
depth = -icoawst.coord(axis='Z').points
fig, ax, cs = plot_glider(icoawst, mask_topo=False, track_inset=True)
ax.set_ylim(-120, 0)
t = ax.set_title("COAWST")
###Output
_____no_output_____ |
.ipynb_checkpoints/sample_windows3-checkpoint.ipynb | ###Markdown
Chapter 8 - Pointers 1.1 Memory Addresses and the Address Operator & The Concept of an Address**Every byte of memory space has its own unique address.** This is just like every unit in an apartment building having its own unit number. Just as you find a home by its apartment number, you can locate a position in memory by its address. Memory addresses start at 0 and increase by 1 for every byte. **A memory address, together with the variable name of a storage location, is another way of referring to that storage location.** Using these address values lets you write more convenient and flexible programs. However, mishandling memory addresses can cause serious problems for the system. It can also be a bit difficult for someone learning about memory addresses for the first time.
###Code
co1 = CompileOutputOnly('exer8_1')
cio1 = CompileInputOuput('exer8_9')
saq1 = ShortAnswerQuestion('(1) 메모리 공간은 바이트마다 고유한 ____(이)가 있다.', ['주소', '주소값', 'address', 'Address'], ' 주소를 말한다.', ' 주소를 이용하여 메모리의 위치를 파악할 수 있다.')
cq1 = ChoiceQuestion("""(2) 배열 선언
double a[] = {2, 4, 5, 7, 8, 9};
에서 *a와 *(a+2)의 참조값은 각각 무엇인가?
""", ['4, 7', '2, 5', '5, 8', '2, 4'], 1, ' 인덱스는 0부터 시작한다.', ' *a는 2, *(a+2)는 5이다.')
cq2 = ChoiceQuestion("""다음은 여러 포인터와 선언에 대한 설명이다. 다음 중에서 잘못 설명하고 있는 것은 무엇인가?""", ['double형 포인터 선언: double *pd;', 'int형 포인터 원소 4개인 배열 선언: int *p[4];', '일차원 배열 int a[3]의 배열 포인터 선언: int *p;', '이차원 배열 int b[3][4]의 배열 포인터 선언: int *p[3][4];'], 3, ' 이차원 배열 포인터는 *를 두개 붙여 선언한다.', ' int **p로 선언한다.')
rate = AchieveRate()
add_link_buttons(1, 'sample_windows2.ipynb')
###Output
_____no_output_____ |
preprocessing/basic-preprocessing.ipynb | ###Markdown
Data Preprocessing*Prepared by:***Jude Michael Teves** Faculty, Software Technology Department College of Computer Studies - De La Salle University This notebook shows how to perform common preprocessing techniques in Python. Preliminaries Import libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('Set2')
# sns.color_palette('Set2')
###Output
_____no_output_____
###Markdown
Load data
###Code
df = pd.read_csv('https://raw.githubusercontent.com/Cyntwikip/data-repository/main/titanic.csv')
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 891 non-null int64
1 Survived 891 non-null int64
2 Pclass 891 non-null int64
3 Name 891 non-null object
4 Sex 891 non-null object
5 Age 714 non-null float64
6 SibSp 891 non-null int64
7 Parch 891 non-null int64
8 Ticket 891 non-null object
9 Fare 891 non-null float64
10 Cabin 204 non-null object
11 Embarked 889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.7+ KB
###Markdown
Preprocessing Text transformationsBy accessing the `str` attribute of an object feature/column in Pandas, we can use the methods under string data type / object.
###Code
df['Name'].str.lower()
df['Name'].str.upper()
df['Name'].str.title()
df['Name'].str.split(',')
###Output
_____no_output_____
###Markdown
EncodingIn many cases, we need our data to be in numerical format, so how should we deal with datasets that contain categorical data? We can use different encoding strategies for that. One of these is the one-hot encoder. This encoding strategy creates one column for each unique value in the original column. We use this when there is no hierarchy in our categories.
###Code
df[['Embarked']]
df['Embarked'].value_counts()
df['Embarked'].isnull().sum()
###Output
_____no_output_____
###Markdown
Pandas get_dummiesOne approach for doing one-hot encoding is through Pandas' get_dummies function.
###Code
pd.get_dummies(df['Embarked'])
###Output
_____no_output_____
###Markdown
Sklearn OneHotEncoderAnother approach for doing one-hot encoding is through sklearn's OneHotEncoder class.
###Code
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
df_encoded = encoder.fit_transform(df[['Embarked']]).toarray()
df_encoded = pd.DataFrame(df_encoded, columns=encoder.categories_)
df_encoded
df_encoded.sum(axis=0)
###Output
_____no_output_____
###Markdown
Notice that there are 4 columns here instead of 3. This is because it also creates a new column (the last one) for the null values.Additionally, we can transform it back to its original form.
###Code
pd.DataFrame(encoder.inverse_transform(df_encoded))
###Output
_____no_output_____
###Markdown
BinningBinning converts a continuous feature into a categorical one by chunking/binning the values. This is somewhat like the opposite of one-hot encoding.
###Code
df['Fare']
df['Fare'].describe()
fig, ax = plt.subplots(1,1, figsize=(8,4), dpi=100)
df['Fare'].hist(bins=50, ax=ax)
plt.show()
###Output
_____no_output_____
###Markdown
Manual cuts
###Code
bins = [0, 50, 100, 200, 400]
# Create Group Names
group_names = ['0-49.99','50-99.99','100-199.99','200-399.99']
fare_binned = pd.cut(df['Fare'], bins, labels=group_names, include_lowest=True) # to include the leftmost value in the bins
fare_binned.head()
fare_binned.value_counts()
fare_binned[fare_binned.isnull()]
df['Fare'][fare_binned.isnull()]
###Output
_____no_output_____
###Markdown
Cuts with equal spacing
###Code
fare_binned = pd.cut(df['Fare'], 5)
fare_binned.head()
###Output
_____no_output_____
###Markdown
Handling missing data DroppingIf there is not much null values, we can simply drop them.
###Code
df.info()
df.dropna().info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 183 entries, 1 to 889
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 183 non-null int64
1 Survived 183 non-null int64
2 Pclass 183 non-null int64
3 Name 183 non-null object
4 Sex 183 non-null object
5 Age 183 non-null float64
6 SibSp 183 non-null int64
7 Parch 183 non-null int64
8 Ticket 183 non-null object
9 Fare 183 non-null float64
10 Cabin 183 non-null object
11 Embarked 183 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 18.6+ KB
###Markdown
In this example, though, many rows were omitted. ImputationAnother approach is to impute or fill in missing values instead. We can change the imputation strategy to mean, median, most frequent, or constant. We will use most frequent since it can handle categorical data.
###Code
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
df_imputed = pd.DataFrame(imputer.fit_transform(df), columns=df.columns)
df_imputed
###Output
_____no_output_____
###Markdown
No more missing values now!
###Code
df_imputed.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 891 non-null object
1 Survived 891 non-null object
2 Pclass 891 non-null object
3 Name 891 non-null object
4 Sex 891 non-null object
5 Age 891 non-null object
6 SibSp 891 non-null object
7 Parch 891 non-null object
8 Ticket 891 non-null object
9 Fare 891 non-null object
10 Cabin 891 non-null object
11 Embarked 891 non-null object
dtypes: object(12)
memory usage: 83.7+ KB
###Markdown
Feature SelectionSometimes, we don't need some of the features, so we simply drop them.
###Code
df_selected = df.drop(['PassengerId', 'Name'], axis=1)
df_selected
###Output
_____no_output_____
###Markdown
Here's a one-liner to remove categorical features, which I believe will be very useful in many cases.
###Code
df_selected = df.loc[:, df.dtypes!='object']
df_selected
###Output
_____no_output_____
###Markdown
ScalingThere are times wherein we will have to rescale our data especially when dealing with Machine Learning.
###Code
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
scaled = scaler.fit_transform(df_selected)
df_scaled = pd.DataFrame(scaled, columns=df_selected.columns)
df_scaled
###Output
_____no_output_____
###Markdown
Now our minimum and maximum values are 0 and 1.
###Code
df_scaled.describe()
###Output
_____no_output_____
###Markdown
We can also use other scaler techniques such as standard scaler, which standardizes the data by scaling it based on its mean and standard deviation.
###Code
scaler = StandardScaler()
scaled = scaler.fit_transform(df_selected)
df_scaled = pd.DataFrame(scaled, columns=df_selected.columns)
df_scaled
###Output
_____no_output_____
###Markdown
Now, the data is standardized.
###Code
df_scaled.describe()
###Output
_____no_output_____
###Markdown
GroupingWe can group by a specific feature/column and chain it with whatever aggregation function we would like to use.
###Code
df['Pclass'].value_counts()
df.groupby('Pclass').sum()
df.groupby('Pclass').mean()
###Output
_____no_output_____
###Markdown
Using mathematical operations and functions
###Code
df_copy = df.copy()
df_copy['Fare_transformed'] = df['Fare']*2
df_copy[['Fare', 'Fare_transformed']]
df_copy['Fare_log'] = np.log(df_copy['Fare'])
df_copy[['Fare', 'Fare_log']]
###Output
C:\Anaconda\envs\dsi\lib\site-packages\pandas\core\arraylike.py:358: RuntimeWarning: divide by zero encountered in log
result = getattr(ufunc, method)(*inputs, **kwargs)
###Markdown
Using custom functions
###Code
def func(x):
if x < 50:
return 'low'
elif x < 100:
return 'medium'
else:
return 'high'
df_copy['Fare_custom_func'] = df['Fare'].apply(func)
df_copy[['Fare', 'Fare_custom_func']]
###Output
_____no_output_____ |
Fundamentals of Python (Edited).ipynb | ###Markdown
Fundamentals of Python **Python Variables**
###Code
x = float(1)
a, b = 0, -1
a, b, c = "Sally", "John", "Ana"
print('This is a sample')
print(a)
print(c)
###Output
This is a sample
Sally
Ana
###Markdown
**Casting**
###Code
print(x)
###Output
1.0
###Markdown
**Type() Function**
###Code
y = "Johnny"
print(type(y))
print(type(x))
###Output
<class 'str'>
<class 'float'>
###Markdown
**Double Quotes and Single Quotes**
###Code
#h = "Maria"
h = 'Maria'
v = 1
V = 3
print(h)
print(v)
print(v+1)
print(V)
###Output
Maria
1
2
3
###Markdown
**Multiple Variables with Different Value**
###Code
x, y, z = "one", "two", 'three'
print(x)
print(y)
print(z)
print(x, y, z)
###Output
one
two
three
one two three
###Markdown
**One Value to Multiple Variables**
###Code
x = y = z = "Stella"
print(x, y, z)
print(x)
print(y)
print(z)
###Output
Stella Stella Stella
Stella
Stella
Stella
###Markdown
**Output Variables**
###Code
x = "enjoying"
y = "Python is"
print("Python is " + x)
print(y + " " + x)
###Output
Python is enjoying
Python is enjoying
###Markdown
**Arithmetic Operations**
###Code
f = 2
g = 4
i = 6
print(f+g)
print(f-g)
print(f*i)
print(int(i/g))
print(3/g)
print(3%g)
print(3//g)
print(3**6)
###Output
6
-2
12
1
0.75
3
0
729
###Markdown
**Assignment Operators**
###Code
k = 2
l = 3
k+=3 #same as k=k+3
print(k)
print(l>>1)
###Output
5
1
###Markdown
**Bitwise Operators**
###Code
k = 5
l = 10
print(k>>2) #shift right twice
print(k<<2) #shift left twice
###Output
1
20
###Markdown
**Relational Operators**
###Code
v=1
k=2
print(v>k)
print(v==k)
###Output
False
False
###Markdown
**Logical Operators**
###Code
print(v<k and k==k)
print(v<k or k==v)
print(not (v<k or k==v))
###Output
True
True
False
###Markdown
**Identity Operators**
###Code
print(v is k)
print(v is not k)
###Output
False
True
|
Titanic-Classification-Problem-EDA&Preprocessing.ipynb | ###Markdown
Data Exploring
###Code
data.head()
###Output
_____no_output_____
###Markdown
Visualizing null values.
###Code
sns.heatmap(data.isnull(), yticklabels=False, cbar=False, cmap= 'viridis')
###Output
_____no_output_____
###Markdown
- Fare column has only one null value.- Age column has many null values.- Cabin column has a majority of null values.- Survived column has null values for the test data.
###Code
data.info()
###Output
_____no_output_____
###Markdown
Is data balanced?
###Code
sns.countplot(data = data, x= 'Survived')
###Output
_____no_output_____
###Markdown
Which gender had the higher survival rate?
###Code
sns.countplot(data = data, x= 'Survived', hue= 'Sex')
plt.legend(loc =(1.1,0.9)),
###Output
_____no_output_____
###Markdown
Does first class have a higher survival rate?
###Code
sns.countplot(data = data, x='Survived', hue='Pclass')
###Output
_____no_output_____
###Markdown
The distribution of passengers' age.
###Code
sns.distplot(data['Age'].dropna(), kde = False, bins = 35)
###Output
_____no_output_____
###Markdown
The distribution of number of siblings.
###Code
sns.countplot(x = 'SibSp', data = data)
###Output
_____no_output_____
###Markdown
Number of passengers in each class.
###Code
sns.countplot(data= data.dropna(), x='Pclass')
###Output
_____no_output_____
###Markdown
Proportion of each gender in different classes.
###Code
sns.countplot(data= data, x='Pclass', hue= 'Sex')
###Output
_____no_output_____
###Markdown
Ticket fare for each class.
###Code
sns.boxplot(data= data.dropna(), x='Pclass', y= 'Fare')
data.describe()
###Output
_____no_output_____
###Markdown
Data cleaning Fill missing values in Age with the median age for the corresponding class
###Code
class_mean_age = data.pivot_table(values='Age', index='Pclass', aggfunc='median')
null_age = data['Age'].isnull()
data.loc[null_age,'Age'] = data.loc[null_age,'Pclass'].apply(lambda x: class_mean_age.loc[x] )
data.Age.isnull().sum()
###Output
_____no_output_____
###Markdown
Fill the missing value in Fare with the median fare for the corresponding class.
###Code
class_mean_fare = data.pivot_table(values= 'Fare', index= 'Pclass', aggfunc='median')
null_fare = data['Fare'].isnull()
data.loc[null_fare, 'Fare'] = data.loc[null_fare, 'Pclass'].apply(lambda x: class_mean_fare.loc[x] )
data.Fare.isnull().sum()
###Output
_____no_output_____
###Markdown
Fill the missing values in Embarked with the most common port overall ('S').
###Code
data.Embarked.value_counts()
data['Embarked'] = data.Embarked.fillna('S')
data.Embarked.isnull().sum()
###Output
_____no_output_____
###Markdown
Feature Engineering Create New features Create a new feature with the title of each passenger.
###Code
data['Title'] = data.Name.apply(lambda x : x[x.find(',')+2:x.find('.')])
data.Title.value_counts()
###Output
_____no_output_____
###Markdown
We can see that only 4 titles have a significant frequency and the others are repeated only 8 times or less. So, we will combine all titles with a small frequency under one title (say, Other).
###Code
rare_titles = (data['Title'].value_counts() < 10)
data['Title'] = data['Title'].apply(lambda x : 'Other' if rare_titles.loc[x] == True else x)
###Output
_____no_output_____
###Markdown
Create a new feature for the family size This feature combines the number of siblings and parents/children (SibSp and Parch) +1 (The passenger himself).
###Code
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1
###Output
_____no_output_____
###Markdown
Create a new feature to indicate whether the passenger was alone.
###Code
data['IsAlone'] = 0
data['IsAlone'].loc[ data['FamilySize'] == 1] = 1
###Output
_____no_output_____
###Markdown
Create a new feature by discretizing Age into buckets/bins Age is discretized into 4 bins coresponding to 4 stages of human life:1. Childhood.2. Adolescence.3. Adulthood.4. Old Age. Check this link for more details: https://bit.ly/2LkPFPf
###Code
data['AgeBins'] = 0
data['AgeBins'].loc[(data['Age'] >= 11) & (data['Age'] < 20)] = 1
data['AgeBins'].loc[(data['Age'] >= 20) & (data['Age'] < 60)] = 2
data['AgeBins'].loc[data['Age'] >= 60] = 3
###Output
_____no_output_____
###Markdown
Create new feature by discretizing Fare into 4 buckets/bins based on quantiles.
###Code
data['FareBins'] = pd.qcut(data['Fare'], 4)
###Output
_____no_output_____
###Markdown
Drop unused columns from data. 1. Some features, such as PassengerId, Name and Ticket, are expected to have no effect on the classification. 2. Also, some features have too many missing values, such as Cabin, which renders them useless. 3. We'll also drop the original features we used to create the new features because there will be high correlation between these features which may confuse the model about feature importance.
###Code
data.columns
data.drop(columns=['PassengerId','Name','Ticket', 'Cabin', 'Age', 'Fare', 'SibSp', 'Parch'], inplace= True)
###Output
_____no_output_____
###Markdown
Convert qualitative features into numeric form. Convert categorical features (Embarked, Sex, Title) to numerical features and drop one dummy variable for each.
###Code
data = pd.get_dummies(
data, columns=['Embarked', 'Sex', 'Title'], drop_first=True)
###Output
_____no_output_____
###Markdown
Convert qualitative ordinal features (FareBins) into numeric form.
###Code
label = LabelEncoder()
data['FareBins'] = label.fit_transform(data['FareBins'])
data.head(7)
###Output
_____no_output_____
###Markdown
Splitting Data back to train/test sets.
###Code
#Final train data
train = data[data.source == 'train'].drop(columns = ['source']).reset_index(drop=True)
test = data[data.source == 'test'].drop(columns = ['source','Survived']).reset_index(drop=True)
train['Survived'] = train.Survived.astype('int64')
###Output
_____no_output_____
###Markdown
Rescaling features using different scalers Normalizing numeric features (Age, SibSp, Parch, FamilySize and Fare). We will try the following scalers and we'll select the best one:1. MinMaxScaler2. MaxAbsScaler3. StandardScaler4. RobustScaler5. Normalizer6. QuantileTransformer7. PowerTransformer
###Code
feature_to_scale = ['FamilySize']
scalers = {}
for i in feature_to_scale:
scaler = RobustScaler()
scaler.fit(train[[i]])
train[i] = scaler.transform(train[[i]])
test[i] = scaler.transform(test[[i]])
scalers.update({i:scaler})
scalers
###Output
_____no_output_____
###Markdown
Exporting modified train/test data to external file.
###Code
#Final Test data
train.to_csv('train_modified.csv', index = False)
test.to_csv('test_modified.csv', index = False)
passngerID.to_csv('ID.csv', index = False)
###Output
_____no_output_____ |
ml_basics/rdm010_neural_network/neural_network.ipynb | ###Markdown
What's a neural network
###Code
# This video explains very well what is a neural network,
# and also basically how it works via the hand-written digits recognition example.
from IPython.display import YouTubeVideo
YouTubeVideo('aircAruvnKk')
###Output
_____no_output_____
###Markdown
The neural network we will build in this postWe will again use the hand-written digits data to build a hand-written digit recognition neural network model in this post. As you can see from the NN model graph above, our NN has 3 layers: - An input layer: recall that each input hand-written digit is a 20 by 20 pixel image, which gives us 400 input layer units plus 1 bias unit which always outputs `+1`; - A hidden layer: which has 25 units (not counting the extra bias unit which always outputs `+1`); - An output layer: which has 10 output units (corresponding to the 10 digit classes); That is:$$\begin{cases} a^{(1)}.shape &= (401, 1) \\ \Theta^{(1)}.shape &= (25, 401) \\ z^{(2)} = \Theta^{(1)} a^{(1)} = (25,401)@(401,1) &= (25, 1) \\ \Theta^{(2)}.shape &= (10, 26) \\ z^{(3)} = \Theta^{(2)} a^{(2)} = (10, 26)@(26, 1) &= (10, 1)\end{cases}$$ Question: why does the hidden layer have 25 units? Hand-written digits recognition with neural network
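###Markdown
As a quick sanity check of the shapes above, here is a small illustrative sketch (added for clarity; note that the actual implementation below works with row-major batches, `X @ theta.T`, instead of single column vectors).
###Code
import numpy as np

# column-vector convention from the equations above (shapes only, weights set to zero)
a1 = np.zeros((401, 1))                 # input activation incl. bias unit
theta1 = np.zeros((25, 401))
z2 = theta1 @ a1                        # (25, 1)
a2 = np.vstack([np.ones((1, 1)), z2])   # prepend bias -> (26, 1); sigmoid omitted, only shapes matter here
theta2 = np.zeros((10, 26))
z3 = theta2 @ a2                        # (10, 1)
print(z2.shape, z3.shape)
###Output
_____no_output_____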
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Sets the backend of matplotlib to the 'inline' backend.
#
# With this backend, the output of plotting commands is displayed inline within frontends like the Jupyter notebook,
# directly below the code cell that produced it.
# The resulting plots will then also be stored in the notebook document.
#
# More details: https://stackoverflow.com/questions/43027980/purpose-of-matplotlib-inline
%matplotlib inline
from scipy.io import loadmat
data = loadmat(os.getcwd() + '/hand_written_digits.mat')
data
X = data['X']
y = data['y']
X.shape, y.shape
###Output
_____no_output_____
###Markdown
Use [one-hot encoding](https://en.wikipedia.org/wiki/One-hot) to encode the class labels[One-hot encoding](https://en.wikipedia.org/wiki/One-hot) projects class label $K_i$ to a $K$-length vector whose component at index $i$ is 1 and all other components are 0.
###Code
from sklearn.preprocessing import OneHotEncoder
onehot_encoder = OneHotEncoder(sparse=False)
y_onehot = onehot_encoder.fit_transform(y)
y_onehot.shape
y[0], y_onehot[0, :]
def sigmoid(x):
return 1 / (1 + np.exp(-x))
###Output
_____no_output_____
###Markdown
`forward_propagate` simply runs all the inputs through the neural network we defined, then returns the intermediate results and the final output.
###Code
def forward_propagate(X, theta1, theta2):
a1 = np.insert(X, 0, values=np.ones(X.shape[0]), axis=1)
z2 = a1 @ theta1.T
a2 = np.insert(sigmoid(z2), 0, values=np.ones(X.shape[0]), axis=1)
z3 = a2 @ theta2.T
h = sigmoid(z3)
return a1, z2, a2, z3, h
###Output
_____no_output_____
###Markdown
Define `cost` function (WITHOUT regularization item) to evaluate the loss of the network$$J(\theta) = -\frac{1}{n} \sum\limits_{i=1}^n \sum\limits_{k=1}^K \Big[ \ y_k^{(i)}\log\big( h_\theta(x^{(i)})_k \big) + \ (1 - y_k^{(i)}) \log\big( 1 - h_\theta(x^{(i)})_k \big) \Big]$$
###Code
def cost(num_of_hidden_layer_units, num_of_labels, X, y, alpha):
theta1 = (
np.random.random(
size=(num_of_hidden_layer_units, X.shape[1] + 1)
) - 0.5
) * 0.25
theta2 = (
np.random.random(
size=(num_of_labels, num_of_hidden_layer_units + 1)
) - 0.5
) * 0.25
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
J = 0.
for i in range(X.shape[0]):
part0 = np.multiply(y[i,:], np.log(h[i,:]))
part1 = np.multiply(1 - y[i,:], np.log(1 - h[i,:]))
J += np.sum(part0 + part1)
return -J/X.shape[0]
cost(25, 10, X, y_onehot, 1)
###Output
_____no_output_____
###Markdown
Define `cost` function (WITH regularization item) to evaluate the loss of the network$$J(\theta) = -\frac{1}{n} \sum\limits_{i=1}^n \sum\limits_{k=1}^K \Big[ \ y_k^{(i)}\log\big( h_\theta(x^{(i)})_k \big) + \ (1 - y_k^{(i)}) \log\big( 1 - h_\theta(x^{(i)})_k \big) \Big] + \ \frac{\alpha}{2n} \Big[ \ \sum\limits_{j=1}^{25} \sum\limits_{k=1}^{400} (\Theta_{j,k}^{(1)})^2 + \ \sum\limits_{j=1}^{10} \sum\limits_{k=1}^{25} (\Theta_{j,k}^{(2)})^2 \Big]$$As you can see, we don't regularize the bias unit.
###Code
def cost(num_of_hidden_layer_units, num_of_labels, X, y, alpha):
theta1 = (
np.random.random(
size=(num_of_hidden_layer_units, X.shape[1] + 1)
) - 0.5
) * 0.25
theta2 = (
np.random.random(
size=(num_of_labels, num_of_hidden_layer_units + 1)
) - 0.5
) * 0.25
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
J = 0.
for i in range(X.shape[0]):
part0 = np.multiply(y[i,:], np.log(h[i,:]))
part1 = np.multiply(1 - y[i,:], np.log(1 - h[i,:]))
J += np.sum(part0 + part1)
regularization_item = float(alpha) / (2 * X.shape[0]) * (
np.sum(
np.power(theta1[:,1:], 2)
) +
np.sum(
np.power(theta2[:,1:], 2)
)
)
return -J/X.shape[0] + regularization_item
cost(25, 10, X, y_onehot, 1)
###Output
_____no_output_____
###Markdown
Computes the gradient of the sigmoid function
###Code
def sigmoid_gradient(x):
return np.multiply(sigmoid(x), (1 - sigmoid(x)))
###Output
_____no_output_____
###Markdown
Implement the backpropagation algorithm (WITH the cost regularization item and the gradient regularization item)- Backpropagation computes the parameter updates that will reduce the error of the network on the training data.- Combining the [Chain rule](https://en.wikipedia.org/wiki/Chain_rule) with the two graphs below should be enough to explain what the backpropagation algorithm is and what it does. Let's take calculating the derivative of $e=(a+b)*(b+1)$ as an example, and let's introduce the intermediate variables $c=a+b$ and $d=b+1$: For calculating $d_e|_{a=2,b=1}$, with the [Chain rule](https://en.wikipedia.org/wiki/Chain_rule) we know: $$ \begin{align*} d_e|_{a=2,b=1} &= \frac{\partial e}{\partial a} + \frac{\partial e}{\partial b} \\ &= \frac{\partial e}{\partial c} \cdot \frac{\partial c}{\partial a} + \ \frac{\partial e}{\partial c} \cdot \frac{\partial c}{\partial b} + \ \frac{\partial e}{\partial d} \cdot \frac{\partial d}{\partial b} \end{align*} $$ If we visualize the above chain rules in a tree, we find that: 1. The value of $\frac{\partial e}{\partial a}$ is the product of all the derivatives on the path from node $a$ to node $e$; 2. The value of $\frac{\partial e}{\partial b}$ is the sum of the products of all the derivatives on the two different paths from node $b$ to node $e$; That means: for an upper node $p$ and a lower node $q$, to calculate $\frac{\partial p}{\partial q}$ we need to find all the paths from node $q$ to node $p$, compute for each path the product of all the derivatives on that path, and then sum the products over all the different paths! But maybe you already noticed: we visit certain edges multiple times; for example, the paths 'a-c-e' and 'b-c-e' both visit the edge 'c-e', and for a huge neural network this duplicated traversal is a significant cost!- And this is where the backpropagation algorithm comes in: as indicated by its name (back), it works from the root node down to the leaf nodes and traverses each edge exactly once. How it achieves this: 1. It starts from the root node with the initial value `1`, and processes the other nodes layer by layer from top to bottom; 2. For each node (let's say $p$), it calculates the derivative of $p$ with respect to each of its direct children (let's say $q$), that is $\frac{\partial p}{\partial q}$, and then stores on node $q$ the product of the value accumulated on node $p$ (for the root node it is our initial value `1`) and the just-calculated $\frac{\partial p}{\partial q}$; 3. After finishing one layer, it sums all the values stored on each node respectively, and stores the sum as that node's accumulated value; 4. It repeats steps '2' and '3' until all the nodes are finished; the value finally accumulated on a leaf node (let's say $q$) is the derivative of the root node (let's say $p$) with respect to this leaf node, that is $\frac{\partial p}{\partial q}$! More clearly, still with the above example, the process is demonstrated with the graph below: - The computations required for backpropagation are a superset of those required in the cost function, so what we will actually do is extend the cost function to perform the backpropagation as well, and then return both the cost and the gradients.- And since we will use our `backprop` function with the `scipy.optimize.minimize` function, which means `backprop` will be called on each iteration of the training, we cannot generate `theta1` and `theta2` randomly as in our `cost` function above; instead we pass them in through `params`.
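###Markdown
Before the implementation, here is a tiny numeric check (added for illustration) of the chain-rule example above, with $a=2$ and $b=1$.
###Code
a, b = 2.0, 1.0
c = a + b          # 3
d = b + 1          # 2
e = c * d          # 6
# path a -> c -> e: de/dc * dc/da = d * 1
de_da = d
# paths b -> c -> e and b -> d -> e: d*1 + c*1
de_db = d + c
print(e, de_da, de_db)  # 6.0 2.0 5.0
###Output
_____no_output_____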
###Code
def backprop(params, num_of_hidden_layer_units, num_of_labels, X, y, alpha):
theta1 = np.reshape(
params[:num_of_hidden_layer_units * (X.shape[1] + 1)],
(num_of_hidden_layer_units, X.shape[1] + 1)
)
theta2 = np.reshape(
params[num_of_hidden_layer_units * (X.shape[1] + 1):],
(num_of_labels, num_of_hidden_layer_units + 1)
)
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
# Initializations.
J = 0.
delta1 = np.zeros(theta1.shape) # (25, 401)
delta2 = np.zeros(theta2.shape) # (10, 26)
# Compute the cost.
for i in range(X.shape[0]):
part0 = np.multiply(y[i,:], np.log(h[i,:]))
part1 = np.multiply(1 - y[i,:], np.log(1 - h[i,:]))
J += np.sum(part0 + part1)
J = -J/X.shape[0]
# Add the regularization item to cost.
cost_regularization_item = float(alpha) / (2 * X.shape[0]) * (
np.sum(
np.power(theta1[:,1:], 2)
) +
np.sum(
np.power(theta2[:,1:], 2)
)
)
J += cost_regularization_item
# Perform backpropagation.
for t in range(X.shape[0]):
a1t = a1[[t],:] # (1, 401)
z2t = z2[[t],:] # (1, 25)
a2t = a2[[t],:] # (1, 26)
ht = h[[t],:] # (1, 10)
yt = y[[t],:] # (1, 10)
d3t = ht - yt # (1, 10)
z2t = np.insert(z2t, 0, values=np.ones(z2t.shape[0]), axis=1) # (1, 26)
d2t = np.multiply(d3t @ theta2, sigmoid_gradient(z2t)) # (1, 26)
delta1 += d2t[:,1:].T @ a1t
delta2 += d3t.T @ a2t
delta1 /= X.shape[0]
delta2 /= X.shape[0]
# Add the regularization item to the gradient.
# Note:
# We never regularize the bias item.
delta1[:,1:] += theta1[:,1:] * alpha / X.shape[0]
delta2[:,1:] += theta2[:,1:] * alpha / X.shape[0]
# Unravel the gradient matrices into a single array.
# Note:
# The first parameter of `np.concatenate` needs to be a tuple.
grad = np.concatenate(
(np.ravel(delta1), np.ravel(delta2))
)
return J, grad
num_of_labels = 10
num_of_hidden_layer_units = 25
params = (
np.random.random(
size=25 * (X.shape[1] + 1) + num_of_labels * (num_of_hidden_layer_units + 1)
) - 0.5
) * 0.25
J, grad = backprop(params, num_of_hidden_layer_units, num_of_labels, X, y_onehot, 1)
J, grad.shape
###Output
_____no_output_____
###Markdown
Finally we are ready to train our networkWe put a bound on the number of iterations since the objective function is not likely to completely converge. As you can see, the total cost has dropped to around 0.3 though, so that's a good indicator that the algorithm is working.
###Code
from scipy.optimize import minimize
# Minimize the objective function.
fmin = minimize(
fun=backprop, x0=params, args=(num_of_hidden_layer_units, num_of_labels, X, y_onehot, 1),
method='TNC', jac=True, options={'maxiter': 250}
)
fmin
###Output
_____no_output_____
###Markdown
Let's use the parameters it found and forward-propagate them through the network to get some predictions, and evaluate the overall accuracy of our network
###Code
theta1 = np.reshape(
fmin.x[:num_of_hidden_layer_units * (X.shape[1] + 1)],
(num_of_hidden_layer_units, X.shape[1] + 1)
)
theta2 = np.reshape(
fmin.x[num_of_hidden_layer_units * (X.shape[1] + 1):],
(num_of_labels, num_of_hidden_layer_units + 1)
)
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
y_pred = np.array(np.argmax(h, axis=1) + 1)
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, y)]
accuracy = (sum(map(int, correct)) / float(len(correct)))
print('Total accuracy: {0:.2f}%'.format(accuracy * 100))
###Output
Total accuracy: 99.44%
|
Exercise Week 6 - Ahmad Ichsan Baihaqi.ipynb | ###Markdown
**Exercise Week 6****Author: Ahmad Ichsan Baihaqi****Email: [email protected]**
###Code
def questionOne():
def dictionary(keys, values):
return dict(zip(keys, values))
## sample
keys = ['Phone', 'Tablet', 'Laptop']
values = ['iPhone 11', 'iPad Mini', 'Macbook Pro']
result = dictionary(keys, values)
return result
questionOne()
from math import sqrt
## run to install humanize: pip install humanize
from humanize import number
def questionTwo():
def fibonacci(n):
if (n < 0):
return "Sorry, fibonacci start from 0"
if (n == 0):
return f"Next fibonacci number is {1}, and it is the 2nd Fibonacci number"
if (n == 1):
return f"Next fibonacci number is {1}, and it is the 3rd Fibonacci number"
n_2 = 1
n_1 = 1
position = 4
sums = n_2 + n_1
while (sums <= n):
position = position + 1
# Update the first
n_2 = n_1
# Update the second
n_1 = sums
# Update the third
sums = n_2 + n_1
return f"Next fibonacci number is {sums}, and it is the {number.ordinal(position)} Fibonacci number"
print(fibonacci(-1))
print(fibonacci(0))
print(fibonacci(14))
print(fibonacci(60))
print(fibonacci(55))
questionTwo()
import numpy as np
def questionThree():
def scoring(input_array, approved, not_approved):
in_approved = [item for item in input_array if item in approved]
in_not_approved = [item for item in input_array if item in not_approved]
total_approved = len(in_approved)
total_not_approved = len(in_not_approved)
return total_approved - total_not_approved
print(scoring(
[1, 2, 3, 4, 10, 9, 8, 7],
[1, 10, 11],
[8, 5]
))
print(scoring(
[1, 1, 1, 5, 5, 2, 3, 10],
[1, 3],
[5]
))
questionThree()
###Output
1
2
|
german_to_english.ipynb | ###Markdown
Import Required Libraries
###Code
import string
import re
from numpy import array, argmax, random, take
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Bidirectional, RepeatVector, TimeDistributed
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from keras import optimizers
import matplotlib.pyplot as plt
% matplotlib inline
pd.set_option('display.max_colwidth', 200)
###Output
Using TensorFlow backend.
###Markdown
Read Data Our data is a text file of English-German sentence pairs. First we will read the file using the function defined below.
###Code
# function to read raw text file
def read_text(filename):
# open the file
file = open(filename, mode='rt', encoding='utf-8')
# read all text
text = file.read()
file.close()
return text
###Output
_____no_output_____
###Markdown
Now let's define a function to split the text into English-German pairs separated by '\n' and then split these pairs into English sentences and German sentences.
###Code
# split a text into sentences
def to_lines(text):
sents = text.strip().split('\n')
sents = [i.split('\t') for i in sents]
return sents
###Output
_____no_output_____
###Markdown
__Download the data from [here](http://www.manythings.org/anki/deu-eng.zip)__ and extract "deu.txt" into your working directory.
###Code
data = read_text("deu.txt")
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
###Output
_____no_output_____
###Markdown
The actual data contains over 150,000 sentence pairs. However, we will use only the first 50,000 sentence pairs to reduce the training time of the model. You can change this number according to your system's computational power.
###Code
deu_eng = deu_eng[:50000,:]
###Output
_____no_output_____
###Markdown
Text Pre-Processing Text CleaningLet's take a look at our data, then we will decide which pre-processing steps to adopt.
###Code
deu_eng
###Output
_____no_output_____
###Markdown
We will get rid of the punctuation marks, and then convert the text to lower case.
###Code
# Remove punctuation
deu_eng[:,0] = [s.translate(str.maketrans('', '', string.punctuation)) for s in deu_eng[:,0]]
deu_eng[:,1] = [s.translate(str.maketrans('', '', string.punctuation)) for s in deu_eng[:,1]]
deu_eng
# convert to lowercase
for i in range(len(deu_eng)):
deu_eng[i,0] = deu_eng[i,0].lower()
deu_eng[i,1] = deu_eng[i,1].lower()
deu_eng
###Output
_____no_output_____
###Markdown
Text to Sequence ConversionTo feed our data in a Seq2Seq model, we will have to convert both the input and the output sentences into integer sequences of fixed length. Before that, let's visualise the length of the sentences. We will capture the lengths of all the sentences in two separate lists for English and German, respectively.
###Code
# empty lists
eng_l = []
deu_l = []
# populate the lists with sentence lengths
for i in deu_eng[:,0]:
eng_l.append(len(i.split()))
for i in deu_eng[:,1]:
deu_l.append(len(i.split()))
length_df = pd.DataFrame({'eng':eng_l, 'deu':deu_l})
length_df.hist(bins = 30)
plt.show()
###Output
_____no_output_____
###Markdown
The maximum length of the German sentences is 11 and that of the English phrases is 8. Let's vectorize our text data by using Keras's Tokenizer() class. It will turn our sentences into sequences of integers. Then we will pad those sequences with zeros to make all the sequences of same length.
###Code
# function to build a tokenizer
def tokenization(lines):
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# prepare english tokenizer
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8
print('English Vocabulary Size: %d' % eng_vocab_size)
# prepare Deutch tokenizer
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8
print('Deutch Vocabulary Size: %d' % deu_vocab_size)
###Output
Deutch Vocabulary Size: 10998
###Markdown
Given below is a function to prepare the sequences. It will also perform sequence padding to a maximum sentence length as mentioned above.
###Code
# encode and pad sequences
def encode_sequences(tokenizer, length, lines):
# integer encode sequences
seq = tokenizer.texts_to_sequences(lines)
# pad sequences with 0 values
seq = pad_sequences(seq, maxlen=length, padding='post')
return seq
###Output
_____no_output_____
###Markdown
Model Building We will now split the data into train and test set for model training and evaluation, respectively.
###Code
from sklearn.model_selection import train_test_split
train, test = train_test_split(deu_eng, test_size=0.2, random_state = 12)
###Output
_____no_output_____
###Markdown
It's time to encode the sentences. We will encode German sentences as the input sequences and English sentences as the target sequences. It will be done for both train and test datasets.
###Code
# prepare training data
trainX = encode_sequences(deu_tokenizer, deu_length, train[:, 1])
trainY = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
# prepare validation data
testX = encode_sequences(deu_tokenizer, deu_length, test[:, 1])
testY = encode_sequences(eng_tokenizer, eng_length, test[:, 0])
###Output
_____no_output_____
###Markdown
Now comes the exciting part! Let us define our Seq2Seq model architecture. We are using an Embedding layer and an LSTM layer as our encoder and another LSTM layer followed by a Dense layer as the decoder.
###Code
# build NMT model
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
model = Sequential()
model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
model.add(LSTM(units))
model.add(RepeatVector(out_timesteps))
model.add(LSTM(units, return_sequences=True))
model.add(Dense(out_vocab, activation='softmax'))
return model
###Output
_____no_output_____
###Markdown
We are using RMSprop optimizer in this model as it is usually a good choice for recurrent neural networks.
###Code
model = build_model(deu_vocab_size, eng_vocab_size, deu_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy')
###Output
_____no_output_____
###Markdown
Please note that we have used __'sparse_categorical_crossentropy'__ as the loss function because it allows us to use the target sequence as it is instead of a one-hot encoded format. One-hot encoding the target sequences with such a huge vocabulary might consume our system's entire memory. It seems we are all set to start training our model. We will train it for 30 epochs with a batch size of 512. You may change and play with these hyperparameters. We will also be using __ModelCheckpoint()__ to save the best model with the lowest validation loss. I personally prefer this method over early stopping.
###Code
filename = 'model.h1.24_jan_19'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1),
epochs=30, batch_size=512,
validation_split = 0.2,
callbacks=[checkpoint], verbose=1)
###Output
Train on 32000 samples, validate on 8000 samples
Epoch 1/30
32000/32000 [==============================] - 70s 2ms/step - loss: 3.5693 - val_loss: 3.2825
Epoch 00001: val_loss improved from inf to 3.28246, saving model to model.h1.24_jan_19
Epoch 2/30
32000/32000 [==============================] - 69s 2ms/step - loss: 2.9459 - val_loss: 2.8888
Epoch 00002: val_loss improved from 3.28246 to 2.88876, saving model to model.h1.24_jan_19
Epoch 3/30
32000/32000 [==============================] - 70s 2ms/step - loss: 2.7409 - val_loss: 2.7025
Epoch 00003: val_loss improved from 2.88876 to 2.70251, saving model to model.h1.24_jan_19
Epoch 4/30
32000/32000 [==============================] - 69s 2ms/step - loss: 2.5648 - val_loss: 2.5602
Epoch 00004: val_loss improved from 2.70251 to 2.56023, saving model to model.h1.24_jan_19
Epoch 5/30
32000/32000 [==============================] - 69s 2ms/step - loss: 2.4135 - val_loss: 2.4447
Epoch 00005: val_loss improved from 2.56023 to 2.44467, saving model to model.h1.24_jan_19
Epoch 6/30
32000/32000 [==============================] - 69s 2ms/step - loss: 2.2460 - val_loss: 2.3151
Epoch 00006: val_loss improved from 2.44467 to 2.31508, saving model to model.h1.24_jan_19
Epoch 7/30
32000/32000 [==============================] - 69s 2ms/step - loss: 2.0894 - val_loss: 2.2352
Epoch 00007: val_loss improved from 2.31508 to 2.23518, saving model to model.h1.24_jan_19
Epoch 8/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.9484 - val_loss: 2.0836
Epoch 00008: val_loss improved from 2.23518 to 2.08361, saving model to model.h1.24_jan_19
Epoch 9/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.8160 - val_loss: 2.0163
Epoch 00009: val_loss improved from 2.08361 to 2.01626, saving model to model.h1.24_jan_19
Epoch 10/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.6913 - val_loss: 1.9196
Epoch 00010: val_loss improved from 2.01626 to 1.91964, saving model to model.h1.24_jan_19
Epoch 11/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.5775 - val_loss: 1.8510
Epoch 00011: val_loss improved from 1.91964 to 1.85097, saving model to model.h1.24_jan_19
Epoch 12/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.4687 - val_loss: 1.7856
Epoch 00012: val_loss improved from 1.85097 to 1.78561, saving model to model.h1.24_jan_19
Epoch 13/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.3675 - val_loss: 1.7459
Epoch 00013: val_loss improved from 1.78561 to 1.74589, saving model to model.h1.24_jan_19
Epoch 14/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.2760 - val_loss: 1.6869
Epoch 00014: val_loss improved from 1.74589 to 1.68692, saving model to model.h1.24_jan_19
Epoch 15/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.1830 - val_loss: 1.6406
Epoch 00015: val_loss improved from 1.68692 to 1.64064, saving model to model.h1.24_jan_19
Epoch 16/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.0996 - val_loss: 1.6035
Epoch 00016: val_loss improved from 1.64064 to 1.60352, saving model to model.h1.24_jan_19
Epoch 17/30
32000/32000 [==============================] - 69s 2ms/step - loss: 1.0209 - val_loss: 1.5809
Epoch 00017: val_loss improved from 1.60352 to 1.58090, saving model to model.h1.24_jan_19
Epoch 18/30
32000/32000 [==============================] - 70s 2ms/step - loss: 0.9443 - val_loss: 1.5471
Epoch 00018: val_loss improved from 1.58090 to 1.54706, saving model to model.h1.24_jan_19
Epoch 19/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.8773 - val_loss: 1.5202
Epoch 00019: val_loss improved from 1.54706 to 1.52018, saving model to model.h1.24_jan_19
Epoch 20/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.8082 - val_loss: 1.5076
Epoch 00020: val_loss improved from 1.52018 to 1.50758, saving model to model.h1.24_jan_19
Epoch 21/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.7446 - val_loss: 1.4867
Epoch 00021: val_loss improved from 1.50758 to 1.48668, saving model to model.h1.24_jan_19
Epoch 22/30
32000/32000 [==============================] - 71s 2ms/step - loss: 0.6871 - val_loss: 1.4644
Epoch 00022: val_loss improved from 1.48668 to 1.46445, saving model to model.h1.24_jan_19
Epoch 23/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.6328 - val_loss: 1.4643
Epoch 00023: val_loss improved from 1.46445 to 1.46427, saving model to model.h1.24_jan_19
Epoch 24/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.5786 - val_loss: 1.4563
Epoch 00024: val_loss improved from 1.46427 to 1.45626, saving model to model.h1.24_jan_19
Epoch 25/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.5304 - val_loss: 1.4339
Epoch 00025: val_loss improved from 1.45626 to 1.43388, saving model to model.h1.24_jan_19
Epoch 26/30
32000/32000 [==============================] - 70s 2ms/step - loss: 0.4859 - val_loss: 1.4391
Epoch 00026: val_loss did not improve from 1.43388
Epoch 27/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.4417 - val_loss: 1.4290
Epoch 00027: val_loss improved from 1.43388 to 1.42898, saving model to model.h1.24_jan_19
Epoch 28/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.4047 - val_loss: 1.4241
Epoch 00028: val_loss improved from 1.42898 to 1.42413, saving model to model.h1.24_jan_19
Epoch 29/30
32000/32000 [==============================] - 70s 2ms/step - loss: 0.3650 - val_loss: 1.4649
Epoch 00029: val_loss did not improve from 1.42413
Epoch 30/30
32000/32000 [==============================] - 69s 2ms/step - loss: 0.3314 - val_loss: 1.4229
Epoch 00030: val_loss improved from 1.42413 to 1.42288, saving model to model.h1.24_jan_19
###Markdown
Let's compare the training loss and the validation loss.
###Code
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['train','validation'])
plt.show()
###Output
_____no_output_____
###Markdown
Make Predictions Let's load the saved model to make predictions.
###Code
model = load_model('model.h1.24_jan_19')
preds = model.predict_classes(testX.reshape((testX.shape[0],testX.shape[1])))
def get_word(n, tokenizer):
for word, index in tokenizer.word_index.items():
if index == n:
return word
return None
# convert predictions into text (English)
preds_text = []
for i in preds:
temp = []
for j in range(len(i)):
t = get_word(i[j], eng_tokenizer)
if j > 0:
if (t == get_word(i[j-1], eng_tokenizer)) or (t == None):
temp.append('')
else:
temp.append(t)
else:
if(t == None):
temp.append('')
else:
temp.append(t)
preds_text.append(' '.join(temp))
pred_df = pd.DataFrame({'actual' : test[:,0], 'predicted' : preds_text})
pd.set_option('display.max_colwidth', 200)
pred_df.head(15)
pred_df.tail(15)
pred_df.tail(15)
pred_df.sample(15)
###Output
_____no_output_____
muscle_QoreSDK_v2.ipynb | ###Markdown
Load the required libraries
###Code
from qore_sdk.client import WebQoreClient
from qore_sdk.featurizer import Featurizer
import qore_sdk.utils
from sklearn import model_selection
from sklearn.metrics import accuracy_score, f1_score
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
import time
import numpy as np
import os
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Load the data
###Code
def load_xyz(str_dir):
with open(os.path.join(str_dir, 'x.txt'), 'r') as f:
x = np.loadtxt(f, delimiter=',', usecols=1)
with open(os.path.join(str_dir, 'y.txt'), 'r') as f:
y = np.loadtxt(f, delimiter=',', usecols=1)
with open(os.path.join(str_dir, 'z.txt'), 'r') as f:
z = np.loadtxt(f, delimiter=',', usecols=1)
return np.stack([x, y, z], 1) # 2D-array
list_data = [
'./data/control',
'./data/udetate',
'./data/hukkin',
'./data/squat',
'./data/roller']
list_X = []
list_y = []
j_label = 0 # incremental label for each data
for i_data in list_data:
print('loading: ' + i_data)
array_loaded = load_xyz(i_data)
array_label = np.repeat(j_label, array_loaded.shape[0])
j_label += 1
plt.figure()
plt.plot(array_loaded)
    # Split each time series into multiple short sub-series.
# https://qcore-info.github.io/advent-calendar-2019/index.html#qore_sdk.utils.sliding_window
print(array_loaded.shape)
X, y = qore_sdk.utils.sliding_window(array_loaded, width=100, stepsize=1, axis=0, y=array_label, y_def='mode')
print(X.shape, y.shape)
list_X.append(X)
list_y.append(y)
X_all = np.concatenate(list_X, 0)
y_all = np.concatenate(list_y, 0)
###Output
loading: ./data/control
(1742, 3)
(1643, 100, 3) (1643, 1)
loading: ./data/udetate
(398, 3)
(299, 100, 3) (299, 1)
loading: ./data/hukkin
(661, 3)
(562, 100, 3) (562, 1)
loading: ./data/squat
(506, 3)
(407, 100, 3) (407, 1)
loading: ./data/roller
(551, 3)
(452, 100, 3) (452, 1)
###Markdown
Equalize the number of samples per class with n_samples_per_class. https://qcore-info.github.io/advent-calendar-2019/index.html#qore_sdk.utils.under_sample
###Code
_, counts = np.unique(y_all, return_counts=True)
print(counts)
# Match the class with the fewest samples
X, y = qore_sdk.utils.under_sample(X_all, y_all.flatten(), n_samples_per_class=counts.min())
_, counts = np.unique(y, return_counts=True)
print(counts)
###Output
[1643 299 562 407 452]
[299 299 299 299 299]
###Markdown
Feature extraction with the QoreSDK Featurizer
###Code
n_filters = 40
featurizer = Featurizer(n_filters)
X = featurizer.featurize(X, axis=2)
print('X.shape:', X.shape)
###Output
X.shape: (1495, 100, 40)
###Markdown
Split into training and test data
###Code
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.2, random_state=1
)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
###Output
(1196, 100, 40)
(1196,)
(299, 100, 40)
(299,)
###Markdown
Enter account information. Since the actual account credentials cannot be published here, they are entered interactively. A pre-issued username, password, and endpoint are required. For details, see the [official Advent Calendar GitHub](https://github.com/qcore-info/advent-calendar-2019).
###Code
import getpass
username = getpass.getpass(prompt='username: ', stream=None)
password = getpass.getpass(prompt='password: ', stream=None)
endpoint = getpass.getpass(prompt='endpoint: ', stream=None)
# authentication
client = WebQoreClient(username=username,
password=password,
endpoint=endpoint)
###Output
_____no_output_____
###Markdown
Train the model
###Code
# If there are too many samples, a 505 bad gateway error occurs, so reduce the amount of data if necessary
# qoresdk limits: N*T*V < 150,000 && N*T < 10,000
# The limits do not seem to be strict, but staying roughly below these values avoids errors
n_samples_per_class = 200
X_train, y_train = qore_sdk.utils.under_sample(X_train, y_train.flatten(), n_samples_per_class=n_samples_per_class)
start = time.time()
res = client.classifier_train(X=X_train, Y=y_train)
print(res)
###Output
{'res': 'ok', 'train_time': 6.712144613265991}
###Markdown
`classifier_test` is convenient because it computes the accuracy directly
###Code
res = client.classifier_test(X=X_test, Y=y_test)
print(res)
###Output
{'accuracy': 1.0, 'f1': 1.0, 'res': 'ok'}
###Markdown
Finally, let's also run inference
###Code
res = client.classifier_predict(X=X_test)
print("acc=", accuracy_score(y_test.tolist(), res["Y"]))
print("f1=", f1_score(y_test.tolist(), res["Y"], average="weighted"))
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
print(res['Y'])
###Output
acc= 1.0
f1= 1.0
elapsed_time:58.683502197265625[sec]
[1, 1, 4, 3, 4, 1, 0, 3, 3, 4, 1, 4, 2, 3, 3, 4, 0, 4, 3, 3, 2, 4, 1, 4, 2, 0, 0, 2, 4, 3, 2, 3, 1, 1, 3, 3, 0, 2, 0, 3, 0, 3, 1, 1, 2, 0, 1, 3, 3, 0, 4, 0, 1, 1, 3, 2, 1, 1, 4, 0, 1, 1, 4, 2, 2, 1, 2, 4, 2, 1, 1, 3, 0, 0, 2, 3, 0, 0, 2, 0, 1, 1, 0, 1, 0, 4, 4, 1, 0, 1, 2, 0, 3, 0, 3, 3, 0, 2, 4, 2, 2, 3, 4, 1, 0, 1, 3, 2, 3, 2, 4, 1, 1, 0, 1, 4, 1, 2, 2, 4, 2, 4, 4, 1, 3, 0, 0, 3, 1, 2, 0, 2, 4, 0, 3, 3, 2, 4, 2, 4, 2, 0, 4, 2, 4, 0, 3, 3, 0, 0, 4, 4, 1, 2, 4, 0, 4, 3, 4, 1, 0, 4, 0, 4, 0, 2, 2, 2, 3, 3, 1, 2, 1, 0, 0, 2, 1, 3, 1, 0, 3, 1, 0, 0, 4, 0, 4, 4, 1, 1, 3, 3, 0, 1, 2, 4, 2, 4, 1, 2, 4, 0, 4, 4, 0, 4, 2, 0, 4, 2, 1, 1, 0, 0, 1, 2, 0, 1, 4, 4, 3, 0, 2, 2, 2, 3, 3, 1, 2, 3, 3, 0, 1, 2, 3, 3, 1, 3, 2, 2, 0, 0, 0, 4, 2, 3, 2, 2, 0, 0, 3, 4, 2, 4, 0, 2, 0, 0, 0, 3, 4, 2, 3, 0, 2, 1, 1, 2, 3, 0, 4, 1, 1, 3, 2, 2, 4, 1, 4, 0, 0, 0, 1, 3, 0, 3, 4, 0, 1, 1, 3, 1, 4, 3, 1, 3, 3, 1, 4]
###Markdown
For reference, compare with a simple linear model (logistic regression) and a simple deep learning model (MLP)
###Code
X_train = X_train.reshape(len(X_train), -1).astype(np.float64)
X_test = X_test.reshape(len(X_test), -1).astype(np.float64)
y_train = np.ravel(y_train)
y_test = np.ravel(y_test)
print("===LogisticRegression(Using Sklearn)===")
start = time.time()
lr_cls = LogisticRegression(C=9.0)
lr_cls.fit(X_train, y_train)
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
res = lr_cls.predict(X=X_test)
print("acc=", accuracy_score(y_test.tolist(), res))
print("f1=", f1_score(y_test.tolist(), res, average="weighted"))
print("===MLP(Using Sklearn)===")
start = time.time()
mlp_cls = MLPClassifier(hidden_layer_sizes=(100, 100, 100, 10))
mlp_cls.fit(X_train, y_train)
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
res = mlp_cls.predict(X=X_test)
print("acc=", accuracy_score(y_test.tolist(), res))
print("f1=", f1_score(y_test.tolist(), res, average="weighted"))
###Output
===LogisticRegression(Using Sklearn)===
###Markdown
Validation on newly acquired test data: inference on data recorded during 12 push-ups followed by a short rest (almost no movement)
###Code
# Load the newly acquired data
list_data = [
'./data/test01_udetate',
]
list_X = []
list_y = []
# j_label = 0 # incremental label for each data
for i_data in list_data:
print('loading: ' + i_data)
array_loaded = load_xyz(i_data)
array_label = np.repeat(j_label, array_loaded.shape[0])
j_label += 1
plt.figure()
plt.plot(array_loaded)
    # Split each time series into multiple short sub-series.
# https://qcore-info.github.io/advent-calendar-2019/index.html#qore_sdk.utils.sliding_window
print(array_loaded.shape)
# X, y = qore_sdk.utils.sliding_window(array_loaded, width=100, stepsize=1, axis=0, y=array_label, y_def='mode')
X = qore_sdk.utils.sliding_window(array_loaded, width=100, stepsize=1, axis=0)
# print(X.shape, y.shape)
list_X.append(X)
# list_y.append(y)
X_all = np.concatenate(list_X, 0)
# y_all = np.concatenate(list_y, 0)
# Feature extraction of the validation data with the Featurizer
print(X_all.shape)
n_filters = 40
featurizer = Featurizer(n_filters)
X = featurizer.featurize(X_all, axis=2)
print('X.shape:', X.shape)
# Inference
res = client.classifier_predict(X=X)
# print("acc=", accuracy_score(y_test.tolist(), res["Y"]))
# print("f1=", f1_score(y_test.tolist(), res["Y"], average="weighted"))
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
print(res['Y'])
# Visualize the inference results
plt.figure()
plt.plot(res['Y'])
###Output
_____no_output_____
###Markdown
Data labels: 0 --> not exercising, 1 --> push-ups, 2 --> sit-ups, 3 --> squats, 4 --> ab roller. Around sample 200 the push-ups end and the subject is resting. While doing push-ups the predictions are mostly correct, but they are wrong during the rest. This is probably a problem with the "not exercising" data used for training. Possible improvements: add data from more varied situations to the "not exercising" class, and add a separate class for stationary/resting states. Inference on the newly acquired ab roller data
###Code
# Load the newly acquired data
list_data = [
'./data/test02_roller',
]
list_X = []
list_y = []
j_label = 4 # incremental label for each data
for i_data in list_data:
print('loading: ' + i_data)
array_loaded = load_xyz(i_data)
array_label = np.repeat(j_label, array_loaded.shape[0])
# j_label += 1
plt.figure()
plt.plot(array_loaded)
    # Split each time series into multiple short sub-series.
# https://qcore-info.github.io/advent-calendar-2019/index.html#qore_sdk.utils.sliding_window
print(array_loaded.shape)
X, y = qore_sdk.utils.sliding_window(array_loaded, width=100, stepsize=1, axis=0, y=array_label, y_def='mode')
# X = qore_sdk.utils.sliding_window(array_loaded, width=100, stepsize=1, axis=0)
print(X.shape, y.shape)
list_X.append(X)
list_y.append(y)
X_all = np.concatenate(list_X, 0)
y_all = np.concatenate(list_y, 0)
# Feature extraction of the validation data with the Featurizer
print(X_all.shape)
n_filters = 40
featurizer = Featurizer(n_filters)
X = featurizer.featurize(X_all, axis=2)
print('X.shape:', X.shape)
# Inference
res = client.classifier_predict(X=X)
print("acc=", accuracy_score(y_all.tolist(), res["Y"]))
print("f1=", f1_score(y_all.tolist(), res["Y"], average="weighted"))
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
print(res['Y'])
# Visualize the inference results
plt.figure()
plt.plot(res['Y'])
X_test = X.reshape(len(X), -1).astype(np.float64)
y_test = np.ravel(y_all)
print("===LogisticRegression(Using Sklearn)===")
res = lr_cls.predict(X=X_test)
print("acc=", accuracy_score(y_test.tolist(), res))
print("f1=", f1_score(y_test.tolist(), res, average="weighted"))
print("===MLP(Using Sklearn)===")
res = mlp_cls.predict(X=X_test)
print("acc=", accuracy_score(y_test.tolist(), res))
print("f1=", f1_score(y_test.tolist(), res, average="weighted"))
###Output
===LogisticRegression(Using Sklearn)===
acc= 0.20094562647754138
f1= 0.3346456692913386
===MLP(Using Sklearn)===
acc= 0.27423167848699764
f1= 0.43042671614100186
|
Traffic_Sign_Classifier.1.ipynb | ###Markdown
Self-Driving Car Engineer Nanodegree Deep Learning Project: Build a Traffic Sign Recognition ClassifierIn this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/!/rubrics/481/view) for this project.The [rubric](https://review.udacity.com/!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. --- Step 0: Load The Data
###Code
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = "./traffic-signs-data/train.p"
validation_file= "./traffic-signs-data/valid.p"
testing_file = "./traffic-signs-data/test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# Print the size of datasets
print("X_train shape:", X_train.shape)
print("y_train shape:", y_train.shape)
print("X_valid shape:", X_valid.shape)
print("y_valid shape:", y_valid.shape)
print("X_test shape:", X_test.shape)
print("y_test shape:", y_test.shape)
###Output
X_train shape: (34799, 32, 32, 3)
y_train shape: (34799,)
X_valid shape: (4410, 32, 32, 3)
y_valid shape: (4410,)
X_test shape: (12630, 32, 32, 3)
y_test shape: (12630,)
###Markdown
--- Step 1: Dataset Summary & ExplorationThe pickled data is a dictionary with 4 key/value pairs:- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
###Code
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of validation examples
n_validation = len(X_valid)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
print(y_train, y_train.shape)
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Number of validation examples", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
###Output
[41 41 41 ... 25 25 25] (34799,)
Number of training examples = 34799
Number of validation examples 4410
Number of testing examples = 12630
Image data shape = (32, 32, 3)
Number of classes = 43
###Markdown
Include an exploratory visualization of the dataset Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
###Code
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
import random
# 4 images to be shown
fig, axs = plt.subplots(1, 4, figsize=(15, 6))
fig.subplots_adjust(hspace = 1, wspace=.01)
axs = axs.ravel()
for i in range(4):
index = random.randint(0, len(X_train))
image = X_train[index]
axs[i].axis('off')
axs[i].imshow(image)
axs[i].set_title(y_train[index])
# Label frequency histogram
histogram, bins = np.histogram(y_train, bins = n_classes)
width = .5 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, histogram, align='center', width=width)
plt.show()
###Output
_____no_output_____
###Markdown
---- Step 2: Design and Test a Model ArchitectureDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. There are various aspects to consider when thinking about this problem:- Neural network architecture (is the network over or underfitting?)- Play around preprocessing techniques (normalization, rgb to grayscale, etc)- Number of examples per label (some have more than others).- Generate fake data.Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. Pre-process the Data Set (normalization, grayscale, etc.) Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. Other pre-processing steps are optional. You can try different techniques to see if it improves performance. Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
###Code
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
X_train_rgb = X_train
X_train_gry = np.sum(X_train/3, axis=3, keepdims=True)
X_valid_rgb = X_valid
X_valid_gry = np.sum(X_valid/3, axis=3, keepdims=True)
X_test_rgb = X_test
X_test_gry = np.sum(X_test/3, axis=3, keepdims=True)
print('RGB: ', X_train_rgb.shape)
print('Grayscale: ', X_train_gry.shape)
X_train = X_train_gry
X_valid = X_valid_gry
X_test = X_test_gry
# Visualization
n_rows = 2
n_cols = 4
offset = 1000
fig, axs = plt.subplots(n_rows, n_cols, figsize=(18, 14))
fig.subplots_adjust(hspace = 0.01, wspace = 0.01)
axs = axs.ravel()
for j in range(0, n_rows, 2):
for i in range(n_cols):
index = i + j * n_cols
image = X_train_rgb[index + offset]
axs[index].axis('off')
axs[index].imshow(image)
for i in range(n_cols):
index = i + j * n_cols + n_cols
image = X_train_gry[index + offset - n_cols].squeeze()
axs[index].axis('off')
axs[index].imshow(image, cmap='gray')
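# --- Added sketch (assumption, not in the original notebook): the quick normalization the
# --- project notes suggest, (pixel - 128)/128, is never applied in this notebook even though
# --- a later cell references X_test_normalized. The reassignments below are one way to fill
# --- that gap; the X_test_normalized alias is chosen to match the name used later.
X_train = (X_train - 128) / 128
X_valid = (X_valid - 128) / 128
X_test = (X_test - 128) / 128
X_test_normalized = X_test  # alias for the name referenced in the final evaluation cell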
###Output
_____no_output_____
###Markdown
Model Architecture
###Code
### Define your architecture here.
### Feel free to use as many code cells as needed.
# Setup Tensorflow
import tensorflow as tf
EPOCHS = 10
BATCH_SIZE = 50
# Implementing LeNet Architecture
from tensorflow.contrib.layers import flatten
def LeNet(x):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.1
# TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# TODO: Activation.
conv1 = tf.nn.relu(conv1)
# TODO: Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# TODO: Layer 2: Convolutional. Output = 10x10x16.
conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
# TODO: Activation.
conv2 = tf.nn.relu(conv2)
# TODO: Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# TODO: Flatten. Input = 5x5x16. Output = 400.
fc0 = flatten(conv2)
# TODO: Layer 3: Fully Connected. Input = 400. Output = 120.
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# TODO: Activation.
fc1 = tf.nn.relu(fc1)
# TODO: Layer 4: Fully Connected. Input = 120. Output = 84.
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# TODO: Activation.
fc2 = tf.nn.relu(fc2)
    # TODO: Layer 5: Fully Connected. Input = 84. Output = 43 (one logit per traffic sign class).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, 43)
###Output
_____no_output_____
###Markdown
Train, Validate and Test the Model A validation set can be used to assess how well the model is performing. A low accuracy on the training and validationsets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
###Code
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
rate = 0.0009
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset: offset + BATCH_SIZE], y_data[offset: offset + BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
print('done')
from sklearn.utils import shuffle
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset: end], y_train[offset: end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
        validation_accuracy = evaluate(X_valid, y_valid)
        print("EPOCH {}: Validation Accuracy = {:.3f}".format(i + 1, validation_accuracy))
saver.save(sess, 'lenet')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./lenet.meta')
saver2.restore(sess, "./lenet")
test_accuracy = evaluate(X_test_normalized, y_test)
###Output
_____no_output_____
###Markdown
--- Step 3: Test a Model on New ImagesTo give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. Load and Output the Images
###Code
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
import numpy as np
import cv2
import glob
import matplotlib.image as mpimg
fig, axs = plt.subplots(2, 4, figsize=(4, 2))
fig.subplots_adjust(hspace = 0.2, wspace = 0.001)
axs = axs.ravel()
my_images = []
for i, img in enumerate(glob.glob('./some/*x.png')):
image = cv2.imread(img)
axs[i].axis('off')
axs[i].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
my_images.append(image)
my_images = np.asarray(my_images)
my_images_gry = np.sum(my_images/3, axis=3, keepdims=True)
my_images_normalized = (my_images_gry - 128)/128
###Output
_____no_output_____
###Markdown
Predict the Sign Type for Each Image
###Code
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
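# A minimal sketch (added, hedged): restore the weights saved during training and run the
# logits on the normalized web images prepared above, taking the argmax as the predicted
# class id. This assumes the downloaded images are already 32x32 pixels.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    my_logits = sess.run(logits, feed_dict={x: my_images_normalized, keep_prob: 1.0})
    my_predictions = np.argmax(my_logits, axis=1)
    print(my_predictions)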
###Output
_____no_output_____
###Markdown
Analyze Performance
###Code
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
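# A sketch (added, hedged): compare the predictions above against hand-labeled ground truth.
# NOTE: my_labels is hypothetical -- the true class ids of the downloaded images are not
# listed anywhere in this notebook and would need to be filled in by hand from signnames.csv.
# my_labels = np.array([...])
# new_image_accuracy = np.mean(my_predictions == my_labels)
# print("Accuracy on new images: {:.1%}".format(new_image_accuracy))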
###Output
_____no_output_____
###Markdown
Output Top 5 Softmax Probabilities For Each Image Found on the Web For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.htmltop_k) could prove helpful here. The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:``` (5, 6) arraya = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, 0.12789202], [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, 0.15899337], [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , 0.23892179], [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , 0.16505091], [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, 0.09155967]])```Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:```TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], [ 0.28086119, 0.27569815, 0.18063401], [ 0.26076848, 0.23892179, 0.23664738], [ 0.29198961, 0.26234032, 0.16505091], [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], [0, 1, 4], [0, 5, 1], [1, 3, 5], [1, 4, 3]], dtype=int32))```Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
###Code
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
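# A minimal sketch (added, hedged): softmax the logits and take the top 5 probabilities and
# class ids for each web image, mirroring the tf.nn.top_k example described above.
softmax_logits = tf.nn.softmax(logits)
top_k_op = tf.nn.top_k(softmax_logits, k=5)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    top_k_result = sess.run(top_k_op, feed_dict={x: my_images_normalized, keep_prob: 1.0})
    print(top_k_result.values)
    print(top_k_result.indices)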
###Output
_____no_output_____
###Markdown
Project WriteupOnce you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. --- Step 4 (Optional): Visualize the Neural Network's State with Test Images This Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. Your output should look something like this (above)
###Code
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        if activation_min != -1 and activation_max != -1:  # logical 'and' (bitwise '&' binds tighter than '!=')
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
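# Example usage (a sketch -- `sess`, the placeholder `x`, a conv layer tensor such
# as `conv1`, and a preprocessed test image `X_test[0:1]` are assumed to exist in
# the training cells above; swap in the names used in your own network):
# outputFeatureMap(X_test[0:1], conv1, plt_num=1)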
###Output
_____no_output_____ |
Sales_Conversion_Optimization.ipynb | ###Markdown
Sales conversion optimization Data: https://www.kaggle.com/loveall/clicks-conversion-tracking
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df= pd.read_csv("KAG_conversion_data.csv")
df.head()
df.describe()
df.info()
#Dummy encode any categorical or object values in the data and save the resulting data frame to variable X.
X=pd.get_dummies(data=df,drop_first=True)
X
###Output
_____no_output_____
###Markdown
Using a heat map to show the correlation in the dataa. Drop the first 4 columns in the data frame X.b. Basing your answer on what can be seen in the heat map, why did we drop these columns?
###Code
#Drop the first 4 columns in the data frame X.
X.drop(X.iloc[:, 0:4], inplace = True, axis = 1)
X.head()
#Showing correlation in the data using a heatmap and commenting why we dropped the columns above
sns.heatmap(df[["Impressions","Clicks","Spent","Total_Conversion","Approved_Conversion"]].corr(),annot=True,cmap="YlGnBu");
###Output
_____no_output_____
###Markdown
Using the elbow method:a. Determine the best number of clusters for the data in the range of 2 to 20.b. Also include the graphical plot for the elbow curve.
###Code
from sklearn.cluster import KMeans
import seaborn as sns
sum_of_sq_dist = {}
for k in range(2,20):
km = KMeans(n_clusters= k, init= 'k-means++', max_iter= 1000)
km = km.fit(X)
sum_of_sq_dist[k] = km.inertia_
#Plot the graph for the sum of square distance values and Number of Clusters
sns.pointplot(x = list(sum_of_sq_dist.keys()), y = list(sum_of_sq_dist.values()))
plt.xlabel('Number of Clusters(k)')
plt.ylabel('Sum of Square Distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# Based on the result above in 4b use the value at your elbow point to cluster the values in the data frame X.
KMean_clust = KMeans(n_clusters= 4, init= 'k-means++', max_iter= 1000)
KMean_clust.fit(X)
#visualizing the clusters (index with the fitted cluster labels, not the model object)
labels = KMean_clust.labels_
sns.set(style ='darkgrid')
plt.scatter(X.values[labels == 2, 2], X.values[labels == 2, 3], s= 100, c= 'red')
###Output
_____no_output_____
###Markdown
Building KMeans model with K=4 (Training and Predicting)Use the model to predict the labels from the data and save them to variable y_means
###Code
# Instantiating
kmeans4 = KMeans(n_clusters = 4)
# Training the model
kmeans4.fit(X)
# predicting
y_means = kmeans4.fit_predict(X)
print(y_means)
# Storing the y_means values in a new column
df['Advert_Type'] = y_means+1 #to start the cluster number from 1
df.head()
###Output
_____no_output_____
###Markdown
Using any form of distribution plot of your choice and the original data frame, plot 2 graphs that can be used to answer the following:a. Which advert type led to the highest and most consistent amount of sales by customers across all the age brackets?b. Does the company xyz have gender bias in terms of their ad spending? Are their products gender neutral?
###Code
df.groupby(['xyz_campaign_id']).sum().plot(kind='pie', y='Approved_Conversion',figsize=(15,10), autopct='%1.1f%%');
df.groupby(['gender']).sum().plot(kind='pie', y='Approved_Conversion',figsize=(15,10), autopct='%1.1f%%');
###Output
_____no_output_____
###Markdown
Hierarchical clustering
###Code
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogam', fontsize = 20)
plt.xlabel('Customers')
plt.ylabel('Ecuclidean Distance')
plt.show()
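# A possible follow-up sketch (assumption: cutting the tree at 4 clusters, the same k
# suggested by the elbow method above) using agglomerative clustering with Ward linkage.
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=4, linkage='ward')
hc_labels = hc.fit_predict(X)
df['HC_Advert_Type'] = hc_labels + 1  # hypothetical column, mirrors 'Advert_Type' above
df['HC_Advert_Type'].value_counts()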
###Output
_____no_output_____ |
recipes/Hyperparameters/HyperBand/HyperBand.ipynb | ###Markdown
HyperBand IntroductionThis example shows how to perform HyperBand parametric sweeping using CNTK with MNIST dataset to train a convolutional neural network (CNN) on a GPU cluster. Details- We provide a CNTK example [ConvMNIST.py](../ConvMNIST.py) to accept command line arguments for CNTK dataset, model locations, model file suffix and two hyperparameters for tuning: 1. hidden layer dimension and 2. feedforward constant - The implementation of the HyperBand algorithm is adapted from the article [*Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization*](https://people.eecs.berkeley.edu/~kjamieson/hyperband.html)- For demonstration purposes, MNIST dataset and CNTK training script will be deployed at Azure File Share;- Standard output of the job and the model will be stored on Azure File Share;- MNIST dataset (http://yann.lecun.com/exdb/mnist/) has been preprocessed by using install_mnist.py available [here](https://batchaisamples.blob.core.windows.net/samples/mnist_dataset.zip?st=2017-09-29T18%3A29%3A00Z&se=2099-12-31T08%3A00%3A00Z&sp=rl&sv=2016-05-31&sr=c&sig=PmhL%2BYnYAyNTZr1DM2JySvrI12e%2F4wZNIwCtf7TRI%2BM%3D). Instructions Install Dependencies and Create Configuration file.Follow [instructions](/recipes) to install all dependencies and create configuration file. Read Configuration and Create Batch AI client
###Code
from __future__ import print_function
import sys
import logging
import numpy as np
import azure.mgmt.batchai.models as models
from azure.storage.blob import BlockBlobService
from azure.storage.file import FileService
sys.path.append('../../..')
import utilities as utils
from utilities.job_factory import ParameterSweep, NumParamSpec, DiscreteParamSpec
cfg = utils.config.Configuration('../../configuration.json')
client = utils.config.create_batchai_client(cfg)
###Output
_____no_output_____
###Markdown
Create Resource Group and Batch AI workspace if they do not exist:
###Code
utils.config.create_resource_group(cfg)
_ = client.workspaces.create(cfg.resource_group, cfg.workspace, cfg.location).result()
###Output
_____no_output_____
###Markdown
1. Prepare Training Dataset and Script in Azure Storage Create Azure Blob ContainerWe will create a new Blob Container with name `batchaisample` under your storage account. This will be used to store the *input training dataset***Note** You don't need to create new blob Container for every cluster. We are doing this in this sample to simplify resource management for you.
###Code
azure_blob_container_name = 'batchaisample'
blob_service = BlockBlobService(cfg.storage_account_name, cfg.storage_account_key)
blob_service.create_container(azure_blob_container_name, fail_on_exist=False)
###Output
_____no_output_____
###Markdown
Upload MNIST Dataset to Azure Blob ContainerFor demonstration purposes, we will download preprocessed MNIST dataset to the current directory and upload it to Azure Blob Container directory named `mnist_dataset`.There are multiple ways to create folders and upload files into Azure Blob Container - you can use [Azure Portal](https://ms.portal.azure.com), [Storage Explorer](http://storageexplorer.com/), [Azure CLI2](/azure-cli-extension) or Azure SDK for your preferable programming language.In this example we will use Azure SDK for python to copy files into Blob.
###Code
mnist_dataset_directory = 'mnist_dataset'
utils.dataset.download_and_upload_mnist_dataset_to_blob(
blob_service, azure_blob_container_name, mnist_dataset_directory)
###Output
_____no_output_____
###Markdown
Create Azure File ShareFor this example we will create a new File Share with name `batchaisample` under your storage account. This will be used to share the *training script file* and *output file*.**Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you.
###Code
azure_file_share_name = 'batchaisample'
file_service = FileService(cfg.storage_account_name, cfg.storage_account_key)
file_service.create_share(azure_file_share_name, fail_on_exist=False)
###Output
_____no_output_____
###Markdown
Upload the training script [ConvMNIST.py](../ConvMNIST.py) to file share directory named `hyperparam_samples`.
###Code
cntk_script_path = "hyperparam_samples"
file_service.create_directory(
azure_file_share_name, cntk_script_path, fail_on_exist=False)
file_service.create_file_from_path(
azure_file_share_name, cntk_script_path, 'ConvMNIST.py', '../ConvMNIST.py')
###Output
_____no_output_____
###Markdown
2. Create Azure Batch AI Compute Cluster Configure Compute Cluster- For this example we will use a GPU cluster of `STANDARD_NC6` nodes. Number of nodes in the cluster is configured with `nodes_count` variable;- We will call the cluster `nc6`;So, the cluster will have the following parameters:
###Code
nodes_count = 4
cluster_name = 'nc6'
parameters = models.ClusterCreateParameters(
location=cfg.location,
vm_size='STANDARD_NC6',
scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=nodes_count)
),
user_account_settings=models.UserAccountSettings(
admin_user_name=cfg.admin,
admin_user_password=cfg.admin_password or None,
admin_user_ssh_public_key=cfg.admin_ssh_key or None,
)
)
###Output
_____no_output_____
###Markdown
Create Compute Cluster
###Code
_ = client.clusters.create(cfg.resource_group, cfg.workspace, cluster_name, parameters).result()
###Output
_____no_output_____
###Markdown
Monitor Cluster CreationMonitor the just created cluster. The `utilities` module contains a helper function to print out detail status of the cluster.
###Code
cluster = client.clusters.get(cfg.resource_group, cfg.workspace, cluster_name)
utils.cluster.print_cluster_status(cluster)
###Output
_____no_output_____
###Markdown
3. Hyperparameter tuning using HyperBand Define specifications for the hyperparameters
###Code
param_specs = [
NumParamSpec(
parameter_name="FEEDFORWARD_CONSTANT",
data_type="REAL",
start=0.001,
end=10,
scale="LOG"
),
DiscreteParamSpec(
parameter_name="HIDDEN_LAYERS_DIMENSION",
values=[100, 200, 300]
)
]
###Output
_____no_output_____
###Markdown
Create a parameter substitution object.
###Code
parameters = ParameterSweep(param_specs)
###Output
_____no_output_____
###Markdown
Generate *num_configs* random hyper-parameter configurations and the corresponding jobs We will use the parameter substitution object to specify where we would like to substitute the parameters. We substitute the values for the feedforward constant and the hidden layers dimension into `models.JobCreateParameters.cntk_settings.command_line_args`. Note that the `parameters` variable is used like a dict, with the `parameter_name` being used as the key to specify which parameter to substitute. When `parameters.generate_jobs_random_search` is called, the `parameters[name]` variables will be replaced with actual values.
###Code
azure_file_share_mount_path = 'afs'
azure_blob_mount_path = 'bfs'
jcp = models.JobCreateParameters(
cluster=models.ResourceId(id=cluster.id),
node_count=1,
std_out_err_path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format(azure_file_share_mount_path),
input_directories = [
models.InputDirectory(
id='SCRIPT',
path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}'.format(azure_blob_mount_path, mnist_dataset_directory))
],
output_directories = [
models.OutputDirectory(
id='ALL',
path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format(azure_file_share_mount_path))],
mount_volumes = models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
cfg.storage_account_name, azure_file_share_name),
relative_mount_path=azure_file_share_mount_path)
],
azure_blob_file_systems=[
models.AzureBlobFileSystemReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
container_name=azure_blob_container_name,
relative_mount_path=azure_blob_mount_path)
]
),
container_settings=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='microsoft/cntk:2.5.1-gpu-python2.7-cuda9.0-cudnn7.0')
),
cntk_settings=models.CNTKsettings(
python_script_file_path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}/ConvMNIST.py'.format(azure_file_share_mount_path, cntk_script_path),
command_line_args='--feedforward_const {0} --hidden_layers_dim {1} --epochs $PARAM_EPOCHS --datadir $AZ_BATCHAI_INPUT_SCRIPT --outputdir $AZ_BATCHAI_OUTPUT_ALL --logdir $AZ_BATCHAI_OUTPUT_ALL'
.format(parameters['FEEDFORWARD_CONSTANT'],
parameters['HIDDEN_LAYERS_DIMENSION']) # Substitute hyperparameters
)
)
###Output
_____no_output_____
###Markdown
Create a new experiment.
###Code
experiment_name = 'hyperband_experiment'
experiment = client.experiments.create(cfg.resource_group, cfg.workspace, experiment_name).result()
experiment_utils = utils.experiment.ExperimentUtils(client, cfg.resource_group, cfg.workspace, experiment_name)
###Output
_____no_output_____
###Markdown
We define the following metric extractor to extract the desired metric from the learning log file. - In this example, we extract the number between "metric =" and "%".
###Code
metric_extractor = utils.job.MetricExtractor(
output_dir_id='ALL',
logfile='progress.log',
regex='metric =(.*?)\%')
###Output
_____no_output_____
###Markdown
Define the number of configurations and generate these jobs.
###Code
num_configs = 16
jobs_to_submit = parameters.generate_jobs_random_search(jcp, num_configs)
# Add environment variable for changing number of epochs per iteration
for job in jobs_to_submit:
job.environment_variables.append(models.EnvironmentVariable(
name='PARAM_EPOCHS',
value=None
))
###Output
_____no_output_____
###Markdown
Before proceeding to the following steps, please be sure you have already read the article [*Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization*](https://people.eecs.berkeley.edu/~kjamieson/hyperband.html)We define the following notation of parameters for HyperBand:- ***max_iter***: maximum iterations/epochs per configuration- ***eta***: downsampling rate- ***s_max***: number of unique executions of Successive Halving (minus one)- ***B***: total number of iterations (without reuse) per execution of Successive Halving (n,r)- ***n***: initial number of configurations- ***r***: initial number of iterations to run configurations for
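For example, with `max_iter = num_configs = 16` and `eta = 4` as set in the cells below, this gives `s_max = 2`, `B = 48`, `n = 16` and `r = 1`, so the three successive-halving rounds evaluate 16, 4 and 1 configurations for 1, 4 and 16 epochs respectively.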
###Code
max_iter = num_configs
eta = 4
logeta = lambda x: np.log(x)/np.log(eta)
s_max = int(logeta(max_iter))
B = (s_max+1)*max_iter
n = int(np.ceil(B/max_iter/(s_max+1)*eta**s_max))
r = max_iter*eta**(-s_max)
###Output
_____no_output_____
###Markdown
- The following loop describes the early-stopping procedure that considers multiple configurations in parallel and terminates poor performing configurations, leaving more resources for more promising configurations. - Note that, for illustration purposes, the implementation below is a simplified version of the HyperBand algorithm in which the outer loop used for hedging is omitted. A full implementation of HyperBand will be provided soon.- ***n_i*** and ***r_i*** denote the number of remaining configurations and the number of epochs to run at a given iteration - For each configuration, we generate specific job creation parameters with the given configuration and number of epochs. A new thread is started per new job that submits and monitors the job. Once a job completes, the final *metric* is extracted from its log file
###Code
for i in range(s_max+1):
n_i = int(n*eta**(-i))
r_i = int(r*eta**(i))
print("******** Round #{0} ******** ".format(str(i+1)))
# Add number of epochs to JobCreateParameters
for job in jobs_to_submit:
for ev in job.environment_variables:
if ev.name == 'PARAM_EPOCHS':
ev.value = str(r_i)
# Submit the jobs to the experiment
jobs = experiment_utils.submit_jobs(jobs_to_submit, 'mnist_hyperband').result()
# Wait for the jobs to finish running
experiment_utils.wait_all_jobs()
# Get the results and sort by metric value
results = experiment_utils.get_metrics_for_jobs(jobs, metric_extractor)
results.sort(key=lambda res: res['metric_value'])
for result in results:
print("Job {0} completed with metric value {1}".format(result['job_name'], result['metric_value']))
# Get the N best jobs and submit them again the next iteration
num_jobs_to_submit = int(n_i/eta)
jobs_to_submit = [utils.job.convert_job_to_jcp(res['job'], client) for res in results[0:num_jobs_to_submit]]
#### End Finite Horizon Successive Halving with (n,r)
###Output
_____no_output_____
###Markdown
4. Clean Up (Optional) Delete the ExperimentDelete the experiment and jobs inside it
###Code
_ = client.experiments.delete(cfg.resource_group, cfg.workspace, experiment_name).result()
###Output
_____no_output_____
###Markdown
Delete the ClusterWhen you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code.
###Code
client.clusters.delete(cfg.resource_group, cfg.workspace, cluster_name).result()
###Output
_____no_output_____
###Markdown
Delete File ShareWhen you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code.
###Code
service = FileService(cfg.storage_account_name, cfg.storage_account_key)
service.delete_share(azure_file_share_name)
###Output
_____no_output_____ |
Projects/World_ranking_Universities/Plotly_WIDS.ipynb | ###Markdown
PLOTLY LIBRARY Plotly is a Python graphing library. It makes exotic, interactive, publication-quality graphs online. * Importation of libraries and data loading* Different Types of Charts * Line Chart * Scatter Chart * Bar Chart * Pie Chart * Bubble Chart * Histogram * WordCloud * Box Plot * Scatter Matrix Plot
###Code
pip install plotly
import pandas as pd
import numpy as np
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
#init_notebook_mode(connected= True)
import plotly.graph_objs as go
from wordcloud import WordCloud
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
* Plotly was designed to render graphs on a web server or a local port. In order to render the plots inside the jupyter notebook, the notebook mode of plotly must be initialized. Without initializing notebook mode, no plotly plots can be visualized within this notebook (or any jupyter notebook).* To start creating graphs using plotly, we need to import 'graph_objs' modules* iplot() plots the figure(fig) that is created by data and layout 3 Parts To Every Graph* Data or Trace: This is usually a Python list object and contains all the data that we would want to plot. A trace is a collection of data points and their specifications that we would want to plot.* Layout: This object is used to change the features of the graph like axis titles, spacing, fonts etc. which are unrelated to the data itself.* Figure: This is a dictionary-like object which contains both the data object and the layout object and this defines the graph.
###Code
data= pd.read_csv('/content/drive/MyDrive/Plotly Class/timesData.csv')
data.head()
data.info()
#Generating few parts of the data in a cell
df14= data[data.year==2014].iloc[:100, :]
df15= data[data.year==2015].iloc[:100, :]
df16= data[data.year==2016].iloc[:100, :]
df2014= data[data.year== 2014].iloc[:3, :]
df2016= data[data.year== 2016].iloc[:10, :]
df12= data[data.year== 2016].iloc[:20, :]
x2011 = data.student_staff_ratio[data.year == 2011]
x2012 = data.student_staff_ratio[data.year == 2012]
x11 = data.country[data.year == 2012]
x2015 = data[data.year == 2015]
###Output
_____no_output_____
###Markdown
Line Graph Citation and Teaching vs World Rank of Top 100 Universities
###Code
df= data.iloc[:100, :]
first_trace= go.Scatter(x= df.world_rank,
y= df.citations,
mode= 'lines+markers',
name= 'citations',
marker= dict(color= 'rgba(16, 112, 2, 0.8)'),
text= df.university_name)
second_trace= go.Scatter(x= df.world_rank,
y= df.teaching,
mode= 'lines+markers',
name= 'teaching',
marker= dict(color= 'rgba(0, 200, 255, 0.8)'),
text= df.university_name)
data= [first_trace, second_trace]
layout= dict(title = 'Citation and Teaching vs World Rank of Top 100 Universities',
xaxis= dict(title='World_Rank', ticklen= 5, zeroline= False))
fig= dict(data= data, layout= layout)
#fig.show()
iplot(fig)
###Output
_____no_output_____
###Markdown
ScatterPlot Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years
###Code
fst_trace= go.Scatter(x= df14.world_rank,
y= df14.citations,
mode= 'markers',
name= '2014',
marker= dict(color= 'rgba(255, 128, 255, 0.8)'),
text= df.university_name)
sec_trace= go.Scatter(x= df15.world_rank,
y= df15.citations,
mode= 'markers',
name= '2015',
marker= dict(color= 'rgba(255, 8, 255, 0.8)'),
text= df.university_name)
trd_trace= go.Scatter(x= df16.world_rank,
y= df16.citations,
mode= 'markers',
name= '2016',
marker= dict(color= 'rgba(0, 200, 255, 0.8)'),
text= df.university_name)
data= [fst_trace, sec_trace, trd_trace]
layout= dict(title= 'Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years',
xaxis= dict(title= 'World_Rank', ticklen= 5, zeroline= False),
yaxis= dict(title= 'Citations', ticklen= 5, zeroline= False))
fig= dict(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Bar Graph citations and teaching of top 3 universities in 2014 (style1)
###Code
trace1= go.Bar(x= df2014.university_name,
y= df2014.citations,
name= 'citations',
marker= dict(color= 'rgba(255, 128, 255, 0.8)',
line= dict(color= 'rgb(0,0,0)', width= 1.5)),
text= df2014.country)
trace2= go.Bar(x= df2014.university_name,
y= df2014.teaching,
name= 'teaching',
marker= dict(color= 'rgba(0, 200, 255, 0.8)',
line= dict(color= 'rgb(0,0,0)', width= 1.5)),
text= df2014.country)
data= [trace1, trace2]
layout= go.Layout(barmode= 'group')
fig= go.Figure(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Bar Graph 2
###Code
trace1= go.Bar(x= df2014.university_name,
y= df2014.citations,
name= 'citations',
type= 'bar')
trace2= go.Bar(x= df2014.university_name,
y= df2014.teaching,
name= 'teaching',
type= 'bar')
data= [trace1, trace2]
layout= dict(title= 'citations and teaching of top 3 universities in 2014',
xaxis= dict(title= 'Top 3 Universities'),
barmode= 'stack')
fig= go.Figure(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Bar Graph Horizontal bar charts. (style3) Citation vs income for universities In 2016
###Code
df2016.info()
x_res= [x for x in df2016.research]
y_inc= [float(x) for x in df2016.income]
x_name= [x for x in df2016.university_name]
y_name= [x for x in df2016.university_name]
from plotly import tools
trace= go.Bar(x=x_res,
y= y_name,
marker= dict(color= 'rgba(0, 200, 255, 0.8)', line= dict(color='rgba(0, 0, 0)', width= 1.5)),
name= 'research',
orientation= 'h')
traces= go.Scatter(x=y_inc,
y=x_name,
mode= 'lines+markers',
line=dict(color='rgb(63, 72, 204)'),
name= 'income')
layout= dict(title= 'Citation and Income')
#yaxis= dict(showticklabels= True, domain= [0, 0.85]),
#yaxis2= dict(showticklabels= False, showline= True, linecolor= 'rgba(102, 102, 102, 0.8)', linewidth= 2, domain= [0,0.85]),
#xaxis= dict(showline= False, zeroline= False, showticklabels= True, showgrid= True, domain= [0, 0.42]),
#xaxis2= dict(showline= False, zeroline= False, showticklabels= True, showgrid= True, domain= [0.47, 0], side= 'top', dtick= 25),
#legend= dict(x= 0.029, y= 1.038, font= dict(size= 10)),
#margin=dict(l=200, r=20,t=70,b=70),
#paper_bgcolor='rgb(248, 248, 255)',
#plot_bgcolor='rgb(248, 248, 255)')
annotations= []
x_s= np.round(x_res, decimals= 2)
x_c= np.rint(y_inc)
for a , b, c in zip(x_c, x_s, x_name):
annotations.append(dict(xref= 'x2', yref= 'y2', y= c, x= a-4, text='{:,}'.format(a),
font= dict(family= 'Arial', size= 12, color='rgb(63, 72, 204)'), showarrow= False))
annotations.append(dict(xref= 'x1', yref= 'y1', y= c, x= b + 3, text=str(b),
font= dict(family= 'Arial', size= 12, color='rgb(171, 50, 96)'), showarrow= False))
layout['annotations']= annotations
fig= tools.make_subplots(rows= 1, cols= 2, specs=[[{}, {}]], shared_xaxes= True, shared_yaxes= False, vertical_spacing= 0.001)
fig.append_trace(trace, 1, 1)
fig.append_trace(traces, 1, 2)
fig['layout'].update(layout)
iplot(fig)
###Output
/usr/local/lib/python3.6/dist-packages/plotly/tools.py:465: DeprecationWarning:
plotly.tools.make_subplots is deprecated, please use plotly.subplots.make_subplots instead
###Markdown
Pie Chart Student Rate at Top 10 Universities in 2016
###Code
pie= df2016.num_students
list_pie= [float(x.replace(',','.'))for x in df2016.num_students]
label= df2016.university_name
data= dict(values= list_pie, labels= label, domain=dict(x = [0, .6]),
name= 'Number of Student Rate', hoverinfo= 'label+percent+name', hole= .3, type= 'pie' )
layout= dict(title= 'Student Rate at Top 10 Universities in 2016', annotations= [{ "font": { "size": 18},
"showarrow": False,
"text": "Number of Students",
"x": 0.20,
"y": 1}])
fig= dict(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Bubble Chart University world rank (first 20) vs teaching score with number of students(size) and international score (color) in 2016
###Code
df12['num_students']= df12.num_students.str.replace(',','.', regex=True)
df12.international= df12.international.str.replace(',','.', regex=True)
stud_size = [float(x) for x in df12.num_students]
int_color = [float(x) for x in df12.international]
data= dict(x= df12.world_rank,
y= df12.teaching,
mode= 'markers',
marker= dict(color= int_color, size=stud_size, showscale= True),
text= df12.university_name)
layout= dict(title= 'Uni World Rank, Teaching with Number of Student as Size, International Score as Color')
fig= dict(data= data, layout = layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Histogram students-staff ratio in 2011 and 2012 years
###Code
fst_trace= go.Histogram(x= x2011,
opacity= 0.75,
name= '2011',
marker= dict(color= 'rgba(0, 200, 255, 0.8)'))
scs_trace=go.Histogram(x= x2012,
opacity= 0.75,
name= '2012',
marker= dict(color= 'rgba(255, 128, 255, 0.8)'))
data= [fst_trace, scs_trace]
layout= go.Layout(barmode= 'overlay',
title= ' students-staff ratio in 2011 and 2012',
xaxis= dict(title= 'student_staff_ratio'),
yaxis= dict(title= 'Counts'))
fig= dict(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
Word Cloud Most Mentioned Country In 2011* A Wordcloud (or Tag cloud) is a visual representation of text data. It displays a list of words, the importance of each being shown with font size or color
###Code
plt.subplots(figsize=(10,10))
cloud= WordCloud(background_color='black', width= 512, height= 384).generate(" ".join(x11))
plt.imshow(cloud)
plt.axis('off')
plt.savefig('graph.png')
plt.show()
###Output
_____no_output_____
###Markdown
Box Plot Total Score and Research in 2015
###Code
trace= go.Box(y= x2015.total_score,
name= 'total score of universities in 2015',
marker=dict(color= 'rgba(16, 112, 2, 0.8)'))
traces= go.Box(y= x2015.research,
name= 'research',
marker= dict(color= 'rgb(12, 12, 140)'))
data= [trace, traces]
iplot(data)
###Output
_____no_output_____
###Markdown
Scatter MatrixPlotResearch, Total_Score, International In 2015
###Code
import plotly.figure_factory as ff
data2015 = x2015.loc[:,["research","international", "total_score"]]
data2015["index"] = np.arange(1,len(data2015)+1)
fig = ff.create_scatterplotmatrix(data2015, diag='box', index='index',colormap='Portland',
colormap_type='cat',
height=700, width=700)
iplot(fig)
###Output
_____no_output_____
###Markdown
3D Scatter Plot World Rank, Citation, Research In 3D
###Code
trace= go.Scatter3d(x= x2015.world_rank,
y= x2015.citations,
z= x2015.research,
mode= 'markers',
marker=dict(size=10,color='rgb(255,0,0)'))
data= [trace]
layout= go.Layout(margin=dict(l=0, r=0, b=0, t=0))
fig= go.Figure(data= data, layout= layout)
iplot(fig)
###Output
_____no_output_____ |
ML_Group_work_Algorithms.ipynb | ###Markdown
###Code
# 1. Prepare Problem
# a) Load libraries
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
from pandas import read_csv
from pandas import set_option
from pandas.plotting import scatter_matrix
from sklearn.pipeline import Pipeline
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import FeatureUnion
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# b) Load dataset
df = pd.read_csv('https://raw.githubusercontent.com/Carlosrnes/group_work_ml/main/techscape-ecommerce/train.csv')
# Drop Access_ID
df = df.drop(['Access_ID'], axis=1)
# Converting Date type from object to datetime
df['Date'] = pd.to_datetime(df['Date'], format='%d-%b-%y')
filters1 = (
(df['AccountMng_Duration']<=2000)
&
(df['FAQ_Duration']<=1500)
&
(df['Product_Pages']<=500)
&
(df['Product_Duration']<=25000)
&
(df['GoogleAnalytics_PageValue']<=300)
)
df_1 = df[filters1]
print('Percentage of data kept after removing outliers:', np.round(df_1.shape[0] / df.shape[0], 4))
df = df[filters1]
# Creating new features
df['month'] = df['Date'].dt.month
# Dropping columns
df = df.drop(['Date'], axis=1).reset_index(drop=True)
# One-hot encoding
df = pd.concat([df,pd.get_dummies(df['month'], prefix='month_',dummy_na=True)],axis=1).drop(['month'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Type_of_Traffic'], prefix='Type_of_Traffic_',dummy_na=True)],axis=1).drop(['Type_of_Traffic'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Browser'], prefix='Browser_',dummy_na=True)],axis=1).drop(['Browser'],axis=1)
df = pd.concat([df,pd.get_dummies(df['OS'], prefix='OS_',dummy_na=True)],axis=1).drop(['OS'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Country'], prefix='Country_',dummy_na=True)],axis=1).drop(['Country'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Type_of_Visitor'], prefix='Type_of_Visitor_',dummy_na=True)],axis=1).drop(['Type_of_Visitor'],axis=1)
# Sampling
dataset = df.sample(frac=0.90, random_state=786).reset_index(drop=True)
data_unseen = df.drop(dataset.index).reset_index(drop=True)
print('Data for Modeling: ' + str(dataset.shape))
print('Unseen Data For Predictions: ' + str(data_unseen.shape))
# 4. Evaluate Algorithms
# a) Split-out validation dataset
df = dataset.dropna()
df1 = df.drop(['Buy'], axis=1)
array = df1.values
X = array[:,0:df1.shape[1]-1].astype(float)
y = np.array(df['Buy'])
validation_size=0.2
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y,
test_size=validation_size, random_state=seed)
# b) Test options and evaluation metric
num_folds = 10
scoring = 'f1'
# c) Spot Check Algorithms
# create a dict of standard models to evaluate {name:object}
def define_models(models=dict()):
# linear models
models['logistic'] = LogisticRegression()
alpha = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for a in alpha:
models['ridge-'+str(a)] = RidgeClassifier(alpha=a)
models['sgd'] = SGDClassifier(max_iter=1000, tol=1e-3)
models['pa'] = PassiveAggressiveClassifier(max_iter=1000, tol=1e-3)
# non-linear models
n_neighbors = range(1, 12)
for k in n_neighbors:
models['knn-'+str(k)] = KNeighborsClassifier(n_neighbors=k)
models['cart'] = DecisionTreeClassifier()
models['extra'] = ExtraTreeClassifier()
#models['svml'] = SVC(kernel='linear')
#models['svmp'] = SVC(kernel='poly')
#c_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
#for c in c_values:
# models['svmr'+str(c)] = SVC(C=c)
models['bayes'] = GaussianNB()
# ensemble models
n_trees = 100
models['ada'] = AdaBoostClassifier(n_estimators=n_trees)
models['bag'] = BaggingClassifier(n_estimators=n_trees)
models['rf'] = RandomForestClassifier(n_estimators=n_trees)
models['et'] = ExtraTreesClassifier(n_estimators=n_trees)
models['gbm'] = GradientBoostingClassifier(n_estimators=n_trees)
print('Defined %d models' % len(models))
return models
# no transforms pipeline
def pipeline_none(model):
return model
# standardize transform pipeline
def pipeline_standardize(model):
steps = list()
# standardization
steps.append(('standardize', StandardScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# normalize transform pipeline
def pipeline_normalize(model):
steps = list()
# normalization
steps.append(('normalize', MinMaxScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# standardize and normalize pipeline
def pipeline_std_norm(model):
steps = list()
# standardization
steps.append(('standardize', StandardScaler()))
# normalization
steps.append(('normalize', MinMaxScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# evaluate a single model
def evaluate_model(X, y, model, folds, metric, pipe_func):
# create the pipeline
pipeline = pipe_func(model)
# evaluate model
scores = cross_val_score(pipeline, X, y, scoring=metric, cv=folds, n_jobs=-1)
return scores
# evaluate a model and try to trap errors and and hide warnings
def robust_evaluate_model(X, y, model, folds, metric, pipe_func):
scores = None
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
scores = evaluate_model(X, y, model, folds, metric, pipe_func)
except:
scores = None
return scores
# evaluate a dict of models {name:object}, returns {name:score}
def evaluate_models(X, y, models, pipe_funcs, folds=num_folds, metric=scoring):
results = dict()
for name, model in models.items():
# evaluate model under each preparation function
for i in range(len(pipe_funcs)):
# evaluate the model
scores = robust_evaluate_model(X, y, model, folds, metric, pipe_funcs[i])
# update name
run_name = str(i) + name
# show process
if scores is not None:
# store a result
results[run_name] = scores
mean_score, std_score = mean(scores), std(scores)
print('>%s: %.3f (+/-%.3f)' % (run_name, mean_score, std_score))
else:
print('>%s: error' % run_name)
return results
# print and plot the top n results
def summarize_results(results, maximize=True, top_n=10):
# check for no results
if len(results) == 0:
print('no results')
return
# determine how many results to summarize
n = min(top_n, len(results))
# create a list of (name, mean(scores)) tuples
mean_scores = [(k,mean(v)) for k,v in results.items()]
# sort tuples by mean score
mean_scores = sorted(mean_scores, key=lambda x: x[1])
# reverse for descending order (e.g. for accuracy)
if maximize:
mean_scores = list(reversed(mean_scores))
# retrieve the top n for summarization
names = [x[0] for x in mean_scores[:n]]
scores = [results[x[0]] for x in mean_scores[:n]]
# print the top n
print()
for i in range(n):
name = names[i]
mean_score, std_score = mean(results[name]), std(results[name])
print('Rank=%d, Name=%s, Score=%.3f (+/- %.3f)' % (i+1, name, mean_score, std_score))
# boxplot for the top n
plt.boxplot(scores, labels=names)
_, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.savefig('spotcheck.png')
# Run Spot Check Algorithms
# get model list
models = define_models()
# define transform pipelines
pipelines = [pipeline_none, pipeline_standardize, pipeline_normalize, pipeline_std_norm]
# evaluate models
results = evaluate_models(X, y, models, pipelines)
# summarize results
summarize_results(results)
# 5. Improve Accuracy
# a) Algorithm Tuning (Hyperparameters)
# Tune Gradient Boosting Classifier
n_trees = [10,50,100, 200]
learning = [0.001, 0.01, 0.1]
subsample = [0.7, 0.85, 1.0]
max_depth = [3, 7]
param_grid = dict(n_estimators=n_trees, learning_rate=learning, subsample=subsample, max_depth=max_depth)
model = GradientBoostingClassifier()
kfold = StratifiedKFold(n_splits=num_folds, random_state=seed, shuffle=True)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(X, y)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# 6. Feature Selection
# Define model
model_selected = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, subsample=0.85)
features_list = list(df1.columns.values)
# a) Recursive Feature Elimination.
rfecv = RFECV(estimator=model_selected, step=1, cv=kfold, scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure(figsize=(16,10))
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
# a) Recursive Feature Elimination.
model = model_selected
rfe = RFE(model, 16)
fit = rfe.fit(X, y)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
print("Feature Ranking: %s" % fit.ranking_)
print("Feature Ranking: %s" % fit.estimator_)
# c) Feature Importance.
model = model_selected
model.fit(X, y)
plt.figure(figsize=(16,10))
importances = pd.DataFrame({'feature': features_list, 'importance': np.round(model.feature_importances_,3)})
importances = importances.sort_values('importance', ascending=True).set_index('feature')
importances.plot.barh().set_title('Importance of features')
# 7. Finalize Model
# a) Predictions on validation dataset
# prepare the model
model = model_selected
model.fit(X, y)
# estimate accuracy on validation dataset
predictions = model.predict(X_validation)
print('Accuracy:')
print(accuracy_score(Y_validation, predictions))
print('f1-score:')
print(f1_score(Y_validation, predictions))
print('Confusion Matrix:')
print(confusion_matrix(Y_validation, predictions))
print('Classification Report:')
print(classification_report(Y_validation, predictions))
###Output
Accuracy:
0.9225626740947075
f1-score:
0.7191919191919193
Confusion Matrix:
[[1478 48]
[ 91 178]]
Classification Report:
precision recall f1-score support
0 0.94 0.97 0.96 1526
1 0.79 0.66 0.72 269
accuracy 0.92 1795
macro avg 0.86 0.82 0.84 1795
weighted avg 0.92 0.92 0.92 1795
|
audioPrep.ipynb | ###Markdown
###Code
pip install -q tensorflow-io
import tensorflow as tf
import tensorflow_io as tfio
audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac')
print(audio)
audio_slice = audio[100:]
#remove the last dimension
audio_tensor = tf.squeeze(audio_slice, axis=[-1])
print(audio_tensor)
from IPython.display import Audio
Audio(audio_tensor.numpy(), rate=audio.rate.numpy())
import matplotlib.pyplot as plt
tensor = tf.cast(audio_tensor, tf.float32) / 32768.0
plt.figure()
plt.plot(tensor.numpy())
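# Trim away leading/trailing samples whose amplitude falls below `epsilon`;
# `trim` returns the [start, stop] sample positions of the region that is kept.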
position = tfio.experimental.audio.trim(tensor, axis=0, epsilon=0.1)
start = position[0]
stop = position[1]
print(start, stop)
processed = tensor[start:stop]
plt.figure()
plt.plot(processed.numpy())
Audio(processed.numpy(), rate=audio.rate.numpy())
fade = tfio.experimental.audio.fade(
processed, fade_in=1000, fade_out=2000, mode="logarithmic")
plt.figure()
plt.plot(fade.numpy())
Audio(fade.numpy(), rate=audio.rate.numpy())
# The spectrogram
# Convert to spectrogram
spectrogram = tfio.experimental.audio.spectrogram(
fade, nfft=512, window=512, stride=256)
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
# Convert to mel-spectrogram
mel_spectrogram = tfio.experimental.audio.melscale(
spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
plt.figure()
plt.imshow(tf.math.log(mel_spectrogram).numpy())
# Convert to db scale mel-spectrogram
dbscale_mel_spectrogram = tfio.experimental.audio.dbscale(
mel_spectrogram, top_db=80)
plt.figure()
plt.imshow(dbscale_mel_spectrogram.numpy())
#Frequency masking
freq_mask = tfio.experimental.audio.freq_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(freq_mask.numpy())
#Time mask
time_mask = tfio.experimental.audio.time_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(time_mask.numpy())
###Output
_____no_output_____ |
notebooks/8D8_normal_vocab_enrich.ipynb | ###Markdown
6/16/2021all normal tissue finder - make a Bar chart of vocabularies by tissue type
###Code
# basic packages
import os, glob
import pandas as pd
import numpy as np; np.random.seed(0)
import itertools
from collections import Counter, defaultdict
import time
# Import tools needed for visualization
from upsetplot import plot, from_memberships
import seaborn as sns;
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
save_dir = '../data/processed/fig4_modelling/vocab_sum/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# TISSUE = 'GDSD0'
normal_tissues = ['Airway','Astrocytes','Bladder','Colon','Esophageal','GDSD6','GM12878','HMEC','Melanocytes','Ovarian',
'Pancreas','Prostate','Renal','Thyroid','Uterine']
normal_tissues_dict = dict(zip(normal_tissues,range(len(normal_tissues))))
###Output
_____no_output_____
###Markdown
1. Tissue specific transcription factors- manual annotation- cell type specific expressed tfs (HOCOMOCO)- modelling tfs A. cell type specific expressed tfs (HOCOMOCO)
###Code
# tfs
tf_annon_df = pd.read_csv('../data/external/HOCOMOCOv11_annotation.csv',index_col=0)
tf_annon_df['id_trim'] = tf_annon_df['id'] + '.pwm.trim'
tf_name_to_id_dict = pd.Series(tf_annon_df.id_trim.values, index=tf_annon_df.tf.values).to_dict()
tf_id_to_name_dict = pd.Series(tf_annon_df.tf.values, index=tf_annon_df.id_trim.values).to_dict()
print(len(tf_name_to_id_dict))
THRES=1
rna_tpm_file = '../data/interim/rna/tissue_tpm_sym.csv'
rna_df = pd.read_csv(rna_tpm_file,index_col=0)
rna_df_tf = rna_df.loc[tf_annon_df.tf.values,normal_tissues]
# rna_df_log = np.log2(rna_df+1e-2)
# rna_df_norm = as.data.frame(scale(rna_df_log, center = TRUE, scale = TRUE))
# head(rna_df_norm)
num_tissues_per_tf = pd.DataFrame(rna_df_tf>THRES).sum(axis=1)
# number of unique tfs
unique_tfs = num_tissues_per_tf.index.values[num_tissues_per_tf==1]
print(unique_tfs.shape)
unique_tf_to_tissue = pd.DataFrame(rna_df_tf>THRES).reset_index().melt('index')
unique_tf_to_tissue = unique_tf_to_tissue[unique_tf_to_tissue['value']]
unique_tf_to_tissue = unique_tf_to_tissue[unique_tf_to_tissue['index'].isin(unique_tfs)]
unique_tf_to_tissue = unique_tf_to_tissue[['index','variable']]
unique_tf_to_tissue.columns = ['tf','cell_type']
unique_tf_to_tissue.cell_type.value_counts()
###Output
_____no_output_____
###Markdown
manual annotation - from literature
###Code
#get tfs
tf_df_manual = pd.read_csv('../data/external/transcription_factor_info_061521.csv').drop_duplicates()
# TFS = sorted(set(tf_df[tf_df['cell_type']=='Keratinocytes']["tf"]))##### DIFFERENT FOR EACH TISSUE
# print(len(TFS))
# print(TFS)
tf_df_manual.cell_type.value_counts()
tf_df = pd.concat([unique_tf_to_tissue,tf_df_manual],sort=True).drop_duplicates()
tf_df.cell_type.value_counts()
###Output
_____no_output_____
###Markdown
2 helper functions
###Code
# Preview of one vocab-info table (needs `vocab_dir` and `tissue` from the cells below, so kept commented out here):
# pd.read_csv(os.path.join(vocab_dir,'expr_'+tissue+'_pro_pro_vocab_info.csv' ))
def get_other_vocab_word(row, next_row):
if row['vocab']!=next_row['vocab']:
return False
vocab_word = set(row['tf'])
vocab_set = set(row['vocab'].split('::'))
other_vocab = list(vocab_set - vocab_word)[0]
return other_vocab == next_row['tf']
def check_distance(row,next_row,max_dist=135):  # 135 bp, matches MAX_DIST set in the globals cell below
if row['chr_m']==next_row['chr_m']:
if row['stop_m']<next_row['start_m']:
tot_dist = next_row['stop_m'] - row['start_m']
btn_dist = next_row['start_m'] - row['stop_m']
return (tot_dist < max_dist), tot_dist, btn_dist
return False,-1,-1
def check_tissue(row,next_row,tfs=()):  # tfs is always passed explicitly; empty default avoids referencing the undefined global TFS
if (row['tf'] in tfs) & (next_row['tf'] in tfs):
return 'both'
elif (row['tf'] in tfs) | (next_row['tf'] in tfs):
return 'one'
else:
return 'none'
def get_hits(vocab_file,tfs=()):  # pass the tissue-specific TF list explicitly (see get_vocabs below)
print('**** reading', vocab_file)
vocab_df = pd.read_csv(vocab_file)
print(vocab_df.shape)
idx = 0
idx_hits = 0
results_dict = {}
while idx < (vocab_df.shape[0]-1):
# look at next
row = vocab_df.iloc[idx,:]
next_row = vocab_df.iloc[idx+1,:]
check_vocab_pair = get_other_vocab_word(row,next_row)
check_dist,tot_dist, btn_dist = check_distance(row, next_row)
check_tissue_tf = check_tissue(row,next_row,tfs)
if (check_dist and check_vocab_pair):
# print('hi',idx)
# print(row)
# print(next_row)
results_dict[idx_hits] = {'vocab_pair':row['vocab'],'tot_dist':tot_dist,'btn_dist':btn_dist,
'chr':row['chr'],'start':row['start_m'],'stop':next_row['stop_m'],
'vocab1':row['tf'],'vocab1_start':row['start_m'], 'vocab1_stop': row['stop_m'],
'vocab2':next_row['tf'],'vocab2_start':next_row['start_m'], 'vocab2_stop': next_row['stop_m'],
'genes':row['genes'],'num_genes':len(row['genes'].split('|')), 'tissue':row['tissue'],
'check_tissuetf':check_tissue_tf}
idx_hits+=1
idx+=1
print('num_hits',idx_hits)
results_df = pd.DataFrame.from_dict(results_dict, orient='index')
return results_df
def filter_results(results_df,min_hits_per_vocab=10):
print('shape', results_df.shape)
vocab_counts = results_df.vocab_pair.value_counts()
print('original num vocab',vocab_counts.shape[0])
vocab_to_include = vocab_counts[vocab_counts>min_hits_per_vocab].index.values
print('filt num vocab',vocab_to_include.shape[0])
results_df_filt = results_df[results_df.vocab_pair.isin(vocab_to_include)]
return results_df_filt
def get_counts(results_df, label):
counts_df = pd.DataFrame(results_df.vocab_pair.value_counts())
counts_df.columns = ['num_instance']
counts_df['label']=label
return counts_df
def get_vocabs(tissue, tfs, save=True,filter_thres = ['none']):
"""
pipeline to get vocabs
filter thres is a list of 'none' or #'both' # 'one'#'none','one',
"""
tfs = sorted(set(tfs))
print(tissue, 'num tfs', len(tfs))
print(tfs)
pro_pro_file = os.path.join(vocab_dir,'expr_'+tissue+'_pro_pro_vocab_info.csv' )
loop_loop_file = os.path.join(vocab_dir,'expr_'+tissue+'_loop_loop_vocab_info.csv' )
#Takes awhile
# Step 1. get expressiod and stability for specific config regions
results_expr_pro_pro = get_hits(pro_pro_file, tfs=tfs)
results_expr_loop_loop = get_hits(loop_loop_file, tfs=tfs)
# Step 2: raw stats
print('pre genomic instance filter')
motifs_pro_pro = sorted(set(list(results_expr_pro_pro.vocab1.unique())+list(results_expr_pro_pro.vocab2.unique())))
print('num motifs in pro_pro', len(motifs_pro_pro))
print(motifs_pro_pro)
motifs_loop_loop = sorted(set(list(results_expr_loop_loop.vocab1.unique())+list(results_expr_loop_loop.vocab2.unique())))
print('num motifs in loop_loop', len(motifs_loop_loop))
print(motifs_loop_loop)
print('num vocab in expression enrichment (pro-pro region): ', results_expr_pro_pro.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (pro-pro region): ', results_stability_pro_pro.vocab_pair.unique().shape[0])
# print(results_stability_pro_pro.vocab_pair.unique())
print('num vocab in expression enrichment (loop-loop region): ', results_expr_loop_loop.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (loop-loop region): ', results_stability_loop_loop.vocab_pair.unique().shape[0])
# print(results_stability_loop_loop.vocab_pair.unique())
# step 3: filter expression enriched vocab words if then have at least 10 genomic instances then get stats
results_expr_pro_pro = filter_results(results_expr_pro_pro,min_hits_per_vocab=10)
results_expr_loop_loop = filter_results(results_expr_loop_loop,min_hits_per_vocab=10)
print('post genomic instance filter')
print('num vocab in expression enrichment (pro-pro region): ', results_expr_pro_pro.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (pro-pro region): ', results_stability_pro_pro.vocab_pair.unique().shape[0])
print(results_expr_pro_pro.vocab_pair.unique())
print('num vocab in expression enrichment (loop-loop region): ', results_expr_loop_loop.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (loop-loop region): ', results_stability_loop_loop.vocab_pair.unique().shape[0])
print(results_expr_loop_loop.vocab_pair.unique())
# step 4: filter expr vocab words based on whether there is they are annotated for skin
print(results_expr_pro_pro[['vocab_pair','check_tissuetf']].drop_duplicates().check_tissuetf.value_counts())
print(results_expr_loop_loop[['vocab_pair','check_tissuetf']].drop_duplicates().check_tissuetf.value_counts())
results_expr_pro_pro_tissue = results_expr_pro_pro[results_expr_pro_pro.check_tissuetf.isin(filter_thres)]
print('pro-pro region')
print('total vocab:',results_expr_pro_pro.vocab_pair.unique().shape[0],'tissue annon vocab:', results_expr_pro_pro_tissue.vocab_pair.unique().shape[0])
print(results_expr_pro_pro_tissue.vocab_pair.unique())
results_expr_loop_loop_tissue = results_expr_loop_loop[results_expr_loop_loop.check_tissuetf.isin(filter_thres)]
print('loop-loop region')
print('total vocab:',results_expr_loop_loop.vocab_pair.unique().shape[0],'tissue annon vocab:', results_expr_loop_loop_tissue.vocab_pair.unique().shape[0])
print(results_expr_loop_loop_tissue.vocab_pair.unique())
# step 5: add in stability scores vocab pairs that pass the genomic instance filter and get genomic instance counts
vocab_summary_df = pd.concat([# get_counts(results_stability_pro_pro, 'stability_pro'),
get_counts(results_expr_pro_pro_tissue, 'expr_pro_tissue'),
# get_counts(results_stability_loop_loop, 'stability_loop'),
get_counts(results_expr_loop_loop_tissue, 'expr_loop_tissue')],axis=0)
vocab_summary_df.index.set_names('vocab',inplace=True)
vocab_summary_df.reset_index(inplace=True)
vocab_summary_df = vocab_summary_df.groupby('vocab').agg({'num_instance':sum, 'label':'|'.join}).reset_index()
vocab_summary_df['tissue']=tissue
print(vocab_summary_df.label.value_counts())
print('*****, number of vocab words', len(vocab_summary_df))
# results_stability_loop_loop.vocab_pair.unique()
# Saving..
if save:
vocab_summary_df.to_csv(os.path.join(save_dir, tissue+'_vocab_summary.csv'))
return vocab_summary_df
###Output
_____no_output_____
###Markdown
3. Vocabulary genomic instances - running
###Code
# global variables
vocab_dir = '../data/processed/fig4_modelling/tf_tf_pairs/'
MAX_DIST=135
%%time
vocab_tissue_all = pd.DataFrame()
for tissue in normal_tissues:
print('==============================================================')
tfs = tf_df[tf_df.cell_type==tissue].tf.values
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none'])
vocab_tissue_all = pd.concat([vocab_tissue_all, vocab_summary_df])
vocab_tissue_all.tissue.value_counts()
# no filtering
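# A quick sketch of the summary figure described at the top of the notebook:
# bar chart of the number of enriched vocabulary words per tissue
# (counts come from the loop above; the styling here is illustrative only)
vocab_counts = vocab_tissue_all.tissue.value_counts()
ax = vocab_counts.plot.bar(figsize=(8, 4), color='steelblue')
ax.set_xlabel('tissue')
ax.set_ylabel('number of vocabulary words')
plt.tight_layout()
plt.show()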
###Output
_____no_output_____
###Markdown
redo some of them
###Code
tissue= 'HMEC'
tfs = tf_df[tf_df.cell_type==tissue].tf.values
tfs = list(tfs)+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA1', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXA9', 'HOXB2', 'HOXC13', 'HOXC6', 'HOXC8', 'IRF5', 'IRF7', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFF', 'MAFG', 'MAFK', 'MEIS3', 'MESP1', 'MNX1', 'MSX1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'PPARG', 'RREB1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TEAD3', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF563', 'ZNF667', 'ZNF816', 'ZNF85']+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXB2', 'HOXC6', 'HOXC8', 'IRF5', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFG', 'MAFK', 'MECOM', 'MEIS3', 'MESP1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF667', 'ZNF816', 'ZNF85']
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none', 'one','both'])
%%time
tissue= 'Uterine'
tfs = tf_df[tf_df.cell_type==tissue].tf.values
# tfs = list(tfs)+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA1', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXA9', 'HOXB2', 'HOXC13', 'HOXC6', 'HOXC8', 'IRF5', 'IRF7', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFF', 'MAFG', 'MAFK', 'MEIS3', 'MESP1', 'MNX1', 'MSX1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'PPARG', 'RREB1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TEAD3', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF563', 'ZNF667', 'ZNF816', 'ZNF85']+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXB2', 'HOXC6', 'HOXC8', 'IRF5', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFG', 'MAFK', 'MECOM', 'MEIS3', 'MESP1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF667', 'ZNF816', 'ZNF85']
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none', 'one','both'])
###Output
Uterine num tfs 12
['CEBPB', 'EGR1', 'ETS1', 'FOXA2', 'GATA2', 'HOXA7', 'KLF16', 'KLF5', 'NR3C1', 'PBX1', 'SOX17', 'SP1']
**** reading ../data/processed/fig4_modelling/tf_tf_pairs/expr_Uterine_pro_pro_vocab_info.csv
(48061, 15)
num_hits 8615
**** reading ../data/processed/fig4_modelling/tf_tf_pairs/expr_Uterine_loop_loop_vocab_info.csv
(525874, 15)
num_hits 56566
pre genomic instance filter
num motifs in pro_pro 173
['ARID3A', 'ARID5B', 'ATF2', 'ATF4', 'BACH1', 'BCL11A', 'BCL6', 'BHLHE41', 'BPTF', 'BRCA1', 'CBFB', 'CDC5L', 'CEBPB', 'CEBPD', 'CEBPG', 'CLOCK', 'CREB3', 'CREM', 'DBP', 'DDIT3', 'DLX3', 'DLX5', 'EHF', 'ELF3', 'EPAS1', 'ETV4', 'ETV5', 'ETV6', 'FLI1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA2', 'FOXC1', 'FOXD1', 'FOXH1', 'FOXJ2', 'FOXJ3', 'FOXK1', 'FOXL1', 'FOXO1', 'FOXO3', 'FOXP1', 'FOXQ1', 'FUBP1', 'GATA3', 'GATA6', 'GMEB2', 'GRHL1', 'GRHL2', 'HBP1', 'HEY1', 'HIC2', 'HIVEP1', 'HLTF', 'HMBOX1', 'HMGA1', 'HOXA1', 'HOXA10', 'HOXA5', 'HOXA7', 'HOXA9', 'HOXB2', 'HOXB3', 'HOXB4', 'HOXB6', 'HOXB7', 'HOXB8', 'IRF1', 'IRF2', 'IRF3', 'IRF6', 'IRF7', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF4', 'MAFF', 'MAFG', 'MAFK', 'MECOM', 'MEF2A', 'MEIS1', 'MSX1', 'MSX2', 'MXI1', 'MYBL2', 'MYNN', 'NFAT5', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'NFIB', 'NFIC', 'NFIL3', 'NFKB2', 'NKX3-1', 'NR1H3', 'NR1I3', 'NR2F1', 'NR2F6', 'NR3C1', 'NR4A2', 'OVOL1', 'OVOL2', 'PAX8', 'PITX1', 'PLAGL1', 'POU2F1', 'POU2F2', 'PRDM1', 'REL', 'RELA', 'RELB', 'RFX2', 'RREB1', 'RUNX1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD2', 'SMAD3', 'SMAD4', 'SMARCA1', 'SNAI1', 'SNAI2', 'SOX13', 'SOX15', 'SOX17', 'SOX4', 'SOX7', 'SOX9', 'SPDEF', 'SREBF1', 'STAT1', 'STAT2', 'STAT5A', 'STAT6', 'TBX3', 'TCF7L1', 'TCF7L2', 'TEAD1', 'TEAD3', 'TFCP2', 'TFEB', 'TGIF1', 'THRA', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB7B', 'ZFHX3', 'ZFP28', 'ZNF134', 'ZNF232', 'ZNF274', 'ZNF317', 'ZNF335', 'ZNF354A', 'ZNF394', 'ZNF528', 'ZNF563', 'ZNF589', 'ZNF680', 'ZNF768', 'ZNF85', 'ZSCAN16']
num motifs in loop_loop 175
['ARID3A', 'ARID5B', 'ATF2', 'ATF4', 'BACH1', 'BCL11A', 'BCL6', 'BHLHE41', 'BPTF', 'BRCA1', 'CBFB', 'CDC5L', 'CEBPB', 'CEBPD', 'CEBPG', 'CLOCK', 'CREB3', 'CREM', 'DBP', 'DDIT3', 'DLX3', 'DLX5', 'EHF', 'ELF3', 'EPAS1', 'ETV4', 'ETV5', 'ETV6', 'FLI1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA2', 'FOXC1', 'FOXD1', 'FOXH1', 'FOXJ2', 'FOXJ3', 'FOXK1', 'FOXL1', 'FOXO1', 'FOXO3', 'FOXP1', 'FOXQ1', 'FUBP1', 'GATA3', 'GATA6', 'GMEB2', 'GRHL1', 'GRHL2', 'HBP1', 'HEY1', 'HIC2', 'HIVEP1', 'HLTF', 'HMBOX1', 'HMGA1', 'HOXA1', 'HOXA10', 'HOXA5', 'HOXA7', 'HOXA9', 'HOXB2', 'HOXB3', 'HOXB4', 'HOXB6', 'HOXB7', 'HOXB8', 'IRF1', 'IRF2', 'IRF3', 'IRF6', 'IRF7', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF4', 'MAFF', 'MAFG', 'MAFK', 'MECOM', 'MEF2A', 'MEF2D', 'MEIS1', 'MSX1', 'MSX2', 'MXI1', 'MYBL2', 'MYNN', 'NFAT5', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'NFIB', 'NFIC', 'NFIL3', 'NFKB2', 'NKX3-1', 'NR1H3', 'NR1I3', 'NR2F1', 'NR2F6', 'NR3C1', 'NR4A2', 'OVOL1', 'OVOL2', 'PAX8', 'PITX1', 'PLAGL1', 'POU2F1', 'POU2F2', 'PRDM1', 'REL', 'RELA', 'RELB', 'RFX2', 'RREB1', 'RUNX1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD2', 'SMAD3', 'SMAD4', 'SMARCA1', 'SNAI1', 'SNAI2', 'SOX13', 'SOX15', 'SOX17', 'SOX4', 'SOX7', 'SOX9', 'SPDEF', 'SREBF1', 'STAT1', 'STAT2', 'STAT5A', 'STAT6', 'TBP', 'TBX3', 'TCF7L1', 'TCF7L2', 'TEAD1', 'TEAD3', 'TFCP2', 'TFEB', 'TGIF1', 'THRA', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB7B', 'ZFHX3', 'ZFP28', 'ZNF134', 'ZNF232', 'ZNF274', 'ZNF317', 'ZNF335', 'ZNF354A', 'ZNF394', 'ZNF528', 'ZNF563', 'ZNF589', 'ZNF680', 'ZNF768', 'ZNF85', 'ZSCAN16']
num vocab in expression enrichment (pro-pro region): 1813
num vocab in expression enrichment (loop-loop region): 3816
shape (8615, 16)
original num vocab 1813
filt num vocab 128
shape (56566, 16)
original num vocab 3816
filt num vocab 805
post genomic instance filter
num vocab in expression enrichment (pro-pro region): 128
['ARID5B::FOXO1' 'ARID5B::HMBOX1' 'ARID5B::SMAD2' 'ARID5B::SOX9'
'ARID5B::ZNF528' 'CLOCK::NFAT5' 'CREM::SOX13' 'EHF::ETV5' 'EHF::UBP1'
'EHF::ZNF589' 'ELF3::ETV5' 'ELF3::HMGA1' 'ELF3::UBP1' 'ELF3::ZNF589'
'EPAS1::ZNF589' 'ETV4::UBP1' 'ETV4::ZNF589' 'ETV5::ETV6' 'ETV5::KLF4'
'ETV5::MSX2' 'ETV5::NFAT5' 'ETV5::NFE2L1' 'ETV5::NFIA' 'ETV5::NFIC'
'ETV5::NR2F1' 'ETV5::SNAI1' 'ETV5::SNAI2' 'ETV5::SOX13' 'ETV5::TP63'
'ETV6::HMGA1' 'ETV6::MYBL2' 'ETV6::NFE2L1' 'ETV6::UBP1' 'ETV6::ZNF589'
'FLI1::MAFG' 'FLI1::NFIC' 'FLI1::SMAD2' 'FLI1::ZNF335' 'FOS::ZNF589'
'FOSB::ZNF589' 'FOSL1::ZNF589' 'FOSL2::ZNF589' 'FOXA2::NFAT5'
'FOXH1::ZNF589' 'FOXK1::ZNF589' 'FOXO1::SOX13' 'FOXO1::ZNF589'
'GATA3::ZNF589' 'GRHL1::ZNF589' 'HLTF::NFAT5' 'HMBOX1::SOX13'
'HMBOX1::UBP1' 'HMBOX1::ZNF589' 'HMGA1::SNAI1' 'HOXA10::ZNF589'
'HOXB3::ZNF589' 'JUN::ZNF589' 'JUNB::ZNF589' 'JUND::ZNF589' 'KLF4::MYBL2'
'KLF4::STAT1' 'KLF4::ZNF589' 'MAFF::ZNF589' 'MAFG::ZNF589' 'MAFK::ZNF589'
'MSX2::ZNF589' 'MXI1::NFAT5' 'MYBL2::NFIC' 'NFAT5::NFIC' 'NFAT5::NR2F6'
'NFAT5::PITX1' 'NFAT5::REL' 'NFAT5::RELB' 'NFAT5::RUNX1' 'NFAT5::SMAD3'
'NFAT5::SNAI2' 'NFAT5::ZNF274' 'NFAT5::ZNF335' 'NFAT5::ZNF589'
'NFE2L1::ZNF589' 'NFE2L2::ZNF589' 'NFIA::SMAD2' 'NFIA::SREBF1'
'NFIA::ZNF528' 'NFIA::ZNF589' 'NFIC::NR1I3' 'NFIC::SNAI1' 'NFIC::SOX13'
'NFIC::ZNF589' 'NR1H3::ZNF589' 'NR2F1::UBP1' 'NR2F1::ZNF589'
'NR2F6::SOX13' 'NR2F6::UBP1' 'NR2F6::ZNF589' 'REL::SOX13' 'RUNX1::SOX13'
'RUNX2::ZNF589' 'SMAD2::SOX13' 'SMAD2::ZNF589' 'SMAD3::SOX13'
'SMAD3::ZNF589' 'SNAI1::UBP1' 'SNAI1::ZNF589' 'SNAI2::ZNF589'
'SOX13::SOX9' 'SOX13::SREBF1' 'SOX13::ZNF335' 'SOX13::ZNF528'
'SOX13::ZNF589' 'SOX13::ZNF680' 'SOX4::ZNF589' 'SOX9::UBP1'
'SOX9::ZNF589' 'SREBF1::UBP1' 'SREBF1::ZNF589' 'TBX3::ZNF589'
'TEAD1::ZNF589' 'TFCP2::ZNF589' 'TGIF1::ZNF589' 'TP53::ZNF589'
'TP63::ZNF589' 'UBP1::ZNF528' 'ZFP28::ZNF589' 'ZNF335::ZNF589'
'ZNF354A::ZNF589' 'ZNF528::ZNF589' 'ZNF589::ZNF680']
num vocab in expression enrichment (loop-loop region): 805
['ARID5B::FOXO1' 'ARID5B::HIC2' 'ARID5B::HMBOX1' 'ARID5B::MXI1'
'ARID5B::NR2F6' 'ARID5B::RELB' 'ARID5B::SMAD2' 'ARID5B::SOX9'
'ARID5B::SREBF1' 'ARID5B::STAT6' 'ARID5B::TFCP2' 'ARID5B::ZNF528'
'ATF2::ETV4' 'ATF2::FLI1' 'ATF2::NR2F1' 'ATF2::ZNF589' 'ATF4::ZNF589'
'BACH1::ETV5' 'BACH1::ETV6' 'BACH1::FLI1' 'BACH1::NR2F1' 'BACH1::SNAI1'
'BACH1::SOX9' 'BACH1::ZNF589' 'BCL11A::SMAD2' 'BCL11A::SOX9'
'BCL11A::SREBF1' 'BCL6::ETV5' 'BCL6::FLI1' 'BCL6::NFAT5' 'BCL6::NR2F1'
'BCL6::SOX13' 'BCL6::ZNF589' 'BHLHE41::SOX13' 'BHLHE41::UBP1'
'BHLHE41::ZNF589' 'BPTF::NR2F1' 'BRCA1::FOS' 'BRCA1::FOSL2'
'BRCA1::NFAT5' 'BRCA1::NFE2L1' 'BRCA1::NFIC' 'BRCA1::NR1H3'
'BRCA1::SNAI1' 'CBFB::ELF3' 'CBFB::EPAS1' 'CBFB::ETV6' 'CBFB::FOXA2'
'CBFB::KLF4' 'CBFB::NFIC' 'CBFB::NR2F1' 'CBFB::SMAD2' 'CBFB::SMAD3'
'CBFB::SNAI1' 'CBFB::SNAI2' 'CBFB::ZNF335' 'CEBPB::ZNF589' 'CEBPD::FLI1'
'CEBPD::ZNF589' 'CEBPG::FLI1' 'CEBPG::ZNF589' 'CLOCK::EPAS1'
'CLOCK::NFAT5' 'CLOCK::NFE2L1' 'CLOCK::SOX13' 'CLOCK::ZNF335'
'CREB3::EPAS1' 'CREB3::NFIC' 'CREB3::ZNF589' 'CREM::FLI1' 'CREM::SOX13'
'CREM::ZNF589' 'DDIT3::FLI1' 'DDIT3::NR2F1' 'DDIT3::ZNF589' 'EHF::ETV5'
'EHF::FOS' 'EHF::FOSL1' 'EHF::FOSL2' 'EHF::FOXJ3' 'EHF::HBP1'
'EHF::HMGA1' 'EHF::IRF3' 'EHF::JUN' 'EHF::JUND' 'EHF::MAFF' 'EHF::MEIS1'
'EHF::MYBL2' 'EHF::NFE2L1' 'EHF::NFIC' 'EHF::NR1H3' 'EHF::NR1I3'
'EHF::NR3C1' 'EHF::TP63' 'EHF::UBP1' 'EHF::ZBTB49' 'EHF::ZNF335'
'EHF::ZNF394' 'EHF::ZNF589' 'ELF3::ETV5' 'ELF3::FOS' 'ELF3::FOSB'
'ELF3::FOSL1' 'ELF3::FOSL2' 'ELF3::FOXJ3' 'ELF3::HBP1' 'ELF3::HMGA1'
'ELF3::IRF3' 'ELF3::JUN' 'ELF3::JUNB' 'ELF3::JUND' 'ELF3::MAFF'
'ELF3::MEIS1' 'ELF3::MYBL2' 'ELF3::NFE2L1' 'ELF3::NFIC' 'ELF3::NR1I3'
'ELF3::NR3C1' 'ELF3::SNAI2' 'ELF3::TP63' 'ELF3::UBP1' 'ELF3::ZBTB49'
'ELF3::ZNF335' 'ELF3::ZNF394' 'ELF3::ZNF589' 'EPAS1::ETV5' 'EPAS1::FOXJ2'
'EPAS1::FOXJ3' 'EPAS1::HIC2' 'EPAS1::IRF1' 'EPAS1::IRF3' 'EPAS1::MYBL2'
'EPAS1::NR1H3' 'EPAS1::NR1I3' 'EPAS1::NR4A2' 'EPAS1::REL' 'EPAS1::RELA'
'EPAS1::SMAD2' 'EPAS1::SOX4' 'EPAS1::SREBF1' 'EPAS1::STAT1'
'EPAS1::STAT2' 'EPAS1::TFCP2' 'EPAS1::TWIST1' 'EPAS1::ZNF274'
'EPAS1::ZNF394' 'EPAS1::ZNF528' 'EPAS1::ZNF589' 'EPAS1::ZNF680'
'ETV4::ETV5' 'ETV4::FOS' 'ETV4::FOSB' 'ETV4::FOSL2' 'ETV4::FOXJ3'
'ETV4::HBP1' 'ETV4::HMGA1' 'ETV4::JUN' 'ETV4::JUNB' 'ETV4::JUND'
'ETV4::MAFG' 'ETV4::MEIS1' 'ETV4::MYBL2' 'ETV4::NFE2L1' 'ETV4::NFIC'
'ETV4::NR3C1' 'ETV4::SMAD3' 'ETV4::TGIF1' 'ETV4::TP63' 'ETV4::UBP1'
'ETV4::ZBTB49' 'ETV4::ZNF589' 'ETV5::ETV6' 'ETV5::FOS' 'ETV5::FOSB'
'ETV5::FOSL1' 'ETV5::FOSL2' 'ETV5::FOXH1' 'ETV5::FOXK1' 'ETV5::FOXL1'
'ETV5::FOXO1' 'ETV5::GATA3' 'ETV5::HMBOX1' 'ETV5::HOXA5' 'ETV5::HOXB3'
'ETV5::JUN' 'ETV5::JUNB' 'ETV5::JUND' 'ETV5::KLF4' 'ETV5::MAFF'
'ETV5::MAFG' 'ETV5::MAFK' 'ETV5::MEIS1' 'ETV5::MSX2' 'ETV5::MXI1'
'ETV5::NFAT5' 'ETV5::NFE2L1' 'ETV5::NFE2L2' 'ETV5::NFIA' 'ETV5::NFIB'
'ETV5::NFIC' 'ETV5::NR1H3' 'ETV5::NR2F1' 'ETV5::NR2F6' 'ETV5::RFX2'
'ETV5::RUNX2' 'ETV5::SMAD2' 'ETV5::SMAD3' 'ETV5::SMARCA1' 'ETV5::SNAI1'
'ETV5::SNAI2' 'ETV5::SOX13' 'ETV5::SOX9' 'ETV5::SREBF1' 'ETV5::TBX3'
'ETV5::TCF7L1' 'ETV5::TEAD1' 'ETV5::TFCP2' 'ETV5::TGIF1' 'ETV5::THRA'
'ETV5::TP53' 'ETV5::TP63' 'ETV5::TWIST1' 'ETV5::ZFP28' 'ETV5::ZNF335'
'ETV5::ZNF354A' 'ETV5::ZNF528' 'ETV5::ZNF768' 'ETV6::FOS' 'ETV6::FOSB'
'ETV6::FOSL1' 'ETV6::FOSL2' 'ETV6::FOXJ3' 'ETV6::HBP1' 'ETV6::HLTF'
'ETV6::HMGA1' 'ETV6::IRF1' 'ETV6::IRF3' 'ETV6::JUN' 'ETV6::JUNB'
'ETV6::JUND' 'ETV6::MAFF' 'ETV6::MAFG' 'ETV6::MAFK' 'ETV6::MEIS1'
'ETV6::MYBL2' 'ETV6::NFATC1' 'ETV6::NFE2L1' 'ETV6::NFE2L2' 'ETV6::NFIC'
'ETV6::NFKB2' 'ETV6::NR1H3' 'ETV6::NR1I3' 'ETV6::NR3C1' 'ETV6::PRDM1'
'ETV6::REL' 'ETV6::RREB1' 'ETV6::RUNX1' 'ETV6::SMAD2' 'ETV6::SMAD3'
'ETV6::SNAI2' 'ETV6::SREBF1' 'ETV6::STAT1' 'ETV6::STAT2' 'ETV6::TFCP2'
'ETV6::TGIF1' 'ETV6::TP53' 'ETV6::TP63' 'ETV6::UBP1' 'ETV6::ZBTB49'
'ETV6::ZNF335' 'ETV6::ZNF394' 'ETV6::ZNF528' 'ETV6::ZNF589' 'FLI1::FOXC1'
'FLI1::FOXH1' 'FLI1::FOXO1' 'FLI1::FOXO3' 'FLI1::FOXQ1' 'FLI1::GMEB2'
'FLI1::HIC2' 'FLI1::HLTF' 'FLI1::HMBOX1' 'FLI1::HOXA5' 'FLI1::HOXB7'
'FLI1::IRF6' 'FLI1::MAFG' 'FLI1::NFIB' 'FLI1::NFIC' 'FLI1::NKX3-1'
'FLI1::NR2F6' 'FLI1::PITX1' 'FLI1::RELB' 'FLI1::SMAD2' 'FLI1::SMAD3'
'FLI1::SMARCA1' 'FLI1::SOX4' 'FLI1::SREBF1' 'FLI1::STAT6' 'FLI1::TCF7L1'
'FLI1::TCF7L2' 'FLI1::TEAD3' 'FLI1::TFCP2' 'FLI1::TGIF1' 'FLI1::THRA'
'FLI1::ZFHX3' 'FLI1::ZNF274' 'FLI1::ZNF335' 'FLI1::ZNF354A'
'FLI1::ZNF528' 'FLI1::ZNF680' 'FOS::NR2F1' 'FOS::REL' 'FOS::RELB'
'FOS::RUNX1' 'FOS::SMAD2' 'FOS::SNAI1' 'FOS::SOX9' 'FOS::SREBF1'
'FOS::STAT5A' 'FOS::ZNF528' 'FOS::ZNF589' 'FOS::ZNF680' 'FOSB::NR2F1'
'FOSB::REL' 'FOSB::SMAD2' 'FOSB::SNAI1' 'FOSB::SOX9' 'FOSB::ZNF589'
'FOSL1::NR2F1' 'FOSL1::SNAI1' 'FOSL1::SOX9' 'FOSL1::SREBF1'
'FOSL1::ZNF589' 'FOSL2::NR2F1' 'FOSL2::REL' 'FOSL2::RUNX1' 'FOSL2::SMAD2'
'FOSL2::SNAI1' 'FOSL2::SOX9' 'FOSL2::SREBF1' 'FOSL2::ZNF528'
'FOSL2::ZNF589' 'FOSL2::ZNF680' 'FOXA2::NFAT5' 'FOXA2::ZNF589'
'FOXC1::NFIC' 'FOXC1::ZNF589' 'FOXD1::NFAT5' 'FOXH1::SNAI1'
'FOXH1::ZNF589' 'FOXJ2::NFIC' 'FOXJ2::NR2F1' 'FOXJ3::KLF4' 'FOXJ3::NFIC'
'FOXJ3::NR2F1' 'FOXJ3::SMAD3' 'FOXJ3::SNAI1' 'FOXK1::KLF4' 'FOXK1::NFIC'
'FOXK1::ZNF589' 'FOXL1::NFIC' 'FOXO1::HMGA1' 'FOXO1::MSX2' 'FOXO1::MYBL2'
'FOXO1::NFAT5' 'FOXO1::NFE2L1' 'FOXO1::NFIA' 'FOXO1::NFIC' 'FOXO1::SOX13'
'FOXO1::ZBTB49' 'FOXO1::ZNF589' 'FOXO3::NFIC' 'FOXQ1::NFIC'
'FOXQ1::ZNF589' 'FUBP1::NFIC' 'GATA3::ZNF589' 'GMEB2::NFIC'
'GMEB2::NR2F1' 'GMEB2::ZNF589' 'GRHL1::ZNF589' 'HBP1::HMBOX1'
'HBP1::NKX3-1' 'HBP1::NR2F1' 'HBP1::SMAD2' 'HBP1::SNAI1' 'HBP1::SOX9'
'HBP1::SREBF1' 'HBP1::STAT6' 'HBP1::TWIST1' 'HBP1::ZNF85' 'HIC2::HMGA1'
'HIC2::NFAT5' 'HIC2::NFE2L1' 'HIC2::SOX13' 'HIC2::UBP1' 'HIC2::ZNF563'
'HIC2::ZNF589' 'HIVEP1::NFIC' 'HIVEP1::SNAI1' 'HIVEP1::ZNF335'
'HLTF::IRF3' 'HLTF::MYBL2' 'HLTF::NFAT5' 'HLTF::NFIC' 'HLTF::SNAI1'
'HLTF::ZNF589' 'HMBOX1::HMGA1' 'HMBOX1::MSX2' 'HMBOX1::NFAT5'
'HMBOX1::NFE2L1' 'HMBOX1::NFIA' 'HMBOX1::SOX13' 'HMBOX1::UBP1'
'HMBOX1::ZBTB49' 'HMBOX1::ZNF589' 'HMGA1::MXI1' 'HMGA1::NKX3-1'
'HMGA1::NR2F1' 'HMGA1::NR2F6' 'HMGA1::RELB' 'HMGA1::SMAD2' 'HMGA1::SNAI1'
'HMGA1::SOX9' 'HMGA1::SREBF1' 'HMGA1::TFCP2' 'HMGA1::ZNF274'
'HMGA1::ZNF528' 'HMGA1::ZNF680' 'HOXA10::ZNF589' 'HOXA5::SNAI1'
'HOXA5::ZNF589' 'HOXA9::ZNF589' 'HOXB3::NFAT5' 'HOXB3::NFIA'
'HOXB3::UBP1' 'HOXB3::ZNF589' 'IRF1::NFIC' 'IRF1::NR2F1' 'IRF1::SNAI1'
'IRF1::ZNF335' 'IRF2::NFIC' 'IRF2::NR2F1' 'IRF3::KLF4' 'IRF3::NFIC'
'IRF3::NR2F1' 'IRF3::NR2F6' 'IRF3::RFX2' 'IRF3::SMAD2' 'IRF3::SMAD3'
'IRF3::SNAI1' 'IRF3::SPDEF' 'IRF3::SREBF1' 'IRF3::TFCP2' 'IRF3::THRA'
'IRF3::ZNF335' 'IRF3::ZNF528' 'IRX3::ZNF589' 'JUN::NR2F1' 'JUN::REL'
'JUN::SNAI1' 'JUN::SOX9' 'JUN::SREBF1' 'JUN::STAT5A' 'JUN::ZNF589'
'JUN::ZNF680' 'JUNB::NR2F1' 'JUNB::REL' 'JUNB::SMAD2' 'JUNB::SNAI1'
'JUNB::SOX9' 'JUNB::ZNF589' 'JUND::NR2F1' 'JUND::REL' 'JUND::SNAI1'
'JUND::SOX9' 'JUND::SREBF1' 'JUND::ZNF589' 'KLF4::MYBL2' 'KLF4::NR1H3'
'KLF4::NR1I3' 'KLF4::NR2F6' 'KLF4::REL' 'KLF4::RELA' 'KLF4::RELB'
'KLF4::SMAD2' 'KLF4::SOX4' 'KLF4::SREBF1' 'KLF4::STAT1' 'KLF4::STAT2'
'KLF4::TCF7L1' 'KLF4::TFCP2' 'KLF4::ZNF354A' 'KLF4::ZNF394'
'KLF4::ZNF528' 'KLF4::ZNF589' 'MAFF::NR2F1' 'MAFF::SNAI1' 'MAFF::SOX9'
'MAFF::ZNF589' 'MAFG::NR2F1' 'MAFG::SNAI1' 'MAFG::ZNF589' 'MAFK::SOX9'
'MAFK::ZNF589' 'MEF2D::NFIC' 'MEF2D::NR2F1' 'MEIS1::NFIC' 'MEIS1::REL'
'MEIS1::SMAD2' 'MEIS1::SNAI1' 'MEIS1::SREBF1' 'MEIS1::THRA' 'MSX2::NR2F6'
'MSX2::REL' 'MSX2::RUNX1' 'MSX2::SMAD2' 'MSX2::SOX9' 'MSX2::SREBF1'
'MSX2::ZNF528' 'MSX2::ZNF589' 'MXI1::MYBL2' 'MXI1::NFAT5' 'MXI1::NFE2L1'
'MXI1::NFIC' 'MXI1::NR2F1' 'MXI1::SOX13' 'MXI1::UBP1' 'MXI1::ZNF335'
'MXI1::ZNF563' 'MXI1::ZNF589' 'MYBL2::NFIC' 'MYBL2::NKX3-1'
'MYBL2::NR2F1' 'MYBL2::NR2F6' 'MYBL2::RFX2' 'MYBL2::SMAD2' 'MYBL2::SMAD3'
'MYBL2::SNAI1' 'MYBL2::SNAI2' 'MYBL2::SOX9' 'MYBL2::SREBF1'
'MYBL2::TFCP2' 'MYBL2::THRA' 'MYBL2::ZNF335' 'MYBL2::ZNF528'
'MYBL2::ZNF680' 'MYNN::NFAT5' 'MYNN::NFE2L1' 'NFAT5::NFIB' 'NFAT5::NFIC'
'NFAT5::NFKB2' 'NFAT5::NKX3-1' 'NFAT5::NR2F6' 'NFAT5::OVOL2'
'NFAT5::PITX1' 'NFAT5::REL' 'NFAT5::RELB' 'NFAT5::RUNX1' 'NFAT5::SMAD2'
'NFAT5::SMAD3' 'NFAT5::SNAI2' 'NFAT5::SOX9' 'NFAT5::SPDEF'
'NFAT5::SREBF1' 'NFAT5::STAT6' 'NFAT5::TFCP2' 'NFAT5::THRA'
'NFAT5::TWIST1' 'NFAT5::ZBTB18' 'NFAT5::ZNF274' 'NFAT5::ZNF335'
'NFAT5::ZNF528' 'NFAT5::ZNF589' 'NFAT5::ZNF680' 'NFATC1::NR2F1'
'NFE2L1::NKX3-1' 'NFE2L1::NR2F1' 'NFE2L1::NR2F6' 'NFE2L1::REL'
'NFE2L1::RELB' 'NFE2L1::RUNX1' 'NFE2L1::SMAD2' 'NFE2L1::SNAI1'
'NFE2L1::SOX9' 'NFE2L1::SREBF1' 'NFE2L1::STAT5A' 'NFE2L1::STAT6'
'NFE2L1::TFCP2' 'NFE2L1::ZNF274' 'NFE2L1::ZNF317' 'NFE2L1::ZNF528'
'NFE2L1::ZNF589' 'NFE2L1::ZNF680' 'NFE2L2::NR2F1' 'NFE2L2::SNAI1'
'NFE2L2::ZNF589' 'NFIA::NR2F6' 'NFIA::REL' 'NFIA::RELB' 'NFIA::RUNX1'
'NFIA::SMAD2' 'NFIA::SREBF1' 'NFIA::STAT6' 'NFIA::TFCP2' 'NFIA::ZNF528'
'NFIA::ZNF589' 'NFIA::ZNF680' 'NFIB::NR2F1' 'NFIB::SNAI1' 'NFIB::SOX13'
'NFIB::ZNF589' 'NFIC::NFKB2' 'NFIC::NR1H3' 'NFIC::NR1I3' 'NFIC::NR2F1'
'NFIC::NR2F6' 'NFIC::NR4A2' 'NFIC::REL' 'NFIC::RELA' 'NFIC::RUNX1'
'NFIC::RUNX2' 'NFIC::RUNX3' 'NFIC::SMAD2' 'NFIC::SNAI1' 'NFIC::SOX13'
'NFIC::SOX4' 'NFIC::SOX9' 'NFIC::SPDEF' 'NFIC::SREBF1' 'NFIC::STAT1'
'NFIC::STAT2' 'NFIC::STAT5A' 'NFIC::TFCP2' 'NFIC::TWIST1' 'NFIC::ZBTB7B'
'NFIC::ZFP28' 'NFIC::ZNF354A' 'NFIC::ZNF394' 'NFIC::ZNF528'
'NFIC::ZNF589' 'NFIC::ZNF680' 'NFKB2::NR2F1' 'NFKB2::SNAI1'
'NKX3-1::SOX13' 'NKX3-1::UBP1' 'NKX3-1::ZNF563' 'NKX3-1::ZNF589'
'NR1H3::RFX2' 'NR1H3::SMAD2' 'NR1H3::SMAD3' 'NR1H3::SNAI2' 'NR1H3::SOX9'
'NR1H3::SREBF1' 'NR1H3::TFCP2' 'NR1H3::TWIST1' 'NR1H3::ZNF335'
'NR1H3::ZNF528' 'NR1H3::ZNF589' 'NR1I3::NR2F1' 'NR1I3::SMAD3'
'NR1I3::SNAI1' 'NR1I3::ZNF335' 'NR1I3::ZNF85' 'NR2F1::NR2F6'
'NR2F1::NR3C1' 'NR2F1::REL' 'NR2F1::SMAD2' 'NR2F1::SMAD3' 'NR2F1::SOX4'
'NR2F1::SOX9' 'NR2F1::SREBF1' 'NR2F1::STAT1' 'NR2F1::STAT2'
'NR2F1::TFCP2' 'NR2F1::TGIF1' 'NR2F1::THRA' 'NR2F1::TP53' 'NR2F1::UBP1'
'NR2F1::ZBTB49' 'NR2F1::ZNF335' 'NR2F1::ZNF394' 'NR2F1::ZNF528'
'NR2F1::ZNF589' 'NR2F6::SNAI1' 'NR2F6::SOX13' 'NR2F6::UBP1'
'NR2F6::ZBTB49' 'NR2F6::ZNF589' 'NR3C1::SNAI1' 'NR3C1::SOX9'
'NR3C1::ZNF528' 'OVOL2::ZNF589' 'PLAGL1::SNAI1' 'POU2F1::SNAI1'
'PRDM1::SOX9' 'REL::SMAD3' 'REL::SNAI1' 'REL::SOX13' 'REL::TP63'
'RELB::SOX13' 'RELB::UBP1' 'RELB::ZNF563' 'RELB::ZNF589' 'RFX2::SREBF1'
'RFX2::STAT1' 'RREB1::SMAD2' 'RREB1::SOX9' 'RREB1::SREBF1' 'RREB1::TFCP2'
'RREB1::ZNF680' 'RUNX1::SNAI1' 'RUNX1::SOX13' 'RUNX2::ZNF589'
'SMAD2::SMAD3' 'SMAD2::SNAI1' 'SMAD2::SOX13' 'SMAD2::UBP1'
'SMAD2::ZBTB49' 'SMAD2::ZNF335' 'SMAD2::ZNF563' 'SMAD2::ZNF589'
'SMAD3::SNAI1' 'SMAD3::SOX13' 'SMAD3::STAT1' 'SMAD3::ZNF394'
'SMAD3::ZNF589' 'SMARCA1::ZNF589' 'SNAI1::SOX4' 'SNAI1::SREBF1'
'SNAI1::STAT1' 'SNAI1::STAT2' 'SNAI1::TBX3' 'SNAI1::TFCP2' 'SNAI1::TGIF1'
'SNAI1::TP53' 'SNAI1::TP63' 'SNAI1::UBP1' 'SNAI1::ZBTB49' 'SNAI1::ZNF335'
'SNAI1::ZNF394' 'SNAI1::ZNF528' 'SNAI1::ZNF589' 'SNAI1::ZNF680'
'SNAI2::ZNF589' 'SOX13::SOX9' 'SOX13::SREBF1' 'SOX13::STAT6'
'SOX13::TFCP2' 'SOX13::THRA' 'SOX13::ZNF274' 'SOX13::ZNF335'
'SOX13::ZNF528' 'SOX13::ZNF589' 'SOX13::ZNF680' 'SOX4::ZNF335'
'SOX4::ZNF589' 'SOX9::STAT1' 'SOX9::TP53' 'SOX9::TP63' 'SOX9::UBP1'
'SOX9::ZBTB49' 'SOX9::ZNF335' 'SOX9::ZNF394' 'SOX9::ZNF589' 'SPDEF::UBP1'
'SPDEF::ZNF589' 'SREBF1::UBP1' 'SREBF1::ZBTB49' 'SREBF1::ZNF335'
'SREBF1::ZNF563' 'SREBF1::ZNF589' 'STAT1::ZNF335' 'STAT2::ZNF335'
'STAT6::UBP1' 'STAT6::ZNF589' 'TBX3::ZNF589' 'TCF7L1::ZNF589'
'TEAD1::ZNF589' 'TEAD3::ZNF589' 'TFCP2::UBP1' 'TFCP2::ZNF335'
'TFCP2::ZNF563' 'TFCP2::ZNF589' 'TGIF1::ZNF589' 'THRA::ZNF589'
'TP53::ZNF589' 'TP63::ZNF589' 'UBP1::ZNF528' 'UBP1::ZNF680'
'ZBTB49::ZNF528' 'ZBTB49::ZNF680' 'ZFP28::ZNF335' 'ZFP28::ZNF589'
'ZNF134::ZNF589' 'ZNF274::ZNF589' 'ZNF335::ZNF394' 'ZNF335::ZNF528'
'ZNF335::ZNF589' 'ZNF335::ZNF680' 'ZNF354A::ZNF589' 'ZNF528::ZNF563'
'ZNF528::ZNF589' 'ZNF589::ZNF680' 'ZNF589::ZNF768' 'ZNF589::ZNF85']
none 128
one 1
Name: check_tissuetf, dtype: int64
none 798
one 12
both 7
Name: check_tissuetf, dtype: int64
pro-pro region
total vocab: 128 tissue annon vocab: 128
['ARID5B::FOXO1' 'ARID5B::HMBOX1' 'ARID5B::SMAD2' 'ARID5B::SOX9'
'ARID5B::ZNF528' 'CLOCK::NFAT5' 'CREM::SOX13' 'EHF::ETV5' 'EHF::UBP1'
'EHF::ZNF589' 'ELF3::ETV5' 'ELF3::HMGA1' 'ELF3::UBP1' 'ELF3::ZNF589'
'EPAS1::ZNF589' 'ETV4::UBP1' 'ETV4::ZNF589' 'ETV5::ETV6' 'ETV5::KLF4'
'ETV5::MSX2' 'ETV5::NFAT5' 'ETV5::NFE2L1' 'ETV5::NFIA' 'ETV5::NFIC'
'ETV5::NR2F1' 'ETV5::SNAI1' 'ETV5::SNAI2' 'ETV5::SOX13' 'ETV5::TP63'
'ETV6::HMGA1' 'ETV6::MYBL2' 'ETV6::NFE2L1' 'ETV6::UBP1' 'ETV6::ZNF589'
'FLI1::MAFG' 'FLI1::NFIC' 'FLI1::SMAD2' 'FLI1::ZNF335' 'FOS::ZNF589'
'FOSB::ZNF589' 'FOSL1::ZNF589' 'FOSL2::ZNF589' 'FOXA2::NFAT5'
'FOXH1::ZNF589' 'FOXK1::ZNF589' 'FOXO1::SOX13' 'FOXO1::ZNF589'
'GATA3::ZNF589' 'GRHL1::ZNF589' 'HLTF::NFAT5' 'HMBOX1::SOX13'
'HMBOX1::UBP1' 'HMBOX1::ZNF589' 'HMGA1::SNAI1' 'HOXA10::ZNF589'
'HOXB3::ZNF589' 'JUN::ZNF589' 'JUNB::ZNF589' 'JUND::ZNF589' 'KLF4::MYBL2'
'KLF4::STAT1' 'KLF4::ZNF589' 'MAFF::ZNF589' 'MAFG::ZNF589' 'MAFK::ZNF589'
'MSX2::ZNF589' 'MXI1::NFAT5' 'MYBL2::NFIC' 'NFAT5::NFIC' 'NFAT5::NR2F6'
'NFAT5::PITX1' 'NFAT5::REL' 'NFAT5::RELB' 'NFAT5::RUNX1' 'NFAT5::SMAD3'
'NFAT5::SNAI2' 'NFAT5::ZNF274' 'NFAT5::ZNF335' 'NFAT5::ZNF589'
'NFE2L1::ZNF589' 'NFE2L2::ZNF589' 'NFIA::SMAD2' 'NFIA::SREBF1'
'NFIA::ZNF528' 'NFIA::ZNF589' 'NFIC::NR1I3' 'NFIC::SNAI1' 'NFIC::SOX13'
'NFIC::ZNF589' 'NR1H3::ZNF589' 'NR2F1::UBP1' 'NR2F1::ZNF589'
'NR2F6::SOX13' 'NR2F6::UBP1' 'NR2F6::ZNF589' 'REL::SOX13' 'RUNX1::SOX13'
'RUNX2::ZNF589' 'SMAD2::SOX13' 'SMAD2::ZNF589' 'SMAD3::SOX13'
'SMAD3::ZNF589' 'SNAI1::UBP1' 'SNAI1::ZNF589' 'SNAI2::ZNF589'
'SOX13::SOX9' 'SOX13::SREBF1' 'SOX13::ZNF335' 'SOX13::ZNF528'
'SOX13::ZNF589' 'SOX13::ZNF680' 'SOX4::ZNF589' 'SOX9::UBP1'
'SOX9::ZNF589' 'SREBF1::UBP1' 'SREBF1::ZNF589' 'TBX3::ZNF589'
'TEAD1::ZNF589' 'TFCP2::ZNF589' 'TGIF1::ZNF589' 'TP53::ZNF589'
'TP63::ZNF589' 'UBP1::ZNF528' 'ZFP28::ZNF589' 'ZNF335::ZNF589'
'ZNF354A::ZNF589' 'ZNF528::ZNF589' 'ZNF589::ZNF680']
loop-loop region
total vocab: 805 tissue annon vocab: 805
['ARID5B::FOXO1' 'ARID5B::HIC2' 'ARID5B::HMBOX1' 'ARID5B::MXI1'
'ARID5B::NR2F6' 'ARID5B::RELB' 'ARID5B::SMAD2' 'ARID5B::SOX9'
'ARID5B::SREBF1' 'ARID5B::STAT6' 'ARID5B::TFCP2' 'ARID5B::ZNF528'
'ATF2::ETV4' 'ATF2::FLI1' 'ATF2::NR2F1' 'ATF2::ZNF589' 'ATF4::ZNF589'
'BACH1::ETV5' 'BACH1::ETV6' 'BACH1::FLI1' 'BACH1::NR2F1' 'BACH1::SNAI1'
'BACH1::SOX9' 'BACH1::ZNF589' 'BCL11A::SMAD2' 'BCL11A::SOX9'
'BCL11A::SREBF1' 'BCL6::ETV5' 'BCL6::FLI1' 'BCL6::NFAT5' 'BCL6::NR2F1'
'BCL6::SOX13' 'BCL6::ZNF589' 'BHLHE41::SOX13' 'BHLHE41::UBP1'
'BHLHE41::ZNF589' 'BPTF::NR2F1' 'BRCA1::FOS' 'BRCA1::FOSL2'
'BRCA1::NFAT5' 'BRCA1::NFE2L1' 'BRCA1::NFIC' 'BRCA1::NR1H3'
'BRCA1::SNAI1' 'CBFB::ELF3' 'CBFB::EPAS1' 'CBFB::ETV6' 'CBFB::FOXA2'
'CBFB::KLF4' 'CBFB::NFIC' 'CBFB::NR2F1' 'CBFB::SMAD2' 'CBFB::SMAD3'
'CBFB::SNAI1' 'CBFB::SNAI2' 'CBFB::ZNF335' 'CEBPB::ZNF589' 'CEBPD::FLI1'
'CEBPD::ZNF589' 'CEBPG::FLI1' 'CEBPG::ZNF589' 'CLOCK::EPAS1'
'CLOCK::NFAT5' 'CLOCK::NFE2L1' 'CLOCK::SOX13' 'CLOCK::ZNF335'
'CREB3::EPAS1' 'CREB3::NFIC' 'CREB3::ZNF589' 'CREM::FLI1' 'CREM::SOX13'
'CREM::ZNF589' 'DDIT3::FLI1' 'DDIT3::NR2F1' 'DDIT3::ZNF589' 'EHF::ETV5'
'EHF::FOS' 'EHF::FOSL1' 'EHF::FOSL2' 'EHF::FOXJ3' 'EHF::HBP1'
'EHF::HMGA1' 'EHF::IRF3' 'EHF::JUN' 'EHF::JUND' 'EHF::MAFF' 'EHF::MEIS1'
'EHF::MYBL2' 'EHF::NFE2L1' 'EHF::NFIC' 'EHF::NR1H3' 'EHF::NR1I3'
'EHF::NR3C1' 'EHF::TP63' 'EHF::UBP1' 'EHF::ZBTB49' 'EHF::ZNF335'
'EHF::ZNF394' 'EHF::ZNF589' 'ELF3::ETV5' 'ELF3::FOS' 'ELF3::FOSB'
'ELF3::FOSL1' 'ELF3::FOSL2' 'ELF3::FOXJ3' 'ELF3::HBP1' 'ELF3::HMGA1'
'ELF3::IRF3' 'ELF3::JUN' 'ELF3::JUNB' 'ELF3::JUND' 'ELF3::MAFF'
'ELF3::MEIS1' 'ELF3::MYBL2' 'ELF3::NFE2L1' 'ELF3::NFIC' 'ELF3::NR1I3'
'ELF3::NR3C1' 'ELF3::SNAI2' 'ELF3::TP63' 'ELF3::UBP1' 'ELF3::ZBTB49'
'ELF3::ZNF335' 'ELF3::ZNF394' 'ELF3::ZNF589' 'EPAS1::ETV5' 'EPAS1::FOXJ2'
'EPAS1::FOXJ3' 'EPAS1::HIC2' 'EPAS1::IRF1' 'EPAS1::IRF3' 'EPAS1::MYBL2'
'EPAS1::NR1H3' 'EPAS1::NR1I3' 'EPAS1::NR4A2' 'EPAS1::REL' 'EPAS1::RELA'
'EPAS1::SMAD2' 'EPAS1::SOX4' 'EPAS1::SREBF1' 'EPAS1::STAT1'
'EPAS1::STAT2' 'EPAS1::TFCP2' 'EPAS1::TWIST1' 'EPAS1::ZNF274'
'EPAS1::ZNF394' 'EPAS1::ZNF528' 'EPAS1::ZNF589' 'EPAS1::ZNF680'
'ETV4::ETV5' 'ETV4::FOS' 'ETV4::FOSB' 'ETV4::FOSL2' 'ETV4::FOXJ3'
'ETV4::HBP1' 'ETV4::HMGA1' 'ETV4::JUN' 'ETV4::JUNB' 'ETV4::JUND'
'ETV4::MAFG' 'ETV4::MEIS1' 'ETV4::MYBL2' 'ETV4::NFE2L1' 'ETV4::NFIC'
'ETV4::NR3C1' 'ETV4::SMAD3' 'ETV4::TGIF1' 'ETV4::TP63' 'ETV4::UBP1'
'ETV4::ZBTB49' 'ETV4::ZNF589' 'ETV5::ETV6' 'ETV5::FOS' 'ETV5::FOSB'
'ETV5::FOSL1' 'ETV5::FOSL2' 'ETV5::FOXH1' 'ETV5::FOXK1' 'ETV5::FOXL1'
'ETV5::FOXO1' 'ETV5::GATA3' 'ETV5::HMBOX1' 'ETV5::HOXA5' 'ETV5::HOXB3'
'ETV5::JUN' 'ETV5::JUNB' 'ETV5::JUND' 'ETV5::KLF4' 'ETV5::MAFF'
'ETV5::MAFG' 'ETV5::MAFK' 'ETV5::MEIS1' 'ETV5::MSX2' 'ETV5::MXI1'
'ETV5::NFAT5' 'ETV5::NFE2L1' 'ETV5::NFE2L2' 'ETV5::NFIA' 'ETV5::NFIB'
'ETV5::NFIC' 'ETV5::NR1H3' 'ETV5::NR2F1' 'ETV5::NR2F6' 'ETV5::RFX2'
'ETV5::RUNX2' 'ETV5::SMAD2' 'ETV5::SMAD3' 'ETV5::SMARCA1' 'ETV5::SNAI1'
'ETV5::SNAI2' 'ETV5::SOX13' 'ETV5::SOX9' 'ETV5::SREBF1' 'ETV5::TBX3'
'ETV5::TCF7L1' 'ETV5::TEAD1' 'ETV5::TFCP2' 'ETV5::TGIF1' 'ETV5::THRA'
'ETV5::TP53' 'ETV5::TP63' 'ETV5::TWIST1' 'ETV5::ZFP28' 'ETV5::ZNF335'
'ETV5::ZNF354A' 'ETV5::ZNF528' 'ETV5::ZNF768' 'ETV6::FOS' 'ETV6::FOSB'
'ETV6::FOSL1' 'ETV6::FOSL2' 'ETV6::FOXJ3' 'ETV6::HBP1' 'ETV6::HLTF'
'ETV6::HMGA1' 'ETV6::IRF1' 'ETV6::IRF3' 'ETV6::JUN' 'ETV6::JUNB'
'ETV6::JUND' 'ETV6::MAFF' 'ETV6::MAFG' 'ETV6::MAFK' 'ETV6::MEIS1'
'ETV6::MYBL2' 'ETV6::NFATC1' 'ETV6::NFE2L1' 'ETV6::NFE2L2' 'ETV6::NFIC'
'ETV6::NFKB2' 'ETV6::NR1H3' 'ETV6::NR1I3' 'ETV6::NR3C1' 'ETV6::PRDM1'
'ETV6::REL' 'ETV6::RREB1' 'ETV6::RUNX1' 'ETV6::SMAD2' 'ETV6::SMAD3'
'ETV6::SNAI2' 'ETV6::SREBF1' 'ETV6::STAT1' 'ETV6::STAT2' 'ETV6::TFCP2'
'ETV6::TGIF1' 'ETV6::TP53' 'ETV6::TP63' 'ETV6::UBP1' 'ETV6::ZBTB49'
'ETV6::ZNF335' 'ETV6::ZNF394' 'ETV6::ZNF528' 'ETV6::ZNF589' 'FLI1::FOXC1'
'FLI1::FOXH1' 'FLI1::FOXO1' 'FLI1::FOXO3' 'FLI1::FOXQ1' 'FLI1::GMEB2'
'FLI1::HIC2' 'FLI1::HLTF' 'FLI1::HMBOX1' 'FLI1::HOXA5' 'FLI1::HOXB7'
'FLI1::IRF6' 'FLI1::MAFG' 'FLI1::NFIB' 'FLI1::NFIC' 'FLI1::NKX3-1'
'FLI1::NR2F6' 'FLI1::PITX1' 'FLI1::RELB' 'FLI1::SMAD2' 'FLI1::SMAD3'
'FLI1::SMARCA1' 'FLI1::SOX4' 'FLI1::SREBF1' 'FLI1::STAT6' 'FLI1::TCF7L1'
'FLI1::TCF7L2' 'FLI1::TEAD3' 'FLI1::TFCP2' 'FLI1::TGIF1' 'FLI1::THRA'
'FLI1::ZFHX3' 'FLI1::ZNF274' 'FLI1::ZNF335' 'FLI1::ZNF354A'
'FLI1::ZNF528' 'FLI1::ZNF680' 'FOS::NR2F1' 'FOS::REL' 'FOS::RELB'
'FOS::RUNX1' 'FOS::SMAD2' 'FOS::SNAI1' 'FOS::SOX9' 'FOS::SREBF1'
'FOS::STAT5A' 'FOS::ZNF528' 'FOS::ZNF589' 'FOS::ZNF680' 'FOSB::NR2F1'
'FOSB::REL' 'FOSB::SMAD2' 'FOSB::SNAI1' 'FOSB::SOX9' 'FOSB::ZNF589'
'FOSL1::NR2F1' 'FOSL1::SNAI1' 'FOSL1::SOX9' 'FOSL1::SREBF1'
'FOSL1::ZNF589' 'FOSL2::NR2F1' 'FOSL2::REL' 'FOSL2::RUNX1' 'FOSL2::SMAD2'
'FOSL2::SNAI1' 'FOSL2::SOX9' 'FOSL2::SREBF1' 'FOSL2::ZNF528'
'FOSL2::ZNF589' 'FOSL2::ZNF680' 'FOXA2::NFAT5' 'FOXA2::ZNF589'
'FOXC1::NFIC' 'FOXC1::ZNF589' 'FOXD1::NFAT5' 'FOXH1::SNAI1'
'FOXH1::ZNF589' 'FOXJ2::NFIC' 'FOXJ2::NR2F1' 'FOXJ3::KLF4' 'FOXJ3::NFIC'
'FOXJ3::NR2F1' 'FOXJ3::SMAD3' 'FOXJ3::SNAI1' 'FOXK1::KLF4' 'FOXK1::NFIC'
'FOXK1::ZNF589' 'FOXL1::NFIC' 'FOXO1::HMGA1' 'FOXO1::MSX2' 'FOXO1::MYBL2'
'FOXO1::NFAT5' 'FOXO1::NFE2L1' 'FOXO1::NFIA' 'FOXO1::NFIC' 'FOXO1::SOX13'
'FOXO1::ZBTB49' 'FOXO1::ZNF589' 'FOXO3::NFIC' 'FOXQ1::NFIC'
'FOXQ1::ZNF589' 'FUBP1::NFIC' 'GATA3::ZNF589' 'GMEB2::NFIC'
'GMEB2::NR2F1' 'GMEB2::ZNF589' 'GRHL1::ZNF589' 'HBP1::HMBOX1'
'HBP1::NKX3-1' 'HBP1::NR2F1' 'HBP1::SMAD2' 'HBP1::SNAI1' 'HBP1::SOX9'
'HBP1::SREBF1' 'HBP1::STAT6' 'HBP1::TWIST1' 'HBP1::ZNF85' 'HIC2::HMGA1'
'HIC2::NFAT5' 'HIC2::NFE2L1' 'HIC2::SOX13' 'HIC2::UBP1' 'HIC2::ZNF563'
'HIC2::ZNF589' 'HIVEP1::NFIC' 'HIVEP1::SNAI1' 'HIVEP1::ZNF335'
'HLTF::IRF3' 'HLTF::MYBL2' 'HLTF::NFAT5' 'HLTF::NFIC' 'HLTF::SNAI1'
'HLTF::ZNF589' 'HMBOX1::HMGA1' 'HMBOX1::MSX2' 'HMBOX1::NFAT5'
'HMBOX1::NFE2L1' 'HMBOX1::NFIA' 'HMBOX1::SOX13' 'HMBOX1::UBP1'
'HMBOX1::ZBTB49' 'HMBOX1::ZNF589' 'HMGA1::MXI1' 'HMGA1::NKX3-1'
'HMGA1::NR2F1' 'HMGA1::NR2F6' 'HMGA1::RELB' 'HMGA1::SMAD2' 'HMGA1::SNAI1'
'HMGA1::SOX9' 'HMGA1::SREBF1' 'HMGA1::TFCP2' 'HMGA1::ZNF274'
'HMGA1::ZNF528' 'HMGA1::ZNF680' 'HOXA10::ZNF589' 'HOXA5::SNAI1'
'HOXA5::ZNF589' 'HOXA9::ZNF589' 'HOXB3::NFAT5' 'HOXB3::NFIA'
'HOXB3::UBP1' 'HOXB3::ZNF589' 'IRF1::NFIC' 'IRF1::NR2F1' 'IRF1::SNAI1'
'IRF1::ZNF335' 'IRF2::NFIC' 'IRF2::NR2F1' 'IRF3::KLF4' 'IRF3::NFIC'
'IRF3::NR2F1' 'IRF3::NR2F6' 'IRF3::RFX2' 'IRF3::SMAD2' 'IRF3::SMAD3'
'IRF3::SNAI1' 'IRF3::SPDEF' 'IRF3::SREBF1' 'IRF3::TFCP2' 'IRF3::THRA'
'IRF3::ZNF335' 'IRF3::ZNF528' 'IRX3::ZNF589' 'JUN::NR2F1' 'JUN::REL'
'JUN::SNAI1' 'JUN::SOX9' 'JUN::SREBF1' 'JUN::STAT5A' 'JUN::ZNF589'
'JUN::ZNF680' 'JUNB::NR2F1' 'JUNB::REL' 'JUNB::SMAD2' 'JUNB::SNAI1'
'JUNB::SOX9' 'JUNB::ZNF589' 'JUND::NR2F1' 'JUND::REL' 'JUND::SNAI1'
'JUND::SOX9' 'JUND::SREBF1' 'JUND::ZNF589' 'KLF4::MYBL2' 'KLF4::NR1H3'
'KLF4::NR1I3' 'KLF4::NR2F6' 'KLF4::REL' 'KLF4::RELA' 'KLF4::RELB'
'KLF4::SMAD2' 'KLF4::SOX4' 'KLF4::SREBF1' 'KLF4::STAT1' 'KLF4::STAT2'
'KLF4::TCF7L1' 'KLF4::TFCP2' 'KLF4::ZNF354A' 'KLF4::ZNF394'
'KLF4::ZNF528' 'KLF4::ZNF589' 'MAFF::NR2F1' 'MAFF::SNAI1' 'MAFF::SOX9'
'MAFF::ZNF589' 'MAFG::NR2F1' 'MAFG::SNAI1' 'MAFG::ZNF589' 'MAFK::SOX9'
'MAFK::ZNF589' 'MEF2D::NFIC' 'MEF2D::NR2F1' 'MEIS1::NFIC' 'MEIS1::REL'
'MEIS1::SMAD2' 'MEIS1::SNAI1' 'MEIS1::SREBF1' 'MEIS1::THRA' 'MSX2::NR2F6'
'MSX2::REL' 'MSX2::RUNX1' 'MSX2::SMAD2' 'MSX2::SOX9' 'MSX2::SREBF1'
'MSX2::ZNF528' 'MSX2::ZNF589' 'MXI1::MYBL2' 'MXI1::NFAT5' 'MXI1::NFE2L1'
'MXI1::NFIC' 'MXI1::NR2F1' 'MXI1::SOX13' 'MXI1::UBP1' 'MXI1::ZNF335'
'MXI1::ZNF563' 'MXI1::ZNF589' 'MYBL2::NFIC' 'MYBL2::NKX3-1'
'MYBL2::NR2F1' 'MYBL2::NR2F6' 'MYBL2::RFX2' 'MYBL2::SMAD2' 'MYBL2::SMAD3'
'MYBL2::SNAI1' 'MYBL2::SNAI2' 'MYBL2::SOX9' 'MYBL2::SREBF1'
'MYBL2::TFCP2' 'MYBL2::THRA' 'MYBL2::ZNF335' 'MYBL2::ZNF528'
'MYBL2::ZNF680' 'MYNN::NFAT5' 'MYNN::NFE2L1' 'NFAT5::NFIB' 'NFAT5::NFIC'
'NFAT5::NFKB2' 'NFAT5::NKX3-1' 'NFAT5::NR2F6' 'NFAT5::OVOL2'
'NFAT5::PITX1' 'NFAT5::REL' 'NFAT5::RELB' 'NFAT5::RUNX1' 'NFAT5::SMAD2'
'NFAT5::SMAD3' 'NFAT5::SNAI2' 'NFAT5::SOX9' 'NFAT5::SPDEF'
'NFAT5::SREBF1' 'NFAT5::STAT6' 'NFAT5::TFCP2' 'NFAT5::THRA'
'NFAT5::TWIST1' 'NFAT5::ZBTB18' 'NFAT5::ZNF274' 'NFAT5::ZNF335'
'NFAT5::ZNF528' 'NFAT5::ZNF589' 'NFAT5::ZNF680' 'NFATC1::NR2F1'
'NFE2L1::NKX3-1' 'NFE2L1::NR2F1' 'NFE2L1::NR2F6' 'NFE2L1::REL'
'NFE2L1::RELB' 'NFE2L1::RUNX1' 'NFE2L1::SMAD2' 'NFE2L1::SNAI1'
'NFE2L1::SOX9' 'NFE2L1::SREBF1' 'NFE2L1::STAT5A' 'NFE2L1::STAT6'
'NFE2L1::TFCP2' 'NFE2L1::ZNF274' 'NFE2L1::ZNF317' 'NFE2L1::ZNF528'
'NFE2L1::ZNF589' 'NFE2L1::ZNF680' 'NFE2L2::NR2F1' 'NFE2L2::SNAI1'
'NFE2L2::ZNF589' 'NFIA::NR2F6' 'NFIA::REL' 'NFIA::RELB' 'NFIA::RUNX1'
'NFIA::SMAD2' 'NFIA::SREBF1' 'NFIA::STAT6' 'NFIA::TFCP2' 'NFIA::ZNF528'
'NFIA::ZNF589' 'NFIA::ZNF680' 'NFIB::NR2F1' 'NFIB::SNAI1' 'NFIB::SOX13'
'NFIB::ZNF589' 'NFIC::NFKB2' 'NFIC::NR1H3' 'NFIC::NR1I3' 'NFIC::NR2F1'
'NFIC::NR2F6' 'NFIC::NR4A2' 'NFIC::REL' 'NFIC::RELA' 'NFIC::RUNX1'
'NFIC::RUNX2' 'NFIC::RUNX3' 'NFIC::SMAD2' 'NFIC::SNAI1' 'NFIC::SOX13'
'NFIC::SOX4' 'NFIC::SOX9' 'NFIC::SPDEF' 'NFIC::SREBF1' 'NFIC::STAT1'
'NFIC::STAT2' 'NFIC::STAT5A' 'NFIC::TFCP2' 'NFIC::TWIST1' 'NFIC::ZBTB7B'
'NFIC::ZFP28' 'NFIC::ZNF354A' 'NFIC::ZNF394' 'NFIC::ZNF528'
'NFIC::ZNF589' 'NFIC::ZNF680' 'NFKB2::NR2F1' 'NFKB2::SNAI1'
'NKX3-1::SOX13' 'NKX3-1::UBP1' 'NKX3-1::ZNF563' 'NKX3-1::ZNF589'
'NR1H3::RFX2' 'NR1H3::SMAD2' 'NR1H3::SMAD3' 'NR1H3::SNAI2' 'NR1H3::SOX9'
'NR1H3::SREBF1' 'NR1H3::TFCP2' 'NR1H3::TWIST1' 'NR1H3::ZNF335'
'NR1H3::ZNF528' 'NR1H3::ZNF589' 'NR1I3::NR2F1' 'NR1I3::SMAD3'
'NR1I3::SNAI1' 'NR1I3::ZNF335' 'NR1I3::ZNF85' 'NR2F1::NR2F6'
'NR2F1::NR3C1' 'NR2F1::REL' 'NR2F1::SMAD2' 'NR2F1::SMAD3' 'NR2F1::SOX4'
'NR2F1::SOX9' 'NR2F1::SREBF1' 'NR2F1::STAT1' 'NR2F1::STAT2'
'NR2F1::TFCP2' 'NR2F1::TGIF1' 'NR2F1::THRA' 'NR2F1::TP53' 'NR2F1::UBP1'
'NR2F1::ZBTB49' 'NR2F1::ZNF335' 'NR2F1::ZNF394' 'NR2F1::ZNF528'
'NR2F1::ZNF589' 'NR2F6::SNAI1' 'NR2F6::SOX13' 'NR2F6::UBP1'
'NR2F6::ZBTB49' 'NR2F6::ZNF589' 'NR3C1::SNAI1' 'NR3C1::SOX9'
'NR3C1::ZNF528' 'OVOL2::ZNF589' 'PLAGL1::SNAI1' 'POU2F1::SNAI1'
'PRDM1::SOX9' 'REL::SMAD3' 'REL::SNAI1' 'REL::SOX13' 'REL::TP63'
'RELB::SOX13' 'RELB::UBP1' 'RELB::ZNF563' 'RELB::ZNF589' 'RFX2::SREBF1'
'RFX2::STAT1' 'RREB1::SMAD2' 'RREB1::SOX9' 'RREB1::SREBF1' 'RREB1::TFCP2'
'RREB1::ZNF680' 'RUNX1::SNAI1' 'RUNX1::SOX13' 'RUNX2::ZNF589'
'SMAD2::SMAD3' 'SMAD2::SNAI1' 'SMAD2::SOX13' 'SMAD2::UBP1'
'SMAD2::ZBTB49' 'SMAD2::ZNF335' 'SMAD2::ZNF563' 'SMAD2::ZNF589'
'SMAD3::SNAI1' 'SMAD3::SOX13' 'SMAD3::STAT1' 'SMAD3::ZNF394'
'SMAD3::ZNF589' 'SMARCA1::ZNF589' 'SNAI1::SOX4' 'SNAI1::SREBF1'
'SNAI1::STAT1' 'SNAI1::STAT2' 'SNAI1::TBX3' 'SNAI1::TFCP2' 'SNAI1::TGIF1'
'SNAI1::TP53' 'SNAI1::TP63' 'SNAI1::UBP1' 'SNAI1::ZBTB49' 'SNAI1::ZNF335'
'SNAI1::ZNF394' 'SNAI1::ZNF528' 'SNAI1::ZNF589' 'SNAI1::ZNF680'
'SNAI2::ZNF589' 'SOX13::SOX9' 'SOX13::SREBF1' 'SOX13::STAT6'
'SOX13::TFCP2' 'SOX13::THRA' 'SOX13::ZNF274' 'SOX13::ZNF335'
'SOX13::ZNF528' 'SOX13::ZNF589' 'SOX13::ZNF680' 'SOX4::ZNF335'
'SOX4::ZNF589' 'SOX9::STAT1' 'SOX9::TP53' 'SOX9::TP63' 'SOX9::UBP1'
'SOX9::ZBTB49' 'SOX9::ZNF335' 'SOX9::ZNF394' 'SOX9::ZNF589' 'SPDEF::UBP1'
'SPDEF::ZNF589' 'SREBF1::UBP1' 'SREBF1::ZBTB49' 'SREBF1::ZNF335'
'SREBF1::ZNF563' 'SREBF1::ZNF589' 'STAT1::ZNF335' 'STAT2::ZNF335'
'STAT6::UBP1' 'STAT6::ZNF589' 'TBX3::ZNF589' 'TCF7L1::ZNF589'
'TEAD1::ZNF589' 'TEAD3::ZNF589' 'TFCP2::UBP1' 'TFCP2::ZNF335'
'TFCP2::ZNF563' 'TFCP2::ZNF589' 'TGIF1::ZNF589' 'THRA::ZNF589'
'TP53::ZNF589' 'TP63::ZNF589' 'UBP1::ZNF528' 'UBP1::ZNF680'
'ZBTB49::ZNF528' 'ZBTB49::ZNF680' 'ZFP28::ZNF335' 'ZFP28::ZNF589'
'ZNF134::ZNF589' 'ZNF274::ZNF589' 'ZNF335::ZNF394' 'ZNF335::ZNF528'
'ZNF335::ZNF589' 'ZNF335::ZNF680' 'ZNF354A::ZNF589' 'ZNF528::ZNF563'
'ZNF528::ZNF589' 'ZNF589::ZNF680' 'ZNF589::ZNF768' 'ZNF589::ZNF85']
expr_loop_tissue 677
expr_pro_tissue|expr_loop_tissue 128
Name: label, dtype: int64
*****, number of vocab words 805
CPU times: user 4min 1s, sys: 947 ms, total: 4min 2s
Wall time: 4min 1s
###Markdown
filtering
###Code
tissue_to_tfs_dict = {}
for tissue in normal_tissues:
tfs = tf_df[tf_df.cell_type==tissue].tf.values
tissue_to_tfs_dict[tissue] = tfs
vocab_tissue_all_filt = pd.DataFrame()
for tissue in normal_tissues:
tfs = tf_df[tf_df.cell_type==tissue].tf.values
vocab_summary_df = pd.read_csv(os.path.join(save_dir, tissue+'_vocab_summary.csv'), index_col=0)
print(tissue, vocab_summary_df.shape)
if vocab_summary_df.shape[0]>100:
vocab_summary_df1 = vocab_summary_df.copy()
vocab_summary_df1[['tf1','tf2']] = vocab_summary_df1.vocab.str.split('::',expand=True)
vocab_summary_df1 = vocab_summary_df1[(vocab_summary_df1.tf1.isin(tfs))|(vocab_summary_df1.tf2.isin(tfs))]
print(tissue, 'filtered')
print(vocab_summary_df.shape, vocab_summary_df1.shape)
vocab_tissue_all_filt = pd.concat([vocab_tissue_all_filt,vocab_summary_df1])
else:
vocab_tissue_all_filt = pd.concat([vocab_tissue_all_filt,vocab_summary_df])
print(vocab_tissue_all_filt.tissue.value_counts())
vocab_tissue_all_filt.to_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'))
vocab_tissue_all_filt.vocab.unique().shape, vocab_tissue_all_filt.shape
vocab_tissue_all_filt.tissue.value_counts().describe()
# vocab_tissue_all_filt.tissue.value_counts().plot.bar()
vocab_counts = pd.DataFrame(vocab_tissue_all_filt.tissue.value_counts()).reset_index()
vocab_counts.columns = ['tissue','count']
vocab_counts
sns.set(style="whitegrid")
ax = sns.barplot(data=vocab_counts, x='tissue', y='count',color='black')
plt.xticks(rotation=90)
plt.subplots_adjust(top=1, bottom=.3)
plt.savefig(os.path.join(save_dir,'normal_tissue_vocab_counts.pdf'))
###Output
_____no_output_____
###Markdown
upset plot
###Code
plt.style.use('seaborn-paper')
vocab_tissue_all_filt = pd.read_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'),index_col=0)
all_normal = vocab_tissue_all_filt.groupby('vocab').agg({'tissue': '|'.join,'num_instance':sum, 'label': lambda x: '|'.join(list(set(x)))})
display(all_normal)
tissue_counts = all_normal.tissue.value_counts()
print(tissue_counts)
names = [x.split('|') for x in tissue_counts.index]
values = list(tissue_counts.values)
data_upset = from_memberships(names, data=values)
plot(data_upset)
# plt.savefig(os.path.join(save_dir, 'vocabs_cancer_upset.pdf'))
###Output
_____no_output_____
###Markdown
numbers``` of all regulatory TF vocabulary motifs occur pairwise within the same promoter region (Intra-promoter), occur pairwise within the same enhancer region (Intra-Enhancer), and occur with one motif residing in a distal enhancer region and the paired motif residing in the looped target gene promoter (Inter-Enhancer-Promoter) ```
###Code
vocab_tissue_all_filt = pd.read_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'),index_col=0)
vocab_tissue_all_filt[:5]
save_dir
tf_tf_loop_type_files = glob.glob('../data/processed/fig4_modelling/tf_tf_pairs/expr*loop_type.csv')
tissue_loop_df_dict={}
for file in tf_tf_loop_type_files:
tissue = os.path.basename(file).split('_')[1]
print(tissue, file)
tissue_loop_df_dict[tissue] = pd.read_csv(file,index_col=0).fillna('')
# pd.read_csv('../data/processed/fig4_modelling/tf_tf_pairs/expr_Astrocytes_loop_type.csv',index_col=0)
vocab_tissue_all_filt['num_genes_pro_pro'] = 0
vocab_tissue_all_filt['num_genes_pro_loop'] = 0
vocab_tissue_all_filt['num_genes_loop_loop'] = 0
vocab_tissue_all_filt['num_genes_all_config'] = 0
for idx, row in vocab_tissue_all_filt.iterrows():
df = tissue_loop_df_dict[row['tissue']]
info = df.loc[row['vocab']]
genes_all = set(info.pro_pro_genes.split('|')).union(set(info.pro_loop_genes.split('|'))).union(set(info.loop_loop_genes.split('|')))
vocab_tissue_all_filt.at[idx,'num_genes_pro_pro'] = info['pro_pro_count']
vocab_tissue_all_filt.at[idx,'num_genes_pro_loop'] = info['pro_loop_count']
vocab_tissue_all_filt.at[idx,'num_genes_loop_loop'] = info['loop_loop_count']
vocab_tissue_all_filt.at[idx,'num_genes_all_config'] = len(genes_all)
vocab_tissue_all_filt.sum()
config_df = vocab_tissue_all_filt[['num_genes_pro_pro','num_genes_pro_loop','num_genes_loop_loop' ]]
config_df = config_df>0
config_df.sum()/config_df.shape[0]
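# Optional sketch (not part of the original analysis): report the same fractions under the
# configuration names used in the markdown above, assuming pro_pro ~ Intra-promoter,
# loop_loop ~ Intra-Enhancer and pro_loop ~ Inter-Enhancer-Promoter.
config_fracs = (config_df.sum() / config_df.shape[0]).rename({
    'num_genes_pro_pro': 'Intra-promoter',
    'num_genes_loop_loop': 'Intra-Enhancer',
    'num_genes_pro_loop': 'Inter-Enhancer-Promoter',
})
print(config_fracs)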
# vocab_tissue_all_filt['frac_genes_pro_pro'] = vocab_tissue_all_filt['num_genes_pro_pro']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_loop_loop'] = vocab_tissue_all_filt['num_genes_loop_loop']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_pro_loop'] = vocab_tissue_all_filt['num_genes_pro_loop']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_pro_pro_wt'] = vocab_tissue_all_filt['num_genes_pro_pro']*2*vocab_tissue_all_filt['weighting_factor']
# vocab_tissue_all_filt['frac_genes_loop_loop_wt'] = vocab_tissue_all_filt['num_genes_loop_loop']
# vocab_tissue_all_filt['frac_genes_pro_loop_wt'] = vocab_tissue_all_filt['num_genes_pro_loop']*vocab_tissue_all_filt['weighting_factor']
# vocab_tissue_all_filt['num_genes_all_config_wt'] = vocab_tissue_all_filt['frac_genes_pro_pro_wt'] + vocab_tissue_all_filt['frac_genes_loop_loop_wt'] + vocab_tissue_all_filt['frac_genes_pro_loop_wt']
# vocab_tissue_all_filt['frac_genes_pro_pro_wt'] = vocab_tissue_all_filt['frac_genes_pro_pro_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
# vocab_tissue_all_filt['frac_genes_loop_loop_wt'] = vocab_tissue_all_filt['frac_genes_loop_loop_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
# vocab_tissue_all_filt['frac_genes_pro_loop_wt'] = vocab_tissue_all_filt['frac_genes_pro_loop_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
###Output
_____no_output_____ |
lesson02_linear_multiple_input.ipynb | ###Markdown
Lesson02_linear_multiple_input
###Code
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # force training on CPU
from keras.layers import *
from keras.models import *
from keras import optimizers
import numpy as np
x_data = [
[73., 80., 75.],
[93., 88., 93.],
[89., 91., 90.],
[96., 98., 100.],
[73., 66., 70.]
]
y_data = [
[150.],
[185.],
[180.],
[196.],
[142.]
]
x_data, y_data
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
x_data, y_data
model = Sequential()
model.add(Dense(units = 1, input_dim = 3))
model.compile(loss = 'mse', optimizer = 'rmsprop')
model.summary()
model.fit(x_data, y_data, epochs = 5000)
y_predict = model.predict(np.array([[72, 79, 74]]))
y_predict
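# Optional check (not part of the original lesson): inspect the learned parameters.
# A single Dense unit fits y ~ w1*x1 + w2*x2 + w3*x3 + b.
weights, bias = model.get_weights()
print(weights.flatten(), bias)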
###Output
_____no_output_____ |
src/analyze_results.ipynb | ###Markdown
K-fold Cross Validation Analysis
###Code
!unzip results.zip
###Output
Archive: results.zip
creating: IMDBBINARY/results/
inflating: IMDBBINARY/results/blocks1_layers3_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers2_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers1_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers2_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers1_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers2_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers1_jkn_degree_normalized_results.csv
creating: IMDBBINARY/results/.ipynb_checkpoints/
inflating: IMDBBINARY/results/blocks2_layers3_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers2_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers2_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers2_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers3_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers3_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers1_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers1_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers2_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers2_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers1_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers1_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers3_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers3_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers1_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers3_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers2_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers3_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers3_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers1_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers1_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers2_sum_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers3_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks1_layers2_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers1_jkn_uniform_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers1_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers3_sum_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks3_layers3_jkn_degree_normalized_results.csv
inflating: IMDBBINARY/results/blocks2_layers2_sum_uniform_normalized_results.csv
###Markdown
Package imports.
###Code
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('seaborn-whitegrid')
###Output
_____no_output_____
###Markdown
Read all files and extract the best results for each model.
###Code
RESULTS_PATH = 'IMDBBINARY/results'
filenames = [f for f in os.listdir(RESULTS_PATH) if f.endswith('_results.csv')]
results = []
for filename in filenames:
# Read the experiments and obtain
res = pd.read_csv(f'{RESULTS_PATH}/{filename}')
max_value = res['accuracy_mean'].max()
res = res[res['accuracy_mean'] == max_value].to_dict(orient='records')[0]
# Disect the filename to obtain model parameters
filename = filename[:-12]
res['blocks'] = filename.split('_')[0].lstrip('blocks')
res['layers'] = filename.split('_')[1].lstrip('layers')
res['readout'] = filename.split('_')[2]
res['scaling'] = filename.split('_')[3]
results.append(res)
results = pd.DataFrame(results)
results
###Output
_____no_output_____
###Markdown
Best obtained results:
###Code
# Max mean accuracy
max_mean_score = results['accuracy_mean'].max()
display(results[results['accuracy_mean'] == max_mean_score].reset_index(drop=True))
###Output
_____no_output_____
###Markdown
Result analysis:
###Code
# Num blocks vs accuracy
sns.catplot(x='blocks', y='accuracy_mean', data=results, order=['1', '2', '3'], kind='box')
# Num blocks vs accuracy
sns.catplot(x='layers', y='accuracy_mean', data=results, order=['1', '2', '3'], kind='box')
# Scaling factor vs accuracy
sns.catplot(x='scaling', y='accuracy_mean', data=results, kind='box')
# Readout operation vs accuracy
sns.catplot(x='readout', y='accuracy_mean', data=results, kind='box')
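# Optional numeric companion to the box plots (a small sketch, not in the original):
# mean and spread of cross-validation accuracy per hyperparameter value.
for col in ['blocks', 'layers', 'readout', 'scaling']:
    display(results.groupby(col)['accuracy_mean'].agg(['mean', 'std']))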
###Output
_____no_output_____ |
notebooks/Multilabel_WI_with_aug_training/[Edited]_10a_Copy_of_WeightImprintingSigmoid_MultiPred_Imat_with_bbox_for_train_and_test_with_training_and_aug.ipynb | ###Markdown
Mount Drive
###Code
from google.colab import drive
drive.mount('/content/gdrive')
!pip install -U -q PyDrive
!pip install httplib2==0.15.0
import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from pydrive.files import GoogleDriveFileList
from google.colab import auth
from oauth2client.client import GoogleCredentials
from getpass import getpass
import urllib
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Cloning PAL_2021 to access modules.
# Need password to access private repo.
if 'PAL_2021' not in os.listdir():
cmd_string = 'git clone https://github.com/PAL-ML/CLIPPER.git'
os.system(cmd_string)
###Output
Collecting httplib2==0.15.0
  Downloading https://files.pythonhosted.org/packages/be/83/5e006e25403871ffbbf587c7aa4650158c947d46e89f2d50dcaf018464de/httplib2-0.15.0-py3-none-any.whl (94kB)
Installing collected packages: httplib2
Found existing installation: httplib2 0.17.4
Uninstalling httplib2-0.17.4:
Successfully uninstalled httplib2-0.17.4
Successfully installed httplib2-0.15.0
###Markdown
Installation
###Code
!pip install scikit-learn==0.24.1
###Output
Collecting scikit-learn==0.24.1
  Downloading https://files.pythonhosted.org/packages/f3/74/eb899f41d55f957e2591cde5528e75871f817d9fb46d4732423ecaca736d/scikit_learn-0.24.1-cp37-cp37m-manylinux2010_x86_64.whl (22.3MB)
Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.1) (1.19.5)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.1) (1.0.1)
Requirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.1) (1.4.1)
Collecting threadpoolctl>=2.0.0
Downloading https://files.pythonhosted.org/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl
Installing collected packages: threadpoolctl, scikit-learn
Found existing installation: scikit-learn 0.22.2.post1
Uninstalling scikit-learn-0.22.2.post1:
Successfully uninstalled scikit-learn-0.22.2.post1
Successfully installed scikit-learn-0.24.1 threadpoolctl-2.1.0
###Markdown
Install CLIP dependencies
###Code
# import subprocess
# CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1]
# print("CUDA version:", CUDA_version)
# if CUDA_version == "10.0":
# torch_version_suffix = "+cu100"
# elif CUDA_version == "10.1":
# torch_version_suffix = "+cu101"
# elif CUDA_version == "10.2":
# torch_version_suffix = ""
# else:
# torch_version_suffix = "+cu110"
# ! pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex
# ! pip install ftfy regex
!pip install git+https://github.com/openai/CLIP.git
###Output
Collecting git+https://github.com/openai/CLIP.git
Cloning https://github.com/openai/CLIP.git to /tmp/pip-req-build-wtcug5i6
Running command git clone -q https://github.com/openai/CLIP.git /tmp/pip-req-build-wtcug5i6
Collecting ftfy
  Downloading https://files.pythonhosted.org/packages/af/da/d215a091986e5f01b80f5145cff6f22e2dc57c6b048aab2e882a07018473/ftfy-6.0.3.tar.gz (64kB)
Requirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from clip==1.0) (2019.12.20)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from clip==1.0) (4.41.1)
Collecting torch~=1.7.1
  Downloading https://files.pythonhosted.org/packages/90/5d/095ddddc91c8a769a68c791c019c5793f9c4456a688ddd235d6670924ecb/torch-1.7.1-cp37-cp37m-manylinux1_x86_64.whl (776.8MB)
Collecting torchvision~=0.8.2
  Downloading https://files.pythonhosted.org/packages/94/df/969e69a94cff1c8911acb0688117f95e1915becc1e01c73e7960a2c76ec8/torchvision-0.8.2-cp37-cp37m-manylinux1_x86_64.whl (12.8MB)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from ftfy->clip==1.0) (0.2.5)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch~=1.7.1->clip==1.0) (1.19.5)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch~=1.7.1->clip==1.0) (3.7.4.3)
Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.7/dist-packages (from torchvision~=0.8.2->clip==1.0) (7.1.2)
Building wheels for collected packages: clip, ftfy
  Building wheel for clip (setup.py) ... done
Created wheel for clip: filename=clip-1.0-cp37-none-any.whl size=1368708 sha256=e843beab9ae1afbec4026c335a81e9c13e6fe5a63f9d5587a390887ee51073d7
Stored in directory: /tmp/pip-ephem-wheel-cache-3ek7uso1/wheels/79/51/d7/69f91d37121befe21d9c52332e04f592e17d1cabc7319b3e09
  Building wheel for ftfy (setup.py) ... done
Created wheel for ftfy: filename=ftfy-6.0.3-cp37-none-any.whl size=41916 sha256=a7df71ac5354a10f426cb36e23188fbd1b9c47c3d601cdbee31a001d4ea4cff3
Stored in directory: /root/.cache/pip/wheels/99/2c/e6/109c8a28fef7a443f67ba58df21fe1d0067ac3322e75e6b0b7
Successfully built clip ftfy
ERROR: torchtext 0.9.1 has requirement torch==1.8.1, but you'll have torch 1.7.1 which is incompatible.
Installing collected packages: ftfy, torch, torchvision, clip
Found existing installation: torch 1.8.1+cu101
Uninstalling torch-1.8.1+cu101:
Successfully uninstalled torch-1.8.1+cu101
Found existing installation: torchvision 0.9.1+cu101
Uninstalling torchvision-0.9.1+cu101:
Successfully uninstalled torchvision-0.9.1+cu101
Successfully installed clip-1.0 ftfy-6.0.3 torch-1.7.1 torchvision-0.8.2
###Markdown
Install clustering dependencies
###Code
!pip -q install umap-learn>=0.3.7
###Output
_____no_output_____
###Markdown
Install dataset manager dependencies
###Code
!pip install wget
###Output
Collecting wget
Downloading https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b935ff0c6cfe542970f04e29c756b0e147251b2fb251f/wget-3.2.zip
Building wheels for collected packages: wget
  Building wheel for wget (setup.py) ... done
Created wheel for wget: filename=wget-3.2-cp37-none-any.whl size=9681 sha256=7169ba1e436c9c74ca26fb276befd269bcb4206358afc2c45f9a22f404e00a69
Stored in directory: /root/.cache/pip/wheels/40/15/30/7d8f7cea2902b4db79e3fea550d7d7b85ecb27ef992b618f3f
Successfully built wget
Installing collected packages: wget
Successfully installed wget-3.2
###Markdown
Imports
###Code
# ML Libraries
import tensorflow as tf
import tensorflow_hub as hub
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
import keras
# Data processing
import PIL
import base64
import imageio
import pandas as pd
import numpy as np
import json
from PIL import Image
import cv2
from sklearn.feature_extraction.image import extract_patches_2d
from skimage.measure import label, regionprops
# Plotting
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IPython.core.display import display, HTML
from matplotlib import cm
import matplotlib.image as mpimg
# Models
import clip
# Datasets
import tensorflow_datasets as tfds
# Clustering
# import umap
from sklearn import metrics
from sklearn.cluster import KMeans
#from yellowbrick.cluster import KElbowVisualizer
# Misc
import progressbar
import logging
from abc import ABC, abstractmethod
import time
import urllib.request
import os
from sklearn.metrics import jaccard_score, hamming_loss, accuracy_score, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
# Modules
# from PAL_2021.PAL_HILL.ExperimentModules import embedding_models
from CLIPPER.code.ExperimentModules.dataset_manager import DatasetManager
from CLIPPER.code.ExperimentModules.weight_imprinting_classifier import WeightImprintingClassifier
from CLIPPER.code.ExperimentModules import simclr_data_augmentations
from CLIPPER.code.ExperimentModules.utils import (save_npy, load_npy,
get_folder_id,
create_expt_dir,
save_to_drive,
load_all_from_drive_folder,
download_file_by_name,
delete_file_by_name)
logging.getLogger('googleapicliet.discovery_cache').setLevel(logging.ERROR)
import os, logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
logging.getLogger("tensorflow_hub").setLevel(logging.CRITICAL)
###Output
_____no_output_____
###Markdown
Initialization & Constants**Edited**
###Code
dataset_name = 'IMaterialist'
folder_name = "FGCVIMaterialist-Embeddings-24-03-21"
# Change parentid to match that of experiments root folder in gdrive
parentid = '1bK72W-Um20EQDEyChNhNJthUNbmoSEjD'
# Filepaths
train_labels_filename = "train_labels.npz"
val_labels_filename = "val_labels.npz"
test_labels_filename = "test_labels.npz"
train_embeddings_filename_suffix = "_embeddings_train.npz"
val_embeddings_filename_suffix = "_embeddings_val.npz"
test_embeddings_filename_suffix = "_embeddings_test.npz"
# Initialize sepcific experiment folder in drive
folderid = create_expt_dir(drive, parentid, folder_name)
###Output
title: FGCVIMaterialist-Embeddings-24-03-21, id: 1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
Experiment folder already exists. WARNING: Following with this run might overwrite existing results stored.
###Markdown
Load data
###Code
LABEL_PATH = '/content/gdrive/MyDrive/PAL_HILL_2021/Experiments/FGCVIMaterialist-Embeddings-24-03-21/train_labels.npz'
IMG_PATH = '/content/gdrive/MyDrive/PAL_HILL_2021/Datasets/imaterialist-fashion'
# IMG_LIST_PATH = '/content/drive/MyDrive/PAL_HILL_2021/Datasets/Coco2017Train/img_list.npz'
def get_ndarray_from_drive(drive, folderid, filename):
download_file_by_name(drive, folderid, filename)
return np.load(filename, allow_pickle=True)['arr_0']
train_labels = get_ndarray_from_drive(drive, folderid, train_labels_filename)
# val_labels = get_ndarray_from_drive(drive, folderid, val_labels_filename)
# test_labels = get_ndarray_from_drive(drive, folderid, test_labels_filename)
data_folder = '/content/gdrive/MyDrive/PAL_HILL_2021/Datasets/imaterialist-fashion/'
data_df = pd.read_csv('/content/gdrive/MyDrive/PAL_HILL_2021/Datasets/imaterialist-fashion/train.csv')
split = 'train'
image_dir = os.path.join(data_folder, split)
class_id_col = data_df['ClassId'].copy().values
image_id_col = data_df['ImageId'].copy().values
image_fn_col = data_df['ImageId'].copy().apply(lambda x: os.path.join(image_dir, x+'.jpg')).values
encoded_pixels_col = data_df['EncodedPixels'].copy().values
height_col = data_df['Height'].copy().values
width_col = data_df['Width'].copy().values
attributes_ids_col = data_df['AttributesIds'].copy().fillna('').values
final_train_labels = train_labels
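# Keep only label ids 0-293 (the 294 classes indexed further below), dropping any larger ids.
# final_train_labels is the same array object as train_labels, so the loop below updates both in place.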
for i in range(len(final_train_labels)):
la = final_train_labels[i]
temp_array = []
for l in la:
if l <= 293:
temp_array.append(l)
fin_array = np.array(temp_array)
train_labels[i] = fin_array
# Group consecutive annotation rows by ImageId and collect the union of labels per image.
labels_per_img = []
id_per_img = []
s = set(final_train_labels[0])
for i in range(1, len(final_train_labels)):
    if image_id_col[i] == image_id_col[i - 1]:
        s.update(final_train_labels[i])
    else:
        labels_per_img.append(list(s))
        id_per_img.append(image_id_col[i - 1])
        # start a new group with the current row's labels (previously dropped)
        s = set(final_train_labels[i])
# flush the final image's accumulated labels
labels_per_img.append(list(s))
id_per_img.append(image_id_col[len(final_train_labels) - 1])
###Output
_____no_output_____
###Markdown
Init model (Clip)
###Code
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
###Output
_____no_output_____
###Markdown
Create label dictionary
###Code
unique_labels = []
for i in range(294):
unique_labels.append(i)
label_dictionary = {la:[] for la in unique_labels}
for i in range(int(len(final_train_labels)/2)):
la = final_train_labels[i]
for l in la:
label_dictionary[l].append(i)
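# Optional sanity check (a sketch, not in the original): count indexed rows per class id.
rows_per_class = {la: len(idxs) for la, idxs in label_dictionary.items()}
print('classes with at least one indexed row:', sum(1 for c in rows_per_class.values() if c > 0))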
###Output
_____no_output_____
###Markdown
Weight Imprinting models on train data embeddings Function definitions
###Code
def encode_aug_img(images):
images *= 255
images = images.astype('uint8')
embeddings = []
for img in images:
pil_img = Image.fromarray(img.astype('uint8'))
preproc_img = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
_emb = model.encode_image(preproc_img)
embeddings.append(_emb.cpu().detach().numpy()[0])
del _emb
del pil_img
del preproc_img
#embeddings.append(np.array(emb_per_img))
#del emb_per_img
return np.array(embeddings)
def _process_encoded_pixels(row):
def get_pixel_loc(value):
x = value%height
y = value//height
return x, y
def process_encoded_pixels_string(encoded_pixels):
mask_pixels = []
ep_list = [int(ep_item) for ep_item in encoded_pixels.split(' ')]
idx = 0
while idx < len(ep_list):
pixel = ep_list[idx]
num_pixels = ep_list[idx+1]
for np in range(num_pixels):
mask_pixels.append(pixel+np)
idx += 2
return mask_pixels
def get_mask(mask_pixels, height, width):
mask = np.zeros((height, width))
for mp in mask_pixels:
x, y = get_pixel_loc(mp)
mask[x, y] = 1
return mask
encoded_pixels = row[0]# .numpy().decode('utf=8')
height = int(row[1])
width = int(row[2])
mask_pixels = process_encoded_pixels_string(encoded_pixels)
mask = get_mask(mask_pixels, height, width)
return mask
def generate_masks(indices):
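  # For each dataframe row index: load the image, decode its run-length-encoded
  # segmentation into a binary mask, and keep the image crop corresponding to the
  # bounding box of the first mask region.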
masks_for_index = []
for i in indices:
img = mpimg.imread(data_folder + split + '/' + image_id_col[i] + '.jpg')
# print("index i = {} has shape = {}".format(i, img.shape))
mask = _process_encoded_pixels((encoded_pixels_col[i], height_col[i], width_col[i]))
props = regionprops(mask.astype(int))
for prop in props:
if img.ndim == 2:
masks_for_index.append(img[prop.bbox[0]: prop.bbox[2], prop.bbox[1]:prop.bbox[3]])
else:
masks_for_index.append(img[prop.bbox[0]: prop.bbox[2], prop.bbox[1]:prop.bbox[3], :])
break
return masks_for_index
def generate_patch_list(indices, episode):
patches_res = []
for i in indices:
img = mpimg.imread(IMG_PATH + i.zfill(12) + '.jpg')
# fig, ax = plt.subplots()
# ax.imshow(img)
for j in labels[i]:
if j['category_id'] in selected_labels_per_episode[episode]:
x = int(j['bbox'][0])
x1 = int(j['bbox'][0] + j['bbox'][2])
y = int(j['bbox'][1])
y1 = int(j['bbox'][1] + j['bbox'][3])
# rect = patches.Rectangle((j['bbox'][0], j['bbox'][1]), j['bbox'][2], j['bbox'][3], linewidth=1, edgecolor='r', facecolor='none')
# ax.add_patch(rect)
patches_res.append(img[y:y1, x:x1, :])
return patches_res
def generate_patch_list_per_img(indices, episode):
patches_res = []
for i in indices:
img = mpimg.imread(IMG_PATH + i.zfill(12) + '.jpg')
# fig, ax = plt.subplots()
# ax.imshow(img)
_patches_per_img = []
for j in labels[i]:
if j['category_id'] in selected_labels_per_episode[episode]:
x = int(j['bbox'][0])
x1 = int(j['bbox'][0] + j['bbox'][2])
y = int(j['bbox'][1])
y1 = int(j['bbox'][1] + j['bbox'][3])
# rect = patches.Rectangle((j['bbox'][0], j['bbox'][1]), j['bbox'][2], j['bbox'][3], linewidth=1, edgecolor='r', facecolor='none')
# ax.add_patch(rect)
_patches_per_img.append(img[y:y1, x:x1, :])
patches_res.append(_patches_per_img)
del _patches_per_img
return patches_res
def encode_patch(patches):
embeddings = []
pil_img = Image.fromarray(patches)
preproc_img = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
_emb = model.encode_image(preproc_img)
return np.array(_emb.cpu().detach().numpy()[0])
# for full images we just need the index of the image eg '34'
def encode_full_img(indices):
embeddings = []
for i in indices:
img = mpimg.imread(data_folder + split + '/' + id_per_img[i] + '.jpg')
pil_img = Image.fromarray(img)
preproc_img = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
_emb = model.encode_image(preproc_img)
embeddings.append(_emb.cpu().detach().numpy()[0])
del _emb
del pil_img
del preproc_img
#embeddings.append(np.array(emb_per_img))
#del emb_per_img
return np.array(embeddings)
def start_progress_bar(bar_len):
widgets = [
' [',
progressbar.Timer(format= 'elapsed time: %(elapsed)s'),
'] ',
progressbar.Bar('*'),' (',
progressbar.ETA(), ') ',
]
pbar = progressbar.ProgressBar(
max_value=bar_len, widgets=widgets
).start()
return pbar
def evaluate_multi_label_metrics(wi_cls, x,
y,
label_mapping,
threshold=0.7,
metrics=['hamming', 'jaccard', 'subset_accuracy', 'f1_score']):
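  # Hamming loss: fraction of (sample, label) entries predicted incorrectly.
  # Jaccard: per-sample |intersection| / |union| of true vs predicted label sets.
  # Subset accuracy: fraction of samples whose predicted label set matches exactly.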
hamming_score = 0
jaccard_index = 0
pred = wi_cls.predict_multi_label(x, threshold)
pred_mapped = [[label_mapping[val] for val in p] for p in pred]
mlb = MultiLabelBinarizer()
y_bin = mlb.fit_transform(y)
pred_bin = mlb.transform(pred_mapped)
# for i, pred_list in enumerate(pred):
# label_list = set(y[i])
# pred_list = set([label_mapping[p] for p in pred_list])
# len_intersection = len(label_list.intersection(pred_list))
# len_union = len(label_list.union(pred_list))
# if 'hamming' in metrics:
# hamming_score += len_union - len_intersection
# if 'jaccard' in metrics:
# jaccard_index += len_intersection/len_union
metric_values = {}
if 'hamming' in metrics:
# hamming_score = hamming_score / (len(y)*len(label_mapping))
hamming_score = hamming_loss(y_bin, pred_bin)
metric_values['hamming'] = hamming_score
if 'jaccard' in metrics:
# jaccard_index = jaccard_index / len(y)
jaccard_index = jaccard_score(y_bin, pred_bin, average='samples', zero_division = 1.0)
metric_values['jaccard'] = jaccard_index
if 'subset_accuracy' in metrics:
subset_accuracy = accuracy_score(y_bin, pred_bin)
metric_values['subset_accuracy'] = subset_accuracy
if 'f1_score' in metrics:
f1_score_value = f1_score(y_bin, pred_bin, average='samples', zero_division = 1.0)
metric_values['f1_score'] = f1_score_value
return metric_values
# NOTE: run_evaluations below is NOT USED in this notebook (run_evaluations_clip is used
# instead) and still needs to be updated before reuse (e.g. it references the commented-out
# per-image normalized embeddings).
def run_evaluations(
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
verbose=True,
normalize=True,
metrics=["hamming", "jaccard"],
threshold=0.72
):
metrics_values = {m:[] for m in metrics}
if verbose:
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
# wi_x = embeddings[train_indices[i]]
# wi_x_tmp = train_patches_per_episode[i] #generate_patch_list(train_indices[i]) #generate_patch_list(train_indices[i])
wi_x = train_emb_per_episode[i] #encode_image(wi_x_tmp)
'''
Edit the patches before final run
'''
# patch_list = eval_patches_per_episode[i] #generate_patch_list(eval_indices[i]) #patch_list_tmp #generate_patch_list(eval_indices[i]) #numpy array of patches generated from the images located at eval_indices[i]
eval_x_embeddings = eval_emb_per_episode[i] #encode_image(patch_list) #list of embeddings of patches
# dim_1 = eval_x_embeddings.shape[0]
# dim_2 = eval_x_embeddings.shape[1]
# dim_3 = eval_x_embeddings.shape[2]
# eval_x_embeddings = eval_x_embeddings.reshape((dim_1*dim_2, dim_3))
if normalize:
# print("wi_x.shape: {}".format(wi_x.shape))
# print("eval_x_embeddings.shape: {}".format(eval_x_embeddings.shape))
wi_x = WeightImprintingClassifier.preprocess_input(wi_x)
eval_x_embeddings = WeightImprintingClassifier.preprocess_input(eval_x_embeddings)
# eval_x_embeddings_norm = []
# for k in range(len(eval_x_embeddings)):
# _eval_x_embeddings = WeightImprintingClassifier.preprocess_input(eval_x_embeddings[k])
# eval_x_embeddings_norm.append(_eval_x_embeddings)
wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights(
wi_x, wi_y[i], False, True
)
wi_parameters = {
"num_classes": num_ways,
"input_dims": wi_x.shape[-1],
"scale": False,
"dense_layer_weights": wi_weights,
"multi_label": True
}
wi_cls = WeightImprintingClassifier(wi_parameters)
# _pred_y = wi_cls.predict_multi_label(eval_x, threshold)
# for j in range(len(_eval_y)):
# print([label_mapping[p] for p in _pred_y[j]], " vs ", _eval_y[j])
# Evaluate the weight imprinting model
# eval_x_embeddings = eval_x_embeddings.reshape((dim_1, dim_2, dim_3))
# print("eval_x_embeddings.shape before loop: {}".format(eval_x_embeddings.shape))
pred_eval_labels = []
for ind in range(len(eval_x_embeddings_norm)):
# print("Index = {}".format(ind))
_pred_per_patch = []
for patch_no in range(len(eval_x_embeddings_norm[ind])):
# print("Patch = {}".format(patch_no))
# plt.imshow(patch_list[ind][patch_no])
# plt.show()
# print("eval_x_embeddings.shape in loop: {}".format(eval_x_embeddings[ind][patch_no].shape))
_pred_label = wi_cls.predict_multi_label(eval_x_embeddings_norm[ind][patch_no].reshape(1,512), threshold = threshold)
mp_map = [[label_mapping[v] for v in p] for p in _pred_label]
for m in mp_map[0]:
if m:
_pred_per_patch.append(m)
pred_eval_labels.append(_pred_per_patch)
del _pred_per_patch
mlb = MultiLabelBinarizer()
y_bin = mlb.fit_transform(eval_y[i])
pred_bin = mlb.transform(pred_eval_labels)
if 'hamming' in metrics:
metrics_values['hamming'].append(hamming_loss(y_bin, pred_bin))
if 'jaccard' in metrics:
jaccard_index = jaccard_score(y_bin, pred_bin, average='samples')
metrics_values['jaccard'].append(jaccard_index)
if 'subset_accuracy' in metrics:
subset_accuracy = accuracy_score(y_bin, pred_bin)
metrics_values['subset_accuracy'].append(subset_accuracy)
if 'f1_score' in metrics:
f1_score_value = f1_score(y_bin, pred_bin, average='samples')
metrics_values['f1_score'].append(f1_score_value)
# _pred_label = wi_cls.predict_multi_label(eval_x_embeddings, threshold = threshold)
# mp_map = [[label_mapping[v] for v in p] for p in _pred_label]
for j in range(len(eval_y[i])):
# plt.imshow(eval_patches_per_episode[i][j])
# plt.show()
img = mpimg.imread(IMG_PATH + eval_indices[i][j].zfill(12) + '.jpg')
fig, ax = plt.subplots()
ax.imshow(img)
plt.show()
print("True label = {}".format(eval_y[i][j]))
print("Pred label = {}".format(pred_eval_labels[j]))
# met = evaluate_multi_label_metrics(wi_cls,
# eval_x_embeddings, eval_y[i], label_mapping, threshold, metrics
# )
# for m in met:
# metrics_values[m].append(met[m])
del wi_x
# del eval_x
del wi_cls
break
if verbose:
pbar.update(i+1)
return metrics_values
def run_evaluations_clip(
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
verbose=True,
normalize=True,
metrics=["hamming", "jaccard", 'f1_score', 'subset_accuracy'],
threshold=0.72,
):
metrics_values = {m:[] for m in metrics}
if verbose:
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
wi_x = train_emb_per_episode[i]
eval_x_embeddings = eval_emb_per_episode[i]
if normalize:
wi_x = WeightImprintingClassifier.preprocess_input(wi_x)
eval_x_embeddings = WeightImprintingClassifier.preprocess_input(eval_x_embeddings)
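    # Weight imprinting: each class's weight vector in the final dense layer is set to the
    # (normalized) mean embedding of that class's support examples, so no gradient training
    # is needed to initialise the classifier.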
wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights(
wi_x, wi_y[i], False, True
)
wi_parameters = {
"num_classes": num_ways,
"input_dims": wi_x.shape[-1],
"scale": False,
"dense_layer_weights": wi_weights,
"multi_label": True
}
wi_cls = WeightImprintingClassifier(wi_parameters)
# _pred_y = wi_cls.predict_multi_label(eval_x, threshold)
# for j in range(len(_eval_y)):
# print([label_mapping[p] for p in _pred_y[j]], " vs ", _eval_y[j])
# Evaluate the weight imprinting model
met = wi_cls.evaluate_multi_label_metrics(
eval_x_embeddings, eval_y[i], label_mapping, threshold, metrics
)
for m in met:
metrics_values[m].append(met[m])
del wi_x
# del eval_x
del wi_cls
if verbose:
pbar.update(i+1)
return metrics_values
def run_train_loop_old(
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
verbose=True,
normalize=True,
train_epochs_loop=[5],
train_batch_size=5,
metrics=["hamming", "jaccard", "f1_score"],
threshold=0.72
):
metrics_values = [{m:[] for m in metrics} for _ in range(len(train_epochs_loop)+1)]
if verbose:
pbar = start_progress_bar(num_episodes)
for idx_ep in range(num_episodes):
wi_x = train_emb_reshaped[idx_ep]
eval_x = eval_emb_per_episode[idx_ep]
# print(eval_x.shape)
_wi_y = wi_y[idx_ep]
wi_y_ = []
for i in range(len(_wi_y)):
for j in range(num_augmentations+1):
wi_y_.append(_wi_y[i])
if normalize:
wi_x = WeightImprintingClassifier.preprocess_input(wi_x)
eval_x = WeightImprintingClassifier.preprocess_input(eval_x)
eval_x = np.array(eval_x)
dim1, dim2 = eval_x.shape[0], eval_x.shape[-1]
eval_x = eval_x.reshape(dim1, dim2)
wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights(
wi_x, wi_y_, False, True
)
wi_parameters = {
"num_classes": num_ways,
"input_dims": wi_x.shape[-1],
"scale": False,
"dense_layer_weights": wi_weights,
"multi_label": True
}
wi_cls = WeightImprintingClassifier(wi_parameters)
met = wi_cls.evaluate_multi_label_metrics(
eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
)
for m in met:
metrics_values[0][m].append(met[m])
for idx_tr_ep in range(len(train_epochs_loop)):
ep_y = wi_y_
rev_label_mapping = {label_mapping[val]:val for val in label_mapping}
train_y = np.zeros((len(ep_y), num_ways))
for idx_y, _y in enumerate(ep_y):
for l in _y:
train_y[idx_y, rev_label_mapping[l]] = 1
wi_cls.train(wi_x, train_y, train_epochs_loop[idx_tr_ep], train_batch_size)
met = wi_cls.evaluate_multi_label_metrics(
eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
)
for m in met:
metrics_values[idx_tr_ep+1][m].append(met[m])
# _pred_y = wi_cls.predict_multi_label(eval_x, threshold)
# for j in range(len(_eval_y)):
# print([label_mapping[p] for p in _pred_y[j]], " vs ", _eval_y[j])
# met = wi_cls.evaluate_multi_label_metrics(
# eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
# )
# for m in met:
# metrics_values[m].append(met[m])
del wi_x
del eval_x
del wi_cls
if verbose:
pbar.update(idx_ep+1)
return metrics_values
def run_train_loop(
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
verbose=True,
normalize=True,
train_epochs_loop=[5],
train_batch_size=5,
metrics=['hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'classwise_accuracy', 'c_accuracy'],
threshold=0.72
):
metrics_values = [{m:[] for m in metrics} for _ in range(len(train_epochs_loop)+1)]
if verbose:
pbar = start_progress_bar(num_episodes)
all_logits = [] #cc
for idx_ep in range(num_episodes):
wi_x = train_emb_reshaped[idx_ep]
eval_x = eval_emb_per_episode[idx_ep]
ep_logits = [] #cc
_wi_y = wi_y[idx_ep]
wi_y_ = []
for i in range(len(_wi_y)):
for j in range(num_augmentations+1):
wi_y_.append(_wi_y[i])
if normalize:
wi_x = WeightImprintingClassifier.preprocess_input(wi_x)
eval_x = WeightImprintingClassifier.preprocess_input(eval_x)
eval_x = np.array(eval_x)
dim1, dim2 = eval_x.shape[0], eval_x.shape[-1]
eval_x = eval_x.reshape(dim1, dim2)
wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights(
wi_x, wi_y_, False, True
)
wi_parameters = {
"num_classes": num_ways,
"input_dims": wi_x.shape[-1],
"scale": False,
"dense_layer_weights": wi_weights,
"multi_label": True
}
wi_cls = WeightImprintingClassifier(wi_parameters)
met = wi_cls.evaluate_multi_label_metrics(
eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
)
for m in met:
metrics_values[0][m].append(met[m])
for idx_tr_ep in range(len(train_epochs_loop)):
ep_y = wi_y_
rev_label_mapping = {label_mapping[val]:val for val in label_mapping}
train_y = np.zeros((len(ep_y), num_ways))
for idx_y, _y in enumerate(ep_y):
for l in _y:
train_y[idx_y, rev_label_mapping[l]] = 1
wi_cls.train(wi_x, train_y, train_epochs_loop[idx_tr_ep], train_batch_size)
logits = wi_cls.predict_scores(eval_x).tolist() #cc
ep_logits.append(logits) #cc
met = wi_cls.evaluate_multi_label_metrics(
eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
)
for m in met:
metrics_values[idx_tr_ep+1][m].append(met[m])
all_logits.append(ep_logits) #cc
# _pred_y = wi_cls.predict_multi_label(eval_x, threshold)
# for j in range(len(_eval_y)):
# print([label_mapping[p] for p in _pred_y[j]], " vs ", _eval_y[j])
# met = wi_cls.evaluate_multi_label_metrics(
# eval_x, eval_y[idx_ep], label_mapping, threshold, metrics
# )
# for m in met:
# metrics_values[m].append(met[m])
del wi_x
del eval_x
del wi_cls
if verbose:
pbar.update(idx_ep+1)
return metrics_values, all_logits #cc
def embed_augmented_imgs(image, num_augmentations, trivial=False):
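  # Return CLIP embeddings for the original image plus num_augmentations augmented copies;
  # trivial=True simply duplicates the image, otherwise SimCLR-style random crop, flip,
  # color-jitter and blur augmentations are applied before encoding.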
if np.max(image) > 1:
image = image/255
def augment_image(image, num_augmentations, trivial=False):
""" Perform SimCLR augmentations on the image
"""
augmented_images = [image]
def _run_filters(image):
width = image.shape[1]
height = image.shape[0]
image_aug = simclr_data_augmentations.random_crop_with_resize(
image,
height,
width
)
image_aug = tf.image.random_flip_left_right(image_aug)
image_aug = simclr_data_augmentations.random_color_jitter(image_aug)
image_aug = simclr_data_augmentations.random_blur(
image_aug,
height,
width
)
image_aug = tf.reshape(image_aug, [image.shape[0], image.shape[1], 3])
image_aug = tf.clip_by_value(image_aug, 0., 1.)
return image_aug.numpy()
for _ in range(num_augmentations):
# aug_image = augmentations(image=image)
if trivial:
aug_image = image
else:
aug_image = _run_filters(image)
augmented_images.append(aug_image)
augmented_images = np.stack(augmented_images)
return augmented_images
if num_augmentations > 0:
aug_images = augment_image(image, num_augmentations, trivial)
else:
aug_images = np.array([image])
embeddings = encode_aug_img(aug_images)
return embeddings
def get_max_mean_jaccard_index_by_threshold(metrics_thresholds):
max_mean_jaccard = np.max([np.mean(mt['jaccard']) for mt in metrics_thresholds])
return max_mean_jaccard
def get_max_mean_f1_score_by_threshold(metrics_thresholds):
max_mean_f1_score = np.max([np.mean(mt['f1_score']) for mt in metrics_thresholds])
return max_mean_f1_score
def get_max_mean_jaccard_index_with_threshold(metrics_thresholds):
max_mean_jaccard = np.max([np.mean(mt['jaccard']) for mt in metrics_thresholds])
threshold = np.argmax([np.mean(mt['jaccard']) for mt in metrics_thresholds])
return max_mean_jaccard, threshold
def get_max_mean_f1_score_with_threshold(metrics_thresholds):
max_mean_f1 = np.max([np.mean(mt['o_f1']) for mt in metrics_thresholds])
threshold = np.argmax([np.mean(mt['o_f1']) for mt in metrics_thresholds])
return max_mean_f1, threshold
# chenni change whole block
def get_best_metric_and_threshold(metrics_thresholds, metric_name, thresholds, optimal='max'):
if optimal=='max':
opt_value = np.max([np.mean(mt[metric_name]) for mt in metrics_thresholds])
opt_threshold = thresholds[np.argmax([np.mean(mt[metric_name]) for mt in metrics_thresholds])]
if optimal=='min':
opt_value = np.min([np.mean(mt[metric_name]) for mt in metrics_thresholds])
opt_threshold = thresholds[np.argmin([np.mean(mt[metric_name]) for mt in metrics_thresholds])]
return opt_value, opt_threshold
def save_trends(num_ways, num_shot, num_augmentations, trivial, clip_metrics_thresholds, logits_thresholds):
# chenni change whole block
all_metrics = ['hamming', 'jaccard', 'subset_accuracy', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'c_accuracy']
f1_vals = []
f1_t_vals = []
jaccard_vals = []
jaccard_t_vals = []
final_dict = {}
for ind_metric in all_metrics:
vals = []
t_vals = []
final_array = []
for mt in clip_metrics_thresholds:
if ind_metric == "hamming":
ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"min")
else:
ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"max")
vals.append(ret_val)
t_vals.append(ret_t_val)
final_array.append(vals)
final_array.append(t_vals)
final_dict[ind_metric] = final_array
if trivial:
graph_filename = "new_metrics"+dataset_name+ "_Patch_Patch"+"_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_metrics_graphs.json"
else:
graph_filename = "new_metrics"+dataset_name+ "_Patch_Patch"+"_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_metrics_graphs.json"
with open(graph_filename, 'w') as f:
json.dump(final_dict, f)
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
delete_file_by_name(drive, folderid, graph_filename)
save_to_drive(drive, folderid, graph_filename)
#cc whole block
def save_results(num_ways, num_shots, num_aug, trivial, clip_metrics_thresholds, logits_thresholds):
if trivial:
#cc
results_filename = "new_metrics"+dataset_name+ "_Patch_Patch" + "_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_metrics_with_logits.json"
else:
#cc
results_filename = "new_metrics"+dataset_name+ "_Patch_Patch" + "_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_metrics_with_logits.json"
with open(results_filename, 'w') as f:
#cc
results = {'metrics': clip_metrics_thresholds,
"logits": logits_thresholds,
}
json.dump(results, f)
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
delete_file_by_name(drive, folderid, results_filename)
save_to_drive(drive, folderid, results_filename)
###Output
_____no_output_____
###Markdown
Setting multiple thresholds
###Code
thresholds = np.arange(0.3, 0.72, 0.01)
thresholds
def plot_metrics_by_threshold(
metrics_thresholds,
thresholds,
metrics=['jaccard', 'hamming', 'f1_score'],
title_suffix=""
):
legend = []
fig = plt.figure(figsize=(7,7))
plt.grid(True)
if 'jaccard' in metrics:
mean_jaccard_threshold = [np.mean(mt['jaccard']) for mt in metrics_thresholds]
opt_threshold_jaccard = thresholds[np.argmax(mean_jaccard_threshold)]
plt.plot(thresholds, mean_jaccard_threshold, c='blue')
plt.axvline(opt_threshold_jaccard, ls="--", c='blue')
legend.append("Jaccard Index")
legend.append(opt_threshold_jaccard)
if 'hamming' in metrics:
mean_hamming_threshold = [np.mean(mt['hamming']) for mt in metrics_thresholds]
opt_threshold_hamming = thresholds[np.argmin(mean_hamming_threshold)]
plt.plot(thresholds, mean_hamming_threshold, c='green')
plt.axvline(opt_threshold_hamming, ls="--", c='green')
legend.append("Hamming Score")
legend.append(opt_threshold_hamming)
if 'f1_score' in metrics:
mean_f1_threshold = [np.mean(mt['f1_score']) for mt in metrics_thresholds]
opt_threshold_f1 = thresholds[np.argmax(mean_f1_threshold)]
plt.plot(thresholds, mean_f1_threshold, c='red')
plt.axvline(opt_threshold_f1, ls="--", c='red')
legend.append("F1 Score")
legend.append(opt_threshold_f1)
plt.xlabel('Threshold')
plt.ylabel('Value')
plt.legend(legend)
plt.title(title_suffix+" Multi label metrics by threshold")
plt.show()
###Output
_____no_output_____
###Markdown
5 way 5 shot Picking indices
###Code
img_list = os.listdir(data_folder + split)
num_ways = 5
num_shot = 5
num_eval = 10
eval_indices = []
train_indices = []
wi_y = []
eval_y = []
shuffle = False
sort = True
num_episodes = 10
selected_labels_per_episode = []
label_dictionary = {la:label_dictionary[la] for la in label_dictionary if len(label_dictionary[la]) >= (num_shot+num_eval)}
unique_labels = list(label_dictionary.keys())
pbar = start_progress_bar(num_episodes)
for s in range(num_episodes):
# Setting random seed for replicability
np.random.seed(s)
_train_indices = []
_eval_indices = []
selected_labels = np.random.choice(unique_labels, size=num_ways, replace=False)
selected_labels_per_episode.append(selected_labels)
for la in selected_labels:
la_indices_train = label_dictionary[la]
la_indices_eval = label_dictionary[la]
tr_idx = []
ev_idx = []
while len(tr_idx) < num_shot:
idx = np.random.choice(la_indices_train)
fname = image_id_col[idx] + '.jpg'
if fname in img_list:
img = mpimg.imread(data_folder + split + '/' + image_id_col[idx] + '.jpg')
if img.ndim!=3:
del img
continue
if idx not in _train_indices and idx not in _eval_indices and idx not in tr_idx and fname in img_list:
tr_idx.append(idx)
while len(ev_idx) < num_eval:
idx = np.random.choice(la_indices_eval)
fname = image_id_col[idx] + '.jpg'
if fname in img_list:
img = mpimg.imread(data_folder + split + '/' + image_id_col[idx] + '.jpg')
if img.ndim!=3:
del img
continue
if idx not in _train_indices and idx not in _eval_indices and idx not in tr_idx and idx not in ev_idx and fname in img_list:
ev_idx.append(idx)
#print(len(ev_idx))
_train_indices = _train_indices + tr_idx
_eval_indices = _eval_indices + ev_idx
if shuffle:
np.random.shuffle(_train_indices)
np.random.shuffle(_eval_indices)
# if sort:
# _train_indices.sort()
# _eval_indices.sort()
train_indices.append(_train_indices)
eval_indices.append(_eval_indices)
_wi_y = []
for idx in _train_indices:
la = train_labels[idx]
_wi_y.append(list([l for l in la if l in selected_labels]))
_eval_y = []
for idx in _eval_indices:
la = train_labels[idx]
_eval_y.append(list([l for l in la if l in selected_labels]))
wi_y.append(_wi_y)
eval_y.append(_eval_y)
pbar.update(s+1)
eval_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
p = generate_masks(eval_indices[i])
eval_emb = []
for j in p:
emb = encode_patch(j)
eval_emb.append(emb)
del p
eval_emb_per_episode.append(np.array(eval_emb))
pbar.update(i+1)
###Output
[elapsed time: 0:03:05] |**********************************| (ETA: 00:00:00)
###Markdown
CLIP 0 Augmentations
###Code
num_augmentations = 0
Trivial = False
train_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
p = generate_masks(train_indices[i])
emb_per_img = []
for j in p:
emb = embed_augmented_imgs(j, num_augmentations, trivial=Trivial)
emb_per_img.append(emb)
train_emb_per_episode.append(np.array(emb_per_img))
del p
pbar.update(i+1)
train_emb_reshaped = []
for i in range(num_episodes):
train_emb_reshaped.append(train_emb_per_episode[i].reshape(train_emb_per_episode[i].shape[0]*train_emb_per_episode[i].shape[1], 512))
train_epochs_loop = [5 for _ in range(16)]
logits_thresholds = [] #cc
clip_metrics_thresholds = []
pbar = start_progress_bar(len(thresholds))
for i, t in enumerate(thresholds):
clip_metrics_t,all_logits = run_train_loop(#cc
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
threshold=t,
verbose=False,
train_epochs_loop=train_epochs_loop
)
clip_metrics_thresholds.append(clip_metrics_t)
logits_thresholds.append(all_logits)#cc
pbar.update(i+1)
save_results(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
save_trends(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
###Output
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s0a_metrics_with_logits.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s0a_metrics_graphs.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
###Markdown
5 Augmentations
###Code
num_augmentations = 5
Trivial = False
train_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
#interacting
for i in range(num_episodes):
p = generate_masks(train_indices[i])
emb_per_img = []
for j in p:
emb = embed_augmented_imgs(j, num_augmentations, trivial=Trivial)
emb_per_img.append(emb)
train_emb_per_episode.append(np.array(emb_per_img))
del p
pbar.update(i+1)
train_emb_reshaped = []
for i in range(num_episodes):
train_emb_reshaped.append(train_emb_per_episode[i].reshape(train_emb_per_episode[i].shape[0]*train_emb_per_episode[i].shape[1], 512))
train_epochs_loop = [5 for _ in range(16)]
logits_thresholds = [] #cc
clip_metrics_thresholds = []
pbar = start_progress_bar(len(thresholds))
for i, t in enumerate(thresholds):
clip_metrics_t,all_logits = run_train_loop(#cc
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
threshold=t,
verbose=False,
train_epochs_loop=train_epochs_loop
)
clip_metrics_thresholds.append(clip_metrics_t)
logits_thresholds.append(all_logits)#cc
pbar.update(i+1)
save_results(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
save_trends(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
###Output
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s5a_metrics_with_logits.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s5a_metrics_graphs.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
###Markdown
5 trivial Augmentations
###Code
num_augmentations = 5
Trivial = True
train_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
p = generate_masks(train_indices[i])
emb_per_img = []
for j in p:
emb = embed_augmented_imgs(j, num_augmentations, trivial=Trivial)
emb_per_img.append(emb)
train_emb_per_episode.append(np.array(emb_per_img))
del p
pbar.update(i+1)
train_emb_reshaped = []
for i in range(num_episodes):
train_emb_reshaped.append(train_emb_per_episode[i].reshape(train_emb_per_episode[i].shape[0]*train_emb_per_episode[i].shape[1], 512))
train_epochs_loop = [5 for _ in range(16)]
logits_thresholds = [] #cc
clip_metrics_thresholds = []
pbar = start_progress_bar(len(thresholds))
for i, t in enumerate(thresholds):
clip_metrics_t,all_logits = run_train_loop(#cc
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
threshold=t,
verbose=False,
train_epochs_loop=train_epochs_loop
)
clip_metrics_thresholds.append(clip_metrics_t)
logits_thresholds.append(all_logits)#cc
pbar.update(i+1)
save_results(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
save_trends(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
###Output
_____no_output_____
###Markdown
10 Augmentations
###Code
num_augmentations = 10
Trivial = False
train_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
p = generate_masks(train_indices[i])
emb_per_img = []
for j in p:
emb = embed_augmented_imgs(j, num_augmentations, trivial=Trivial)
emb_per_img.append(emb)
train_emb_per_episode.append(np.array(emb_per_img))
del p
pbar.update(i+1)
train_emb_reshaped = []
for i in range(num_episodes):
train_emb_reshaped.append(train_emb_per_episode[i].reshape(train_emb_per_episode[i].shape[0]*train_emb_per_episode[i].shape[1], 512))
train_epochs_loop = [5 for _ in range(16)]
logits_thresholds = [] #cc
clip_metrics_thresholds = []
pbar = start_progress_bar(len(thresholds))
for i, t in enumerate(thresholds):
clip_metrics_t,all_logits = run_train_loop(#cc
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
threshold=t,
verbose=False,
train_epochs_loop=train_epochs_loop
)
clip_metrics_thresholds.append(clip_metrics_t)
logits_thresholds.append(all_logits)#cc
pbar.update(i+1)
save_results(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
save_trends(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
###Output
Deleting new_metricsIMaterialist_Patch_Patch_5w5s10a_metrics_with_logits.json from GDrive
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s10a_metrics_with_logits.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
Deleting new_metricsIMaterialist_Patch_Patch_5w5s10a_metrics_graphs.json from GDrive
Uploaded new_metricsIMaterialist_Patch_Patch_5w5s10a_metrics_graphs.json to https://drive.google.com/drive/u/1/folders/1RbtNKWRThbY6ArnqsYCYp2EFi13kP7dN
###Markdown
10 Trivial Augmentations
###Code
num_augmentations = 10
Trivial = True
train_emb_per_episode = []
pbar = start_progress_bar(num_episodes)
for i in range(num_episodes):
p = generate_masks(train_indices[i])
emb_per_img = []
for j in p:
emb = embed_augmented_imgs(j, num_augmentations, trivial=Trivial)
emb_per_img.append(emb)
train_emb_per_episode.append(np.array(emb_per_img))
del p
pbar.update(i+1)
train_emb_reshaped = []
for i in range(num_episodes):
train_emb_reshaped.append(train_emb_per_episode[i].reshape(train_emb_per_episode[i].shape[0]*train_emb_per_episode[i].shape[1], 512))
train_epochs_loop = [5 for _ in range(16)]
logits_thresholds = [] #cc
clip_metrics_thresholds = []
pbar = start_progress_bar(len(thresholds))
for i, t in enumerate(thresholds):
clip_metrics_t,all_logits = run_train_loop(#cc
train_indices,
eval_indices,
wi_y,
eval_y,
num_episodes,
num_ways,
threshold=t,
verbose=False,
train_epochs_loop=train_epochs_loop
)
clip_metrics_thresholds.append(clip_metrics_t)
logits_thresholds.append(all_logits)#cc
pbar.update(i+1)
save_results(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
save_trends(num_ways, num_shot, num_augmentations, Trivial, clip_metrics_thresholds, logits_thresholds)
max_mean_vals = []
threshold_vals = []
x_vals = [np.sum(train_epochs_loop[:i+1]) for i in range(len(train_epochs_loop))]
for idx_tr_ep in range(len(train_epochs_loop)):
mt = []
for idx_t in range(len(clip_metrics_thresholds)):
mt.append(clip_metrics_thresholds[idx_t][idx_tr_ep])
mmv, t = get_max_mean_jaccard_index_with_threshold(mt)
max_mean_vals.append(mmv)
threshold_vals.append(thresholds[t])
plt.plot(x_vals, max_mean_vals)
plt.title("Jaccard index vs training epochs for {} way {} shot on {} with CLIP".format(num_ways, num_shot, dataset_name))
plt.xlabel("No. of training epochs")
plt.ylabel("Jaccard Index")
# plt.xticks(x_vals, x_vals)
# plt.ylim([0, np.max(max_mean_vals)])
plt.show()
max_mean_vals = []
threshold_vals = []
x_vals = [np.sum(train_epochs_loop[:i+1]) for i in range(len(train_epochs_loop))]
for idx_tr_ep in range(len(train_epochs_loop)):
mt = []
for idx_t in range(len(clip_metrics_thresholds)):
mt.append(clip_metrics_thresholds[idx_t][idx_tr_ep])
mmv, t = get_max_mean_f1_score_with_threshold(mt)
max_mean_vals.append(mmv)
threshold_vals.append(thresholds[t])
plt.plot(x_vals, max_mean_vals)
plt.title("F1 Score vs training epochs with 5 augmentations for {} way {} shot on {} with CLIP".format(num_ways, num_shot, dataset_name))
plt.xlabel("No. of training epochs")
plt.ylabel("F1 Score")
# plt.xticks(x_vals, x_vals)
# plt.ylim([0, np.max(max_mean_vals)])
plt.show()
plt.plot(x_vals, threshold_vals)
plt.title("Threshold vs training epochs for {} way {} shot on {} with CLIP".format(num_ways, num_shot, dataset_name))
plt.xlabel("No. of training epochs")
plt.ylabel("Threshold")
plt.xticks(x_vals, x_vals)
# plt.ylim([0, np.max(max_mean_vals)])
plt.show()
###Output
_____no_output_____ |
notebooks/comparison-sb9.ipynb | ###Markdown
Comparison with the ninth spectroscopic binary catalog (SB9)
###Code
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pickle
from astropy.table import Table
import sys
sys.path.insert(0, "../")
import velociraptor
%matplotlib inline
sb9 = Table.read("../data/sb9_matched_by_position.fits")
print(sb9.dtype.names)
data = Table.read("../data/rv-all.fits")
sb9 = Table.read("../data/sb9_matched_by_position.fits")
sb9_indices = np.nan * np.ones(len(sb9))
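# For each SB9 entry, find the row index of the matching source_id in the RV table;
# entries with no match stay NaN and are dropped below.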
for i, source_id in enumerate(sb9["source_id"]):
try:
sb9_indices[i] = np.where(data["source_id"] == int(source_id))[0][0]
except:
continue
finite = np.isfinite(sb9_indices)
sb9 = sb9[finite]
data = data[sb9_indices[finite].astype(int)]
print(len(data), len(sb9))
scalar = 1.0 / (sb9["Per"] * (1 - sb9["e"]**2)**0.5)
x = sb9["K1"] * scalar
y = data["rv_excess_variance"]**0.5 * scalar
c = data["p_sb_50"]
kwds = dict(vmin=0, vmax=1, s=25, cmap="coolwarm_r")
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
scat = ax.scatter(x, y, c=c, **kwds)
ax.loglog()
ax.set_xlim(10**-4.5, 10**3)
ax.set_ylim(10**-4.5, 10**3)
cbar = plt.colorbar(scat)
cbar.set_label(r"\textrm{binary probability}")
ax.set_xlabel(r"${K}/{P\sqrt{1-e^2}}$")
ax.set_ylabel(r"${\sigma_\textrm{vrad excess}}/{P\sqrt{1-e^2}}$")
fig.tight_layout()
def what_did_we_find(data, consistent_with_single,
latex_label_name=None, ax=None, **kwargs):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
    kwds = dict(histtype="bar", stacked=True, alpha=0.5)
    kwds.update(kwargs)
    finite = np.isfinite(data)
    ax.hist([
        data[finite * consistent_with_single],
        data[finite * (~consistent_with_single)],
        ], label=("single", "binary"), **kwds)
legend = plt.legend()
if latex_label_name is not None:
ax.set_xlabel(latex_label_name)
ax.set_ylabel(r"\textrm{count}")
fig.tight_layout()
return fig
label_names = ("Per", "e", "omega", "K1", "K2", "V0", "rms1", "rms2")
kwds = dict(histtype="bar", stacked="True", normed=1, alpha=1, bins=50)
for label_name in label_names:
v = sb9[label_name]
if label_name in ("Per", "rms1", "rms2", "K1", "K2"):
what_did_we_find(np.log10(v), data["p_sb_50"] < 0.5,
latex_label_name=label_name, **kwds)
else:
what_did_we_find(v, data["p_sb_50"] < 0.5,
latex_label_name=label_name, **kwds)
fig = what_did_we_find(np.log10(sb9["K1"]/sb9["Per"]), data["p_sb_50"] < 0.5,
latex_label_name=r"$K/P$", **kwds)
# What about within the period range that we can reasonably detect?
# We can probably detect out to twice the observing span of Gaia (21 mo): 42 months.
detectable = (sb9["Per"] < 42*30.0)
fig = what_did_we_find(np.log10(sb9["K1"]/sb9["Per"])[detectable],
(data["p_sb_50"] < 0.5)[detectable],
latex_label_name=r"$K/P$", **kwds)
label_names = ("Per", "e", "omega", "K1", "K2", "V0", "rms1", "rms2")
kwds = dict(histtype="bar", stacked="True", normed=1, alpha=1, bins=50)
for label_name in label_names:
v = sb9[label_name][detectable]
m = (data["p_sb_50"] < 0.5)[detectable]
if label_name in ("Per", "rms1", "rms2", "K1", "K2"):
what_did_we_find(np.log10(v), m,
latex_label_name=label_name, **kwds)
else:
what_did_we_find(v, m,
latex_label_name=label_name, **kwds)
fig, ax = plt.subplots(figsize=(8, 6))
scat = ax.scatter(sb9["K1"], sb9["Per"],
c=data["p_sb_50"], s=30)
ax.loglog()
ax.set_xlabel(r"$K\,\,/\,\,\mathrm{km\,s}^{-1}$")
ax.set_ylabel(r"$P\,\,/\,\,\mathrm{days}^{-1}$")
cbar = plt.colorbar(scat)
cbar.set_label(r"\textrm{binarity probability}")
fig, ax = plt.subplots(figsize=(10, 8))
scat = ax.scatter(sb9["K1"], sb9["Per"],
c=np.log10(data["rv_excess_variance"]**0.5), s=30)
ax.loglog()
ax.set_xlabel(r"$K\,\,/\,\,\mathrm{km\,s}^{-1}$")
ax.set_ylabel(r"$P\,\,/\,\,\mathrm{days}^{-1}$")
cbar = plt.colorbar(scat)
cbar.set_label(r"$\log_{10}(\sigma_\mathrm{rv,excess}\,\,/\,\,\mathrm{km\,s}^{-1})$")
###Output
/Users/arc/anaconda2/envs/py3/lib/python3.6/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in log10
after removing the cwd from sys.path.
|
stable/_downloads/ac6935fd2f56d2a7d16c589c65a46140/plot_artifacts_correction_maxwell_filtering.ipynb | ###Markdown
Artifact correction with Maxwell filter
This tutorial shows how to clean MEG data with Maxwell filtering. Maxwell filtering in MNE can be used to suppress sources of external interference and compensate for subject head movements. See `maxwell` for more details.
###Code
import mne
from mne.preprocessing import maxwell_filter
data_path = mne.datasets.sample.data_path()
###Output
_____no_output_____
###Markdown
Set parameters
###Code
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
ctc_fname = data_path + '/SSS/ct_sparse_mgh.fif'
fine_cal_fname = data_path + '/SSS/sss_cal_mgh.dat'
###Output
_____no_output_____
###Markdown
Preprocess with Maxwell filtering
###Code
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053', 'MEG 1032', 'MEG 2313'] # set bads
# Here we don't use tSSS (set st_duration) because MGH data is very clean
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname, calibration=fine_cal_fname)
###Output
_____no_output_____
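###Markdown
(Aside, not part of the original tutorial: if the recording did contain strong nearby or external artifacts, temporal SSS could be enabled simply by passing `st_duration` to the same call; the 10-second buffer below is an arbitrary illustrative choice.)
###Code
# Aside: the same call with temporal SSS (tSSS) enabled via st_duration
raw_tsss = maxwell_filter(raw, cross_talk=ctc_fname, calibration=fine_cal_fname,
                          st_duration=10.)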
###Markdown
Select events to extract epochs from, pick M/EEG channels, and plot evoked
###Code
tmin, tmax = -0.2, 0.5
event_id = {'Auditory/Left': 1}
events = mne.find_events(raw, 'STI 014')
for r, kind in zip((raw, raw_sss), ('Raw data', 'Maxwell filtered data')):
epochs = mne.Epochs(r, events, event_id, tmin, tmax,
picks=('meg', 'eog'),
baseline=(None, 0), reject=dict(eog=150e-6))
evoked = epochs.average()
evoked.plot(window_title=kind, ylim=dict(grad=(-200, 250),
mag=(-600, 700)), time_unit='s')
###Output
_____no_output_____ |
Python/PythonIntro/ExerciseSolutions.ipynb | ###Markdown
Introduction to Python exercise solutions
Exercise: Reading text from a file and splitting
*Alice's Adventures in Wonderland* is full of memorable characters. The main characters from the story are listed, one per line, in the file named `Characters.txt`.
NOTE: we will not always explicitly demonstrate everything you need to know in order to complete an exercise. Instead we focus on teaching you how to discover available methods and how to use the help function to learn how to use them. It is expected that you will spend some time during the exercises looking for appropriate methods and perhaps reading documentation.
###Code
# 1. Open the `Characters.txt` file and read its contents.
characters_txt = open("Characters.txt").read()
# 2. Split text on newlines to produce a list with one element per line.
# Store the result as "alice_characters".
alice_characters = characters_txt.splitlines()
###Output
_____no_output_____
###Markdown
Exercise: count the number of main characters
So far we've learned that there are 12 chapters, around 830 paragraphs, and about 26 thousand words in *Alice's Adventures in Wonderland*. Along the way we've also learned how to open a file and read its contents, split strings, calculate the length of objects, discover methods for string and list objects, and index/subset lists in Python. Now it is time for you to put these skills to use to learn something about the main characters in the story.
###Code
# 1. Count the number of main characters in the story (i.e., get the length
#    of the list you created in the previous exercise).
len(alice_characters)
# 2. Extract and print just the first character from the list you created in
#    the previous exercise.
print(alice_characters[0])
# 3. (BONUS, optional): Sort the list you created in step 2 alphabetically, and then extract the last element.
sorted(alice_characters)[-1]
###Output
_____no_output_____
###Markdown
Exercise: Iterating and counting things
Now that we know how to iterate using for-loops and list comprehensions, the possibilities really start to open up. For example, we can use these techniques to count the number of times each character appears in the story.
###Code
# 1. Make sure you have both the text and the list of characters.
#
# Open and read both "Alice_in_wonderland.txt" and
# "Characters.txt" if you have not already done so.
characters_txt = open("Characters.txt").read()
alice_txt = open("Alice_in_wonderland.txt").read()
# 2. Which chapter has the most words?
#
#    Split the text into chapters (i.e., split on "CHAPTER ")
# and use a for-loop or list comprehension to iterate over
# the chapters. For each chapter, split it into words and
# calculate the length.
alice_chapters = alice_txt.split("CHAPTER ")[1:]
[len(chapter.split()) for chapter in alice_chapters]
###Output
_____no_output_____
###Markdown
Chapter 4 has the most words (2614)
###Code
# 3. How many times is each character mentioned in the text?
# Iterate over the list of characters using a for-loop or
# list comprehension. For each character, call the count method
# with that character as the argument.
[alice_txt.count(character)
for character in characters_txt.splitlines()]
# 4. (BONUS, optional): Put the character counts computed
# above in a dictionary with character names as the keys and
# counts as the values.
characters = characters_txt.splitlines()
character_mentions = [alice_txt.count(character)
for character in characters]
dict(zip(characters, character_mentions))
# 5. (BONUS, optional): Use a nested for-loop or nested comprehension
# to calculate the number of times each character is mentioned
# in each chapter.
{chapter.splitlines()[0]:
{character: chapter.count(character)
for character in characters}
for chapter in alice_chapters}
###Output
_____no_output_____ |
lectures/Lecture3-regression-regularize.ipynb | ###Markdown
University of Washington: Machine Learning and Statistics Lecture 2: Regression (basis functions, regularization, cross-validation)
Andrew Connolly and Stephen Portillo
Resources for this notebook include: - [Textbook](https://press.princeton.edu/books/hardcover/9780691198309/statistics-data-mining-and-machine-learning-in-astronomy) Chapter 8. - [astroML website](https://www.astroml.org/index.html)
This notebook is developed based on material from A. Connolly, Z. Ivezic, M. Juric, S. Portillo, G. Richards, B. Sipocz, J. VanderPlas, D. Hogg, and many others.
The notebook and associated material are available from [github](https://github.com/uw-astro/astr-598a-win22).
###Code
pip install corner
###Output
_____no_output_____
###Markdown
This notebook includes: [Linear Basis Function Regression](basis), [Regularization](regularization), [Cross Validation](cross-validation), [Non-linear Regression with MCMC](mcmc)
Linear Basis Function Regression [Go to top](toc)
We have seen how to implement linear regression and by extension polynomial regression. We don't have to use only polynomials ($x$, $x^2$, $x^3$, etc.) - we can use any function $f(x)$ and still have a linear problem (in the unknown coefficients $a_i$), e.g. $$ y(x) = \sum_i^N a_i \, f_i(x) $$ Example: let's express a complicated $y(x)$, such as the cosmological distance-redshift relation, as a sum of Gaussian functions: $ f_i(x) = N(x|\mu_i, \sigma)$, where the $\mu_i$ are defined on a grid and $\sigma$ is constant, chosen depending on the intrinsic problem resolution. NOTE: We suppress warnings for the packages below (this is not recommended in general).
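(Aside, not in the original notes: a minimal sketch of the idea - the design matrix has one column per basis function and the coefficients follow from ordinary least squares; the toy data and Gaussian grid below are arbitrary choices for illustration.)
###Code
# Minimal sketch: linear regression on a Gaussian basis via ordinary least squares
import numpy as np
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(0, 2, 50))        # toy inputs
y = np.sin(2 * x) + 0.1 * rng.randn(50)   # toy targets
centers = np.linspace(0, 2, 10)           # Gaussian centers (arbitrary grid)
sigma = centers[1] - centers[0]           # common width
# Design matrix: a constant-offset column plus one column per Gaussian basis function
M = np.hstack([np.ones((len(x), 1)),
               np.exp(-0.5 * ((x[:, None] - centers[None, :]) / sigma) ** 2)])
coeffs, *_ = np.linalg.lstsq(M, y, rcond=None)   # the a_i in y(x) = sum_i a_i f_i(x)
print("rms residual:", np.sqrt(np.mean((y - M @ coeffs) ** 2)))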
###Code
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
###Output
_____no_output_____
###Markdown
Using astroML regression to fit a set of basis functions
###Code
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
#------------------------------------------------------------
# Generate data
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
# Plot data and fit
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.05, wspace=0.05)
ax = fig.add_subplot(111)
ax.set_xlim(0.01, 1.8)
ax.set_ylim(35., 50.)
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1)
ax.set_ylabel(r'$\mu$')
ax.set_xlabel(r'$z$')
%matplotlib inline
from astroML.linear_model import LinearRegression, BasisFunctionRegression
import numpy as np
from matplotlib import pyplot as plt
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
from astroML.utils import split_samples
#------------------------------------------------------------
# Generate data
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
cosmo = Cosmology()
z = np.linspace(0.01, 2, 1000)
mu_true = np.asarray(list(map(cosmo.mu, z)))  # list() needed so map() is evaluated in Python 3
# Split the data into a training and test sample
(z_train, z_test), (mu_train, mu_test) = split_samples(z_sample,
np.column_stack((mu_sample,dmu)),
[0.75, 0.25], random_state=0)
# Define our Gaussians
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
#------------------------------------------------------------
nGaussian = 20
basis_mu = np.linspace(0, 2, nGaussian+2)[1:-1, None]
basis_sigma = (basis_mu[1] - basis_mu[0])
clf = BasisFunctionRegression('gaussian', mu=basis_mu, sigma=basis_sigma)
n_constraints = len(basis_mu) + 1
#fit the model
clf.fit(z_train[:, None], mu_train[:,0], mu_train[:,1])
mu_sample_fit = clf.predict(z_train[:, None])
# Plot data and fit
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.05, wspace=0.05)
ax = fig.add_subplot(111)
ax.set_xlim(0.01, 1.8)
ax.set_ylim(35., 50.)
ax.errorbar(z_train, mu_train[:,0], mu_train[:,1], fmt='.k', ecolor='gray', lw=1)
z_fit = np.linspace(0,2,100)
mu_fit = clf.predict(z_fit[:, None])
ax.plot(z_fit, mu_fit, '-k',color='red')
ax.set_ylabel(r'$\mu$')
ax.set_xlabel(r'$z$')
err = np.sqrt(np.sum(((mu_sample_fit - mu_train[:,0])** 2) / (len(mu_sample_fit))))
ax.text(0.2, 49, "rms error = {:.3f}".format(err))
#plot the gaussians
for i in range(nGaussian):
if (clf.coef_[i+1] > 0.):
ax.plot(z,clf.coef_[i+1]*gaussian_basis(z, basis_mu[i], basis_sigma) + clf.coef_[0],color='blue')
else:
ax.plot(z,clf.coef_[i+1]*gaussian_basis(z, basis_mu[i], basis_sigma) + clf.coef_[0],color='blue',ls='--')
###Output
_____no_output_____
###Markdown
Exercise: How many Gaussians is the right number for the fit?
Cross-validation
As the complexity of a model increases, the model fits the training points more and more closely. This does not result in a better fit to the data: we are overfitting the data (the model has high variance - a small change in a training point can change the model dramatically). This is a classic bias-variance trade-off.
We can evaluate this using a training set (50-70% of the sample), a cross-validation set (15-25%) and a test set (15-25%). The cross-validation set evaluates the cross-validation error $\epsilon_{\rm cv}$ of the model (large for overfit models). The test set gives an estimate of the reliability of the model.
We can define a cross-validation error $\epsilon_{\rm cv}^{(n)} = \sqrt{\frac{1}{N_{\rm cv}}\sum_{i=1}^{N_{\rm cv}} \left[y_i - \sum_{m=0}^d \theta_m^{(n)}x_i^m\right]^2}$ where we train on the first $n$ points ($\le N_{\rm train}$) and evaluate the error on the $N_{\rm cv}$ cross-validation points.
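(Aside, using the training/cross-validation split defined earlier: the short cell below evaluates this kind of error for simple polynomial models of increasing degree, rather than the Gaussian basis, purely as an illustration.)
###Code
# Aside: training vs cross-validation rms error for polynomial fits of increasing degree
for degree in range(1, 6):
    coeffs = np.polyfit(z_train, mu_train[:, 0], degree)
    train_rms = np.sqrt(np.mean((np.polyval(coeffs, z_train) - mu_train[:, 0]) ** 2))
    cv_rms = np.sqrt(np.mean((np.polyval(coeffs, z_test) - mu_test[:, 0]) ** 2))
    print("degree {}: training rms = {:.3f}, cross-validation rms = {:.3f}".format(degree, train_rms, cv_rms))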
###Code
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.05, wspace=0.05)
ax = fig.add_subplot(111)
step = 1
nGaussians = np.arange(2, 10, step)
training_err = np.zeros(nGaussians.shape)
crossval_err = np.zeros(nGaussians.shape)
DOF_err = np.zeros(nGaussians.shape)
for i,j in enumerate(nGaussians):
# fit the data
basis_mu = np.linspace(0, 2, j)[:, None]
basis_sigma = 3 * (basis_mu[1] - basis_mu[0])
clf = BasisFunctionRegression('gaussian', mu=basis_mu, sigma=basis_sigma)
n_constraints = len(basis_mu) + 1
clf.fit(z_train[:, None], mu_train[:,0], mu_train[:,1])
mu_train_fit = clf.predict(z_train[:, None])
training_err[i] = np.sqrt(np.sum(((mu_train_fit - mu_train[:,0])) ** 2) / len(mu_train[:,0]))
DOF_err[i] = np.sqrt(np.sum(((mu_train_fit - mu_train[:,0])) ** 2) / (len(mu_train[:,0]) - n_constraints))
mu_test_fit = clf.predict(z_test[:, None])
crossval_err[i] = np.sqrt(np.sum(((mu_test_fit - mu_test[:,0]))**2) / len(mu_test[:,0]))
ax.plot(nGaussians, crossval_err, '--k', label='cross-validation')
ax.plot(nGaussians, training_err, '-k', label='training')
ax.set_xlabel('Number of Gaussians')
ax.set_ylabel('rms error')
ax.legend(loc=2)
ax.set_xlim(0, nGaussians[-1])
plt.show()
###Output
_____no_output_____
###Markdown
Penalized likelihoods
Bayes Information Criterion (BIC): $ {\rm BIC} \equiv -2 \ln\left[L^0(M)\right] + k \, \ln N $
Akaike information criterion (AIC): ${\rm AIC} \equiv -2 \ln\left(L^0(M)\right) + 2\,k + { 2k\,(k+1) \over N - k -1}$
$L^0(M)$ : maximum value of the data likelihood. For complex models BIC penalizes more heavily than AIC.
Regularization [Go to top](toc)
If we progressively increase the number of terms in the fit we reach a regime where we are overfitting the data (i.e. there are not enough degrees of freedom). For cases where we are concerned with overfitting we can apply constraints (usually on smoothness, the number of coefficients, or the size of the coefficients), e.g. by minimizing the penalized sum of squares
> $(Y - M \theta)^T(Y- M \theta) + \lambda \, \theta^T \theta$
with $\lambda$ the regularization parameter. This leads to a solution for the parameters of the model
> $\theta = (M^T C^{-1} M + \lambda I)^{-1} (M^T C^{-1} Y)$
with $I$ the identity matrix. From the Bayesian perspective this is the same as applying a prior to the regression coefficients
> $p(\theta | I ) \propto \exp{\left(\frac{-(\lambda \theta^T \theta)}{2}\right)}$
which, when multiplied by the likelihood for regression, gives the same posterior as described above.
Ridge (Tikhonov) regularization
> $ |\theta |^2 < s$
The penalty is on the sum of the squares of the regression coefficients; this penalizes the size of the coefficients.
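(Aside, not part of the original lecture: the closed-form ridge solution above can be implemented in a few lines of NumPy; the toy design matrix and noise level below are arbitrary choices for illustration, and larger $\lambda$ shrinks the coefficients toward zero.)
###Code
# Aside: direct implementation of the ridge solution theta = (M^T C^-1 M + lambda*I)^-1 M^T C^-1 Y
def ridge_solution(M, Y, Cinv, lam):
    A = M.T @ Cinv @ M + lam * np.eye(M.shape[1])
    return np.linalg.solve(A, M.T @ Cinv @ Y)

rng = np.random.RandomState(42)
M_toy = rng.randn(30, 5)                            # toy design matrix (30 points, 5 basis functions)
theta_true = np.array([1.0, -2.0, 0.5, 0.0, 3.0])
Y_toy = M_toy @ theta_true + 0.1 * rng.randn(30)    # toy observations
Cinv_toy = np.eye(30)                               # homoscedastic, unit-variance errors
for lam in (0.0, 1.0, 10.0):
    print("lambda =", lam, "->", np.round(ridge_solution(M_toy, Y_toy, Cinv_toy, lam), 2))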
###Code
nGaussians=20
basis_mu = np.linspace(0, 2, nGaussians)
basis_sigma = 3 * (basis_mu[1] - basis_mu[0])
#------------------------------------------------------------
# Set up the figure to plot the results
fig = plt.figure(figsize=(12, 7))
fig.subplots_adjust(left=0.07, right=0.95,
bottom=0.08, top=0.95,
hspace=0.1, wspace=0.15)
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
lamVal=0.09
z_fit = np.linspace(0., 2., 100)
regularization = ['none', 'l2',]
kwargs = [dict(), dict(alpha=lamVal),]
X = gaussian_basis(z_train[:, np.newaxis], basis_mu, basis_sigma)
for i in range(2):
clf = LinearRegression(regularization=regularization[i],
fit_intercept=True, kwds=kwargs[i])
clf.fit(X, mu_train[:,0], mu_train[:,1])
w = clf.coef_
mu_fit = clf.predict(gaussian_basis(z_fit[:, None], basis_mu, basis_sigma))
# plot fit
ax = fig.add_subplot(221 + i)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.plot(z_fit, mu_fit, 'k')
ax.errorbar(z_train, mu_train[:,0], mu_train[:,1], fmt='.k', ecolor='gray', lw=1)
ax.set_xlim(0.001, 1.8)
ax.set_ylim(36, 50)
# plot weights
ax = fig.add_subplot(223 + i)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.set_xlabel('$z$')
if i == 0:
ax.set_ylabel(r'$\theta$')
w *= 1E-12
ax.text(0, 1.01, r'$\rm \times 10^{12}$',
transform=ax.transAxes, fontsize=14)
ax.scatter(basis_mu, w[1:], s=9, lw=0, c='k')
ax.set_xlim(-0.05, 1.8)
###Output
_____no_output_____
###Markdown
$\lambda$ is the amount of regularization. How do we choose $\lambda$?
Least absolute shrinkage and selection (Lasso) regularization
> $(Y - M \theta)^T(Y- M \theta) + \lambda |\theta|$
> $ |\theta | < s$
The penalty is on the absolute values of the regression coefficients.
QUESTION: What is the result of Lasso? What will happen to the regression coefficients?
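(Aside: one way to see the answer empirically; the cell below uses scikit-learn's Lasso on arbitrary toy data, not the astroML wrapper used elsewhere in this notebook, and shows that the L1 penalty drives many coefficients exactly to zero.)
###Code
# Aside: the L1 (lasso) penalty produces sparse solutions - many coefficients become exactly zero
from sklearn.linear_model import Lasso
rng = np.random.RandomState(1)
X_toy = rng.randn(50, 10)
y_toy = 3.0 * X_toy[:, 0] - 2.0 * X_toy[:, 3] + 0.1 * rng.randn(50)
lasso = Lasso(alpha=0.1).fit(X_toy, y_toy)
print("non-zero coefficients:", np.sum(lasso.coef_ != 0), "of", len(lasso.coef_))
print(np.round(lasso.coef_, 2))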
###Code
nGaussians=50
basis_mu = np.linspace(0, 2, nGaussians)
basis_sigma = 3 * (basis_mu[1] - basis_mu[0])
#------------------------------------------------------------
# Set up the figure to plot the results
fig = plt.figure(figsize=(12, 7))
fig.subplots_adjust(left=0.07, right=0.95,
bottom=0.08, top=0.95,
hspace=0.1, wspace=0.15)
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
lamVal=0.0009
z_fit = np.linspace(0., 2., 100)
regularization = ['none', 'l1',]
kwargs = [dict(), dict(alpha=lamVal),]
X = gaussian_basis(z_train[:, np.newaxis], basis_mu, basis_sigma)
for i in range(2):
clf = LinearRegression(regularization=regularization[i],
fit_intercept=True, kwds=kwargs[i])
clf.fit(X, mu_train[:,0], mu_train[:,1])
w = clf.coef_
mu_fit = clf.predict(gaussian_basis(z_fit[:, None], basis_mu, basis_sigma))
# plot fit
ax = fig.add_subplot(221 + i)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.plot(z_fit, mu_fit, 'k')
ax.errorbar(z_train, mu_train[:,0], mu_train[:,1], fmt='.k', ecolor='gray', lw=1)
ax.set_xlim(0.001, 1.8)
ax.set_ylim(36, 50)
# plot weights
ax = fig.add_subplot(223 + i)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.set_xlabel('$z$')
if i == 0:
ax.set_ylabel(r'$\theta$')
w *= 1E-12
ax.text(0, 1.01, r'$\rm \times 10^{12}$',
transform=ax.transAxes, fontsize=14)
ax.scatter(basis_mu, w[1:], s=9, lw=0, c='k')
ax.set_xlim(-0.05, 1.8)
###Output
_____no_output_____
###Markdown
Non-linear Regression with MCMC [Go to top](toc)
Here MCMC stands for Markov Chain Monte Carlo. In statistics, Markov chain Monte Carlo (MCMC) methods comprise a class of algorithms for sampling from a probability distribution. By constructing a Markov chain that has the desired distribution as its equilibrium distribution, one can obtain a sample of the desired distribution by recording states from the chain. The more steps are included, the more closely the distribution of the sample matches the actual desired distribution. Various algorithms exist for constructing chains, including the Metropolis–Hastings algorithm. For more details, see Chapter 5 in the textbook and https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo
Highly recommended supplemental background reading: - [Thomas Wiecki: "MCMC sampling for dummies"](http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/) For those who want to dive deep: - [Andrieu et al. "An Introduction to MCMC for Machine Learning" (includes a few pages of history)](http://www.cs.princeton.edu/courses/archive/spr06/cos598C/papers/AndrieuFreitasDoucetJordan2003.pdf)
**To find the maximum of a multi-dimensional function (e.g. likelihood or Bayesian posterior pdf) we need a better method than the brute force grid search!** For example, if we could generate a **sample** of $\{\mu_i, \sigma_i\}$ drawn from the posterior pdf for $\mu$ and $\sigma$, we could simply get the posterior pdf for $\mu$ and $\sigma$ by plotting histograms of $\mu$ and $\sigma$ (similar to the above figure). As simple as that! First we'll say a few words about Monte Carlo in general, and then we'll talk about a special kind of Monte Carlo called Markov Chain Monte Carlo.
Definition of the general problem
What we want to be able to do is to evaluate multi-dimensional ($\theta$ is a k-dimensional vector) integrals of the form $$ I = \int g(\theta) \, p(\theta) \, d\theta,$$ where for simplicity the posterior pdf is described as $$ p(\theta) \equiv p(M,\theta \,|\,D,I) \propto p(D\,|\,M,\theta,I)\,p(M,\theta\,|\,I). $$
For example: 1) **Marginalization**: if the first $P$ elements of $\theta$ are the sought-after model parameters, and the next $k-P$ parameters are nuisance parameters, when marginalizing $p(\theta)$ over the nuisance parameters we have $g(\theta) = 1$ and we integrate over the space spanned by the $k-P$ nuisance parameters. 2) **Point estimates** for the posterior: if we want the mean of a model parameter $\theta_m$, then $g(\theta) = \theta_m$ and we integrate over all model parameters. 3) **Model comparison**: here $g(\theta) = 1$ and we integrate over all model parameters.
Monte Carlo Methods
What you need is a computer that can generate (pseudo)random numbers and then you solve a lot of hard problems. Let's start with an easy problem of one-dimensional numerical integration. Assume that you can generate a distribution of M random numbers $\theta_j$ uniformly sampled within the integration volume V. Then our integral can be evaluated as $$ I = \int g(\theta) \, p(\theta) \, d\theta = \frac{V}{M} \sum_{j=1}^M g(\theta_j) \, p(\theta_j).$$ Note that in 1-D we can write a similar expression $$ I = \int f(\theta) \, d\theta = \Delta \, \sum_{j=1}^M g(\theta_j) \, p(\theta_j).$$ where $ f(\theta) = g(\theta) \, p(\theta) $, and it is assumed that the values $\theta_j$ are sampled on a regular grid with the step $\Delta = V/M$ ($V$ here is the length of the sampling domain).
This expression is the simplest example of numerical integration ("rectangle rule", which amounts to approximating $f(\theta)$ by a piecewise constant function). The reason why we expressed $f(\theta)$ as a product of $g(\theta)$ and $p(\theta)$ is that, as we will see shortly, we can generate a sample drawn from $p(\theta)$ (instead of sampling on a regular grid), and this greatly improves the performance of numerical integration (a short numerical illustration is given in the cell after this one).
Markov Chain Monte Carlo
The modern version of the Markov Chain Monte Carlo method was invented in the late 1940s by Stanislaw Ulam, while he was working on nuclear weapons projects at the Los Alamos National Laboratory. The name Monte Carlo was given to the method by Nick Metropolis, who then invented the Metropolis sampler, which evolved into one of the most famous MCMC algorithms, the Metropolis-Hastings algorithm. Algorithms for generating Markov chains are numerous and greatly vary in complexity and applicability. Many of the most important ideas were generated in physics, especially in the context of statistical mechanics, thermodynamics, and quantum field theory.
Example of non-linear regression: the age-color relation for asteroids
We will use age and color data for asteroid families shown in figure 1 from the paper "An age–colour relationship for main-belt S-complex asteroids" by Jedicke et al. (2004, Nature 429, 275), see [jedicke.pdf](data/jedicke.pdf). Given their y(x) data (see below), with errors in both x and y, we want to fit the following function $$ y(x) = a + b\,[1 - \exp(-(x/c)^d)]. $$ We have a case of non-linear regression because y(x) depends non-linearly on the unknown coefficients c and d. Important: here x is time, not log(time)! But in plots we'll use log(time) for the x axis. We want to: a) find the best-fit values and standard errors for parameters a, b, c and d. b) show the marginal distributions of the fitted parameters. c) compare our best fit to the best fit from Jedicke et al.
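(Aside: a quick numerical check of the plain Monte Carlo estimate defined above, with an arbitrary 1-D example.)
###Code
# Aside: Monte Carlo estimate of a 1-D integral, I ~ V/M * sum_j f(theta_j)
rng = np.random.RandomState(2)
V = np.pi                                     # length of the sampling domain [0, pi]
theta_samples = rng.uniform(0, np.pi, 100000)
I_mc = V / len(theta_samples) * np.sum(np.sin(theta_samples))
print("Monte Carlo estimate:", I_mc, " exact value:", 2.0)   # integral of sin(x) over [0, pi]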
###Code
# These age and color data for asteroid families are taken
# from the paper Jedicke et al. (2004, Nature 429, 275)
# Age is measured in 10^6 yrs (Myr)
# Log10(age) and error (of Log(Age))
logAge = np.asarray([3.398, 2.477, 3.398, 3.477, 3.301, 1.699, 2.699, 0.763,
2.301, 3.079, 3.176, 0.398])
LageErr = np.asarray([0.087, 0.145, 0.174, 0.145, 0.109, 0.347, 0.174, 0.015,
0.217, 0.145, 0.145, 0.434])
# SDSS principal asteroid color PC1 and its error (per family)
PC1 = np.asarray([0.620, 0.476, 0.523, 0.582, 0.460, 0.479, 0.432, 0.351,
0.427, 0.522, 0.532, 0.311])
PC1err = np.asarray([0.005, 0.015, 0.007, 0.011, 0.005, 0.032, 0.033, 0.047,
0.021, 0.015, 0.022, 0.027])
# time/age on linear axes
age = 10**logAge
# and standard error propagation (although errors can be large)
ageErr = age * LageErr * np.log(10)
# let's take a quick look at the data to verify that it looks
# similar to fig. 1 from Jedicke et al.
logT = np.linspace(-0.1, 3.7, 100)
time = np.power(10,logT)
# the best fit from Jedicke et al.
color = 0.32 + 1.0*(1-np.exp(-(time/2.5e4)**0.5))
ax = plt.figure().add_subplot(111)
ax.set_xlabel("log(age/Myr)")
ax.set_ylabel("SDSS PC$_1$ color")
ax.plot(logT,color, c='blue')
ax.errorbar(logAge,PC1,xerr=LageErr, yerr=PC1err, color='r',
marker='.', ls='None', label='Observed')
plt.show()
###Output
_____no_output_____
###Markdown
We will use pymc3 as the tool of choice to perform MCMC, see the [pymc3 docs](https://docs.pymc.io/). I also highly recommend perusing this [excellent blog post by Jake VanderPlas](http://jakevdp.github.io/blog/2014/06/14/frequentism-and-bayesianism-4-bayesian-in-python/).
###Code
import pymc3 as pm
from astroML.plotting.mcmc import plot_mcmc
# to make it look more generic (for future code reuse)
xObs = age
xErr = ageErr
yObs = PC1
yErr = PC1err
# three points to make:
# 1) note setting of the priors (for a, b, c and d)
# 2) note how error in x is handled with a latent variable
# 3) the actual model that is fit is given by AgeColor()
# and it's super easy to change it!
def MCMCasteroids(doXerror=True, draws=10000, tune=1000):
with pm.Model():
a = pm.Uniform('a', 0.1, 0.5)
b = pm.Uniform('b', 0, 10)
c = pm.Uniform('c', 0, 2000000)
d = pm.Uniform('d', 0, 2)
if doXerror:
xLatent = pm.Normal('x', mu=xObs, sd=xErr, shape=xObs.shape)
else:
xLatent = xObs
y = pm.Normal('y', mu=AgeColor(xLatent, a, b, c, d), sd=yErr, observed=yObs)
traces = pm.sample(draws=draws, tune=tune, return_inferencedata=False)
return traces
# model to fit
def AgeColor(t, a, b, c, d):
"""age-color relationship from Jedicke et al. (2004)"""
return a + b*(1-np.exp(-(t/c)**d))
# obtain best-fit parameters using MCMC
traces = MCMCasteroids(True)
bf = pm.summary(traces)['mean']
# let's take a look at the data and best-fit models
logT = np.linspace(-0.1, 3.55, 100)
time = np.power(10,logT)
# fit from Jedicke
colorJedicke = 0.32 + 1.0*(1-np.exp(-(time/2.5e4)**0.5))
colorHere = AgeColor(time, bf['a'], bf['b'], bf['c'], bf['d'])
# plot
ax = plt.figure().add_subplot(111)
ax.set_xlabel("log(age/Myr)")
ax.set_ylabel("SDSS PC$_1$ color")
ax.plot(logT,colorJedicke, c='blue', label='best fit from Jedicke et al.')
ax.plot(logT,colorHere, c='green', label='best fit here')
ax.errorbar(logAge,PC1,xerr=LageErr, yerr=PC1err, color='r', marker='.', ls='None', label='data')
plt.legend()
plt.show()
# the so-called traces for model parameters:
plot = pm.traceplot(traces)
# and a pretty so-called corner plot:
labels = ['$a$', '$b$', '$c$', '$d$']
limits = [(0.1, 0.55), (0.0, 12), (-100000,2200000), (0.1, 0.85)]
jedicke = [0.32, 1.0, 25000, 0.5]
# Plot the results
fig = plt.figure(figsize=(9, 9))
fig.subplots_adjust(bottom=0.1, top=0.95,
left=0.1, right=0.95,
hspace=0.05, wspace=0.05)
# This function plots multiple panels with the traces
plot_mcmc([traces[i] for i in ['a', 'b', 'c', 'd']],
labels=labels, limits=limits,
true_values=jedicke, fig=fig, bins=30, colors='k')
plt.show()
import corner
Ls = [r"$a$", r"$b$", r"$c$", r"$d$"]
samples = np.vstack([traces[i] for i in ['a', 'b', 'c', 'd']]).T
corner.corner(samples, truths=jedicke, labels=Ls, quantiles=[0.16, 0.5, 0.84], show_titles=True, title_kwargs={"fontsize": 12});
###Output
_____no_output_____ |
.ipynb_checkpoints/OOI_M2M_CTD_Downloader-checkpoint.ipynb | ###Markdown
Python code for accessing data via the OOI M2M interface

Some organizational stuff about OOI: Arrays have Sites. Sites have Platforms. Platforms have Nodes. Nodes have Instruments. Instruments have Sensors. -the end.

ARRAY = The Ocean Observatories Initiative is made up of seven major research components in the North and South Atlantic and Pacific: the Cabled Array and its two sub-arrays – Cabled Axial Seamount and Cabled Continental Margin – on the Juan de Fuca plate; the Coastal Endurance Array off the coast of Oregon and Washington; the Coastal Pioneer Array off the coast of New England; Global Argentine Basin Array in the South Atlantic Ocean; the Global Irminger Sea Array off the coast of Greenland; the Global Southern Ocean Array SW of Chile; and Global Station Papa in the Gulf of Alaska. Each array is composed of a number of sites at which different stable and mobile platforms are deployed. Array locations and configuration were designed based on input from the scientific community in order to study a set of specific regional and collectively global science questions.

Arrays:
CE = Coastal Endurance
CP = Coastal Pioneer
GI = Global Irminger
GP = Global Station Papa
GS = Global Southern Ocean

SITE = A site is a specific geographic location within an array that is the deployment area for one or more platforms. Each site has a defined depth range and a Latitude-Longitude defined zone within which instrument platforms are deployed for defined periods of time.

Sites on the Coastal Endurance Array:
"01IS": OR Inshore
"02SH": OR Shelf
"04OS": OR Offshore
"05MO": Mobile Assets
"06IS": WA Inshore
"07SH": WA Shelf
"09OS": WA Offshore

PLATFORM = A platform is a set of infrastructure that hosts a complement of integrated scientific instruments. A platform can be stable and fixed in place (e.g. a surface mooring) or mobile (e.g. a profiler mooring which has a component that moves up and down in the water column, or a glider which is free to move in 3 dimensions). Each platform can contain multiple "nodes" to which the instruments are attached, and a means of transmitting the data from the integrated instruments to shore. See "Platform Types" entries below for more details on specific platforms within the OOI.

Platforms of the Coastal Endurance Array:
"SM": Surface Mooring
"SP": Surface Piercing Profiler Mooring
"BP": Benthic Experiment Package
"PD": Deep Profiler Mooring
"PS": Shallow Profiler Mooring
"PM": Profiler Mooring

Possible ARRAY+SITE+PLATFORM combinations for the Coastal Endurance Array:
"CE01ISSM": OR Inshore Surface Mooring
"CE01ISSP": OR Inshore Surface Piercing Profiler Mooring
"CE02SHBP": OR Shelf Cabled Benthic Experiment Package
"CE02SHSM": OR Shelf Surface Mooring
"CE02SHSP": OR Shelf Surface Piercing Profiler Mooring
"CE04OSBP": OR Offshore Cabled Benthic Experiment Package
"CE04OSPD": OR Offshore Cabled Deep Profiler Mooring
"CE04OSPI": NA/Unknown
"CE04OSPS": OR Offshore Cabled Shallow Profiler Mooring
"CE04OSSM": OR Offshore Surface Mooring
"CE05MOAS": Mobile Assets
"CE06ISSM": WA Inshore Surface Mooring
"CE06ISSP": WA Inshore Surface Piercing Profiler Mooring
"CE07SHSM": WA Shelf Surface Mooring
"CE07SHSP": WA Shelf Surface Piercing Profiler Mooring
"CE09OSPM": WA Offshore Profiler Mooring
"CE09OSSM": WA Offshore Surface Mooring

NODE = A node is a section of a platform that contains one or more computers and power converters. Instruments on a platform are plugged into a node, which collects the instrument data internally and/or transmits the data externally.
Some platforms contain a single node, like a glider. Other platforms have several nodes wired together, for example a mooring that hosts a surface buoy, near-surface instrument frame, and seafloor multi-function node, each with a different set of instruments attached.

Nodes at CE02SHBP:
"LJ01D": Low-Power JBox
"MJ01C": Medium-Power JBox

INSTRUMENT = A scientific instrument is a piece of specialized equipment used to sample oceanographic attributes and collect data. There are 106 unique models of specialized instrumentation used throughout the OOI (850 total instruments deployed at any one time) that collect over 200 unique data products (>100,000 total science and engineering data products).

SENSOR = A sensor is the specific part of an instrument that measures a specific element of the surrounding environment. A single instrument can contain multiple sensors that are used to collect data on various environmental attributes; for example, a CTD is an instrument that contains specific sensors to measure conductivity, temperature, and pressure.

URLs for exploring the OOI Data

All OOI 8 digit ARRAY+SITE+PLATFORM codes:
PLATFORMS = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv"

All nodes at Coastal Endurance (CE) 02SH (Oregon Shelf) BP (Benthic Package):
NODES = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHBP"

All sensors at the CE02SHBP Low-power JBox:
SENSORS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHBP/LJ01D'

All methods of sensor 06-CTDBPN106:
METHODS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHBP/LJ01D/06-CTDBPN106'

All streamed parameters for the CE02SHBP, Low-power JBox, CTD sensor:
STREAMED = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHBP/LJ01D/06-CTDBPN106/streamed/'

Metadata for the CE02SHBP, Low-power JBox, CTD:
METADATA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHBP/LJ01D/06-CTDBPN106/metadata/'

RCRV Datapresence Team, April 18, 2018
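As a quick illustration (added here; not part of the original downloader), the inventory URLs above can be walked level by level with `requests`. The credentials in this sketch are placeholders, and the endpoints are simply the ones listed above:
###Code
# Illustrative sketch only: walk the M2M sensor inventory one level at a time.
# Each GET on .../sensor/inv/<path> returns JSON listing the next level down.
import requests
USERNAME = "YOUR-OOI-API-USERNAME" # placeholder credentials
TOKEN = "YOUR-OOI-API-TOKEN" # placeholder credentials
BASE_URL = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/"
# nodes available on the Oregon Shelf Cabled Benthic Experiment Package
nodes = requests.get(BASE_URL + "CE02SHBP", auth=(USERNAME, TOKEN)).json()
# sensors available on the Low-Power JBox node
sensors = requests.get(BASE_URL + "CE02SHBP/LJ01D", auth=(USERNAME, TOKEN)).json()
###Output
_____no_output_____
###Markdown
The cell below builds the same kind of URL to request actual CTD data and print the most recent records.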
###Code
import requests
from datetime import datetime, timedelta, timezone
def convertOOItime(ooi_seconds):
"""Convert ooi seconds to a datetime object."""
t0 = datetime(1900, 1, 1)
dtObj = t0 + timedelta(seconds=ooi_seconds)
return dtObj.isoformat()
def convertDTobj(dtObj):
"""Convert datetime object to ooi seconds."""
ooi_seconds = (dtObj - datetime(1900, 1, 1)).total_seconds()
return ooi_seconds
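# Example usage (illustrative only): convertOOItime(i["time"]) turns the
# timestamp returned by the M2M API (seconds since 1900-01-01, as assumed by
# t0 above) into an ISO 8601 string, and convertDTobj(datetime.utcnow())
# converts a datetime back into those seconds.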
def run():
"""foo."""
# OOI Authentication Credentials for Chris Romsos
USERNAME = 'OOIAPI-HTX3MQ74GUC2HM'
TOKEN = '0B4CJTS3SS1I'
# DATA_URL Variables
BASE_URL = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/"
PLATFORM = "CE02SHBP/"
NODE = "LJ01D/"
SENSOR = "06-CTDBPN106/"
METHOD = "streamed/"
PARAMETER = 'ctdbp_no_sample'
BEGIN = '?beginDT='
END = '&endDT='
LIMIT = '&limit=20000'
# Get the last datetime from the database and convert it to a python datetime obj.
# This will be our start time for the DATA_URL
# last_data_time = aggModels.SensorFloat1Archive.objects.latest('datetime').datetime
    # Get the time 60 minutes ago (use for demonstration only)
last_data_time = datetime.now(timezone.utc)-timedelta(minutes=60)
# Reformat for the OOI M2M get request
TIME1 = str(last_data_time.isoformat()).replace('+00:00', 'Z')
# print("LAST_DATA_TIME = "+TIME1)
# Get the UTC time now from the server clock
TIME2 = datetime.utcnow().isoformat()+'Z'
# print("REQUEST_TIME = "+TIME2)
# Build the data URL
DATA_URL = BASE_URL+PLATFORM+NODE+SENSOR+METHOD+PARAMETER+BEGIN+TIME1+END+TIME2+LIMIT
# Go get some OOI Data!
response = requests.get(DATA_URL, auth=(USERNAME, TOKEN))
# Convert the response to json
json_response = response.json()
# print(json_response)
n = 1
for i in json_response:
INSTRUMENT_ISOTIME = convertOOItime(i["time"])+'Z'
SALINITY = i["practical_salinity"] #PSU
TEMPERATURE = i["seawater_temperature"] # deg C
OXYGEN = i["dissolved_oxygen"] # Temp. Sal. Corrected mmol/kg-1
DENSITY = i["density"] # kg/m-3
CONDUCTIVITY = i["ctdbp_no_seawater_conductivity"] # S/m-1
print("*****"+str(n)+"*****")
print(INSTRUMENT_ISOTIME)
print(SALINITY)
print(TEMPERATURE)
print(OXYGEN)
print(DENSITY)
print(CONDUCTIVITY)
n+=1
run()
###Output
*****1*****
2019-10-14T13:57:54.694377Z
33.66939004129604
8.223826200172937
83.5393464101873
1026.5660740116396
3.520859751362286
*****2*****
2019-10-14T13:57:55.694487Z
33.66949030539344
8.223701993919349
83.54055242028052
1026.5663565564457
3.520859766372502
*****3*****
2019-10-14T13:57:56.694389Z
33.669113849373964
8.223764096986315
83.52861083503429
1026.566461568447
3.520833900679014
*****4*****
2019-10-14T13:57:57.694396Z
33.66906299623927
8.223764096986315
83.53820478186898
1026.5669218980202
3.5208339373834887
*****5*****
2019-10-14T13:57:58.694403Z
33.6691438628213
8.223701993919349
83.5212852166428
1026.5674618609282
3.52084044612792
*****6*****
2019-10-14T13:57:59.694826Z
33.66918129950282
8.223701993919349
83.51774797687145
1026.5677998893816
3.520846942535169
*****7*****
2019-10-14T13:58:00.694729Z
33.6692507525436
8.223764096986315
83.47572123286415
1026.5679299384146
3.520859895586935
*****8*****
2019-10-14T13:58:01.695151Z
33.66947164215344
8.223764096986315
83.45405140058405
1026.5679611426879
3.5208793064946184
*****9*****
2019-10-14T13:58:02.694324Z
33.66943646147506
8.223764096986315
83.4238610943734
1026.5676026924477
3.520872808435192
*****10*****
2019-10-14T13:58:03.694749Z
33.66982470319274
8.223764096986315
83.41253028826965
1026.5674728422412
3.520905145517494
*****11*****
2019-10-14T13:58:04.694338Z
33.66952617877853
8.223764096986315
83.41504438053765
1026.566790566272
3.52087274367929
*****12*****
2019-10-14T13:58:05.694761Z
33.66928391022686
8.223764096986315
83.39732098128292
1026.5662759747465
3.520846824784242
*****13*****
2019-10-14T13:58:06.694768Z
33.669368590054475
8.223764096986315
83.40700270283506
1026.5661863540163
3.520853287097118
*****14*****
2019-10-14T13:58:07.694358Z
33.66922643793589
8.223764096986315
83.39391190214654
1026.5661193134779
3.5208403428386874
*****15*****
2019-10-14T13:58:08.694677Z
33.66879128995478
8.223764096986315
83.40176694817393
1026.565996888579
3.520801516451273
*****16*****
2019-10-14T13:58:09.694580Z
33.66883283495157
8.223826200172937
83.43535878843713
1026.5663795637115
3.5208144895794424
*****17*****
2019-10-14T13:58:10.694690Z
33.66885409058867
8.223764096986315
83.45386492686637
1026.5667822228806
3.520814517926126
*****18*****
2019-10-14T13:58:11.694384Z
33.669163634894474
8.223764096986315
83.42401195791503
1026.5673647196113
3.5208469115959105
*****19*****
2019-10-14T13:58:12.694184Z
33.669014520387414
8.223826200172937
83.41244201041624
1026.56744255149
3.520840452106885
*****20*****
2019-10-14T13:58:13.695023Z
33.66921897632784
8.223826200172937
83.39857729714855
1026.567622507882
3.5208598748329796
*****21*****
2019-10-14T13:58:14.695133Z
33.66937085499657
8.223826200172937
83.42237326697685
1026.5676014971818
3.5208728120992743
*****22*****
2019-10-14T13:58:15.694411Z
33.669272717893136
8.2238883034791
83.4220343576836
1026.567217864988
3.5208663157961277
*****23*****
2019-10-14T13:58:16.694626Z
33.66944928704562
8.2238883034791
83.41659786941518
1026.5669733550933
3.5208792352507112
*****24*****
2019-10-14T13:58:17.695570Z
33.66968887409537
8.2238883034791
83.44376936872207
1026.5668353177075
3.520898632705567
*****25*****
2019-10-14T13:58:18.694744Z
33.669701838665276
8.223826200172937
83.4485759728888
1026.5666361331778
3.520892143571987
*****26*****
2019-10-14T13:58:19.694750Z
33.669707060339164
8.223826200172937
83.47761962437605
1026.5665888669994
3.5208921398031445
*****27*****
2019-10-14T13:58:20.694339Z
33.66949621085294
8.2238883034791
83.48137105913501
1026.5665485989762
3.5208792013822245
*****28*****
2019-10-14T13:58:21.694555Z
33.66931018368158
8.223764096986315
83.48202705167081
1026.5667150495115
3.520853329252973
*****29*****
2019-10-14T13:58:22.694665Z
33.66942358131288
8.223826200172937
83.49724776336735
1026.5671242120306
3.5208727740423185
*****30*****
2019-10-14T13:58:23.694151Z
33.66925384795912
8.223826200172937
83.47356459736119
1026.567306843089
3.5208598496631227
*****31*****
2019-10-14T13:58:24.694263Z
33.66937130903338
8.223826200172937
83.46127056415982
1026.5675973871664
3.520872811771556
*****32*****
2019-10-14T13:58:25.694164Z
33.66937871270693
8.2238883034791
83.44674804304981
1026.5676122028358
3.5208792861901625
*****33*****
2019-10-14T13:58:26.694692Z
33.66972796529906
8.223826200172937
83.4524961566243
1026.567753441037
3.5209051716511075
*****34*****
2019-10-14T13:58:27.694283Z
33.66982378893255
8.223826200172937
83.45094842362697
1026.5675629488676
3.5209116259631514
*****35*****
2019-10-14T13:58:28.694184Z
33.66972817018828
8.2238883034791
83.47479796773513
1026.5671565121315
3.520905127813194
*****36*****
2019-10-14T13:58:29.695233Z
33.669479711509226
8.2238883034791
83.46237877971205
1026.5666979513035
3.520879213291023
*****37*****
2019-10-14T13:58:30.694820Z
33.66948062687791
8.223826200172937
83.4330766044428
1026.5666078331121
3.5208727328682357
*****38*****
2019-10-14T13:58:31.694933Z
33.66939821695473
8.223764096986315
83.41270899446991
1026.5665950725493
3.520859789150113
*****39*****
2019-10-14T13:58:32.694106Z
33.66937958499927
8.223764096986315
83.4436319567753
1026.5667637295207
3.5208598025981153
*****40*****
2019-10-14T13:58:33.694530Z
33.66901177131031
8.223764096986315
83.44438227460977
1026.5667086856586
3.5208274509381505
*****41*****
2019-10-14T13:58:34.694536Z
33.66899552081559
8.223826200172937
83.40588285451858
1026.5669376273122
3.520833942396954
*****42*****
2019-10-14T13:58:35.694542Z
33.66910190618748
8.223764096986315
83.38723050206133
1026.567246588453
3.5208404327223626
*****43*****
2019-10-14T13:58:36.694133Z
33.6692366960016
8.223826200172937
83.38133389358501
1026.5674621057228
3.520859862043145
*****44*****
2019-10-14T13:58:37.694139Z
33.669224974167435
8.223764096986315
83.37254351480101
1026.567486376507
3.5208533907553274
*****45*****
2019-10-14T13:58:38.694146Z
33.6691652431807
8.223764096986315
83.39979726550884
1026.5673501610963
3.5208469104350772
*****46*****
2019-10-14T13:58:39.694153Z
33.669316861950264
8.223764096986315
83.38296583812526
1026.5673315040312
3.520859847870133
*****47*****
2019-10-14T13:58:40.694055Z
33.66948266129565
8.223826200172937
83.37373055299398
1026.5672663217495
3.5208792548514176
*****48*****
2019-10-14T13:58:41.694061Z
33.66937253786486
8.2238883034791
83.38211161830344
1026.5669911893717
3.5208727671949758
*****49*****
2019-10-14T13:58:42.694174Z
33.66945830812767
8.2238883034791
83.3994879390839
1026.5668916956583
3.5208792287394854
*****50*****
2019-10-14T13:58:43.694387Z
33.66946193319262
8.223826200172937
83.39327288523326
1026.566777048631
3.5208727463608276
*****51*****
2019-10-14T13:58:44.694081Z
33.66947713308591
8.2238883034791
83.42467393979102
1026.5667212912695
3.5208792151520654
*****52*****
2019-10-14T13:58:45.694089Z
33.66959512620329
8.223764096986315
83.46231049210297
1026.5668433572007
3.520879217366357
*****53*****
2019-10-14T13:58:46.694095Z
33.669647909688976
8.223701993919349
83.47684570584082
1026.5669606312342
3.5208792229579573
*****54*****
2019-10-14T13:58:47.694310Z
33.66938659222955
8.2238883034791
83.46218060948296
1026.5668639679586
3.5208727570508236
*****55*****
2019-10-14T13:58:48.694421Z
33.669151979769026
8.223826200172937
83.42729232224251
1026.5668751515555
3.5208468763192133
*****56*****
2019-10-14T13:58:49.694115Z
33.669139854233535
8.2238883034791
83.41171405754444
1026.567066750614
3.5208533648147835
*****57*****
2019-10-14T13:58:50.694538Z
33.669396494455576
8.2238883034791
83.38471014777735
1026.5674512395321
3.5208792733555105
*****58*****
2019-10-14T13:58:51.694024Z
33.6693123435892
8.2238883034791
83.3797401468795
1026.5675360768294
3.52087281064228
*****59*****
2019-10-14T13:58:52.694029Z
33.66910781564212
8.223950406904862
83.40191937567857
1026.567438607639
3.5208598676882077
*****60*****
2019-10-14T13:58:53.694870Z
33.66943742048241
8.223826200172937
83.40089187142111
1026.567675846771
3.5208792875055055
*****61*****
2019-10-14T13:58:54.694460Z
33.66965947484856
8.223826200172937
83.43092453752953
1026.5676965135242
3.520898697615202
*****62*****
2019-10-14T13:58:55.693945Z
33.66949695523215
8.223950406904862
83.42971877296462
1026.5673005968965
3.5208922040729753
*****63*****
2019-10-14T13:58:56.693952Z
33.66966681621238
8.223950406904862
83.40507104673985
1026.5671168150823
3.520905128407333
*****64*****
2019-10-14T13:58:57.694480Z
33.66981839937473
8.223826200172937
83.41483392486639
1026.5669348330828
3.5209051063777084
*****65*****
2019-10-14T13:58:58.694487Z
33.66984347577643
8.223826200172937
83.42724959474005
1026.5667078439863
3.520905088278294
*****66*****
2019-10-14T13:58:59.693868Z
33.66971918001082
8.223826200172937
83.43526437197235
1026.566479160846
3.520892131055554
*****67*****
2019-10-14T13:59:00.694083Z
33.66936999778145
8.223826200172937
83.44674203469074
1026.5662554451985
3.520859765829017
*****68*****
2019-10-14T13:59:01.694089Z
33.66933610269764
8.223764096986315
83.46889702267272
1026.566480429511
3.5208533105453967
*****69*****
2019-10-14T13:59:02.694408Z
33.669442534160346
8.223826200172937
83.48086100157255
1026.5669526495053
3.52087276036257
*****70*****
2019-10-14T13:59:03.694000Z
33.66933974298511
8.2238883034791
83.4375641246775
1026.567288052797
3.520872790865754
*****71*****
2019-10-14T13:59:04.694421Z
33.66936803098685
8.223826200172937
83.42490581674679
1026.5676270605827
3.5208728141376064
*****72*****
2019-10-14T13:59:05.694324Z
33.66950082352304
8.2238883034791
83.4660163443173
1026.5678606577385
3.52089224497102
*****73*****
2019-10-14T13:59:06.694123Z
33.66985187277836
8.2238883034791
83.47362313960772
1026.5680674692487
3.520924608968736
*****74*****
2019-10-14T13:59:07.693921Z
33.67001702658907
8.2238883034791
83.46645094867559
1026.5679263058594
3.52093753674843
*****75*****
2019-10-14T13:59:08.694032Z
33.66962938898019
8.223826200172937
83.51347286700803
1026.5672919461301
3.520892195864325
*****76*****
2019-10-14T13:59:09.694039Z
33.66960323150827
8.2238883034791
83.51723732958997
1026.5669336516482
3.5208921710545353
*****77*****
2019-10-14T13:59:10.694357Z
33.669763497137794
8.2238883034791
83.51991903649783
1026.5668367336893
3.520905102315025
*****78*****
2019-10-14T13:59:11.693947Z
33.669486296684596
8.223826200172937
83.5100546648505
1026.5665565100387
3.5208727287759336
*****79*****
2019-10-14T13:59:12.694267Z
33.669071895360084
8.2238883034791
83.51721333754871
1026.5663281089946
3.5208403670056416
*****80*****
2019-10-14T13:59:13.694064Z
33.6692561107809
8.2238883034791
83.48367261428332
1026.5666912869187
3.52085980434054
*****81*****
2019-10-14T13:59:14.693967Z
33.669297663910996
8.2238883034791
83.47122916358384
1026.5669920499251
3.5208662977905165
*****82*****
2019-10-14T13:59:15.694287Z
33.6694741535853
8.223826200172937
83.45449187690858
1026.5673433342872
3.5208792609921242
*****83*****
2019-10-14T13:59:16.693876Z
33.66933483312788
8.2238883034791
83.48081740856057
1026.5673324975858
3.5208727944096174
*****84*****
2019-10-14T13:59:17.693987Z
33.669679636341684
8.2238883034791
83.45325704865385
1026.567595841988
3.5209051628440764
*****85*****
2019-10-14T13:59:18.694203Z
33.66963040240368
8.223950406904862
83.42612566165197
1026.5674464348683
3.520905154690249
*****86*****
2019-10-14T13:59:19.693896Z
33.66977630126906
8.2238883034791
83.45327572483056
1026.5673977330227
3.520911616548777
*****87*****
2019-10-14T13:59:20.693799Z
33.669794612566854
8.2238883034791
83.45095992519472
1026.567231979276
3.5209116033320336
*****88*****
2019-10-14T13:59:21.694326Z
33.66946572664947
8.2238883034791
83.4353548688022
1026.566824542795
3.520879223384962
*****89*****
2019-10-14T13:59:22.694124Z
33.66961116294484
8.2238883034791
83.42720684876784
1026.5668618562313
3.520892165329809
*****90*****
2019-10-14T13:59:23.694235Z
33.66953075447584
8.223826200172937
83.43095240149547
1026.5668309800783
3.5208792201388492
*****91*****
2019-10-14T13:59:24.694034Z
33.669313206862064
8.223826200172937
83.45142214201044
1026.5667695188624
3.5208598068190824
*****92*****
2019-10-14T13:59:25.694353Z
33.66915681677704
8.223826200172937
83.43119114558564
1026.5668313662366
3.520846872827966
*****93*****
2019-10-14T13:59:26.693735Z
33.669477188647974
8.223826200172937
83.4084999865236
1026.5673158606223
3.5208792588014717
*****94*****
2019-10-14T13:59:27.694366Z
33.66946075072914
8.223826200172937
83.39681562667796
1026.5674646583302
3.520879270666075
*****95*****
2019-10-14T13:59:28.694268Z
33.669325233108374
8.2238883034791
83.42406015250273
1026.5674193985524
3.5208728013387813
*****96*****
2019-10-14T13:59:29.694378Z
33.669399397577955
8.2238883034791
83.4124327486202
1026.5674249600365
3.520879271260076
*****97*****
2019-10-14T13:59:30.694074Z
33.66975432285982
8.223826200172937
83.4000705200564
1026.567514851533
3.5209051526266615
*****98*****
2019-10-14T13:59:31.694288Z
33.66959129438016
8.223950406904862
83.40436897200333
1026.5671235373939
3.5208986594467384
*****99*****
2019-10-14T13:59:32.694294Z
33.66975380899772
8.223950406904862
83.45010167661425
1026.5670062588072
3.5209115890932
*****100*****
2019-10-14T13:59:33.693990Z
33.66969865906083
8.223950406904862
83.44730662696796
1026.5668285734773
3.520905105423816
*****101*****
2019-10-14T13:59:34.693746Z
33.669482799813835
8.2238883034791
83.4450704876702
1026.5666699958895
3.5208792110619616
*****102*****
2019-10-14T13:59:35.693898Z
33.66938872293001
8.223826200172937
83.45291006244716
1026.5667628471385
3.5208662757554294
*****103*****
2019-10-14T13:59:36.694217Z
33.6694389038542
8.2238883034791
83.45462023765931
1026.567067344581
3.520879242745093
*****104*****
2019-10-14T13:59:37.694118Z
33.669489086317306
8.223950406904862
83.4123222676912
1026.5673718272392
3.5208922097526534
*****105*****
2019-10-14T13:59:38.693810Z
33.669451660481634
8.223826200172937
83.439702286066
1026.5675469443424
3.520879277227274
*****106*****
2019-10-14T13:59:39.693826Z
33.66938779683751
8.2238883034791
83.4679090528767
1026.5675299717386
3.520879279633341
*****107*****
2019-10-14T13:59:40.693841Z
33.66981746097716
8.223950406904862
83.4654160889757
1026.5677838885817
3.5209245901161874
*****108*****
2019-10-14T13:59:41.693935Z
33.66969800201648
8.223950406904862
83.47308609869158
1026.5675114249345
3.5209116293737406
*****109*****
2019-10-14T13:59:42.693736Z
33.66965219054117
8.223950406904862
83.4942008082833
1026.5672492072226
3.520905138963891
*****110*****
2019-10-14T13:59:43.694158Z
33.66959304710158
8.2238883034791
83.49859817768457
1026.5670258411026
3.520892178405416
*****111*****
2019-10-14T13:59:44.693903Z
33.669542009521415
8.223950406904862
83.50723469924462
1026.5668927619242
3.5208921715535544
*****112*****
2019-10-14T13:59:45.693755Z
33.66919901474889
8.223950406904862
83.49509786212904
1026.5666130548605
3.520859801861989
*****113*****
2019-10-14T13:59:46.694596Z
33.66951979353499
8.223826200172937
83.50001967658703
1026.5669301987184
3.52087922805018
*****114*****
2019-10-14T13:59:47.693769Z
33.669393418216075
8.223950406904862
83.49347732037114
1026.5668840110911
3.5208792318861426
*****115*****
2019-10-14T13:59:48.694193Z
33.66938484845834
8.223950406904862
83.48465115583073
1026.5669615854345
3.5208792380716383
*****116*****
2019-10-14T13:59:49.693782Z
33.66930134072839
8.2238883034791
83.4806362953597
1026.5669587669051
3.5208662951366594
*****117*****
2019-10-14T13:59:50.694205Z
33.669514624198484
8.223950406904862
83.51471588405808
1026.567140655667
3.5208921913197826
*****118*****
2019-10-14T13:59:51.694013Z
33.669511591928426
8.223950406904862
83.47976431355943
1026.5671681040421
3.5208921935084256
*****119*****
2019-10-14T13:59:52.693698Z
33.669714375962144
8.223950406904862
83.48830709831348
1026.5673632071123
3.520911617555257
*****120*****
2019-10-14T13:59:53.693600Z
33.66938334782301
8.224074614115409
83.50796868982333
1026.5671388320484
3.520892198693231
*****121*****
2019-10-14T13:59:54.693815Z
33.66930099793137
8.22401251045028
83.50001724165726
1026.567125536617
3.5208792549039116
*****122*****
2019-10-14T13:59:55.693718Z
33.6694166087982
8.2238883034791
83.51173557825676
1026.5672691617658
3.5208792588372724
*****123*****
2019-10-14T13:59:56.694138Z
33.66922721726051
8.22401251045028
83.5385973325956
1026.5671165031447
3.5208727847057872
*****124*****
2019-10-14T13:59:57.693731Z
33.66935577120401
8.223950406904862
83.52986426972278
1026.5672247965858
3.52087925905915
*****125*****
2019-10-14T13:59:58.693946Z
33.669361120703755
8.223950406904862
83.51961434584823
1026.5671763721132
3.5208792551979555
*****126*****
2019-10-14T13:59:59.694473Z
33.66924879701052
8.224074614115409
83.53200119439136
1026.567002991421
3.520879248891993
*****127*****
2019-10-14T14:00:00.693855Z
33.66945261446862
8.22401251045028
83.553441433259
1026.5671068981642
3.5208921923874987
*****128*****
2019-10-14T14:00:01.693965Z
33.669596954131734
8.22401251045028
83.55410172731919
1026.5671541345537
3.5209051351425393
*****129*****
2019-10-14T14:00:02.693764Z
33.669534194728406
8.22401251045028
83.56102067616251
1026.5670453319012
3.520898656970323
*****130*****
2019-10-14T14:00:03.693771Z
33.669488754937994
8.224136717900024
83.54390840718004
1026.5669434087927
3.520905125858814
*****131*****
2019-10-14T14:00:04.693673Z
33.669410732232826
8.224074614115409
83.55573333286736
1026.5668909444114
3.520892178927487
*****132*****
2019-10-14T14:00:05.693679Z
33.669123738987764
8.22401251045028
83.5319424526531
1026.5666993892314
3.520859812505277
*****133*****
2019-10-14T14:00:06.693478Z
33.669462930151724
8.22401251045028
83.52862181156938
1026.5670135194416
3.5208921849417862
*****134*****
2019-10-14T14:00:07.693797Z
33.669397972957015
8.224074614115409
83.52972333187671
1026.5670064430838
3.520892188136968
*****135*****
2019-10-14T14:00:08.693803Z
33.669254861427476
8.224074614115409
83.5318057657383
1026.5669480951524
3.520879244514763
*****136*****
2019-10-14T14:00:09.693707Z
33.669322454690544
8.224074614115409
83.53546540929877
1026.5670131379486
3.520885719183568
*****137*****
2019-10-14T14:00:10.693608Z
33.669531938092504
8.224074614115409
83.56989919115475
1026.5671475883535
3.520905138380026
*****138*****
2019-10-14T14:00:11.693615Z
33.669478447852505
8.224136717900024
83.57171423724928
1026.5670367098292
3.52090513329836
*****139*****
2019-10-14T14:00:12.694456Z
33.66949074914889
8.224198821804293
83.58859521979527
1026.5670071842737
3.5209116042048967
*****140*****
2019-10-14T14:00:13.694565Z
33.66969038386607
8.224136717900024
83.58923649398992
1026.5671489621882
3.5209245507672255
*****141*****
2019-10-14T14:00:14.693947Z
33.66948090005752
8.224136717900024
83.58875188374822
1026.5670145121462
3.5209051315283832
*****142*****
2019-10-14T14:00:15.694473Z
33.669277079570335
8.224198821804293
83.57069094722365
1026.5669106328564
3.520892188016228
*****143*****
2019-10-14T14:00:16.693648Z
33.66932624833623
8.224136717900024
83.61251840028687
1026.5670606275737
3.520892196216889
*****144*****
2019-10-14T14:00:17.693759Z
33.66951247331321
8.224074614115409
83.63941342633393
1026.567323785995
3.52090515242951
*****145*****
2019-10-14T14:00:18.693973Z
33.6693067811437
8.224136717900024
83.66097178686394
1026.5672368486098
3.520892210268182
*****146*****
2019-10-14T14:00:19.693980Z
33.669105923720664
8.224198821804293
83.66033836682853
1026.567106155057
3.5208792646366796
*****147*****
2019-10-14T14:00:20.693674Z
33.66937262848659
8.224136717900024
83.65764825253702
1026.567317695555
3.5208986862066474
*****148*****
2019-10-14T14:00:21.693995Z
33.669455490574066
8.224198821804293
83.63966295712012
1026.5673263507913
3.520911629654349
*****149*****
2019-10-14T14:00:22.693583Z
33.66958251020756
8.224074614115409
83.66231113440676
1026.567366711133
3.5209116253536177
*****150*****
2019-10-14T14:00:23.693590Z
33.66941540892152
8.224260925828162
83.6575034383017
1026.5670940950065
3.5209116148943878
*****151*****
2019-10-14T14:00:24.694326Z
33.669488944404975
8.224198821804293
83.67662274490243
1026.5670235210735
3.5209116055075476
*****152*****
2019-10-14T14:00:25.693604Z
33.66950293135778
8.224198821804293
83.64957944541501
1026.566896909306
3.520911595411883
*****153*****
2019-10-14T14:00:26.693506Z
33.66950526189686
8.224136717900024
83.65657470699853
1026.5667939860464
3.520905113944295
*****154*****
2019-10-14T14:00:27.693930Z
33.66952136934347
8.224198821804293
83.64655529990777
1026.5667300066343
3.5209115821035426
*****155*****
2019-10-14T14:00:28.693519Z
33.6694510014176
8.224198821804293
83.65210776836444
1026.566690079394
3.52090510941855
*****156*****
2019-10-14T14:00:29.693838Z
33.66916360916723
8.224198821804293
83.65588743685952
1026.5665839719577
3.5208792229996924
*****157*****
2019-10-14T14:00:30.693532Z
33.66921991283377
8.224260925828162
83.68207810196793
1026.566833037589
3.520892185588461
*****158*****
2019-10-14T14:00:31.694059Z
33.66932455775211
8.224198821804293
83.68276913601288
1026.5671577605208
3.5208986772133617
*****159*****
2019-10-14T14:00:32.693650Z
33.66937513917921
8.224198821804293
83.66964021752486
1026.5673767974745
3.520905164175414
*****160*****
2019-10-14T14:00:33.693864Z
33.66891208410019
8.224385134234751
83.66630876130401
1026.5670756051193
3.5208792734789087
*****161*****
2019-10-14T14:00:34.693558Z
33.66945818549389
8.224323029971629
83.67788564787142
1026.5674656093627
3.5209245872944233
*****162*****
2019-10-14T14:00:35.693774Z
33.66961412770909
8.224323029971629
83.69010315166385
1026.5674078096656
3.5209375217214203
*****163*****
2019-10-14T14:00:36.694407Z
33.66950137050031
8.224323029971629
83.71926325893729
1026.5670746896517
3.5209245561234344
*****164*****
2019-10-14T14:00:37.694099Z
33.669789927733575
8.224260925828162
83.70661168884186
1026.567088437694
3.52094396202073
*****165*****
2019-10-14T14:00:38.693898Z
33.66959507584959
8.224260925828162
83.72961047552236
1026.5668215383123
3.52092453217828
*****166*****
2019-10-14T14:00:39.693591Z
33.669454214083885
8.224260925828162
83.75957689153623
1026.566742824216
3.5209115868850502
*****167*****
2019-10-14T14:00:40.693494Z
33.66943797378871
8.224260925828162
83.79960121942017
1026.5668898338022
3.520911598607174
*****168*****
2019-10-14T14:00:41.693395Z
33.669533994180426
8.224136717900024
83.78079478646818
1026.5672108046238
3.5209116166815395
*****169*****
2019-10-14T14:00:42.693923Z
33.669453694700294
8.224198821804293
83.74718000585509
1026.56734260739
3.5209116309506054
*****170*****
2019-10-14T14:00:43.693618Z
33.66944287013649
8.224198821804293
83.75644304175795
1026.5674405935479
3.5209116387637556
*****171*****
2019-10-14T14:00:44.693520Z
33.6696596269847
8.224260925828162
83.74201282741676
1026.567591027766
3.5209375325713133
*****172*****
2019-10-14T14:00:45.693421Z
33.669674386016176
8.224260925828162
83.75842252673202
1026.5674574266993
3.5209375219182397
*****173*****
2019-10-14T14:00:46.693950Z
33.669843661729345
8.224323029971629
83.76464575618931
1026.5673607605718
3.5209569265587617
*****174*****
2019-10-14T14:00:47.693851Z
33.669786839597144
8.224260925828162
83.7433306859743
1026.567116391694
3.5209439642497253
*****175*****
2019-10-14T14:00:48.693546Z
33.66979844205272
8.224260925828162
83.74977990800535
1026.5670113656295
3.5209439558751607
*****176*****
2019-10-14T14:00:49.693449Z
33.66967071890282
8.224323029971629
83.75512870669944
1026.5668955384956
3.52093748087402
*****177*****
2019-10-14T14:00:50.693455Z
33.66938642030617
8.224323029971629
83.76973394307495
1026.566761422797
3.5209115921275163
*****178*****
2019-10-14T14:00:51.693566Z
33.66956761714301
8.224260925828162
83.78833052099685
1026.5670700977323
3.5209245519977825
*****179*****
2019-10-14T14:00:52.693366Z
33.66955066576464
8.224260925828162
83.75330988990649
1026.567223544144
3.520924564233221
*****180*****
2019-10-14T14:00:53.693475Z
33.669462202182196
8.224198821804293
83.73769588021611
1026.567265596051
3.5209116248099326
*****181*****
2019-10-14T14:00:54.693795Z
33.669599838162924
8.224198821804293
83.7710114438157
1026.5673735108376
3.5209245724316927
*****182*****
2019-10-14T14:00:55.693801Z
33.6697553181765
8.224260925828162
83.75123144404131
1026.5674017263234
3.5209439870017443
*****183*****
2019-10-14T14:00:56.693807Z
33.6697686630119
8.224260925828162
83.76427085271823
1026.5672809275388
3.520943977369486
*****184*****
2019-10-14T14:00:57.693501Z
33.669977384085335
8.224198821804293
83.76980523602711
1026.5673404654553
3.5209569174212874
*****185*****
2019-10-14T14:00:58.693300Z
33.669923315942306
8.224260925828162
83.780975622237
1026.5672348094781
3.5209569127560436
*****186*****
2019-10-14T14:00:59.693410Z
33.66984400143448
8.224198821804293
83.79752392630643
1026.5671940412062
3.5209439666817874
*****187*****
2019-10-14T14:01:00.693313Z
33.669686780163374
8.224136717900024
83.79688268146964
1026.5671815831688
3.520924553368341
*****188*****
2019-10-14T14:01:01.693738Z
33.66961259972927
8.224198821804293
83.80118625631522
1026.5672579913855
3.520924563220453
*****189*****
2019-10-14T14:01:02.693535Z
33.669534295071344
8.224260925828162
83.7574999076514
1026.567371734509
3.5209245760495604
*****190*****
2019-10-14T14:01:03.694166Z
33.66965781368978
8.224260925828162
83.78760417339335
1026.567607442018
3.5209375338801525
*****191*****
2019-10-14T14:01:04.693340Z
33.66972076697104
8.224260925828162
83.73274651137113
1026.5677144885071
3.5209440119408058
*****192*****
2019-10-14T14:01:05.693658Z
33.66959646566351
8.224323029971629
83.74011692387715
1026.5675676898657
3.520937534469937
*****193*****
2019-10-14T14:01:06.693770Z
33.67001660234206
8.224260925828162
83.7489525963988
1026.567744186436
3.5209698924558177
*****194*****
2019-10-14T14:01:07.693464Z
33.66996956605804
8.224260925828162
83.76952458085881
1026.5674930558691
3.5209634028872365
*****195*****
2019-10-14T14:01:08.693574Z
33.66986305414321
8.224323029971629
83.79635024033297
1026.567185219
3.5209569125613687
*****196*****
2019-10-14T14:01:09.693372Z
33.6698796140746
8.224323029971629
83.7815499601373
1026.5670353177077
3.520956900608495
*****197*****
2019-10-14T14:01:10.694110Z
33.6698098397763
8.224260925828162
83.76990428448836
1026.5669081930034
3.5209439476483877
*****198*****
2019-10-14T14:01:11.693281Z
33.66980939146421
8.224260925828162
83.73257902394452
1026.5669122511379
3.520943947971975
*****199*****
2019-10-14T14:01:12.693184Z
33.66967258238535
8.224323029971629
83.73036245611961
1026.566878670079
3.5209374795289694
*****200*****
2019-10-14T14:01:13.693400Z
33.66978451040799
8.224260925828162
83.75434740243446
1026.567137475666
3.520943965930919
*****201*****
2019-10-14T14:01:14.693301Z
33.66970421203044
8.224323029971629
83.77299221127971
1026.5672692610758
3.5209439801987794
*****202*****
2019-10-14T14:01:15.693204Z
33.66967798655328
8.224260925828162
83.75093409333978
1026.567424834122
3.520937519319376
*****203*****
2019-10-14T14:01:16.693211Z
33.66941023431561
8.224385134234751
83.72917495311461
1026.5673045885248
3.5209245782145127
*****204*****
2019-10-14T14:01:17.693321Z
33.66975361460197
8.224385134234751
83.72323406276736
1026.5675807922419
3.520956947863321
*****205*****
2019-10-14T14:01:18.693223Z
33.669759866268414
8.224385134234751
83.76350909031984
1026.5675242011612
3.520956943350838
*****206*****
2019-10-14T14:01:19.693231Z
33.6699002866059
8.224323029971629
83.75800136526814
1026.5675250933953
3.520963409201467
*****207*****
2019-10-14T14:01:20.693237Z
33.669923611634474
8.224385134234751
83.75595198856936
1026.5673957733568
3.5209698721930462
*****208*****
2019-10-14T14:01:21.693764Z
33.669934569038766
8.224385134234751
83.7238983492532
1026.5672965861472
3.5209698642839897
*****209*****
2019-10-14T14:01:22.693251Z
33.669597114782626
8.224385134234751
83.72249326493392
1026.5669667283
3.5209374903100725
*****210*****
2019-10-14T14:01:23.693465Z
33.669657238506545
8.224323029971629
83.72613726754949
1026.5670175644455
3.5209374906041
*****211*****
2019-10-14T14:01:24.693263Z
33.66979750698417
8.224385134234751
83.7481039500966
1026.5671834726193
3.5209569161817003
*****212*****
2019-10-14T14:01:25.693271Z
33.66965071539672
8.224385134234751
83.71390332656088
1026.5671584349361
3.520943975121275
*****213*****
2019-10-14T14:01:26.693173Z
33.66957223235656
8.224385134234751
83.73758190878907
1026.5671919680922
3.520937508270206
*****214*****
2019-10-14T14:01:27.693179Z
33.669506047383564
8.224447238617472
83.75220817814134
1026.5671960009786
3.5209375123513156
*****215*****
2019-10-14T14:01:28.693290Z
33.669977215933876
8.224385134234751
83.7607216283514
1026.567587448778
3.5209763570254937
*****216*****
2019-10-14T14:01:29.693296Z
33.669861156853855
8.224509343119848
83.69894722636532
1026.5674478515227
3.520976353413434
*****217*****
2019-10-14T14:01:30.693303Z
33.67006317143278
8.224447238617472
83.73375801417858
1026.5675680969318
3.520989298353156
*****218*****
2019-10-14T14:01:31.693102Z
33.66993166101147
8.224447238617472
83.73773200423906
1026.5674047284372
3.5209763462152153
*****219*****
2019-10-14T14:01:32.693213Z
33.66986651387383
8.224447238617472
83.71629662512154
1026.56731754074
3.520969869714498
*****220*****
2019-10-14T14:01:33.693635Z
33.66959420111937
8.224447238617472
83.71408719846893
1026.5670749251842
3.5209439722219256
*****221*****
2019-10-14T14:01:34.693122Z
33.66959484566621
8.224447238617472
83.74300270126263
1026.5670690906334
3.5209439717566897
*****222*****
2019-10-14T14:01:35.693544Z
33.669594265574
8.224447238617472
83.70882154154505
1026.5670743417297
3.520943972175402
*****223*****
2019-10-14T14:01:36.693134Z
33.669526672423046
8.224447238617472
83.68897329121386
1026.5670092988516
3.5209374974640886
*****224*****
2019-10-14T14:01:37.693246Z
33.669471315600255
8.224509343119848
83.71846236737272
1026.5669153130054
3.5209374937293942
*****225*****
2019-10-14T14:01:38.693148Z
33.669889124280445
8.224509343119848
83.73114771202617
1026.5671946870275
3.520976333226392
*****226*****
2019-10-14T14:01:39.693884Z
33.66989215374462
8.224509343119848
83.74000872615655
1026.5671672640237
3.5209763310397144
*****227*****
2019-10-14T14:01:40.693369Z
33.669773062481426
8.224633552483397
83.75856811013313
1026.5670551143296
3.520976329616291
*****228*****
2019-10-14T14:01:41.693167Z
33.67002612181738
8.224509343119848
83.74387774859015
1026.5673083839852
3.5209892814035273
*****229*****
2019-10-14T14:01:42.693070Z
33.66967358216257
8.224509343119848
83.7348667045391
1026.567115083003
3.5209569182475926
*****230*****
2019-10-14T14:01:43.693494Z
33.66960488008406
8.224571447741823
83.76939109279479
1026.5671418979975
3.5209569241453558
*****231*****
2019-10-14T14:01:44.692875Z
33.66965469146614
8.224509343119848
83.7793871872469
1026.567286085013
3.52095693188301
*****232*****
2019-10-14T14:01:45.693193Z
33.66958902484124
8.224571447741823
83.76414755316026
1026.5672854231925
3.520956935589819
*****233*****
2019-10-14T14:01:46.692992Z
33.66958780318237
8.224571447741823
83.77083508864935
1026.5672964819366
3.520956936471625
*****234*****
2019-10-14T14:01:47.693623Z
33.669592701477946
8.224571447741823
83.75940913857059
1026.5672521414238
3.520956932935985
*****235*****
2019-10-14T14:01:48.693526Z
33.66967009135472
8.224571447741823
83.76992226920159
1026.567228501609
3.520963400590076
*****236*****
2019-10-14T14:01:49.693221Z
33.669818036742846
8.224571447741823
83.75547096618743
1026.5672430909653
3.520976340845694
*****237*****
2019-10-14T14:01:50.693019Z
33.67002488550005
8.224509343119848
83.77233352979519
1026.567319575209
3.520989282295906
*****238*****
2019-10-14T14:01:51.693233Z
33.66997023685508
8.224571447741823
83.77179834264489
1026.5672191698511
3.5209892780493326
*****239*****
2019-10-14T14:01:52.693137Z
33.66983195333314
8.224571447741823
83.7575047523358
1026.5671171160914
3.5209763308006083
*****240*****
2019-10-14T14:01:53.693558Z
33.66969514478918
8.224633552483397
83.76157507961854
1026.5670835310477
3.520969862333722
*****241*****
2019-10-14T14:01:54.692940Z
33.66960429418421
8.224571447741823
83.74322570442985
1026.567147201687
3.5209569245682637
*****242*****
2019-10-14T14:01:55.693050Z
33.66959031095478
8.224571447741823
83.73726302108649
1026.5672737809919
3.5209569346614886
*****243*****
2019-10-14T14:01:56.693162Z
33.66970353245469
8.224509343119848
83.7333418777312
1026.5675208765872
3.5209634201440325
*****244*****
2019-10-14T14:01:57.693482Z
33.66942954422503
8.224571447741823
83.76633387635265
1026.5673752600123
3.520944003689136
*****245*****
2019-10-14T14:01:58.692968Z
33.66950595722275
8.224633552483397
83.74768198266402
1026.567442281059
3.5209569518569954
*****246*****
2019-10-14T14:01:59.693391Z
33.66985764768609
8.224633552483397
83.74821111474827
1026.5676432512796
3.5209893156249015
*****247*****
2019-10-14T14:02:00.693085Z
33.66994187215
8.224633552483397
83.78697190050467
1026.5675577458353
3.520995778369422
*****248*****
2019-10-14T14:02:01.693091Z
33.66990230522671
8.224695657344625
83.79357116754623
1026.56732081846
3.5209957632366664
*****249*****
2019-10-14T14:02:02.693410Z
33.669921767871244
8.224695657344625
83.80185666273489
1026.5671446398183
3.520995749188306
*****250*****
2019-10-14T14:02:03.693104Z
33.67007300449958
8.224695657344625
83.79170672645411
1026.5671294403455
3.521008687115629
*****251*****
2019-10-14T14:02:04.693008Z
33.66985194113775
8.224571447741823
83.76680858290248
1026.5669361842558
3.520976316373327
*****252*****
2019-10-14T14:02:05.693430Z
33.66975433512426
8.224447238617472
83.76989307046318
1026.5669791831842
3.5209569036515913
*****253*****
2019-10-14T14:02:06.692916Z
33.66967358216257
8.224509343119848
83.80559389743993
1026.567115083003
3.5209569182475926
*****254*****
2019-10-14T14:02:07.693236Z
33.66972240656677
8.224571447741823
83.78413039357686
1026.567431843436
3.520969886348077
*****255*****
2019-10-14T14:02:08.693450Z
33.669756622005515
8.224509343119848
83.78424476713936
1026.5677172098156
3.5209699053432195
*****256*****
2019-10-14T14:02:09.693040Z
33.669680061514946
8.224571447741823
83.79893677006598
1026.5678151608208
3.520969916913333
*****257*****
2019-10-14T14:02:10.693255Z
33.66968669318606
8.224633552483397
83.81687528451154
1026.567836948129
3.5209763919588446
*****258*****
2019-10-14T14:02:11.693053Z
33.67004457675317
8.224633552483397
83.83048885327115
1026.5679818654578
3.521008751328023
*****259*****
2019-10-14T14:02:12.693060Z
33.67001210063002
8.224695657344625
83.85551085301833
1026.5676807492932
3.5210087310768086
*****260*****
2019-10-14T14:02:13.693276Z
33.67011424076997
8.224695657344625
83.83168238393934
1026.5674330707593
3.521015180903755
*****261*****
2019-10-14T14:02:14.692866Z
33.66973181850878
8.224695657344625
83.85201509491007
1026.5668333704098
3.5209763156943326
*****262*****
2019-10-14T14:02:15.693600Z
33.66974143170725
8.224633552483397
83.86187170297423
1026.5666645347685
3.5209698289235525
*****263*****
2019-10-14T14:02:16.693712Z
33.66972347003452
8.224509343119848
83.85950213325738
1026.5666634914642
3.5209568822384676
*****264*****
2019-10-14T14:02:17.693509Z
33.66950167496218
8.224509343119848
83.88667953833773
1026.5666404941776
3.5209374718159525
*****265*****
2019-10-14T14:02:18.693412Z
33.66962376821281
8.224571447741823
83.89909526793006
1026.5669709189258
3.520956910511773
*****266*****
2019-10-14T14:02:19.693002Z
33.66951532160808
8.224509343119848
83.88242927286879
1026.5671938707023
3.5209439854659546
*****267*****
2019-10-14T14:02:20.693008Z
33.669765586091444
8.224509343119848
83.91110437663811
1026.5676360650752
3.520969898872837
*****268*****
2019-10-14T14:02:21.699265Z
33.66962639436554
8.224571447741823
83.88729201579709
1026.56762405683
3.5209634321310808
*****269*****
2019-10-14T14:02:22.699272Z
33.669845012912404
8.224633552483397
83.91151410289501
1026.5677576238602
3.5209893247448862
*****270*****
2019-10-14T14:02:23.699488Z
33.66986325188986
8.224633552483397
83.9159565018981
1026.567592520945
3.5209893115797035
*****271*****
2019-10-14T14:02:24.699182Z
33.66996126954929
8.224633552483397
83.93238979865379
1026.567382157823
3.5209957643681484
*****272*****
2019-10-14T14:02:25.699292Z
33.66998976212252
8.224633552483397
83.91216989654829
1026.5671242400085
3.5209957438019646
*****273*****
2019-10-14T14:02:26.699508Z
33.67000047688629
8.224571447741823
83.91007444563805
1026.56694543508
3.520989256221972
*****274*****
2019-10-14T14:02:27.699201Z
33.669868580709355
8.224571447741823
83.91743013627408
1026.5667855614465
3.5209763043628564
*****275*****
2019-10-14T14:02:28.699208Z
33.66945579157724
8.224633552483397
83.88395134658246
1026.5665425720028
3.5209439410518164
*****276*****
2019-10-14T14:02:29.699319Z
33.66942344447025
8.224571447741823
83.90334689650166
1026.5667535651498
3.5209374845915065
*****277*****
2019-10-14T14:02:30.699638Z
33.66954114985064
8.224633552483397
83.90423833976149
1026.567123707751
3.5209569264544758
*****278*****
2019-10-14T14:02:31.699228Z
33.66957446203961
8.224571447741823
83.90396566986897
1026.567417249247
3.5209569461014163
*****279*****
2019-10-14T14:02:32.699131Z
33.6696225920238
8.224571447741823
83.89831816304172
1026.567658476637
3.5209634348756684
*****280*****
2019-10-14T14:02:33.699136Z
33.66974303255056
8.224509343119848
83.88162034100907
1026.5678402245683
3.5209699151522678
*****281*****
2019-10-14T14:02:34.699144Z
33.6697558510154
8.224571447741823
83.9061208919479
1026.5678060081368
3.5209763857320873
*****282*****
2019-10-14T14:02:35.699150Z
33.66984741202087
8.224633552483397
83.91487809603113
1026.567735906617
3.520989323013169
*****283*****
2019-10-14T14:02:36.699365Z
33.6699952408684
8.224571447741823
83.9119164531856
1026.567669737199
3.520995783539863
*****284*****
2019-10-14T14:02:37.699163Z
33.67002461191616
8.224633552483397
83.88389533859046
1026.5674856816531
3.521002242190417
*****285*****
2019-10-14T14:02:38.699482Z
33.66996347475086
8.224571447741823
83.88971950092093
1026.5672803810514
3.5209892829302607
*****286*****
2019-10-14T14:02:39.699176Z
33.66997571863642
8.224571447741823
83.88940617501969
1026.5671695483024
3.520989274092554
*****287*****
2019-10-14T14:02:40.699495Z
33.66984297845473
8.224571447741823
83.88317191338815
1026.567017315389
3.5209763228426225
*****288*****
2019-10-14T14:02:41.699190Z
33.66977183712515
8.224571447741823
83.89092981916522
1026.5669843897151
3.520969850668717
*****289*****
2019-10-14T14:02:42.699092Z
33.66976384443908
8.224571447741823
83.86559728770371
1026.5670567406087
3.520969856437877
*****290*****
2019-10-14T14:02:43.699203Z
33.66961525427824
8.224571447741823
83.85694863559632
1026.5670479886783
3.520956916657184
*****291*****
2019-10-14T14:02:44.699001Z
33.66967189313006
8.224571447741823
83.85478357645576
1026.567212191583
3.5209633992895384
*****292*****
2019-10-14T14:02:45.699528Z
33.66972969537549
8.224571447741823
83.81693316568132
1026.5673658636888
3.520969881086937
*****293*****
2019-10-14T14:02:46.698909Z
33.66959346788193
8.224633552483397
83.81510597950373
1026.5673270229481
3.52096341220562
*****294*****
2019-10-14T14:02:47.699125Z
33.669661705477786
8.224633552483397
83.81208162177772
1026.5673862307833
3.5209698864705974
*****295*****
2019-10-14T14:02:48.699444Z
33.669593532332215
8.224633552483397
83.82178538099812
1026.567326439527
3.5209634121590985
*****296*****
2019-10-14T14:02:49.699139Z
33.66966647482748
8.224633552483397
83.79987061470545
1026.567343057518
3.520969883028017
*****297*****
2019-10-14T14:02:50.698938Z
33.66988027573271
8.224633552483397
83.81800933311142
1026.567438418116
3.5209892992916663
*****298*****
2019-10-14T14:02:51.699152Z
33.66982865626389
8.224695657344625
83.81682629252181
1026.5673105933304
3.520989292858685
*****299*****
2019-10-14T14:02:52.699055Z
33.66983838590625
8.224695657344625
83.85016363038287
1026.5672225188996
3.520989285835705
*****300*****
2019-10-14T14:02:53.699790Z
33.66984753856328
8.224695657344625
83.84201134975979
1026.5671396675684
3.5209892792292115
*****301*****
2019-10-14T14:02:54.699588Z
33.66985604974333
8.224695657344625
83.8623897153543
1026.567062623101
3.5209892730857546
*****302*****
2019-10-14T14:02:55.699074Z
33.66966435188118
8.224757762325453
83.84991079377849
1026.5668489985853
3.520976320700127
*****303*****
2019-10-14T14:02:56.699811Z
33.66978409049562
8.224633552483397
83.86262427412464
1026.566955287118
3.5209763216561925
*****304*****
2019-10-14T14:02:57.699503Z
33.66964824290436
8.224695657344625
83.875883221603
1026.5669130045758
3.520969852495786
*****305*****
2019-10-14T14:02:58.699094Z
33.669763971298096
8.224633552483397
83.85725069569014
1026.5671374091976
3.5209763361783843
*****306*****
2019-10-14T14:02:59.699309Z
33.66969223376092
8.224695657344625
83.84990819333159
1026.5671916990045
3.5209763442670168
*****307*****
2019-10-14T14:03:00.699126Z
33.669614266785615
8.224695657344625
83.83075435856989
1026.5672205640217
3.520969877020158
*****308*****
2019-10-14T14:03:01.699010Z
33.66933661523833
8.224695657344625
83.81246878676704
1026.56702629715
3.520943983382611
*****309*****
2019-10-14T14:03:02.699016Z
33.66982439654494
8.224695657344625
83.85787503860202
1026.567349153098
3.5209892959334086
*****310*****
2019-10-14T14:03:03.698918Z
33.669926386315844
8.224819867425879
83.86914385018734
1026.567266458185
3.521008705560789
*****311*****
2019-10-14T14:03:04.699133Z
33.67007929519503
8.224819867425879
83.89356079532537
1026.567236118054
3.5210216422996927
*****312*****
2019-10-14T14:03:05.699764Z
33.66994495833165
8.224757762325453
83.91635039286852
1026.5670165297227
3.5210022122998827
*****313*****
2019-10-14T14:03:06.699772Z
33.669999038385804
8.224633552483397
83.8903429473987
1026.5670402705412
3.5209957371063023
*****314*****
2019-10-14T14:03:07.699257Z
33.66958007211045
8.224633552483397
83.8776654972584
1026.566771374916
3.5209568983600565
*****315*****
2019-10-14T14:03:08.699577Z
33.66976255821976
8.224571447741823
83.87920075497364
1026.5670683836515
3.5209698573662775
*****316*****
2019-10-14T14:03:09.699478Z
33.669686059625214
8.224633552483397
83.8402183636987
1026.5671657717153
3.520969868891481
*****317*****
2019-10-14T14:03:10.699278Z
33.66973220026978
8.224571447741823
83.8381354894687
1026.567343188905
3.5209698792788795
*****318*****
2019-10-14T14:03:11.699285Z
33.669597712891544
8.224633552483397
83.85319330424784
1026.5672885960555
3.520963409141515
*****319*****
2019-10-14T14:03:12.699394Z
33.669665950489524
8.224633552483397
83.87594511623276
1026.567347803946
3.520969883406491
*****320*****
2019-10-14T14:03:13.699297Z
33.669877820727855
8.224633552483397
83.85668907869083
1026.567460641227
3.5209893010637194
*****321*****
2019-10-14T14:03:14.698887Z
33.670091686863124
8.224633552483397
83.84692934154413
1026.567555418315
3.5210087173232423
*****322*****
2019-10-14T14:03:15.698895Z
33.67009832561532
8.224633552483397
83.87926456412534
1026.5674953236871
3.5210087125313168
*****323*****
2019-10-14T14:03:16.699004Z
33.669837155448
8.224695657344625
83.89708709366262
1026.567233657216
3.520989286723865
*****324*****
2019-10-14T14:03:17.698907Z
33.67004483387199
8.224695657344625
83.88870243576609
1026.5673844434814
3.5210087074494782
*****325*****
2019-10-14T14:03:18.698913Z
33.669700167399775
8.224695657344625
83.87854176715263
1026.5671198820262
3.5209763385404163
*****326*****
2019-10-14T14:03:19.699440Z
33.66983845036091
8.224695657344625
83.87075267739185
1026.5672219354449
3.5209892857891805
*****327*****
2019-10-14T14:03:20.698927Z
33.669703788502495
8.224695657344625
83.85981541635503
1026.5670871030688
3.5209763359266613
*****328*****
2019-10-14T14:03:21.698828Z
33.66950036512848
8.224695657344625
83.8577997678942
1026.5668978093427
3.5209569122012945
*****329*****
2019-10-14T14:03:22.698941Z
33.66970926428975
8.224695657344625
83.87628374164595
1026.5670375351667
3.520976331974177
*****330*****
2019-10-14T14:03:23.698946Z
33.669708745733644
8.224695657344625
83.89891971892618
1026.5670422292364
3.5209763323484755
*****331*****
2019-10-14T14:03:24.698953Z
33.66963618675891
8.224695657344625
83.91234481495879
1026.5670221393136
3.5209698611980387
*****332*****
2019-10-14T14:03:25.698856Z
33.669562532177
8.224695657344625
83.91139566283758
1026.567011967942
3.5209633908431983
*****333*****
2019-10-14T14:03:26.698861Z
33.669564519714534
8.224757762325453
83.9369864077743
1026.567075793263
3.52096986923594
*****334*****
2019-10-14T14:03:27.699285Z
33.66955839671944
8.224757762325453
83.93977572990548
1026.5671312202664
3.520969873655615
*****335*****
2019-10-14T14:03:28.698979Z
33.66962418497278
8.224757762325453
83.9491834196115
1026.5672125987162
3.5209763496931616
*****336*****
2019-10-14T14:03:29.699298Z
33.66971341267824
8.224881972646017
83.95291020299992
1026.5672454262474
3.520995768503966
*****337*****
2019-10-14T14:03:30.698888Z
33.669915426240344
8.224819867425879
83.94151214783818
1026.5673656707793
3.521008713471965
*****338*****
2019-10-14T14:03:31.698791Z
33.669725526907754
8.224881972646017
83.95244046698514
1026.5671357651534
3.5209957597596664
*****339*****
2019-10-14T14:03:32.698902Z
33.66986799324656
8.224881972646017
83.95664838812021
1026.5671999464676
3.5210087040168756
*****340*****
2019-10-14T14:03:33.699220Z
33.66992876823516
8.224819867425879
83.97427637361591
1026.567244896642
3.52100870384148
*****341*****
2019-10-14T14:03:34.698812Z
33.66971928872346
8.224819867425879
83.9466803086003
1026.5671104214887
3.5209892844164696
*****342*****
2019-10-14T14:03:35.698923Z
33.669774584768945
8.224757762325453
83.95554145323715
1026.5672049648635
3.52098928819562
*****343*****
2019-10-14T14:03:36.699136Z
33.669836705153145
8.224757762325453
83.97911781686898
1026.5673195476231
3.520995766895007
*****344*****
2019-10-14T14:03:37.698726Z
33.669761890444676
8.224757762325453
83.98957102790399
1026.5673198766096
3.5209892973585855
*****345*****
2019-10-14T14:03:38.705191Z
33.66962051414344
8.224757762325453
83.99400068470342
1026.5672458280294
3.5209763523428284
*****346*****
2019-10-14T14:03:39.705406Z
33.66956096443813
8.224819867425879
83.96874203725689
1026.567189792724
3.5209763516342627
*****347*****
2019-10-14T14:03:40.704892Z
33.66956341067644
8.224819867425879
83.94279315123285
1026.5671676486545
3.520976349868521
*****348*****
2019-10-14T14:03:41.705420Z
33.669832506993906
8.224757762325453
83.94708143554668
1026.5673575501996
3.5209957699253067
*****349*****
2019-10-14T14:03:42.705322Z
33.66991298284825
8.224819867425879
83.97241183764812
1026.5673877888323
3.5210087152356513
*****350*****
2019-10-14T14:03:43.705433Z
33.6697790241805
8.224819867425879
83.9819025006177
1026.5672465921548
3.5209957648372616
*****351*****
2019-10-14T14:03:44.705647Z
33.6697219291553
8.224881972646017
83.98994872143155
1026.5671683329033
3.520995762356596
*****352*****
2019-10-14T14:03:45.705445Z
33.66978386109847
8.224819867425879
83.96733345380086
1026.567202807305
3.520995761345888
*****353*****
2019-10-14T14:03:46.705141Z
33.66986504289549
8.224881972646017
83.98432927021081
1026.5672266536487
3.521008706146496
*****354*****
2019-10-14T14:03:47.705461Z
33.669868007799565
8.224881972646017
83.98195289384809
1026.5671998147304
3.5210087040063707
*****355*****
2019-10-14T14:03:48.705050Z
33.67007813506679
8.224881972646017
83.98714787291038
1026.5673284290494
3.521028123006493
*****356*****
2019-10-14T14:03:49.705056Z
33.669948542018034
8.22494407798564
83.98527888747964
1026.567229521505
3.5210216492931585
*****357*****
2019-10-14T14:03:50.704855Z
33.669865039984586
8.224881972646017
84.00004643381158
1026.567226679999
3.5210087061485966
*****358*****
2019-10-14T14:03:51.705486Z
33.6698435821344
8.224819867425879
83.96004082060223
1026.5673391090195
3.521002241781866
*****359*****
2019-10-14T14:03:52.705908Z
33.66970065135882
8.224881972646017
83.95904223195672
1026.5673609451976
3.5209957777153704
*****360*****
2019-10-14T14:03:53.705499Z
33.66981541722325
8.224819867425879
84.02269524668
1026.5675940643619
3.5210022621118844
*****361*****
2019-10-14T14:03:54.704777Z
33.66967828477881
8.224881972646017
84.04366410481208
1026.5675634141692
3.52099579386012
*****362*****
2019-10-14T14:03:55.705409Z
33.66960682797263
8.224881972646017
84.02407619648093
1026.567533350164
3.5209893219001143
*****363*****
2019-10-14T14:03:56.705728Z
33.66968002776141
8.224881972646017
84.01048971396274
1026.567547636143
3.52099579260199
*****364*****
2019-10-14T14:03:57.705004Z
33.66977016736459
8.22494407798564
84.0034599974193
1026.5674903912313
3.521008730936546
*****365*****
2019-10-14T14:03:58.704906Z
33.66992301273069
8.22494407798564
84.02764139083992
1026.567460618011
3.521021667720787
*****366*****
2019-10-14T14:03:59.705435Z
33.669947493543226
8.225006183444975
84.01717572456262
1026.5673208212356
3.5210281299193498
*****367*****
2019-10-14T14:04:00.705337Z
33.67001793692752
8.22494407798564
84.04825904273987
1026.5672782538206
3.5210281227652676
*****368*****
2019-10-14T14:04:01.704927Z
33.669955887037354
8.22494407798564
84.02389642313075
1026.5671630330148
3.521021643991371
*****369*****
2019-10-14T14:04:02.705246Z
33.670016662280474
8.224881972646017
84.03209407646857
1026.5672079831886
3.521021643815958
*****370*****
2019-10-14T14:04:03.704941Z
33.66987477267674
8.224881972646017
84.00965072914335
1026.5671385777277
3.521008699123356
*****371*****
2019-10-14T14:04:04.704947Z
33.669806008244336
8.22494407798564
84.0055901943537
1026.5671659501697
3.5210087050657273
*****372*****
2019-10-14T14:04:05.705369Z
33.66999976383408
8.22494407798564
83.96232302726943
1026.5674427600582
3.5210281358830153
*****373*****
2019-10-14T14:04:06.705168Z
33.6696416257414
8.22494407798564
83.97032272945192
1026.5673001631696
3.5209957766283235
*****374*****
2019-10-14T14:04:07.704967Z
33.66976701810417
8.22494407798564
83.94768391859424
1026.5675188992545
3.5210087332097677
*****375*****
2019-10-14T14:04:08.704973Z
33.66975934879018
8.22494407798564
83.94482485737488
1026.5675883241809
3.5210087387456914
*****376*****
2019-10-14T14:04:09.704980Z
33.66975799539525
8.22494407798564
83.96817349445696
1026.5676005755263
3.521008739722611
*****377*****
2019-10-14T14:04:10.705611Z
33.669702701757046
8.225006183444975
83.93479345231691
1026.5675060090139
3.5210087359416087
*****378*****
2019-10-14T14:04:11.704785Z
33.66971359063395
8.225006183444975
83.95067738765012
1026.5674074393803
3.521008728081695
*****379*****
2019-10-14T14:04:12.704999Z
33.66946228370675
8.225068289023909
83.98572653603274
1026.567056513192
3.520989295156482
*****380*****
2019-10-14T14:04:13.705005Z
33.66982151614366
8.225068289023909
84.0148909017113
1026.5671891879151
3.5210216535964283
*****381*****
2019-10-14T14:04:14.704908Z
33.669696701981664
8.225068289023909
84.00051208023646
1026.5669652189576
3.5210086965789023
*****382*****
2019-10-14T14:04:15.705331Z
33.66971518619311
8.225130394722555
83.98301176476234
1026.5668797034582
3.5210151630964144
*****383*****
2019-10-14T14:04:16.705651Z
33.66970688308269
8.225068289023909
84.02952421428859
1026.5668730568261
3.5210086892299337
*****384*****
2019-10-14T14:04:17.704824Z
33.669702752065604
8.225068289023909
84.03269386016586
1026.5669104519127
3.5210086922118013
*****385*****
2019-10-14T14:04:18.704832Z
33.66983252617714
8.225068289023909
84.02126507354681
1026.5670895222606
3.521021645649096
*****386*****
2019-10-14T14:04:19.705671Z
33.66962129911777
8.225130394722555
84.0089102171649
1026.567052686911
3.5210087073132597
*****387*****
2019-10-14T14:04:20.705261Z
33.669813764838516
8.225130394722555
84.02286166376321
1026.5673411629955
3.521028139060905
*****388*****
2019-10-14T14:04:21.705060Z
33.669593330464444
8.225130394722555
84.05726094239323
1026.5673058691557
3.521008727501922
*****389*****
2019-10-14T14:04:22.705275Z
33.66952282731169
8.225192500540686
84.09093286258957
1026.567348986139
3.521008734699755
*****390*****
2019-10-14T14:04:23.705072Z
33.66952539480913
8.22525460647853
84.09121536449192
1026.5674075535103
3.5210152127064966
*****391*****
2019-10-14T14:04:24.704870Z
33.66959421465496
8.22525460647853
84.08661544597948
1026.567461483547
3.5210216865884765
*****392*****
2019-10-14T14:04:25.704669Z
33.669543947793166
8.225316712535914
84.02750576682357
1026.5673214118965
3.5210216791788893
*****393*****
2019-10-14T14:04:26.705198Z
33.66976792581595
8.225316712535914
84.0425932020378
1026.5673246190572
3.52104108820763
*****394*****
2019-10-14T14:04:27.705412Z
33.66993609852735
8.225378818713011
84.06084628831059
1026.5672378920174
3.5210604938670653
*****395*****
2019-10-14T14:04:28.704792Z
33.67015764250573
8.225316712535914
84.08845965915506
1026.5671813461754
3.5210734248318833
*****396*****
2019-10-14T14:04:29.705112Z
33.67023163240222
8.22525460647853
84.10836114427836
1026.5671066823527
3.5210734151182437
*****397*****
2019-10-14T14:04:30.704806Z
33.669949093829935
8.225192500540686
84.14112488096683
1026.5668748458268
3.5210410448229763
*****398*****
2019-10-14T14:04:31.704813Z
33.669720477458206
8.225130394722555
84.18392186919382
1026.5668318054572
3.5210151592770416
*****399*****
2019-10-14T14:04:32.704715Z
33.66989285021362
8.225068289023909
84.19479441049857
1026.567220363838
3.521028125668615
*****400*****
2019-10-14T14:04:33.704722Z
33.66986178157013
8.225068289023909
84.16057132712677
1026.567501605467
3.521028148094863
*****401*****
2019-10-14T14:04:34.705249Z
33.66983542247814
8.225068289023909
84.1731554644945
1026.5677402160964
3.521028167121724
*****402*****
2019-10-14T14:04:35.704735Z
33.66962529024311
8.225130394722555
84.1350176916594
1026.5676934728574
3.521015227986243
*****403*****
2019-10-14T14:04:36.704845Z
33.66998264194166
8.225192500540686
84.14343134114269
1026.5679249810694
3.521054067766878
*****404*****
2019-10-14T14:04:37.705268Z
33.670169483929755
8.225378818713011
84.11139677200607
1026.5678328649153
3.521086419797374
*****405*****
2019-10-14T14:04:38.704859Z
33.67027349117324
8.225378818713011
84.13297813163756
1026.5675682750064
3.5210928683318943
*****406*****
2019-10-14T14:04:39.705595Z
33.67022313260777
8.22525460647853
84.10330708137352
1026.567183623856
3.5210734212536505
*****407*****
2019-10-14T14:04:40.705081Z
33.669967341938786
8.225192500540686
84.11562583635975
1026.5667096603186
3.5210410316510323
*****408*****
2019-10-14T14:04:41.704983Z
33.66977552272676
8.225192500540686
84.10976470902058
1026.566415328913
3.521021599408483
*****409*****
2019-10-14T14:04:42.705510Z
33.669621911344215
8.225130394722555
84.1360559436038
1026.5663702347517
3.5210021833228446
*****410*****
2019-10-14T14:04:43.704788Z
33.66932330846909
8.225130394722555
84.12434826895658
1026.5663656411089
3.5209763047155302
*****411*****
2019-10-14T14:04:44.704587Z
33.66968009279064
8.225006183444975
84.11120331857128
1026.567033759853
3.52100222871255
*****412*****
2019-10-14T14:04:45.704800Z
33.66984645334681
8.225006183444975
84.07086049230163
1026.5675585518131
3.5210216792897664
*****413*****
2019-10-14T14:04:46.705432Z
33.6698817557812
8.225006183444975
84.04313332444555
1026.5679158971002
3.521028177370818
*****414*****
2019-10-14T14:04:47.704710Z
33.6696768281018
8.225130394722555
83.98341392758903
1026.567903848598
3.5210217143432354
*****415*****
2019-10-14T14:04:48.705029Z
33.669964653540724
8.225192500540686
83.97560701125354
1026.5680878182275
3.5210540807516657
*****416*****
2019-10-14T14:04:49.705348Z
33.66999860796807
8.22525460647853
83.95242892725442
1026.5678622562857
3.521060536134833
*****417*****
2019-10-14T14:04:50.704417Z
33.67004056386032
8.22525460647853
83.92308618082716
1026.5674824589523
3.5210605058494098
*****418*****
2019-10-14T14:04:51.704632Z
33.66987968096526
8.22525460647853
83.91742181894088
1026.5669080835974
3.5210410512331447
*****419*****
2019-10-14T14:04:52.704951Z
33.66982989875146
8.225130394722555
83.92782619747102
1026.5665182058708
3.521021603852143
*****420*****
2019-10-14T14:04:53.705166Z
33.669637000058216
8.225068289023909
83.94139809871284
1026.5661518396348
3.52099569258125
*****421*****
2019-10-14T14:04:54.704756Z
33.66954990607797
8.22494407798564
83.93750064051746
1026.5660997059176
3.520976272231388
*****422*****
2019-10-14T14:04:55.704764Z
33.66924100222711
8.224881972646017
83.92186530959896
1026.5661065371553
3.520943921321488
*****423*****
2019-10-14T14:04:56.705394Z
33.669397769804604
8.224819867425879
83.93338677793137
1026.5666363440432
3.5209568988716526
*****424*****
2019-10-14T14:04:57.704671Z
33.669943102550555
8.224633552483397
83.89619832866934
1026.5675466080581
3.520995777481303
*****425*****
2019-10-14T14:04:58.704679Z
33.66989200162367
8.224571447741823
83.84970051716071
1026.5679273666067
3.5209893345203853
*****426*****
2019-10-14T14:04:59.705310Z
33.66982038539377
8.224757762325453
83.82849458859165
1026.5681441903728
3.5210023022190304
*****427*****
2019-10-14T14:05:00.704588Z
33.66982050831708
|
examples/design/multi-tube-design-simple.ipynb | ###Markdown
Multi-tube design example (simple)Design a strand displacement gate. See the accompanying design specification (PDF file). See also the LaTeX spec file that you can edit to make your own design specs in a standardized format.This is a 1-step reaction. To design 1 gate, there are 2 elementary step tubes plus 1 global crosstalk tube. Target test tubes: - Reactants (Step 0)- Products (Step 1) - Global crosstalkMaterial: RNA Temperature: 23 C
###Code
# Import Python NUPACK module
from nupack import *
# Define physical model
my_model = Model(material='rna', celsius=23)
# Define sequence domains
da = Domain('N10', name='da')
db = Domain('N8', name='db')
# Define strands containing these domains
sX = TargetStrand([da, db], name='sX')
sA = TargetStrand([~db, ~da], name='sA')  # ~db and ~da are the reverse complements of db and da
sB = TargetStrand([da], name='sB')
sA_toe = TargetStrand([~db], name='sA_toe')
# Define target complexes
cX = TargetComplex([sX], 'U18', name='cX')
cAB = TargetComplex([sA, sB], 'U8D10+', name='cAB')
cXA = TargetComplex([sX, sA], 'D18+', name='cXA')
cB = TargetComplex([sB], 'U10', name='cB')
cA_toe = TargetComplex([sA_toe], 'U8', name='cA_toe')
# Define target test tubes
reactants = TargetTube(on_targets={cX: 1e-08, cAB: 1e-08},
off_targets=SetSpec(max_size=2, exclude=[cXA]), name='reactants')
products = TargetTube(on_targets={cXA: 1e-08, cB: 1e-08},
off_targets=SetSpec(max_size=2), name='products')
crosstalk = TargetTube(on_targets={cX: 1e-08, cAB: 1e-08, cA_toe: 1e-08, cB:1e-8},
off_targets=SetSpec(max_size=2, exclude=[cXA, [sX, sA_toe]]), name='crosstalk')
# Set a stop condition of 2%
# Set seed for random number generation to get a reproducible result for this demo
my_options = DesignOptions(f_stop=0.02, seed=93)
# Define and run the test tube design job
my_design = tube_design(tubes=[reactants, products, crosstalk], model=my_model, options=my_options)
my_results = my_design.run(trials=1)[0]
# Display the design results
my_results
###Output
_____no_output_____ |
1_transform/us_land_temp.ipynb | ###Markdown
Global Land Temperature by Country* Remove incomplete rows* Deal with error-prone columns* Filter to United States records only* Drop unneeded columns* Lowercase/rename the columns* Normalize the data* Save to CSV
###Code
import pandas as pd
file_path = '../data/globalLandTemp.csv'
df = pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Check columns
###Code
df.columns
###Output
_____no_output_____
###Markdown
Data types
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
length of dataset
###Code
df.count()
###Output
_____no_output_____
###Markdown
Dropping any rows with missing values
###Code
df = df.dropna()
df.count()
###Output
_____no_output_____
###Markdown
Filtering only United States
###Code
df = df[df.Country == 'United States']
df
###Output
_____no_output_____
###Markdown
Change Celsius to Fahrenheit* define a conversion function* apply the function to the columns AverageTemperature and AverageTemperatureUncertainty
###Code
# define function to convert value(s) from Celsius to Fahrenheit
def cel_to_fer(x):
    # convert values from Celsius to Fahrenheit using Fahrenheit = (Celsius * 9/5) + 32
    # can take a single value, a variable, or a numpy array as input
    x = (x * 9/5) + 32
    # returns value(s) converted from Celsius to Fahrenheit
    return x
df["AverageTemperature"] = df["AverageTemperature"].apply(cel_to_fer)
df["AverageTemperatureUncertainty"] = df["AverageTemperatureUncertainty"].apply(cel_to_fer)
df.head()
###Output
_____no_output_____
###Markdown
Convert column dt to datetime
###Code
df['dt'] = pd.to_datetime(df['dt'])
df.dtypes
###Output
_____no_output_____
###Markdown
Using .dt to extract year only
###Code
df['dt'] = df['dt'].dt.year
df.head()
###Output
_____no_output_____
###Markdown
Group by year and take the mean
###Code
group_df = df.groupby(df['dt'])
group_df = group_df.mean()
group_df.head()
group_df.count()
###Output
_____no_output_____
###Markdown
Reset the index so the year becomes a regular column
###Code
group_df.reset_index(level=['dt'], inplace=True)
group_df.head()
###Output
_____no_output_____
###Markdown
lowercase/rename columns
###Code
group_df = group_df.rename(columns={'dt':'year','AverageTemperature':'avg_temp','AverageTemperatureUncertainty':'avg_temp_uncert'})
group_df.head()
###Output
_____no_output_____
###Markdown
Exporting to a csv file
###Code
group_df.to_csv('../data_transformed/us_land_temp.csv')
###Output
_____no_output_____ |
Loops_continuum2.ipynb | ###Markdown
More Quick Lessons & Attendant Exercises Have 15 minutes? Work through some of these little lessons. Finish them up? Write some of your own. You don't have to share them with anyone (although you could): Creating your own problems to solve is easily one of the most effective ways to gain confidence in approaching data science through computation. Some of this will be new, some of it familiar. Work on the parts that are most interesting to you, and trust that the others will become interesting in time. LOOPS OK: Let's review a bit as we move into more new stuff. Let's start by counting from 0 to 9.
###Code
#Here's one way:
a = 0
print(a)
a = 1
print(a)
a = 2
print(a)
a = 3
print(a)
# Here's a second way:
a = 0
print(a)
a = a + 1
print (a)
a = a + 1
print (a)
a = a + 1
print (a)
# Not much better. Here's a third way:
for n in range(0,10):
print(n)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
So how'd that work? The `n` in the statement `for n in range(0,10):` works as our counter: It is going to increase ('increment') by 1 every time this line is repeated. (It turns out that this basic act -- adding 1 to a variable -- is one of the most important features of most programming languages).In our statement, the `range()` function defines a range of numbers. In this case, a range with a low of 0 and an upper limit of 10. So one other thing we ought to make clear then: Look back at the output from the `for` loop: Do you notice that we didn't count from one to ten? Instead, we counted from 0 to 9. That seems... weird, right?Right: It started at 0, like you'd expect, but then it ended at 9. Why is that? Didn't we say "10" in our range? Well, yes. Sigh. But that's the way things work: `range(0,10)` means "from 0 and up to but not including 10."
###Code
# Self-evidently, perhaps:
for n in range(2,8):
print(n)
# Again: We use 8 as the upper-most bound,
# but we never actually get to that number.
###Output
2
3
4
5
6
7
###Markdown
Arguments in Range()Fine. But hold on a moment, cowpoke. The upper- and lower-bound part makes sense. But there's always room for another argument: Let's suppose we add another number to that range of yours? What do you suppose this will do? for n in range(1,10,3): print(n)It turns out that the range() function allows UP TO 3 arguments: range (from, up to, step)**From** is the starting number, **up to** tells us the upper bounds (which we'll never reach), and **step** describes the number by which we will increment along the way. By default, **step is assumed to be 1**. That's why we often don't include it in our code: We are lazy, and the computer fills it in for us. In fact, **the same thing goes for the first argument**, `from`: We almost always just assume `from` is going to be a zero.>REMEMBER: You begin to count with the number "1", but computers almost always start counting with the number "0". Why? Because they are sneaky and they want to finish counting before we do. Let's take a look at some examples of how these default values work: This loop, for instance: for n in range(0,5,1): print(n)Is exactly the same as this: for n in range(0,5): print(n)Is exactly the same as this: for n in range(5): print(n)
###Code
for n in range(5):
print(n)
###Output
0
1
2
3
4
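###Markdown
And as for that `range(1,10,3)` teaser above -- here is what it actually does, shown as a quick aside with the expected values noted in comments:
###Code
# start at 1, stop before 10, step by 3
for n in range(1, 10, 3):
    print(n)
# expected: 1, 4, 7 -- each value is 3 more than the last,
# and 10 itself is never reached
###Output
_____no_output_____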
###Markdown
Fine. `From`, `Up to`, and `Step` are always there, even if they aren't *always there*. But I can include them and use less conventional values to do cool things.Let's say I've built some kind of super-villain compound and I am going to launch the missiles (... or I'm counting down the final seconds of the end of the year... or I'm holding my breath in an underwater-breath-holding-Olympic-contest, whatever, I don't know, use your imagination): My point: If a step of 1 went up, shouldn't a `step` of -1 go... down?
###Code
# Piece of cake: We set the lower bound as the upper,
# set the upper bound as the lower,
# and set the increment to -1 so that it decrements:
for n in range(5,0,-1):
print(n)
# And remember: Variables are every bit as meaningful as digits.
# Below, I've used variables in place of digits. Variables are
# generally more flexible: They provide relative values, where
# digits only provide absolute ones.
low = 2
high = 11
step = 4
for n in range(low,high,step):
print(n)
# See how it stops before it hits the high bound, no matter how close it was?
# What's more, bear this in mind: The n that we're getting out of
# that counter function will resolve to a numeric value:
# We can plug it right back into some other expression.
# In this case, I've done just that, and I've built
# something called a "nested loop".
# Think of nested-loops as being like Russian nesting dolls
# -- one doll inside another doll. Here, one loop
# runs inside another loop.
# cleaning staff schedule:
# example 'nested loop' code
groundfloor = 1
penthouse = 4
room_low = 10
room_high = 15
for floor in range(groundfloor, penthouse):
for suite in range(room_low, room_high):
assignment = str(floor) + str(suite)
print("Please make ready room: ", assignment)
print("Please also clean Floor", penthouse)
###Output
Please make ready room: 110
Please make ready room: 111
Please make ready room: 112
Please make ready room: 113
Please make ready room: 114
Please make ready room: 210
Please make ready room: 211
Please make ready room: 212
Please make ready room: 213
Please make ready room: 214
Please make ready room: 310
Please make ready room: 311
Please make ready room: 312
Please make ready room: 313
Please make ready room: 314
Please also clean Floor 4
###Markdown
See how well that worked? Well, worked well with one possible exception: If our Penthouse Suite is on level 4, then our cleaning bots will never get there: By assigning a range(1,4), we guaranteed that they never go up past 3. So I just added a final print statement to clean up our loose ends.It's worth pointing out that the never-reach-the-topmost-number thing seems weird -- but on the other side of the number line, there is more weirdness: We never count from 1, but always start at 0. Both of those standards cause a lot of confusion -- but in the end, they tend to balance one another out. So don't overthink this: It often takes care of itself.
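If you would rather have the loop itself reach the top floor -- instead of patching it with that extra print statement -- one small tweak (a quick aside, not part of the original schedule) is to push the upper bound one past the penthouse:
###Code
# cleaning staff schedule, revisited:
# range(groundfloor, penthouse + 1) now includes floor 4 itself
groundfloor = 1
penthouse = 4
room_low = 10
room_high = 15
for floor in range(groundfloor, penthouse + 1):
    for suite in range(room_low, room_high):
        assignment = str(floor) + str(suite)
        print("Please make ready room: ", assignment)
###Output
_____no_output_____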
###Code
# In any event, the building metaphor is still a good way to
# think about 'nested loops'. If you're going to go through
# every room in a building, you're probably not going to do
# it all willy-nilly and randomly: You'll probably do one
# floor at a time, and while you're on that floor, you
# visit each room in turn.
# And THAT is a nested loop:
# Go to a floor. Go through each room. Go to the next
# floor. Go through each room. Etc.
###Output
_____no_output_____
###Markdown
Note that as before, we can run things backwards this time, high to low, or even add steps to skip through some rooms or some floors. Let's start from the top this time, and just assign our cleaning-bots the even-numbered rooms.
###Code
# cleaning staff schedule 2:
# even rooms, top to bottom
basement = 0
penthouse = 3
room_low = 10
room_high = 16
# now add a step of -1
# Outermost Loop (Floor by floor loop)
for floor in range(penthouse, basement, -1):
# Innermost Loop (Room by room loop)
# add a step of positive 2 (for even numbers)
for suite in range(room_low, room_high, 2):
assignment = str(floor) + str(suite)
print("Please prepare room: ", assignment)
###Output
Please prepare room: 310
Please prepare room: 312
Please prepare room: 314
Please prepare room: 210
Please prepare room: 212
Please prepare room: 214
Please prepare room: 110
Please prepare room: 112
Please prepare room: 114
###Markdown
Printing Output Without LF
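Before the full example below, here is the core trick in isolation (a small aside, not part of the original lesson): by default `print()` finishes every call with a LineFeed, and the `end=` argument swaps that LineFeed for whatever you like -- say, a comma and a space.
###Code
# end=', ' replaces the default newline, so all three numbers
# land on one line: 0, 1, 2,
for n in range(3):
    print(n, end=', ')
###Output
_____no_output_____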
###Code
low = 2
high = 15
step = 3
# Outermost loop
for n in range(low,high,step):
print('') #<-- print('nothing')
# Inner loop
for c in range(0, n):
print('*', end='')
###Output
**
*****
********
***********
**************
###Markdown
OK, let me take a moment and provide some explanation for how we used those `print()` statements, above. (Although you shouldn't hesitate to start by making small changes to the code above and see what happens for yourself: Always a better way to learn than to hear me explain it.) By default, a `print()` function prints your variable, and then it adds a single additional character: a LineFeed -- in effect, like I've struck *return* or *enter*. (Truth be told, when we write it out, it *looks* like two characters (`\n`)... and I suppose when we say it out loud, it *sounds* like three syllables ("escape-N") -- but it is actually just one 8-bit character to the computer.) So to make the lines above, I need to tell Python to knock it off with the LineFeeds. I do that (in a surprisingly clunky way) by just appending (adding) a **second argument** to my print statement. I say "Print this", and then I add "Oh, and by the way, you should end a line by printing nothing." But I could say "Oh, and could you add a "," and a space at the end, please?" Or whatever. The point being I ask it to print one thing **instead** of a LineFeed (`\n`). Here's what happens when I take that approach and apply it to the code above:
###Code
low = 2
high = 15
step = 3
for n in range(low,high,step):
for c in range(0, n):
print('*', end='')
###Output
****************************************
###Markdown
**Wait**, you'll say. **That's not what we was promised!** 'Tis true, says I. Let's change the code around a bit and see if we can figure out why. (And I'm changing some of the numbers a bit in order to make it easier to follow).
###Code
low = 3
high = 8
step = 1
for n in range(low,high,step):
for c in range(0, n):
print(c, end='')
# Ah-ha! Let me break the line above apart:
# 012 <-- n is 3
# 0123 <-- n is 4
# 01234 <-- n is 5, etc.
# The problem is that after each cycle, it wasn't adding a return:
# That last print statement, on line 7, told it never to do so. end=''
# is the same as "Don't Print a Return".
# BUT if we sneak a blank line INSIDE the repetition of the
# first loop but OUTSIDE the repetition of the second loop,
# then we could separate the lines again. To wit:
low = 3
high = 8
step = 1
for n in range(low,high,step):
print('') # <-- prints nothing, but will add a <return>
# after each new group of digits prints.
# Note also that it is part of the first cycle (for n)
# but it is outside the range of the second cycle (for c)
for c in range(0, n):
print(c, end='')
# We could also do something like this:
low = 3
high = 8
step = 1
for n in range(low,high,step):
print(n, end = ': ')
for c in range(0, n):
print(c, end='')
print('')
###Output
3: 012
4: 0123
5: 01234
6: 012345
7: 0123456
|
Materials/Session_3/web_gathering_tasks.ipynb | ###Markdown
Gathering data from the web | Mini Task **Author:** Ties de Kok ([Personal Website](http://www.tiesdekok.com)) **Last updated:** 27 June 2018 **Python version:** Python 3.6 **License:** MIT License *Introduction* In this notebook I will provide you with "tasks" that you can try to solve. Most of what you need is discussed in the tutorial notebooks, the rest you will have to Google (which is an important exercise in itself). *Relevant notebooks*1) [`0_python_basics.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/0_python_basics.ipynb) 2) [`2_handling_data.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/2_handling_data.ipynb) 3) [`4_web_scraping.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/4_web_scraping.ipynb) Mini Tasks ---------------- The goal of this mini-task is to get hands-on experience with gathering data from the Web using `Requests` and `Requests-HTML`. The tasks below are split up into two sections: 1. API tasks 2. Web scraping tasks Import required packages
###Code
import requests
from requests_html import HTMLSession
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
This is a convenience function to make it easier to show images in the Jupyter Notebook
###Code
from IPython.display import HTML
import time
def show_image(url):
return HTML('<img src="{}?{}"></img>'.format(url, int(time.time())))
###Output
_____no_output_____
###Markdown
This is a piece of code to prevent annoying warnings
###Code
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
###Output
_____no_output_____ |
intro_exercises2_solutions_2020.ipynb | ###Markdown
Exercises to Lecture 2: Introduction to Python 3.6 By Dr. Anders S. Christensen `anders.christensen @ unibas.ch` Exercise 2.1: define a function One of the fundamental things we learned was how to define functions. For example, the function defined below, `square(x)`, calculates twice the square, $2x^2$:
###Code
def square(x):
y = 2 * x**2
return y
print(square(2))
###Output
8
###Markdown
Question 2.1.1: Similarly to the above code, make a function called, for example, `poly(x)`, to calculate the following polynomial:\begin{equation}p\left( x \right) = 5 x^2 - 4x + 1\end{equation}Lastly, print the value of $p\left( 5 \right)$
###Code
def poly(x):
# Fill out the rest of the function yourself!
p = 5* x**2 - 4*x + 1
return p
# Print the value for x=5
print(poly(5))
###Output
106
###Markdown
Exercise 2.2: Loop within function The code below implements the product of all numbers up to n, i.e. the factorial function "$!$"\begin{equation}f(n) = n! = \prod_{i=1}^n i = 1 \cdot 2 \cdots (n-1) \cdot n\end{equation}As an example, the code to calculate $5!$ is shown here:
###Code
n = 5
f = 1
for i in range(1,n+1):
f = f * i
print("Factorial", n, "is", f)
###Output
Factorial 5 is 120
###Markdown
Question 2.2.1Unfortunately the above code is not very practical and re-usable, and will only work for $n=5$. Instead we would like a function named `factorial(n)`. In this Exercise, write your own function which calculates the factorial.As output print `factorial(10)`.
###Code
def factorial(n):
# Write the rest of the function yourself
f = 1
for i in range(1,n+1):
f = f * i
return f
print(factorial(10))
###Output
3628800
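###Markdown
As a quick sanity check -- not part of the original exercise -- the hand-written loop can be compared against Python's built-in `math.factorial`:
###Code
import math
# the factorial(n) defined above should agree with the standard library
print(factorial(10) == math.factorial(10))  # expected: True
###Output
_____no_output_____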
###Markdown
Question 2.2.2: Using the `factorial(n)` function you wrote in the previous question, print $n!$ for all $n$ from 1 to 20. Hint: Use another for loop!
###Code
# Below, write the code to print all n! from n=1 to n=20
for n in range(1, 21):
print(n, factorial(n))
###Output
1 1
2 2
3 6
4 24
5 120
6 720
7 5040
8 40320
9 362880
10 3628800
11 39916800
12 479001600
13 6227020800
14 87178291200
15 1307674368000
16 20922789888000
17 355687428096000
18 6402373705728000
19 121645100408832000
20 2432902008176640000
###Markdown
Exercise 2.3: `if` / `else` statements `if` and `else` statements are used to make decisions based on defined criteria.
###Code
n = 10
if n < 10:
print("n is less than 10")
else:
print("n is greater than 10")
###Output
_____no_output_____
###Markdown
Question 2.3.1:One example of mathematical functions that contain such statements is the so-called "rectified linear unit" (ReLU) function, which is often used in Neural Networks.An example is given here:\begin{equation}f\left(x\right) =\begin{cases} 0 & x\leq 0 \\ x & x \gt 0 \end{cases}\end{equation}In this question, write the above ReLU function, which returns $0$ if $x \leq 0$ and returns $x$ otherwise.Lastly, verify the correctness of your code using the 5 print statements in the code block below.
###Code
def relu(x):
# Implement the content of the ReLU function yourself!
if x <= 0.0:
return 0.0
else:
return x
print(relu(-10)) # should return 0
print(relu(-0.1)) # should return 0
print(relu(0)) # should return 0
print(relu(0.1)) # should return 0.1
print(relu(10)) # should return 10
###Output
0.0
0.0
0.0
0.1
10
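###Markdown
A small side note (not part of the original exercise): for NumPy arrays, the same ReLU can be written in a single vectorized line with `np.maximum`, which is how it is usually done in practice:
###Code
import numpy as np
def relu_np(x):
    # np.maximum compares elementwise against 0.0, so this works for
    # single numbers as well as whole NumPy arrays
    return np.maximum(0.0, x)
print(relu_np(np.array([-10, -0.1, 0, 0.1, 10])))  # expected: [ 0.   0.   0.   0.1 10. ]
###Output
_____no_output_____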
###Markdown
Exercise 2.4: Plotting In the below example, the value of $$y= x^2$$ is calculated for six values of $x$, and appended to a list.Next, the points are plotted using the `plt.plot()` function from the `pyplot` library.The function `plt.plot()` tells matplotlib to draw a line plot that connects all the pairs of points in two lists.
###Code
import matplotlib.pyplot as plt
# Some x-values
x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
# Initialize an empty list for the y-values we wish to plot
y = []
for i in range(len(x)):
# Calculate the square of each x in the list
value = x[i]**2
# Add the calculated value to the list "y"
y.append(value)
# Print the two lists
print(x)
print(y)
# Make a line plot/figure
plt.plot(x, y)
# Actually draw the figure below
plt.show()
###Output
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
###Markdown
Question 2.4.1:Instead of plotting a line through all points, it is also possible to plot datapoints using a so-called [scatterplot](https://en.wikipedia.org/wiki/Scatter_plot).For this behavior, you can replace the function `plt.plot()` in the above example with the function `plt.scatter()`.In this question you are given two lists of numbers below, `a` and `b`.Use pyplot to draw a *scatterplot* of the two lists.
###Code
a = [10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0]
b = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]
# Here, write the code to show a scatterplot for the lists a, b
plt.scatter(a, b)
plt.show()
###Output
_____no_output_____
###Markdown
Question 2.4.2: In order to give the plot a title and label the axes, insert the three functions in the code in Question 2.4.1:* `plt.xlabel()`* `plt.ylabel()`* `plt.title()`Make the plot title "Python 2020", label the x-axis "Apples" and the y-axis "Oranges"
###Code
plt.scatter(a, b)
plt.xlabel("Apples")
plt.ylabel("Oranges")
plt.title("Python 2020")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 2.5: More plotting (harder) Question 2.5.1:In the example in Problem 2.4, the code for a line plot for $y = x^2$ is shown.Write your own code to plot $y = \cos(x)$ from 0 to 10.**Hints:*** Import the `np.cos()` function using `import numpy as np`* If the figure does not look smooth, how can you make the points on the x-axis closer than 1.0?
###Code
# First try:
# Note that there are many ways to solve this exercise!
import numpy as np
x = []
y = []
for i in range(11):
x.append(i)
y.append(np.cos(i))
# Print the two lists to verify that you have the right numbers
print(x)
print(y)
# Make the figure
plt.plot(x, y)
# Actually draw the figure below
plt.show()
# Smooth, with more x-values:
# Note that there are many ways to solve this exercise!
import numpy as np
x = []
y = []
for i in range(100):
# Now there are 100 points between 0 and 10 with 0.1 spacing
# => smoother curve
xval = i/10.0
x.append(xval)
yval = np.cos(xval)
y.append(yval)
# Print the two lists to verify that you have the right numbers
print(x)
print(y)
# Make the figure
plt.plot(x, y)
# Actually draw the figure below
plt.show()
###Output
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 9.0, 9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9]
[1.0, 0.9950041652780258, 0.9800665778412416, 0.955336489125606, 0.9210609940028851, 0.8775825618903728, 0.8253356149096783, 0.7648421872844885, 0.6967067093471654, 0.6216099682706644, 0.5403023058681398, 0.4535961214255773, 0.3623577544766736, 0.26749882862458735, 0.16996714290024104, 0.0707372016677029, -0.029199522301288815, -0.12884449429552464, -0.2272020946930871, -0.32328956686350335, -0.4161468365471424, -0.5048461045998576, -0.5885011172553458, -0.6662760212798241, -0.7373937155412454, -0.8011436155469337, -0.8568887533689473, -0.9040721420170612, -0.9422223406686581, -0.9709581651495905, -0.9899924966004454, -0.9991351502732795, -0.9982947757947531, -0.9874797699088649, -0.9667981925794611, -0.9364566872907963, -0.896758416334147, -0.848100031710408, -0.7909677119144168, -0.7259323042001402, -0.6536436208636119, -0.5748239465332692, -0.4902608213406994, -0.40079917207997545, -0.30733286997841935, -0.2107957994307797, -0.11215252693505487, -0.01238866346289056, 0.0874989834394464, 0.18651236942257576, 0.28366218546322625, 0.37797774271298024, 0.4685166713003771, 0.5543743361791608, 0.6346928759426347, 0.70866977429126, 0.7755658785102496, 0.8347127848391598, 0.8855195169413189, 0.9274784307440359, 0.960170286650366, 0.9832684384425845, 0.9965420970232175, 0.9998586363834151, 0.9931849187581926, 0.9765876257280235, 0.9502325919585296, 0.9143831482353194, 0.8693974903498253, 0.8157251001253568, 0.7539022543433046, 0.6845466664428066, 0.6083513145322546, 0.5260775173811053, 0.43854732757439036, 0.3466353178350258, 0.2512598425822557, 0.15337386203786435, 0.05395542056264975, -0.04600212563953695, -0.14550003380861354, -0.2435441537357911, -0.3391548609838345, -0.4313768449706208, -0.5192886541166856, -0.6020119026848236, -0.6787200473200125, -0.7486466455973987, -0.811093014061656, -0.8654352092411123, -0.9111302618846769, -0.9477216021311119, -0.9748436214041636, -0.9922253254526034, -0.9996930420352065, -0.9971721561963784, -0.984687855794127, -0.9623648798313102, -0.9304262721047533, -0.8891911526253609]
|
Lesson 15 - Break/Exercises.ipynb | ###Markdown
67. Multiplication table, part 2
###Code
while True:
int_input = int(input("Enter an integer and see its multiplication table: "))
fst_multiplier = int(input("Starting multiplier integer: "))
lst_multiplier = int(input("Final multiplier integer: "))
for multiplier in range(fst_multiplier, lst_multiplier + 1):
print(f'{int_input} X {multiplier} = {int_input * multiplier}')
option = input('Would you like to continue? [Y/N] ').upper()
if option == 'Y':
continue
else:
break
###Output
_____no_output_____
###Markdown
68. Even or odd, game
###Code
from random import randint
option = " "
while True:
cpu_choice = randint(1, 10)
user_choice = int(input('Enter an integer that is between 1 and 10 (0 to stop): '))
if user_choice != 0:
value = cpu_choice + user_choice
while option not in 'EO':
option = input('Even or odd? [E/O] ').upper().strip()
if (option == 'E' and value % 2 == 0) or (option == 'O' and value % 2 == 1):
print(f"{cpu_choice} + {user_choice} = {value} || You won.")
else:
print(f"{cpu_choice} + {user_choice} = {value} || I won.")
else:
break
###Output
_____no_output_____
###Markdown
69. Group analysis
###Code
data = {}
while True:
name = input('Name: ')
age = int(input('Age: '))
gender = input('Gender [M/F]: ').upper()
person_info = [age, gender]
data[name] = person_info
option = input("Would you like to continue? [Y/N]").upper()
if option != "Y":
break
average_age = sum([info[0] for info in list(data.values())]) / len(list(data.values()))
print(f'{average_age:.2f}')
ages = [info[0] for info in list(data.values())]
oldest_man = list(data.keys())[ages.index(max(ages))]
print(f'{oldest_man}')
young_girls = len([info for info in list(data.values()) if info[0] < 20 and info[1] == "F"])
print(f'{young_girls:.2f}')
###Output
_____no_output_____
###Markdown
70. Product statistics
###Code
price_list = []
while True:
name = input('Product name: ').strip()
price = float(input('Price: R$ '))
price_list.append(price)
option = input('Would you like to continue? [Y/N]').upper().strip()
if option != 'Y':
break
print(sum(price_list))
print(min(price_list))
print(len([price for price in price_list if price > 1000]))
###Output
_____no_output_____
###Markdown
71. ATM simulator
###Code
money = int(input("Amount of money: R$"))
banknotes_dict = {
100: lambda x: x // 100,
50: lambda x: x // 50,
20: lambda x: x // 20,
10: lambda x: x // 10,
5: lambda x: x // 5,
2: lambda x: x // 2,
1: lambda x: x // 1
}
for k, v in banknotes_dict.items():
banknotes = v(money)
money %= k
print(f'Banknotes of R${k:<10} {banknotes}')
###Output
_____no_output_____ |
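###Markdown
An equivalent, slightly more compact way to peel off the banknotes -- shown here only as a comparison with the dictionary-of-lambdas version above -- is Python's built-in `divmod`, which returns the quotient and the remainder in one step:
###Code
money = int(input("Amount of money: R$"))
for note in (100, 50, 20, 10, 5, 2, 1):
    # divmod(money, note) gives (number of notes of this value, amount left over)
    banknotes, money = divmod(money, note)
    print(f'Banknotes of R${note:<10} {banknotes}')
###Output
_____no_output_____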
Bollinger Bands-PFE.ipynb | ###Markdown
Bollinger Bands Bollinger Bands are a key technical trading tool. They are plotted two standard deviations (a measure of volatility) above and below a moving average of the price, which lets traders monitor and take advantage of shifts in price volatility. Under the usual normality assumption, roughly **95%** of the price action is expected to fall between the bands. Main Components of Bollinger Bands Upper Band: two standard deviations above the moving average of the stock's price. Middle Band: the moving average of the stock's price itself. Lower Band: two standard deviations below the moving average.
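Written out as formulas (added here for reference, with $N$ the moving-average window used below and $k = 2$ standard deviations): \begin{equation}\text{Middle}_t = \text{SMA}_N(P_t), \qquad \text{Upper}_t = \text{Middle}_t + k\,\sigma_{N,t}, \qquad \text{Lower}_t = \text{Middle}_t - k\,\sigma_{N,t}\end{equation} where $P_t$ is the adjusted closing price and $\sigma_{N,t}$ is the rolling standard deviation over the same window.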
###Code
# import needed libraries
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data as web
# Make function for calls to Yahoo Finance
def get_adj_close(ticker, start, end):
'''
A function that takes ticker symbols, starting period, ending period
as arguments and returns with a Pandas DataFrame of the Adjusted Close Prices
for the tickers from Yahoo Finance
'''
start = start
end = end
info = web.DataReader(ticker, data_source='yahoo', start=start, end=end)['Adj Close']
return pd.DataFrame(info)
# Get Adjusted Closing Prices for Pfizer between 2015 and 2020
pfe = get_adj_close('PFE', '1/2/2015', '31/12/2020')
pfe
# Calculate 30 Day Moving Average, Std Deviation, Upper Band and Lower Band
# (window=30 so the rolling window matches the "30 Day" labels used throughout)
pfe['30 Day MA'] = pfe['Adj Close'].rolling(window=30).mean()
pfe['30 Day STD'] = pfe['Adj Close'].rolling(window=30).std()
pfe['Upper Band'] = pfe['30 Day MA'] + (pfe['30 Day STD'] * 2)
pfe['Lower Band'] = pfe['30 Day MA'] - (pfe['30 Day STD'] * 2)
# Simple 30 Day Bollinger Band for Pfizer (2015-2020)
pfe[['Adj Close', '30 Day MA', 'Upper Band', 'Lower Band']].plot(figsize=(15,6))
plt.title('30 Day Bollinger Band for Pfizer')
plt.ylabel('Price (USD)')
plt.show()
# set style, empty figure and axes
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
# Get index values for the X axis for Pfizer DataFrame
x_axis = pfe.index.get_level_values(0)
# Plot shaded 30 Day Bollinger Band for Pfizer
ax.fill_between(x_axis, pfe['Upper Band'], pfe['Lower Band'], color='grey', label='Bollinger Band')
# Plot Adjusted Closing Price and Moving Average (labelled so ax.legend() has entries to show)
ax.plot(x_axis, pfe['Adj Close'], color='blue', lw=2, label='Adj Close')
ax.plot(x_axis, pfe['30 Day MA'], color='black', lw=2, label='30 Day MA')
# Set Title & Show the Image
ax.set_title('30 Day Bollinger Band For Pfizer')
ax.set_xlabel('Date (Year/Month)')
ax.set_ylabel('Price(USD)')
ax.legend()
plt.show()
###Output
No handles with labels found to put in legend.
|
examples/analysis-metagenomics.ipynb | ###Markdown
DeepBiome: MetagenomicsAug. 30. 2020@ Youngwon ([email protected])
###Code
import os
import json
import numpy as np
import pandas as pd
import copy
import logging
import sys
import keras.backend as k
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
os.environ['CUDA_VISIBLE_DEVICES']=''
from deepbiome.deepbiome import *
if not tf.__version__.startswith('2'):
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
k.set_session(tf.Session(config=config))
###Output
_____no_output_____
###Markdown
Pick Models
###Code
save = False
kfold=5
# kfold=20
network_model_keys = ['optimizer','lr','decay']
architecture_keys = ['weight_decay', 'weight_l1_penalty', #'weight_l2_penalty',
'tree_thrd', 'weight_initial',
'batch_normalization','drop_out']
network_training_keys = ['batch_size','epochs']
logging.basicConfig(format = '[%(name)-8s|%(levelname)s|%(filename)s:%(lineno)s] %(message)s',
level=logging.DEBUG)
log = logging.getLogger()
filenames = 'simulation_metagenomics.Rmd'
models = [
'realdata_metagenomics/t2d_deep',
'realdata_metagenomics/t2d_deep_l1',
'realdata_metagenomics/t2d_deepbiome',
]
models_aka = [
'DNN',
'DNN+$\ell_1$',
'DeepBiome',
]
num_classes = 1
model_network_info = {}
model_path_info = {}
for model_path in models:
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
model_path_info[model_path] = config_data.get_config_map()
model_network_info[model_path] = config_network.get_config_map()
if num_classes == 0: y_names = ['loss','correlation_coefficient']
elif num_classes==1: y_names = ['loss','binary_accuracy','sensitivity','specificity','gmeasure', 'auc']
else: y_names=['loss','categorical_accuracy','precision','recall','f1', 'auc']
if num_classes == 0: measure_index = np.array([0,1])
elif num_classes==1: measure_index = np.array([2,3,4,1,5])
else: measure_index = np.array([1,2,3,4,5])
###Output
_____no_output_____
###Markdown
Accuracy
###Code
results = []
# log.info('%20s & %s' % ('model', '& '.join(['%s ' % name for name in np.array(y_names)[[measure_index]]])))
print('%10s & %s \\\\\ \hline' % ('model', '& '.join(['%7s & (sd) ' % name for name in np.array(y_names)[[measure_index]]])))
# for model, aka in zip(models, models_aka):
# evaluation = np.load('%s/eval.npy' % model)
# log.info('%20s: %s' % (aka, ''.join(['%10.4f (%10.4f)'%(mean, std) for mean, std in zip(np.mean(evaluation, axis=0),np.std(evaluation, axis=0))])))
# results.append(np.vstack([np.mean(evaluation, axis=0),np.std(evaluation, axis=0)]).transpose())
for model, aka in zip(models, models_aka):
train_evaluation = np.load('%s/train_eval.npy' % model)[:,measure_index]
train_res = '&'.join(['%7.3f & %7.3f'%(mean, std) for mean, std in zip(np.nanmean(train_evaluation, axis=0),np.nanstd(train_evaluation, axis=0))])
test_evaluation = np.load('%s/test_eval.npy' % model)[:,measure_index]
test_res = '&'.join(['%7.3f & %7.3f'%(mean, std) for mean, std in zip(np.nanmean(test_evaluation, axis=0),np.nanstd(test_evaluation, axis=0))])
# log.info('%s & %s & %s \\\\' % (aka, train_res, test_res))
print('%10s & %s & %s \\\\' % (aka, test_res, train_res))
# results.append(np.vstack([np.mean(evaluation, axis=0),np.std(evaluation, axis=0)]).transpose())
for model, aka in zip(models, models_aka):
print('---------------------------------------------------------------------------------------------------------')
print('%14s test' % aka)
print('---------------------------------------------------------------------------------------------------------')
test_evaluation = np.load('%s/test_eval.npy' % model)[:,measure_index]
print(' %s' % ''.join(['%16s'%s.strip() for s in np.array(y_names)[[measure_index]]]))
print('Mean: %s' % ''.join(['%16.4f'%v for v in np.mean(test_evaluation, axis=0)]))
print('Std : %s' % ''.join(['%16.4f'%v for v in np.std(test_evaluation, axis=0)]))
###Output
---------------------------------------------------------------------------------------------------------
DNN test
---------------------------------------------------------------------------------------------------------
sensitivity specificity gmeasure binary_accuracy auc
Mean: 0.5777 0.5465 0.3914 0.5318 0.6804
Std : 0.3889 0.3842 0.1192 0.0411 0.0366
---------------------------------------------------------------------------------------------------------
DNN+$\ell_1$ test
---------------------------------------------------------------------------------------------------------
sensitivity specificity gmeasure binary_accuracy auc
Mean: 0.7035 0.4968 0.5233 0.5900 0.6808
Std : 0.2619 0.2780 0.0606 0.0402 0.0450
---------------------------------------------------------------------------------------------------------
DeepBiome test
---------------------------------------------------------------------------------------------------------
sensitivity specificity gmeasure binary_accuracy auc
Mean: 0.4312 0.7881 0.4964 0.6074 0.6071
Std : 0.2387 0.1468 0.2506 0.0691 0.1009
###Markdown
Weight estimation of DeepBiome DNN
###Code
num=0
model_path = models[num]
model_aka = models_aka[num]
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
try: path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
except: pass
try: path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
except: pass
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
path_info['model_info']['model_dir'] = './%s/%s'%(model_path,path_info['model_info']['model_dir'])
log.info('%22s : %s' % ('model', model_path))
log.info('%22s : %s' % ('model_aka', model_aka))
for k in architecture_keys:
log.info('%22s : %s' % (k, network_info['architecture_info'].get(k, None)))
for k in network_model_keys:
log.info('%22s : %s' % (k, network_info['model_info'].get(k, None)))
for k in network_training_keys:
log.info('%22s : %s' % (k, network_info['training_info'].get(k, None)))
weight_path = '%s/weight/%s' % (path_info['model_info']['model_dir'], 'weight_0.h5')
trained_weight_list = deepbiome_get_trained_weight(log, network_info, path_info, num_classes=1, weight_path=weight_path,
tree_level_list=['Species','Genus', 'Family', 'Order', 'Class', 'Phylum'])
log.info(len(trained_weight_list))
log.info(trained_weight_list[0].shape)
trained_weight_list[0]
###Output
_____no_output_____
###Markdown
DNN + $\ell_1$
###Code
num=1
model_path = models[num]
model_aka = models_aka[num]
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
try: path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
except: pass
try: path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
except: pass
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
path_info['model_info']['model_dir'] = './%s/%s'%(model_path,path_info['model_info']['model_dir'])
log.info('%22s : %s' % ('model', model_path))
log.info('%22s : %s' % ('model_aka', model_aka))
for k in architecture_keys:
log.info('%22s : %s' % (k, network_info['architecture_info'].get(k, None)))
for k in network_model_keys:
log.info('%22s : %s' % (k, network_info['model_info'].get(k, None)))
for k in network_training_keys:
log.info('%22s : %s' % (k, network_info['training_info'].get(k, None)))
weight_path = '%s/weight/%s' % (path_info['model_info']['model_dir'], 'weight_0.h5')
trained_weight_list = deepbiome_get_trained_weight(log, network_info, path_info, num_classes=1, weight_path=weight_path,
tree_level_list=['Species','Genus', 'Family', 'Order', 'Class', 'Phylum'])
log.info(len(trained_weight_list))
log.info(trained_weight_list[0].shape)
trained_weight_list[0]
###Output
_____no_output_____
###Markdown
DeepBiome
###Code
num=2
model_path = models[num]
model_aka = models_aka[num]
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
try: path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
except: pass
try: path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
except: pass
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
path_info['model_info']['model_dir'] = './%s/%s'%(model_path,path_info['model_info']['model_dir'])
log.info('%22s : %s' % ('model', model_path))
log.info('%22s : %s' % ('model_aka', model_aka))
for k in architecture_keys:
log.info('%22s : %s' % (k, network_info['architecture_info'].get(k, None)))
for k in network_model_keys:
log.info('%22s : %s' % (k, network_info['model_info'].get(k, None)))
for k in network_training_keys:
log.info('%22s : %s' % (k, network_info['training_info'].get(k, None)))
weight_path = '%s/weight/%s' % (path_info['model_info']['model_dir'], 'weight_0.h5')
trained_weight_list = deepbiome_get_trained_weight(log, network_info, path_info, num_classes=1, weight_path=weight_path,
tree_level_list=['Species','Genus', 'Family', 'Order', 'Class', 'Phylum'])
log.info(len(trained_weight_list))
log.info(trained_weight_list[0].shape)
trained_weight_list[0]
tot_trained_weight_list = []
for fold in range(kfold):
weight_path = '%s/weight/%s' % (path_info['model_info']['model_dir'], 'weight_%d.h5' % fold)
trained_weight_list = deepbiome_get_trained_weight(log, network_info, path_info, num_classes=1,
weight_path=weight_path,
tree_level_list=['Species','Genus', 'Family', 'Order', 'Class', 'Phylum'],
verbose=False)
tot_trained_weight_list.append(trained_weight_list)
trained_weight_list = []
for i in range(len(tot_trained_weight_list[0])):
level_weights = tot_trained_weight_list[0][i]
for j in range(1,len(tot_trained_weight_list)):
level_weights +=tot_trained_weight_list[j][i]
level_weights /= len(tot_trained_weight_list)
trained_weight_list.append(level_weights)
phylum_color = ['lemonchiffon',
'lightsteelblue',
'moccasin',
'darkseagreen',
'khaki',
'mediumturquoise',
'thistle',
'tan',
'mistyrose',
'lavender',
'teal',
'plum',
'cyan',
'orange',
'lightpink',
'violet',
'hotpink',
'deepskyblue',
# 'lightpink',
# 'violet',
# 'hotpink',
# 'deepskyblue'
]
len(phylum_color)
# img = deepbiome_draw_phylogenetic_tree(log, network_info, path_info, num_classes=1,
# file_name='%%inline', img_w=1000,
# branch_vertical_margin=10, arc_start=0, arc_span=360,
# node_name_on=True, name_fsize=50,
# tree_weight_on=True, tree_weight=trained_weight_list,
# tree_level_list=['Species', 'Genus', 'Family', 'Order', 'Class', 'Phylum'],
# weight_opacity=0.8, weight_max_radios=30,
# phylum_background_color_on=True, phylum_color=phylum_color, phylum_color_legend=True,
# show_covariates=False,
# verbose=True)
# img
img = deepbiome_draw_phylogenetic_tree(log, network_info, path_info, num_classes=1,
file_name='%%inline', img_w=1000,
branch_vertical_margin=10, arc_start=0, arc_span=360,
node_name_on=True, name_fsize=50,
tree_weight_on=True, tree_weight=trained_weight_list[4:],
tree_level_list = ['Class', 'Phylum', 'Disease'],
weight_opacity=0.8, weight_max_radios=30,
phylum_background_color_on=True, phylum_color=phylum_color, phylum_color_legend=True,
show_covariates=False,
verbose=True)
img
###Output
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
|
course 4 - Convolutional Neural Networks/programming assignments/Autonomous+driving+application+-+Car+detection+-+v3.ipynb | ###Markdown
Autonomous driving - Car detectionWelcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242). **You will learn to**:- Use object detection on a car detection dataset- Deal with bounding boxesRun the following cell to load the packages and dependencies that are going to be useful for your journey!
###Code
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. 1 - Problem StatementYou are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. Pictures taken from a car-mounted camera while driving around Silicon Valley. We would like to especially thank [drive.ai](https://www.drive.ai/) for providing this dataset! Drive.ai is a company building the brains of self-driving vehicles.You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. **Figure 1** : **Definition of a box** If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. 2 - YOLO YOLO ("you only look once") is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. 2.1 - Model detailsFirst things to know:- The **input** is a batch of images of shape (m, 608, 608, 3)- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. We will use 5 anchor boxes. So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).Let's look in greater detail at what this encoding represents. **Figure 2** : **Encoding architecture for YOLO** If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.For simplicity, we will flatten the last two dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). **Figure 3** : **Flattening the last two dimensions** Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class. **Figure 4** : **Find the class detected by each box** Here's one way to visualize what YOLO is predicting on an image:- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes). 
- Color that grid cell according to what object that grid cell considers the most likely.Doing this results in this picture: **Figure 5** : Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell. Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: **Figure 6** : Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class)- Select only one box when several boxes overlap with each other and detect the same object. 2.2 - Filtering with a threshold on class scoresYou are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell.- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.**Exercise**: Implement `yolo_filter_boxes()`.1. Compute box scores by doing the elementwise product as described in Figure 4. The following code may help you choose the right operator:
```python
a = np.random.randn(19*19, 5, 1)
b = np.random.randn(19*19, 5, 80)
c = a * b  # shape of c will be (19*19, 5, 80)
```
2. For each box, find: - the index of the class with the maximum box score ([Hint](https://keras.io/backend/argmax)) (Be careful with what axis you choose; consider using axis=-1) - the corresponding box score ([Hint](https://keras.io/backend/max)) (Be careful with what axis you choose; consider using axis=-1)3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. ([Hint](https://www.tensorflow.org/api_docs/python/tf/boolean_mask))Reminder: to call a Keras function, you should use `K.function(...)`.
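###Markdown
 A quick, ungraded illustration before the graded cell below: the same score-and-mask pattern on tiny NumPy arrays with made-up toy shapes (4 cells, 2 anchor boxes, 3 classes instead of 19x19, 5, 80), just to make the broadcasting and boolean-mask shapes concrete. All numbers here are hypothetical.
###Code
# Ungraded NumPy-only sketch with hypothetical toy shapes (not part of the exercise).
import numpy as np
rng = np.random.RandomState(0)
toy_confidence = rng.rand(4, 2, 1)   # p_c for each of 4 cells x 2 boxes
toy_class_probs = rng.rand(4, 2, 3)  # c_1..c_3 for each box
toy_scores = toy_confidence * toy_class_probs  # broadcasting -> shape (4, 2, 3)
toy_classes = toy_scores.argmax(axis=-1)       # best class per box, shape (4, 2)
toy_class_scores = toy_scores.max(axis=-1)     # score of that class, shape (4, 2)
toy_mask = toy_class_scores >= 0.5             # boolean mask, shape (4, 2)
print(toy_class_scores[toy_mask])              # kept scores (1-D, like tf.boolean_mask)
print(toy_classes[toy_mask])                   # kept class indices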
###Code
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
"""Filters YOLO boxes by thresholding on object and class confidence.
Arguments:
box_confidence -- tensor of shape (19, 19, 5, 1)
boxes -- tensor of shape (19, 19, 5, 4)
box_class_probs -- tensor of shape (19, 19, 5, 80)
threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
Returns:
scores -- tensor of shape (None,), containing the class probability score for selected boxes
boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes
Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
For example, the actual output size of scores would be (10,) if there are 10 boxes.
"""
# Step 1: Compute box scores
### START CODE HERE ### (≈ 1 line)
box_scores = box_confidence*box_class_probs
### END CODE HERE ###
# Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score
### START CODE HERE ### (≈ 2 lines)
box_classes = K.argmax(box_scores, axis=-1)
box_class_scores = K.max(box_scores, axis=-1, keepdims = False)
### END CODE HERE ###
# Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
### START CODE HERE ### (≈ 1 line)
filtering_mask = (box_class_scores > threshold)
### END CODE HERE ###
# Step 4: Apply the mask to scores, boxes and classes
### START CODE HERE ### (≈ 3 lines)
scores = tf.boolean_mask(box_class_scores, filtering_mask)
boxes = tf.boolean_mask(boxes, filtering_mask)
classes = tf.boolean_mask(box_classes, filtering_mask)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_a:
box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.shape))
print("boxes.shape = " + str(boxes.shape))
print("classes.shape = " + str(classes.shape))
###Output
scores[2] = 10.7506
boxes[2] = [ 8.42653275 3.27136683 -0.5313437 -4.94137383]
classes[2] = 7
scores.shape = (?,)
boxes.shape = (?, 4)
classes.shape = (?,)
###Markdown
**Expected Output**: **scores[2]** 10.7506 **boxes[2]** [ 8.42653275 3.27136683 -0.5313437 -4.94137383] **classes[2]** 7 **scores.shape** (?,) **boxes.shape** (?, 4) **classes.shape** (?,) 2.3 - Non-max suppression Even after filtering by thresholding on the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). **Figure 7** : In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) one of the 3 boxes. Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. **Figure 8** : Definition of "Intersection over Union". **Exercise**: Implement iou(). Some hints:- In this exercise only, we define a box using its two corners (upper left and lower right): `(x1, y1, x2, y2)` rather than the midpoint and height/width.- To calculate the area of a rectangle you need to multiply its height `(y2 - y1)` by its width `(x2 - x1)`.- You'll also need to find the coordinates `(xi1, yi1, xi2, yi2)` of the intersection of two boxes. Remember that: - xi1 = maximum of the x1 coordinates of the two boxes - yi1 = maximum of the y1 coordinates of the two boxes - xi2 = minimum of the x2 coordinates of the two boxes - yi2 = minimum of the y2 coordinates of the two boxes- In order to compute the intersection area, you need to make sure the height and width of the intersection are positive, otherwise the intersection area should be zero. Use `max(height, 0)` and `max(width, 0)`.In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner.
###Code
# GRADED FUNCTION: iou
def iou(box1, box2):
"""Implement the intersection over union (IoU) between box1 and box2
Arguments:
box1 -- first box, list object with coordinates (x1, y1, x2, y2)
box2 -- second box, list object with coordinates (x1, y1, x2, y2)
"""
# Calculate the (y1, x1, y2, x2) coordinates of the intersection of box1 and box2. Calculate its Area.
### START CODE HERE ### (≈ 5 lines)
xi1 = np.maximum(box1[0], box2[0])
yi1 = np.maximum(box1[1], box2[1])
xi2 = np.minimum(box1[2], box2[2])
yi2 = np.minimum(box1[3], box2[3])
inter_area = 1.0*max(yi2-yi1,0)*max(xi2-xi1,0)
### END CODE HERE ###
# Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
### START CODE HERE ### (≈ 3 lines)
box1_area = 1.0*(box1[3]-box1[1])*(box1[2]-box1[0])
box2_area = 1.0*(box2[3]-box2[1])*(box2[2]-box2[0])
union_area = box1_area + box2_area - inter_area
### END CODE HERE ###
# compute the IoU
### START CODE HERE ### (≈ 1 line)
iou = inter_area/union_area
### END CODE HERE ###
return iou
box1 = (2, 1, 4, 3)
box2 = (1, 2, 3, 4)
print("iou = " + str(iou(box1, box2)))
###Output
iou = 0.142857142857
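###Markdown
 A quick hand check of this test case: box1 = (2, 1, 4, 3) and box2 = (1, 2, 3, 4) intersect on the square with corners (2, 2) and (3, 3), so the intersection area is 1. Each box has area 2 x 2 = 4, so the union area is 4 + 4 - 1 = 7 and IoU = 1/7 ≈ 0.1429, matching the output above.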
###Markdown
**Expected Output**: **iou = ** 0.14285714285714285 You are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score.2. Compute its overlap with all other boxes, and remove boxes that overlap it more than `iou_threshold`.3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/gather)
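###Markdown
 Before delegating to TensorFlow's built-in op in the graded cell below, here is a small, ungraded pure-Python sketch of the greedy procedure described above, applied to a few made-up boxes in corner format and reusing the `iou()` function defined earlier.
###Code
# Ungraded sketch of greedy NMS on hypothetical toy boxes; the graded cell below
# uses tf.image.non_max_suppression instead.
def naive_nms(boxes_list, scores_list, iou_threshold=0.5, max_boxes=10):
    order = sorted(range(len(scores_list)), key=lambda i: scores_list[i], reverse=True)
    keep = []
    while order and len(keep) < max_boxes:
        best = order.pop(0)  # 1. select the highest-scoring remaining box
        keep.append(best)
        # 2. drop remaining boxes that overlap it more than iou_threshold; 3. repeat
        order = [i for i in order if iou(boxes_list[best], boxes_list[i]) <= iou_threshold]
    return keep
toy_boxes = [(0, 0, 2, 2), (0.1, 0.1, 2, 2), (3, 3, 5, 5)]  # made-up boxes
toy_box_scores = [0.9, 0.8, 0.7]
print(naive_nms(toy_boxes, toy_box_scores))  # expect [0, 2]: box 1 overlaps box 0 heavily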
###Code
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
"""
Applies Non-max suppression (NMS) to set of boxes
Arguments:
scores -- tensor of shape (None,), output of yolo_filter_boxes()
boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
classes -- tensor of shape (None,), output of yolo_filter_boxes()
max_boxes -- integer, maximum number of predicted boxes you'd like
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (, None), predicted score for each box
boxes -- tensor of shape (4, None), predicted box coordinates
classes -- tensor of shape (, None), predicted class for each box
Note: The "None" dimension of the output tensors obviously has to be less than max_boxes. Note also that this
function will transpose the shapes of scores, boxes, classes. This is done for convenience.
"""
max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()
K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor
# Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
### START CODE HERE ### (≈ 1 line)
nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)
### END CODE HERE ###
# Use K.gather() to select only nms_indices from scores, boxes and classes
### START CODE HERE ### (≈ 3 lines)
scores = K.gather(scores, nms_indices)
boxes = K.gather(boxes, nms_indices)
classes = K.gather(classes, nms_indices)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_b:
scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
###Output
scores[2] = 6.9384
boxes[2] = [-5.299932 3.13798141 4.45036697 0.95942086]
classes[2] = -2.24527
scores.shape = (10,)
boxes.shape = (10, 4)
classes.shape = (10,)
###Markdown
**Expected Output**: **scores[2]** 6.9384 **boxes[2]** [-5.299932 3.13798141 4.45036697 0.95942086] **classes[2]** -2.24527 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) 2.4 Wrapping up the filteringIt's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementation detail you have to know. There are a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided):
```python
boxes = yolo_boxes_to_corners(box_xy, box_wh)
```
which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`
```python
boxes = scale_boxes(boxes, image_shape)
```
YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called.
###Code
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
"""
Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.
Arguments:
yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
box_confidence: tensor of shape (None, 19, 19, 5, 1)
box_xy: tensor of shape (None, 19, 19, 5, 2)
box_wh: tensor of shape (None, 19, 19, 5, 2)
box_class_probs: tensor of shape (None, 19, 19, 5, 80)
image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
max_boxes -- integer, maximum number of predicted boxes you'd like
score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (None, ), predicted score for each box
boxes -- tensor of shape (None, 4), predicted box coordinates
classes -- tensor of shape (None,), predicted class for each box
"""
### START CODE HERE ###
# Retrieve outputs of the YOLO model (≈1 line)
box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
# Convert boxes to be ready for filtering functions
boxes = yolo_boxes_to_corners(box_xy, box_wh)
# Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)
# Scale boxes back to original image shape.
boxes = scale_boxes(boxes, image_shape)
# Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_b:
yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
scores, boxes, classes = yolo_eval(yolo_outputs)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
###Output
scores[2] = 138.791
boxes[2] = [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
classes[2] = 54
scores.shape = (10,)
boxes.shape = (10, 4)
classes.shape = (10,)
###Markdown
**Expected Output**: **scores[2]** 138.791 **boxes[2]** [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] **classes[2]** 54 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) **Summary for YOLO**:- Input image (608, 608, 3)- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect- You then select only a few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes- This gives you YOLO's final output. 3 - Test YOLO pretrained model on images In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell.
###Code
sess = K.get_session()
###Output
_____no_output_____
###Markdown
3.1 - Defining classes, anchors and image shape. Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell. The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
###Code
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
###Output
_____no_output_____
###Markdown
3.2 - Loading a pretrained modelTraining a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file.
###Code
yolo_model = load_model("model_data/yolo.h5")
###Output
/opt/conda/lib/python3.6/site-packages/keras/models.py:251: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually.
warnings.warn('No training configuration found in save file: '
###Markdown
This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
###Code
yolo_model.summary()
###Output
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 608, 608, 3) 0
____________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 608, 608, 32) 864 input_1[0][0]
____________________________________________________________________________________________________
batch_normalization_1 (BatchNorm (None, 608, 608, 32) 128 conv2d_1[0][0]
____________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 608, 608, 32) 0 batch_normalization_1[0][0]
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 304, 304, 32) 0 leaky_re_lu_1[0][0]
____________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 304, 304, 64) 18432 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
batch_normalization_2 (BatchNorm (None, 304, 304, 64) 256 conv2d_2[0][0]
____________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 304, 304, 64) 0 batch_normalization_2[0][0]
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 152, 152, 64) 0 leaky_re_lu_2[0][0]
____________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 152, 152, 128) 73728 max_pooling2d_2[0][0]
____________________________________________________________________________________________________
batch_normalization_3 (BatchNorm (None, 152, 152, 128) 512 conv2d_3[0][0]
____________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_3[0][0]
____________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 152, 152, 64) 8192 leaky_re_lu_3[0][0]
____________________________________________________________________________________________________
batch_normalization_4 (BatchNorm (None, 152, 152, 64) 256 conv2d_4[0][0]
____________________________________________________________________________________________________
leaky_re_lu_4 (LeakyReLU) (None, 152, 152, 64) 0 batch_normalization_4[0][0]
____________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 152, 152, 128) 73728 leaky_re_lu_4[0][0]
____________________________________________________________________________________________________
batch_normalization_5 (BatchNorm (None, 152, 152, 128) 512 conv2d_5[0][0]
____________________________________________________________________________________________________
leaky_re_lu_5 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_5[0][0]
____________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 76, 76, 128) 0 leaky_re_lu_5[0][0]
____________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 76, 76, 256) 294912 max_pooling2d_3[0][0]
____________________________________________________________________________________________________
batch_normalization_6 (BatchNorm (None, 76, 76, 256) 1024 conv2d_6[0][0]
____________________________________________________________________________________________________
leaky_re_lu_6 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_6[0][0]
____________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 76, 76, 128) 32768 leaky_re_lu_6[0][0]
____________________________________________________________________________________________________
batch_normalization_7 (BatchNorm (None, 76, 76, 128) 512 conv2d_7[0][0]
____________________________________________________________________________________________________
leaky_re_lu_7 (LeakyReLU) (None, 76, 76, 128) 0 batch_normalization_7[0][0]
____________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 76, 76, 256) 294912 leaky_re_lu_7[0][0]
____________________________________________________________________________________________________
batch_normalization_8 (BatchNorm (None, 76, 76, 256) 1024 conv2d_8[0][0]
____________________________________________________________________________________________________
leaky_re_lu_8 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_8[0][0]
____________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 38, 38, 256) 0 leaky_re_lu_8[0][0]
____________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 38, 38, 512) 1179648 max_pooling2d_4[0][0]
____________________________________________________________________________________________________
batch_normalization_9 (BatchNorm (None, 38, 38, 512) 2048 conv2d_9[0][0]
____________________________________________________________________________________________________
leaky_re_lu_9 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_9[0][0]
____________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_9[0][0]
____________________________________________________________________________________________________
batch_normalization_10 (BatchNor (None, 38, 38, 256) 1024 conv2d_10[0][0]
____________________________________________________________________________________________________
leaky_re_lu_10 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_10[0][0]
____________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_10[0][0]
____________________________________________________________________________________________________
batch_normalization_11 (BatchNor (None, 38, 38, 512) 2048 conv2d_11[0][0]
____________________________________________________________________________________________________
leaky_re_lu_11 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_11[0][0]
____________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_11[0][0]
____________________________________________________________________________________________________
batch_normalization_12 (BatchNor (None, 38, 38, 256) 1024 conv2d_12[0][0]
____________________________________________________________________________________________________
leaky_re_lu_12 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_12[0][0]
____________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_12[0][0]
____________________________________________________________________________________________________
batch_normalization_13 (BatchNor (None, 38, 38, 512) 2048 conv2d_13[0][0]
____________________________________________________________________________________________________
leaky_re_lu_13 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_13[0][0]
____________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D) (None, 19, 19, 512) 0 leaky_re_lu_13[0][0]
____________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 19, 19, 1024) 4718592 max_pooling2d_5[0][0]
____________________________________________________________________________________________________
batch_normalization_14 (BatchNor (None, 19, 19, 1024) 4096 conv2d_14[0][0]
____________________________________________________________________________________________________
leaky_re_lu_14 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_14[0][0]
____________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_14[0][0]
____________________________________________________________________________________________________
batch_normalization_15 (BatchNor (None, 19, 19, 512) 2048 conv2d_15[0][0]
____________________________________________________________________________________________________
leaky_re_lu_15 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_15[0][0]
____________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_15[0][0]
____________________________________________________________________________________________________
batch_normalization_16 (BatchNor (None, 19, 19, 1024) 4096 conv2d_16[0][0]
____________________________________________________________________________________________________
leaky_re_lu_16 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_16[0][0]
____________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_16[0][0]
____________________________________________________________________________________________________
batch_normalization_17 (BatchNor (None, 19, 19, 512) 2048 conv2d_17[0][0]
____________________________________________________________________________________________________
leaky_re_lu_17 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_17[0][0]
____________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_17[0][0]
____________________________________________________________________________________________________
batch_normalization_18 (BatchNor (None, 19, 19, 1024) 4096 conv2d_18[0][0]
____________________________________________________________________________________________________
leaky_re_lu_18 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_18[0][0]
____________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_18[0][0]
____________________________________________________________________________________________________
batch_normalization_19 (BatchNor (None, 19, 19, 1024) 4096 conv2d_19[0][0]
____________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 38, 38, 64) 32768 leaky_re_lu_13[0][0]
____________________________________________________________________________________________________
leaky_re_lu_19 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_19[0][0]
____________________________________________________________________________________________________
batch_normalization_21 (BatchNor (None, 38, 38, 64) 256 conv2d_21[0][0]
____________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_19[0][0]
____________________________________________________________________________________________________
leaky_re_lu_21 (LeakyReLU) (None, 38, 38, 64) 0 batch_normalization_21[0][0]
____________________________________________________________________________________________________
batch_normalization_20 (BatchNor (None, 19, 19, 1024) 4096 conv2d_20[0][0]
____________________________________________________________________________________________________
space_to_depth_x2 (Lambda) (None, 19, 19, 256) 0 leaky_re_lu_21[0][0]
____________________________________________________________________________________________________
leaky_re_lu_20 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_20[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 19, 19, 1280) 0 space_to_depth_x2[0][0]
leaky_re_lu_20[0][0]
____________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 19, 19, 1024) 11796480 concatenate_1[0][0]
____________________________________________________________________________________________________
batch_normalization_22 (BatchNor (None, 19, 19, 1024) 4096 conv2d_22[0][0]
____________________________________________________________________________________________________
leaky_re_lu_22 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_22[0][0]
____________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 19, 19, 425) 435625 leaky_re_lu_22[0][0]
====================================================================================================
Total params: 50,983,561
Trainable params: 50,962,889
Non-trainable params: 20,672
____________________________________________________________________________________________________
###Markdown
**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). 3.3 - Convert output of the model to usable bounding box tensorsThe output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
###Code
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
###Output
_____no_output_____
###Markdown
You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. 3.4 - Filtering boxes`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this.
###Code
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
###Output
_____no_output_____
###Markdown
3.5 - Run the graph on an imageLet the fun begin. You have created a (`sess`) graph that can be summarized as follows:1. yolo_model.input is given to `yolo_model`. The model is used to compute the output yolo_model.output 2. yolo_model.output is processed by `yolo_head`. It gives you yolo_outputs 3. yolo_outputs goes through a filtering function, `yolo_eval`. It outputs your predictions: scores, boxes, classes **Exercise**: Implement predict() which runs the graph to test YOLO on an image.You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.The code below also uses the following function:
```python
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
```
which outputs:- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.- image_data: a numpy-array representing the image. This will be the input to the CNN.**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
###Code
def predict(sess, image_file):
"""
Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
Arguments:
sess -- your tensorflow/Keras session containing the YOLO graph
image_file -- name of an image stored in the "images" folder.
Returns:
out_scores -- tensor of shape (None, ), scores of the predicted boxes
out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
out_classes -- tensor of shape (None, ), class index of the predicted boxes
Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
"""
# Preprocess your image
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
# Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
# You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
### START CODE HERE ### (≈ 1 line)
out_scores, out_boxes, out_classes = sess.run( (scores, boxes, classes), feed_dict={yolo_model.input:image_data, K.learning_phase(): 0})
### END CODE HERE ###
# Print predictions info
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
# Generate colors for drawing bounding boxes.
colors = generate_colors(class_names)
# Draw bounding boxes on the image file
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
# Save the predicted bounding box on the image
image.save(os.path.join("out", image_file), quality=90)
# Display the results in the notebook
output_image = scipy.misc.imread(os.path.join("out", image_file))
imshow(output_image)
return out_scores, out_boxes, out_classes
###Output
_____no_output_____
###Markdown
Run the following cell on the "test.jpg" image to verify that your function is correct.
###Code
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
###Output
Found 7 boxes for test.jpg
car 0.60 (925, 285) (1045, 374)
car 0.66 (706, 279) (786, 350)
bus 0.67 (5, 266) (220, 407)
car 0.70 (947, 324) (1280, 705)
car 0.74 (159, 303) (346, 440)
car 0.80 (761, 282) (942, 412)
car 0.89 (367, 300) (745, 648)
|
weekly-work/week1/MNIST_for_beginners.ipynb | ###Markdown
MNIST for Beginners from https://www.tensorflow.org/versions/r0.9/tutorials/mnist/beginners/index.html The MNIST Data
###Code
# The MNIST Data are hosted on Yann LeCun's website, but made available directly by the TensorFlow team.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
###Output
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
###Markdown
Implementing Softmax Regression
###Code
import tensorflow as tf
# Assign placeholder to x that will be filled during computation.
# We'll be flattening MNIST images into a 784-dimensional vector,
# represented as a 2-D tensor of floating-point numbers.
x = tf.placeholder(tf.float32, [None, 784])
# Assign the model parameters to Variables, which are modifiable tensors
# within a graph of interacting operations.
# Initialize as zeros.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Implementation proper takes only one line.
y = tf.nn.softmax(tf.matmul(x, W) + b)
###Output
_____no_output_____
###Markdown
Training
###Code
# Assign a placeholder into which we'll be inputting correct answers:
y_ = tf.placeholder(tf.float32, [None, 10])
# Implement cross-entropy, which we'll use as the cost function:
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Use gradient descent to minimize cost with learning rate of 0.5.
# The beauty of TensorFlow is that we're effortlessly using backpropagation.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Initialize all variables:
init = tf.initialize_all_variables()
# Launch the model within a session:
sess = tf.Session()
sess.run(init)
# Train with one thousand iterations.
# Batches of one hundred random data points are used for stochastic training (i.e., SGD)
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
# Use argmax to examine whether the most likely predicted label matches reality:
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# Cast Booleans to floating point numbers and take mean to assess overall accuracy:
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Run and output to screen:
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
###Output
0.9181
|
0c_Total-vs-PS-Field.ipynb | ###Markdown
OUTDATED, the examples moved to the gallery See https://empymod.github.io/emg3d-gallery---- Total field vs primary-secondary fieldWe usually use `emg3d` for total field calculations. However, we could also use it in a primary-secondary field formulation, where we could calculate the primary field with a (semi-)analytical solution.In this notebook we use `emg3d` to calculate- Total field- Primary field- Secondary fieldand compare the total field to the primary+secondary field.One could replace the primary-field calculation by a 1D modeller such as `empymod`. You could play around with the required calculation-domain: Using a primary-secondary formulation should make it possible to restrict the required calculation domain for the scatterer a lot, therefore speeding up the calculation. However, we do not dive into that in this notebook. BackgroundTotal field is given by$$ s \mu \sigma \mathbf{\hat{E}} + \nabla \times \nabla \times \mathbf{\hat{E}} = -s\mu\mathbf{\hat{J}}_s .$$We can split the total field up into a primary field $\mathbf{\hat{E}}^p$ and a secondary field $\mathbf{\hat{E}}^s$,$$ \mathbf{\hat{E}} = \mathbf{\hat{E}}^p + \mathbf{\hat{E}}^s,$$where we also have to split our conductivity model into$$\sigma = \sigma^p + \Delta\sigma.$$The primary field could just be the direct field, or the direct field plus the air layer, or an entire 1D background, something that can be calculated (semi-)analytically. The secondary field is everything that is not included in the primary field.The primary field is then given by$$ s \mu \sigma^p \mathbf{\hat{E}}^p + \nabla \times \nabla \times \mathbf{\hat{E}}^p = -s\mu\mathbf{\hat{J}}_s ,$$and the secondary field can be calculated using the primary field as source,$$ s \mu \sigma \mathbf{\hat{E}}^s + \nabla \times \nabla \times \mathbf{\hat{E}}^s = -s\mu\Delta\sigma\mathbf{\hat{E}}^p .$$
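As a consistency check, adding the primary- and secondary-field equations and moving the $-s\mu\Delta\sigma\mathbf{\hat{E}}^p$ term to the left-hand side gives $s\mu(\sigma^p + \Delta\sigma)\mathbf{\hat{E}}^p + s\mu\sigma\mathbf{\hat{E}}^s + \nabla \times \nabla \times \mathbf{\hat{E}} = -s\mu\mathbf{\hat{J}}_s$, which is the total-field equation again, since $\sigma^p + \Delta\sigma = \sigma$ and $\mathbf{\hat{E}}^p + \mathbf{\hat{E}}^s = \mathbf{\hat{E}}$.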
###Code
import emg3d
import discretize
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Style adjustments
%matplotlib notebook
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
Survey
###Code
src = [0, 0, -950, 0, 0] # x-directed source at the origin, 50 m above seafloor
off = np.arange(5, 81)*100 # Offsets
rec = [off, off*0, -1000] # In-line receivers on the seafloor
res = [1e10, 0.3, 1] # Resistivities (Ohm.m): [air, seawater, background]
freq = 1.0 # Frequency (Hz)
###Output
_____no_output_____
###Markdown
MeshWe create quite a coarse grid (100 m minimum cell width) to have reasonably fast calculation times.Also note that the mesh here includes a large boundary because of the air layer. If you use a semi-analytical solution for the 1D background you could restrict that domain a lot.
###Code
meshinp = {'freq': freq, 'min_width': 100, 'verb': 0}
xx, x0 = emg3d.utils.get_hx_h0(
res=[res[1], 100.], fixed=src[0], domain=[-100, 8100], **meshinp)
yy, y0 = emg3d.utils.get_hx_h0(
res=[res[1], 100.], fixed=src[1], domain=[-500, 500], **meshinp)
zz, z0 = emg3d.utils.get_hx_h0(
res=[res[1], res[2], 100.], domain=[-2500, 0], fixed=[-1000, 0, -2000], **meshinp)
grid = discretize.TensorMesh([xx, yy, zz], x0=np.array([x0, y0, z0]))
grid
###Output
_____no_output_____
###Markdown
Create model
###Code
# Layered_background
res_x = np.ones(grid.nC)*res[0] # Air resistivity
res_x[grid.gridCC[:, 2] < 0] = res[1] # Water resistivity
res_x[grid.gridCC[:, 2] < -1000] = res[2] # Background resistivity
# Background model
model_pf = emg3d.utils.Model(grid, res_x.copy())
# Include the target
xx = (grid.gridCC[:, 0] >= 0) & (grid.gridCC[:, 0] <= 6000)
yy = abs(grid.gridCC[:, 1]) <= 500
zz = (grid.gridCC[:, 2] > -2500)*(grid.gridCC[:, 2] < -2000)
res_x[xx*yy*zz] = 100. # Target resistivity
# Create target model
model = emg3d.utils.Model(grid, res_x)
# Plot a slice
grid.plot_3d_slicer(model.res_x, zslice=-2250, clim=[0.3, 200],
xlim=(-1000, 8000), ylim=(-4000, 4000), zlim=(-3000, 500),
pcolorOpts={'norm': LogNorm()}
)
###Output
_____no_output_____
###Markdown
Calculate total field with `emg3d`
###Code
modparams = {'verb': -1, 'sslsolver': True, 'semicoarsening': True, 'linerelaxation': True}
sfield_tf = emg3d.utils.get_source_field(grid, src, freq, strength=0)
em3_tf = emg3d.solver.solver(grid, model, sfield_tf, **modparams)
###Output
:: emg3d :: 9.0e-07; 1(4); 0:00:17; CONVERGED
###Markdown
Calculate primary field (1D background) with `emg3d`Here we use `emg3d` to calculate the primary field. This could be replaced by a (semi-)analytical solution.
###Code
sfield_pf = emg3d.utils.get_source_field(grid, src, freq, strength=0)
em3_pf = emg3d.solver.solver(grid, model_pf, sfield_pf, **modparams)
###Output
:: emg3d :: 8.5e-07; 1(4); 0:00:17; CONVERGED
###Markdown
Calculate secondary field (scatterer) with `emg3d` Define the secondary source
###Code
# Get the difference of conductivity as volume-average values
dsigma = grid.vol.reshape(grid.vnC, order='F')*(1/model.res_x-1/model_pf.res_x)
# Here we use the primary field calculated with emg3d. This could be done
# with a 1D modeller such as empymod instead.
fx = em3_pf.fx.copy()
fy = em3_pf.fy.copy()
fz = em3_pf.fz.copy()
# Average delta sigma to the corresponding edges
fx[:, 1:-1, 1:-1] *= 0.25*(dsigma[:, :-1, :-1] + dsigma[:, 1:, :-1] + dsigma[:, :-1, 1:] + dsigma[:, 1:, 1:])
fy[1:-1, :, 1:-1] *= 0.25*(dsigma[:-1, :, :-1] + dsigma[1:, :, :-1] + dsigma[:-1, :, 1:] + dsigma[1:, :, 1:])
fz[1:-1, 1:-1, :] *= 0.25*(dsigma[:-1, :-1, :] + dsigma[1:, :-1, :] + dsigma[:-1, 1:, :] + dsigma[1:, 1:, :])
# Create field instance iwu dsigma E
sfield_sf = sfield_pf.smu0*emg3d.utils.Field(fx, fy, fz, freq=freq)
sfield_sf.ensure_pec
###Output
_____no_output_____
###Markdown
Plot the secondary sourceOur secondary source is the entire target, the scatterer. Here we look at the $E_x$ secondary source field. But note that the secondary source has all three components $E_x$, $E_y$, and $E_z$, even though our primary source was purely $x$-directed. (Change `fx` to `fy` or `fz` in the command below, and simultaneously `Ex` to `Ey` or `Ez`, to show the other source fields.)
###Code
grid.plot_3d_slicer(sfield_sf.fx.ravel('F'), view='abs', vType='Ex',
zslice=-2250, clim=[1e-17, 1e-9],
xlim=(-1000, 8000), ylim=(-4000, 4000), zlim=(-3000, 500),
pcolorOpts={'norm': LogNorm()}
)
###Output
_____no_output_____
###Markdown
Calculate the secondary field
###Code
em3_sf = emg3d.solver.solver(grid, model, sfield_sf, **modparams)
###Output
:: emg3d :: 9.9e-07; 1(6); 0:00:28; CONVERGED
###Markdown
Plot result
###Code
# E = E^p + E^s
em3_ps = em3_pf + em3_sf
# Get the responses at receiver locations
em3_pf_rec = emg3d.utils.get_receiver(grid, em3_pf.fx, (rec[0], rec[1], rec[2]))
em3_tf_rec = emg3d.utils.get_receiver(grid, em3_tf.fx, (rec[0], rec[1], rec[2]))
em3_sf_rec = emg3d.utils.get_receiver(grid, em3_sf.fx, (rec[0], rec[1], rec[2]))
em3_ps_rec = emg3d.utils.get_receiver(grid, em3_ps.fx, (rec[0], rec[1], rec[2]))
plt.figure(figsize=(9, 5))
ax1 = plt.subplot(121)
plt.title('|Real part|')
plt.plot(off/1e3, abs(em3_pf_rec.real), 'k', label='Primary Field (1D Background)')
plt.plot(off/1e3, abs(em3_sf_rec.real), '.4', ls='--', label='Secondary Field (Scatterer)')
plt.plot(off/1e3, abs(em3_ps_rec.real))
plt.plot(off[::2]/1e3, abs(em3_tf_rec[::2].real), '.')
plt.plot(off/1e3, abs(em3_ps_rec.real-em3_tf_rec.real))
plt.xlabel('Offset (km)')
plt.ylabel('$E_x$ (V/m)')
plt.yscale('log')
plt.legend()
ax2 = plt.subplot(122, sharey=ax1)
plt.title('|Imaginary part|')
plt.plot(off/1e3, abs(em3_pf_rec.imag), 'k')
plt.plot(off/1e3, abs(em3_sf_rec.imag), '.4', ls='--')
plt.plot(off/1e3, abs(em3_ps_rec.imag), label='P/S Field')
plt.plot(off[::2]/1e3, abs(em3_tf_rec[::2].imag), '.', label='Total Field')
plt.plot(off/1e3, abs(em3_ps_rec.imag-em3_tf_rec.imag), label='$\Delta$|P/S-Total|')
plt.xlabel('Offset (km)')
plt.ylabel('$E_x$ (V/m)')
plt.yscale('log')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
plt.legend()
plt.tight_layout()
plt.show()
emg3d.Report([discretize, ])
###Output
_____no_output_____ |
notebooks/assignments/assignment-5-rho-t-test-chi-square.ipynb | ###Markdown
OverviewIn this notebook you will be doing a t-test on rho, the population correlation coefficient, and a chi-square test for independence. A t-test on rho allows you to test the null hypothesis that the correlation coefficient between a predictor and an outcome variable at the population level is zero. The alternative hypothesis is that the population correlation coefficient is not zero. This type of test is suitable for situations where you are looking for a correlation between two variables and the data can be summarized by a scatterplot and fit with a line-of-best-fit. In order to do a t-test on rho, the following must be true:* The data has interval or ratio measurement scales.* The residuals between the observed outcomes and the line-of-best-fit must be normally distributed.Chi-square tests are non-parametric tests for differences between frequency distributions of categorical/nominal data. This means they can be employed like a one-sample T-test, an independent samples T-test, or a Mann-Whitney test in situations where your data is nominal/categorical instead of ordinal, interval, or ratio. Chi-square goodness-of-fit tests allow you to test the null hypothesis that the frequency distribution of observed values of nominal data is no different from a known or expected distribution. For example, let's say you surveyed 99 people asking their political affiliation, Democrat, Republican, or Independent. Your null hypothesis is that all three options are equally likely. You would then expect the frequency distribution to be 33 Democrats, 33 Republicans, and 33 Independents. The alternative hypothesis would be that the three party affiliations are not equally likely. Chi-square tests for homogeneity or independence allow you to test the null hypothesis that the frequency distribution of observed values of nominal data is independent of some other nominal dimension of your data. For example, let's say that in your political affiliation survey you also record the gender of those surveyed. Your null hypothesis could be that the frequency distribution of political affiliation is independent of gender. The alternative would be that the distribution is dependent on gender, that is, there is a significant difference in the distribution of political affiliation between the genders. In order to do a chi-square test you should ensure the following:* The data set is sufficiently large that the expected number of individuals assigned to each 'class' of a categorical variable is greater than or equal to 5 at least 80% of the time. Run the following cell (shift-enter) to load needed python packages and modules.
###Code
# RUN BUT DON'T MODIFY THIS CELL
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
###Output
_____no_output_____
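###Markdown
 As an ungraded warm-up, the short sketch below shows how the two tests described in the Overview map onto scipy: `stats.chisquare` for the goodness-of-fit test and `stats.chi2_contingency` for the test of independence. All counts here are hypothetical, made up only to illustrate the calls; the graded exercises further down use the cereal data instead.
###Code
# Ungraded illustration with made-up (hypothetical) counts.
# Goodness-of-fit: 99 surveyed people vs. the "equally likely" expectation of 33/33/33.
observed_counts = [40, 26, 33]  # hypothetical observed: Democrat, Republican, Independent
expected_counts = [33, 33, 33]
gof_stat, gof_p = stats.chisquare(f_obs=observed_counts, f_exp=expected_counts)
print("goodness-of-fit: statistic =", gof_stat, ", p-value =", gof_p)
# Test for independence: hypothetical gender x party contingency table.
toy_table = [[22, 13, 15],  # e.g. one gender
             [18, 13, 18]]  # e.g. the other gender
ind_stat, ind_p, ind_dof, ind_exp = stats.chi2_contingency(toy_table)
print("independence: statistic =", ind_stat, ", p-value =", ind_p)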
###Markdown
Data for 70+ Cereals * Load cereal.csv into a pandas dataframe.* If this were in your library you would use the path `.../library/filename.csv`.* Use the `.head()` method to print out the first 7 rows of the dataframe.* Get the `.shape` (no parentheses) property to see how many rows and columns are in the dataset.**Source:** downloaded 12/18/2017 from the Kaggle public dataset repository. Credited to Chris Crawford**Description:** Nominal/categorical and ratio/interval data for 70+ different cereals.
###Code
# RUN BUT DON'T MODIFY THIS CELL
url = "https://raw.githubusercontent.com/prof-groff/evns462/master/data/cereal.csv"
cereal = pd.read_csv(url)
print(cereal.head())
print("shape: ", cereal.shape)
###Output
name mfr type calories protein fat sodium fiber \
0 100% Bran N C 70 4 1 130 10.0
1 100% Natural Bran Q C 120 3 5 15 2.0
2 All-Bran K C 70 4 1 260 9.0
3 All-Bran with Extra Fiber K C 50 4 0 140 14.0
4 Almond Delight R C 110 2 2 200 1.0
carbo sugars potass vitamins shelf weight cups rating
0 5.0 6 280 25 3 1.0 0.33 68.402973
1 8.0 8 135 0 3 1.0 1.00 33.983679
2 7.0 5 320 25 3 1.0 0.33 59.425505
3 8.0 0 330 25 3 1.0 0.50 93.704912
4 14.0 8 -1 25 3 1.0 0.75 34.384843
shape: (77, 16)
###Markdown
Is fat content a predictor of calories? In the following cell:* Extract the fat and calories data columns and store them as x and y, respectively.* Make a scatter plot of calories (y-axis) as a function of fat (x-axis).* Fit the data with a line of best fit.* Do a T-test on rho to test the following null hypothesis. H0: ρ = 0 at α = 0.05; HA: ρ is not equal to 0.
###Code
# import tools to build linear models and find a line-of-best-fit
from sklearn import linear_model
# create a linear regression object to use to build the linear model
regr = linear_model.LinearRegression()
# pull out the 'fat' column and store is as a variable called x
x = cereal['fat']
x = x.values.reshape(-1,1) # this is needed to reshape x so it works with LinearRegression()
# TODO: UNCOMMENT AND COMPLETE THE FOLLOWING LINES OF CODE TO PULL OUT THE 'CALORIES' COLUMN
# AND STORE IT AS Y THEN, RESHAPE Y.
# y =
# y =
# TODO: UNCOMMENT AND COMPLETE THE FOLLOWING LINE OF CODE TO USE THE regr.fit() METHOD TO FIT A
# LINE-OF-BEST-FIT TO THE X AND Y DATA.
# regr.fit()
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING LINES OF CODE THAT TAKE TWO X VALUES, 0 AND 5, AND FIND
# THE CORRESPONDING Y VALUES ACCORDING TO THE EQUATION FOR THE LINE OF BEST FIT Y=MX+B USING
# THE regr.predict() METHOD. FINDING THESE TWO POINTS WILL ALLOW US TO DRAW THE LINE-OF-BEST-FIT BECAUSE
# ALL YOU NEED ARE TWO POINTS, (X1,Y1) AND (X2,Y2), TO DRAW A LINE.
# x_fit =
# y_fit =
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING LINES OF CODE TO MAKE A SCATTER PLOT OF THE
# DATA AND SHOW THE LINE-OF-BEST-FIT TOO.
# plt.scatter()
# plt.xlabel()
# plt.ylabel()
# plt.plot()
# plt.show()
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING LINES OF CODE TO CALCULATE THE COEFFICIENT OF
# DETERMINATION (r^2) USING THE regr.score() METHOD AND THE CORRELATION COEFFICIENT (r) BY
# TAKING THE SQUARE ROOT OF THIS RESULT.
# PRINT BOTH r^2 AND r TO THE CONSOLE USING print() COMMANDS
# rsqr =
# r =
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING TO USE THE BUILT-IN PYTHON FUNCTION
# stats.pearsonr() TO CALCULATE r AND ITS CORRESPONDING p-value. PRINT BOTH OF THESE VALUES
# TO THE CONSOLE USING A print() COMMAND
# r, pvalue =
###Output
_____no_output_____
###Markdown
Questions:Is the null hypothesis accepted or rejected, that is, is the population correlation coefficient ρ statistically equal to zero or is it non-zero, meaning there is a correlation between the x and y data? Do certain brands of cereal get better shelf placement?Let's take a look at Kelloggs and General Mills cereals and see if one or the other gets better shelf placement. * H0: Shelf placement is independent of cereal brand.* HA: Shelf placement is not independent of cereal brand.
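As a reference (separate from the cereal exercise below), here is a minimal sketch of a chi-square test for independence on made-up data, using the gender-by-affiliation example from the overview. With so few rows the expected-count rule of thumb is violated, so this is only meant to show the API:

```python
# Toy chi-square test for independence with pd.crosstab + stats.chi2_contingency
import pandas as pd
import scipy.stats as stats

toy = pd.DataFrame({
    "gender": ["M", "M", "F", "F", "M", "F", "F", "M", "F", "M"],
    "party":  ["D", "R", "D", "I", "R", "D", "R", "I", "D", "R"],
})
table_toy = pd.crosstab(toy["gender"], toy["party"])   # contingency table
statistic, pvalue, dof, expected = stats.chi2_contingency(table_toy)
print(table_toy)
print(statistic, pvalue)   # reject H0 (independence) if pvalue < alpha
```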
###Code
# The following line of code uses the .isin() method to pull out all rows of the cereal dataframe
# that have either K or G in the 'mfr' column
k_gm = cereal[cereal['mfr'].isin(('K','G'))]
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING TWO LINES OF CODE THAT ACCOMPLISH EXACTLY THE SAME
# END AS THE ABOVE LINE OF CODE BUT USES THE .groupby AND .get_group() METHODS INSTEAD.
# cereal_by_mfr = cereal.groupby()
# K_GM = pd.concat([cereal_by_mfr.get_group(), cereal_by_mfr.get_group()])
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING LINE OF CODE THAT USES THE pd.crosstab() FUNCTION
# TO CONSTRUCT A CONTINGENCY TABLE WITH A ROW FOR EACH MANUFACTURER (K AND G) AND A COLUMN FOR EACH
# SHELF PLACEMENT (1, 2, AND 3). THEN USE THE PRINT COMMAND TO PRINT THIS TABLE TO THE CONSOLE
# table =
# TO DO: UNCOMMENT AND COMPLETE THE FOLLOWING LINE OF CODE THAT USES THE stats.chi2_contingency()
# FUNCTION TO TEST THE NULL HYPOTHESIS ABOVE. PRINT THE TEST STATISTIC AND THE CORRESPONDING P-VALUE
# TO THE CONSOLE
# statistic, pvalue, dof, exp =
###Output
_____no_output_____ |
IrisDataset_Supervised_Classification.ipynb | ###Markdown
---------------------------------------------------------------------- Supervised Learning - Logistic Regression ---------------------------------------------------------------------- Building a Student Performance Prediction System Classification vs. RegressionThe aim of this project is to predict how likely a student is to pass. Which type of supervised learning problem is this, classification or regression? Why?Answer:This project is a classification supervised learning problem because the variable to predict, i.e. whether a student graduates or fails to graduate, is categorical. In this case it is a dichotomous categorical variable where the only two possible values are "pass" or "fail". Overview: 1. Read the problem statement. 2. Get the dataset. 3. Explore the dataset. 4. Pre-process the dataset. 5. Transform the dataset for building the machine learning model. 6. Split the data into train and test sets. 7. Build the model. 8. Apply the model. 9. Evaluate the model. 10. Provide insights. Problem Statement Using Logistic Regression **predict the performance of a student**. The classification goal is to predict whether the student will pass or fail. Dataset This dataset covers student achievement in secondary education at two Portuguese schools. The data attributes include student grades, demographic, social and school-related features, and it was collected using school reports and questionnaires. Two datasets are provided regarding the performance in Mathematics.**Source:** https://archive.ics.uci.edu/ml/datasets/Student+Performance Exploring the Data - Reading the dataset file using pandas. Take care with the delimiter.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import mean_squared_error, r2_score,accuracy_score, recall_score, precision_score,f1_score,auc, confusion_matrix
# Read dataset using pandas
df = pd.read_csv("students-data.csv", delimiter=";")
df.head(5)
df.shape
df.dtypes
###Output
_____no_output_____
###Markdown
Q1. Drop missing values*Set the index name of the dataframe to **"number"**. Check a sample of the data and drop any missing values if present.**Use the .dropna() function to drop the NAs* Answer:
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
InsightsThere are no missing values in the dataset Q2. Transform Data*Print all the attribute names which are not numerical.***Hint:** check **select_dtypes()** and its **include** and **exclude** parameters.** Answer:
###Code
df.select_dtypes(include=['object'],exclude=['int64']).head(5)
###Output
_____no_output_____
###Markdown
Q3. Drop variables with less variance*Find the variance of each numerical independent variable and drop whose variance is less than 1. Use .var function to check the variance* Answer:
###Code
df.var()
###Output
_____no_output_____
###Markdown
Variables with low variance are almost the same for all the records. Hence, they do not contribute much to classification. Insights The following features have variance less than 1: 1. traveltime 2. studytime 3. failures 4. famrel 5. Dalc The feature freetime has variance approximately equal to 1, so it is not considered for dropping.
###Code
data = df.drop(['traveltime','studytime','failures','famrel','Dalc'],axis=1)
data.head(5)
###Output
_____no_output_____
###Markdown
Q4. Encode all categorical variables to numerical Take the list of categorical attributes (from the above result) and convert them into numerical variables. After that, print the head of the dataframe and check the values.**Hint:** check **sklearn LabelEncoder()** Answer:
###Code
from sklearn.preprocessing import LabelEncoder
categorical_feature_mask = data.dtypes == object
categorical_cols = data.columns[categorical_feature_mask].tolist()
le = LabelEncoder()
data[categorical_cols] = data[categorical_cols].apply(lambda col : le.fit_transform(col))
data.head(5)
###Output
_____no_output_____
###Markdown
Q5. Convert the continuous values of grades into classes *Consider the values in G1, G2 and G3 with >= 10 as pass(1) and < 10 as fail(0) and encode them into binary values. Print head of dataframe to check the values.* Answer:
###Code
filter1 = data['G1'] >= 10
data['G1'].where(cond=filter1,other=0, inplace=True)
filter1 = data['G2'] >= 10
data['G2'].where(cond=filter1, other=0, inplace=True)
filter1 = data['G3'] >= 10
data['G3'].where(cond=filter1, other=0, inplace=True)
filter1 = data['G1'] == 0
data['G1'].where(filter1, other=1, inplace=True)
filter1 = data['G2'] == 0
data['G2'].where(filter1, other=1, inplace=True)
filter1 = data['G3'] == 0
data['G3'].where(filter1, other=1, inplace=True)
data[['G1','G2','G3']].head(10)
###Output
_____no_output_____
###Markdown
Q6. Consider G3 is the target attribute and remaining all attributes as features to predict G3. Now, separate features and target into separate dataframes and name them X and y respectively. Answer:
###Code
X = data.drop('G3',axis=1)
y = data['G3']
X.head(5)
y.head(5)
###Output
_____no_output_____
###Markdown
Visualization Q7. Plot G2 and G3 and give your understanding on relation between both variables. Hint: Use pd.crosstab(sd.G2,sd.G3).plot(kind='bar') Answer:
###Code
pd.crosstab(data['G2'],data['G3'],margins=True)
pd.crosstab(data['G2'], data['G3']).plot(kind='bar')
l = plt.legend()
l.get_texts()[0].set_text('Fail')
l.get_texts()[1].set_text('Pass')
plt.show()
###Output
_____no_output_____
###Markdown
Insights 1. 24 students who failed in G2 passed in G3. 2. 8 students who failed in G3 passed in G2. 3. From the above two statements, the number of students underperforming in G2 but performing well in G3 is relatively large compared to the number of students underperforming in G3 but performing well in G2. Q8. Plot the number of students in each school and the number of students with different ages in separate plots. Hint: use seaborn sns.countplot() Answer:
###Code
sns.countplot(data['school'])
plt.show()
sns.countplot(data['age'])
plt.show()
###Output
_____no_output_____
###Markdown
Insights 1. There are more students in School '0' than in School '1'. 2. For most of the students, the ages are concentrated in the interval [15, 16, 17, 18]. Only a few students are 19 years old. Q9. Training and testing data split *So far, you have converted all categorical features into numeric values. Now, split the data into training and test sets with a training size of 300 records. Print the number of train and test records.***Hint:** check **train_test_split()** from **sklearn** Answer:
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=300, random_state=8)
X_train.shape
y_train.shape
X_test.shape
y_test.shape
###Output
_____no_output_____
###Markdown
Q10. - Model Implementation and Testing the Accuracy*Build a **LogisticRegression** classifier using **fit()** functions in sklearn. * You need to import both Logistic regression and accuracy score from sklearn* Answer:
###Code
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
model.score(X_train, y_train)
y_pred_test = model.predict(X_test)
model.score(X_test, y_test)
print(confusion_matrix(y_pred_test, y_test))
precision_score(y_test, y_pred_test, average='weighted')
recall_score(y_test, y_pred_test, average='weighted')
accuracy_score(y_test, y_pred_test)
f1_score(y_test, y_pred_test)
###Output
_____no_output_____
###Markdown
Insights From the metrics 1. The Logistic Regression model's accuracy on the training dataset is 92%, whereas on the test dataset it is 94% (rounded to the nearest integer). 2. The model's recall is 93%, compared to a precision of 94%. 3. The model performs better on the test dataset than on the training dataset. ---------------------------------------------------------------------- Supervised Learning - Naive Bayes with Iris Data ----------------------------------------------------------------------
###Code
from sklearn.naive_bayes import GaussianNB
from scipy.stats import zscore
###Output
_____no_output_____
###Markdown
Import Iris.csv
###Code
# Load using input file
iris=pd.read_csv("Iris.csv")
iris.head(5)
###Output
_____no_output_____
###Markdown
Treat NaN's/ Null values found
###Code
iris.isnull().sum()
iris=iris.fillna(0)
iris.isnull().sum()
###Output
_____no_output_____
###Markdown
Slice Iris data set for Independent variables and dependent variables Please note 'Species' is your dependent variable, name it y and independent set data as X
###Code
X = iris.drop('Species',axis=1)
y = iris['Species']
X.head(5)
y
###Output
_____no_output_____
###Markdown
Q1. Find the distribution of target variable (Species) and, Plot the distribution of target variable using histogram
###Code
sns.countplot(iris['Species'])
plt.show()
df = iris.drop('Id', axis=1)
df.hist(by='Species')
plt.show()
# Drop Id variable from data
X = X.drop('Id',axis=1)
X.head(5)
###Output
_____no_output_____
###Markdown
Q2. Find Correlation among all variables and give your insights
###Code
corr_matrix = X.corr()
corr_matrix
sns.heatmap(corr_matrix, annot = True)
plt.show()
###Output
_____no_output_____
###Markdown
Insights From the Correlation Matrix 1. SepalLength has a strong positive correlation with PetalLength. 2. SepalLength has a strong positive correlation with PetalWidth. 3. SepalWidth has a weak negative correlation with PetalLength and PetalWidth. Split data into training and test sets in 80:20.
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state = 8)
###Output
_____no_output_____
###Markdown
Q3. Feature Scaling on X_Train and X_Test
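A possible alternative to the cell below (just a sketch, not required for the exercise): fit a single `StandardScaler` on the training split only and reuse it to transform the test split, instead of standardizing each split independently. The names `X_train_scaled`/`X_test_scaled` are introduced here only for illustration and assume `X_train`/`X_test` from the split above:

```python
# Sketch: scale with statistics computed from the training split only
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(X_train)        # learn mean/std on the training data
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)      # reuse the same statistics on the test data
```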
###Code
# Use StandardScaler or similar methods
X_train = zscore(X_train)
X_test = zscore(X_test)
###Output
_____no_output_____
###Markdown
Q4. Train and Fit NaiveBayes Model
###Code
model = GaussianNB()
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
model.score(X_train, y_train)
y_pred_test = model.predict(X_test)
model.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Q5. Print Accuracy and Confusion Matrix and Conclude your findings
###Code
print(confusion_matrix(y_test, y_pred_test))
accuracy_score(y_test, y_pred_test)
precision_score(y_test, y_pred_test, average='weighted')
recall_score(y_test, y_pred_test, average='weighted')
###Output
_____no_output_____ |
agol-to-agol-notebooks/Configure_Target_Users.ipynb | ###Markdown
Configure Target UsersIn this notebook, we add users to the delivery org and invite them to groups. Users will be able to find the invites to the new groups on the 'Groups' tab or in their notifications. Getting Started1. Find the group IDs for the featured group and for any groups in the project org you will be adding delivery org users to.2. Set up the csv file of users. This csv should have columns for email, firstname, lastname, username, password, role, level, and groups. The groups column should contain group IDs that are separated by commas. There is a sample user csv in the Sample Config folder, and there is more information about formatting [`here`](https://learn.arcgis.com/en/projects/set-up-an-arcgis-enterprise-portal/lessons/add-members-to-the-organization.htm) in steps 4 & 5. Make sure that the User Roles you choose are able to be added to groups in other organizations! The default 'Viewer' role is not able to be added to groups outside of the organization.3. Change the variables in cell 1. Example variables: - TARGET_URL = "https://esrienergy.maps.arcgis.com" - TARGET_USERNAME = "portaladmin" - GROUP_IDS = ["a7903db4086641b98570bce5856a6364", "4d7ff4f81d6340428ef290b7de801204"] - FEATURED_GROUP_ID = "4f4fcac023dc430294cea226231ab448"4. Run the notebook cell by cell. - In code cell 4, you will need to enter the passwords for the delivery organizations when prompted - In code cell 6, the script will print out the users that were added to the delivery org and the groups they were added to5. At the end, your delivery org should have all the users in the csv file added and each should get an invitation in their Organization Account for each group listed for them. Once they accept it, they will be shown in the group members in the Project Organization as well. The delivery org will also be customized.
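For reference, below is a hypothetical sketch of what a users CSV row could look like, written with the standard-library `csv` module. All values (name, username, password, role, user type) are placeholders and should be replaced with values valid for your organization; the column names match the `USER_FIELDS` list used later in this notebook, and the group IDs reuse the example IDs from step 3 above:

```python
# Sketch only: write a one-row sample users CSV with placeholder values
import csv

sample_rows = [{
    "Email": "jane.doe@example.com",
    "First Name": "Jane",
    "Last Name": "Doe",
    "Username": "jdoe_delivery",
    "Password": "ChangeMe123!",
    "Role": "org_publisher",    # assumed role name; pick one that can join external groups
    "User Type": "creatorUT",   # assumed user-type keyword; adjust to your org's licensing
    "groups": "a7903db4086641b98570bce5856a6364,4d7ff4f81d6340428ef290b7de801204",
}]

with open("sample_users.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=list(sample_rows[0].keys()))
    writer.writeheader()
    writer.writerows(sample_rows)
```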
###Code
# Global Variables Set by User **change these values before running script**
# URL of the portal that will be customized and have users added
TARGET_URL = "https://pdo-scripts.maps.arcgis.com"
# Delivery username for log-on (needs to be admin)
TARGET_USERNAME = "target_username"
# project groups to share with *ALL* delivery users
GROUP_IDS = ["5e831faa6e574e46ae2e019d0e897a4f"]
# featured group to display on delivery home page #aec-test-group
FEATURED_GROUP_ID = "5e831faa6e574e46ae2e019d0e897a4f"
# folder holding the users csv and the csv file name (placeholder values; these two
# variables are needed by the os.path.join call below but were missing from this cell)
FOLDER = "Sample Config"
USERS_FILENAME = "users.csv"
# import libraries
from arcgis.gis import GIS
from arcgis.gis import Group
from arcgis.gis import UserManager
import os
import csv
# get UI component filepaths
USERS = os.path.join(FOLDER, USERS_FILENAME)
USER_FIELDS = ["Email", "First Name", "Last Name", "Username", "Password", "Role", "User Type", "groups"]
# connect to delivery org
print("Connecting to {} with username {}...".format(TARGET_URL, TARGET_USERNAME))
delivery = GIS(TARGET_URL, TARGET_USERNAME)
# Creation of functions to be used later
def optional_args_handler(key, user, default=None):
"""Sets optional user creation arguments appropriately"""
try:
var = None
if key in user:
var = user[key]
else:
if default:
var = default
return var
except Exception as e:
print("Optional args handler failed with args {}, {}, and {}: {}".format(key, user, default, e))
def add_user(user, target, groups=[]):
"""Add user to the gis and to specified groups
* Abstraction for creating from dict such as with csv
args:
user -- a dictionary containing user fields, see fields:
http://esri.github.io/arcgis-python-api/apidoc/html/arcgis.gis.toc.html#arcgis.gis.UserManager.create
target -- the delivery org, where users are added
groups -- (optional) destination groups, compliments those in dict
"""
try:
print("INFO: Creating user {}".format(user["Username"]))
role = optional_args_handler('Role', user, 'viewer') # default is viewer
user_type = optional_args_handler('User Type', user, 'viewer') # default is viewer
# create array of groups for user
user_groups = []
for grp in groups:
user_groups.append(grp)
group_str = user.pop('groups', None)
if group_str:
group_list = group_str.split(",")
for g in group_list:
try:
group_search = Group(target, g)
if group_search.title:
user_groups.append(group_search)
except Exception as e:
print("ERR: Could not find group id {}".format(g))
continue
print("User will be invited to join: {}".format([g.title for g in user_groups]))
result = target.users.create(username=user['Username'], password=user['Password'], firstname=user['First Name'],
lastname=user['Last Name'], email=user['Email'], role=role, user_type=user_type)
# create users returns None if it was unsuccessful
if not result:
print("Did not create user: Check username {}".format(user['Username']))
return
# Invite user to groups
print("Inviting to groups")
for g in user_groups:
try:
res = target.groups.get(g.groupid).invite_users([user['Username']])
if res == True:
print("Invited user {} to group {}".format(user['Username'], g.title))
else: # res == False
print("Failed to invite user {} to group {}: {}".format(user['Username'], g.title, res))
except Exception as e:
print("ERR: Could not add user {} to group {}: {}".format(user['Username'], g, e))
continue
return result
except Exception as e:
print("ERR: Could not create User {}: Add User encountered error {}".format(user['Username'], e))
def add_users_csv(csv_file, target, groups=None):
"""Add users from csv to gis
args:
csv_file -- path to csv with users to create
target -- delivery organization where users are added
groups -- (optional) destination groups, compliments those in csv (default [])
"""
try:
results = []
with open(csv_file, 'r') as users_csv:
users = csv.DictReader(users_csv, fieldnames=USER_FIELDS)
next(users, None) # skip the header
for user in users:
result = add_user(user, target, groups=groups)
results.append(result)
return results
except Exception as e:
print("Add users csv failed with args {}, {}, and {}: {}".format(csv_file, target, groups, e))
# fetch groups where *ALL* users will be added
share_groups = tuple(Group(delivery, g) for g in GROUP_IDS)
print(share_groups)
# add users to the delivery org and invite them to groups in target org
add_users_csv(USERS, delivery, groups=share_groups)
###Output
_____no_output_____ |
notebook/datalake_file_segmentation_merge_sample.ipynb | ###Markdown
Image Segmentation: download the annotated PNG files and the colormap
###Code
from abeja.datalake import Client as DatalakeClient
from abeja.datalake import APIClient
import urllib.request
import os
from tqdm import tqdm
import json
import re
###Output
_____no_output_____
###Markdown
Settings- OUTPUT_DATALAKE_CHANNEL_ID: specify the Datalake channel ID where the annotated PNGs are stored- ANNOTATION_JSON_NAME: upload the JSON file downloaded from the Annotation Tool to this notebook and specify its file name- download_folder_name: enter a folder name; a folder with this name will be created to store the PNG files and the JSON file
###Code
OUTPUT_DATALAKE_CHANNEL_ID = 'XXXXXXXXXX'
ANNOTATION_JSON_NAME = 'XXXXXXXX.json'
api_client = APIClient()
download_folder_name = 'XXXXXXXX'
datalake_client = DatalakeClient()
channel = datalake_client.get_channel(OUTPUT_DATALAKE_CHANNEL_ID)
os.mkdir(download_folder_name)
#def outputfile():
f = open(ANNOTATION_JSON_NAME,'r')
json_dict = json.load(f)
list_fileid = []
list_filename = []
list_ano_map = []
for x in tqdm(json_dict):
anotation_data = x['information']
origin_data = x['task']['metadata']
origin_in = [d for d in origin_data if 'information' in d]
origin_str = ','.join(map(str,origin_in))
origin_name_pick = origin_str.split(':', 15)[10]
origin_name = origin_name_pick.split(',', 2)[0]
origin_id = re.sub(r"[ ,']", "", origin_name)
ano_in = [s for s in anotation_data if 'is_combined' in s]
list_str = ','.join(map(str,ano_in))
file_id_pick = list_str.split(':', 2)[1]
file_id_fix = file_id_pick.split(',', 2)[0]
file_id = re.sub(r"[ ,']", "", file_id_fix)
# Download the annotated png file
download_name = download_folder_name + '/' + origin_id
file_download_url_map = api_client.get_channel_file_download(OUTPUT_DATALAKE_CHANNEL_ID, file_id)
file_download_url = file_download_url_map['download_url']
urllib.request.urlretrieve(file_download_url, download_name)
# Build the colormap JSON data
ano_map = [m for m in anotation_data if 'color' in m]
ano_map_list_str = ''.join(map(str,ano_map))
ano_map_color = ano_map_list_str.split(',', 10)
ano_map_label = ano_map_list_str.split(',', 4)[2]
list_fileid.append(file_id)
list_filename.append(origin_id)
list_ano_map.append(ano_map)
# Write out the colormap JSON file
file_dict = dict(zip(list_filename,list_ano_map))
text = json.dumps(file_dict, indent=2)
with open(download_folder_name + '/colormap.json', 'w') as f:
json.dump(file_dict, f)
print('Download OK')
###Output
|
CoronaSimLessM&Recovery.ipynb | ###Markdown
###Code
def get_init():
xcord = [np.random.random()*box_width-box_width/2 for i in range(nparticles)]
ycord = [np.random.random()*box_width-box_width/2 for i in range(nparticles)]
return xcord, ycord
def get_initial_velo():
x_vel = [2*(np.random.random()-0.5)*box_width for i in range(int(nparticles))]
y_vel = [2*(np.random.random()-0.5)*box_width for i in range(int(nparticles))]
return x_vel, y_vel
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib
import random
nparticles = 100
box_width = 10
n_steps = 5000
dt = 0.001
clr = [0 for i in range(nparticles)]
n = random.randint(int(nparticles*.70), nparticles - 1) # random index among the moving particles (stays within range)
clr[n] += 5 #infecting the RANDOM PATIENT
infect_time = [0 for i in range(nparticles)] #time for which a patient becomes infected
infection_period = 900 #multiple of 60 as FPS =60 so 15 seconds
infect_time[n] += 1200
infection_range = 0.1 #how close the two people have to be in order to get infected
fig, ax = plt.subplots(figsize=(7,7))
xcord, ycord = get_init()
x_vel, y_vel = get_initial_velo()
for each in range(int(nparticles*.70)): # making 70 percent of the particles stationary
x_vel[each] = 0
y_vel[each] = 0
points = ax.scatter(xcord,ycord, c=clr,cmap="Reds",edgecolors="black",linewidths=0.5)
#plot returns a tuple so use points, use ',' after the variable name
fig.tight_layout()
ax.set_ylim(-5, 5)
ax.set_xlim(-5,5)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([]) #removes the axes
ax.text(1, -4.75, 'By Achal Dixit',
fontsize=10, color='black',
ha='right', va='bottom', alpha=0.5)
ax.set_xlabel("Sreading pattern with 100% movement and no recovery")
#for i in range(n_steps):
# xcord, ycord, x_vel, y_vel = take_step(xcord,ycord, x_vel, y_vel)
def update(frame):
for i in range(int(nparticles)):
xcord[i] += x_vel[i]*dt
ycord[i] += y_vel[i]*dt
if abs( xcord[i]) > box_width/2:
x_vel[i] = -x_vel[i]
xcord[i] += x_vel[i]*dt
if abs( ycord[i]) > box_width/2:
y_vel[i] = -y_vel[i]
ycord[i] += y_vel[i]*dt
#infecting the particles
for y in range(i+1,int(nparticles)):
if xcord[i] == xcord[y] and ycord[i] == ycord[y] or abs(xcord[i] - xcord[y]) < infection_range and abs(ycord[i] - ycord[y]) < infection_range :
#tweak the infection_range parameter to get the infection range.
# if xcord[i] == xcord[y] and ycord[i] == ycord[y]: will not work as the points need to same after many decimal places which is almost impossible
if(clr[i]>0 and clr[y]< 5) :
clr[y] += 5;
infect_time[y] += 900;
elif(clr[i]<5 and clr[y]>0):
clr[i] += 5;
infect_time[i] += 900;
else:
True
if infect_time[i] > 0: #treating the particles
infect_time[i] -= 1
if infect_time[i] == 0:
clr[i] = 0
#points.set_xdata(xcord)
#points.set_ydata(ycord)
#plt.cla()
ax.cla()
points = ax.scatter(xcord,ycord, c=clr,cmap="Reds",edgecolors="black",linewidths=0.5)
ax.get_xaxis().set_ticks([]) #hides the axis
ax.get_yaxis().set_ticks([]) #hides the axis
ax.text(1, -4.75, 'By Achal Dixit',
fontsize=10, color='black',
ha='right', va='bottom', alpha=0.5)
ax.set_xlabel("Sreading pattern with 30% movement and Recovery")
#points.set_offsets(np.c_[xcord,ycord])
return points
%matplotlib inline
matplotlib.rcParams['animation.embed_limit'] = 2**128
ani = animation.FuncAnimation(fig, update,frames= np.arange(1,4000),interval=1)
ani.save('animationSocialDist&Rec.mp4', fps=60);
plt.show()
from IPython.display import HTML
HTML(ani.to_jshtml())
import os
print( os.getcwd() )
print( os.listdir() )
from google.colab import files
files.download( "animationSocialDist&Rec.mp4" ) # match the filename saved by ani.save above
###Output
_____no_output_____ |
test_lesson_1.ipynb | ###Markdown
###Code
import torch
###Output
_____no_output_____
###Markdown
###Code
def activation(x):
""" Sigmoid activation function
Arguments
---------
x: torch.Tensor
"""
return 1/(1+torch.exp(-x))
torch.manual_seed(7)
features = torch.randn((1,5))
features.shape[0], features.shape[1]
weights = torch.rand_like(features)
weights.shape
bias = torch.randn((1,1))
bias.shape
y = activation(torch.sum(features * weights) + bias)
y
###Output
_____no_output_____
###Markdown
Stack it up
###Code
torch.manual_seed(7)
features = torch.randn(1,3)
n_inputs = features.shape[1]
n_hidden = 2
n_output = 1
#Weights for input to hidden layer
W1 = torch.randn(n_inputs, n_hidden)
#Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
W1.shape, W2.shape
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
B1.shape, B2.shape
h = activation(torch.mm(features, W1) + B1)
y = activation(torch.mm(h, W2) + B2)
y.shape, y
###Output
_____no_output_____
###Markdown
MNIST Data set handling
###Code
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
images[1].shape
# Flatten the input images
inputs = images.view(images.shape[0], -1)
inputs.shape
w1 = torch.randn(inputs.shape[1], 256)
b1 = torch.randn(256)
w1.shape, b1.shape
w2 = torch.randn(256, 10)
b2 = torch.randn(10)
w2.shape, b2.shape
h = activation(torch.mm(inputs, w1) + b1)
out = activation(torch.mm(h, w2) + b2)
out.shape
###Output
_____no_output_____
###Markdown
Softmax $$\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_{k=1}^{K}{e^{x_k}}}$$
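As a quick numeric check of the formula (values rounded to three decimals):$$\sigma([1, 2, 3]) = \left[\tfrac{e^{1}}{e^{1}+e^{2}+e^{3}},\ \tfrac{e^{2}}{e^{1}+e^{2}+e^{3}},\ \tfrac{e^{3}}{e^{1}+e^{2}+e^{3}}\right] \approx [0.090,\ 0.245,\ 0.665],$$ which sums to 1, as a probability distribution over classes should.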
###Code
def softmax(x):
return torch.exp(x)/torch.sum(torch.exp(x), dim=1).view(-1,1)
probabilities = softmax(out)
probabilities.shape
val = torch.exp(out)
val.shape
val = torch.sum(torch.exp(out))
val.shape
val = torch.sum(torch.exp(out), dim=0)
val.shape
val = torch.sum(torch.exp(out), dim=1)
val.shape
val = torch.sum(torch.exp(out), dim=1).view(-1,1)
val.shape
from torch import nn
??nn.Linear
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
return x
###Output
_____no_output_____
###Markdown
Functional way of defining the same network using `torch.nn.functional` (commonly imported as `F`)
###Code
import torch.nn.functional as F
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
def forward(self, x):
# Hidden layer with sigmoid activation
x = F.sigmoid(self.hidden(x))
# Output layer with softmax activation
x = F.softmax(self.output(x), dim=1)
return x
class ReluNetwork(nn.Module):
def __init__(self):
super().__init__()
self.input = nn.Linear(784, 128)
self.fc1 = nn.Linear(128, 64)
self.output = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.input(x))
x = F.relu(self.fc1(x))
x = F.softmax(self.output(x), dim=1)
return x
model = ReluNetwork()
model
###Output
_____no_output_____
###Markdown
Using nn.sequential
###Code
from torch import nn
input_size = 784
hidden_layer_size = [128, 64]
output_size = 10
model = nn.Sequential(nn.Linear(input_size, hidden_layer_size[0]),
nn.ReLU(),
nn.Linear(hidden_layer_size[0], hidden_layer_size[1]),
nn.ReLU(),
nn.Linear(hidden_layer_size[1], output_size),
nn.Softmax(dim=1))
model
model[0].weight
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_layer_size[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_layer_size[0], hidden_layer_size[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_layer_size[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
model.fc1.weight
# Define the loss
criterion = nn.CrossEntropyLoss()
criterion
###Output
_____no_output_____
###Markdown
**CrossEntropy loss definition**$$ \large \text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right) = -x[class] + \log\left(\sum_j \exp(x[j])\right) $$OR$$ \large \text{loss}(x, class) = weight[class] \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right) $$
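A small sanity-check sketch (toy logits, made up here) of what this definition implies: `nn.CrossEntropyLoss` gives the same value as `nn.LogSoftmax` followed by `nn.NLLLoss`:

```python
# Toy check: CrossEntropyLoss == LogSoftmax + NLLLoss
import torch
from torch import nn

toy_logits = torch.randn(4, 10)            # 4 samples, 10 classes
toy_labels = torch.tensor([0, 3, 7, 1])    # arbitrary class indices

ce = nn.CrossEntropyLoss()(toy_logits, toy_labels)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(toy_logits), toy_labels)
print(ce, nll)                             # the two losses match
```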
###Code
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3042, grad_fn=<NllLossBackward>)
###Markdown
**Exercise:** Build a model that returns the `log-softmax` as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and ` F.log_softmax` you'll need to set the dim keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose dim appropriately.
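To see what the `dim` argument does before building the model, here is a tiny illustration on a made-up 2x3 tensor:

```python
# dim=0 normalizes each column; dim=1 normalizes each row (per-sample probabilities)
import torch
import torch.nn.functional as F

toy = torch.tensor([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])
print(F.softmax(toy, dim=0).sum(dim=0))   # tensor([1., 1., 1.]) -> columns sum to 1
print(F.softmax(toy, dim=1).sum(dim=1))   # tensor([1., 1.])     -> rows sum to 1
```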
###Code
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_layer_size[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_layer_size[0], hidden_layer_size[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_layer_size[1], output_size)),
('softmax', nn.LogSoftmax(dim=1))]))
model
criterion = nn.NLLLoss()
criterion
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3015, grad_fn=<NllLossBackward>)
###Markdown
Losses in PyTorch
###Code
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(784, 128)),
('Relu1', nn.ReLU()),
('fc2', nn.Linear(128, 64)),
('Relu2', nn.ReLU()),
('output', nn.Linear(64, 10)),
('logSoftmax', nn.LogSoftmax(dim=1))
]))
model
criterion = nn.NLLLoss()
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logPs = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logPs, labels)
print(loss)
loss.backward()
print('Gradient -', model.fc1.weight.grad)
# Take an update step and few the new weights
optimizer.step()
print('Updated weights - ', model.fc1.weight.grad)
###Output
Updated weights - tensor([[-5.1349e-03, -5.1349e-03, -5.1349e-03, ..., -5.1349e-03,
-5.1349e-03, -5.1349e-03],
[ 5.8969e-04, 5.8969e-04, 5.8969e-04, ..., 5.8969e-04,
5.8969e-04, 5.8969e-04],
[-2.3754e-03, -2.3754e-03, -2.3754e-03, ..., -2.3754e-03,
-2.3754e-03, -2.3754e-03],
...,
[ 2.6402e-03, 2.6402e-03, 2.6402e-03, ..., 2.6402e-03,
2.6402e-03, 2.6402e-03],
[ 5.2306e-05, 5.2306e-05, 5.2306e-05, ..., 5.2306e-05,
5.2306e-05, 5.2306e-05],
[-1.7939e-03, -1.7939e-03, -1.7939e-03, ..., -1.7939e-03,
-1.7939e-03, -1.7939e-03]])
###Markdown
Solution to the exercise
###Code
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
# Forward pass, get our logits
logPs = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logPs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
#ps = logps
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
What will happen??**Invalid results when we call** `optimizer.zero_grad()` **in the wrong place**
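The reason is that PyTorch accumulates gradients in each parameter's `.grad` attribute across `backward()` calls. A tiny standalone sketch of that behaviour:

```python
# Gradients accumulate across backward() calls until they are zeroed
import torch

w = torch.ones(3, requires_grad=True)
for _ in range(2):
    loss = (w * 2).sum()     # d(loss)/dw = 2 for every element
    loss.backward()
print(w.grad)                # tensor([4., 4., 4.]) -- two passes added together

w.grad.zero_()               # this is what optimizer.zero_grad() does per parameter
```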
###Code
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
optimizer.zero_grad() # --> Not the correct place to reset the optimizer
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
# Forward pass, get our logits
logPs = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logPs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
??helper.view_classify
###Output
_____no_output_____ |
experiment/classification/ClassifyC1byItemName_E2.ipynb | ###Markdown
Encode C1
###Code
encode_df, encode_col = one_hot_encode_feature(df, encode_column='c1',drop_first=False)
encode_df.shape
encode_df.head(5)
encode_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 50000 entries, 0 to 49999
Data columns (total 18 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 train_id 50000 non-null int64
1 name 50000 non-null object
2 item_condition_id 50000 non-null int64
3 brand_name 50000 non-null object
4 price 50000 non-null float64
5 shipping 50000 non-null int64
6 item_description 50000 non-null object
7 c2 50000 non-null object
8 c3 50000 non-null object
9 c1_beauty 50000 non-null uint8
10 c1_electronics 50000 non-null uint8
11 c1_home 50000 non-null uint8
12 c1_kids 50000 non-null uint8
13 c1_men 50000 non-null uint8
14 c1_other 50000 non-null uint8
15 c1_sports & outdoors 50000 non-null uint8
16 c1_vintage & collectibles 50000 non-null uint8
17 c1_women 50000 non-null uint8
dtypes: float64(1), int64(3), object(5), uint8(9)
memory usage: 3.9+ MB
###Markdown
Extract Item Name Features
###Code
lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words('english')
num_of_processes = 8
col_name = 'name'
clean_col_name = "clean_%s" % col_name
df = parallelize(encode_df, partial(extract_counts, col_name=col_name, prefix="bef"), num_of_processes)
df = parallelize(encode_df, partial(extract_info, col_name=col_name, stop_words=stop_words), num_of_processes)
###Output
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 148.15it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 148.02it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.88it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.43it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.72it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.35it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.31it/s]
progress-bar: 100%|██████████| 6250/6250 [00:42<00:00, 147.33it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.18it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.25it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.17it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.21it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.13it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 95.03it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 94.96it/s]
progress-bar: 100%|██████████| 6250/6250 [01:05<00:00, 94.89it/s]
###Markdown
Split Train Test
###Code
X = df.select_dtypes(include=['int64']).drop(columns=list(encode_col)+['train_id'])
x_col = X.columns
X['name'] = df['name']
X.info()
y = encode_df[encode_col]
y.info()
X_train, X_test, y_train, y_test = train_test_split(X, y.values,
test_size=0.30, random_state=42, stratify = y)
###Output
_____no_output_____
###Markdown
Vectorization of Item Name
###Code
num_features = 20
tv = TfidfVectorizer(max_features=num_features)
train_name_feature = tv.fit_transform(X_train.name.to_list())
train_name_feature.toarray().shape
X_train = np.concatenate((train_name_feature.toarray(), X_train[x_col].values),axis=1)
X_test = np.concatenate((tv.transform(X_test.name.to_list()).toarray(), X_test[x_col].values),axis=1)
names = tv.get_feature_names()
names
x_tf_names = ['tf%02d_%s'%(i,names[i-1]) for i in range(1, num_features+1)]
x_tf_names += list(x_col)
###Output
_____no_output_____
###Markdown
Classification - Random Forest
###Code
rf_model, y_train, y_test, y_train_pred, y_test_pred = train_classification_model(RandomForestClassifier(n_estimators=50),
X_train, X_test, y_train, y_test,
target_classname = encode_col)
feature_importances = visualize_model_feature_importances(rf_model, x_tf_names, title = "Feature Importance")
visualize_2d_cluster_with_legend('c1', feature_importances[0][0], feature_importances[1][0],
x_tf_names, encode_col, X_train, X_test,
y_train, y_test, y_train_pred,y_test_pred)
###Output
_____no_output_____
###Markdown
Classification - Logistic Regression
###Code
lr_model, y_train, y_test, y_train_pred, y_test_pred = train_classification_model(LogisticRegression(max_iter=500),
X_train, X_test, y_train, y_test,
target_classname = encode_col)
feature_importances = visualize_model_feature_importances(lr_model, x_tf_names, title = "Feature Importance")
visualize_2d_cluster_with_legend('c1', feature_importances[0][0], feature_importances[1][0],
x_tf_names, encode_col, X_train, X_test,
y_train, y_test, y_train_pred,y_test_pred,
legend = True, title = "c1 prediction")
###Output
_____no_output_____ |
Black Friday Dataset Analysis-Predictions.ipynb | ###Markdown
Black Friday Sales Analysis and Prediction This dataset comprises sales transactions captured at a retail store. It is a classic dataset for exploring and expanding feature engineering skills and day-to-day understanding of various shopping experiences. The dataset has 550,069 rows and 12 columns. The project consists of an end-to-end analysis and application of regression machine learning algorithms, based on chapter 2 of the book 'Hands-On Machine Learning with Scikit-Learn & TensorFlow' by Aurélien Géron (Brazilian edition: 'Mãos à Obra: Aprendizado de Máquina com Scikit-Learn & TensorFlow').Original link: https://datahack.analyticsvidhya.com/contest/black-friday/**Objective**: Determine the best regression model for this problem. Getting the Data Libraries
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv('https://raw.githubusercontent.com/Daniel-RPS/Black-Friday-Dataset-Analysis-Predictions/main/BlackFriday.csv')
df.head()
df.columns # Columns
###Output
_____no_output_____
###Markdown
Data dictionary * `User_ID`: Unique ID of the user. There are a total of 5891 users in the dataset.* `Product_ID`: Unique ID of the product. There are a total of 3623 products in the dataset.* `Gender`: indicates the gender of the person making the transaction.* `Age`: indicates the age group of the person making the transaction.* `Occupation`: shows the occupation of the user, already labeled with numbers 0 to 20.* `City_Category`: category of the user's city of residence. Cities are categorized into 3 different categories 'A', 'B' and 'C'.* `Stay_In_Current_City_Years`: indicates how long the user has lived in this city.* `Marital_Status`: is 0 if the user is not married and 1 otherwise.* `Product_Category_1 to _3`: category of the product. All 3 are already labeled with numbers.* `Purchase`: purchase amount.
###Code
df.shape # rows and columns
df.info() # data description
df.describe() # summary of the numeric attributes
df.describe(include='O') # statistical description of the categorical variables
# Null values in the dataset
df.isnull().sum()
# Replacing the null values with the mean of each attribute
import math
med_cat2 = df['Product_Category_2'].mean()
med_cat3 = df['Product_Category_3'].mean()
med_cat2 = math.floor(med_cat2)
med_cat3 = math.floor(med_cat3)
df['Product_Category_2'].fillna(med_cat2, inplace=True)
df['Product_Category_3'].fillna(med_cat3, inplace=True)
###Output
_____no_output_____
###Markdown
Removing all non-numeric values 'Stay_In_Current_City_Years' can be a numeric value. The '+' sign can be removed and the attribute converted to a numeric type
###Code
# Removing the '+' sign
df['Stay_In_Current_City_Years'] = [x[:1] for x in df['Stay_In_Current_City_Years']]
# Converting the 'Stay_In_Current_City_Years' column to int64
df['Stay_In_Current_City_Years'] = df['Stay_In_Current_City_Years'].astype(np.int64)
# Null values in the dataset
df.isnull().sum()
df.hist(bins=50,figsize=(20,15)) # histogram
plt.show()
###Output
_____no_output_____
###Markdown
Creating the Test Set Choosing the appropriate model
###Code
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(df, test_size=0.2, random_state=42)
test_set.head()
###Output
_____no_output_____
###Markdown
Which attribute is the most important for estimating the purchase amount? According to the correlation table, it is Product_Category_1. Therefore, stratified sampling will be performed on this attribute
###Code
corr_matrix = df.corr()
corr_matrix['Purchase'].sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Creating an attribute from the Product_Category_1 category
###Code
df["Product_Category_1"].hist();
df["pc1_cat"] = pd.cut(df["Product_Category_1"],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
# Converting 'pc1_cat' to float.
df['pc1_cat'] = df['pc1_cat'].astype(float)
# Next, the null values were replaced with the mean of the attribute
med_pc1 = df['pc1_cat'].mean()
med_pc1 = math.floor(med_pc1)
df['pc1_cat'].fillna(med_pc1, inplace=True)
df["pc1_cat"].value_counts()
df["pc1_cat"].hist();
###Output
_____no_output_____
###Markdown
Stratified sampling based on Product Category 1
###Code
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(df, df["pc1_cat"]):
strat_train_set = df.loc[train_index]
strat_test_set = df.loc[test_index]
# Proportions of Product Category 1 in the test set
strat_test_set["pc1_cat"].value_counts() / len(strat_test_set)
# Proportions of Product Category 1 in the full dataset
df["pc1_cat"].value_counts() / len(df)
def income_cat_proportions(data):
return data["pc1_cat"].value_counts() / len(data)
train_set, test_set = train_test_split(df, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": income_cat_proportions(df),
"Stratified": income_cat_proportions(strat_test_set),
"Random": income_cat_proportions(test_set),
}).sort_index()
compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
###Output
_____no_output_____
###Markdown
Comparison of sampling bias: stratified versus random sampling
###Code
compare_props
###Output
_____no_output_____
###Markdown
The test set generated with stratified sampling has Product_Category_1 proportions almost identical to those of the full dataset, whereas the test set generated with random sampling is somewhat skewed.
###Code
# Removing the pc1_cat attribute so the data returns to its original state
for set_ in (strat_train_set, strat_test_set):
set_.drop("pc1_cat", axis=1, inplace=True)
# Creating a copy to experiment with without harming the training set
df = strat_train_set.copy()
###Output
_____no_output_____
###Markdown
Exploring the data Looking for correlations
###Code
corr_matrix = df.corr()
corr_matrix['Purchase'].sort_values(ascending=False)
# Attribute combinations
df['occupation_pc1'] = df['Occupation']/df['Product_Category_1']
df['occupation_pc3'] = df['Occupation']/df['Product_Category_3']
df['marital_pc1'] = df['Marital_Status']/df['Product_Category_1']
corr_matrix = df.corr()
corr_matrix['Purchase'].sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
There are some interesting correlations between Occupation and Product_Category_1 and 3 that could be explored, as well as between Marital_Status and Product_Category_1. Purchase could also be explored against the product categories (1, 2 and 3) and against Occupation
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Exploring some hypotheses 1 - Which group consumes the most during Black Friday? The main consumer group on Black Friday is people aged 26 to 35.
###Code
index = df['Age'].value_counts().index #Ordenar as colunas do maior para o menor
sns.catplot(x='Age', kind="count",palette="Set2", size = 8,order = index, data=df)
plt.xlabel('Idade', fontsize=13)
plt.ylabel('Número de consumidores')
plt.title('Consumo por público',fontsize=21);
###Output
_____no_output_____
###Markdown
2 - Married people consume more on Black Friday than unmarried people. False. Unmarried people consume more Black Friday products than married people.
###Code
plt.figure(figsize=(12,5))
ms = df['Marital_Status'].value_counts()
plt.figure(figsize=(7,7))
plt.pie(ms, labels=['Não casado', 'Casado'], autopct='%1.1f%%', shadow=True);
###Output
_____no_output_____
###Markdown
3 - Who spends more on average on Black Friday, men or women? In this dataset, men spend more than women, on average, on Black Friday.
###Code
plt.figure(figsize=(12,5))
df[['Gender','Purchase']].groupby('Gender').mean().reset_index()
sns.barplot(x='Gender', y='Purchase',data=df);
###Output
_____no_output_____
###Markdown
4 - Average of Black Friday buyers by gender The biggest consumers are men between 26-35 years old. Minors (0-17 years old) are the ones who consume the least on Black Friday.
###Code
plt.figure(figsize=(12,5))
sns.countplot(df['Age'],hue=df['Gender'])
plt.show();
###Output
_____no_output_____
###Markdown
5 - Are men from category A cities the biggest Black Friday consumers? False. Men from category B cities are the most frequent in the dataset. Women from category A cities are the least frequent.
###Code
df[['City_Category','Gender']].value_counts().sort_values(ascending=False).plot.bar(stacked=True, figsize=(15,5))
plt.xticks(rotation = 0);
###Output
_____no_output_____
###Markdown
6 - Average prices of Product_Category_1 This attribute has a strong correlation with Purchase
###Code
plt.figure(figsize=(15,5))
sns.barplot(x='Product_Category_1', y='Purchase',data=df)
plt.show();
###Output
_____no_output_____
###Markdown
7 - Presence of consumed products by category in the dataset
###Code
plt.figure(figsize=(15,25))
prod_cat1 = df.groupby('Product_Category_1')['Product_ID'].nunique()
prod_cat2 = df.groupby('Product_Category_2')['Product_ID'].nunique()
prod_cat3 = df.groupby('Product_Category_3')['Product_ID'].nunique()
plt.subplot(4, 1, 1)
sns.barplot(x=prod_cat1.index,y=prod_cat1.values)
plt.title('Product Category 1')
plt.subplot(4, 1, 2)
sns.barplot(x=prod_cat2.index,y=prod_cat2.values)
plt.title('Product Category 2')
plt.subplot(4, 1, 3)
sns.barplot(x=prod_cat3.index,y=prod_cat3.values)
plt.title('Product Category 3')
plt.show()
###Output
_____no_output_____
###Markdown
Prepare the data for Machine Learning algorithms
###Code
# Revert to a clean training set and create a copy of the data
df = strat_train_set.drop("Purchase", axis=1) # drop labels for training set
df_labels = strat_train_set["Purchase"].copy()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
df_num = df.drop(['User_ID','Product_ID','Age','Gender','City_Category'], axis=1)
imputer.fit(df_num)
imputer.statistics_
# Transforming the training set:
X = imputer.transform(df_num)
###Output
_____no_output_____
###Markdown
Handling text and categorical attributes Here, the 'Gender' and 'City_Category' attributes will be handled
###Code
df_cat = df[['Gender','City_Category']]
df_cat.head(10)
from sklearn.preprocessing import OrdinalEncoder
###Output
_____no_output_____
###Markdown
* What will be done? Most ML algorithms prefer to work with numbers, so these categories will be converted from text to numbers using the OrdinalEncoder() method.* In the Gender attribute, M may become 0 and F may become 1. In the City_Category attribute, A may become 0, B may become 1, and so on.
###Code
ordinal_encoder = OrdinalEncoder()
df_cat_encoded = ordinal_encoder.fit_transform(df_cat)
df_cat_encoded[:10]
ordinal_encoder.categories_
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
df_cat_1hot = cat_encoder.fit_transform(df_cat)
df_cat_1hot
df_cat_1hot.toarray()
cat_encoder.categories_
df.columns
###Output
_____no_output_____
###Markdown
Customizing Transformers
###Code
from sklearn.base import BaseEstimator, TransformerMixin
# get the right column indices: safer than hard-coding indices 3, 4, 5, 6
occupation_ix, city_years_ix, marital_ix, pc1_ix, pc2_ix, pc3_ix = [
list(df.columns).index(col)
for col in ("Occupation", "Stay_In_Current_City_Years", "Marital_Status", "Product_Category_1",
"Product_Category_2", "Product_Category_3")]
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_city_years_per_pc1 = True): # no *args or **kwargs
self.add_city_years_per_pc1 = add_city_years_per_pc1
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
marital_per_pc1_ix = X[:, marital_ix] / X[:, pc1_ix]
occupation_per_pc1_ix = X[:, occupation_ix] / X[:, pc1_ix]
if self.add_city_years_per_pc1:
city_years_per_pc1 = X[:, city_years_ix] / X[:, pc1_ix]
return np.c_[X, marital_per_pc1_ix, occupation_per_pc1_ix,
city_years_per_pc1]
else:
return np.c_[X, marital_per_pc1_ix, occupation_per_pc1_ix]
attr_adder = CombinedAttributesAdder(add_city_years_per_pc1=False)
df_extra_attribs = attr_adder.transform(df.values)
from sklearn.preprocessing import FunctionTransformer
def add_extra_features(X, add_city_years_per_pc1 = True):
marital_per_pc1_ix = X[:, marital_ix] / X[:, pc1_ix]
occupation_per_pc1_ix = X[:, occupation_ix] / X[:, pc1_ix]
if add_city_years_per_pc1:
city_years_per_pc1 = X[:, city_years_ix] / X[:, pc1_ix]
return np.c_[X, marital_per_pc1_ix, occupation_per_pc1_ix, city_years_per_pc1]
else:
return np.c_[X, marital_per_pc1_ix, occupation_per_pc1_ix]
attr_adder = FunctionTransformer(add_extra_features, validate=False,
kw_args={"add_city_years_per_pc1": False})
df_extra_attribs = attr_adder.fit_transform(df.values)
df_extra_attribs = pd.DataFrame(
df_extra_attribs,
columns=list(df.columns)+["marital_per_pc1_ix", "occupation_per_pc1_ix"],
index=df.index)
df_extra_attribs.head()
###Output
_____no_output_____
###Markdown
Transformation Pipelines * A pipeline will be built to preprocess the numerical attributes.
###Code
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', FunctionTransformer()),
('std_scaler', StandardScaler()),
])
df_num_tr = num_pipeline.fit_transform(df_num)
df_num_tr
from sklearn.compose import ColumnTransformer
num_attribs = list(df_num)
cat_attribs = ['Gender','City_Category']
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", OneHotEncoder(), cat_attribs),
])
df_prepared = full_pipeline.fit_transform(df)
df_prepared
df_prepared.shape
###Output
_____no_output_____
###Markdown
* Feeding our pipeline a pandas DataFrame containing non-numeric columns instead of manually extracting them into a NumPy array.
###Code
from sklearn.base import BaseEstimator, TransformerMixin
# Create a class to select numerical or categorical columns
class OldDataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
###Output
_____no_output_____
###Markdown
* Joining all these components into one large pipeline that will preprocess the numerical and categorical features.
###Code
num_attribs = list(df_num)
cat_attribs = ['Gender','City_Category']
old_num_pipeline = Pipeline([
('selector', OldDataFrameSelector(num_attribs)),
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', FunctionTransformer()),
('std_scaler', StandardScaler()),
])
old_cat_pipeline = Pipeline([
('selector', OldDataFrameSelector(cat_attribs)),
('cat_encoder', OneHotEncoder(sparse=False)),
])
from sklearn.pipeline import FeatureUnion
old_full_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", old_num_pipeline),
("cat_pipeline", old_cat_pipeline),
])
old_df_prepared = old_full_pipeline.fit_transform(df)
old_df_prepared
np.allclose(df_prepared, old_df_prepared)
df_prepared.shape
###Output
_____no_output_____
###Markdown
* At the start, there were 537,577 rows in the dataset * Now there are 430,061 rows (107,516 rows, roughly 20%, were removed) Select and train a model * Linear Regression
###Code
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(df_prepared, df_labels)
# let's try the full preprocessing pipeline on a few training instances
some_data = df.iloc[:5]
some_labels = df_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:", lin_reg.predict(some_data_prepared))
print("Labels:", list(some_labels))
some_data_prepared
###Output
_____no_output_____
###Markdown
* It works, although it is not precise (the first prediction is off by roughly 20%) * 7788.4375 predicted vs. 6080 actual -> about a 21.9% difference relative to the prediction
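As a quick sanity check on those percentages (a small worked example added here, not part of the original notebook):
###Code
# Worked example, assuming 7788.4375 is the first prediction and 6080 its true label (as quoted above).
pred, label = 7788.4375, 6080
abs_err = abs(pred - label)      # 1708.4375
print(abs_err / pred)            # ~0.219 -> ~21.9% of the prediction
print(abs_err / label)           # ~0.281 -> ~28.1% of the label
###Output
_____no_output_____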
###Code
#RMSE - Root mean squared error
from sklearn.metrics import mean_squared_error
df_predictions = lin_reg.predict(df_prepared)
lin_mse = mean_squared_error(df_labels, df_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
#MAE - Mean absolute error
from sklearn.metrics import mean_absolute_error
lin_mae = mean_absolute_error(df_labels, df_predictions)
lin_mae
###Output
_____no_output_____
###Markdown
* Decision Trees
###Code
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(df_prepared, df_labels)
df_predictions = tree_reg.predict(df_prepared)
tree_mse = mean_squared_error(df_labels, df_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
###Output
_____no_output_____
###Markdown
* Random Forest Regressor
###Code
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(n_estimators=10, random_state=42)
forest_reg.fit(df_prepared, df_labels)
df_predictions = forest_reg.predict(df_prepared)
forest_mse = mean_squared_error(df_labels, df_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
###Output
_____no_output_____
###Markdown
Better Evaluation with Cross-Validation * Decision Trees
###Code
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, df_prepared, df_labels,
scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
display_scores(tree_rmse_scores)
###Output
Scores: [3242.21081799 3240.06091506 3258.15457359 3236.49788007 3223.05660966
3260.02329743 3253.06285082 3254.82063309 3243.0235843 3268.50480745]
Mean: 3247.941596944711
Standard deviation: 12.712283254035556
###Markdown
* Random Forest Regressor
###Code
from sklearn.model_selection import cross_val_score
forest_scores = cross_val_score(forest_reg, df_prepared, df_labels,
scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
###Output
Scores: [3093.62505475 3096.61739872 3099.97415404 3096.77565746 3093.37820088
3113.25023394 3110.95535262 3106.15728503 3104.41889439 3127.31649143]
Mean: 3104.246872324635
Standard deviation: 10.125146371552066
###Markdown
* Linear Regression
###Code
from sklearn.model_selection import cross_val_score
lin_scores = cross_val_score(lin_reg, df_prepared, df_labels,
scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
###Output
Scores: [4689.75612327 4687.01666743 4684.90024194 4703.85367844 4679.17265164
4712.07681919 4700.65271483 4703.79048731 4706.84013052 4717.63997825]
Mean: 4698.569949281753
Standard deviation: 12.034471596146583
###Markdown
Results
###Code
plt.figure(figsize=(10,5))
modelos = ['Random Forest Regressor','Árvores de Decisão','Regressão Linear']
rmse_results = [forest_rmse_scores.mean(),tree_rmse_scores.mean(),lin_rmse_scores.mean()]
plt.bar(modelos,rmse_results)
plt.show()
###Output
_____no_output_____
###Markdown
* The model with the best result was the Random Forest Regressor, with a mean RMSE of 3104.25 and a standard deviation of 10.12. * The model with the worst result was Linear Regression, with a mean RMSE of 4698.57. Tuning the Model
###Code
from sklearn.model_selection import GridSearchCV
param_grid = [
# try 12 (3×4) combinations of hyperparameters
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
# then try 6 (2×3) combinations with bootstrap set as False
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
forest_reg = RandomForestRegressor(random_state=42)
# train across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error', return_train_score=True)
grid_search.fit(df_prepared, df_labels)
grid_search.best_params_
grid_search.best_estimator_
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
pd.DataFrame(grid_search.cv_results_)
###Output
_____no_output_____
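###Markdown
As a quick cross-check (a sketch added here, not part of the original notebook), the tuned RMSE discussed below can also be read directly from the fitted grid search, since `best_score_` holds the mean cross-validated `neg_mean_squared_error` of the best parameter combination.
###Code
# Sketch: recover the best cross-validated RMSE and parameters from the grid search above.
best_rmse = np.sqrt(-grid_search.best_score_)
print(best_rmse, grid_search.best_params_)
###Output
_____no_output_____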
###Markdown
* The best solution was obtained with the following hyperparameters: {'max_features': 6, 'n_estimators': 30}, giving an RMSE of 3090.7681177947175. This score is slightly better than the result obtained earlier with the default hyperparameter values (3104.246872324635). The model was tuned successfully. Analyzing the Best Models and Their Errors
###Code
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
extra_attribs = ['occupation_pc1', 'occupation_pc3', 'marital_pc1']
cat_encoder = full_pipeline.named_transformers_["cat"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
###Output
_____no_output_____
###Markdown
* Apparently, only the 'Product_Category_1' feature is really useful. Evaluating the System on the Test Set
###Code
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop('Purchase', axis=1)
y_test = strat_test_set['Purchase'].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_mse
final_rmse
###Output
_____no_output_____
###Markdown
Conclusion * The final RMSE was better than the one measured with cross-validation, even though only a few hyperparameter adjustments were made. The table below shows the difference.
###Code
resultados = {'Modelo': ['RMSE Final','RMSE Cross-Validation'],
'Resultado': [final_rmse, forest_rmse_scores.mean()]
}
table = pd.DataFrame(resultados, columns=["Modelo","Resultado"])
table
###Output
_____no_output_____
###Markdown
* The proposed goal was to determine the best regression model for this problem. The final Random Forest Regressor was the model with the best results, compared to Linear Regression and Decision Trees, achieving the lowest RMSE of all.
###Code
plt.figure(figsize=(10,5))
modelos = ['Random Forest Regressor','Árvores de Decisão','Regressão Linear']
rmse_results = [final_rmse,tree_rmse_scores.mean(),lin_rmse_scores.mean()]
plt.bar(modelos,rmse_results)
plt.show()
###Output
_____no_output_____ |
references/video_classification/receptive fields.ipynb | ###Markdown
Receptive field calculation
###Code
import torch
import torch.nn as nn
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
def check_same(stride):
if isinstance(stride, (list, tuple)):
assert len(stride) == 2 and stride[0] == stride[1]
stride = stride[0]
return stride
def receptive_field(model, input_size, batch_size=-1, device="cuda"):
'''
:parameter
'input_size': tuple of (Channel, Height, Width)
:return OrderedDict of `Layername`->OrderedDict of receptive field stats {'j':,'r':,'start':,'conv_stage':,'output_shape':,}
    'j' for "jump" is the distance, in input pixels, between the receptive-field centers of spatially neighboring units
    in the feature tensor,
    i.e. shifting by one unit in this feature map == shifting by j pixels in the input image in one direction.
'r' for "receptive_field" is the spatial range of the receptive field in one direction.
    'start' denotes the center of the receptive field for the first unit (start) in one direction of the feature tensor.
Convention is to use half a pixel as the center for a range. center for `slice(0,5)` is 2.5.
'''
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(receptive_field)
m_key = "%i" % module_idx
p_key = "%i" % (module_idx - 1)
receptive_field[m_key] = OrderedDict()
if not receptive_field["0"]["conv_stage"]:
print("Enter in deconv_stage")
receptive_field[m_key]["j"] = 0
receptive_field[m_key]["r"] = 0
receptive_field[m_key]["start"] = 0
else:
p_j = receptive_field[p_key]["j"]
p_r = receptive_field[p_key]["r"]
p_start = receptive_field[p_key]["start"]
if class_name == "Conv2d" or class_name == "MaxPool2d":
kernel_size = module.kernel_size
stride = module.stride
padding = module.padding
kernel_size, stride, padding = map(check_same, [kernel_size, stride, padding])
receptive_field[m_key]["j"] = p_j * stride
receptive_field[m_key]["r"] = p_r + (kernel_size - 1) * p_j
receptive_field[m_key]["start"] = p_start + ((kernel_size - 1) / 2 - padding) * p_j
elif class_name == "BatchNorm2d" or class_name == "ReLU" or class_name == "Bottleneck":
receptive_field[m_key]["j"] = p_j
receptive_field[m_key]["r"] = p_r
receptive_field[m_key]["start"] = p_start
elif class_name == "ConvTranspose2d":
receptive_field["0"]["conv_stage"] = False
receptive_field[m_key]["j"] = 0
receptive_field[m_key]["r"] = 0
receptive_field[m_key]["start"] = 0
else:
print('skip module', class_name)
# raise ValueError("module %s not ok" % class_name)
pass
receptive_field[m_key]["input_shape"] = list(input[0].size()) # only one
receptive_field[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
# list/tuple
receptive_field[m_key]["output_shape"] = [
[-1] + list(o.size())[1:] for o in output
]
else:
# tensor
receptive_field[m_key]["output_shape"] = list(output.size())
receptive_field[m_key]["output_shape"][0] = batch_size
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
and not (module == model)
):
hooks.append(module.register_forward_hook(hook))
device = device.lower()
assert device in [
"cuda",
"cpu",
], "Input device is not valid, please specify 'cuda' or 'cpu'"
if device == "cuda" and torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
# check if there are multiple inputs to the network
if isinstance(input_size[0], (list, tuple)):
x = [Variable(torch.rand(2, *in_size)).type(dtype) for in_size in input_size]
else:
x = Variable(torch.rand(2, *input_size)).type(dtype)
# create properties
receptive_field = OrderedDict()
receptive_field["0"] = OrderedDict()
receptive_field["0"]["j"] = 1.0
receptive_field["0"]["r"] = 1.0
receptive_field["0"]["start"] = 0.5
receptive_field["0"]["conv_stage"] = True
receptive_field["0"]["output_shape"] = list(x.size())
receptive_field["0"]["output_shape"][0] = batch_size
hooks = []
# register hook
model.apply(register_hook)
# make a forward pass
model(x)
# remove these hooks
for h in hooks:
h.remove()
print("------------------------------------------------------------------------------")
line_new = "{:>20} {:>10} {:>10} {:>10} {:>15} ".format("Layer (type)", "map size", "start", "jump", "receptive_field")
print(line_new)
print("==============================================================================")
total_params = 0
total_output = 0
trainable_params = 0
for layer in receptive_field:
# input_shape, output_shape, trainable, nb_params
assert "start" in receptive_field[layer], layer
assert len(receptive_field[layer]["output_shape"]) == 4
line_new = "{:7} {:12} {:>10} {:>10} {:>10} {:>15} ".format(
"",
layer,
str(receptive_field[layer]["output_shape"][2:]),
str(receptive_field[layer]["start"]),
str(receptive_field[layer]["j"]),
format(str(receptive_field[layer]["r"]))
)
print(line_new)
print("==============================================================================")
# add input_shape
receptive_field["input_size"] = input_size
return receptive_field
def receptive_field_for_unit(receptive_field_dict, layer, unit_position):
"""Utility function to calculate the receptive field for a specific unit in a layer
using the dictionary calculated above
:parameter
'layer': layer name, should be a key in the result dictionary
'unit_position': spatial coordinate of the unit (H, W)
```
alexnet = models.alexnet()
model = alexnet.features.to('cuda')
receptive_field_dict = receptive_field(model, (3, 224, 224))
receptive_field_for_unit(receptive_field_dict, "8", (6,6))
```
Out: [(62.0, 161.0), (62.0, 161.0)]
"""
input_shape = receptive_field_dict["input_size"]
if layer in receptive_field_dict:
rf_stats = receptive_field_dict[layer]
assert len(unit_position) == 2
feat_map_lim = rf_stats['output_shape'][2:]
if np.any([unit_position[idx] < 0 or
unit_position[idx] >= feat_map_lim[idx]
for idx in range(2)]):
raise Exception("Unit position outside spatial extent of the feature tensor ((H, W) = (%d, %d)) " % tuple(feat_map_lim))
# X, Y = tuple(unit_position)
rf_range = [(rf_stats['start'] + idx * rf_stats['j'] - rf_stats['r'] / 2,
rf_stats['start'] + idx * rf_stats['j'] + rf_stats['r'] / 2) for idx in unit_position]
if len(input_shape) == 2:
limit = input_shape
else: # input shape is (channel, H, W)
limit = input_shape[1:3]
rf_range = [(max(0, rf_range[axis][0]), min(limit[axis], rf_range[axis][1])) for axis in range(2)]
print("Receptive field size for layer %s, unit_position %s, is \n %s" % (layer, unit_position, rf_range))
return rf_range
else:
raise KeyError("Layer name incorrect, or not included in the model.")
###Output
_____no_output_____
###Markdown
Comment out the downsampling (shortcut) logic of the ResNet, since it doesn't affect the receptive field. (Also comment out the last linear layer and the average pool.)
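The hook-based calculator above applies the standard recurrence r_out = r_in + (kernel - 1) * j_in and j_out = j_in * stride layer by layer; the hand-computed sketch below (an added illustration, not from the original notebook) walks the ResNet stem plus one BasicBlock, and also shows why the 1x1 shortcut convolutions can be ignored: a kernel of size 1 adds (1 - 1) * j = 0 to r.
###Code
# Sketch of the receptive-field recurrence the hook-based code implements.
layers = [("conv7x7 /2", 7, 2), ("maxpool3 /2", 3, 2), ("conv3x3", 3, 1), ("conv3x3", 3, 1)]
r, j = 1, 1  # a single input pixel sees itself; neighboring units start 1 pixel apart
for name, k, s in layers:
    r, j = r + (k - 1) * j, j * s
    print(f"{name:12s} r={r:3d} j={j}")
# expected progression: r = 7, 11, 19, 27 with j = 2, 4, 4, 4
###Output
_____no_output_____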
###Code
import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
# self.downsample = downsample
self.stride = stride
def forward(self, x):
# identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
# if self.downsample is not None:
# identity = self.downsample(x)
# out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])#, stride=2)
self.layer2 = self._make_layer(block, 128, layers[1], stride=1,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilate=replace_stride_with_dilation[1])
# self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
# dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
print(x.shape)
x = self.layer1(x)
print(x.shape)
x = self.layer2(x)
print(x.shape)
x = self.layer3(x)
print(x.shape)
# x = self.layer4(x)
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# print(x.shape)
# x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
# model = resnet18()
model = _resnet('resnet18', BasicBlock, [1, 1, 1, 0], False, False)
# print(list(model.children())[1])
# print(model)
def walk(m):
out = []
# print(m.)
for mm in m.children():
# print(k)
# if 'downsample' in k:
# pass
if len(list(mm.children())) == 0:
out += [mm]
else:
out += walk(mm)
return out
modules = walk(model)
# for m in modules:
# print(m)
# print(model)
model2 = nn.Sequential(*modules[:-2])
dummy = torch.zeros(1, 3, 256, 256)
print(model(dummy).shape)
# print(model2(dummy).shape)
receptive_field_dict = receptive_field(model2.cuda(), (3, 256, 256))
receptive_field_for_unit(receptive_field_dict, "2", (1, 1))
import torch.nn as nn
from receptivefield.pytorch import PytorchReceptiveField
from receptivefield.image import get_default_image
# model
# define model functions
def model_fn() -> nn.Module:
model = resnet18()
model.eval()
return model
input_shape = [96, 96, 3]
rf = PytorchReceptiveField(model_fn)
rf_params = rf.compute(input_shape = input_shape)
# plot receptive fields
rf.plot_rf_grids(
custom_image=get_default_image(input_shape, name='cat'),
figsize=(20, 12),
layout=(1, 2))
###Output
_____no_output_____ |
csc528-computer-vision/CSC528_A3.ipynb | ###Markdown
CSC528 Assignment 3 - Alex Teboul. Helpful Resources: * [OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.html) Problem 1: Expectation Maximization. This problem comes (approximately) from Chapter 17 of the Forsyth book: create a Gaussian mixture model using expectation maximization to segment an image. You are allowed to manually specify how many Gaussians you will have in the final result. (Easiest case to test might be 2: foreground and background; you might want to experiment with larger numbers to reflect more objects in the image.) You need only do this for single parameter images (gray-scale), although you can use color if you wish (harder). Do not use existing packages. Think of this as fitting a Gaussian mixture model to the image histogram: we don't care about where the pixel is (although we could); we only care about intensities and their probabilities. You might also look at the Wikipedia article on mixture modeling (https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model). Brilliant.org also has a nice read on Gaussian mixture modelling (https://brilliant.org/wiki/gaussian-mixture-model/). Try your algorithm on an image of your choice. Provide me the original image and an image with pixels labeled by the Gaussian component to which they belonged. (You can use color or grayscale to do the labelling.) Put all your work into a single file: all images and program code. Submit using the dropbox in D2L.
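Since the assignment asks for EM without existing packages while the cells below rely on scikit-learn's GaussianMixture for the reported results, here is a minimal from-scratch sketch of EM for two 1-D Gaussians fitted to the gray-level intensities (an added illustration only; the function name, initialisation, and two-component choice are assumptions, not the submitted solution):
###Code
import numpy as np
def em_two_gaussians(pixels, n_iter=50):
    # pixels: 2-D grayscale array; we model only the intensity values, not pixel positions.
    x = pixels.ravel().astype(float)
    lo, hi = x.min(), x.max()
    mu = np.array([lo + 0.25 * (hi - lo), lo + 0.75 * (hi - lo)])  # crude initial means
    var = np.array([x.var(), x.var()]) + 1e-6
    w = np.array([0.5, 0.5])                                       # mixing weights
    for _ in range(n_iter):
        # E-step: responsibility of each Gaussian for each pixel intensity
        pdf = np.stack([w[k] / np.sqrt(2 * np.pi * var[k])
                        * np.exp(-0.5 * (x - mu[k]) ** 2 / var[k]) for k in range(2)])
        resp = pdf / (pdf.sum(axis=0, keepdims=True) + 1e-12)
        # M-step: re-estimate weights, means, and variances from the responsibilities
        nk = resp.sum(axis=1) + 1e-12
        w, mu = nk / x.size, (resp * x).sum(axis=1) / nk
        var = (resp * (x - mu[:, None]) ** 2).sum(axis=1) / nk + 1e-6
    labels = resp.argmax(axis=0).reshape(pixels.shape)  # per-pixel component label
    return mu, var, w, labels
# usage sketch: mu, var, w, labels = em_two_gaussians(gray_image); plt.imshow(labels)
###Output
_____no_output_____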
###Code
from google.colab import drive
drive.mount('/content/gdrive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/gdrive
###Markdown
Get the Image(s)
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# read in my image
img = cv2.imread("/content/gdrive/My Drive/A3-images/appleorange.jpg")
#Bring it different images to test ------------------
#apple-orange example
rgb_ao = cv2.cvtColor(cv2.imread("/content/gdrive/My Drive/A3-images/appleorange.jpg"),cv2.COLOR_BGR2RGB)
gray_ao = cv2.imread("/content/gdrive/My Drive/A3-images/appleorange.jpg",cv2.IMREAD_GRAYSCALE)
#astro example
rgb_as = cv2.cvtColor(cv2.imread("/content/gdrive/My Drive/A3-images/astro.jpg"),cv2.COLOR_BGR2RGB)
gray_as = cv2.imread("/content/gdrive/My Drive/A3-images/astro.jpg",cv2.IMREAD_GRAYSCALE)
#castle example
rgb_ca = cv2.cvtColor(cv2.imread("/content/gdrive/My Drive/A3-images/castle.jpg"),cv2.COLOR_BGR2RGB)
gray_ca = cv2.imread("/content/gdrive/My Drive/A3-images/castle.jpg",cv2.IMREAD_GRAYSCALE)
#astro2 example
rgb_p = cv2.cvtColor(cv2.imread("/content/gdrive/My Drive/A3-images/person.jpg"),cv2.COLOR_BGR2RGB)
gray_p = cv2.imread("/content/gdrive/My Drive/A3-images/person.jpg",cv2.IMREAD_GRAYSCALE)
# Resize the images in case necessary
gray_ao2 = cv2.resize(gray_ao, (256,256), interpolation = cv2.INTER_CUBIC)
gray_as2 = cv2.resize(gray_as, (256,256), interpolation = cv2.INTER_CUBIC)
gray_ca2 = cv2.resize(gray_ca, (256,256), interpolation = cv2.INTER_CUBIC)
gray_p2 = cv2.resize(gray_p, (256,256), interpolation = cv2.INTER_CUBIC)
###Output
_____no_output_____
###Markdown
Display Image
###Code
#Display image
plt.title("Original Image (color)")
plt.imshow(rgb_ca)
plt.title("Original Image (gray-scale)")
plt.imshow(gray_ca, cmap="gray")
#The castle image has 2 gaussians on the other hand
plt.title("castle grayscale histogram")
plt.hist(gray_ca.ravel(), bins=40)
###Output
_____no_output_____
###Markdown
Start EM and Gaussian Mixture Modeling
###Code
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
#select image
img = gray_ca2
hist, bin_edges = np.histogram(img, bins=40)
bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:])
#classify model points with gaussian mixture model of 2 components
model = GaussianMixture(n_components=2)
model.fit(img.reshape((img.size, 1)))
# Evaluate GMM
gmm_x = np.linspace(0,253,256)
gmm_y = np.exp(model.score_samples(gmm_x.reshape(-1,1)))
#threshold - since there are only 2 components, pixel intensities below the threshold are dropped
threshold = np.mean(model.means_)
GMM_selected_img = img < threshold
#plot original gray-scale image----
plt.figure(figsize=(11,4))
plt.subplot(131)
plt.title("Original Image (gray-scale)")
plt.imshow(gray_ca2, cmap="gray")
plt.axis('off')
#plot gaussian model
plt.subplot(132)
plt.title("GMM w/ gaussian \n means threshold line")
plt.plot(gmm_x, gmm_y, color="crimson", lw=2)
#this is the threshold line. Such that
plt.axvline(169, color='r', ls='--', lw=2)
#plot image by gaussian model values
plt.yticks([])
plt.subplot(133)
#plt.imshow(GMM_selected_img, cmap='Blues')
plt.title("segmented w/EM in GMM")
plt.imshow(GMM_selected_img, cmap='Blues', interpolation='nearest') #This makes it binary
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0, right=1)
plt.show()
# Plot histograms and gaussian curves
fig, ax = plt.subplots()
plt.title("GMM Image Histogram Pixel Intensities")
ax.plot(gmm_x, gmm_y, color="crimson", lw=2)
plt.title("bin centers image histogram comparison")
plt.plot(bin_centers, hist, lw=2)
#So I cut out pixels above the threshold because those belonged to the sky. This way land is selected based on the first gaussian.
# I assumed the mean of the gaussian means is the point of difference between the two gaussians.
threshold
###Output
_____no_output_____ |
scripts/LogReg.ipynb | ###Markdown
Logistic Regression Section
###Code
train = pd.read_csv('train_data.csv')
test = pd.read_csv('test_data.csv')
X_train = train.drop(['High Income'], axis=1)
y_train = train['High Income']
X_test = test.drop(['High Income'], axis=1)
y_test = test['High Income']
###Output
_____no_output_____
###Markdown
Logistic Regression Coefficients
###Code
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
coef = logreg.coef_
print("Coefficients: " + str(logreg.coef_))
print("Intercept" + str(logreg.intercept_))
###Output
Coefficients: [[0.25213684 0.52613559]]
Intercept[-16.3964274]
###Markdown
Get Data Points for Desmos graphs
###Code
train['linreg'] = train['Age']*0.25213684 + train['Years of Education']*0.52613559 - 16.3964274
train['logreg'] = 1 / (1 + np.exp(-train['linreg']))
mask_tr = train.applymap(type) != bool
d = {True: 1, False: 0}
train = train.where(mask_tr, train.replace(d))
def merge(list1, list2):
merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]
return merged_list
train_Ytuple = list(train['High Income'])
train_Xtuple = list(train['linreg'])
train_tuples = merge(train_Xtuple, train_Ytuple)
test['linreg'] = test['Age']*0.25213684 + test['Years of Education']*0.52613559 - 16.3964274
test['logreg'] = 1 / (1 + np.exp(-test['linreg']))
mask_te = test.applymap(type) != bool
test = test.where(mask_te, test.replace(d))
test_Ytuple = list(test['High Income'])
test_Xtuple = list(test['linreg'])
test_tuples = merge(test_Xtuple, test_Ytuple)
###Output
_____no_output_____
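###Markdown
As a cross-check on the hand-coded columns above (a sketch added here, not part of the original notebook; it assumes X_train holds exactly the Age and Years of Education columns): the hard-coded numbers are just `logreg.coef_` and `logreg.intercept_`, so the same values can be produced from the fitted model directly.
###Code
# The manual linear term should match sklearn's decision_function, and the manual sigmoid
# should match predict_proba, up to the rounding of the printed coefficients.
lin_manual = X_train['Age'] * 0.25213684 + X_train['Years of Education'] * 0.52613559 - 16.3964274
print(np.allclose(lin_manual, logreg.decision_function(X_train), atol=1e-4))
print(np.allclose(1 / (1 + np.exp(-lin_manual)), logreg.predict_proba(X_train)[:, 1], atol=1e-4))
###Output
_____no_output_____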
###Markdown
Model Performance Section
###Code
adult = pd.read_csv("adult.csv")
print(adult.shape)
mask = adult.applymap(type) == bool
d = {">50K": 1, "<=50K": 0}
adult = adult.where(mask, adult.replace(d))
data = adult[['age', 'education.num', 'income']]
data['income'] = data["income"].astype(str).astype(int)
X = data.drop(['income'], axis=1)
y = data['income']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 42)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Train accuracy:" + str(logreg.score(X_train, y_train)))
print("Test accuracy:" + str(logreg.score(X_test, y_test)))
logreg = LogisticRegression()
cross_val_score(logreg, X_train, y_train, cv=5)
###Output
_____no_output_____ |
stock_model_using_template.ipynb | ###Markdown
Import the package.
###Code
import stock_model
###Output
_____no_output_____
###Markdown
Instantiate the stock_model class and assign it to a variable.
###Code
template = stock_model.stock_model()
###Output
_____no_output_____
###Markdown
Report the parameters and get an explanation.
###Code
template.para()
template.para_explain()
template.para_explain(['ticker','lag'])
###Output
ticker: ticker of the asset, default SPY. string
lag: time lag to consider for the model, default 50. int
###Markdown
Change parameters.
###Code
template.ticker = 'aapl'
template.para_change({'ticker':'aapl','task_type':'reg','target':'return','normalization':False})
template.para()
###Output
ticker: aapl
start: 1990-01-01
end: 2019-11-15
task_type: reg
target: return
model_type: sk
model_name: rf
test_size: 0.05
lag: 50
ta: False
normalization: False
drift_include: False
commission: 0.0
###Markdown
Getting and processing the data.
###Code
template.target = 'price'
template.data_prepare()
###Output
[*********************100%***********************] 1 of 1 completed
###Markdown
Descriptive analysis of the data.
###Code
template.plot_all(lag = 2000, ran = 1)
template.analyze_raw()
###Output
_____no_output_____
###Markdown
Build the model.
###Code
template.model_build_sk(model_para='n_jobs = -1')
###Output
_____no_output_____
###Markdown
Train the model.
###Code
template.model_train_sk()
###Output
c:\users\zfan2\anaconda3\envs\tf_gpu\lib\site-packages\sklearn\ensemble\forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
"10 in version 0.20 to 100 in 0.22.", FutureWarning)
###Markdown
Evaluate the model.
###Code
template.score_analyze()
###Output
_____no_output_____
###Markdown
Plot the model prediction.
###Code
template.score_plot_prediction()
template.score_plot_return()
###Output
_____no_output_____
###Markdown
Quick Version
###Code
import stock_model
template = stock_model.stock_model()
template.para_change({'ticker':'dia','task_type':'classification','target':'return','model_name':'rnn','model_type':'tf'})
template.para()
template.data_prepare()
template.plot_all()
template.model_build_tf(number_layer = 2, width = 50)
template.model_train_tf(epoch = 10, batch_size = 64)
template.score_analyze()
###Output
WARNING:tensorflow:From c:\users\zfan2\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\ops\init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
WARNING:tensorflow:From c:\users\zfan2\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\ops\math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
Epoch 1/10
5168/5168 [==============================] - 15s 3ms/sample - loss: 0.7158 - acc: 0.5064
Epoch 2/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6999 - acc: 0.51241s - loss: 0.6986 -
Epoch 3/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.7002 - acc: 0.5155
Epoch 4/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6985 - acc: 0.50432s - loss: 0.6990 -
Epoch 5/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6955 - acc: 0.51929s - loss: 0.6 -
Epoch 6/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6958 - acc: 0.5037
Epoch 7/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6956 - acc: 0.5130
Epoch 8/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6931 - acc: 0.5213
Epoch 9/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6934 - acc: 0.5193
Epoch 10/10
5168/5168 [==============================] - 14s 3ms/sample - loss: 0.6944 - acc: 0.51453s - loss
275/275 [==============================] - 1s 3ms/sample - loss: 0.6898 - acc: 0.5564
|
pydata/07_summary.ipynb | ###Markdown
At the end of the day What can you do now with what you learned today? How I started this work p.s. I like to take notes on a *mac* with **Letterspace** - You may use the same notebook server - on your laptop - on a server (requires an admin to launch the docker image) - You can add missing libraries - via `pip` or `conda` bash commands inside the notebook - You may use GitHub to save your progress and collaborate with others * you may start your work by forking Cineca's lectures note: we are working on a Python 3 notebook docker image The final hint IPython kernels can be built for other languages! https://github.com/ipython/ipython/wiki/IPython-kernels-for-other-languages For R too: https://github.com/IRkernel/IRkernel note: we are working on an R notebook image to push to the Cineca Docker Hub How to start your next notebook
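For example (an illustrative snippet, not from the original slides), a missing library can be installed from a code cell with a shell escape:
###Code
# Install a missing package from inside the notebook (uncomment the line that matches your setup):
# !pip install --user seaborn
# !conda install --yes seaborn
###Output
_____no_output_____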
###Code
# Data packages
import numpy as np
import scipy as sp
import pandas as pd
# Plot packages
%matplotlib inline
import matplotlib as plt
import seaborn as sns
###Output
_____no_output_____ |
Scatter Plots.ipynb | ###Markdown
Now we will see how color and size can be set on a per-mark basis rather than applied to all of the marks. Having per-point colors and sizes lets us encode an additional data set in our plot. In our current plot we want to add some extra information: for example, assume the current data is survey data from a group of people and we want to break it down into something more specific. Say we asked these people to give a rating from 1 to 10 and we want to plot that rating as well. To do so, we can simply assign different numbers to the different possibilities, and those will show up as different colors on the scatter plot as long as we pass that list into the method.
###Code
# The numbers in the colors list are values between 1 and 10; each value corresponds to one
# data point in the x and y data sets, and we pass the list to the scatter method as the color argument.
colors = [7, 5, 9, 7, 5, 7, 2, 5, 3, 7, 1, 2, 8, 1, 9, 2, 5, 6, 7, 5]
# This is some x, y data between 1-10 to plot
x = [5, 7, 8, 5, 6, 7, 9, 2, 3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]
y = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 9, 1]
# Now we can also change the color of these by using built-in color maps, just like marker symbols
plt.scatter(x, y, s = 100, c = colors, cmap = 'Greens', edgecolor = 'black', linewidth = 1, alpha = 0.75)
# We will get different shades of green as per intensity
plt.tight_layout()
plt.show()
# You will probably want to label the colormap so people know what the color represents; to do that we can
# add a colorbar legend
# This is some x, y data between 1-10 to plot
x = [5, 7, 8, 5, 6, 7, 9, 2, 3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]
y = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 9, 1]
# Now we can also change the color of these by using built-in color maps, just like marker symbols
plt.scatter(x, y, s = 100, c = colors, cmap = 'Greens', edgecolor = 'black', linewidth = 1, alpha = 0.75)
cbar = plt.colorbar()
cbar.set_label('Satisfaction')
plt.tight_layout()
plt.show()
# We can also change the size of our data points; just like color, this adds another way to
# encode information in our data. To do this we will define a sizes list
sizes = [209, 486, 381, 255, 191, 315, 185, 228, 174,
538, 239, 394, 399, 153, 273, 293, 436, 501, 397, 539]
# This is the list that corresponds to the x and y data set.
# This is some x, y data between 1-10 to plot
x = [5, 7, 8, 5, 6, 7, 9, 2, 3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]
y = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 9, 1]
# Now we can also change the color of these by using built-in color maps, just like marker symbols
plt.scatter(x, y, s = sizes,
c = colors, cmap = 'Greens', edgecolor = 'black', linewidth = 1, alpha = 0.75)
cbar = plt.colorbar()
cbar.set_label('Satisfaction')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Plotting Real-World Data Using a CSV File, which contains one day's YouTube trending-page data
###Code
data = pd.read_csv('data/ScatterPlotData.csv')
# Scatter plot of their total views and total likes and ratio of likes/dislikes as well
view_count = data['view_count']
likes = data['likes']
ratio = data['ratio']
plt.scatter(view_count, likes, edgecolor = 'black', linewidth = 1, alpha = 0.75)
plt.title('Trending Youtube Videos')
plt.xlabel('View Count')
plt.ylabel('Total likes')
cbar = plt.colorbar()
plt.tight_layout()
plt.show()
# We can also use log scale with scatter plot.
view_count = data['view_count']
likes = data['likes']
ratio = data['ratio']
plt.scatter(view_count, likes, edgecolor = 'black', linewidth = 1, alpha = 0.75)
plt.xscale('log')
plt.yscale('log')
plt.title('Trending Youtube Videos')
plt.xlabel('View Count')
plt.ylabel('Total likes')
cbar = plt.colorbar()
plt.tight_layout()
plt.show()
# Let's also use the ratio; it is a good metric for the colors of our points (we could also use it for sizes)
view_count = data['view_count']
likes = data['likes']
ratio = data['ratio']
plt.scatter(view_count, likes,c = ratio, cmap = 'summer', edgecolor = 'black', linewidth = 1, alpha = 0.75)
plt.xscale('log')
plt.yscale('log')
plt.title('Trending Youtube Videos')
plt.xlabel('View Count')
plt.ylabel('Total likes')
cbar = plt.colorbar()
cbar.set_label('Like Dislike Ratio')
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
Notebooks/.ipynb_checkpoints/Query_GNI_via_Solr-checkpoint.ipynb | ###Markdown
Query GNI via SolrJenna Jordan22 January 2020 - 12 February 2020
###Code
import pandas as pd
from query_solr_functions import query_solr, to_daily_timeseries, run_all_queries, filter_results
###Output
_____no_output_____
###Markdown
Prep for querying the BulkLexisNexis corpus: - specify fields to query - create the base time-series, which the to_daily_timeseries & run_all_queries functions require - build the key for converting publishers to acronyms
###Code
ts_fields = ['aid', 'publication_date', 'publisher']
AP_daterange = pd.date_range(start='1977-01-01', end='2019-08-18')
SWB_daterange = pd.date_range(start='1979-01-01', end='2019-08-18')
AFP_daterange = pd.date_range(start='1991-05-05', end='2019-08-18')
XGNS_daterange = pd.date_range(start='1977-01-01', end='2019-08-18')
NYT_daterange = pd.date_range(start='1980-06-01', end='2019-08-18')
WP_daterange = pd.date_range(start='1977-01-01', end='2019-08-18')
UPI_daterange = pd.date_range(start='1980-09-26', end='2019-08-16')
DPA_daterange = pd.date_range(start='1994-07-03', end='2019-08-18')
IPS_daterange = pd.date_range(start='2010-01-13', end='2019-07-17')
publishers = [{'name': 'BBC Monitoring: International Reports', 'abbr': 'SWB', 'dates': SWB_daterange},
{'name': 'The New York Times', 'abbr': 'NYT', 'dates': NYT_daterange},
{'name': 'The Washington Post', 'abbr': 'WP', 'dates': WP_daterange},
{'name': 'The Associated Press', 'abbr': 'AP', 'dates': AP_daterange},
{'name': 'Agence France Presse - English', 'abbr': 'AFP', 'dates': AFP_daterange},
{'name': 'Xinhua General News Service', 'abbr': 'XGNS', 'dates': XGNS_daterange},
{'name': 'UPI (United Press International)', 'abbr': 'UPI', 'dates': UPI_daterange},
{'name': 'dpa international (Englischer Dienst)', 'abbr': 'DPA', 'dates': DPA_daterange},
{'name': 'Inter Press Service', 'abbr': 'IPS', 'dates': IPS_daterange}]
all_dfs = []
for pub in publishers:
df = pd.DataFrame(pub['dates'], columns=['publication_date'])
df['publisher'] = pub['name']
df['publisher'] = df['publisher'].astype('category')
all_dfs.append(df)
base_ts = pd.concat(all_dfs)
pubmap = {}
for pub in publishers:
pubmap[pub['name']]= pub['abbr']
###Output
_____no_output_____
###Markdown
Query to get total article counts within BLN. Note: this is separate so that it doesn't have to be re-run each time a new query is added (because this one takes the longest).
###Code
biggest_query = [
{'name': 'BLN_total',
'query':
"""
(content:*) AND source_name:BulkLexisNexis
"""}
]
bln_df = run_all_queries(biggest_query, ts_fields, base_ts)
bln_df = bln_df.reset_index()
bln_df.publisher = bln_df.publisher.map(pubmap)
bln_df.to_csv("../Data/bln_daily_total.csv", index=False)
###Output
_____no_output_____
###Markdown
All Queries
###Code
all_queries = [
{'name': 'insect_population',
'query':
"""
(content: (insect OR pollinator OR bee OR honeybee OR moth) AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*) AND (study OR professor OR experiment OR research OR analysis OR data)) AND (source_name:BulkLexisNexis)
"""},
{'name': 'insect_decline',
'query':
"""
(content: (insect OR pollinator OR bee OR honeybee OR moth) AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*) AND (study OR professor OR experiment OR research OR analysis OR data) AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)) AND (source_name:BulkLexisNexis)
"""},
{'name': 'pollinator_population',
'query':
"""
(content: ((insect AND pollinator) OR (bee OR honeybee OR moth)) AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*) AND (study OR professor OR experiment OR research OR analysis OR data)) AND (source_name:BulkLexisNexis)
"""},
{'name': 'pollinator_decline',
'query':
"""
(content: ((insect AND pollinator) OR (bee OR honeybee OR moth)) AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*) AND (study OR professor OR experiment OR research OR analysis OR data) AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)) AND (source_name:BulkLexisNexis)
"""},
{'name': 'insect_apocalypse',
'query':
"""
(content:"insect apocalypse"~5 OR "insect armageddon"~5 OR "beepocalypse") AND source_name:BulkLexisNexis
"""},
{'name': 'colony_collapse',
'query':
"""
(content:"colony collapse" AND (bee OR honeybee)) AND source_name:BulkLexisNexis
"""},
{'name': 'climate_change',
'query':
"""
(content:"climate change" OR "global warming") AND source_name:BulkLexisNexis
"""},
{'name': 'climate_change_IPCCreport',
'query':
"""
(content:("climate change" OR "global warming") AND ("IPCC" OR "Intergovernmental Panel on Climate Change") AND report) AND source_name:BulkLexisNexis
"""},
{'name': 'insect_population_studies',
'query':
"""
(content: ("Krefeld" OR "the German study" OR "Hans de Kroon" OR "Martin Sorg" OR "Werner Stenmans" OR "Dave Goulson" OR "Brad Lister" OR "Andres Garcia" OR "the Puerto Rico study" OR "S?nchez-Bayo" OR "Wyckhuys" OR "Rob Dunn" OR "David Wagner" OR "Chris Thomas" OR "Anders Tottrup" OR "Kevin Gaston" OR "Chris Thomas" OR "Roel van Klink" OR "Arthur Shapiro" OR "Aletta Bonn" OR "E.O. Wilson") AND (insect OR pollinator OR bee OR honeybee OR moth) AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*) AND (study OR professor OR experiment OR research OR analysis OR data)) AND (source_name:BulkLexisNexis)
"""}
]
###Output
_____no_output_____
###Markdown
note: this list of query dictionaries is meant to be used with the filter_results function (and when prune=True for run_all_queries). After running a statistical analysis to compare pruned vs. un-pruned data, we decided not to prune the data, because it had no effect on the final analysis and explaining the pruning process would be too complicated.
###Code
all_queries_pruned = [
{'name': 'insect_population_pruned',
'query':
"""
(content: (insect OR pollinator OR bee OR honeybee OR moth)^5 AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)^5 AND (study OR professor OR experiment OR research OR analysis OR data)) AND (source_name:BulkLexisNexis)^0.00001
""",
'add_fields': ['score'],
'filter_method': ('score', 35.73716)},
{'name': 'insect_decline_pruned',
'query':
"""
(content: (insect OR pollinator OR bee OR honeybee OR moth)^5 AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)^5 AND (study OR professor OR experiment OR research OR analysis OR data) AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)) AND (source_name:BulkLexisNexis)^0.00001
""",
'add_fields': ['score'],
'filter_method': ('score', 34.896996)},
{'name': 'pollinator_population_pruned',
'query':
"""
(content: ((insect AND pollinator) OR (bee OR honeybee OR moth))^5 AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)^5 AND (study OR professor OR experiment OR research OR analysis OR data)) AND (source_name:BulkLexisNexis)^0.00001
""",
'add_fields': ['score'],
'filter_method': ('score', 32.83181)},
{'name': 'pollinator_decline_pruned',
'query':
"""
(content: ((insect AND pollinator) OR (bee OR honeybee OR moth))^5 AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)^5 AND (study OR professor OR experiment OR research OR analysis OR data) AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)) AND (source_name:BulkLexisNexis)^0.00001
""",
'add_fields': ['score'],
'filter_method': ('score', 30.817253)},
{'name': 'climate_change_pruned',
'query':
"""
(content:"climate change" OR "global warming") AND source_name:BulkLexisNexis
""",
'add_fields': ['termfreq(content, climate)', 'termfreq(content, warming)'],
'filter_method': ('termfreq', 2)}
]
###Output
_____no_output_____
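###Markdown
For reference, a sketch of the pruning idea that was ultimately not used (this assumes the filter simply drops result rows below the stated threshold; the real logic lives in `filter_results` from query_solr_functions):
###Code
# Keep only documents whose relevance score (or query-term frequency) reaches the threshold.
def prune_results(df, column, threshold):
    return df[df[column] >= threshold]
# e.g. prune_results(results, 'score', 35.73716) for the 'insect_population_pruned' query
###Output
_____no_output_____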
###Markdown
Create Time-Series dataset
###Code
query_df = run_all_queries(all_queries, ts_fields, base_ts)
query_df = query_df.reset_index()
query_df.publisher = query_df.publisher.map(pubmap)
query_df.to_csv("../Data/query_results_bln-ts_26Feb.csv", index=False)
###Output
_____no_output_____
###Markdown
Create article-level dataset to compare articles across queries
###Code
article_dfs = []
for q in all_queries:
result = query_solr(q['query'], ['aid', 'publisher', 'publication_date', 'title', 'url'])
qname = q['name']
result[qname] = 1
article_dfs.append(result)
article_df = article_dfs[0]
for df in article_dfs[1:]:
article_df = article_df.merge(df, on=['aid', 'publication_date', 'publisher', 'title', 'url'], how = 'outer')
article_df = article_df.fillna(0)
article_df = article_df.astype(int, errors='ignore')
article_df.publisher = article_df.publisher.map(pubmap)
article_df['publication_date'] = pd.to_datetime(article_df['publication_date'].astype('str').str[:10], format='%Y-%m-%d', errors='coerce')
bad_publisher_mask = article_df[article_df['publisher'].isin(['IPS', 'UPI', 'SWB'])].index
article_df = article_df.drop(bad_publisher_mask)
bad_date_mask = article_df[article_df['publication_date'].isna()].index
article_df = article_df.drop(bad_date_mask)
article_df = article_df.sort_values(by=['publisher', 'publication_date'])
article_df
article_df.to_csv("../Data/Analyze/BLNqueries_compare_article-level_26Feb.csv", index=False)
article_df['aid'].to_csv("../Data/Metadata/aids_for_metadata_request.csv", index=False)
###Output
_____no_output_____ |
scraping/DF_Human.ipynb | ###Markdown
Scraping First, we scrape the Person and MemberCouncil tables.
###Code
scrap = Scraper()
df_person = scrap.get('Person')
df_member_council = scrap.get('MemberCouncil')
###Output
GET: https://ws.parlament.ch/odata.svc/Person?$top=1000&$filter=Language%20eq%20'FR'&$skip=0
GET: https://ws.parlament.ch/odata.svc/Person?$top=1000&$filter=Language%20eq%20'FR'&$skip=1000
GET: https://ws.parlament.ch/odata.svc/Person?$top=1000&$filter=Language%20eq%20'FR'&$skip=2000
GET: https://ws.parlament.ch/odata.svc/Person?$top=1000&$filter=Language%20eq%20'FR'&$skip=3000
GET: https://ws.parlament.ch/odata.svc/Person?$top=1000&$filter=Language%20eq%20'FR'&$skip=4000
[OK] table Person correctly scraped, df.shape = 3525 as expected
GET: https://ws.parlament.ch/odata.svc/MemberCouncil?$top=1000&$filter=Language%20eq%20'FR'&$skip=0
GET: https://ws.parlament.ch/odata.svc/MemberCouncil?$top=1000&$filter=Language%20eq%20'FR'&$skip=1000
GET: https://ws.parlament.ch/odata.svc/MemberCouncil?$top=1000&$filter=Language%20eq%20'FR'&$skip=2000
GET: https://ws.parlament.ch/odata.svc/MemberCouncil?$top=1000&$filter=Language%20eq%20'FR'&$skip=3000
GET: https://ws.parlament.ch/odata.svc/MemberCouncil?$top=1000&$filter=Language%20eq%20'FR'&$skip=4000
[OK] table MemberCouncil correctly scraped, df.shape = 3514 as expected
###Markdown
Now, we check the shape of both DataFrames.
###Code
print("Length person: ", df_person.shape)
print("Length member council: ", df_member_council.shape)
###Output
Length person: (3525, 21)
Length member council: (3514, 43)
###Markdown
As we can see, the Person DF is bigger than the MemberCouncil DF. Therefore, we will get the IDs of the persons that appear only in Person.
###Code
# IDs that are only in Person
id_unique_person = list(set(df_person['ID']) - set(df_member_council['ID']))
print(id_unique_person)
# Create DF of the unique persons
df_unique_person = df_person.loc[df_person['ID'].isin(id_unique_person)]
df_unique_person
###Output
['4133', '4043', '832', '830', '1309', '4010', '3991', '3990', '4211', '831', '4127']
###Markdown
We checked on Wikipedia as well as on http://parlament.ch who these persons are: - Secretary General of the Federal Assembly: - Jean-Marc Sauvant: from 1981-1992 - Mariangela Wallimann-Bornatico: from 1999 to 2008 - Christoph Lanz: from 2008 to 2013 - Philippe Schwab: from 2013 to now - Vice-Chancellor of Switzerland: - Achille Casanova: from 1981 to 2005 - Oswald Sigg: from 2005 to 2009 - Hanna Muralt Müller: before 2005 - Thomas Helbling: from 2008 to 2016 - André Simonazzi: from 2009 to now - Jörg de Bernardi: from 2016 to now - Deputy Secretary General and Secretary of the Council of States: - Martina Buol: Inbound These people never cast any votes, so we can simply remove them. Now, let's check whether other Vice-Chancellors are in the MemberCouncil table.
###Code
df_person.columns
# Show the columns in member_council
df_member_council.columns
# Take the example of Corina Casanova.
df_member_council[df_member_council['LastName']=='Casanova']
# Let's check her Value for Council Name
df_member_council[df_member_council['LastName']=='Casanova']['CouncilName']
###Output
_____no_output_____
###Markdown
We see here that she's in the member council table. But we also see that her function is "None". Therefore, let's check the unique values for `CouncilName`.
###Code
df_member_council['CouncilName'].unique()
# Extract the None for the CouncilName
df_member_no_council = df_member_council[df_member_council['CouncilName'].isnull()]
df_member_no_council[['LastName', 'FirstName']]
###Output
_____no_output_____
###Markdown
So, these people are either Chancellor, Vice-Chancellor or from the General Secretariat of Switzerland. Therefore, we can remove them from both DataFrames.
###Code
idx_remove_council = df_member_no_council.index
print(idx_remove_council)
idx_remove_person_1 = df_unique_person.index
print(idx_remove_person_1)
idx_remove_person_2 = df_person[df_person['ID'].isin(list(df_member_no_council['ID']))].index
print(idx_remove_person_2)
idx_remove_person = idx_remove_person_1.union(idx_remove_person_2)
print(idx_remove_person)
# No we remove them
df_person_clean = df_person.drop(idx_remove_person)
print('Size person: ', df_person_clean.shape)
df_member_council_clean = df_member_council.drop(idx_remove_council)
print('Size member council: ', df_member_council_clean.shape)
idx = 1297
df_person_clean[df_person_clean.index == idx]['LastName']
df_member_council_clean[df_member_council_clean.index == idx]['LastName']
df_people = df_member_council_clean.merge(df_person_clean, on='ID', suffixes=('_Council', '_Person'))
df_people.shape
df_people.head()
columns_both = list(df_person_clean.columns) + list(df_member_council_clean.columns)
columns_people = list(df_people.columns)
both_no_people = list(set(columns_both)-set(columns_people))
people_no_both = list(set(columns_people)-set(columns_both))
for i in both_no_people:
print('Columns for \'%s\' equals: '%i, df_people[i+'_Council'].equals(df_people[i+'_Person']))
###Output
Columns for 'MilitaryRank' equals: True
Columns for 'MaritalStatusText' equals: True
Columns for 'FirstName' equals: True
Columns for 'NumberOfChildren' equals: True
Columns for 'GenderAsString' equals: True
Columns for 'LastName' equals: True
Columns for 'PersonIdCode' equals: True
Columns for 'OfficialName' equals: True
Columns for 'Modified' equals: True
Columns for 'MaritalStatus' equals: True
Columns for 'DateOfBirth' equals: True
Columns for 'PersonNumber' equals: True
Columns for 'MilitaryRankText' equals: True
Columns for 'DateOfDeath' equals: True
Columns for 'Language' equals: True
###Markdown
All duplicated columns are identical, so we can keep just one copy of each.
###Code
for i in both_no_people:
df_people = df_people.drop(i+'_Council', axis=1)
df_people = df_people.rename(columns={i+'_Person':i})
# display all pandas columns
pd.set_option('display.max_columns', 100)
df_people.head()
# Remove the following columns:
col_to_remove = ['Canton', 'Council', 'ParlGroupFunction', 'ParlGroupNumber', 'Party', 'MaritalStatus', 'MilitaryRank', 'Title', 'BirthPlace_Canton', 'BirthPlace_City', 'Language', 'Modified']
df_people_clean = df_people.drop(col_to_remove, axis=1)
df_people_clean.shape
df_people_clean.head()
df_people_clean[df_people_clean['Active'] == 'true'].shape
###Output
_____no_output_____
###Markdown
That's the correct number of active people (200 National Council, 46 Council of States and 7 Federal Council). Let's save it!
###Code
df_people_clean.to_csv('data/people.csv', encoding='utf-8', index=False)
###Output
_____no_output_____ |
notebooks/06_train_network_gsp17)old.ipynb | ###Markdown
Train Neural Networks
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import PIL
import pathlib
from sklearn.utils import class_weight
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, GlobalMaxPooling2D
print(tf.__version__)
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# %load_ext tensorboard
###Output
2.3.1
Num GPUs Available: 0
###Markdown
Data Load data **data is structured as:** ../data/ dataset/ train/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ... Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... test/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ... Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... val/ Aloe_Vera/ Aloe_Vera_1.jpeg Aloe_Vera_2.jpeg ... ... Umbrella_Tree/ Umbrella_Tree_1.jpeg Umbrella_Tree_2.jpeg ... House_Plants.csv **Define dataset location and desired size:**
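A quick way to sanity-check this layout is to count the images per class on disk. A minimal sketch follows; the `../data/dataset` root is taken from the tree above and is an assumption; adjust it to the actual folder used later (e.g. `../data/gsp15_ttv`).
###Code
import os

base = '../data/dataset'   # assumed root, matching the tree sketched above
for split in ['train', 'test', 'val']:
    split_dir = os.path.join(base, split)
    if os.path.isdir(split_dir):
        # one sub-folder per class, each holding that class's images
        counts = {cls: len(os.listdir(os.path.join(split_dir, cls)))
                  for cls in sorted(os.listdir(split_dir))}
        print(split, sum(counts.values()), 'images across', len(counts), 'classes')
###Output
_____no_output_____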
###Code
data_path = '../data/gsp15_ttv/'
class_names = ['Aloe_Vera', 'Asparagus_Fern', 'Baby_Rubber_Plant', 'Boston_Fern', 'Easter_Lily',
'Fiddle_Leaf_Fig', 'Jade_Plant', 'Monstera','Parlor_Palm', 'Peace_Lily', 'Pothos',
'Rubber_Plant', 'Snake_Plant', 'Spider_Plant', 'Umbrella_Tree']
###Output
_____no_output_____
###Markdown
Load data
###Code
train_data_dir = f'{data_path}/train'
validation_data_dir = f'{data_path}/test'
img_width, img_height = 224, 224
batch_size = 32
# import training with augmentation at each epoch
print('Training:')
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.1,
zoom_range=0.2,
rotation_range=30,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
classes=class_names,
class_mode='categorical',
seed = 2020,
shuffle = True)
# import validation
print('\nValidation:')
val_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = val_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
classes=class_names,
class_mode='categorical',
seed = 2020,
shuffle = True)
plt.figure(figsize=(10, 10))
images, labels = next(train_generator)
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i])
    plt.title(class_names[np.argmax(labels[i])])
plt.axis("off")
# # 80% training set
# train_ds = tf.keras.preprocessing.image_dataset_from_directory(
# data_path,
# validation_split=0.2,
# subset="training",
# seed=123,
# image_size=(img_height, img_width),
# batch_size=batch_size)
# # 20% validation set
# val_ds = tf.keras.preprocessing.image_dataset_from_directory(
# data_path,
# validation_split=0.2,
# subset="validation",
# seed=123,
# image_size=(img_height, img_width),
# batch_size=batch_size)
###Output
_____no_output_____
###Markdown
**List classes**
###Code
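# NOTE: the cells from here on rely on `train_ds` / `val_ds`, i.e. the commented-out
# image_dataset_from_directory pipeline above, not on the ImageDataGenerator flow.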
class_names = train_ds.class_names
no_classes = len(class_names)
print(class_names)
###Output
_____no_output_____
###Markdown
**Sanity check the shape of inputs**
###Code
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
###Output
_____no_output_____
###Markdown
**Define class weights for imbalanced data**  Using class_weights changes the range of the loss. This may affect the stability of the training depending on the optimizer. Optimizers whose step size is dependent on the magnitude of the gradient, like optimizers. SGD, may fail. The optimizer used here, optimizers. Adam, is unaffected by the scaling change. Also note that because of the weighting, the total losses are not comparable between the two models.
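For intuition, the 'balanced' heuristic used in the next cell weights class k by n_samples / (n_classes * count_k). A tiny sketch with hypothetical per-class counts (not the real dataset):
###Code
import numpy as np

label_counts = np.array([120, 60, 30])                       # hypothetical images per class
n_samples, n_classes = label_counts.sum(), len(label_counts)
balanced_weights = n_samples / (n_classes * label_counts)    # -> [0.58, 1.17, 2.33]
###Output
_____no_output_____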
###Code
label_list = []
for images, labels in train_ds.take(-1):
batch_labels = labels.numpy()
label_list.append(batch_labels)
label_list = np.concatenate( label_list, axis=0 )
class_weight_arr = class_weight.compute_class_weight(class_weight='balanced',
                                                     classes=np.unique(label_list),
                                                     y=label_list)
class_weight_dict = dict(zip(np.arange(no_classes), class_weight_arr)) #.astype(str)
###Output
_____no_output_____
###Markdown
Visualize raw data
###Code
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
###Output
_____no_output_____
###Markdown
Preprocess the data
###Code
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.3),
]
)
###Output
_____no_output_____
###Markdown
**Visualize**
###Code
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
###Output
_____no_output_____
###Markdown
Prefetch the data
###Code
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Build model **First try**
###Code
def get_prelim():
model = Sequential([
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical",
input_shape=(img_height, img_width, 3)),
layers.experimental.preprocessing.RandomRotation(0.2),
layers.experimental.preprocessing.RandomZoom(0.3),
layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(no_classes),
layers.Activation('softmax')
])
return model
def get_double_conv_1():
model = keras.models.Sequential([
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical",
input_shape=(img_height, img_width, 3)),
layers.experimental.preprocessing.RandomRotation(0.2),
layers.experimental.preprocessing.RandomZoom(0.3),
layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(64, 7, activation='relu', padding='same'),
layers.MaxPooling2D(2),
layers.Conv2D(128, 3, activation='relu', padding='same'),
layers.Conv2D(128, 3, activation='relu', padding='same'),
layers.MaxPooling2D(2),
layers.Conv2D(256, 3, activation='relu', padding='same'),
layers.Conv2D(256, 3, activation='relu', padding='same'),
layers.MaxPooling2D(2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(no_classes, activation='softmax')
])
return model
def get_dropout_1():
model = Sequential([
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical",
input_shape=(img_height, img_width, 3)),
layers.experimental.preprocessing.RandomRotation(0.2),
layers.experimental.preprocessing.RandomZoom(0.3),
layers.experimental.preprocessing.Rescaling(1./255),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(no_classes, activation='softmax')
])
return model
def get_VGG16tl():
resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))
# create the base model from the pre-trained model VGG16
# note that, if using a Kaggle server, internet has to be turned on
pretrained_model = VGG16(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
# freeze the convolutional base
pretrained_model.trainable = False
model = tf.keras.Sequential([
# layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical",
# input_shape=(img_height, img_width, 3)),
# layers.experimental.preprocessing.RandomRotation(0.2),
# layers.experimental.preprocessing.RandomZoom(0.3),
resize_layer,
pretrained_model,
# GlobalAveragePooling2D(), # maybe make max (first) or not at all
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5), # try 0.5
Dense(256, activation='relu'),
Dense(no_classes, activation='softmax')])
return model
def get_InceptionV3tl():
resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))
# create the base model from the pre-trained model VGG16
# note that, if using a Kaggle server, internet has to be turned on
pretrained_model = InceptionV3(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
# freeze the convolutional base
pretrained_model.trainable = False
model = tf.keras.Sequential([resize_layer,
pretrained_model,
# GlobalMaxPooling2D(),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(256, activation='relu'),
Dense(no_classes, activation='softmax')])
return model
def get_InceptionV3tl2():
resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))
# create the base model from the pre-trained model VGG16
# note that, if using a Kaggle server, internet has to be turned on
pretrained_model = InceptionV3(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
# freeze the convolutional base
pretrained_model.trainable = False
model = tf.keras.Sequential([resize_layer,
pretrained_model,
# GlobalMaxPooling2D(),
Flatten(),
Dense(1024, activation='relu'),
Dropout(0.2),
Dense(no_classes, activation='softmax')])
return model
def get_ResNet50tl():
resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))
# create the base model from the pre-trained model VGG16
# note that, if using a Kaggle server, internet has to be turned on
pretrained_model = ResNet50(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
# freeze the convolutional base
pretrained_model.trainable = False
model = tf.keras.Sequential([resize_layer,
pretrained_model,
# GlobalAveragePooling2D(),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(256, activation='relu'),
Dense(no_classes, activation='softmax')])
return model
def get_InceptionResNetV2tl():
resize_layer = layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))
# create the base model from the pre-trained model VGG16
# note that, if using a Kaggle server, internet has to be turned on
pretrained_model = InceptionResNetV2(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
# freeze the convolutional base
pretrained_model.trainable = False
model = tf.keras.Sequential([resize_layer,
pretrained_model,
# GlobalAveragePooling2D(),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(256, activation='relu'),
Dense(no_classes, activation='softmax')])
return model
def generate_model(model_name):
if model_name == 'prelim':
model = get_prelim()
data_augmentation
elif model_name == 'double_conv_1':
model = get_double_conv_1()
elif model_name == 'dropout_1':
model = get_dropout_1()
elif model_name == 'VGG16':
model = get_VGG16tl()
elif model_name == 'ResNet50':
model = get_ResNet50tl()
elif model_name == 'InceptionV3':
model = get_InceptionV3tl()
elif model_name == 'InceptionV3_1024':
model = get_InceptionV3tl2()
elif model_name == 'InceptionResNetV2':
model = get_InceptionResNetV2tl()
else:
print('please select a valid model')
# model = tf.keras.Sequential([
# data_augmentation,
# model
# ])
return model
model = generate_model('InceptionV3_1024')
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy', 'sparse_top_k_categorical_accuracy'])
model.summary()
###Output
_____no_output_____
###Markdown
Train model
###Code
initial_epochs=40
history = model.fit(train_ds,
epochs=initial_epochs,
validation_data=val_ds,
class_weight=class_weight_dict)
# unfreeze the layers
model.trainable = True
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer = keras.optimizers.Adam(1e-5),
metrics = ['accuracy', 'sparse_top_k_categorical_accuracy'])
model.summary()
fine_tune_epochs = 200
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_ds,
epochs=total_epochs,
initial_epoch=history.epoch[-1]+1,
validation_data=val_ds,
class_weight=class_weight_dict)
###Output
_____no_output_____
###Markdown
Save model/metrics and plot
###Code
model_name = 'InceptionV3_40_200e_GSP1.0'
model.save_weights(f'../models/{model_name}_weights.h5')
model.save(f'../models/{model_name}_model.h5')
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
t5acc = history.history['sparse_top_k_categorical_accuracy']
t5val_acc = history.history['val_sparse_top_k_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
t5acc += history_fine.history['sparse_top_k_categorical_accuracy']
t5val_acc += history_fine.history['val_sparse_top_k_categorical_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,8), sharex=True)
x_plot = np.arange(1, total_epochs+1)
ax[0].plot(x_plot, acc[:total_epochs], '+-', label='training')
ax[0].plot(x_plot, val_acc[:total_epochs], '+-', label='validation')
ax[0].plot(x_plot, t5acc[:total_epochs], '+-', label='top 5 training')
ax[0].plot(x_plot, t5val_acc[:total_epochs], '+-', label='top 5 validation')
ax[0].legend()
ax[0].set_ylabel('accuracy')
# ax[0].set_ylim(0.5, 1)
ax[0].grid(ls='--', c='C7')
ax[0].set_title('accuracy')
ax[0].axvline(initial_epochs, c='C7', ls='--')
ax[1].plot(x_plot, loss[:total_epochs], '+-', label='training')
ax[1].plot(x_plot, val_loss[:total_epochs], '+-', label='validation')
ax[1].legend()
ax[1].set_ylabel('cross entropy')
# ax[1].set_ylim(0, 1)
ax[1].grid(ls='--', c='C7')
ax[1].set_title('loss')
ax[1].set_xlabel('epoch')
ax[1].axvline(initial_epochs, c='C7', ls='--')
plt.show()
plt.savefig(f'../models/{model_name}_graph.svg')
plt.savefig(f'../models/{model_name}_graph.png', dpi=400)
graph_vals = pd.DataFrame({'acc':acc[:total_epochs],
'val_acc':val_acc[:total_epochs],
'loss':loss[:total_epochs],
'val_loss':val_loss[:total_epochs],
't5':t5acc[:total_epochs],
'val_t5':t5val_acc[:total_epochs]})
graph_vals.to_csv(f'../models/{model_name}_metrics.csv', index=False)
val_predictions = model.predict(val_ds)  # val_ds is already batched, so no batch_size argument is needed
def plot_cm(labels, predictions, p=0.5):
    from sklearn.metrics import confusion_matrix  # not imported at the top of this notebook
    import seaborn as sns                          # not imported at the top of this notebook
    cm = confusion_matrix(labels, predictions > p)
    plt.figure(figsize=(5,5))
    sns.heatmap(cm, annot=True, fmt="d")
plt.title('Confusion matrix @{:.2f}'.format(p))
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.savefig('../models/VGG16_70e_1.0.svg')
model.save_weights('../models/VGG16_20_100e_1.0.h5')
model.save('../models/VGG16_20_100e_1.0.h5')
import itertools  # needed for itertools.product below

def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
# Use the model to predict the values from the validation dataset.
test_pred_raw = model.predict(val_ds)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix (test_labels must hold the integer ground-truth labels of val_ds).
import sklearn.metrics
cm = sklearn.metrics.confusion_matrix(test_labels, test_pred)
# Log the confusion matrix as an image summary.
figure = plot_confusion_matrix(cm, class_names=class_names)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,8), sharex=True)
x_vals = np.arange(1, len(acc) + 1)  # `epochs` is not defined in this notebook
ax[0].plot(x_vals, acc, '+-', label='training')
ax[0].plot(x_vals, val_acc, '+-', label='validation')
ax[0].legend()
ax[0].set_ylabel('accuracy')
ax[0].set_ylim(0, 1)
ax[0].grid(ls='--', c='C7')
ax[0].set_title('accuracy')
ax[1].plot(x_vals, loss, '+-', label='training')
ax[1].plot(x_vals, val_loss, '+-', label='validation')
ax[1].legend()
ax[1].set_ylabel('cross entropy')
ax[1].set_ylim(0, 3)
ax[1].grid(ls='--', c='C7')
ax[1].set_title('loss')
ax[1].set_xlabel('epoch')
plt.show()
model.save_weights('../models/.h5')
model.save('../models/.h5')
###Output
_____no_output_____
###Markdown
Evaluation
###Code
import glob
pred_path = '../data/pred_16c_only1/'
pred_ds = tf.keras.preprocessing.image_dataset_from_directory(
pred_path,
# labels = [0]*len(glob.glob(f'{pred_path}*')),
image_size=(img_height, img_width),
batch_size=batch_size
)
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
predictions = model.predict(pred_ds)
print(predictions)
# Generate arg maxes for predictions
classes = np.argmax(predictions, axis = 1)
print(classes[0])
print(class_names[classes[0]])
temp = tf.keras.models.load_model('../models/convmod_1.0.h5')
temp.summary()
dot_img_file = '../models/convmod_1.0.png'
tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
###Output
_____no_output_____ |
Datatypes-Day1.ipynb | ###Markdown
Numbers
###Code
a = 10
b = 20
c = a + b
c
type (a)
a + b
c = 3.5
d = 2.3
c + d
type (c-d)
###Output
_____no_output_____
###Markdown
Arithmetic Operations
###Code
a-b
a*b
a/b
a%b
a//b
b//a
abs(a)
abs(c)
abs(-25)
###Output
_____no_output_____
###Markdown
Strings
###Code
str = "ITM FCS"
str[0]
str[:]
str[:3]
str[::-2]
str[4]
str[3]
str[::2]
str[::1]
str[:4:1]
str[:4:2]
str[:::]
str[::]
str[::1]
###Output
_____no_output_____
###Markdown
Write a program using string slicing in which the string "The quick brown fox jumps over the lazy dog" is printed from the 5th letter to the fourth-from-last letter, taking every 3rd letter
###Code
str = "The quick brown fox jumps over the lazy dog"
str [5::3]
str [5:-4:3]
str [4:-4:3]
s= "hassim"
s= s+3010
t=3010
s+t
t="3010"
s.upper()
s.count()
s.count(0)
s.capitalize(0)
s.capitalize()
s.casefold()
help(count())
s.count('s')
s.casefold('h')
help(s.casefold())
s="hassim"
s
###Output
_____no_output_____ |
resources/notebooks/variational_bayes/.ipynb_checkpoints/EM Notes-checkpoint.ipynb | ###Markdown
Expectation MaximizationTo illustrate the Expectation Maximization (EM) process, we first generate four distributions, shown by their sample points:
###Code
# data
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
%matplotlib inline
class Distribution(object):
def __init__(self, color):
self.color = color
num_gaussians = 4
pi = np.array([0.1, 0.3, 0.2, 0.3])
num_samples = 10000
mu = ([1.7, .5],
[2, 4],
[0, 6],
[5, 6]
)
sigma = ([[.9, 0], [0, .5]],
[[.4, .3], [.3, .5]],
[[2, .7], [.2, .8]],
[[.6, .6], [.3, .6]]
)
distributions = {}
colors = ['r','g','b','y']
for i in range(num_gaussians):
name = 'Sampled Distribution {}'.format(i + 1)
distributions[name] = Distribution(colors[i])
distributions[name].samples = np.random.multivariate_normal(
mu[i], sigma[i], int(pi[i] * num_samples))
# Plot everything
fig, ax = plt.subplots()
for name, distribution in distributions.items():  # .iteritems() is Python 2 only
ax.scatter(distribution.samples[:,0],
distribution.samples[:,1],
c=distribution.color,
s=20,
lw=0
)
ax.set_title('Sampled distributions')
###Output
_____no_output_____
###Markdown
Next, we try to approximate these distributions using EM. At a high level, the algorithm iterates between two steps:**Expectation Step:** Using the current parameter estimates, compute for each sample the posterior probability (its *responsibility*) that each mixture component generated it.**Maximization Step:** Re-estimate the parameters so that they maximize the expected log-likelihood of the observed data, using the responsibilities as weights.If converged after the maximization step, exit.
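A minimal sketch of one such iteration for a two-dimensional Gaussian mixture is shown below, assuming `scipy` is available (the helper name `em_step` is introduced here and is not part of the original notebook):
###Code
import numpy as np
from scipy.stats import multivariate_normal

def em_step(samples, pi_hat, mu_hat, sigma_hat):
    """One EM iteration: returns updated (pi, mu, sigma) for a K-component 2-D GMM."""
    K, N = len(pi_hat), samples.shape[0]
    # E-step: responsibility r[n, k] of component k for sample n
    r = np.zeros((N, K))
    for k in range(K):
        r[:, k] = pi_hat[k] * multivariate_normal.pdf(samples, mu_hat[k], sigma_hat[k])
    r /= r.sum(axis=1, keepdims=True)
    # M-step: re-estimate mixing weights, means and covariances from the weighted samples
    Nk = r.sum(axis=0)
    pi_new = Nk / N
    mu_new = (r.T @ samples) / Nk[:, None]
    sigma_new = []
    for k in range(K):
        diff = samples - mu_new[k]
        sigma_new.append((r[:, k, None] * diff).T @ diff / Nk[k])
    return pi_new, mu_new, np.array(sigma_new)
###Output
_____no_output_____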
###Code
# Initial setup
K = 4 # <>But how do we know?
mu_hats = []
sigma_hats = []
pi_hats = []
for k in range(K):
    mu_hats.append(np.random.randint(-10, 10, size=2))  # random 2-D initial means
    sigma_hats.append(np.eye(2))
    pi_hats.append(1.0 / K)                              # start with equal mixing weights
from IPython.core.display import HTML
# Borrowed style from Probabilistic Programming and Bayesian Methods for Hackers
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
2017/coursera/code/0110/family_tree_implementation_adam.ipynb | ###Markdown
Learning Internal Representation by Error Propagation- A example implementation of the following classic paper that changed the history of deep learning>Rumelhart, D. E., Hinton, G. E., & Williams, R. J. (1985). [Learning internal representations by error propagation](http://www.cs.toronto.edu/~fritz/absps/pdp8.pdf) (No. ICS-8506). CALIFORNIA UNIV SAN DIEGO LA JOLLA INST FOR COGNITIVE SCIENCE. Related Paper- Varona-Moya, S., & Cobos, P. L. (2012, September). [Analogical inferences in the family trees task: a review. In International Conference on Artificial Neural Networks (pp. 221-228)](https://www.researchgate.net/publication/229164083_Analogical_Inferences_in_the_Family_Trees_Task_A_Review). Springer Berlin Heidelberg.- Paccanaro, A., & Hinton, G. E. (2001). [Learning distributed representations of concepts using linear relational embedding. IEEE Transactions on Knowledge and Data Engineering](https://www.researchgate.net/publication/3296950_Learning_distributed_representations_of_concepts_using_Linear_Relational_Embedding), 13(2), 232-244. Network structure Data Creation
###Code
person_1_input = [[1.0 if target == person else 0.0 for target in range(24) ] for person in range(24)]
person_2_output = person_1_input[:] # Data copy - Person 1 is the same data as person 2.
relationship_input = [[1.0 if target == relationship else 0.0 for target in range(12) ] for relationship in range(12)]
###Output
_____no_output_____
###Markdown
Relationship Representation
###Code
# (colin has-father james)
# (colin has-mother victoria)
# (james has-wife victoria)
# (charlotte has-brother colin)
# (victoria has-brother arthur)
# (charlotte has-uncle arthur)
# 아래의 리스트는 가족관계도에 있는 관계를 위의 예시와 같은 방법으로 나타낸 것입니다.
# [input_person, relationship, output_person]
triple_relationship = [[0, 3, 1], [0, 4, 3], [0, 5, 4],
[1, 2, 0], [1, 4, 3], [1, 5, 4],
[2, 2, 3],
[3, 3, 2], [3, 0, 0], [3, 1, 1], [3, 9, 4], [3, 10, 10], [3, 11, 11],
[4, 2, 5], [4, 0, 0], [4, 1, 1], [4, 5, 3], [4, 4, 10], [4, 5, 11],
[5, 3, 4], [5, 0, 6], [5, 1, 7], [5, 9, 9], [5, 4, 10], [5, 5, 11],
[6, 3, 7], [6, 4, 5], [6, 5, 8],
[7, 2, 6], [7, 4, 5], [7, 5, 8],
[8, 2, 9], [8, 0, 6], [8, 1, 7], [8, 8, 5], [8, 10, 10], [8, 11, 11],
[9, 3, 8],
[10, 0, 5], [10, 1, 4], [10, 9, 11], [10, 6, 3], [10, 7, 8],
[11, 0, 5], [11, 1, 4], [11, 8, 10], [11, 6, 3], [11, 7, 8],
[12, 3, 13], [12, 4, 15], [12, 5, 16],
[13, 2, 12], [13, 4, 15], [13, 5, 16],
[14, 2, 15],
[15, 3, 14], [15, 0, 12], [15, 1, 13], [15, 9, 16], [15, 10, 22], [15, 11, 23],
[16, 2, 17], [16, 0, 12], [16, 1, 15], [16, 5, 15], [16, 4, 22], [16, 5, 23],
[17, 3, 16], [17, 0, 18], [17, 1, 19], [17, 9, 21], [17, 4, 22], [17, 5, 23],
[18, 3, 19], [18, 4, 17], [18, 5, 20],
[19, 2, 18], [19, 4, 17], [19, 5, 20],
[20, 2, 21], [20, 0, 18], [20, 1, 19], [20, 8, 17], [20, 10, 22], [8, 11, 23],
[21, 3, 20],
[22, 0, 17], [22, 1, 16], [22, 9, 23], [22, 6, 15], [22, 7, 20],
[23, 0, 17], [23, 1, 16], [23, 8, 22], [23, 6, 15], [23, 7, 20]]
###Output
_____no_output_____
###Markdown
Code
###Code
import tensorflow as tf
import numpy as np
x1_data = np.array([person_1_input[data[0]] for data in triple_relationship],dtype=np.float32)
x2_data = np.array([relationship_input[data[1]] for data in triple_relationship],dtype=np.float32)
y_data = np.array([person_2_output[data[2]] for data in triple_relationship],dtype=np.float32)
X1 = tf.placeholder(tf.float32, [None, 24])
X2 = tf.placeholder(tf.float32, [None, 12])
Y = tf.placeholder(tf.float32, [None, 24])
# Weights and bias
W11 = tf.Variable(tf.zeros([24, 6]))
W12 = tf.Variable(tf.zeros([12, 6]))
W21 = tf.Variable(tf.zeros([6, 12]))
W22 = tf.Variable(tf.zeros([6, 12]))
W3 = tf.Variable(tf.zeros([12, 24]))
b11 = tf.Variable(tf.zeros([6]))
b12 = tf.Variable(tf.zeros([6]))
b2 = tf.Variable(tf.zeros([12]))
b3 = tf.Variable(tf.zeros([24]))
# Hypothesis
L11 = tf.sigmoid(tf.matmul(X1, W11) + b11) # 24 by 6 mat
L12 = tf.sigmoid(tf.matmul(X2, W12) + b12) # 12 by 6 mat
# L2 = tf.sigmoid(tf.matmul(L11, W21) + tf.matmul(L12, W22) + b2) # Dimensions must be equal, but are 24 and 12 for 'add_22' (op: 'Add') with input shapes: [24,12], [12,12].
L2 = tf.sigmoid(tf.matmul(L11, W21) + tf.matmul(L12, W22) + b2)
hypothesis = tf.nn.softmax(tf.matmul(L2, W3) + b3)
# Minimize cost.
a = tf.Variable(0.01)
# cost = tf.reduce_mean(hypothesis, Y)
cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(hypothesis), reduction_indices=1))
train_step = tf.train.AdamOptimizer(a).minimize(cost)
# Initializa all variables.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Loop
for i in range(1000):
sess.run(train_step, feed_dict={
X1: x1_data,
X2: x2_data,
Y:y_data}
)
if i % 100 == 0:
print(
i,
sess.run(cost, feed_dict={X1:x1_data, X2:x2_data, Y:y_data})
)
correct_prediction = tf.equal(tf.argmax(hypothesis,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={X1: x1_data, X2: x2_data, Y:y_data}))
print(sess.run(tf.argmax(hypothesis,1), feed_dict={X1: x1_data, X2: x2_data, Y:y_data}))
print(sess.run(tf.argmax(Y,1), feed_dict={X1: x1_data, X2: x2_data, Y:y_data}))
print()
data = sess.run(W11, feed_dict={X1: x1_data, X2: x2_data, Y:y_data})
data = data.transpose()
data.shape
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
for index, values in enumerate(data):
plt.subplot(2, 4, index + 1)
values.shape = (2,12)
plt.axis('off')
plt.imshow(values, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Case %i' % index)
###Output
_____no_output_____ |
Finance/car_loans.ipynb | ###Markdown
How a Monthly Payment (Equated Monthly Installment) is Calculated Calculating a Monthly Payment (Simplified)
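For reference, the closed-form payment computed in the cells below is $$EMI = P \cdot \frac{r\,(1+r)^n}{(1+r)^n - 1}$$ where $P$ is the financed principal, $r$ is the monthly interest rate (the annual rate divided by 12) and $n$ is the number of monthly payments.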
###Code
import numpy as np  # needed for np.round below

P = 31115 * (1.075)
r = 0.0702 / 12
n = 60
numerator = (r *((1 + r)**(n)) )
denominator = ((1 + r)**(n)) - 1
emi = P * (numerator / denominator)
np.round(emi,2)
###Output
_____no_output_____
###Markdown
Calculating a Monthly Payment (with some fees included)
###Code
P = 31115 + (32615 * 0.0975) + 50 + 200 + 65 + 80
r = 0.0702 / 12
n = 60
numerator = (r *((1 + r)**(n)) )
denominator = ((1 + r)**(n)) - 1
emi = P * (numerator / denominator)
np.round(emi,2)
'The Monthly Payment with fees included is {} higher'.format(np.round(687.23 - 662.64,2))
###Output
_____no_output_____
###Markdown
How Interest Rates/APR Affects Monthly Payments Calculate Total Interest Paid Here are the steps to do this1-) Divide your interest rate by the number of payments (12) you'll make in the year (interest rates are expressed annually).
###Code
# Calculate one month of interest
P = 34689.9625
r = 0.0702 / 12
r * P
###Output
_____no_output_____
###Markdown
2-) Calculate new principal (after one payment)
###Code
34689.9625 - (687.23 - 202.94)
###Output
_____no_output_____
###Markdown
3-) Repeat steps 1 and 2 using the new principal until the principal reaches 0. You can see can example of this in the Python code below.
###Code
import numpy as np
import pandas as pd
term = 60
P = 34689.96
def calc_interest(P,emi,interest_rate = 0.0702):
interest_paid = np.floor(((interest_rate/12)*P)*100)/100
principal_paid = np.round(emi-interest_paid, 2)
new_balance = np.round(P - principal_paid,2)
return(emi, interest_paid, principal_paid, new_balance)
payment_list = []
for n in range(1, term + 1):
emi,i_paid,p_paid,new_p = calc_interest(P, emi)
payment_list.append([n, P, emi, i_paid, p_paid, new_p])
P = np.round(new_p,2)
c_names = ['Month','Starting Balance','Repayment','Interest Paid','Principal Paid','New Balance']
payment_table = pd.DataFrame(payment_list, columns = c_names)
payment_table.head(10)
np.round(payment_table['Interest Paid'].sum(), 2)
###Output
_____no_output_____
###Markdown
Loan and Principal Plot https://stackoverflow.com/questions/21918718/how-to-label-certain-x-values
###Code
import matplotlib.pyplot as plt  # plt is not imported earlier in this notebook

fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize=(10, 5) )
axes.plot(payment_table['Month'], payment_table['Principal Paid'], c = 'b', label = 'Principal');
axes.plot(payment_table['Month'], payment_table['Interest Paid'], c = 'k', label = 'Interest');
axes.set_xlim((1, 60));
axes.set_xticks([1, 10, 20, 30, 40, 50, 60])
axes.set_ylim((0, 700));
axes.set_ylabel('Dollars', fontsize = 22);
axes.set_xlabel('Month', fontsize = 22);
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
axes.set_title('Interest and Principal Paid Each Month', fontsize = 24)
plt.legend(bbox_to_anchor=(1.02,0), loc="lower left", borderaxespad=0, fontsize = 20)
plt.tight_layout()
plt.grid(axis = 'both')
plt.savefig('Interest_Principal.png', dpi = 1000)
###Output
_____no_output_____
###Markdown
Refinancing Cost Comparison 3.59% vs 7.02% (show the cost of refinancing a car, assuming no prepayment penalty)
###Code
P = 34689.96
term = 60
def generate_loan_table(P, term, interest_rate=0.0702):
def calc_emi(P, n, interest_rate):
r = interest_rate / 12
numerator = (r *((1 + r)**(n)) )
denominator = ((1 + r)**(n)) - 1
emi = P * (numerator / denominator)
emi = np.round(emi, 2)
return(emi)
def calc_interest(P, emi, interest_rate):
i_paid = np.floor(((interest_rate/12)*P)*100)/100
p_paid = np.round(emi - i_paid, 2)
new_p = np.round(P - p_paid,2)
return(emi, i_paid, p_paid, new_p)
emi = calc_emi(P, term, interest_rate)
payment_list = []
for n in range(1, term + 1):
emi,i_paid,p_paid, new_p = calc_interest(P, emi, interest_rate)
payment_list.append([n, P,emi, i_paid, p_paid, new_p])
P = np.round(new_p,2)
payment_table = pd.DataFrame(payment_list, columns = ['Month',
'Starting Balance',
'Repayment',
'Interest Paid',
'Principal Paid',
'New Balance'])
return(payment_table, np.round(payment_table['Interest Paid'].sum(), 2), emi)
original_table, original_paid, original_emi = generate_loan_table(P, term, interest_rate=0.0702)
refinanced_table, refinanced_paid, refinanced_emi = generate_loan_table(P, term, interest_rate=0.0359)
original_paid, refinanced_paid
"Refinancing could save: {}".format(6543.51 - 3257.88)
original_emi
refinanced_emi
np.round(original_emi - refinanced_emi, 2)
###Output
_____no_output_____
###Markdown
Total Interest Through Different Loan Terms
###Code
original_table, original_paid, original_emi = generate_loan_table(P, term = 60, interest_rate = 0.0702)
seventyTwo_table, seventyTwo_paid, seventyTwo_emi = generate_loan_table(P, term = 72, interest_rate = 0.0702)
original_paid, seventyTwo_paid, np.round(seventyTwo_paid - original_paid, 2)
original_emi, seventyTwo_emi
fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize=(10, 5) )
axes.plot(seventyTwo_table['Month'], seventyTwo_table['Interest Paid'].cumsum(), c = 'k', marker = '.', markersize = 10, label = '72 Month Term Loan');
axes.plot(original_table['Month'], original_table['Interest Paid'].cumsum(), c = 'b', marker = '.', markersize = 10, label = '60 Month Term Loan');
axes.set_xlim((1, 72));
axes.set_xticks([1, 12, 24, 36, 48, 60, 72])
axes.set_ylim((0, 9000));
axes.set_ylabel('Dollars', fontsize = 24);
axes.set_xlabel('Month', fontsize = 24);
plt.xticks(fontsize = 22)
plt.yticks(fontsize = 22)
axes.set_title('Total Interest Paid (7.02% Interest)', fontsize = 26)
plt.legend(loc="lower right", fontsize = 20)
plt.tight_layout()
plt.grid(axis = 'both')
plt.savefig('Total_Interest_Paid.png', dpi = 1000)
###Output
_____no_output_____ |
Basics/03-Methods and Functions/08-Functions and Methods Homework.ipynb | ###Markdown
Functions and Methods Homework Complete the following questions:____**Write a function that computes the volume of a sphere given its radius.**The volume of a sphere is given as $$\frac{4}{3} \pi r^3$$
###Code
def vol(rad):
import math
return (4/3)*math.pi*math.pow(rad,3)
pass
# Check
vol(2)
###Output
_____no_output_____
###Markdown
___**Write a function that checks whether a number is in a given range (inclusive of high and low)**
###Code
def ran_check(num,low,high):
if num in range(low,high+1):
print((num),"is in range between",(low,high))
else:
print((num, "is out of range."))
pass
# Check
ran_check(5,2,7)
###Output
5 is in range between (2, 7)
###Markdown
If you only wanted to return a boolean:
###Code
def ran_bool(num,low,high):
return num in range(low,high+1)
pass
ran_bool(3,1,10)
###Output
_____no_output_____
###Markdown
____**Write a Python function that accepts a string and calculates the number of upper case letters and lower case letters.** Sample String : 'Hello Mr. Rogers, how are you this fine Tuesday?' Expected Output : No. of Upper case characters : 4 No. of Lower case Characters : 33HINT: Two string methods that might prove useful: **.isupper()** and **.islower()**If you feel ambitious, explore the Collections module to solve this problem!
###Code
def up_low(s): # i assume we had to make a dictionary with two keywords UPPERCASE and LOWERCASE.
dic={"upper":0, "lower":0}
for charac in s:
if charac.isupper():
dic["upper"]+=1
elif charac.islower():
dic["lower"]+=1
else:
pass
print("Original String:", (s))
print("No. of Upper Case Character : ", (dic["upper"]))
print("No. of Lower Case Character : ", (dic["lower"]))
pass
s = 'Hello Mr. Rogers, how are you this fine Tuesday?'
up_low(s)
###Output
Original String: Hello Mr. Rogers, how are you this fine Tuesday?
No. of Upper Case Character : 4
No. of Lower Case Character : 33
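###Markdown
Taking up the hint about the Collections module, here is an alternative sketch with `collections.Counter` (the name `up_low_counter` is introduced here and is not part of the exercise):
###Code
from collections import Counter

def up_low_counter(s):
    # classify every character once, then read the upper/lower counts off the Counter
    counts = Counter('upper' if c.isupper() else 'lower' if c.islower() else 'other'
                     for c in s)
    return counts['upper'], counts['lower']   # for the sample string above: (4, 33)
###Output
_____no_output_____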
###Markdown
____**Write a Python function that takes a list and returns a new list with unique elements of the first list.** Sample List : [1,1,1,1,2,2,3,3,3,3,4,5] Unique List : [1, 2, 3, 4, 5]
###Code
def unique_list(lst):
print(set(lst)) #easy as peasy!
pass
unique_list([1,1,1,1,2,2,3,3,3,3,4,5])
###Output
{1, 2, 3, 4, 5}
###Markdown
____**Write a Python function to multiply all the numbers in a list.** Sample List : [1, 2, 3, -4] Expected Output : -24
###Code
def multiply(numbers):
product=1
for num in numbers:
product*=num
return product
pass
multiply([1,2,3,-4])
###Output
_____no_output_____
###Markdown
____**Write a Python function that checks whether a passed in string is palindrome or not.**Note: A palindrome is word, phrase, or sequence that reads the same backward as forward, e.g., madam or nurses run.
###Code
def palindrome(s):
return s==s[::-1]
pass
palindrome('helleh')
###Output
_____no_output_____
###Markdown
____ Hard:**Write a Python function to check whether a string is pangram or not.** Note : Pangrams are words or sentences containing every letter of the alphabet at least once. For example : "The quick brown fox jumps over the lazy dog"Hint: Look at the string module
###Code
import string
def ispangram(str1, alphabet=string.ascii_lowercase):
alphabetset=set(alphabet)
return alphabetset <= set(str1.lower())
pass
ispangram("The quick brown fox jumps over the lazy dog")
string.ascii_lowercase
###Output
_____no_output_____
###Markdown
Great Job!
###Code
ispangram("Waltz, bad nymph, for quick jigs vex.")
ispangram("Sphinx of black quartz, judge my vow.")
###Output
_____no_output_____ |
Project-1-Udacity-Blogpost.ipynb | ###Markdown
Blogpost: Analysing StackOverflow DataStackOverflow: - community of software developers, coders, companies - public platform for coding questions and answers & other products- self-reportedly one of the 50 most popular websites in the world - different products: StackOverflow, StackOverflow for Teams, Stack Overflow Advertising, Stack Overflow Talent- supports companies when looking for new employeeshttps://stackoverflow.com/companyData is available at: https://insights.stackoverflow.com/survey/gather information on "all aspects of the developer experience"In the course, the questions were:- How to enter the field?- What are job placement and salary rates for bootcamps?- What relates to salary/job satisfaction?Now, additionally look at StackOverflow Survey Data from 2015 to 2019Suggestions: What languages were most popular in each year? What other changes can you observe over time?Questions:0. Why does StackOverflow collect the data? Which questions are included every year? - get to know users (e.g. educational and programming background, demographics etc.)- get info to improve career services, other StackOverflow products for recruitersspecifically: - what is connected with higher job satisfaction or better payment? - role of remote workWhy is this interesting for recruiters?1. 2. 3. Load & Prepare Data
###Code
# import data for year 2015
df_raw_2015 = pd.read_csv('data/survey_results_public_2015.csv', low_memory=False, header=None)
# inspect df
df_raw_2015.head(3)
# needs cleaning, header is first row
# clean data
# drop first row by selecting all rows from first row onwards
df_2015 = df_raw_2015.iloc[1: , :]
df_2015.head(2)
# use helper function
new_df_2015 = row_to_header(df_2015, 0)
# check df
new_df_2015.head(2)
# import data for year 2016
df_raw_2016 = pd.read_csv('data/survey_results_public_2016.csv', low_memory=False)
# inspect df
df_raw_2016.head(2)
# import data for year 2017
df_raw_2017 = pd.read_csv('data/survey_results_public_2017.csv', low_memory=False)
# inspect df
df_raw_2017.head(2)
# import data for year 2018
df_raw_2018 = pd.read_csv('data/survey_results_public_2018.csv', low_memory=False)
# inspect df
df_raw_2018.head(2)
# import data for year 2019
df_raw_2019 = pd.read_csv('data/survey_results_public_2019.csv', low_memory=False)
# inspect df
df_raw_2019.head(2)
# check data types in dfs
df_raw_2016.dtypes
df_raw_2016.info()
# filter for numeric vars
df_numerics_only = df_raw_2016.select_dtypes(include=np.number)
df_numerics_only
# filter for categorical vars
###Output
_____no_output_____
###Markdown
Which variables appear in all years? - demographics: age, gender, country- education: - occupation: - job satisfaction
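As a quick programmatic cross-check, the sketch below intersects the (lower-cased) column names of the five survey DataFrames loaded above; the helper variables are introduced here. Note it only catches identically named columns, so renamed variables such as 'JobSatisfaction' vs 'JobSat' still need the manual inspection done in the next cells.
###Code
# intersect lower-cased column names across all five survey years
frames = {2015: new_df_2015, 2016: df_raw_2016, 2017: df_raw_2017,
          2018: df_raw_2018, 2019: df_raw_2019}
col_sets = {year: {str(c).lower() for c in df.columns} for year, df in frames.items()}
common_cols = set.intersection(*col_sets.values())
###Output
_____no_output_____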
###Code
new_df_2015.columns.values.tolist()
# ['Country', 'Age', 'Gender', 'Occupation', 'Compensation', 'Compensation: midpoint', 'Employment Status', 'Job Satisfaction',
# 'Years IT / Programming Experience',"How often are Stack Overflow's answers helpful",
df_raw_2016.columns.values.tolist()
# 'country', 'gender', 'education', 'occupation', 'employment_status', 'salary_range', 'salary_midpoint', 'job_satisfaction', 'why_stack_overflow'
df_raw_2017.columns.values.tolist()
# ['Gender', 'Country', 'FormalEducation', 'Professional', 'Salary', 'JobSatisfaction',
df_raw_2018.columns.values.tolist()
# 'Age', 'Gender','Country', 'Employment', 'Salary', 'FormalEducation', 'JobSatisfaction',
df_raw_2019.columns.values.tolist()
# 'Country', 'Age', 'Gender', 'Employment', 'EdLevel', 'JobSat',
###Output
_____no_output_____
###Markdown
check for missing values
###Code
# useful?
num_rows = df.shape[0] #number of rows in the dataset
num_cols = df.shape[1] #number of columns in the dataset
no_nulls = set(df.columns[~df.isnull().any()]) #Provide a set of columns with 0 missing values.
most_missing_cols = df.columns[df.isnull().sum()/len(df) > .75].tolist() #Provide a set of columns with more than 75% of the values missing
most_missing_cols
# drop rows but only when all values are missing
all_row = df.dropna(axis=0, how='all')
###Output
_____no_output_____
###Markdown
Check whether columns have the correct data types; explore the data with bar charts, histograms and scatterplots.
###Code
# Histograms
status_vals = df['Professional'].value_counts() #pandas series of the counts for each Professional status
# bar chart of the proportion of individuals in each professional category
(status_vals/df.shape[0]).plot(kind="bar");
plt.title("What kind of developer are you?");
# Which variables are of interest?
# get list of all columns
new_df_2015.columns.values.tolist()
# ['Country', 'Age', 'Gender', 'Years IT / Programming Experience', 'Occupation', 'Desktop Operating System',
# 'Employment Status', 'Industry', 'Job Satisfaction', 'Purchasing Power', 'Remote Status', 'Changed Jobs in last 12 Months',
# 'Open to new job opportunities',
# 'Why use Stack Overflow: Help for job', 'Why use Stack Overflow: To give help',
# "Why use Stack Overflow: Can't do job without it", 'Why use Stack Overflow: Maintain online presence',
# 'Why use Stack Overflow: Demonstrate expertise', 'Why use Stack Overflow: Communicate with others',
# 'Why use Stack Overflow: Receive help on personal projects','Why use Stack Overflow: Love to learn',
# "Why use Stack Overflow: I don't use Stack Overflow",
# "How often are Stack Overflow's answers helpful",
# 'Why answer: Help a programmer in need', 'Why answer: Help future programmers',
# 'Why answer: Demonstrate expertise', 'Why answer: Self promotion',
# 'Why answer: Sense of responsibility to developers', 'Why answer: No idea',
# "Why answer: I don't answer and I don't want to", "Why answer: I don't answer but I want to"]
###Output
_____no_output_____
###Markdown
Questions: descriptive: Why do people use SO & why do they answer (or not)?descriptive: 1. Do people with more work experience in IT find StackOverflow less or more helpful than people with less work experience in IT? 2.
###Code
# tidy data
# transform data
# visualise data
#Subset to only quantitative vars
num_vars = df[['Salary', 'CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
prop_sals = len(num_vars.dropna(subset=['Salary'])) / len(num_vars) # Proportion of individuals in the dataset with salary reported
prop_sals
X = sal_rm[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']] #Create X using explanatory variables from sal_rm
y = sal_rm['Salary'] #Create y using the response variable of Salary
# Split data into training and test data, and fit a linear model
X_train, X_test, y_train, y_test = train_test_split(X, y , test_size=.30, random_state=42)
lm_model = LinearRegression(normalize=True)
# If our model works, it should just fit our model to the data. Otherwise, it will let us know.
try:
lm_model.fit(X_train, y_train)
except:
print("Oh no! It doesn't work!!!")
# Remove the rows associated with nan values in any column from num_vars (this was the removal process used in the screencast). Store the dataframe with these rows removed in all_rem.
all_rm = num_vars.dropna() # dataframe with rows for nan in any column removed
# visualising
df_flights.boxplot('dep_time','origin',rot = 30,figsize=(5,6))
###Output
_____no_output_____
###Markdown
Modeling
###Code
# model
# communicate
###Output
_____no_output_____ |
_build/html/_sources/curriculum-notebooks/Mathematics/ProbabilityExperiment/probability-experiment.ipynb | ###Markdown
 To run this notebook press the >> Button and confirm "Restart and Run all".
###Code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
%%html
<style>
.output_wrapper button.btn.btn-default,
.output_wrapper .ui-dialog-titlebar {
display: none;
}
</style>
# Modules
import string
import numpy as np
import pandas as pd
import qgrid as q
import matplotlib.pyplot as plt
# Widgets & Display modules, etc..
from ipywidgets import widgets as w
from ipywidgets import Button, Layout
from IPython.display import display, Javascript, Markdown
# grid features for interactive grids
grid_features = { 'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'rowHeight': 40,
'enableColumnReorder': True,
'enableTextSelectionOnCells': True,
'editable': True,
'filterable': False,
'sortable': False,
'highlightSelectedRow': True}
from ipywidgets import Button , Layout , interact,widgets
from IPython.display import Javascript, display
# Function: executes previous cell on button widget click event and hides achievement indicators message
def run_current(ev):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+0,IPython.notebook.get_selected_index()+1)'))
# Counter for toggling achievement indicator on/off
button_ctr = 0
# Achievement Indicators
line_1 = "#### Achievement Indicators"
line_2 = "**General Outcome:**"
line_3 = "* The general outcome of this notebook is to use experimental or theoretical probabilities to represent and solve problems involving uncertainty."
line_4 = "**Specific Outcome 4:**"
line_5 = "* Express probabilities as ratios, fractions and percents."
line_6 = "**Specific Outcome 5:**"
line_7 = "* Identify the sample space (where the combined sample space has 36 or fewer elements) for a probability experiment involving two independent events.*"
line_8 = "**Specific Outcome 6:**"
line_9 = "* Conduct a probability experiment to compare the theoretical probability (determined using a tree diagram, table or other graphic organizer) and experimental probability of two independent events*"
# Use to print lines, then save in lines_list
def print_lines(n):
lines_str = ""
for i in range(1,n+1):
lines_str = lines_str + "line_"+str(i)+","
lines_str = lines_str[:-1]
print(lines_str)
lines_list = [line_1,line_2,line_3,line_4,line_5,line_6,line_7,line_8,line_9]
# Show/Hide buttons
ai_button_show = widgets.Button(button_style='info',description="Show Achievement Indicators", layout=Layout(width='25%', height='30px') )
ai_button_hide = widgets.Button(button_style='info',description="Hide Achievement Indicators", layout=Layout(width='25%', height='30px') )
display(Markdown("For instructors:"))
button_ctr += 1
if(button_ctr % 2 == 0):
for line in lines_list:
display(Markdown(line))
display(ai_button_hide)
ai_button_hide.on_click( run_current )
else:
display(ai_button_show)
ai_button_show.on_click( run_current )
###Output
_____no_output_____
###Markdown
Statistics and Probability Chance and Uncertainty Grade 11 Math OverviewIn this notebook we will explore basic notions and properties about expressing and manipulating probabilities. The general outcome of this notebook is to use experimental or theoretical probabilities to represent and solve problems involving uncertainty. Probabilities as ratios, fractions and percentsA natural question to ask is: how do we measure the probability associated to an event of a given probability experiment, i.e. for a given event, what is the probability it occurs? We define this now, illustrate with a simple example involving dice, and we will then provide an interactive exercise. Definition. The probability of an event is the ratio between the size of the event (as a collection of outcomes) and the size of the sample space. The sample space of rolling a single dice is given by $\lbrace$1,2,3,4,5,6 $\rbrace$ and had sample size 6. If we assume each face is equally likely to occur (i.e. the dice is unbiased), then the probability of getting each face is the ratio or fraction $\dfrac{1}{6}$. We will denote the probability of getting each number as $P(i)$, where $i$ can either be 1,2,3,4,5,6. Then, the probability of getting event 1, denoted $P(1)$ is equal to $\dfrac{1}{6}$; more precisely: $P(1)=P(2)=P(3)=P(4)=P(5)=P(6)=\dfrac{1}{6}$.This is equivalent to stating that the probability of getting any given face is 1 in 6, or $1:6$. Using ratios we have $P(i) = 1:6$, where $i = 1,2,3,4,5,6$. We can also express probabilities using percents. The total number of outcomes is considered 100%. Since there are 6 possible outcomes and assuming equal probability, $P(i) = 100 / 6 = 16.67 \%$. We summarize in the table below.|Event $i$ |1 |2 |3 |4 |5 |6 ||----------------------------|--|--|--|--|--|--||Probability $P(i)$ as a fraction|$\dfrac{1}{6}$|$\dfrac{1}{6}$|$\dfrac{1}{6}$|$\dfrac{1}{6}$|$\dfrac{1}{6}$|$\dfrac{1}{6}$||Probability $P(i)$ as a ratio |1:6|1:6|1:6|1:6|1:6|1:6||Probability $P(i)$ as a percent|16.67%|16.67%|16.67%|16.67%|16.67%|16.67%|
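The small sketch below (not part of the original lesson) expresses the same single-die probabilities programmatically as a fraction, a ratio and a percent:
###Code
from fractions import Fraction

sample_space = [1, 2, 3, 4, 5, 6]
p_fraction = Fraction(1, len(sample_space))      # 1/6 for every face
p_ratio = f"1:{len(sample_space)}"               # '1:6'
p_percent = round(100 / len(sample_space), 2)    # 16.67
###Output
_____no_output_____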
###Code
import numpy as np
import matplotlib
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
import matplotlib.gridspec as gridspec
import ipywidgets
from ipywidgets import interact,interact_manual,widgets
%matplotlib inline
###Output
_____no_output_____
###Markdown
Interactive Example: Probabilities as Fractions, Ratios and PercentsThe widget below illustrates a probability experiment using roulettes of various sizes. The basic experiment consists in spinning a roulette, divided in a given number of compartments, each of the same size and associated with a unique number that identifies it. The outcome of the experiment is the compartment of the roulette whose number shows in red. Basically, this is an experiment similar to rolling a dice, but where we control the number of faces (number of compartments in the roulette). We assume the roulette is unbiased: each compartment has the same chance to appear in red whenspinning the roulette.On the upper side of the widget you find a drop down menu indicating the size of the sample space, which is the number of compartments of the roulette. In this case, we are considering roulettes whose outcomes are integers from 1 to the size of the sample space. We consider roulettes with sample spaces of size 2, 4, 6 and 8.Below the drop down menu you will find find a red button. Click it to play. On the left hand side of the widget you will see a roulette with numbers in black and one number in red. The number in red corresponds to the outcome of the experiment. On the right hand side you will find a printed message explaining what the probability of the event associated to the obtained outcome is. Play multiple times to simulate what would happen if you spun the roulette. Change the size of the sample space to learn what the different probabilities associated to each event in the roulette are.
###Code
###
def roulette(number_parititions,value):
if value==True or value==False:
lucky_number_one = random.choice(np.arange(1,2*number_parititions+1))
axalpha = 0.05
figcolor = 'white'
dpi = 80
fig = plt.figure(figsize=(15,10), dpi=dpi,facecolor='black')
plt.subplot(211)
plt.subplot(212)
fig.patch.set_edgecolor(figcolor)
fig.patch.set_facecolor(figcolor)
ax = plt.subplot(121,projection='polar',facecolor="red")
ax.patch.set_alpha(axalpha)
ax.set_axisbelow(True)
ax1 = plt.subplot(122)
ax1.grid(False)
ax1.set_xlim(0.1,0.9)
ax1.set_ylim(0.4,0.8)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.axis("off")
arc = 2. * np.pi
N = number_parititions
theta = np.arange(0.0, arc, arc/N)
if number_parititions == 4:
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.0 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(0, 7, "1", fontsize=20,transform=ax.transData._b,)
ax.text(5, 5, "2", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b)
ax.text(4, -5.5, "4", fontsize=20,transform=ax.transData._b)
ax.text(0, -7, "5", fontsize=20,transform=ax.transData._b)
ax.text(-5, -5, "6", fontsize=20,transform=ax.transData._b)
ax.text(-5, 5, "8", fontsize=20,transform=ax.transData._b)
ax.text(-7, 0, "7", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
ax.text(0, 7, "1", fontsize=20,transform=ax.transData._b,color="red")
elif lucky_number_one==2:
ax.text(5, 5, "2", fontsize=20,transform=ax.transData._b,color="red")
elif lucky_number_one==3:
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
ax.text(4, -5.5, "4", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==5:
ax.text(0, -7, "5", fontsize=20,transform=ax.transData._b,color="red")
elif lucky_number_one==6:
ax.text(-5, -5, "6", fontsize=20,transform=ax.transData._b,color="red")
elif lucky_number_one==7:
ax.text(-7, 0, "7", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==8:
ax.text(-5, 5, "8", fontsize=20,transform=ax.transData._b,color='red')
ax1.text(0.1,0.77,"Probability of each event",fontsize=25)
ax1.text(0.1,0.5,"P(1) = 1/8 = 1:8 = 12.5%\n\nP(2) = 1/8 = 1:8 = 12.5%\n\nP(3) = 1/8 = 1:8 = 12.5%\n\nP(4) = 1/8 = 1:8 = 12.5%\n\nP(5) = 1/8 = 1:8 = 12.5%\n\nP(6) = 1/8 = 1:8 = 12.5%\n\nP(7) = 1/8 = 1:8 = 12.5%\n\nP(8) = 1/8 = 1:8 = 12.5%"\
,fontsize=20)
ax1.text(0.1,0.45,"Probability Outcome",fontsize=25)
ax1.text(0.1,0.42,"The result after spinning is " + str(lucky_number_one),fontsize=20)
elif number_parititions == 3:
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.3 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b)
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b)
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b)
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==3:
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==5:
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==6:
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b,color='red')
ax1.text(0.1,0.74,"Probability of each event",fontsize=25)
ax1.text(0.1,0.55,"P(1) = 1/6 = 1:6 = 16.67%\n\nP(2) = 1/6 = 1:6 = 16.67%\n\nP(3) = 1/6 = 1:6 = 16.67\n\nP(4) = 1/6 = 1:6 = 16.67%\n\nP(5) = 1/6 = 1:6 = 16.67%\n\nP(6) = 1/6 = 1:6 = 16.67%"\
,fontsize=20)
ax1.text(0.1,0.5,"Probability Outcome",fontsize=25)
ax1.text(0.1,0.48,"The result after spinning is " + str(lucky_number_one),fontsize=20)
elif number_parititions == 2:
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.7 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(0, 8, "1", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "2", fontsize=20,transform=ax.transData._b)
ax.text(0, -8, "3", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "4", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
ax.text(0, 8, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
ax.text(7, 0, "2", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==3:
ax.text(0, -8, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
ax.text(-8, 0, "4", fontsize=20,transform=ax.transData._b,color='red')
ax1.text(0.1,0.74,"Probability of each event",fontsize=25)
ax1.text(0.1,0.6,"P(1) = 1/4 = 1:4 = 25%\n\nP(2) = 1/4 = 1:4 =25%\n\nP(3) = 1/4 = 1:4 =25%\n\nP(4) = 1/4 = 1:4 =25%",fontsize=20)
#ax1.text(0.1,0.5,"This is equivalent to 50%.",fontsize=20)
ax1.text(0.1,0.54,"Probability Outcome",fontsize=25)
ax1.text(0.1,0.52,"The result after spinning is " + str(lucky_number_one),fontsize=20)
elif number_parititions == 1:
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [4 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(8, 0, "1", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "2", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
ax.text(8, 0, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
ax.text(-8, 0, "2", fontsize=20,transform=ax.transData._b,color='red')
ax1.text(0.1,0.7,"Probability of each event",fontsize=25)
ax1.text(0.1,0.6,"P(1) = 1/2 = 1:2 = 50%\n\nP(2) = 1/2 = 1:2 =50%",fontsize=20)
ax1.text(0.1,0.54,"Probability Outcome",fontsize=25)
ax1.text(0.1,0.52,"The result after spinning is " + str(lucky_number_one),fontsize=20)
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(False)
ax.set_yticks(np.arange(1, 9, 2))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.set_title("Probabilities as fractions, ratios and percents",fontsize=30)
plt.show()
style = {'description_width': 'initial'}
lucky = interact(roulette,number_parititions = widgets.Dropdown(
options={'Two': 1, 'Four': 2, 'Six': 3,'Eight':4},
value=4,
description='Size of sample space:',
style=style
),
value = widgets.ToggleButton(
value=True,
description='Click to Play!',
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
))
###Output
_____no_output_____
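###Markdown
As a quick aside (a minimal sketch in plain Python, separate from the widget code above): fractions, ratios and percents are just three ways of writing the same number, and the `fractions` module can be used to check the conversion for the eight-compartment roulette, where each event has probability 1/8.
###Code
from fractions import Fraction

p = Fraction(1, 8)            # probability of any single compartment on the 8-roulette
print(p)                      # 1/8   (fraction)
print(f"1:{p.denominator}")   # 1:8   (ratio notation used above)
print(f"{float(p) * 100}%")   # 12.5% (percent)
###Output
_____no_output_____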
###Markdown
Question 1Using fractions, what is the probability of the event 1, denoted $P(1)$, if the size of the sample space of the roulette is 4?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","1/2",\
"4","1/3",\
"1/4"],
value='Select option',
description="Probability as fraction",
disabled=False,
style=s
))
def reflective_angle_question(answer):
if answer=="Select option":
print("Click on the correct probability expressed as a fraction.")
elif answer=="1/4":
print("Correct!\nWith a sample space of size 4, each with equal likelihood, P(1)=1/4.")
    else:
print("Hint: What is P(1) if P(i) = 1/4 for all i = 1,2,3,4?")
###Output
_____no_output_____
###Markdown
Question 2What is P(1) if the size of the sample space is 4, but this time expressed as percent?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","25%",\
"100%","40%",\
"10%"],
value='Select option',
description="Probability as percent",
disabled=False,
style=s
))
def reflective_angle_question(answer):
if answer=="Select option":
print("Click on the correct probability expressed in percent.")
elif answer=="25%":
print("Correct!\nWith four probability events, each with equal likelihood, P(1) = 25%.")
    else:
print("Hint: The total number of outcomes is 4, which corresponds to 100%. What is 100/4?")
###Output
_____no_output_____
###Markdown
Question 3Using the widget created above, change the Size of sample space to 8. What is $P(7)$ as a ratio?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","1:6",\
"1:4","1:8",\
"1:7"],
value='Select option',
description="Probability as ratio",
disabled=False,
style=s
))
def reflective_angle_question(answer):
if answer=="Select option":
        print("Click on the correct probability expressed as a ratio.")
elif answer=="1:8":
print("Correct!\nWith eight probability events, each with equal likelihood, \nP(7) = 1:8.")
    else:
print("Hint: 1 in every 8 corresponds to the event 7. What is P(7) in ratio?")
###Output
_____no_output_____
###Markdown
Independent Events & Sample SpaceIn this section, we define the concept of **independent probability experiments** as well as the corresponding sample space, and provide a game where we can experiment with the two concepts. Definition. We say that two probability experiments are independent if the outcome of one does not affect the outcome of the other. For example, if we spin two roulettes at the same time, then the two experiments are independent since the outcome of each does not affect the other.Try spinning the two roulettes below using the red button. As before, in each roulette the number in red denotes the outcome of the experiment. These two roulettes are not linked and, as you can see from a few spins, their outcomes are not related at all: the two spins are independent experiments.
###Code
def spin(value):
if value==True or value==False:
lucky_number_one = random.choice([1,2,3,4,5,6])
lucky_number_one_c = random.choice([1,2,3,4,5,6])
x_t,y_t = [i/10 for i in range(10)],[i/10 for i in range(10)]
axalpha = 0.05
figcolor = 'white'
dpi = 80
gs = gridspec.GridSpec(2, 3)
fig = plt.figure(figsize=(15,8), dpi=dpi,facecolor='black')
fig.patch.set_edgecolor(figcolor)
fig.patch.set_facecolor(figcolor)
ax1 = plt.subplot(gs[0, 0])
ax1.grid(False)
ax1.axis("Off")
ax2 = plt.subplot(gs[1, 0])
ax2.grid(False)
ax2.axis("Off")
ax5 = plt.subplot(gs[0, 2])
ax5.grid(False)
ax5.axis("Off")
ax6 = plt.subplot(gs[1, 2])
ax6.grid(False)
ax6.axis("Off")
#ax.axis("Off")
ax = plt.subplot(gs[0,0],projection='polar',facecolor="red") # row 0, col 0
plt.plot([0,1])
ax.plot(x_t,y_t,transform=ax.transData._b,color="#FEE9FF",linewidth=5)
ax.patch.set_alpha(axalpha)
ax.set_axisbelow(True)
number_parititions = 3
arc = 2. * np.pi
N = number_parititions
theta = np.arange(0.0, arc, arc/N)
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.3 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b)
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b)
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b)
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
#bar3.set_facecolor("#000000")
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
#bar2.set_facecolor("#000000")
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==3:
#bar1.set_facecolor("#000000")
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
#bar6.set_facecolor("#000000")
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==5:
#bar5.set_facecolor("#000000")
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==6:
#bar4.set_facecolor("#000000")
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b,color='red')
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(False)
ax.axis("On")
ax.set_title("Top Roulette",fontsize=20)
ax4 = plt.subplot(gs[1, 0],projection='polar',facecolor="red") # row 0, col 0
plt.plot([0,1])
ax4.plot(x_t,y_t,transform=ax4.transData._b,color="#FEE9FF",linewidth=5)
ax4.patch.set_alpha(axalpha)
ax4.set_axisbelow(True)
bars = ax4.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b)
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b)
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b)
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b)
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b)
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b)
if lucky_number_one_c==1:
#bar3.set_facecolor("#000000")
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==2:
#bar2.set_facecolor("#000000")
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==3:
#bar1.set_facecolor("#000000")
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==4:
#bar6.set_facecolor("#000000")
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==5:
#bar5.set_facecolor("#000000")
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==6:
#bar4.set_facecolor("#000000")
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b,color='red')
ax4.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax4.grid(False)
ax4.axis("On")
ax4.set_title("Bottom Roulette",fontsize=20)
x,y = np.array([i/10 for i in range(11)]),np.array([i/10 for i in range(11)])
ax2 = plt.subplot(gs[0,1:]) # row 0, col 0
plt.plot([0,1])
ax2.grid(False)
ax2.axis("Off")
#ax2.set_title("Top Roulette Outcome",fontsize=20)
ax2.plot(x,y,color='white',linewidth=4)
ax2.text(0.1,0.5,"Top Roulette Outcome: " +str(lucky_number_one),fontsize=20)
ax5 = plt.subplot(gs[1,1:]) # row 0, col 0
plt.plot([0,1])
ax5.grid(False)
ax5.axis("Off")
#ax5.set_title("Bottom Roulette Outcome",fontsize=20)
ax5.plot(x,y,color='white',linewidth=4)
ax5.text(0.1,0.5,"Bottom Roulette Outcome: " + str(lucky_number_one_c),fontsize=20)
lucky = interact(spin,value = widgets.ToggleButton(
value=True,
description="Spin",
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
))
###Output
_____no_output_____
###Markdown
Recall that Definition. The sample space of an experiment is the set of all possible outcomes of that experiment.If, for example, we take the two roulettes above, we can define the sample space of spinning both of then at the same time as the set of all ordered pairs $(n_t,n_b)$, where $n_t$ denotes the outcome of spinning the top roulette and $n_b$ denotes the outcome of spinning the bottom roulette. This sample space is given by the table below, which is a 6 by 6 table where each entry is a pair $(n_t,n_b)$.
###Code
def spin_sample_space(value):
if value==True or value==False:
lucky_number_one = random.choice([1,2,3,4,5,6])
lucky_number_one_c = random.choice([1,2,3,4,5,6])
x_t,y_t = [i/10 for i in range(10)],[i/10 for i in range(10)]
axalpha = 0.05
figcolor = 'white'
dpi = 80
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15,8), dpi=dpi,facecolor='black')
fig.patch.set_edgecolor(figcolor)
fig.patch.set_facecolor(figcolor)
ax = plt.subplot(gs[0, 0],projection='polar',facecolor="red")
plt.plot([0,1])
ax.patch.set_alpha(axalpha)
ax.set_axisbelow(True)
ax.plot(x_t,y_t,transform=ax.transData._b,color="#FEE9FF",linewidth=5)
number_parititions = 3
arc = 2. * np.pi
N = number_parititions
theta = np.arange(0.0, arc, arc/N)
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.3 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b)
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b)
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b)
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
#bar3.set_facecolor("#000000")
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
#bar2.set_facecolor("#000000")
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==3:
#bar1.set_facecolor("#000000")
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
#bar6.set_facecolor("#000000")
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==5:
#bar5.set_facecolor("#000000")
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==6:
#bar4.set_facecolor("#000000")
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b,color='red')
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(False)
ax.axis("On")
ax.set_title("Top Roulette",fontsize=20)
ax4 = plt.subplot(gs[1, 0],projection='polar',facecolor="red") # row 0, col 0
plt.plot([0,1])
ax4.patch.set_alpha(axalpha)
ax4.set_axisbelow(True)
ax4.plot(x_t,y_t,transform=ax4.transData._b,color="#FEE9FF",linewidth=5)
bars = ax4.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b)
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b)
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b)
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b)
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b)
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b)
if lucky_number_one_c==1:
#bar3.set_facecolor("#000000")
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==2:
#bar2.set_facecolor("#000000")
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==3:
#bar1.set_facecolor("#000000")
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==4:
#bar6.set_facecolor("#000000")
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==5:
#bar5.set_facecolor("#000000")
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==6:
#bar4.set_facecolor("#000000")
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b,color='red')
ax4.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax4.grid(False)
ax4.axis("On")
ax4.set_title("Bottom Roulette",fontsize=20)
ax1 = plt.subplot(gs[:, 1:],facecolor='#0475A8') # row 1, span all columns
plt.plot([0,1])
ax1.set_axisbelow(True)
ax1.grid(color='black', linestyle='-', linewidth=2)
ax1.set_xlim(0.1,0.7)
ax1.set_ylim(0.1,0.7)
rec = Rectangle([lucky_number_one/10,lucky_number_one_c/10],0.1,0.1,facecolor="black")
ax1.add_patch(rec)
x,y = [lucky_number_one/10,lucky_number_one/10+ 0.1], [lucky_number_one_c/10,lucky_number_one_c/10 + 0.1]
ax1.plot(x,y,color='black',linewidth=4)
for i in range(1,7):
for j in range(1,7):
ax1.text(i/10 + 0.02,j/10 + 0.055,"(" + str(i)+","+ str(j)+")",fontsize = 15,color='white')
ax1.set_xticklabels([" ",1,2,3,4,5,6])
ax1.set_yticklabels([" ",1,2,3,4,5,6])
ax1.set_xlabel("Top Roulette Outcome",fontsize = 20)
ax1.set_ylabel("Bottom Roulette Outcome",fontsize = 20)
ax1.xaxis.tick_top()
ax1.invert_yaxis()
plt.show()
lucky = interact(spin_sample_space,value = widgets.ToggleButton(
value=True,
description="Spin",
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
))
###Output
_____no_output_____
###Markdown
In this case, since the events are independent and each event has six possible outcomes, the sample space contains$$6 \times 6 = 36$$possible outcomes, where each outcome is a pair of the form $(n_t,n_b)$. The probability of obtaining any given pair $(n_t,n_b)$, is given by the probability of obtaining $n_t$ with the top roulette (first experiment) multiplied by the probability of obtaining $n_b$ with the bottom roulette (second experiment). If we assume that each event is equally likely to occur$$P(n_t,n_b) = \dfrac{1}{6} \times \dfrac{1}{6} = \dfrac{1}{36}$$Note that we can multiply * the sizes of the two sample spaces (for each roulette) to obtain the sample space size of the combined experiments* the probabilities of obtaining $n_t$ and $n_b$ to obtain the probability of the event $(n_t,n_b)$**because** the two experiments are **independent**. This is an important property of joint probability experiments.Property. Consider two independent probability experiments $E_1$ and $E_2$ of respective sample space sizes $N_1$ and $N_2$. The size of the sample space of the joint experiment $(E_1,E_2)$ is $N_1\times N_2$. The probability of the event $(n_1,n_2)$ is $P(n_1)\times P(n_2)$. Question 4What is the probability assigned to the event (1,1)?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","2/36",\
"1/6","36",\
"1/36"],
value='Select option',
description="Probability as fraction",
disabled=False,
style=s
))
def reflective_angle_question(answer):
if answer=="Select option":
print("Click on the correct probability expressed as a fraction.")
elif answer=="1/36":
print("Correct!")
    else:
print("Hint: There are 36 events, each with equal likelihood of occurrence. \nYou also know that each pair is unique.")
###Output
_____no_output_____
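###Markdown
As a quick check of the property stated above (a minimal sketch in plain Python, without the widgets): enumerating all ordered pairs $(n_t,n_b)$ confirms that the joint sample space has $6 \times 6 = 36$ outcomes, so any single pair such as $(1,1)$ has probability $1/36$.
###Code
from itertools import product
from fractions import Fraction

pairs = list(product(range(1, 7), range(1, 7)))  # all (top, bottom) outcomes
print(len(pairs))                                # 36
print(Fraction(1, len(pairs)))                   # 1/36, the probability of any single pair
###Output
_____no_output_____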
###Markdown
A second example of a sample spaceLet's take the two roulettes as before, but this time let's define the sample space of spinning both of them at the same time as the **parity** of the sum $$n_t + n_b$$ where as before $n_t$ denotes the outcome of the Top Roulette and $n_b$ denotes the outcome of the Bottom Roulette.Then the sample space is given by the set $\lbrace \text{even},\text{odd} \rbrace$.This is illustrated by the table below, which is a 6 by 6 table where each entry contains the **parity** of the sum $$n_t + n_b$$
###Code
def fair(value):
if value==True or value==False:
lucky_number_one = random.choice([1,2,3,4,5,6])
lucky_number_one_c = random.choice([1,2,3,4,5,6])
x_t,y_t = [i/10 for i in range(10)],[i/10 for i in range(10)]
axalpha = 0.05
figcolor = 'white'
dpi = 80
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15,8), dpi=dpi,facecolor='black')
fig.patch.set_edgecolor(figcolor)
fig.patch.set_facecolor(figcolor)
ax = plt.subplot(gs[0, 0],projection='polar',facecolor="red") # row 0, col 0
plt.plot([0,1])
ax.patch.set_alpha(axalpha)
ax.set_axisbelow(True)
ax.plot(x_t,y_t,transform=ax.transData._b,color="#FEE9FF",linewidth=5)
number_parititions = 3
arc = 2. * np.pi
N = number_parititions
theta = np.arange(0.0, arc, arc/N)
radii_ar = [1.0 for i in range(number_parititions)]
width_ar = [1.3 for i in range(number_parititions)]
radii = 10 * np.array(radii_ar)
width = np.pi/4 * np.array(width_ar)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b)
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b)
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b)
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b)
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b)
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b)
if lucky_number_one==1:
#bar3.set_facecolor("#000000")
ax.text(-4, 7, "1", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==2:
#bar2.set_facecolor("#000000")
ax.text(3, 6.5, "2", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==3:
#bar1.set_facecolor("#000000")
ax.text(7, 0, "3", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==4:
#bar6.set_facecolor("#000000")
ax.text(3, -6.5, "4", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==5:
#bar5.set_facecolor("#000000")
ax.text(-4, -7, "5", fontsize=20,transform=ax.transData._b,color='red')
elif lucky_number_one==6:
#bar4.set_facecolor("#000000")
ax.text(-8, 0, "6", fontsize=20,transform=ax.transData._b,color='red')
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(False)
ax.axis("On")
ax.set_title("Top Roulette",fontsize=20)
ax1 = plt.subplot(gs[:, 1:],facecolor='#0475A8') # row 1, span all columns
plt.plot([0,1])
ax1.set_axisbelow(True)
ax1.grid(color='black', linestyle='-', linewidth=2)
ax1.set_xlim(0.1,0.7)
ax1.set_ylim(0.1,0.7)
ax1.set_xlabel("Top Roulette Outcome",fontsize=18)
ax1.set_ylabel("Bottom Roulette Outcome",fontsize=18)
ax1.xaxis.tick_top()
ax1.invert_yaxis()
#
for i in range(1,7):
for j in range(1,7):
if (i+j)%2==0:
ax1.text(i/10 + 0.02,j/10 + 0.055,"even" ,fontsize = 15,color='white')
else:
ax1.text(i/10 + 0.02,j/10 + 0.055,"odd" ,fontsize = 15,color='white')
rec = Rectangle([lucky_number_one/10,lucky_number_one_c/10],0.1,0.1,facecolor="black")
x,y = [lucky_number_one/10,lucky_number_one/10+ 0.1], [lucky_number_one_c/10,lucky_number_one_c/10 + 0.1]
ax1.plot(x,y,color='black',linewidth=4)
#rec_c = Rectangle([lucky_number_one_c/10,lucky_number_two_c/10],0.1,0.1,facecolor="#6b6e72")
ax1.add_patch(rec)
#ax1.add_patch(rec_c)
ax1.set_xticklabels([" ",1,2,3,4,5,6])
ax1.set_yticklabels([" ",1,2,3,4,5,6])
ax4 = plt.subplot(gs[1, 0],projection='polar',facecolor="red") # row 0, col 0
plt.plot([0,1])
ax4.patch.set_alpha(axalpha)
ax4.set_axisbelow(True)
ax4.plot(x_t,y_t,transform=ax4.transData._b,color="#FEE9FF",linewidth=5)
bars = ax4.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor("pink")
bar.set_alpha(0.6)
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b)
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b)
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b)
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b)
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b)
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b)
if lucky_number_one_c==1:
#bar3.set_facecolor("#000000")
ax4.text(-4, 7, "1", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==2:
#bar2.set_facecolor("#000000")
ax4.text(3, 6.5, "2", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==3:
#bar1.set_facecolor("#000000")
ax4.text(7, 0, "3", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==4:
#bar6.set_facecolor("#000000")
ax4.text(3, -6.5, "4", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==5:
#bar5.set_facecolor("#000000")
ax4.text(-4, -7, "5", fontsize=20,transform=ax4.transData._b,color='red')
elif lucky_number_one_c==6:
#bar4.set_facecolor("#000000")
ax4.text(-8, 0, "6", fontsize=20,transform=ax4.transData._b,color='red')
ax4.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax4.grid(False)
ax4.axis("On")
ax4.set_title("Bottom Roulette",fontsize=20)
sum_n = lucky_number_one + lucky_number_one_c
#print("Top Roulette Outcome + Bottom Roulette Outcome")
print(str(lucky_number_one) + " + " + str(lucky_number_one_c) + " = " + str(sum_n))
if sum_n %2==0:
print("OUTCOME: even" )
else:
print("OUTCOME: odd" )
plt.show()
lucky = interact(fair,value = widgets.ToggleButton(
value=True,
description="Let's Play",
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
))
###Output
_____no_output_____
###Markdown
Question 5We claim that this game is fair, but how can we verify it? Recall that a probability experiment is fair if every outcome is equally likely to occur. In order for this experiment to be fair, the probability of the event **even** *must* be equal to the probability of event **odd**. What is the probability that the sum of the numbers in the top and bottom roulettes is even?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","1/2",\
"4/36","1/3",\
"18/36"],
value='Select option',
description="Probability sum is even",
disabled=False,
style=s
))
def fair_game(answer):
if answer=="Select option":
print("Click on the correct probability expressed as a fraction.")
elif answer=="1/2" or answer=="18/36":
print("Correct!\nThere are a total of 36 possible outcomes. 18 out of 36 are even numbers.\nThus the probability P(even) = 18/36 or 1/2. ")
    else:
print("Hint: There are 36 entries in our sample space, each with equal likelihood of occurrence.\nHow many of the 36 correspond to even numbers")
###Output
_____no_output_____
###Markdown
Question 6What is the probability that the sum of the numbers in the top and bottom roulettes is odd? In other words, what is the probability that a player, say Bob, who wins whenever the sum is odd, actually wins?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact(answer =widgets.Select(
options=["Select option","19/36",\
"17/36","18/36",\
"1/2"],
value='Select option',
description="Probability sum is odd",
disabled=False,
style=s
))
def fair_game(answer):
if answer=="Select option":
print("Click on the correct probability expressed as a fraction.")
elif answer=="1/2" or answer=="18/36":
print("Correct!\nThere are a total of 36 possible outcomes. 18 out of 36 are odd numbers.\nThus the P(odd) = 18/36 or 1/2. ")
    else:
print("Hint: There are 36 entries in our sample space, each with equal likelihood of occurrence.\nHow many of the 36 correspond to odd numbers?")
###Output
_____no_output_____
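###Markdown
Before summarizing the answer, here is a quick enumeration of the counts used in Questions 5 and 6 (a minimal sketch in plain Python, separate from the widgets above): out of the 36 equally likely pairs, how many give an even sum and how many give an odd sum?
###Code
from itertools import product

even = sum(1 for a, b in product(range(1, 7), range(1, 7)) if (a + b) % 2 == 0)
odd = 36 - even
print(even, odd)             # 18 18
print(even / 36, odd / 36)   # 0.5 0.5 -> P(even) = P(odd) = 1/2, so the experiment is fair
###Output
_____no_output_____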
###Markdown
In the section above we learned that there are 18 out of 36 possible outcomes where the sum $$n_t + n_b$$is an even number. Thus$$P(even) = \dfrac{18}{36} = \dfrac{1}{2}$$Similarly, there are 18 out of 36 possible outcomes where the sum$$n_t + n_b$$is an odd number. Thus$$P(odd) = \dfrac{18}{36} = \dfrac{1}{2}$$Then $P(odd) = P(even)$. With this we verify that indeed the experiment is fair. Theoretical vs Experimental Probability We begin by stating a few definitions. Definition. The Theoretical Probability of an event $A$, denoted $P_T(A)$, is the ratio of the number of outcomes corresponding to this event to the number of possible outcomes. $$P_T(A) = \dfrac{\text{Total Number of Instances of event A in the Sample Space}}{\text{Total Number of Possible Outcomes}}$$If we take our fair experiment with two roulettes and sample space parity of outcome sum $\lbrace \text{even}, \text{odd} \rbrace$, the theoretical probability of an even event is$$P_T(\text{even}) = \dfrac{18}{36}$$Definition. The Experimental Probability of an event $A$, denoted $P_E(A)$, is computed by running the probability experiment a number of times and computing the observed ratio between the number of times the event occurred and the number of trials of the experiment. $$P_E(A) = \dfrac{\text{Number of Times Event A Actually Occurred}}{\text{Number of trials}}$$In order to determine $P_E(\text{even})$, we first need to spin the roulettes a few times and compare.Use the widget below to simulate spinning the two roulettes. Use the slider to set a number of trials. In this interactive exercise, you can set the number of trials as an integer between 1 and 100. Press `Run Interact` to run an experiment for the given number of trials.On the right hand side you will find a printed message outlining the experimental probability of each event (Sum is Even and Sum is Odd) for the number of trials specified using the widget. On the left hand side you can find a graph comparing both. Press the `Run Interact` button several times.
###Code
%matplotlib inline
def die(number):
count_A,count_C = 0,0
for i in range(number):
lucky_number_one = random.choice([1,2,3,4,5,6])
lucky_number_two = random.choice([1,2,3,4,5,6])
if lucky_number_one - lucky_number_two >=0:
count_A +=1
else:
count_C +=1
return [count_A,count_C]
def even_sum(number):
count_A,count_C = 0,0
for i in range(number):
lucky_number_one = random.choice([1,2,3,4,5,6])
lucky_number_two = random.choice([1,2,3,4,5,6])
sum_n = lucky_number_one + lucky_number_two
if sum_n%2 == 0:
count_A +=1
else:
count_C +=1
return [count_A,count_C]
def experimental_prob(number):
[varoi_1,varoi_2] = even_sum(number)
fig,(ax1,ax2,ax3) = plt.subplots(1,3,sharey=True,figsize=(15,4))
ax2.axis("Off")
ax3.axis("Off")
axalpha = 0.05
even = varoi_1/number
odd = varoi_2/number
labels = ['', '', 'Even Sum', '','Odd Sum']
ax1 = plt.subplot(131,facecolor="white")
ax1.set_title("Experimental Probability",fontsize=20)
ax1.set_ylabel("Probability",fontsize=15)
ax1.set_xlabel("Outcomes",fontsize=15)
ax1.set_xlim([0,3])
ax1.set_xticklabels(labels)
x = np.arange(1,3)
f1,f2= ax1.bar(x,[even,odd])
f1.set_facecolor("#8642f4")
f2.set_facecolor("#518900")
ax1.grid(which='both')
ax1.grid(b=True,which='minor',alpha=0.2,linestyle='--',color='black')
ax1.grid(which='major', alpha=0.2,linestyle='--',color='black')
ax3 = plt.subplot(132)
ax3.axis("Off")
#ax3.set_title( "Positive vs Negative\nLuck Roulette: Outcome",fontsize=20)
rec1 = Rectangle((0.1,0.8),0.3,0.1,facecolor="#8642f4")
ax3.add_patch(rec1)
ax3.text(0.5,0.83,"Experimental Probability: Sum is Even",fontsize=20)
rec2 = Rectangle((0.1,0.7),0.3,0.1,facecolor="#518900")
ax3.add_patch(rec2)
ax3.text(0.5,0.73,"Experimental Probability: Sum is Odd",fontsize=20)
ax2 = plt.subplot(133,facecolor="white")
ax2.axis("Off")
ax2.text(0.9,0.83,str(varoi_1) + "/" + str(number),fontsize=20)
ax2.text(0.9,0.73,str(varoi_2) + "/" + str(number),fontsize=20)
ax3.set_title(" Even or Odd Sum Experiment:\n",fontsize=20)
plt.show()
def run_fair_exp(number):
experimental_prob(number)
#experimental_prob(number,"Negative")
interact_manual(experimental_prob,number=widgets.IntSlider(
value=10,
min=1,
max=100,
step=1,
description='Total Number of Trials',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
));
###Output
_____no_output_____
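###Markdown
The widget above hides the bookkeeping inside plotting code; the computation of $P_E(\text{even})$ itself is only a few lines (a minimal sketch, independent of the widget): spin both roulettes $n$ times, count the even sums, and divide by $n$.
###Code
import random

for n in [10, 100, 1000]:
    evens = sum(1 for _ in range(n)
                if (random.randint(1, 6) + random.randint(1, 6)) % 2 == 0)
    print(f"n = {n:5d}: P_E(even) = {evens / n:.3f}")
###Output
_____no_output_____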
###Markdown
Question 7Use the box below to enter your observations. How do the experimental probabilities of each event change as you increase the number of trials?
###Code
from ipywidgets import widgets as w
from ipywidgets import Button, Layout
from IPython.display import display, Javascript, Markdown
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)'))
style = {'description_width': 'initial'}
question7_text = w.Textarea( value='', placeholder='Write your answer here. Press Record Answer when you finish.', description='', disabled=False , layout=Layout(width='100%', height='75px') )
question7_button = w.Button(button_style='info',description="Record Answer", layout=Layout(width='15%', height='30px'))
display(question7_text)
display(question7_button)
question7_button.on_click( rerun_cell )
question7_input = question7_text.value
if(question7_input != ''):
question7_text.close()
question7_button.close()
display(Markdown("### Your answer for Question 7: Conclusions"))
display(Markdown(question7_input))
###Output
_____no_output_____
###Markdown
**Remarks**We observe that every time we press the `Run Interact` button on the interactive above, the experimental probability of each event varies. However, it seems that as we increase the number of trials, the experimental probabilities of each event approach $1/2$. Let us explore what happens if we increase the number of trials to, say, 10,000. In the widget below you can find a slider that allows you to control the number of trials. In this interactive exercise, you can set the number of trials as an integer between 1 and 10,000. On the left hand side you can find a plot like the one we explored above. On the right hand side you can find the theoretical probability of the events Sum is Even and Sum is Odd. Press the `Run Interact` button. Increase the Total Number of Trials and press the `Run Interact` button multiple times.
###Code
def toss(number):
store_head = []
store_tail = []
other = []
for i in range(number):
toss_coin= random.choice(np.arange(2))
if toss_coin==0:
store_head.append(toss_coin)
elif toss_coin==1:
store_tail.append(toss_coin)
return [store_head,store_tail]
def plot_coin_experiment(number):
varoi = toss(number)
fig,ax = plt.subplots(figsize=(5,5))
ax.set_title("Distribution of Experimental Coin Flipping",fontsize=35)
plt.ylabel("Frequency",fontsize=25)
plt.xlabel("Heads or Tails",fontsize=25)
plt.xticks(np.arange(2), ('Total Number of Heads', 'Total Number of Tails'))
plt.hist(varoi[0])
plt.hist(varoi[1])
plt.grid(which='both')
plt.grid(b=True,which='minor',alpha=0.2,linestyle='--',color='black')
plt.grid(which='major', alpha=0.2,linestyle='--',color='black')
def plot_die_experiment(number):
varoi = die(number)
theor_A = 21/36
theor_C = 15/36
#print(theor)
fig,(ax1,ax2) = plt.subplots(1,2,sharey=True,figsize=(15,8))
# Experimental Probability
ax1.set_title("Experimental Distribution",fontsize=25)
ax1.set_ylabel("Frequency",fontsize=25)
ax1.set_xlabel("Outcomes",fontsize=25)
ax1.set_xlim([0,3])
ax1.set_xticks([])
x = np.arange(1,3)
dice = [varoi[0]/number,varoi[1]/number]
f1,f2 = ax1.bar(x,dice)
f1.set_facecolor("#8642f4")
f2.set_facecolor("#518900")
ax1.grid(which='both')
ax1.grid(b=True,which='minor',alpha=0.2,linestyle='--',color='black')
ax1.grid(which='major', alpha=0.2,linestyle='--',color='black')
# Theoretical Probability
ax2.set_title("Theoretical Distribution",fontsize=25)
ax2.set_ylabel("Frequency",fontsize=25)
ax2.set_xlabel("Outcomes",fontsize=25)
x = np.arange(1,3)
dice_exp = [theor_A,theor_C]
f11,f21 = ax2.bar(x,dice_exp)
f11.set_facecolor("#8642f4")
f21.set_facecolor("#518900")
ax2.set_xlim([0,3])
    ax2.set_xticks(x)                      # tick positions must be numeric
    ax2.set_xticklabels(["Even", "Odd"])   # labels are set separately
ax2.grid(which='both')
ax2.grid(b=True,which='minor',alpha=0.2,linestyle='--',color='black')
ax2.grid(b=True,which='major', alpha=0.2,linestyle='--',color='black')
plt.ylim(0,number)
plt.show()
def plot_fair_experiment(number):
[varoi_1,varoi_2] = even_sum(number)
theor_A = 18/36
theor_C = 18/36
even = varoi_1/number
odd= varoi_2/number
x = np.arange(1,3)
labels = ['', '', 'Even Sum', '','Odd Sum']
fig,(ax1,ax2) = plt.subplots(1,2,sharey=True,figsize=(15,8))
# Experimental Probability
ax1.set_title("Even or Odd Sum Probability Experiment:\nExperimental Probability",fontsize=20)
ax1.set_ylabel("Probability",fontsize=25)
ax1.set_xlabel("Events",fontsize=25)
ax1.set_xlim([0,3])
ax1.set_xticklabels(labels)
ax1.grid(which='major', alpha=0.2,linestyle='--',color='black')
f1,f2 = ax1.bar(x,[even,odd])
f1.set_facecolor("#8642f4")
f2.set_facecolor("#518900")
# Theoretical Probability
ax2.set_title("Even or Odd Sum Probability Experiment:\nTheoretical Probability",fontsize=20)
ax2.set_ylabel("Probability",fontsize=25)
ax2.set_xlabel("Events",fontsize=25)
ax2.set_xlim([0,3])
ax2.set_xticklabels(labels)
ax2.grid(b=True,which='major', alpha=0.2,linestyle='--',color='black')
dice_exp = [theor_A,theor_C]
f11,f21 = ax2.bar(x,dice_exp)
f11.set_facecolor("#8642f4")
f21.set_facecolor("#518900")
plt.ylim(0,1)
plt.show()
interact_manual(plot_fair_experiment,number=widgets.IntSlider(
value=5,
min=1,
max=10000,
step=1,
description='Total Number of Trials',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
));
###Output
_____no_output_____
###Markdown
Question 8How does the experimental probability of each event change as we increase the number of trials? Use the textbox below to record your answers.
###Code
from ipywidgets import widgets as w
from ipywidgets import Button, Layout
from IPython.display import display, Javascript, Markdown
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)'))
style = {'description_width': 'initial'}
question8_text = w.Textarea( value='', placeholder='Write your answer here. Press Record Answer when you finish.', description='', disabled=False , layout=Layout(width='100%', height='75px') )
question8_button = w.Button(button_style='info',description="Record Answer", layout=Layout(width='15%', height='30px'))
display(question8_text)
display(question8_button)
question8_button.on_click( rerun_cell )
question8_input = question8_text.value
if(question8_input != ''):
question8_text.close()
question8_button.close()
display(Markdown("### Your answer for Question 8: Conclusions"))
display(Markdown(question8_input))
###Output
_____no_output_____ |
Intro2PracDS_2020_03-2_PolynomialRegression.ipynb | ###Markdown
Introduction to Practical Data Science, AY 2020, Thursday 4th Period, Session 3 Part 2: Polynomial Regression
###Code
%matplotlib inline
#%matplotlib notebook # if necessary to rotate figures in 3D plot
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import art3d
from ipywidgets import interact
from sklearn import linear_model
###Output
_____no_output_____
###Markdown
What is polynomial regression?Polynomial regression is a method used when there is a single explanatory variable $x$: instead of simple linear regression on $x$, powers of $x$ such as $x$, $x^2$, $x^3$, ... are used as regressors, and the model$$y = a_0 + a_1 x + a_2 x^2 + \ldots + a_M x^M + \xi = \sum_{j=0}^M a_j x^j + \xi$$is fitted. A polynomial regression model can be regarded as a multiple regression model whose regressors are $x_j = x^j$. It is used when fitting a straight line to the data by simple regression is clearly inappropriate.Here- $y$ is the target variable (given as data)- $x^j \ (j=0, 1, 2, \ldots, M)$ are taken as the explanatory variables (given as data)- $a_j \ (j=0, 1, 2, \ldots, M)$ are the regression coefficients (estimated from the data)- $\xi$ is noise (a random component that the model cannot capture)In what follows, unless otherwise noted, __the data are real-valued__.
###Code
# true parameters
A0 = 1.2
A1 = -5.6
A2 = 2.2
# dataset
X = np.arange(0, 3, 0.3)
N = X.size
Y = A0 + A1*X + A2*X**2 +0.5*np.random.randn(N)
# plot the regression line
def plot_XY_and_regressionline(a0=0.0, a1=1.0):
fig = plt.figure()
ax = plt.axes()
ax.set_xlabel("X", size=20)
ax.set_ylabel("Y", size=20)
ax.set_xticks
ax.set_ylim(-3, 3)
ax.scatter(X, Y)
ax.plot([np.min(X), np.max(X)], [a0+a1*np.min(X), a0+a1*np.max(X)], linewidth=3, color='tab:red')
ax.set_title('MSE = %f'%(np.sum((Y-a0-a1*X)**2)/Y.size), size=20)
ax.tick_params(labelsize=12)
interact(plot_XY_and_regressionline, a0=(-5.0, 5.0, 0.1), a1=(-5.0, 10.0, 0.1))
# blue points: data points
# red: regression line
# MSE = Mean Squared Error
# note that the MSE is not necessarily minimized at the true parameter values
###Output
_____no_output_____
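###Markdown
Since polynomial regression is just multiple regression on the powers of $x$, the design matrix can also be built with a standard helper instead of filling it in by hand. The cell below is a minimal sketch; it assumes scikit-learn's `PolynomialFeatures` is available and reuses the `X`, `Y` generated in the cell above.
###Code
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# columns x, x^2 (no bias column; the intercept is handled by LinearRegression)
X_poly = PolynomialFeatures(degree=2, include_bias=False).fit_transform(X.reshape(-1, 1))
reg2 = LinearRegression().fit(X_poly, Y)
print(reg2.intercept_, reg2.coef_)  # estimates of a0 and (a1, a2)
###Output
_____no_output_____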
###Markdown
Fitting a straight line is clearly not an appropriate model for these data. Let us consider polynomial fitting instead. Polynomial fittingIn multiple regression with two regressors, a regression plane is drawn in the three-dimensional data space $(x_1, x_2, y)$, chosen so that the sum of the squared vertical distances from the data to the plane is minimized.
###Code
# true parameters
A0 = 1.2
A1 = -5.6
A2 = 2.2
# dataset
X = np.arange(0, 3, 0.3)
Y = A0 + A1*X + A2*X**2 +0.5*np.random.randn(X.size)
# maximum degree of the polynomial used for fitting
M = 2
# dataset
X_train = np.zeros((X.size, M))
reg = linear_model.LinearRegression()
for j in range(M):
X_train[:, j] = X**(j+1)
reg.fit(X_train, Y)
A_pred = np.zeros(M+1)
A_pred[0] = reg.intercept_
A_pred[1:M+1] = reg.coef_
XX = np.arange(-0.2, 3, 0.1)
YY = np.ones(XX.size)*A_pred[0]
Y_pred = np.ones(X.size)*A_pred[0]
for j in range(1, M+1):
YY += A_pred[j] * XX**j
Y_pred += A_pred[j] * X**j
fig = plt.figure()
ax = plt.axes()
ax.set_xlabel("X", size=20)
ax.set_ylabel("Y", size=20)
ax.set_xticks
ax.set_ylim(-3, 3)
ax.scatter(X, Y, s=80)
ax.plot(XX, YY, linewidth=3, c='tab:orange')
ax.set_title('MSE = %f'%(np.sum((Y-Y_pred)**2)/Y.size), size=20)
ax.tick_params(labelsize=12)
print('A0 = %f'%(reg.intercept_))
for j in range(1, M+1):
print('A%1d = %f'%(j, reg.coef_[j-1]))
###Output
_____no_output_____
###Markdown
The quadratic function fits the data well. However, if we increase the degree of the polynomial used for fitting, the MSE can be made even smaller.
###Code
# plot the polynomial regression
def plot_polynomialregression(M=2):
# dataset
X_train = np.zeros((N, M))
reg = linear_model.LinearRegression()
for j in range(M):
X_train[:, j] = X**(j+1)
reg.fit(X_train, Y)
A_pred = np.zeros(M+1)
A_pred[0] = reg.intercept_
A_pred[1:M+1] = reg.coef_
XX = np.arange(-0.2, 3, 0.01)
YY = np.ones(XX.size)*A_pred[0]
Y_pred = np.ones(X.size)*A_pred[0]
for j in range(1, M+1):
YY += A_pred[j] * XX**j
Y_pred += A_pred[j] * X**j
fig = plt.figure()
ax = plt.axes()
ax.set_xlabel("X", size=20)
ax.set_ylabel("Y", size=20)
ax.set_xticks
ax.set_ylim(-5, 5)
ax.scatter(X, Y, s=80)
ax.plot(XX, YY, linewidth=3, c='tab:orange')
ax.set_title('MSE = %f'%(np.sum((Y-Y_pred)**2)/Y.size), size=20)
ax.tick_params(labelsize=12)
#print('A0 = %f'%(reg.intercept_))
#for j in range(1, M+1):
# print('A%1d = %f'%(j, reg.coef_[j-1]))
interact(plot_polynomialregression, M=(1, 20, 1))
###Output
_____no_output_____
###Markdown
If the degree of the polynomial is taken sufficiently high compared with the number of data points, we can draw a polynomial curve that passes through every point. In that case the regression error on the data points is exactly zero. This shows that simply driving down the MSE is not enough; this phenomenon is generally called overfitting. It is one example of how the MSE can be made arbitrarily small by adding regressors, that is, by making the model more complex. When a model is overfitted, the regression curve does not give correct estimates even in the neighborhood of the training data, and the quality of the estimates suffers. To confirm this, let us take another dataset drawn from the same distribution and check.
###Code
# test dataset
X2 = np.arange(0.1, 3, 0.3)
Y2 = A0 + A1*X2 + A2*X2**2 +0.5*np.random.randn(X2.size)
# compare the polynomial regression with the test data
def plot_polynomialregression_fortestdata(M=2):
# dataset
X_train = np.zeros((N, M))
reg = linear_model.LinearRegression()
for j in range(M):
X_train[:, j] = X**(j+1)
reg.fit(X_train, Y)
A_pred = np.zeros(M+1)
A_pred[0] = reg.intercept_
A_pred[1:M+1] = reg.coef_
XX = np.arange(-0.2, 3, 0.01)
YY = np.ones(XX.size)*A_pred[0]
Y2_pred = np.ones(X2.size)*A_pred[0]
for j in range(1, M+1):
YY += A_pred[j] * XX**j
Y2_pred += A_pred[j] * X2**j
fig = plt.figure()
ax = plt.axes()
ax.set_xlabel("X", size=20)
ax.set_ylabel("Y", size=20)
ax.set_xticks
ax.set_ylim(-5, 5)
ax.scatter(X2, Y2, s=80)
ax.plot(XX, YY, linewidth=3, c='tab:orange')
ax.set_title('MSE = %f'%(np.sum((Y2-Y2_pred)**2)), size=20)
ax.tick_params(labelsize=12)
interact(plot_polynomialregression_fortestdata, M=(1, 20, 1))
###Output
_____no_output_____
###Markdown
We can see that the MSE on the test data increases as the degree is raised. The regression error obtained by applying the model learned on the training data to the test data is called the generalization error, while the regression error at training time is called the training error. The training error can be made arbitrarily small by raising the degree of the polynomial, but the generalization error cannot. A good model is one whose generalization error is also small.
###Code
Mmax = 15
MSE_train = np.zeros(Mmax+1)
MSE_test = np.zeros(Mmax+1)
for M in range(1, Mmax+1):
# dataset
X_train = np.zeros((X.size, M))
reg = linear_model.LinearRegression()
for j in range(M):
X_train[:, j] = X**(j+1)
reg.fit(X_train, Y)
A_pred = np.zeros(M+1)
A_pred[0] = reg.intercept_
A_pred[1:M+1] = reg.coef_
XX = np.arange(-0.2, 3, 0.01)
YY = np.ones(XX.size)*A_pred[0]
Y_pred = np.ones(X.size)*A_pred[0]
Y2_pred = np.ones(X2.size)*A_pred[0]
for j in range(1, M+1):
YY += A_pred[j] * XX**j
Y_pred += A_pred[j] * X**j
Y2_pred += A_pred[j] * X2**j
MSE_train[M] = np.sum((Y-Y_pred)**2)/Y.size
MSE_test[M] = np.sum((Y2-Y2_pred)**2)/Y.size
# plot the MSE
fig = plt.figure()
ax = plt.axes()
ax.set_xlabel("M: degree of polynomial", size=20)
ax.set_ylabel("$\log(MSE)$", size=20)
ax.set_ylim(-5, 6)
ax.set_xticks(np.arange(1, Mmax+1, 1))
ax.plot(np.arange(1, Mmax+1, 1), np.log10(MSE_train[1:]), 'o-', linewidth=3, c='tab:blue', label='training MSE')
ax.plot(np.arange(1, Mmax+1, 1), np.log10(MSE_test[1:]), 'o-', linewidth=3, c='tab:orange', label='test MSE')
ax.tick_params(labelsize=12)
ax.legend(fontsize=12)
###Output
_____no_output_____ |
Notebook + Data Files/Explore_weather_trends.ipynb | ###Markdown
UDACITY Data Analysis Nanodegree Program Project 1 (Exploring Weather Trends) By: Qasim HassanThis is the second step of the project, which is about reading the data and visualizing it properly using a moving average.
###Code
# All necessary libraries used to find insights from the data
import pandas as pd  # for managing the data and finding insights
import matplotlib.pyplot as plt  # for visualizing the data
import numpy as np  # for calculating the moving average
#Loading CSV files
global_temp = pd.read_csv('global_data.csv')  # importing the global temperature data
city_temp = pd.read_csv('city_data.csv')  # importing the city temperature data, i.e. data for one city: Karachi (Pakistan)
global_temp.head()  # checking the first five rows to make sure the data loaded successfully
city_temp.head()  # repeating the above step for the city data
# Checking for missing or null values
city_temp["avg_temp"].isna()
# taking the 10-year rolling (moving) average
glb_mv_avg = global_temp['avg_temp'].rolling(10).mean()
local_mv_avg = city_temp['avg_temp'].rolling(10).mean()
#using matplotlib library to visualize the data
plt.plot(city_temp['year'],local_mv_avg,label='Karachi')
plt.legend()
plt.xlabel("Years")
plt.ylabel("Temperature (°C)")
plt.title(" moving-average temperature of Karachi")
plt.show()
#using matplotlib library to visualize the data
plt.plot(global_temp['year'],glb_mv_avg,label='Global')
plt.legend()
plt.xlabel("Years")
plt.ylabel("Temperature (°C)")
plt.title(" moving-average temperature of earth")
plt.show()
#using matplotlib library to visualize the data
plt.plot(global_temp['year'],glb_mv_avg,label='Global')
plt.plot(city_temp['year'],local_mv_avg,label='Karachi')
plt.legend()
plt.xlabel("Years")
plt.ylabel("Temperature (°C)")
plt.title("10-year moving-average comparison of Temperature, Earth vs Karachi")
plt.show()
###Output
_____no_output_____ |
statistical_rethinking_pyth_2022/week02.ipynb | ###Markdown
Week 2 1. Construct a linear regression of weight as predicted by height, using the adults (age 18 or greater) from the Howell1 dataset. The heights listed below were recorded in the !Kung census, but weights were not recorded for these individuals. Provide predicted weights and 89% compatibility intervals for each of these individuals. That is, fill in the table below, using model-based predictions.

| Individual | height | expected weight | 89% interval |
|---|---|---|---|
| 1 | 140 | | |
| 2 | 160 | | |
| 3 | 175 | | |
###Code
import numpy as np
heights=np.array([140, 160, 175])
import pandas as pd
data=pd.read_csv('data/Howell1.csv', sep=';')
data=data[data['age']>18]
data
data.height.hist(bins=70, legend=True);
data.weight.hist(bins=70, legend=True);
data.age.hist(bins=70,alpha=.6, legend=True);
# data.height-=data.height.mean()
import bayes_window as baw
bw=baw.BayesWindow(df=data, y='height', treatment='weight')
bw.plot(x='weight', color='male', add_box=False)
import numpyro.distributions as dist
br=baw.BayesRegression(bw,dist_slope=dist.LogNormal).fit(center_intercept=True)
br.plot( add_data=False)
from scipy import stats
stats.norm.rvs(loc=br.posterior['intercept']['intercept center interval'].values[0] +
br.posterior['slope']['center interval'].values * heights, scale=br.posterior['sigma_obs'])
###Output
_____no_output_____
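###Markdown
The table in the problem statement asks for an expected weight and an 89% compatibility interval per individual. Below is a minimal sketch of that summary; it assumes the posterior-predictive draws from the cell above are stored in an array `pred` with one column per individual (`pred` is a hypothetical name, not assigned in the original cell).
###Code
import numpy as np

# pred: posterior-predictive weight draws, assumed shape (n_draws, 3)
lo, hi = np.percentile(pred, [5.5, 94.5], axis=0)   # central 89% interval
mean = pred.mean(axis=0)
for h, m, l, u in zip([140, 160, 175], mean, lo, hi):
    print(f"height {h}: expected weight {m:.1f}, 89% interval ({l:.1f}, {u:.1f})")
###Output
_____no_output_____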
###Markdown
2. From the Howell1 dataset, consider only the people younger than 13 years old. Estimate the causal association between age and weight. Assume that age influences weight through two paths. First, age influences height, and height influences weight. Second, age directly influences weight through age-related changes in muscle growth and body proportions. All of this implies this causal model (DAG): A -> H, H -> W, and A -> W. Use a linear regression to estimate the total (not just direct) causal effect of each year of growth on weight. Be sure to carefully consider the priors. Try using prior predictive simulation to assess what they imply.
###Code
import pandas as pd
data=pd.read_csv('data/Howell1.csv', sep=';')
data=data[data['age']<13]
data
bw=baw.BayesWindow(df=data, y='age', treatment='weight', )
bw.plot(x='weight', add_box=False)
br=baw.BayesRegression(bw,dist_slope=dist.LogNormal).fit(center_intercept=True)
br.plot(add_data=False)
###Output
_____no_output_____
###Markdown
3. Now suppose the causal association between age and weight might be different for boys and girls. Use a single linear regression, with a categorical variable for sex, to estimate the total causal effect of age on weight separately for boys and girls. How do girls and boys differ? Provide one or more posterior contrasts as a summary.
###Code
bw=baw.BayesWindow(df=data, y='height', treatment='weight', condition=['male'])
bw.plot(x='weight', color='male', add_box=False)
br=baw.BayesRegression(bw,dist_slope=dist.LogNormal).fit(center_intercept=True)
br.plot(x='male', add_data=False)
import arviz as az
az.plot_forest(br.trace,var_names='slope_per_condition', combined=True, rope=[-1,1]);
az.plot_posterior(br.trace.posterior['slope_per_condition'].sel(male=1)-
br.trace.posterior['slope_per_condition'].sel(male=0));
# from numpyro.infer import Predictive
# Predictive(br.mcmc,num_samples=100).posterior_samples
###Output
_____no_output_____ |
keras_unet_67_final_save.ipynb | ###Markdown
Get the dataLet's first import all the images and associated masks. I downsample both the training and test images to keep things light and manageable, but we need to keep a record of the original sizes of the test images to upsample our predicted masks and create correct run-length encodings later on. There are definitely better ways to handle this, but it works fine for now!
###Code
train = []
train_mask = []
test = []
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
file = "../input/stage1_train/{}/images/{}.png".format(id_,id_)
mfile = "../input/stage1_train/{}/masks/*.png".format(id_)
image = cv2.imread(file)
image = rescale_intensity(image, out_range=np.uint8)
masks = imread_collection(mfile).concatenate()
train.append(image)
train_mask.append(masks)
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
file = "../input/stage1_test/{}/images/{}.png".format(id_,id_)
image = cv2.imread(file)
image = rescale_intensity(image, out_range=np.uint8)
test.append((image))
def to_flip(img_rgb):
# do not flip colored images
if (img_rgb[:,:,0] != img_rgb[:,:,1]).any():
return img_rgb
    # the green channel happens to produce slightly better results
    # than the grayscale image and the other channels
img_gray=img_rgb[:,:,1]#cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
#morphological opening (size tuned on training data)
circle7=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
img_open=cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, circle7)
#Otsu thresholding
img_th=cv2.threshold(img_open,0,255,cv2.THRESH_OTSU)[1]
#Invert the image in case the objects of interest are in the dark side
if (np.sum(img_th==255)>np.sum(img_th==0)):
return img_rgb
else:
return 255 - img_rgb
train = [to_flip(img_rgb) for img_rgb in train]
test = [to_flip(img_rgb) for img_rgb in test]
def split_aux(img):
height = img.shape[0]
width = img.shape[1]
if height > 2*width:
half = int(height//2)
return [img[:half, :, :], img[half:, :, :]]
elif height > width:
return [img[:width, :, :], img[height-width:, :, :]]
elif width > 2*height:
half = int(width//2)
return [img[:, :half, :], img[:, half:, :]]
else:
return [img[:, :height, :], img[:, width-height:, :]]
def split(img):
s = split_aux(img)
return s
def split_mask_aux(img):
height = img.shape[1]
width = img.shape[2]
if height > 2*width:
half = int(height//2)
return [img[:, :half, :], img[:, half:, :]]
elif height > width:
return [img[:, :width, :], img[:, height-width:, :]]
elif width > 2*height:
half = int(width//2)
return [img[:, :, :half], img[:, :, half:]]
else:
return [img[:, :, :height], img[:, :, width-height:]]
def split_mask(img):
s = split_mask_aux(img)
return s
train_split = [split(img) for img in train]
train_split = [t_split[i] for t_split in train_split for i in [0, 1] ]
train_mask_split = [split_mask(img) for img in train_mask]
train_mask_split = [t_split[i] for t_split in train_mask_split for i in [0, 1] ]
test_split = [split(img) for img in test]
test_split = [t_split[i] for t_split in test_split for i in [0, 1] ]
# Get and resize train images and masks
X_train = np.zeros((len(train_split) * 4, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), np.uint8)
Y_train = np.zeros((len(train_split) * 4, IMG_HEIGHT, IMG_WIDTH, 1), np.uint8)
Z_train = np.zeros((len(train_split) * 4, IMG_HEIGHT, IMG_WIDTH, 3), np.uint8)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, (img, masks) in enumerate(zip(tqdm(train_split), train_mask_split)):
img = img[:, :, :IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
img_mean = np.mean(img, axis=2).astype(np.uint8)
for c in range(IMG_CHANNELS):
img[:,:,c] = img_mean
X_train[n * 4 + 0] = img
X_train[n * 4 + 1] = np.fliplr(img)
X_train[n * 4 + 2] = np.flipud(img)
X_train[n * 4 + 3] = np.flipud(np.fliplr(img))
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_lr = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_ud = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_lr_ud = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
for mask_id in range(masks.shape[0]):
mask_ = masks[mask_id, :, :]
mask_ = mask_ // 255
mask_ = resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
preserve_range=True).astype(np.uint8)
mask = np.maximum(mask, mask_)
mask_lr = np.maximum(mask_lr, np.fliplr(mask_))
mask_ud = np.maximum(mask_ud, np.flipud(mask_))
mask_lr_ud = np.maximum(mask_lr_ud, np.flipud(np.fliplr(mask_)))
Y_train[4*n + 0, :, :, 0] = mask
Y_train[4*n + 1, :, :, 0] = mask_lr
Y_train[4*n + 2, :, :, 0] = mask_ud
Y_train[4*n + 3, :, :, 0] = mask_lr_ud
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_lr = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_ud = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
mask_lr_ud = np.zeros((IMG_HEIGHT, IMG_WIDTH), np.uint8)
for mask_id in range(masks.shape[0]):
mask_ = masks[mask_id, :, :]
mask_ = mask_ // 255
mask_ = resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
preserve_range=True).astype(np.uint8)
mask_ = binary_dilation(mask_, selem=square(3))
mask += mask_
mask_lr += np.fliplr(mask_)
mask_ud += np.flipud(mask_)
mask_lr_ud += np.flipud(np.fliplr(mask_))
Z_train[4*n + 0, :, :, 0] = (mask > 1)
Z_train[4*n + 1, :, :, 0] = (mask_lr > 1)
Z_train[4*n + 2, :, :, 0] = (mask_ud > 1)
Z_train[4*n + 3, :, :, 0] = (mask_lr_ud > 1)
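# Z_train channel layout: 0 = pixels where the dilated instance masks overlap (touching borders),
# 1 = union nuclei mask, 2 = remaining background (channels 1 and 2 are filled in below)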
for i in tqdm(range(len(train_split) * 4)):
Z_train[i, :, :, 1] = Y_train[i, :, :, 0]
Z_train[i, :, :, 2] = np.where(Z_train[i, :, :, 0] == 1, 0, 1 - Y_train[i, :, :, 0])
# Get and resize test images and masks
X_test = np.zeros((len(test_split) * 4, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), np.uint8)
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, img in enumerate(tqdm(test_split)):
img = img[:, :, :IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
img_mean = np.mean(img, axis=2).astype(np.uint8)
for c in range(IMG_CHANNELS):
img[:,:,c] = img_mean
X_test[n * 4 + 0] = img
X_test[n * 4 + 1] = np.fliplr(img)
X_test[n * 4 + 2] = np.flipud(img)
X_test[n * 4 + 3] = np.flipud(np.fliplr(img))
with open('../data/X_train_%s.pkl' % fname, 'wb') as file:
pkl.dump(X_train, file, protocol=pkl.HIGHEST_PROTOCOL)
with open('../data/Z_train_%s.pkl' % fname, 'wb') as file:
pkl.dump(Z_train, file, protocol=pkl.HIGHEST_PROTOCOL)
with open('../data/X_test_%s.pkl' % fname, 'wb') as file:
pkl.dump(X_test, file, protocol=pkl.HIGHEST_PROTOCOL)
with open('../data/X_train_%s.pkl' % fname, 'rb') as file:
X_train= pkl.load(file)
with open('../data/Z_train_%s.pkl' % fname, 'rb') as file:
Z_train = pkl.load(file)
with open('../data/X_test_%s.pkl' % fname, 'rb') as file:
X_test = pkl.load(file)
###Output
_____no_output_____
###Markdown
Let's see if things look all right by drawing some random images and their associated masks.
###Code
# Check if training data looks all right
ix = 23#random.randint(0, len(train_ids))
print(ix, train[ix].shape)
imshow(train[ix])
plt.show()
for i in range(8*ix, 8*ix + 2):
print(i, X_train[i].shape)
imshow(X_train[i])
plt.show()
imshow(255*Z_train[i])
plt.show()
###Output
23 (256, 256, 3)
###Markdown
Build and train our neural network
Next we build our U-Net model, loosely based on [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/pdf/1505.04597.pdf) and very similar to [this repo](https://github.com/jocicmarko/ultrasound-nerve-segmentation) from the Kaggle Ultrasound Nerve Segmentation competition.
###Code
import keras.backend as K
def pixelwise_crossentropy(target, output):
_epsilon = 10e-8
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
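    # Per-pixel weights by target channel (following how Z_train is built above):
    # touching-border pixels x30, nucleus pixels x3, background pixels x1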
weight = 30 * target[:,:,:,0:1] + 3 * target[:,:,:,1:2] + 1 * target[:,:,:,2:3]
return - tf.reduce_sum(target * weight * tf.log(output) +
(1 - target) * tf.log(1 - output),
len(output.get_shape()) - 1)
from keras.engine import Layer
from keras import backend as K
class SpeckleNoise(Layer):
"""Apply multiplicative one-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Speckle Noise (GS) is a natural choice as corruption process
for real valued inputs.
As it is a regularization layer, it is only active at training time.
# Arguments
stddev: float, standard deviation of the noise distribution.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
# @interfaces.legacy_specklenoise_support
def __init__(self, stddev, **kwargs):
super(SpeckleNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
def noised():
return K.clip(inputs * K.random_normal(shape=K.shape(inputs),
mean=1.,
stddev=self.stddev), 0.0, 1.0)
return K.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {'stddev': self.stddev}
base_config = super(SpeckleNoise, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
# Build U-Net model
def get_model(verbose=False):
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
s = SpeckleNoise(0.01)(s) #skimage speckel var defaults to 0.01
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (s)
c1 = Dropout(0.1) (c1)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Dropout(0.1) (c2)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Dropout(0.2) (c3)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Dropout(0.2) (c4)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4)
c5 = Dropout(0.3) (c5)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u6)
c6 = Dropout(0.2) (c6)
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u7)
c7 = Dropout(0.2) (c7)
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u8)
c8 = Dropout(0.1) (c8)
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u9)
c9 = Dropout(0.1) (c9)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c9)
outputs = Conv2D(3, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
optimizer = Adam(clipvalue=5)
model.compile(optimizer=optimizer, loss=pixelwise_crossentropy)
if verbose:
model.summary()
return model
###Output
_____no_output_____
###Markdown
*Update: Changed to ELU units, added dropout.* Next we fit the model on the training data, using 6-fold cross-validation. We use a small batch size because we have so little data. I recommend using checkpointing and early stopping when training your model. I won't do it here to make things a bit more reproducible (although it's very likely that your results will be different anyway). I'll just train for 10 epochs, which takes around 10 minutes in the Kaggle kernel with the current parameters. *Update: Added early stopping and checkpointing and raised the epoch limit to 40 per fold.*
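One optional further tweak, shown here only as a sketch and not used in the training run below, is to also lower the learning rate when the validation loss stops improving, using Keras' built-in `ReduceLROnPlateau` callback:

```python
from keras.callbacks import ReduceLROnPlateau

# Halve the learning rate after 3 epochs without val_loss improvement (illustrative values).
lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                patience=3, min_lr=1e-5, verbose=1)

# It would then be passed alongside the other callbacks, e.g.
# callbacks=[earlystopper, checkpointer, lr_schedule]
```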
###Code
try:
sess.close()
except:
pass
sess = init_seeds(0)
# even number of folds because we duplicate images
kf = KFold(6, shuffle=False)
models = []
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
print('*' * 40)
print('Fold:', fold)
X_train_kf = X_train[train_idx]
X_val_kf = X_train[val_idx]
Z_train_kf = Z_train[train_idx]
Z_val_kf = Z_train[val_idx]
model = get_model(verbose=(fold==0))
models.append(model)
# Fit model
earlystopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint('model_%s_%d.h5' % (fname, fold),
verbose=1, save_best_only=True)
results = model.fit(X_train_kf, Z_train_kf,
validation_data = (X_val_kf, Z_val_kf),
batch_size=2, epochs=40, shuffle=True,
callbacks=[earlystopper, checkpointer])
#sess.close()
###Output
****************************************
Fold: 0
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_7 (InputLayer) (None, 256, 256, 3) 0
__________________________________________________________________________________________________
lambda_7 (Lambda) (None, 256, 256, 3) 0 input_7[0][0]
__________________________________________________________________________________________________
speckle_noise_7 (SpeckleNoise) (None, 256, 256, 3) 0 lambda_7[0][0]
__________________________________________________________________________________________________
conv2d_115 (Conv2D) (None, 256, 256, 16) 448 speckle_noise_7[0][0]
__________________________________________________________________________________________________
dropout_55 (Dropout) (None, 256, 256, 16) 0 conv2d_115[0][0]
__________________________________________________________________________________________________
conv2d_116 (Conv2D) (None, 256, 256, 16) 2320 dropout_55[0][0]
__________________________________________________________________________________________________
max_pooling2d_25 (MaxPooling2D) (None, 128, 128, 16) 0 conv2d_116[0][0]
__________________________________________________________________________________________________
conv2d_117 (Conv2D) (None, 128, 128, 32) 4640 max_pooling2d_25[0][0]
__________________________________________________________________________________________________
dropout_56 (Dropout) (None, 128, 128, 32) 0 conv2d_117[0][0]
__________________________________________________________________________________________________
conv2d_118 (Conv2D) (None, 128, 128, 32) 9248 dropout_56[0][0]
__________________________________________________________________________________________________
max_pooling2d_26 (MaxPooling2D) (None, 64, 64, 32) 0 conv2d_118[0][0]
__________________________________________________________________________________________________
conv2d_119 (Conv2D) (None, 64, 64, 64) 18496 max_pooling2d_26[0][0]
__________________________________________________________________________________________________
dropout_57 (Dropout) (None, 64, 64, 64) 0 conv2d_119[0][0]
__________________________________________________________________________________________________
conv2d_120 (Conv2D) (None, 64, 64, 64) 36928 dropout_57[0][0]
__________________________________________________________________________________________________
max_pooling2d_27 (MaxPooling2D) (None, 32, 32, 64) 0 conv2d_120[0][0]
__________________________________________________________________________________________________
conv2d_121 (Conv2D) (None, 32, 32, 128) 73856 max_pooling2d_27[0][0]
__________________________________________________________________________________________________
dropout_58 (Dropout) (None, 32, 32, 128) 0 conv2d_121[0][0]
__________________________________________________________________________________________________
conv2d_122 (Conv2D) (None, 32, 32, 128) 147584 dropout_58[0][0]
__________________________________________________________________________________________________
max_pooling2d_28 (MaxPooling2D) (None, 16, 16, 128) 0 conv2d_122[0][0]
__________________________________________________________________________________________________
conv2d_123 (Conv2D) (None, 16, 16, 256) 295168 max_pooling2d_28[0][0]
__________________________________________________________________________________________________
dropout_59 (Dropout) (None, 16, 16, 256) 0 conv2d_123[0][0]
__________________________________________________________________________________________________
conv2d_124 (Conv2D) (None, 16, 16, 256) 590080 dropout_59[0][0]
__________________________________________________________________________________________________
conv2d_transpose_25 (Conv2DTran (None, 32, 32, 128) 131200 conv2d_124[0][0]
__________________________________________________________________________________________________
concatenate_25 (Concatenate) (None, 32, 32, 256) 0 conv2d_transpose_25[0][0]
conv2d_122[0][0]
__________________________________________________________________________________________________
conv2d_125 (Conv2D) (None, 32, 32, 128) 295040 concatenate_25[0][0]
__________________________________________________________________________________________________
dropout_60 (Dropout) (None, 32, 32, 128) 0 conv2d_125[0][0]
__________________________________________________________________________________________________
conv2d_126 (Conv2D) (None, 32, 32, 128) 147584 dropout_60[0][0]
__________________________________________________________________________________________________
conv2d_transpose_26 (Conv2DTran (None, 64, 64, 64) 32832 conv2d_126[0][0]
__________________________________________________________________________________________________
concatenate_26 (Concatenate) (None, 64, 64, 128) 0 conv2d_transpose_26[0][0]
conv2d_120[0][0]
__________________________________________________________________________________________________
conv2d_127 (Conv2D) (None, 64, 64, 64) 73792 concatenate_26[0][0]
__________________________________________________________________________________________________
dropout_61 (Dropout) (None, 64, 64, 64) 0 conv2d_127[0][0]
__________________________________________________________________________________________________
conv2d_128 (Conv2D) (None, 64, 64, 64) 36928 dropout_61[0][0]
__________________________________________________________________________________________________
conv2d_transpose_27 (Conv2DTran (None, 128, 128, 32) 8224 conv2d_128[0][0]
__________________________________________________________________________________________________
concatenate_27 (Concatenate) (None, 128, 128, 64) 0 conv2d_transpose_27[0][0]
conv2d_118[0][0]
__________________________________________________________________________________________________
conv2d_129 (Conv2D) (None, 128, 128, 32) 18464 concatenate_27[0][0]
__________________________________________________________________________________________________
dropout_62 (Dropout) (None, 128, 128, 32) 0 conv2d_129[0][0]
__________________________________________________________________________________________________
conv2d_130 (Conv2D) (None, 128, 128, 32) 9248 dropout_62[0][0]
__________________________________________________________________________________________________
conv2d_transpose_28 (Conv2DTran (None, 256, 256, 16) 2064 conv2d_130[0][0]
__________________________________________________________________________________________________
concatenate_28 (Concatenate) (None, 256, 256, 32) 0 conv2d_transpose_28[0][0]
conv2d_116[0][0]
__________________________________________________________________________________________________
conv2d_131 (Conv2D) (None, 256, 256, 16) 4624 concatenate_28[0][0]
__________________________________________________________________________________________________
dropout_63 (Dropout) (None, 256, 256, 16) 0 conv2d_131[0][0]
__________________________________________________________________________________________________
conv2d_132 (Conv2D) (None, 256, 256, 16) 2320 dropout_63[0][0]
__________________________________________________________________________________________________
conv2d_133 (Conv2D) (None, 256, 256, 3) 51 conv2d_132[0][0]
==================================================================================================
Total params: 1,941,139
Trainable params: 1,941,139
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
All right, looks good! Loss seems to be a bit erratic, though. I'll leave it to you to improve the model architecture and parameters!
Make predictions
Let's make predictions both on the test set, the val set and the train set (as a sanity check). Remember to load the best saved model if you've used early stopping and checkpointing.
###Code
try:
sess.close()
except:
pass
sess = init_seeds(0)
# even number of folds because we duplicate images
kf = KFold(6, shuffle=False)
preds_train = np.zeros(Z_train.shape)
preds_test = 0
# Predict on train, val and test
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
model = load_model('model_%s_%d.h5' % (fname, fold),
custom_objects={'pixelwise_crossentropy':pixelwise_crossentropy,
'SpeckleNoise':SpeckleNoise,
})
X_val_kf = X_train[val_idx]
preds_train[val_idx] = model.predict(X_val_kf, verbose=1)
preds_test += model.predict(X_test, verbose=1)
preds_test /= 6
# Create list of upsampled train preds
preds_train_upsampled = []
for i in tqdm(range(len(train_split))):
train_i = train_split[i]
shape = (train_i.shape[0], train_i.shape[1], 3)
pred = resize((preds_train[4*i + 0]),
shape,
mode='constant', preserve_range=True)
pred += np.fliplr(resize((preds_train[4*i + 1]),
shape,
mode='constant', preserve_range=True))
pred += np.flipud(resize((preds_train[4*i + 2]),
shape,
mode='constant', preserve_range=True))
pred += np.flipud(np.fliplr(resize((preds_train[4*i + 3]),
shape,
mode='constant', preserve_range=True)))
#pred = (pred > 4*threshold).astype(np.uint8)
pred /= 4
preds_train_upsampled.append(pred)
def merge(img1, img2, shape):
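    # Blend the predictions of the two (possibly overlapping) crops of one image back into a
    # single full-size prediction, down-weighting pixels close to each crop's border.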
ov2 = 5
height = shape[0]
width = shape[1]
img = np.zeros((height, width, 3), dtype=np.float32)
w = np.zeros((height, width, 1), dtype=np.float32)
height1 = img1.shape[0]
width1 = img1.shape[1]
height2 = img2.shape[0]
width2 = img2.shape[1]
w1 = 10*ov2*np.ones((height1, width1, 1), dtype=np.float32)
w2 = 10*ov2*np.ones((height2, width2, 1), dtype=np.float32)
for i in range(ov2, 0, -1):
w1[i-1,:] = 10*i
w1[height1 - i, :] = 10*i
w1[:, i-1] = 10*i
w1[:, width1 - i] = 10*i
w2[i-1,:] = 10*i
w2[height2 - i, :] = 10*i
w2[:, i-1] = 10*i
w2[:, width2 - i] = 10*i
if height > 2*width:
half = int(height//2)
img[:half, :, :] += w1*img1
img[half:, :, :] += w2*img2
w[:half, :] += w1
w[half:, :] += w2
img /= w
elif height > width:
img[:width, :, :] += w1*img1
img[height-width:, :, :] += w2*img2
w[:width, :] += w1
w[height-width:, :] += w2
img /= w
elif width > 2*height:
half = int(width//2)
img[:, :half, :] += w1*img1
img[:, half:, :] += w2*img2
w[:, :half] += w1
w[:, half:] += w2
img /= w
else:
img[:, :height, :] += w1*img1
img[:, width-height:, :] += w2*img2
w[:, :height] += w1
        w[:, width-height:] += w2
img /= w
return (255*img).astype(np.uint8)
preds_train_merged = []
for ix in tqdm(range(len(train))):
merged = merge(preds_train_upsampled[2*ix+0],
preds_train_upsampled[2*ix+1],
train[ix].shape
)
preds_train_merged.append(merged)
preds_test_upsampled = []
for i in tqdm(range(len(test_split))):
test_i = test_split[i]
pred = resize(np.squeeze(preds_test[4*i + 0]),
(test_i.shape[0], test_i.shape[1]),
mode='constant', preserve_range=True)
pred += np.fliplr(resize(np.squeeze(preds_test[4*i + 1]),
(test_i.shape[0], test_i.shape[1]),
mode='constant', preserve_range=True))
pred += np.flipud(resize(np.squeeze(preds_test[4*i + 2]),
(test_i.shape[0], test_i.shape[1]),
mode='constant', preserve_range=True))
pred += np.flipud(np.fliplr(resize(np.squeeze(preds_test[4*i + 3]),
(test_i.shape[0], test_i.shape[1]),
mode='constant', preserve_range=True)))
#pred = (pred > 4*threshold).astype(np.uint8)
pred /= 4
preds_test_upsampled.append(pred)
preds_test_merged = []
for ix in tqdm(range(len(test))):
merged = merge(preds_test_upsampled[2*ix+0],
preds_test_upsampled[2*ix+1],
test[ix].shape
)
preds_test_merged.append(merged)
ix = 17
imshow(train[ix])
plt.show()
imshow(train_split[2*ix+0])
plt.show()
imshow(np.sum(train_mask_split[2*ix+0], axis=0))
plt.show()
imshow((preds_train_upsampled[2*ix+0]))
plt.show()
imshow(merge(preds_train_upsampled[2*ix+0],
preds_train_upsampled[2*ix+1],
train[ix].shape
))
plt.show()
from skimage.morphology import label
def get_labels(y):
labels = np.zeros((y.shape[1], y.shape[2]))
for i in range(y.shape[0]):
labels = np.where(y[i,:,:] > 0, i+1, labels)
return labels
def iou_score_cuk(y_true, y_pred, verbose=True, thresholds=np.arange(0.5, 1.0, 0.05)):
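    # Mean average precision over IoU thresholds 0.5-0.95 (step 0.05), with precision at each
    # threshold computed as TP / (TP + FP + FN) over matched object instances.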
y_true = get_labels(y_true)
y_pred = get_labels(y_pred)
# Compute number of objects
true_objects = len(np.unique(y_true))
pred_objects = len(np.unique(y_pred))
if verbose:
print("Number of true objects:", true_objects - 1)
print("Number of predicted objects:", pred_objects - 1)
intersection = np.histogram2d(y_true.flatten(), y_pred.flatten(),
bins=(true_objects, pred_objects))[0].astype('int')
area_true = np.histogram(y_true, bins = true_objects)[0]
area_true = np.expand_dims(area_true, -1)
area_pred = np.histogram(y_pred, bins = pred_objects)[0]
area_pred = np.expand_dims(area_pred, 0)
union = area_true + area_pred - intersection
intersection = intersection[1:,1:]
union = union[1:,1:]
union[union == 0] = 1e-9
iou = intersection / union
# Precision helper function
def precision_at(threshold, iou):
matches = iou > threshold
true_positives = np.sum(matches, axis=1) == 1 # Correct objects
        false_positives = np.sum(matches, axis=0) == 0  # Extra objects (predictions with no match)
        false_negatives = np.sum(matches, axis=1) == 0  # Missed objects (true objects with no match)
tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
return tp, fp, fn
prec = []
if verbose:
print("Thresh\tTP\tFP\tFN\tPrec.")
for t in thresholds:
tp, fp, fn = precision_at(t, iou)
if (tp + fp + fn) == 0:
p = 1
else:
p = tp / (tp + fp + fn)
if verbose:
print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
prec.append(p)
if verbose:
print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
return np.mean(prec)
from scipy.ndimage.morphology import binary_fill_holes
def get_pred_watershed(upsampled, area_threshold, threshold, sep_threshold,
spread_threshold, alpha, connectivity=2):
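    # 1) threshold the nucleus channel while suppressing predicted touching-border pixels,
    # 2) fill holes, drop small blobs and label the result to obtain seed markers,
    # 3) grow the seeds with a watershed on the negated nucleus channel, keeping only pixels
    #    above the spread threshold, and finally discard instances below the area threshold.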
img = ((upsampled[:,:,1] > 255 * threshold) &
(upsampled[:,:,0] < 255 * sep_threshold))
img = binary_fill_holes(img)
img = remove_small_objects(img, area_threshold)
lab_img = label(img, connectivity=connectivity)
distance = upsampled[:,:,1] + alpha * upsampled[:,:,0]
img = 1 * ((distance > 255 * spread_threshold) )
lab_img = img * watershed(- upsampled[:,:,1], lab_img)
y_pred = np.zeros((lab_img.max(), lab_img.shape[0], lab_img.shape[1]), np.uint16)
i = 0
for lab in range(lab_img.max()):
tmp = (lab_img == lab+1)
if np.sum(tmp.ravel()) > area_threshold:
y_pred[i,:,:] = tmp
i += 1
return y_pred[:i]
from scipy.ndimage.morphology import binary_fill_holes
def get_pred_random_walker(upsampled, area_threshold, threshold, sep_threshold,
spread_threshold, alpha, connectivity=2):
img = ((upsampled[:,:,1] > 255 * threshold) &
(upsampled[:,:,0] < 255 * sep_threshold))
img = binary_fill_holes(img)
img = remove_small_objects(img, area_threshold)
markers = label(img, connectivity=connectivity)
distance = upsampled[:,:,1] + alpha * upsampled[:,:,0]
mask = ((distance > 255 * spread_threshold) )
markers[~mask] = -1
lab_img = random_walker(mask, markers)
y_pred = np.zeros((lab_img.max(), lab_img.shape[0], lab_img.shape[1]), np.uint16)
i = 0
for lab in range(lab_img.max()):
tmp = (lab_img == lab+1)
if np.sum(tmp.ravel()) > area_threshold:
y_pred[i,:,:] = tmp
i += 1
return y_pred[:i]
def get_pred(upsampled, area_threshold, threshold, sep_threshold,
spread_threshold, alpha, connectivity=2):
try:
return get_pred_random_walker(upsampled, area_threshold, threshold,
sep_threshold, spread_threshold,
alpha, connectivity)
except:
return get_pred_watershed(upsampled, area_threshold, threshold,
sep_threshold, spread_threshold,
alpha, connectivity)
area_threshold = 20
threshold = 0.75
sep_threshold = 0.6
spread_threshold = 0.4
alpha=0.4
def get_pred(upsampled, area_threshold=area_threshold,
threshold=threshold, sep_threshold=sep_threshold,
spread_threshold=spread_threshold, alpha=alpha, connectivity=2):
try:
return get_pred_random_walker(upsampled, area_threshold, threshold,
sep_threshold, spread_threshold,
alpha, connectivity=2)
except:
return get_pred_watershed(upsampled, area_threshold, threshold,
sep_threshold, spread_threshold,
alpha, connectivity=2)
# Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0]
run_lengths = []
prev = -2
for b in dots:
if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
def pred_to_rles(y_pred):
for i in range(y_pred.shape[0]):
tmp = y_pred[i]
yield rle_encoding(tmp)
score = np.zeros(len(train))
new_train_ids = []
rles = []
for n, id_ in enumerate(tqdm(train_ids)):
y_pred = get_pred(preds_train_merged[n])
score[n] = iou_score_cuk(train_mask[n], y_pred, verbose=False)
rle = list(pred_to_rles(y_pred))
rles.extend(rle)
new_train_ids.extend([id_] * len(rle))
print(len(rles))
sub = pd.DataFrame()
sub['ImageId'] = new_train_ids
sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
sub.to_csv('../submissions/keras_unet_67_train.csv', index=False)
train_score = np.mean(score)
print('%0.5f' % train_score)
new_test_ids = []
rles = []
for n, id_ in tqdm(enumerate(test_ids)):
y_pred = get_pred(preds_test_merged[n])
rle = list(pred_to_rles(y_pred))
rles.extend(rle)
new_test_ids.extend([id_] * len(rle))
print(len(rles))
# Create submission DataFrame
sub = pd.DataFrame()
sub['ImageId'] = new_test_ids
sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
sub.to_csv('../submissions/keras_unet_67_test.csv', index=False)
with open('../data/%s_train_pred.pkl' % fname, 'wb') as file:
pkl.dump(preds_train_merged, file)
with open('../data/%s_test_pred.pkl' % fname, 'wb') as file:
pkl.dump(preds_test_merged, file)
###Output
_____no_output_____ |
Weights Widget.ipynb | ###Markdown
To use the notebook, please click Cell → Run All.
###Code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="View/Hide Code"></form>''')
%matplotlib widget
# %matplotlib notebook
import ipywidgets as widgets
from ipywidgets import Layout
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from matplotlib.gridspec import GridSpec
import random
from palettable.colorbrewer.sequential import Greys_9
class app_test(object):
def __init__(self):
#Setting all the data variables as instance variables here and in the on_button_clicked function
self.out = widgets.Output()
self.hys_per_side = 101
self.beta_values = np.around(np.linspace(0, 1, self.hys_per_side),2) # beta values
self.alpha_values = np.around(np.linspace(1,0, self.hys_per_side),2) # alpha values
self.beta_grid, self.alpha_grid = np.meshgrid(self.beta_values, self.alpha_values)
#Dropdowns:
self.dropdown1 = widgets.Dropdown(value='top_heavy',
options=['none','uniform', 'linear', 'top_heavy', 'right_heavy','left_heavy','center_light_alpha','center_light_beta',
'center_heavy_alpha','single_line','upper_left'],
description ="",layout = Layout(width='200px')
)
self.dropdown1.observe(self.on_button_clicked)
self.dropdown2 = widgets.Dropdown(value='none',
options=['none','uniform', 'linear', 'top_heavy', 'right_heavy','left_heavy','center_light_alpha','center_light_beta',
'center_heavy_alpha','single_line','upper_left'],
description ="",layout = Layout(width='200px')
)
self.dropdown2.observe(self.on_button_clicked)
#Slider
self.sinputs = widgets.FloatSlider(
value=0,
min=0, max=1, step=0.01,
description='input $u$',
continuous_update=True,
layout=Layout(width='50%')
)
self.sinputs.observe(self.update_app, 'value')
#Reset Button
self.reset_button = widgets.Button(
description='Reset / Start',
icon="trash",
style={'font_weight': 'bold', 'button_color': 'yellow'}
)
self.reset_button.on_click(self.on_button_clicked)# if you click it will activate the function
self.on_button_clicked(1) # We force a click to reset all the plots
def center_heavy_alpha_beta(self, m):
mu = np.where(self.alpha_grid>=self.beta_grid, np.abs(0.5-m), np.nan)
mu = np.where(self.alpha_grid>=self.beta_grid, np.abs(mu-1), np.nan)
return mu
def weights(self, method):
# -----INPUTS-----Its the same as the original
alpha_grid = self.alpha_grid
beta_grid = self.beta_grid
if method=="none":
return np.array([[0],[0]])
mu = np.zeros((self.hys_per_side, self.hys_per_side))#creating empty array
mu = {
'uniform': np.where(alpha_grid>=beta_grid, 1, np.nan),
'linear': np.where(alpha_grid==beta_grid, 1, np.nan),
'top_heavy': np.where(alpha_grid>=beta_grid, alpha_grid, np.nan),
'bottom_heavy': np.where(alpha_grid>=beta_grid, 1-alpha_grid, np.nan),
'right_heavy': np.where(alpha_grid>=beta_grid, beta_grid, np.nan),
'left_heavy': np.where(alpha_grid>=beta_grid, 1-beta_grid, np.nan),
'center_light_alpha': np.where(alpha_grid>=beta_grid, np.abs(0.5-alpha_grid), np.nan),
'center_light_beta': np.where(alpha_grid>=beta_grid, np.abs(0.5-beta_grid), np.nan),
'center_heavy_alpha': self.center_heavy_alpha_beta(alpha_grid),
'center_heavy_beta': self.center_heavy_alpha_beta(beta_grid),
'single_line': np.where(np.logical_and(0.3<beta_grid, beta_grid<0.5), 1, 0),
'upper_left': np.where(np.logical_and(0.6>beta_grid, alpha_grid>0.95), 1, 0)
        }[method] # Look up the weight pattern for the chosen method in the dict above
mu = np.where(alpha_grid>=beta_grid, mu, np.nan)# Getting rid of the other triangle
return mu / np.nansum(mu)
def RegularPreisach(self, u, mu, outputs):
"""Simulation of discrete scalar Preisach model of hysteresis with single input
determined by slider
-----INPUTS-----
u -- 1d array of previous input values
alpha/beta -- 2d array of alpha/beta coordinates of hysterons
mu -- 2d array with weights of hysterons
preisach_triangle -- 2d array showing whether a given hysteron is on or off
outputs -- 1d array with output values
-----OUTPUTS-----
preisach_triangle -- 2d array showing whether a given hysteron is on or off
outputs -- 1d array with output values
"""
alpha = self.alpha_grid
beta = self.beta_grid
preisach_triangle = self.preisach_triangle
# compare new input to previous input value and change hysteron values accordingly
"""This if u.new==0 is in order to cancel the last thin black patch in the left side
of the priesach trianle and make it a whole gray when u=0"""
if u.new==0:
preisach_triangle=np.where(self.alpha_grid>self.beta_grid, 0, np.nan)
preisach_triangle[-1][0]=1
self.inputs[-1]=-0.01
elif u.new > u.old: # if input increases
preisach_triangle = np.where(u.new>alpha, 1, preisach_triangle)
        elif u.new < u.old: # if input decreases
preisach_triangle = np.where(u.new<beta, 0, preisach_triangle)
# values outside the presiach half-plane are set to nan
preisach_triangle = np.where(alpha>=beta, preisach_triangle, np.nan)
# calculate weighted presiach triangle
weighted_preisach = preisach_triangle*mu
# new output value
f = np.nansum(weighted_preisach)
outputs = np.concatenate((outputs, np.array([f])))
return outputs, preisach_triangle
def draw_arrow(self,ax, start, end):
ax.annotate('', xy=end, xytext=start, xycoords='data', textcoords='data',
arrowprops=dict(headwidth=4.0, headlength=4.0, width=0.2,
facecolor = "black", linewidth = 0.5),zorder=0)
def on_button_clicked(self, b):
"""The all_info array holds all the lines info by:
0 1
0 dropdown1 value dict of line 1 - {"mu": mu1 ,
"outputs" : outputs1,
"plot" : plot 1 }
1 dropdown2 value dict of line 2 - {"mu": mu2 ,
"outputs" : outputs2,
"plot" : plot 2 }
That means row 0 is for line 1 and row 1 is for line 2.
-----------------------------------------------------------------------------------------------
1) For example if we want to get mu1 -
all_info[0][1]["mu"]
^
line 1
2) If we want to get outputs2 -
all_info[1][1]["outputs"]
^
line 2
The column will always be 1 unless we want to use the dropdown value
"""
self.preisach_triangle = np.where(self.alpha_grid>self.beta_grid, 0, np.nan)
dict1 ={}
dict2 ={}
self.all_info = np.array([[str(self.dropdown1.value),dict1],[str(self.dropdown2.value),dict2]])
self.hys_per_side=101
self.all_info[0][1].update({'mu' : self.weights(str(self.dropdown1.value))})
self.all_info[1][1].update({'mu' : self.weights(str(self.dropdown2.value))})
self.all_info[0][1].update({'outputs' : np.array([np.nansum(self.preisach_triangle)])})
self.all_info[1][1].update({'outputs' : np.array([np.nansum(self.preisach_triangle)])})
self.initial_input = 0
self.inputs = np.array([self.initial_input])
#left=0.50, right=0.95,
#left=0.10, right=0.35,
# Resetting plots
plt.clf()
self.fig = plt.figure(1, figsize=(10,5))
gs1 = GridSpec(1, 1, width_ratios=[1],
height_ratios=[1],
left=0.50, right=0.95,
bottom=0.05, top=0.9,
wspace=0, hspace=0)
gs2 = GridSpec(1, 1, width_ratios=[1],
height_ratios=[1],
left=0.05, right=0.35,
bottom=0.50, top=0.9,
wspace=0, hspace=0)
gs3 = GridSpec(1, 2, width_ratios=[1,1],
height_ratios=[1],
left=-0, right=0.35,
bottom=0.05, top=0.45,
wspace=0.55, hspace=0.5)
self.ax1 = plt.subplot(gs1[0, 0])
self.ax2 = plt.subplot(gs2[0, 0])
self.ax2.set_aspect('equal', adjustable='box')
self.ax3 = plt.subplot(gs3[0, 0])
self.ax4 = plt.subplot(gs3[0, 1])
# Main graph
plt.subplots_adjust(bottom = 0.0,hspace = 1)
self.ax1.set_xlim([-0.05, 1.05])
self.ax1.set_ylim([-0.05, 1.05])
self.ax1.axis('off')
self.ax1.text(-0.1, 1.12, "Output")
self.ax1.text(1.1, -0.05, r"$u$")
self.draw_arrow(self.ax1, (-0.05, -0.05),(-0.05, 1.05))
self.draw_arrow(self.ax1, (-0.05, -0.05),(1.05, -0.05))
self.all_info[0][1].update({'plot' : self.ax1.plot(self.inputs[0] , self.all_info[0][1]["outputs"][0],
color="tab:red",label="Weight 1")[0]})
self.all_info[0][1].update({'marker' : self.ax1.plot(-1 ,-1
,marker='o',
color="tab:red")[0]})
self.all_info[1][1].update({'plot' : self.ax1.plot(self.inputs[0] , self.all_info[1][1]["outputs"][0],
color="tab:blue",label="Weight 2")[0]})
self.all_info[1][1].update({'marker' : self.ax1.plot(-1 ,-1
,marker='o',
color="tab:blue")[0]})
self.ax1.legend(loc="upper left")
# The two weight triangles
self.my_Reds = self.truncate_colormap(plt.get_cmap('Reds'), 0.2, 1.0)
if self.all_info[0][0]!="none":
self.cb3 = self.fig.colorbar(self.ax3.imshow(np.fliplr(np.flip(self.all_info[0][1]["mu"])), cmap=self.my_Reds,
vmin=0-0*np.nanmax(self.all_info[0][1]["mu"]),vmax=np.nanmax(self.all_info[0][1]["mu"]) )
,ax=self.ax3,fraction=0.046, pad=0.04)
self.cb3.set_ticks([self.cb3.vmin,self.cb3.vmax])
self.cb3.set_ticklabels(["Light\nWeights","Heavy\nWeights"])
self.cb3.ax.tick_params(labelsize=8)
self.ax3.set_xlim([-0.05, 100.05])
self.ax3.set_ylim([-0.05, 100.05])
self.ax3.set_xticks([])
self.ax3.set_yticks([])
self.ax3.spines['top'].set_visible(False)
self.ax3.spines['right'].set_visible(False)
self.ax3.spines['bottom'].set_visible(False)
self.ax3.spines['left'].set_visible(False)
self.ax3.set_xlabel('\n Weight 1 ')
self.my_Blues = self.truncate_colormap(plt.get_cmap('Blues'), 0.2, 1.0)
if self.all_info[1][0]!="none":
self.cb4 = self.fig.colorbar(self.ax4.imshow(np.fliplr(np.flip(self.all_info[1][1]["mu"])), cmap=self.my_Blues,
vmin=0-0*np.nanmax(self.all_info[1][1]["mu"]),vmax=np.nanmax(self.all_info[1][1]["mu"]) )
,ax=self.ax4,fraction=0.046, pad=0.04)
self.cb4.set_ticks([self.cb4.vmin,self.cb4.vmax])
self.cb4.set_ticklabels(["Light\nWeights","Heavy\nWeights"])
self.cb4.ax.tick_params(labelsize=8)
self.ax4.set_xlim([-0.05, 100.05])
self.ax4.set_ylim([-0.05, 100.05])
self.ax4.set_xticks([])
self.ax4.set_yticks([])
self.ax4.spines['top'].set_visible(False)
self.ax4.spines['right'].set_visible(False)
self.ax4.spines['bottom'].set_visible(False)
self.ax4.spines['left'].set_visible(False)
self.ax4.set_xlabel('\n Weight 2 ')
# The interactive preisach triangles
self.ax2.set_xlim([-0.003,1])
self.ax2.set_ylim([0,1])
self.ax2.axis('off')
self.ax2.set_title("Limit Triangle\n")
self.ax2.text(1.01, 0, "\u03B2")
self.ax2.text(-0.02, 1.09, "\u03B1")
self.ax2.annotate('', xy=(0, 0), xycoords=('data'),
xytext=(0, 1.075), textcoords='data',
ha='left', va='center',
arrowprops=dict(arrowstyle='<|-', fc='black'),zorder=2)
self.draw_arrow(self.ax2, (0,0),(0.983, 0))
self.x = np.linspace(0, 1, self.hys_per_side)
self.ax2.fill_between(self.x, self.x, 1, color="gray")
self.on_state = self.ax2.fill_between([0], [0], 0, color="black")
# After ploting all plots we can get rid of the none line
self.all_info = np.delete(self.all_info, np.where(self.all_info == "none")[0], axis=0)
# Resseting slider value - MUST BE in the end of function
"""
        Every time reset was activated, xy_on would be called on an all-zero Preisach triangle
        and raise errors. That is why, after displaying the triangle as all zeros, we set a single
        hysteron in the bottom corner to 1. This keeps xy_on from failing:
def xy_on(self, triangle):
m = triangle
d = np.diff(m,axis=0) <---- never be empty
x = np.where(d==1)[1] <---- never be empty
x = np.append(x, x[-1]+1)
^
never try to search last element in an empty array
...
"""
self.preisach_triangle[-1][0]=1
        # This block runs on every reset and leaves the slider at a value of 1.
        self.sinputs.value=0 # First set u to 0 so that the change to 1 below always triggers an update, regardless of the previous value.
self.sinputs.observe(self.update_app, 'value')
self.sinputs.value=1
self.sinputs.observe(self.update_app, 'value')
self.inputs = [1]
for line in self.all_info:
line=line[1] # Using only the dict of each line
y = line["plot"].get_ydata()[-1]
line["plot"].set_ydata([])
line["plot"].set_xdata([])
line["outputs"]=[y]
def xy_on(self, triangle):
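        # Trace the staircase boundary between switched-on (1) and switched-off (0) hysterons in
        # the Preisach triangle; returns x/y coordinates scaled to [0, 1] for plotting.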
m = triangle
d = np.diff(m,axis=0) # Calculate the difference between an element and the above
x = np.where(d==1)[1] # Getting all the x-columns coordinate
y = self.hys_per_side-np.where(d==1)[0]-1 # Getting all the y-raws coordinate
x = np.append(x, x[-1]+1) # adding missing point at...
y = np.append(y, y[-1]) # the end of the array
step_index = np.where(np.diff(y)!= 0)[0]
x = np.insert(x, step_index+1, x[step_index+1])
y = np.insert(y, step_index, y[step_index])
return [x/(self.hys_per_side-1), y/(self.hys_per_side-1)]
def update_app(self, u):
self.inputs = np.concatenate((self.inputs, np.array([u.new])))
for line in self.all_info:
line=line[1] # Using only the dict of each line
line["outputs"], self.preisach_triangle = self.RegularPreisach(u, line["mu"] , line["outputs"])
line["plot"].set_ydata(line["outputs"])
line["plot"].set_xdata(self.inputs)
line["marker"].set_ydata(line["outputs"][-1])
line["marker"].set_xdata(self.inputs[-1])
self.on_state.remove()
x_on, y_on = self.xy_on(self.preisach_triangle)
self.on_state = self.ax2.fill_between(x_on, x_on, y_on, color="black")
def truncate_colormap(self, cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
my_app_test = app_test()
widgets.HBox([my_app_test.dropdown1, my_app_test.dropdown2, my_app_test.sinputs,my_app_test.reset_button])
###Output
_____no_output_____ |
cia_factbook_sql.ipynb | ###Markdown
Analyzing CIA Factbook Data Using SQL
In this project, we'll work with data from the CIA World Factbook, a compendium of statistics about all of the countries on Earth. The Factbook contains demographic information like the following:
- ```population``` — the global population.
- ```population_growth``` — the annual population growth rate, as a percentage.
- ```area``` — the total land and water area.
We will use SQL in Jupyter to analyze the data.
###Code
%%capture
%load_ext sql
%sql sqlite:///factbook.db
###Output
_____no_output_____
###Markdown
Overview of the Data
Let's start with a simple query to return basic information about the database.
###Code
%%sql
SELECT *
FROM sqlite_master
WHERE type='table';
%%sql
SELECT *
FROM facts
LIMIT 5;
###Output
Done.
###Markdown
Here is the data dictionary for our ```facts``` database:
- ```name``` — the name of the country.
- ```area``` — the country's total area (both land and water).
- ```area_land``` — the country's land area in square kilometers.
- ```area_water``` — the country's water area in square kilometers.
- ```population``` — the country's population.
- ```population_growth``` — the country's population growth as a percentage.
- ```birth_rate``` — the country's birth rate, or the number of births per year per 1,000 people.
- ```death_rate``` — the country's death rate, or the number of deaths per year per 1,000 people.
Summary Statistics
###Code
%%sql
SELECT MIN(population), MAX(population), MIN(population_growth), MAX(population_growth)
FROM facts;
###Output
Done.
###Markdown
A minimum population of 0 must be an error in the data, so let's look into that. Similarly, a population growth of 0 is also suspicious. The maximum population of 7.2 billion is about the population of the world. Let's look into these outliers.
###Code
%%sql
SELECT name, population, population_growth
FROM facts
WHERE population == 0;
%%sql
SELECT name, population, population_growth
FROM facts
WHERE population == (SELECT MAX(population)
FROM facts);
%%sql
SELECT name, population, population_growth
FROM facts
WHERE population_growth == 0;
###Output
Done.
###Markdown
We see there are entries for Antarctica and the World, which account for our smallest and largest populations. Let's take care not to include these in future analysis. There are a few countries with no population growth, mostly ones with very small populations. The exception is Greenland. For all of these, the reported growth of zero could be a result of rounding. Let's recalculate the summary statistics we did earlier, excluding the World and Antarctica.
###Code
%%sql
SELECT MIN(population), MAX(population), MIN(population_growth), MAX(population_growth)
FROM facts
WHERE name <> 'Antarctica'
AND name <> 'World';
###Output
Done.
###Markdown
Exploring Average Population and Area
###Code
%%sql
SELECT AVG(population) AS 'Avg Pop', AVG(area) AS 'Avg Area'
FROM facts
WHERE name <> 'Antarctica'
AND name <> 'World';
###Output
Done.
###Markdown
Finding Densely Populated Countries
###Code
%%sql
SELECT name, population, area
FROM facts
WHERE population > (SELECT AVG(population)
FROM facts
WHERE name <> 'Antarctica'
AND name <> 'World')
AND area < (SELECT AVG(area)
FROM facts
WHERE name <> 'Antarctica'
AND name <> 'World');
###Output
Done.
###Markdown
Which countries will add the most people to their populations next year?
###Code
%%sql
SELECT name, population AS 'Current Population',
       ROUND((population_growth * population / 100), 0) AS 'Expected New People'
FROM facts
WHERE name <> 'World'
ORDER BY (population_growth * population) DESC
LIMIT 5;
###Output
Done.
###Markdown
Which countries have more water than land?
###Code
%%sql
SELECT name, ROUND((CAST(area_water as float) / CAST(area as float)), 3) AS 'Fraction Water'
FROM facts
WHERE area_water > area_land;
###Output
Done.
|
notebooks/ntbk05_backtrader_quickstart.ipynb | ###Markdown
Basic Setup
###Code
from __future__ import (absolute_import, division, print_function, unicode_literals)
import backtrader as bt
if __name__ == '__main__':
cerebro = bt.Cerebro()
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
cerebro.run()
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 10000.00
Final Portfolio Value: 10000.00
###Markdown
Setting the Cash
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
if __name__ == '__main__':
cerebro = bt.Cerebro()
cerebro.broker.setcash(100000.0)
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
cerebro.run()
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
Final Portfolio Value: 100000.00
###Markdown
Adding a Data Feed
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
# Do not pass values after this date
todate=datetime.datetime(2000, 12, 31),
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
Final Portfolio Value: 100000.00
###Markdown
Our First Strategy
###Code
# from __future__ import (absolute_import, division, print_function,
# unicode_literals)
# import datetime # For datetime objects
# import os.path # To manage paths
# import sys # To find out the script name (in argv[0])
# # Import the backtrader platform
# import backtrader as bt
# # Create a Stratey
# class TestStrategy(bt.Strategy):
# def log(self, txt, dt=None):
# ''' Logging function for this strategy'''
# dt = dt or self.datas[0].datetime.date(0)
# print('%s, %s' % (dt.isoformat(), txt))
# def __init__(self):
# # Keep a reference to the "close" line in the data[0] dataseries
# self.dataclose = self.datas[0].close
# def next(self):
# # Simply log the closing price of the series from the reference
# self.log('Close, %.2f' % self.dataclose[0])
# if __name__ == '__main__':
# # Create a cerebro entity
# cerebro = bt.Cerebro()
# # Add a strategy
# cerebro.addstrategy(TestStrategy)
# # Datas are in a subfolder of the samples. Need to find where the script is
# # because it could have been called from anywhere
# modpath = r"../data/raw"
# datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# bt.feeds.GenericCSVData()
# # Create a Data Feed
# data = bt.feeds.YahooFinanceCSVData(
# dataname=datapath,
# # Do not pass values before this date
# fromdate=datetime.datetime(2000, 1, 1),
# # Do not pass values before this date
# todate=datetime.datetime(2000, 12, 31),
# # Do not pass values after this date
# reverse=False)
# # Add the Data Feed to Cerebro
# cerebro.adddata(data)
# # Set our desired cash start
# cerebro.broker.setcash(100000.0)
# # Print out the starting conditions
# print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# # Run over everything
# cerebro.run()
# # Print out the final result
# print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
_____no_output_____
###Markdown
Adding some Logic to the Strategy
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
# previous close less than the previous close
# BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
self.buy()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
2000-01-03, Close, 26.27
2000-01-04, Close, 23.95
2000-01-05, Close, 22.68
2000-01-05, BUY CREATE, 22.68
2000-01-06, Close, 21.35
2000-01-06, BUY CREATE, 21.35
2000-01-07, Close, 22.99
2000-01-10, Close, 25.74
2000-01-11, Close, 24.99
2000-01-12, Close, 23.49
2000-01-12, BUY CREATE, 23.49
2000-01-13, Close, 23.36
2000-01-13, BUY CREATE, 23.36
2000-01-14, Close, 23.75
2000-01-18, Close, 24.74
2000-01-19, Close, 25.41
2000-01-20, Close, 26.35
2000-01-21, Close, 26.55
2000-01-24, Close, 24.10
2000-01-25, Close, 25.10
2000-01-26, Close, 24.49
2000-01-27, Close, 23.04
2000-01-27, BUY CREATE, 23.04
2000-01-28, Close, 21.07
2000-01-28, BUY CREATE, 21.07
2000-01-31, Close, 22.22
2000-02-01, Close, 24.02
2000-02-02, Close, 24.16
2000-02-03, Close, 25.21
2000-02-04, Close, 25.71
2000-02-07, Close, 26.66
2000-02-08, Close, 26.49
2000-02-09, Close, 26.66
2000-02-10, Close, 27.71
2000-02-11, Close, 26.55
2000-02-14, Close, 27.66
2000-02-15, Close, 27.30
2000-02-16, Close, 27.24
2000-02-16, BUY CREATE, 27.24
2000-02-17, Close, 27.41
2000-02-18, Close, 26.05
2000-02-22, Close, 26.38
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-02, BUY CREATE, 30.47
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-13, Close, 35.02
2000-03-13, BUY CREATE, 35.02
2000-03-14, Close, 34.25
2000-03-14, BUY CREATE, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-20, BUY CREATE, 34.75
2000-03-21, Close, 35.89
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-29, BUY CREATE, 36.69
2000-03-30, Close, 34.88
2000-03-30, BUY CREATE, 34.88
2000-03-31, Close, 34.72
2000-03-31, BUY CREATE, 34.72
2000-04-03, Close, 34.19
2000-04-03, BUY CREATE, 34.19
2000-04-04, Close, 33.77
2000-04-04, BUY CREATE, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, BUY CREATE, 34.41
2000-04-12, Close, 32.52
2000-04-12, BUY CREATE, 32.52
2000-04-13, Close, 31.99
2000-04-13, BUY CREATE, 31.99
2000-04-14, Close, 27.80
2000-04-14, BUY CREATE, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-19, Close, 33.16
2000-04-20, Close, 31.49
2000-04-20, BUY CREATE, 31.49
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-02, Close, 34.61
2000-05-02, BUY CREATE, 34.61
2000-05-03, Close, 33.72
2000-05-03, BUY CREATE, 33.72
2000-05-04, Close, 33.02
2000-05-04, BUY CREATE, 33.02
2000-05-05, Close, 34.16
2000-05-08, Close, 32.16
2000-05-09, Close, 32.02
2000-05-09, BUY CREATE, 32.02
2000-05-10, Close, 30.08
2000-05-10, BUY CREATE, 30.08
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, BUY CREATE, 32.49
2000-05-19, Close, 31.16
2000-05-19, BUY CREATE, 31.16
2000-05-22, Close, 30.16
2000-05-22, BUY CREATE, 30.16
2000-05-23, Close, 27.85
2000-05-23, BUY CREATE, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-30, Close, 32.99
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-23, Close, 35.36
2000-06-23, BUY CREATE, 35.36
2000-06-26, Close, 36.77
2000-06-27, Close, 36.58
2000-06-28, Close, 36.89
2000-06-29, Close, 35.97
2000-06-30, Close, 37.39
2000-07-03, Close, 35.66
2000-07-05, Close, 32.16
2000-07-05, BUY CREATE, 32.16
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-11, BUY CREATE, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-19, BUY CREATE, 32.80
2000-07-20, Close, 34.75
2000-07-21, Close, 33.55
2000-07-24, Close, 33.36
2000-07-24, BUY CREATE, 33.36
2000-07-25, Close, 33.80
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-28, Close, 32.19
2000-07-28, BUY CREATE, 32.19
2000-07-31, Close, 33.44
2000-08-01, Close, 32.52
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-10, BUY CREATE, 35.61
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-16, BUY CREATE, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-06, BUY CREATE, 39.69
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-11, Close, 37.11
2000-09-11, BUY CREATE, 37.11
2000-09-12, Close, 35.30
2000-09-12, BUY CREATE, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-18, BUY CREATE, 34.01
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-29, Close, 35.02
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-04, BUY CREATE, 30.30
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-09, BUY CREATE, 29.69
2000-10-10, Close, 28.74
2000-10-10, BUY CREATE, 28.74
2000-10-11, Close, 27.69
2000-10-11, BUY CREATE, 27.69
2000-10-12, Close, 28.02
2000-10-13, Close, 31.69
2000-10-16, Close, 30.74
2000-10-17, Close, 29.96
2000-10-17, BUY CREATE, 29.96
2000-10-18, Close, 29.85
2000-10-18, BUY CREATE, 29.85
2000-10-19, Close, 32.36
2000-10-20, Close, 31.35
2000-10-23, Close, 30.30
2000-10-23, BUY CREATE, 30.30
2000-10-24, Close, 31.85
2000-10-25, Close, 30.58
2000-10-26, Close, 30.30
2000-10-26, BUY CREATE, 30.30
2000-10-27, Close, 30.41
2000-10-30, Close, 28.13
2000-10-31, Close, 29.35
2000-11-01, Close, 27.91
2000-11-02, Close, 26.30
2000-11-02, BUY CREATE, 26.30
2000-11-03, Close, 26.96
2000-11-06, Close, 24.85
2000-11-07, Close, 23.63
2000-11-07, BUY CREATE, 23.63
2000-11-08, Close, 22.07
2000-11-08, BUY CREATE, 22.07
2000-11-09, Close, 24.18
2000-11-10, Close, 22.63
2000-11-13, Close, 22.01
2000-11-13, BUY CREATE, 22.01
2000-11-14, Close, 25.24
2000-11-15, Close, 25.68
2000-11-16, Close, 24.35
2000-11-17, Close, 25.63
2000-11-20, Close, 22.01
2000-11-21, Close, 21.24
2000-11-21, BUY CREATE, 21.24
2000-11-22, Close, 19.85
2000-11-22, BUY CREATE, 19.85
2000-11-24, Close, 21.46
2000-11-27, Close, 20.57
2000-11-28, Close, 20.15
2000-11-28, BUY CREATE, 20.15
2000-11-29, Close, 20.35
2000-11-30, Close, 23.57
2000-12-01, Close, 23.52
2000-12-04, Close, 25.07
2000-12-05, Close, 28.02
2000-12-06, Close, 26.85
2000-12-07, Close, 25.18
2000-12-07, BUY CREATE, 25.18
2000-12-08, Close, 26.74
2000-12-11, Close, 28.41
2000-12-12, Close, 27.35
2000-12-13, Close, 25.24
2000-12-13, BUY CREATE, 25.24
2000-12-14, Close, 24.46
2000-12-14, BUY CREATE, 24.46
2000-12-15, Close, 25.41
2000-12-18, Close, 28.46
2000-12-19, Close, 27.24
2000-12-20, Close, 25.35
2000-12-20, BUY CREATE, 25.35
2000-12-21, Close, 26.24
2000-12-22, Close, 28.35
2000-12-26, Close, 27.52
2000-12-27, Close, 27.30
2000-12-27, BUY CREATE, 27.30
2000-12-28, Close, 27.63
2000-12-29, Close, 25.85
Final Portfolio Value: 99740.45
###Markdown
Do not only buy … but SELL
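The strategy below no longer just accumulates positions: when an order completes, `notify_order` records the bar at which it executed, and `next` closes the position with a market sell once five more bars have passed. The heart of that exit rule, excerpted from the cell below:

```python
# In notify_order(), once an order has completed:
self.bar_executed = len(self)  # bar count at the moment of execution

# In next(), while a position is open:
if len(self) >= (self.bar_executed + 5):
    self.log('SELL CREATE, %.2f' % self.dataclose[0])
    self.order = self.sell()  # market order, filled on the next bar
```

Keeping the pending order in `self.order` (and resetting it to `None` in `notify_order`) ensures that only one order is in flight at a time.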
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders
self.order = None
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log('BUY EXECUTED, %.2f' % order.executed.price)
elif order.issell():
self.log('SELL EXECUTED, %.2f' % order.executed.price)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
# Write down: no pending order
self.order = None
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
                        # previous close also less than the one before it
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
# Already in the market ... we might sell
if len(self) >= (self.bar_executed + 5):
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
    # The sample data file lives under the repository's raw data folder,
    # addressed relative to the notebook's working directory
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # Do not reverse the input (the CSV is already in ascending date order)
        reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
2000-01-03, Close, 26.27
2000-01-04, Close, 23.95
2000-01-05, Close, 22.68
2000-01-05, BUY CREATE, 22.68
2000-01-06, BUY EXECUTED, 22.27
2000-01-06, Close, 21.35
2000-01-07, Close, 22.99
2000-01-10, Close, 25.74
2000-01-11, Close, 24.99
2000-01-12, Close, 23.49
2000-01-13, Close, 23.36
2000-01-13, SELL CREATE, 23.36
2000-01-14, SELL EXECUTED, 24.24
2000-01-14, Close, 23.75
2000-01-18, Close, 24.74
2000-01-19, Close, 25.41
2000-01-20, Close, 26.35
2000-01-21, Close, 26.55
2000-01-24, Close, 24.10
2000-01-25, Close, 25.10
2000-01-26, Close, 24.49
2000-01-27, Close, 23.04
2000-01-27, BUY CREATE, 23.04
2000-01-28, BUY EXECUTED, 22.90
2000-01-28, Close, 21.07
2000-01-31, Close, 22.22
2000-02-01, Close, 24.02
2000-02-02, Close, 24.16
2000-02-03, Close, 25.21
2000-02-04, Close, 25.71
2000-02-04, SELL CREATE, 25.71
2000-02-07, SELL EXECUTED, 26.38
2000-02-07, Close, 26.66
2000-02-08, Close, 26.49
2000-02-09, Close, 26.66
2000-02-10, Close, 27.71
2000-02-11, Close, 26.55
2000-02-14, Close, 27.66
2000-02-15, Close, 27.30
2000-02-16, Close, 27.24
2000-02-16, BUY CREATE, 27.24
2000-02-17, BUY EXECUTED, 27.46
2000-02-17, Close, 27.41
2000-02-18, Close, 26.05
2000-02-22, Close, 26.38
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-25, SELL CREATE, 31.41
2000-02-28, SELL EXECUTED, 31.69
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-02, BUY CREATE, 30.47
2000-03-03, BUY EXECUTED, 31.63
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-10, SELL CREATE, 36.30
2000-03-13, SELL EXECUTED, 34.91
2000-03-13, Close, 35.02
2000-03-13, BUY CREATE, 35.02
2000-03-14, BUY EXECUTED, 36.41
2000-03-14, Close, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-21, Close, 35.89
2000-03-21, SELL CREATE, 35.89
2000-03-22, SELL EXECUTED, 36.02
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-29, BUY CREATE, 36.69
2000-03-30, BUY EXECUTED, 34.91
2000-03-30, Close, 34.88
2000-03-31, Close, 34.72
2000-04-03, Close, 34.19
2000-04-04, Close, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-06, SELL CREATE, 36.55
2000-04-07, SELL EXECUTED, 37.22
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, BUY CREATE, 34.41
2000-04-12, BUY EXECUTED, 34.66
2000-04-12, Close, 32.52
2000-04-13, Close, 31.99
2000-04-14, Close, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-19, Close, 33.16
2000-04-19, SELL CREATE, 33.16
2000-04-20, SELL EXECUTED, 32.83
2000-04-20, Close, 31.49
2000-04-20, BUY CREATE, 31.49
2000-04-24, BUY EXECUTED, 29.96
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-01, SELL CREATE, 35.44
2000-05-02, SELL EXECUTED, 35.11
2000-05-02, Close, 34.61
2000-05-02, BUY CREATE, 34.61
2000-05-03, BUY EXECUTED, 34.19
2000-05-03, Close, 33.72
2000-05-04, Close, 33.02
2000-05-05, Close, 34.16
2000-05-08, Close, 32.16
2000-05-09, Close, 32.02
2000-05-10, Close, 30.08
2000-05-10, SELL CREATE, 30.08
2000-05-11, SELL EXECUTED, 30.66
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, BUY CREATE, 32.49
2000-05-19, BUY EXECUTED, 32.02
2000-05-19, Close, 31.16
2000-05-22, Close, 30.16
2000-05-23, Close, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-26, SELL CREATE, 29.80
2000-05-30, SELL EXECUTED, 30.63
2000-05-30, Close, 32.99
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-23, Close, 35.36
2000-06-23, BUY CREATE, 35.36
2000-06-26, BUY EXECUTED, 35.69
2000-06-26, Close, 36.77
2000-06-27, Close, 36.58
2000-06-28, Close, 36.89
2000-06-29, Close, 35.97
2000-06-30, Close, 37.39
2000-07-03, Close, 35.66
2000-07-03, SELL CREATE, 35.66
2000-07-05, SELL EXECUTED, 34.16
2000-07-05, Close, 32.16
2000-07-05, BUY CREATE, 32.16
2000-07-06, BUY EXECUTED, 31.91
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-13, SELL CREATE, 33.69
2000-07-14, SELL EXECUTED, 33.88
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-19, BUY CREATE, 32.80
2000-07-20, BUY EXECUTED, 33.27
2000-07-20, Close, 34.75
2000-07-21, Close, 33.55
2000-07-24, Close, 33.36
2000-07-25, Close, 33.80
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-27, SELL CREATE, 33.38
2000-07-28, SELL EXECUTED, 33.41
2000-07-28, Close, 32.19
2000-07-28, BUY CREATE, 32.19
2000-07-31, BUY EXECUTED, 31.91
2000-07-31, Close, 33.44
2000-08-01, Close, 32.52
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-07, SELL CREATE, 36.41
2000-08-08, SELL EXECUTED, 36.02
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-10, BUY CREATE, 35.61
2000-08-11, BUY EXECUTED, 35.55
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-18, SELL CREATE, 36.16
2000-08-21, SELL EXECUTED, 36.52
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-06, BUY CREATE, 39.69
2000-09-07, BUY EXECUTED, 40.08
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-11, Close, 37.11
2000-09-12, Close, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-14, SELL CREATE, 37.78
2000-09-15, SELL EXECUTED, 36.08
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-18, BUY CREATE, 34.01
2000-09-19, BUY EXECUTED, 34.44
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-26, SELL CREATE, 35.33
2000-09-27, SELL EXECUTED, 35.66
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-29, Close, 35.02
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-04, BUY CREATE, 30.30
2000-10-05, BUY EXECUTED, 30.38
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-10, Close, 28.74
2000-10-11, Close, 27.69
2000-10-12, Close, 28.02
2000-10-12, SELL CREATE, 28.02
2000-10-13, SELL EXECUTED, 27.57
2000-10-13, Close, 31.69
2000-10-16, Close, 30.74
2000-10-17, Close, 29.96
2000-10-17, BUY CREATE, 29.96
2000-10-18, BUY EXECUTED, 28.07
2000-10-18, Close, 29.85
2000-10-19, Close, 32.36
2000-10-20, Close, 31.35
2000-10-23, Close, 30.30
2000-10-24, Close, 31.85
2000-10-25, Close, 30.58
2000-10-25, SELL CREATE, 30.58
2000-10-26, SELL EXECUTED, 30.91
2000-10-26, Close, 30.30
2000-10-26, BUY CREATE, 30.30
2000-10-27, BUY EXECUTED, 30.69
2000-10-27, Close, 30.41
2000-10-30, Close, 28.13
2000-10-31, Close, 29.35
2000-11-01, Close, 27.91
2000-11-02, Close, 26.30
2000-11-03, Close, 26.96
2000-11-03, SELL CREATE, 26.96
2000-11-06, SELL EXECUTED, 27.30
2000-11-06, Close, 24.85
2000-11-07, Close, 23.63
2000-11-07, BUY CREATE, 23.63
2000-11-08, BUY EXECUTED, 24.35
2000-11-08, Close, 22.07
2000-11-09, Close, 24.18
2000-11-10, Close, 22.63
2000-11-13, Close, 22.01
2000-11-14, Close, 25.24
2000-11-15, Close, 25.68
2000-11-15, SELL CREATE, 25.68
2000-11-16, SELL EXECUTED, 25.57
2000-11-16, Close, 24.35
2000-11-17, Close, 25.63
2000-11-20, Close, 22.01
2000-11-21, Close, 21.24
2000-11-21, BUY CREATE, 21.24
2000-11-22, BUY EXECUTED, 21.01
2000-11-22, Close, 19.85
2000-11-24, Close, 21.46
2000-11-27, Close, 20.57
2000-11-28, Close, 20.15
2000-11-29, Close, 20.35
2000-11-30, Close, 23.57
2000-11-30, SELL CREATE, 23.57
2000-12-01, SELL EXECUTED, 23.46
2000-12-01, Close, 23.52
2000-12-04, Close, 25.07
2000-12-05, Close, 28.02
2000-12-06, Close, 26.85
2000-12-07, Close, 25.18
2000-12-07, BUY CREATE, 25.18
2000-12-08, BUY EXECUTED, 26.74
2000-12-08, Close, 26.74
2000-12-11, Close, 28.41
2000-12-12, Close, 27.35
2000-12-13, Close, 25.24
2000-12-14, Close, 24.46
2000-12-15, Close, 25.41
2000-12-15, SELL CREATE, 25.41
2000-12-18, SELL EXECUTED, 26.68
2000-12-18, Close, 28.46
2000-12-19, Close, 27.24
2000-12-20, Close, 25.35
2000-12-20, BUY CREATE, 25.35
2000-12-21, BUY EXECUTED, 24.74
2000-12-21, Close, 26.24
2000-12-22, Close, 28.35
2000-12-26, Close, 27.52
2000-12-27, Close, 27.30
2000-12-28, Close, 27.63
2000-12-29, Close, 25.85
2000-12-29, SELL CREATE, 25.85
Final Portfolio Value: 100017.52
###Markdown
The broker says: Show me the money!
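The cell below attaches a 0.1% commission to the broker with `setcommission(commission=0.001)` and adds `notify_trade`, which logs gross and net profit whenever a round trip closes. The commission is charged on both legs, so NET is always a few cents below GROSS. A worked check against the first trade in the output (prices taken from the BUY/SELL EXECUTED lines):

```python
commission = 0.001                    # 0.1% per side
buy_price, sell_price = 22.27, 24.24  # first executed buy and sell in the output

gross = sell_price - buy_price                            # 1.97
fees = commission * buy_price + commission * sell_price   # ~0.05 (logged as 0.02 + 0.02)
net = gross - fees                                        # ~1.92, the NET figure in the log
print(round(gross, 2), round(fees, 2), round(net, 2))
```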
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
                        # previous close also less than the one before it
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
# Already in the market ... we might sell
if len(self) >= (self.bar_executed + 5):
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
    # The sample data file lives under the repository's raw data folder,
    # addressed relative to the notebook's working directory
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # Do not reverse the input (the CSV is already in ascending date order)
        reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Set the commission - 0.1% ... divide by 100 to remove the %
cerebro.broker.setcommission(commission=0.001)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
2000-01-03, Close, 26.27
2000-01-04, Close, 23.95
2000-01-05, Close, 22.68
2000-01-05, BUY CREATE, 22.68
2000-01-06, BUY EXECUTED, Price: 22.27, Cost: 22.27, Comm 0.02
2000-01-06, Close, 21.35
2000-01-07, Close, 22.99
2000-01-10, Close, 25.74
2000-01-11, Close, 24.99
2000-01-12, Close, 23.49
2000-01-13, Close, 23.36
2000-01-13, SELL CREATE, 23.36
2000-01-14, SELL EXECUTED, Price: 24.24, Cost: 22.27, Comm 0.02
2000-01-14, OPERATION PROFIT, GROSS 1.97, NET 1.92
2000-01-14, Close, 23.75
2000-01-18, Close, 24.74
2000-01-19, Close, 25.41
2000-01-20, Close, 26.35
2000-01-21, Close, 26.55
2000-01-24, Close, 24.10
2000-01-25, Close, 25.10
2000-01-26, Close, 24.49
2000-01-27, Close, 23.04
2000-01-27, BUY CREATE, 23.04
2000-01-28, BUY EXECUTED, Price: 22.90, Cost: 22.90, Comm 0.02
2000-01-28, Close, 21.07
2000-01-31, Close, 22.22
2000-02-01, Close, 24.02
2000-02-02, Close, 24.16
2000-02-03, Close, 25.21
2000-02-04, Close, 25.71
2000-02-04, SELL CREATE, 25.71
2000-02-07, SELL EXECUTED, Price: 26.38, Cost: 22.90, Comm 0.03
2000-02-07, OPERATION PROFIT, GROSS 3.48, NET 3.43
2000-02-07, Close, 26.66
2000-02-08, Close, 26.49
2000-02-09, Close, 26.66
2000-02-10, Close, 27.71
2000-02-11, Close, 26.55
2000-02-14, Close, 27.66
2000-02-15, Close, 27.30
2000-02-16, Close, 27.24
2000-02-16, BUY CREATE, 27.24
2000-02-17, BUY EXECUTED, Price: 27.46, Cost: 27.46, Comm 0.03
2000-02-17, Close, 27.41
2000-02-18, Close, 26.05
2000-02-22, Close, 26.38
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-25, SELL CREATE, 31.41
2000-02-28, SELL EXECUTED, Price: 31.69, Cost: 27.46, Comm 0.03
2000-02-28, OPERATION PROFIT, GROSS 4.23, NET 4.17
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-02, BUY CREATE, 30.47
2000-03-03, BUY EXECUTED, Price: 31.63, Cost: 31.63, Comm 0.03
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-10, SELL CREATE, 36.30
2000-03-13, SELL EXECUTED, Price: 34.91, Cost: 31.63, Comm 0.03
2000-03-13, OPERATION PROFIT, GROSS 3.28, NET 3.21
2000-03-13, Close, 35.02
2000-03-13, BUY CREATE, 35.02
2000-03-14, BUY EXECUTED, Price: 36.41, Cost: 36.41, Comm 0.04
2000-03-14, Close, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-21, Close, 35.89
2000-03-21, SELL CREATE, 35.89
2000-03-22, SELL EXECUTED, Price: 36.02, Cost: 36.41, Comm 0.04
2000-03-22, OPERATION PROFIT, GROSS -0.39, NET -0.46
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-29, BUY CREATE, 36.69
2000-03-30, BUY EXECUTED, Price: 34.91, Cost: 34.91, Comm 0.03
2000-03-30, Close, 34.88
2000-03-31, Close, 34.72
2000-04-03, Close, 34.19
2000-04-04, Close, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-06, SELL CREATE, 36.55
2000-04-07, SELL EXECUTED, Price: 37.22, Cost: 34.91, Comm 0.04
2000-04-07, OPERATION PROFIT, GROSS 2.31, NET 2.24
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, BUY CREATE, 34.41
2000-04-12, BUY EXECUTED, Price: 34.66, Cost: 34.66, Comm 0.03
2000-04-12, Close, 32.52
2000-04-13, Close, 31.99
2000-04-14, Close, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-19, Close, 33.16
2000-04-19, SELL CREATE, 33.16
2000-04-20, SELL EXECUTED, Price: 32.83, Cost: 34.66, Comm 0.03
2000-04-20, OPERATION PROFIT, GROSS -1.83, NET -1.90
2000-04-20, Close, 31.49
2000-04-20, BUY CREATE, 31.49
2000-04-24, BUY EXECUTED, Price: 29.96, Cost: 29.96, Comm 0.03
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-01, SELL CREATE, 35.44
2000-05-02, SELL EXECUTED, Price: 35.11, Cost: 29.96, Comm 0.04
2000-05-02, OPERATION PROFIT, GROSS 5.15, NET 5.08
2000-05-02, Close, 34.61
2000-05-02, BUY CREATE, 34.61
2000-05-03, BUY EXECUTED, Price: 34.19, Cost: 34.19, Comm 0.03
2000-05-03, Close, 33.72
2000-05-04, Close, 33.02
2000-05-05, Close, 34.16
2000-05-08, Close, 32.16
2000-05-09, Close, 32.02
2000-05-10, Close, 30.08
2000-05-10, SELL CREATE, 30.08
2000-05-11, SELL EXECUTED, Price: 30.66, Cost: 34.19, Comm 0.03
2000-05-11, OPERATION PROFIT, GROSS -3.53, NET -3.59
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, BUY CREATE, 32.49
2000-05-19, BUY EXECUTED, Price: 32.02, Cost: 32.02, Comm 0.03
2000-05-19, Close, 31.16
2000-05-22, Close, 30.16
2000-05-23, Close, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-26, SELL CREATE, 29.80
2000-05-30, SELL EXECUTED, Price: 30.63, Cost: 32.02, Comm 0.03
2000-05-30, OPERATION PROFIT, GROSS -1.39, NET -1.45
2000-05-30, Close, 32.99
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-23, Close, 35.36
2000-06-23, BUY CREATE, 35.36
2000-06-26, BUY EXECUTED, Price: 35.69, Cost: 35.69, Comm 0.04
2000-06-26, Close, 36.77
2000-06-27, Close, 36.58
2000-06-28, Close, 36.89
2000-06-29, Close, 35.97
2000-06-30, Close, 37.39
2000-07-03, Close, 35.66
2000-07-03, SELL CREATE, 35.66
2000-07-05, SELL EXECUTED, Price: 34.16, Cost: 35.69, Comm 0.03
2000-07-05, OPERATION PROFIT, GROSS -1.53, NET -1.60
2000-07-05, Close, 32.16
2000-07-05, BUY CREATE, 32.16
2000-07-06, BUY EXECUTED, Price: 31.91, Cost: 31.91, Comm 0.03
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-13, SELL CREATE, 33.69
2000-07-14, SELL EXECUTED, Price: 33.88, Cost: 31.91, Comm 0.03
2000-07-14, OPERATION PROFIT, GROSS 1.97, NET 1.90
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-19, BUY CREATE, 32.80
2000-07-20, BUY EXECUTED, Price: 33.27, Cost: 33.27, Comm 0.03
2000-07-20, Close, 34.75
2000-07-21, Close, 33.55
2000-07-24, Close, 33.36
2000-07-25, Close, 33.80
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-27, SELL CREATE, 33.38
2000-07-28, SELL EXECUTED, Price: 33.41, Cost: 33.27, Comm 0.03
2000-07-28, OPERATION PROFIT, GROSS 0.14, NET 0.07
2000-07-28, Close, 32.19
2000-07-28, BUY CREATE, 32.19
2000-07-31, BUY EXECUTED, Price: 31.91, Cost: 31.91, Comm 0.03
2000-07-31, Close, 33.44
2000-08-01, Close, 32.52
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-07, SELL CREATE, 36.41
2000-08-08, SELL EXECUTED, Price: 36.02, Cost: 31.91, Comm 0.04
2000-08-08, OPERATION PROFIT, GROSS 4.11, NET 4.04
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-10, BUY CREATE, 35.61
2000-08-11, BUY EXECUTED, Price: 35.55, Cost: 35.55, Comm 0.04
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-18, SELL CREATE, 36.16
2000-08-21, SELL EXECUTED, Price: 36.52, Cost: 35.55, Comm 0.04
2000-08-21, OPERATION PROFIT, GROSS 0.97, NET 0.90
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-06, BUY CREATE, 39.69
2000-09-07, BUY EXECUTED, Price: 40.08, Cost: 40.08, Comm 0.04
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-11, Close, 37.11
2000-09-12, Close, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-14, SELL CREATE, 37.78
2000-09-15, SELL EXECUTED, Price: 36.08, Cost: 40.08, Comm 0.04
2000-09-15, OPERATION PROFIT, GROSS -4.00, NET -4.08
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-18, BUY CREATE, 34.01
2000-09-19, BUY EXECUTED, Price: 34.44, Cost: 34.44, Comm 0.03
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-26, SELL CREATE, 35.33
2000-09-27, SELL EXECUTED, Price: 35.66, Cost: 34.44, Comm 0.04
2000-09-27, OPERATION PROFIT, GROSS 1.22, NET 1.15
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-29, Close, 35.02
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-04, BUY CREATE, 30.30
2000-10-05, BUY EXECUTED, Price: 30.38, Cost: 30.38, Comm 0.03
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-10, Close, 28.74
2000-10-11, Close, 27.69
2000-10-12, Close, 28.02
2000-10-12, SELL CREATE, 28.02
2000-10-13, SELL EXECUTED, Price: 27.57, Cost: 30.38, Comm 0.03
2000-10-13, OPERATION PROFIT, GROSS -2.81, NET -2.87
2000-10-13, Close, 31.69
2000-10-16, Close, 30.74
2000-10-17, Close, 29.96
2000-10-17, BUY CREATE, 29.96
2000-10-18, BUY EXECUTED, Price: 28.07, Cost: 28.07, Comm 0.03
2000-10-18, Close, 29.85
2000-10-19, Close, 32.36
2000-10-20, Close, 31.35
2000-10-23, Close, 30.30
2000-10-24, Close, 31.85
2000-10-25, Close, 30.58
2000-10-25, SELL CREATE, 30.58
2000-10-26, SELL EXECUTED, Price: 30.91, Cost: 28.07, Comm 0.03
2000-10-26, OPERATION PROFIT, GROSS 2.84, NET 2.78
2000-10-26, Close, 30.30
2000-10-26, BUY CREATE, 30.30
2000-10-27, BUY EXECUTED, Price: 30.69, Cost: 30.69, Comm 0.03
2000-10-27, Close, 30.41
2000-10-30, Close, 28.13
2000-10-31, Close, 29.35
2000-11-01, Close, 27.91
2000-11-02, Close, 26.30
2000-11-03, Close, 26.96
2000-11-03, SELL CREATE, 26.96
2000-11-06, SELL EXECUTED, Price: 27.30, Cost: 30.69, Comm 0.03
2000-11-06, OPERATION PROFIT, GROSS -3.39, NET -3.45
2000-11-06, Close, 24.85
2000-11-07, Close, 23.63
2000-11-07, BUY CREATE, 23.63
2000-11-08, BUY EXECUTED, Price: 24.35, Cost: 24.35, Comm 0.02
2000-11-08, Close, 22.07
2000-11-09, Close, 24.18
2000-11-10, Close, 22.63
2000-11-13, Close, 22.01
2000-11-14, Close, 25.24
2000-11-15, Close, 25.68
2000-11-15, SELL CREATE, 25.68
2000-11-16, SELL EXECUTED, Price: 25.57, Cost: 24.35, Comm 0.03
2000-11-16, OPERATION PROFIT, GROSS 1.22, NET 1.17
2000-11-16, Close, 24.35
2000-11-17, Close, 25.63
2000-11-20, Close, 22.01
2000-11-21, Close, 21.24
2000-11-21, BUY CREATE, 21.24
2000-11-22, BUY EXECUTED, Price: 21.01, Cost: 21.01, Comm 0.02
2000-11-22, Close, 19.85
2000-11-24, Close, 21.46
2000-11-27, Close, 20.57
2000-11-28, Close, 20.15
2000-11-29, Close, 20.35
2000-11-30, Close, 23.57
2000-11-30, SELL CREATE, 23.57
2000-12-01, SELL EXECUTED, Price: 23.46, Cost: 21.01, Comm 0.02
2000-12-01, OPERATION PROFIT, GROSS 2.45, NET 2.41
2000-12-01, Close, 23.52
2000-12-04, Close, 25.07
2000-12-05, Close, 28.02
2000-12-06, Close, 26.85
2000-12-07, Close, 25.18
2000-12-07, BUY CREATE, 25.18
2000-12-08, BUY EXECUTED, Price: 26.74, Cost: 26.74, Comm 0.03
2000-12-08, Close, 26.74
2000-12-11, Close, 28.41
2000-12-12, Close, 27.35
2000-12-13, Close, 25.24
2000-12-14, Close, 24.46
2000-12-15, Close, 25.41
2000-12-15, SELL CREATE, 25.41
2000-12-18, SELL EXECUTED, Price: 26.68, Cost: 26.74, Comm 0.03
2000-12-18, OPERATION PROFIT, GROSS -0.06, NET -0.11
2000-12-18, Close, 28.46
2000-12-19, Close, 27.24
2000-12-20, Close, 25.35
2000-12-20, BUY CREATE, 25.35
2000-12-21, BUY EXECUTED, Price: 24.74, Cost: 24.74, Comm 0.02
2000-12-21, Close, 26.24
###Markdown
Customizing the Strategy: Parameters
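Here the hard-coded 5-bar holding period becomes a strategy parameter (`exitbars`), and a `FixedSize` sizer of 10 shares is added, which is why the profit/loss figures in the output are roughly ten times larger than in the previous run. Parameters declared in the `params` tuple are reachable as `self.params.<name>` and can be overridden when the strategy is registered. The `addsizer` call below is taken from the cell; overriding `exitbars` at registration time is shown only as an illustration (the cell keeps the default of 5):

```python
# Override the declared default (exitbars=5) when registering the strategy
cerebro.addstrategy(TestStrategy, exitbars=7)

# Trade in blocks of 10 shares instead of the default 1
cerebro.addsizer(bt.sizers.FixedSize, stake=10)
```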
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
params = (
('exitbars', 5),
)
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
                        # previous close also less than the one before it
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
# Already in the market ... we might sell
if len(self) >= (self.bar_executed + self.params.exitbars):
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
    # The sample data file lives under the repository's raw data folder,
    # addressed relative to the notebook's working directory
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # Do not reverse the input (the CSV is already in ascending date order)
        reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Add a FixedSize sizer according to the stake
cerebro.addsizer(bt.sizers.FixedSize, stake=10)
# Set the commission - 0.1% ... divide by 100 to remove the %
cerebro.broker.setcommission(commission=0.001)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 100000.00
2000-01-03, Close, 26.27
2000-01-04, Close, 23.95
2000-01-05, Close, 22.68
2000-01-05, BUY CREATE, 22.68
2000-01-06, BUY EXECUTED, Price: 22.27, Cost: 222.70, Comm 0.22
2000-01-06, Close, 21.35
2000-01-07, Close, 22.99
2000-01-10, Close, 25.74
2000-01-11, Close, 24.99
2000-01-12, Close, 23.49
2000-01-13, Close, 23.36
2000-01-13, SELL CREATE, 23.36
2000-01-14, SELL EXECUTED, Price: 24.24, Cost: 222.70, Comm 0.24
2000-01-14, OPERATION PROFIT, GROSS 19.70, NET 19.23
2000-01-14, Close, 23.75
2000-01-18, Close, 24.74
2000-01-19, Close, 25.41
2000-01-20, Close, 26.35
2000-01-21, Close, 26.55
2000-01-24, Close, 24.10
2000-01-25, Close, 25.10
2000-01-26, Close, 24.49
2000-01-27, Close, 23.04
2000-01-27, BUY CREATE, 23.04
2000-01-28, BUY EXECUTED, Price: 22.90, Cost: 229.00, Comm 0.23
2000-01-28, Close, 21.07
2000-01-31, Close, 22.22
2000-02-01, Close, 24.02
2000-02-02, Close, 24.16
2000-02-03, Close, 25.21
2000-02-04, Close, 25.71
2000-02-04, SELL CREATE, 25.71
2000-02-07, SELL EXECUTED, Price: 26.38, Cost: 229.00, Comm 0.26
2000-02-07, OPERATION PROFIT, GROSS 34.80, NET 34.31
2000-02-07, Close, 26.66
2000-02-08, Close, 26.49
2000-02-09, Close, 26.66
2000-02-10, Close, 27.71
2000-02-11, Close, 26.55
2000-02-14, Close, 27.66
2000-02-15, Close, 27.30
2000-02-16, Close, 27.24
2000-02-16, BUY CREATE, 27.24
2000-02-17, BUY EXECUTED, Price: 27.46, Cost: 274.60, Comm 0.27
2000-02-17, Close, 27.41
2000-02-18, Close, 26.05
2000-02-22, Close, 26.38
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-25, SELL CREATE, 31.41
2000-02-28, SELL EXECUTED, Price: 31.69, Cost: 274.60, Comm 0.32
2000-02-28, OPERATION PROFIT, GROSS 42.30, NET 41.71
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-02, BUY CREATE, 30.47
2000-03-03, BUY EXECUTED, Price: 31.63, Cost: 316.30, Comm 0.32
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-10, SELL CREATE, 36.30
2000-03-13, SELL EXECUTED, Price: 34.91, Cost: 316.30, Comm 0.35
2000-03-13, OPERATION PROFIT, GROSS 32.80, NET 32.13
2000-03-13, Close, 35.02
2000-03-13, BUY CREATE, 35.02
2000-03-14, BUY EXECUTED, Price: 36.41, Cost: 364.10, Comm 0.36
2000-03-14, Close, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-21, Close, 35.89
2000-03-21, SELL CREATE, 35.89
2000-03-22, SELL EXECUTED, Price: 36.02, Cost: 364.10, Comm 0.36
2000-03-22, OPERATION PROFIT, GROSS -3.90, NET -4.62
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-29, BUY CREATE, 36.69
2000-03-30, BUY EXECUTED, Price: 34.91, Cost: 349.10, Comm 0.35
2000-03-30, Close, 34.88
2000-03-31, Close, 34.72
2000-04-03, Close, 34.19
2000-04-04, Close, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-06, SELL CREATE, 36.55
2000-04-07, SELL EXECUTED, Price: 37.22, Cost: 349.10, Comm 0.37
2000-04-07, OPERATION PROFIT, GROSS 23.10, NET 22.38
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, BUY CREATE, 34.41
2000-04-12, BUY EXECUTED, Price: 34.66, Cost: 346.60, Comm 0.35
2000-04-12, Close, 32.52
2000-04-13, Close, 31.99
2000-04-14, Close, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-19, Close, 33.16
2000-04-19, SELL CREATE, 33.16
2000-04-20, SELL EXECUTED, Price: 32.83, Cost: 346.60, Comm 0.33
2000-04-20, OPERATION PROFIT, GROSS -18.30, NET -18.97
2000-04-20, Close, 31.49
2000-04-20, BUY CREATE, 31.49
2000-04-24, BUY EXECUTED, Price: 29.96, Cost: 299.60, Comm 0.30
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-01, SELL CREATE, 35.44
2000-05-02, SELL EXECUTED, Price: 35.11, Cost: 299.60, Comm 0.35
2000-05-02, OPERATION PROFIT, GROSS 51.50, NET 50.85
2000-05-02, Close, 34.61
2000-05-02, BUY CREATE, 34.61
2000-05-03, BUY EXECUTED, Price: 34.19, Cost: 341.90, Comm 0.34
2000-05-03, Close, 33.72
2000-05-04, Close, 33.02
2000-05-05, Close, 34.16
2000-05-08, Close, 32.16
2000-05-09, Close, 32.02
2000-05-10, Close, 30.08
2000-05-10, SELL CREATE, 30.08
2000-05-11, SELL EXECUTED, Price: 30.66, Cost: 341.90, Comm 0.31
2000-05-11, OPERATION PROFIT, GROSS -35.30, NET -35.95
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, BUY CREATE, 32.49
2000-05-19, BUY EXECUTED, Price: 32.02, Cost: 320.20, Comm 0.32
2000-05-19, Close, 31.16
2000-05-22, Close, 30.16
2000-05-23, Close, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-26, SELL CREATE, 29.80
2000-05-30, SELL EXECUTED, Price: 30.63, Cost: 320.20, Comm 0.31
2000-05-30, OPERATION PROFIT, GROSS -13.90, NET -14.53
2000-05-30, Close, 32.99
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-23, Close, 35.36
2000-06-23, BUY CREATE, 35.36
2000-06-26, BUY EXECUTED, Price: 35.69, Cost: 356.90, Comm 0.36
2000-06-26, Close, 36.77
2000-06-27, Close, 36.58
2000-06-28, Close, 36.89
2000-06-29, Close, 35.97
2000-06-30, Close, 37.39
2000-07-03, Close, 35.66
2000-07-03, SELL CREATE, 35.66
2000-07-05, SELL EXECUTED, Price: 34.16, Cost: 356.90, Comm 0.34
2000-07-05, OPERATION PROFIT, GROSS -15.30, NET -16.00
2000-07-05, Close, 32.16
2000-07-05, BUY CREATE, 32.16
2000-07-06, BUY EXECUTED, Price: 31.91, Cost: 319.10, Comm 0.32
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-13, SELL CREATE, 33.69
2000-07-14, SELL EXECUTED, Price: 33.88, Cost: 319.10, Comm 0.34
2000-07-14, OPERATION PROFIT, GROSS 19.70, NET 19.04
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-19, BUY CREATE, 32.80
2000-07-20, BUY EXECUTED, Price: 33.27, Cost: 332.70, Comm 0.33
2000-07-20, Close, 34.75
2000-07-21, Close, 33.55
2000-07-24, Close, 33.36
2000-07-25, Close, 33.80
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-27, SELL CREATE, 33.38
2000-07-28, SELL EXECUTED, Price: 33.41, Cost: 332.70, Comm 0.33
2000-07-28, OPERATION PROFIT, GROSS 1.40, NET 0.73
2000-07-28, Close, 32.19
2000-07-28, BUY CREATE, 32.19
2000-07-31, BUY EXECUTED, Price: 31.91, Cost: 319.10, Comm 0.32
2000-07-31, Close, 33.44
2000-08-01, Close, 32.52
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-07, SELL CREATE, 36.41
2000-08-08, SELL EXECUTED, Price: 36.02, Cost: 319.10, Comm 0.36
2000-08-08, OPERATION PROFIT, GROSS 41.10, NET 40.42
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-10, BUY CREATE, 35.61
2000-08-11, BUY EXECUTED, Price: 35.55, Cost: 355.50, Comm 0.36
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-18, SELL CREATE, 36.16
2000-08-21, SELL EXECUTED, Price: 36.52, Cost: 355.50, Comm 0.37
2000-08-21, OPERATION PROFIT, GROSS 9.70, NET 8.98
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-06, BUY CREATE, 39.69
2000-09-07, BUY EXECUTED, Price: 40.08, Cost: 400.80, Comm 0.40
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-11, Close, 37.11
2000-09-12, Close, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-14, SELL CREATE, 37.78
2000-09-15, SELL EXECUTED, Price: 36.08, Cost: 400.80, Comm 0.36
2000-09-15, OPERATION PROFIT, GROSS -40.00, NET -40.76
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-18, BUY CREATE, 34.01
2000-09-19, BUY EXECUTED, Price: 34.44, Cost: 344.40, Comm 0.34
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-26, SELL CREATE, 35.33
2000-09-27, SELL EXECUTED, Price: 35.66, Cost: 344.40, Comm 0.36
2000-09-27, OPERATION PROFIT, GROSS 12.20, NET 11.50
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-29, Close, 35.02
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-04, BUY CREATE, 30.30
2000-10-05, BUY EXECUTED, Price: 30.38, Cost: 303.80, Comm 0.30
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-10, Close, 28.74
2000-10-11, Close, 27.69
2000-10-12, Close, 28.02
2000-10-12, SELL CREATE, 28.02
###Markdown
Adding an indicator
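The entry logic changes completely in the next cell: instead of waiting for two consecutive falling closes, the strategy buys when the close is above a 15-period Simple Moving Average and flattens the position when it drops below it. Note that the first logged bar in the output is 2000-01-24 rather than 2000-01-03, because `next` is only called once every indicator has enough data, here `maperiod` bars for the SMA. The decision rule, excerpted from the cell below:

```python
# A 15-bar simple moving average on the close price
self.sma = bt.indicators.SimpleMovingAverage(
    self.datas[0], period=self.params.maperiod)

# In next():
if not self.position:
    if self.dataclose[0] > self.sma[0]:  # close above the average -> enter long
        self.order = self.buy()
else:
    if self.dataclose[0] < self.sma[0]:  # close below the average -> exit
        self.order = self.sell()
```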
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
params = (
('maperiod', 15),
)
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
# Add a MovingAverageSimple indicator
self.sma = bt.indicators.SimpleMovingAverage(
self.datas[0], period=self.params.maperiod)
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] > self.sma[0]:
# BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
if self.dataclose[0] < self.sma[0]:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
    # The sample data file lives under the repository's raw data folder,
    # addressed relative to the notebook's working directory
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # Do not reverse the input (the CSV is already in ascending date order)
        reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(1000.0)
# Add a FixedSize sizer according to the stake
cerebro.addsizer(bt.sizers.FixedSize, stake=10)
# Set the commission
cerebro.broker.setcommission(commission=0.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
###Output
Starting Portfolio Value: 1000.00
2000-01-24, Close, 24.10
2000-01-25, Close, 25.10
2000-01-25, BUY CREATE, 25.10
2000-01-26, BUY EXECUTED, Price: 25.24, Cost: 252.40, Comm 0.00
2000-01-26, Close, 24.49
2000-01-27, Close, 23.04
2000-01-27, SELL CREATE, 23.04
2000-01-28, SELL EXECUTED, Price: 22.90, Cost: 252.40, Comm 0.00
2000-01-28, OPERATION PROFIT, GROSS -23.40, NET -23.40
2000-01-28, Close, 21.07
2000-01-31, Close, 22.22
2000-02-01, Close, 24.02
2000-02-02, Close, 24.16
2000-02-02, BUY CREATE, 24.16
2000-02-03, BUY EXECUTED, Price: 24.63, Cost: 246.30, Comm 0.00
2000-02-03, Close, 25.21
2000-02-04, Close, 25.71
2000-02-07, Close, 26.66
2000-02-08, Close, 26.49
2000-02-09, Close, 26.66
2000-02-10, Close, 27.71
2000-02-11, Close, 26.55
2000-02-14, Close, 27.66
2000-02-15, Close, 27.30
2000-02-16, Close, 27.24
2000-02-17, Close, 27.41
2000-02-18, Close, 26.05
2000-02-18, SELL CREATE, 26.05
2000-02-22, SELL EXECUTED, Price: 26.30, Cost: 246.30, Comm 0.00
2000-02-22, OPERATION PROFIT, GROSS 16.70, NET 16.70
2000-02-22, Close, 26.38
2000-02-22, BUY CREATE, 26.38
2000-02-23, BUY EXECUTED, Price: 26.77, Cost: 267.70, Comm 0.00
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-13, Close, 35.02
2000-03-14, Close, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-21, Close, 35.89
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-30, Close, 34.88
2000-03-30, SELL CREATE, 34.88
2000-03-31, SELL EXECUTED, Price: 35.66, Cost: 267.70, Comm 0.00
2000-03-31, OPERATION PROFIT, GROSS 88.90, NET 88.90
2000-03-31, Close, 34.72
2000-04-03, Close, 34.19
2000-04-04, Close, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-06, BUY CREATE, 36.55
2000-04-07, BUY EXECUTED, Price: 37.22, Cost: 372.20, Comm 0.00
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, SELL CREATE, 34.41
2000-04-12, SELL EXECUTED, Price: 34.66, Cost: 372.20, Comm 0.00
2000-04-12, OPERATION PROFIT, GROSS -25.60, NET -25.60
2000-04-12, Close, 32.52
2000-04-13, Close, 31.99
2000-04-14, Close, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-18, BUY CREATE, 35.11
2000-04-19, BUY EXECUTED, Price: 34.97, Cost: 349.70, Comm 0.00
2000-04-19, Close, 33.16
2000-04-19, SELL CREATE, 33.16
2000-04-20, SELL EXECUTED, Price: 32.83, Cost: 349.70, Comm 0.00
2000-04-20, OPERATION PROFIT, GROSS -21.40, NET -21.40
2000-04-20, Close, 31.49
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-27, BUY CREATE, 34.38
2000-04-28, BUY EXECUTED, Price: 34.91, Cost: 349.10, Comm 0.00
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-02, Close, 34.61
2000-05-03, Close, 33.72
2000-05-04, Close, 33.02
2000-05-04, SELL CREATE, 33.02
2000-05-05, SELL EXECUTED, Price: 32.91, Cost: 349.10, Comm 0.00
2000-05-05, OPERATION PROFIT, GROSS -20.00, NET -20.00
2000-05-05, Close, 34.16
2000-05-05, BUY CREATE, 34.16
2000-05-08, BUY EXECUTED, Price: 33.49, Cost: 334.90, Comm 0.00
2000-05-08, Close, 32.16
2000-05-08, SELL CREATE, 32.16
2000-05-09, SELL EXECUTED, Price: 32.77, Cost: 334.90, Comm 0.00
2000-05-09, OPERATION PROFIT, GROSS -7.20, NET -7.20
2000-05-09, Close, 32.02
2000-05-10, Close, 30.08
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-15, BUY CREATE, 34.25
2000-05-16, BUY EXECUTED, Price: 34.52, Cost: 345.20, Comm 0.00
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, SELL CREATE, 32.49
2000-05-19, SELL EXECUTED, Price: 32.02, Cost: 345.20, Comm 0.00
2000-05-19, OPERATION PROFIT, GROSS -25.00, NET -25.00
2000-05-19, Close, 31.16
2000-05-22, Close, 30.16
2000-05-23, Close, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-30, Close, 32.99
2000-05-30, BUY CREATE, 32.99
2000-05-31, BUY EXECUTED, Price: 32.58, Cost: 325.80, Comm 0.00
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-22, SELL CREATE, 36.25
2000-06-23, SELL EXECUTED, Price: 35.94, Cost: 325.80, Comm 0.00
2000-06-23, OPERATION PROFIT, GROSS 33.60, NET 33.60
2000-06-23, Close, 35.36
2000-06-26, Close, 36.77
2000-06-26, BUY CREATE, 36.77
2000-06-27, BUY EXECUTED, Price: 36.64, Cost: 366.40, Comm 0.00
2000-06-27, Close, 36.58
2000-06-27, SELL CREATE, 36.58
2000-06-28, SELL EXECUTED, Price: 36.50, Cost: 366.40, Comm 0.00
2000-06-28, OPERATION PROFIT, GROSS -1.40, NET -1.40
2000-06-28, Close, 36.89
2000-06-28, BUY CREATE, 36.89
2000-06-29, BUY EXECUTED, Price: 36.50, Cost: 365.00, Comm 0.00
2000-06-29, Close, 35.97
2000-06-29, SELL CREATE, 35.97
2000-06-30, SELL EXECUTED, Price: 35.75, Cost: 365.00, Comm 0.00
2000-06-30, OPERATION PROFIT, GROSS -7.50, NET -7.50
2000-06-30, Close, 37.39
2000-06-30, BUY CREATE, 37.39
2000-07-03, BUY EXECUTED, Price: 36.08, Cost: 360.80, Comm 0.00
2000-07-03, Close, 35.66
2000-07-03, SELL CREATE, 35.66
2000-07-05, SELL EXECUTED, Price: 34.16, Cost: 360.80, Comm 0.00
2000-07-05, OPERATION PROFIT, GROSS -19.20, NET -19.20
2000-07-05, Close, 32.16
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-20, Close, 34.75
2000-07-20, BUY CREATE, 34.75
2000-07-21, BUY EXECUTED, Price: 34.44, Cost: 344.40, Comm 0.00
2000-07-21, Close, 33.55
2000-07-21, SELL CREATE, 33.55
2000-07-24, SELL EXECUTED, Price: 34.30, Cost: 344.40, Comm 0.00
2000-07-24, OPERATION PROFIT, GROSS -1.40, NET -1.40
2000-07-24, Close, 33.36
2000-07-25, Close, 33.80
2000-07-25, BUY CREATE, 33.80
2000-07-26, BUY EXECUTED, Price: 33.27, Cost: 332.70, Comm 0.00
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-27, SELL CREATE, 33.38
2000-07-28, SELL EXECUTED, Price: 33.41, Cost: 332.70, Comm 0.00
2000-07-28, OPERATION PROFIT, GROSS 1.40, NET 1.40
2000-07-28, Close, 32.19
2000-07-31, Close, 33.44
2000-07-31, BUY CREATE, 33.44
2000-08-01, BUY EXECUTED, Price: 33.44, Cost: 334.40, Comm 0.00
2000-08-01, Close, 32.52
2000-08-01, SELL CREATE, 32.52
2000-08-02, SELL EXECUTED, Price: 32.47, Cost: 334.40, Comm 0.00
2000-08-02, OPERATION PROFIT, GROSS -9.70, NET -9.70
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-03, BUY CREATE, 34.44
2000-08-04, BUY EXECUTED, Price: 34.83, Cost: 348.30, Comm 0.00
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-08, SELL CREATE, 38.50
2000-09-11, SELL EXECUTED, Price: 38.28, Cost: 348.30, Comm 0.00
2000-09-11, OPERATION PROFIT, GROSS 34.50, NET 34.50
2000-09-11, Close, 37.11
2000-09-12, Close, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-28, BUY CREATE, 36.24
2000-09-29, BUY EXECUTED, Price: 36.18, Cost: 361.80, Comm 0.00
2000-09-29, Close, 35.02
2000-09-29, SELL CREATE, 35.02
2000-10-02, SELL EXECUTED, Price: 35.47, Cost: 361.80, Comm 0.00
2000-10-02, OPERATION PROFIT, GROSS -7.10, NET -7.10
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-10, Close, 28.74
2000-10-11, Close, 27.69
2000-10-12, Close, 28.02
2000-10-13, Close, 31.69
2000-10-16, Close, 30.74
2000-10-17, Close, 29.96
2000-10-18, Close, 29.85
2000-10-19, Close, 32.36
2000-10-19, BUY CREATE, 32.36
2000-10-20, BUY EXECUTED, Price: 32.13, Cost: 321.30, Comm 0.00
2000-10-20, Close, 31.35
2000-10-23, Close, 30.30
2000-10-24, Close, 31.85
2000-10-25, Close, 30.58
2000-10-26, Close, 30.30
2000-10-27, Close, 30.41
2000-10-30, Close, 28.13
2000-10-30, SELL CREATE, 28.13
2000-10-31, SELL EXECUTED, Price: 29.02, Cost: 321.30, Comm 0.00
2000-10-31, OPERATION PROFIT, GROSS -31.10, NET -31.10
2000-10-31, Close, 29.35
###Markdown
Visual Inspection: Plotting
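The final cell keeps the SMA strategy but declares a handful of extra indicators (EMA, WMA, Stochastic, MACD histogram, RSI with a smoothed average on top, ATR) purely so the chart has something to show, then calls `cerebro.plot()` after the run; plotting requires matplotlib. Two optional arguments that can be handy in a notebook are shown below only as an illustration, the cell itself uses the plain call:

```python
# iplot=False avoids the interactive/inline plotting path,
# style='candle' draws candlesticks instead of a close-price line
cerebro.plot(iplot=False, style='candle')
```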
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
params = (
('maperiod', 15),
)
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
# Add a MovingAverageSimple indicator
self.sma = bt.indicators.SimpleMovingAverage(
self.datas[0], period=self.params.maperiod)
# Indicators for the plotting show
bt.indicators.ExponentialMovingAverage(self.datas[0], period=25)
bt.indicators.WeightedMovingAverage(self.datas[0], period=25,
subplot=True)
bt.indicators.StochasticSlow(self.datas[0])
bt.indicators.MACDHisto(self.datas[0])
rsi = bt.indicators.RSI(self.datas[0])
bt.indicators.SmoothedMovingAverage(rsi, period=10)
bt.indicators.ATR(self.datas[0], plot=False)
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
# Write down: no pending order
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] > self.sma[0]:
# BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
if self.dataclose[0] < self.sma[0]:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
    # Data files are stored under ../data/raw relative to this notebook
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
# Do not pass values before this date
todate=datetime.datetime(2000, 12, 31),
# Do not pass values after this date
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(1000.0)
# Add a FixedSize sizer according to the stake
cerebro.addsizer(bt.sizers.FixedSize, stake=10)
# Set the commission
cerebro.broker.setcommission(commission=0.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Plot the result
cerebro.plot()
###Output
Starting Portfolio Value: 1000.00
2000-02-18, Close, 26.05
2000-02-22, Close, 26.38
2000-02-22, BUY CREATE, 26.38
2000-02-23, BUY EXECUTED, Price: 26.77, Cost: 267.70, Comm 0.00
2000-02-23, Close, 28.05
2000-02-24, Close, 27.55
2000-02-25, Close, 31.41
2000-02-28, Close, 30.52
2000-02-29, Close, 33.02
2000-03-01, Close, 31.80
2000-03-02, Close, 30.47
2000-03-03, Close, 33.36
2000-03-06, Close, 33.69
2000-03-07, Close, 33.33
2000-03-08, Close, 36.97
2000-03-09, Close, 37.36
2000-03-10, Close, 36.30
2000-03-13, Close, 35.02
2000-03-14, Close, 34.25
2000-03-15, Close, 34.97
2000-03-16, Close, 36.44
2000-03-17, Close, 35.50
2000-03-20, Close, 34.75
2000-03-21, Close, 35.89
2000-03-22, Close, 37.39
2000-03-23, Close, 38.64
2000-03-24, Close, 38.69
2000-03-27, Close, 39.33
2000-03-28, Close, 38.50
2000-03-29, Close, 36.69
2000-03-30, Close, 34.88
2000-03-30, SELL CREATE, 34.88
2000-03-31, SELL EXECUTED, Price: 35.66, Cost: 267.70, Comm 0.00
2000-03-31, OPERATION PROFIT, GROSS 88.90, NET 88.90
2000-03-31, Close, 34.72
2000-04-03, Close, 34.19
2000-04-04, Close, 33.77
2000-04-05, Close, 34.80
2000-04-06, Close, 36.55
2000-04-06, BUY CREATE, 36.55
2000-04-07, BUY EXECUTED, Price: 37.22, Cost: 372.20, Comm 0.00
2000-04-07, Close, 38.75
2000-04-10, Close, 36.69
2000-04-11, Close, 34.41
2000-04-11, SELL CREATE, 34.41
2000-04-12, SELL EXECUTED, Price: 34.66, Cost: 372.20, Comm 0.00
2000-04-12, OPERATION PROFIT, GROSS -25.60, NET -25.60
2000-04-12, Close, 32.52
2000-04-13, Close, 31.99
2000-04-14, Close, 27.80
2000-04-17, Close, 33.27
2000-04-18, Close, 35.11
2000-04-18, BUY CREATE, 35.11
2000-04-19, BUY EXECUTED, Price: 34.97, Cost: 349.70, Comm 0.00
2000-04-19, Close, 33.16
2000-04-19, SELL CREATE, 33.16
2000-04-20, SELL EXECUTED, Price: 32.83, Cost: 349.70, Comm 0.00
2000-04-20, OPERATION PROFIT, GROSS -21.40, NET -21.40
2000-04-20, Close, 31.49
2000-04-24, Close, 32.22
2000-04-25, Close, 33.61
2000-04-26, Close, 32.11
2000-04-27, Close, 34.38
2000-04-27, BUY CREATE, 34.38
2000-04-28, BUY EXECUTED, Price: 34.91, Cost: 349.10, Comm 0.00
2000-04-28, Close, 35.55
2000-05-01, Close, 35.44
2000-05-02, Close, 34.61
2000-05-03, Close, 33.72
2000-05-04, Close, 33.02
2000-05-04, SELL CREATE, 33.02
2000-05-05, SELL EXECUTED, Price: 32.91, Cost: 349.10, Comm 0.00
2000-05-05, OPERATION PROFIT, GROSS -20.00, NET -20.00
2000-05-05, Close, 34.16
2000-05-05, BUY CREATE, 34.16
2000-05-08, BUY EXECUTED, Price: 33.49, Cost: 334.90, Comm 0.00
2000-05-08, Close, 32.16
2000-05-08, SELL CREATE, 32.16
2000-05-09, SELL EXECUTED, Price: 32.77, Cost: 334.90, Comm 0.00
2000-05-09, OPERATION PROFIT, GROSS -7.20, NET -7.20
2000-05-09, Close, 32.02
2000-05-10, Close, 30.08
2000-05-11, Close, 32.19
2000-05-12, Close, 32.99
2000-05-15, Close, 34.25
2000-05-15, BUY CREATE, 34.25
2000-05-16, BUY EXECUTED, Price: 34.52, Cost: 345.20, Comm 0.00
2000-05-16, Close, 35.22
2000-05-17, Close, 34.77
2000-05-18, Close, 32.49
2000-05-18, SELL CREATE, 32.49
2000-05-19, SELL EXECUTED, Price: 32.02, Cost: 345.20, Comm 0.00
2000-05-19, OPERATION PROFIT, GROSS -25.00, NET -25.00
2000-05-19, Close, 31.16
2000-05-22, Close, 30.16
2000-05-23, Close, 27.85
2000-05-24, Close, 28.57
2000-05-25, Close, 29.55
2000-05-26, Close, 29.80
2000-05-30, Close, 32.99
2000-05-30, BUY CREATE, 32.99
2000-05-31, BUY EXECUTED, Price: 32.58, Cost: 325.80, Comm 0.00
2000-05-31, Close, 31.97
2000-06-01, Close, 34.63
2000-06-02, Close, 35.66
2000-06-05, Close, 36.00
2000-06-06, Close, 34.27
2000-06-07, Close, 35.58
2000-06-08, Close, 36.64
2000-06-09, Close, 36.77
2000-06-12, Close, 35.83
2000-06-13, Close, 36.33
2000-06-14, Close, 35.13
2000-06-15, Close, 36.69
2000-06-16, Close, 36.41
2000-06-19, Close, 38.25
2000-06-20, Close, 38.27
2000-06-21, Close, 38.33
2000-06-22, Close, 36.25
2000-06-22, SELL CREATE, 36.25
2000-06-23, SELL EXECUTED, Price: 35.94, Cost: 325.80, Comm 0.00
2000-06-23, OPERATION PROFIT, GROSS 33.60, NET 33.60
2000-06-23, Close, 35.36
2000-06-26, Close, 36.77
2000-06-26, BUY CREATE, 36.77
2000-06-27, BUY EXECUTED, Price: 36.64, Cost: 366.40, Comm 0.00
2000-06-27, Close, 36.58
2000-06-27, SELL CREATE, 36.58
2000-06-28, SELL EXECUTED, Price: 36.50, Cost: 366.40, Comm 0.00
2000-06-28, OPERATION PROFIT, GROSS -1.40, NET -1.40
2000-06-28, Close, 36.89
2000-06-28, BUY CREATE, 36.89
2000-06-29, BUY EXECUTED, Price: 36.50, Cost: 365.00, Comm 0.00
2000-06-29, Close, 35.97
2000-06-29, SELL CREATE, 35.97
2000-06-30, SELL EXECUTED, Price: 35.75, Cost: 365.00, Comm 0.00
2000-06-30, OPERATION PROFIT, GROSS -7.50, NET -7.50
2000-06-30, Close, 37.39
2000-06-30, BUY CREATE, 37.39
2000-07-03, BUY EXECUTED, Price: 36.08, Cost: 360.80, Comm 0.00
2000-07-03, Close, 35.66
2000-07-03, SELL CREATE, 35.66
2000-07-05, SELL EXECUTED, Price: 34.16, Cost: 360.80, Comm 0.00
2000-07-05, OPERATION PROFIT, GROSS -19.20, NET -19.20
2000-07-05, Close, 32.16
2000-07-06, Close, 33.63
2000-07-07, Close, 33.75
2000-07-10, Close, 32.97
2000-07-11, Close, 32.16
2000-07-12, Close, 33.22
2000-07-13, Close, 33.69
2000-07-14, Close, 33.86
2000-07-17, Close, 33.86
2000-07-18, Close, 32.99
2000-07-19, Close, 32.80
2000-07-20, Close, 34.75
2000-07-20, BUY CREATE, 34.75
2000-07-21, BUY EXECUTED, Price: 34.44, Cost: 344.40, Comm 0.00
2000-07-21, Close, 33.55
2000-07-21, SELL CREATE, 33.55
2000-07-24, SELL EXECUTED, Price: 34.30, Cost: 344.40, Comm 0.00
2000-07-24, OPERATION PROFIT, GROSS -1.40, NET -1.40
2000-07-24, Close, 33.36
2000-07-25, Close, 33.80
2000-07-25, BUY CREATE, 33.80
2000-07-26, BUY EXECUTED, Price: 33.27, Cost: 332.70, Comm 0.00
2000-07-26, Close, 34.13
2000-07-27, Close, 33.38
2000-07-27, SELL CREATE, 33.38
2000-07-28, SELL EXECUTED, Price: 33.41, Cost: 332.70, Comm 0.00
2000-07-28, OPERATION PROFIT, GROSS 1.40, NET 1.40
2000-07-28, Close, 32.19
2000-07-31, Close, 33.44
2000-07-31, BUY CREATE, 33.44
2000-08-01, BUY EXECUTED, Price: 33.44, Cost: 334.40, Comm 0.00
2000-08-01, Close, 32.52
2000-08-01, SELL CREATE, 32.52
2000-08-02, SELL EXECUTED, Price: 32.47, Cost: 334.40, Comm 0.00
2000-08-02, OPERATION PROFIT, GROSS -9.70, NET -9.70
2000-08-02, Close, 32.52
2000-08-03, Close, 34.44
2000-08-03, BUY CREATE, 34.44
2000-08-04, BUY EXECUTED, Price: 34.83, Cost: 348.30, Comm 0.00
2000-08-04, Close, 36.27
2000-08-07, Close, 36.41
2000-08-08, Close, 36.91
2000-08-09, Close, 36.19
2000-08-10, Close, 35.61
2000-08-11, Close, 36.08
2000-08-14, Close, 36.64
2000-08-15, Close, 36.14
2000-08-16, Close, 36.11
2000-08-17, Close, 37.33
2000-08-18, Close, 36.16
2000-08-21, Close, 37.00
2000-08-22, Close, 37.16
2000-08-23, Close, 36.86
2000-08-24, Close, 37.66
2000-08-25, Close, 37.64
2000-08-28, Close, 38.58
2000-08-29, Close, 39.03
2000-08-30, Close, 39.25
2000-08-31, Close, 40.44
2000-09-01, Close, 41.19
2000-09-05, Close, 40.50
2000-09-06, Close, 39.69
2000-09-07, Close, 40.56
2000-09-08, Close, 38.50
2000-09-08, SELL CREATE, 38.50
2000-09-11, SELL EXECUTED, Price: 38.28, Cost: 348.30, Comm 0.00
2000-09-11, OPERATION PROFIT, GROSS 34.50, NET 34.50
2000-09-11, Close, 37.11
2000-09-12, Close, 35.30
2000-09-13, Close, 36.39
2000-09-14, Close, 37.78
2000-09-15, Close, 34.83
2000-09-18, Close, 34.01
2000-09-19, Close, 35.27
2000-09-20, Close, 35.55
2000-09-21, Close, 35.11
2000-09-22, Close, 35.91
2000-09-25, Close, 35.02
2000-09-26, Close, 35.33
2000-09-27, Close, 35.52
2000-09-28, Close, 36.24
2000-09-28, BUY CREATE, 36.24
2000-09-29, BUY EXECUTED, Price: 36.18, Cost: 361.80, Comm 0.00
2000-09-29, Close, 35.02
2000-09-29, SELL CREATE, 35.02
2000-10-02, SELL EXECUTED, Price: 35.47, Cost: 361.80, Comm 0.00
2000-10-02, OPERATION PROFIT, GROSS -7.10, NET -7.10
2000-10-02, Close, 35.02
2000-10-03, Close, 30.91
2000-10-04, Close, 30.30
2000-10-05, Close, 30.38
2000-10-06, Close, 30.08
2000-10-09, Close, 29.69
2000-10-10, Close, 28.74
2000-10-11, Close, 27.69
2000-10-12, Close, 28.02
2000-10-13, Close, 31.69
2000-10-16, Close, 30.74
2000-10-17, Close, 29.96
2000-10-18, Close, 29.85
2000-10-19, Close, 32.36
2000-10-19, BUY CREATE, 32.36
2000-10-20, BUY EXECUTED, Price: 32.13, Cost: 321.30, Comm 0.00
2000-10-20, Close, 31.35
2000-10-23, Close, 30.30
2000-10-24, Close, 31.85
2000-10-25, Close, 30.58
2000-10-26, Close, 30.30
2000-10-27, Close, 30.41
2000-10-30, Close, 28.13
2000-10-30, SELL CREATE, 28.13
2000-10-31, SELL EXECUTED, Price: 29.02, Cost: 321.30, Comm 0.00
2000-10-31, OPERATION PROFIT, GROSS -31.10, NET -31.10
2000-10-31, Close, 29.35
2000-11-01, Close, 27.91
2000-11-02, Close, 26.30
2000-11-03, Close, 26.96
2000-11-06, Close, 24.85
2000-11-07, Close, 23.63
2000-11-08, Close, 22.07
2000-11-09, Close, 24.18
2000-11-10, Close, 22.63
2000-11-13, Close, 22.01
2000-11-14, Close, 25.24
2000-11-15, Close, 25.68
2000-11-16, Close, 24.35
2000-11-17, Close, 25.63
2000-11-17, BUY CREATE, 25.63
2000-11-20, BUY EXECUTED, Price: 21.63, Cost: 216.30, Comm 0.00
2000-11-20, Close, 22.01
2000-11-20, SELL CREATE, 22.01
###Markdown
Let’s Optimize
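`cerebro.optstrategy` in the cell below accepts an iterable for each strategy parameter and runs one full backtest per combination, so `maperiod=range(10, 31)` covers 21 runs (periods 10 through 30); `maxcpus=1` keeps them in a single process, and the per-run ending values are printed from `stop()`. As a hedged sketch (not in the original code), the results can also be inspected programmatically, since in optimization mode `run()` returns one list of results per parameter combination and each result still carries the parameters it was run with:
```python
# Hypothetical variant of the final call in the cell below
runs = cerebro.run(maxcpus=1)
for run in runs:
    for res in run:
        print("maperiod tested:", res.params.maperiod)
```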
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Strategy
class TestStrategy(bt.Strategy):
params = (
('maperiod', 15),
('printlog', False),
)
def log(self, txt, dt=None, doprint=False):
        ''' Logging function for this strategy'''
if self.params.printlog or doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
# Add a MovingAverageSimple indicator
self.sma = bt.indicators.SimpleMovingAverage(
self.datas[0], period=self.params.maperiod)
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
# Write down: no pending order
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] > self.sma[0]:
# BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
if self.dataclose[0] < self.sma[0]:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
def stop(self):
self.log('(MA Period %2d) Ending Value %.2f' %
(self.params.maperiod, self.broker.getvalue()), doprint=True)
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
strats = cerebro.optstrategy(
TestStrategy,
maperiod=range(10, 31))
    # Data files are stored under ../data/raw relative to this notebook
modpath = r"../data/raw"
datapath = os.path.join(modpath, 'datas/orcl-1995-2014.txt')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
# Do not pass values before this date
todate=datetime.datetime(2000, 12, 31),
# Do not pass values after this date
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(1000.0)
# Add a FixedSize sizer according to the stake
cerebro.addsizer(bt.sizers.FixedSize, stake=10)
# Set the commission
cerebro.broker.setcommission(commission=0.0)
# Run over everything
cerebro.run(maxcpus=1)
###Output
_____no_output_____ |
postprocess-avgs/00-etasw-and-things-daily-to-monthly-averging.ipynb | ###Markdown
Code to create monthly sums and averages
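The cell below averages the daily `etasw` rasters stored on S3 (one GeoTIFF per day of year) into one GeoTIFF per calendar month, written locally to `./junkbox`. A small illustration of the filename pattern the code assumes (my own example values, not taken from the run itself):
```python
year = 2003
# daily input on S3, day-of-year suffix 001-365, e.g. etasw_2003017.tif
daily_name = 'etasw_' + str(year) + f'{17:03d}' + '.tif'
# monthly output written by the averaging step, e.g. etasw_avg_200301.tif
monthly_name = 'etasw_avg_' + str(year) + f'{1:02d}' + '.tif'
print(daily_name, monthly_name)
```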
###Code
%env CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
# import sys
import datetime
import os
# import time
import rasterio
import numpy as np
# from glob import glob
def create_s3_list_of_days(main_prefix, year, output_name='etasw_'):
    """Build the list of daily S3 raster paths for one year (day-of-year file naming)."""
    the_list = []
    for i in range(1, 366):  # days 1-365; day 366 of leap years is not picked up
        doy = f'{i:03d}'  # zero-padded day of year (001-365), not a month
        file_object = main_prefix + '/' + str(year) + '/' + output_name + str(year) + doy + '.tif'
        the_list.append(file_object)
    return the_list
working_bucket = 'dev-et-data'
main_prefix = 's3://dev-et-data/enduser/DelawareRiverBasin/Run09_13_2020/ward_sandford_customer'
year = 2003
output_name = 'etasw_'
ppt_list = create_s3_list_of_days(main_prefix, year, output_name)
ppt_list
#read in file with rasterio
def read_file(file):
print("reading file ...", file)
with rasterio.open(file) as src:
return(src.read(1))
def monthly_average(file_list, out_dir, out_product, year):
# what months to summarize
start_mon = 1 #start month
end_mon = 12 #end month
#loop through month 1,2,..12
for i in range(start_mon,(end_mon+1)):
print('Month averaged up is: ' + str(i))
Listras = []
for et_in in file_list:
doy = int(et_in.split('.')[0][-3:])
#doy = int(et_in[-3:])
#print 'Day of the year: ' + str(doy)
datea = str(datetime.date(year,1,1) + datetime.timedelta(doy-1))
mon = int(datea.split('-')[1])
#print 'Month is: ' + str(mon)
if mon == i: #if month = i then append grid to list for summing up
Listras.append(et_in)
#print('daily grids for month ' + str(i) + ' :')
#print(Listras)
if Listras == []:
print('No daily data for month' + str(i) + ' available..continue to next month')
continue
else:
# Read all data as a list of numpy arrays
array_list = [read_file(x) for x in Listras]
# Perform averaging
array_out = np.nanmean(array_list, axis=0)
# Get metadata from one of the input files
with rasterio.open(file_list[0]) as src:
meta = src.meta
meta.update(dtype=rasterio.float32)
# Write output file
#out_name = 'ppt_avg_' + str(year) + (('0'+ str(i))[-2:]) +'.tif'
out_name = out_product + str(year) + (('0'+ str(i))[-2:]) +'.tif'
with rasterio.open(out_dir + '/' + out_name, 'w', **meta) as dst:
dst.write(array_out.astype(rasterio.float32), 1)
print('Created monthly grid!', out_name)
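# Added note: the section heading also mentions monthly sums; a sum variant of
# monthly_average() would only need np.nansum(array_list, axis=0) in place of
# np.nanmean(array_list, axis=0), written under a different prefix such as 'etasw_sum_'.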
def _mkdir(directory):
try:
os.stat(directory)
except:
os.mkdir(directory)
_mkdir('./junkbox')
out_dir = './junkbox'
out_product = 'etasw_avg_'
for year in range(2003,2016):
file_list = create_s3_list_of_days(main_prefix, year, output_name)
    monthly_average(file_list, out_dir, out_product, year)
! ls -lh ./junkbox
###Output
total 13G
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:38 dd_avg_200301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:38 dd_avg_200302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:40 dd_avg_200303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:40 dd_avg_200304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:41 dd_avg_200305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:41 dd_avg_200306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:42 dd_avg_200307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:42 dd_avg_200308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:43 dd_avg_200309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:44 dd_avg_200310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:44 dd_avg_200311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:45 dd_avg_200312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:46 dd_avg_200401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:47 dd_avg_200402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:48 dd_avg_200403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:48 dd_avg_200404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:49 dd_avg_200405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:49 dd_avg_200406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:50 dd_avg_200407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:50 dd_avg_200408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:51 dd_avg_200409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:51 dd_avg_200410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:52 dd_avg_200411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:53 dd_avg_200412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:54 dd_avg_200501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:54 dd_avg_200502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:55 dd_avg_200503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:56 dd_avg_200504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:56 dd_avg_200505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:57 dd_avg_200506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:57 dd_avg_200507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:58 dd_avg_200508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:58 dd_avg_200509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:59 dd_avg_200510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:59 dd_avg_200511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:00 dd_avg_200512.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:01 dd_avg_200601.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:02 dd_avg_200602.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:03 dd_avg_200603.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:03 dd_avg_200604.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:04 dd_avg_200605.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:04 dd_avg_200606.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:05 dd_avg_200607.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:05 dd_avg_200608.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:06 dd_avg_200609.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:06 dd_avg_200610.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:07 dd_avg_200611.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:08 dd_avg_200612.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:09 dd_avg_200701.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:10 dd_avg_200702.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:11 dd_avg_200703.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:11 dd_avg_200704.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:12 dd_avg_200705.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:12 dd_avg_200706.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:13 dd_avg_200707.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:13 dd_avg_200708.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:14 dd_avg_200709.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:14 dd_avg_200710.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:15 dd_avg_200711.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:15 dd_avg_200712.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:16 dd_avg_200801.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:17 dd_avg_200802.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:18 dd_avg_200803.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:19 dd_avg_200804.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:19 dd_avg_200805.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:20 dd_avg_200806.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:20 dd_avg_200807.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:21 dd_avg_200808.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:21 dd_avg_200809.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:22 dd_avg_200810.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:22 dd_avg_200811.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:23 dd_avg_200812.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:24 dd_avg_200901.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:24 dd_avg_200902.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:25 dd_avg_200903.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:26 dd_avg_200904.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:26 dd_avg_200905.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:27 dd_avg_200906.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:27 dd_avg_200907.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:28 dd_avg_200908.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:28 dd_avg_200909.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:29 dd_avg_200910.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:29 dd_avg_200911.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:30 dd_avg_200912.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:31 dd_avg_201001.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:32 dd_avg_201002.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:33 dd_avg_201003.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:34 dd_avg_201004.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:34 dd_avg_201005.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:34 dd_avg_201006.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:35 dd_avg_201007.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:36 dd_avg_201008.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:36 dd_avg_201009.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:37 dd_avg_201010.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:37 dd_avg_201011.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:38 dd_avg_201012.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:39 dd_avg_201101.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:39 dd_avg_201102.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:40 dd_avg_201103.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:41 dd_avg_201104.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:42 dd_avg_201105.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:42 dd_avg_201106.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:43 dd_avg_201107.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:43 dd_avg_201108.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:44 dd_avg_201109.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:45 dd_avg_201110.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:45 dd_avg_201111.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:46 dd_avg_201112.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:47 dd_avg_201201.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:48 dd_avg_201202.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:49 dd_avg_201203.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:49 dd_avg_201204.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:50 dd_avg_201205.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:50 dd_avg_201206.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:51 dd_avg_201207.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:51 dd_avg_201208.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:51 dd_avg_201209.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:52 dd_avg_201210.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:52 dd_avg_201211.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:53 dd_avg_201212.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:54 dd_avg_201301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:55 dd_avg_201302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:55 dd_avg_201303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:56 dd_avg_201304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:57 dd_avg_201305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:57 dd_avg_201306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:58 dd_avg_201307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:58 dd_avg_201308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:58 dd_avg_201309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:59 dd_avg_201310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:59 dd_avg_201311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:00 dd_avg_201312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:01 dd_avg_201401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:02 dd_avg_201402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:03 dd_avg_201403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:04 dd_avg_201404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:05 dd_avg_201405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:05 dd_avg_201406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:06 dd_avg_201407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:06 dd_avg_201408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:06 dd_avg_201409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:07 dd_avg_201410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:07 dd_avg_201411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:08 dd_avg_201412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:09 dd_avg_201501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:10 dd_avg_201502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:11 dd_avg_201503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:11 dd_avg_201504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:12 dd_avg_201505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:12 dd_avg_201506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:13 dd_avg_201507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:13 dd_avg_201508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:14 dd_avg_201509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:14 dd_avg_201510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:15 dd_avg_201511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:15 dd_avg_201512.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 6 23:55 etasw_avg_200301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 6 23:56 etasw_avg_200302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 6 23:58 etasw_avg_200303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:03 etasw_avg_200304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:05 etasw_avg_200305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:07 etasw_avg_200306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:10 etasw_avg_200307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:12 etasw_avg_200308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:17 etasw_avg_200309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:19 etasw_avg_200310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:22 etasw_avg_200311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:24 etasw_avg_200312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:26 etasw_avg_200401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:28 etasw_avg_200402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:31 etasw_avg_200403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:33 etasw_avg_200404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:36 etasw_avg_200405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:40 etasw_avg_200406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:42 etasw_avg_200407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:45 etasw_avg_200408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:47 etasw_avg_200409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:49 etasw_avg_200410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:51 etasw_avg_200411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:54 etasw_avg_200412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:56 etasw_avg_200501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 00:58 etasw_avg_200502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:00 etasw_avg_200503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:03 etasw_avg_200504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:05 etasw_avg_200505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:07 etasw_avg_200506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:10 etasw_avg_200507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:14 etasw_avg_200508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:17 etasw_avg_200509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:19 etasw_avg_200510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:21 etasw_avg_200511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:24 etasw_avg_200512.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:26 etasw_avg_200601.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:28 etasw_avg_200602.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:30 etasw_avg_200603.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:32 etasw_avg_200604.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:35 etasw_avg_200605.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:37 etasw_avg_200606.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:39 etasw_avg_200607.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:42 etasw_avg_200608.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:44 etasw_avg_200609.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:46 etasw_avg_200610.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:49 etasw_avg_200611.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:51 etasw_avg_200612.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:53 etasw_avg_200701.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:55 etasw_avg_200702.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 01:57 etasw_avg_200703.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:00 etasw_avg_200704.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:02 etasw_avg_200705.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:04 etasw_avg_200706.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:07 etasw_avg_200707.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:09 etasw_avg_200708.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:11 etasw_avg_200709.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:13 etasw_avg_200710.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:16 etasw_avg_200711.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:18 etasw_avg_200712.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:20 etasw_avg_200801.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:22 etasw_avg_200802.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:24 etasw_avg_200803.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:27 etasw_avg_200804.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:29 etasw_avg_200805.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:31 etasw_avg_200806.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:33 etasw_avg_200807.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:36 etasw_avg_200808.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:38 etasw_avg_200809.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:40 etasw_avg_200810.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:42 etasw_avg_200811.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:44 etasw_avg_200812.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:46 etasw_avg_200901.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:48 etasw_avg_200902.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:51 etasw_avg_200903.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:53 etasw_avg_200904.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:55 etasw_avg_200905.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:57 etasw_avg_200906.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 02:59 etasw_avg_200907.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:01 etasw_avg_200908.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:04 etasw_avg_200909.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:06 etasw_avg_200910.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:08 etasw_avg_200911.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:10 etasw_avg_200912.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:13 etasw_avg_201001.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:15 etasw_avg_201002.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:17 etasw_avg_201003.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:19 etasw_avg_201004.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:21 etasw_avg_201005.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:23 etasw_avg_201006.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:26 etasw_avg_201007.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:28 etasw_avg_201008.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:30 etasw_avg_201009.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:32 etasw_avg_201010.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:34 etasw_avg_201011.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:37 etasw_avg_201012.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:39 etasw_avg_201101.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:41 etasw_avg_201102.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:43 etasw_avg_201103.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:45 etasw_avg_201104.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:47 etasw_avg_201105.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:49 etasw_avg_201106.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:51 etasw_avg_201107.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:54 etasw_avg_201108.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:56 etasw_avg_201109.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 03:58 etasw_avg_201110.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:02 etasw_avg_201111.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:04 etasw_avg_201112.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:07 etasw_avg_201201.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:09 etasw_avg_201202.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:11 etasw_avg_201203.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:13 etasw_avg_201204.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:15 etasw_avg_201205.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:17 etasw_avg_201206.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:20 etasw_avg_201207.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:22 etasw_avg_201208.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:26 etasw_avg_201209.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:28 etasw_avg_201210.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:30 etasw_avg_201211.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:33 etasw_avg_201212.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:35 etasw_avg_201301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:37 etasw_avg_201302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:39 etasw_avg_201303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:41 etasw_avg_201304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:43 etasw_avg_201305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:46 etasw_avg_201306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:48 etasw_avg_201307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:50 etasw_avg_201308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:52 etasw_avg_201309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:57 etasw_avg_201310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 04:59 etasw_avg_201311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:01 etasw_avg_201312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:03 etasw_avg_201401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:05 etasw_avg_201402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:07 etasw_avg_201403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:10 etasw_avg_201404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:12 etasw_avg_201405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:14 etasw_avg_201406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:17 etasw_avg_201407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:19 etasw_avg_201408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:21 etasw_avg_201409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:24 etasw_avg_201410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:26 etasw_avg_201411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:28 etasw_avg_201412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:30 etasw_avg_201501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:32 etasw_avg_201502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:34 etasw_avg_201503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:36 etasw_avg_201504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:39 etasw_avg_201505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:41 etasw_avg_201506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:43 etasw_avg_201507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:45 etasw_avg_201508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:47 etasw_avg_201509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:49 etasw_avg_201510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:54 etasw_avg_201511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 43M Oct 7 05:56 etasw_avg_201512.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:43 ppt_avg_200312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:44 ppt_avg_200411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:45 ppt_avg_200512.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200601.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200602.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200603.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200604.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200605.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200606.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200607.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200608.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200609.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200610.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200611.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:46 ppt_avg_200612.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200701.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200702.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200703.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200704.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200705.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200706.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200707.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200708.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200709.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200710.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200711.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200712.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:47 ppt_avg_200801.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200802.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200803.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200804.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200805.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200806.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200807.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200808.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200809.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200810.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200811.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200812.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:48 ppt_avg_200901.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200902.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200903.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200904.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200905.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200906.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200907.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200908.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200909.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200910.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200911.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_200912.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:49 ppt_avg_201001.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201002.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201003.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201004.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201005.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201006.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201007.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201008.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201009.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201010.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201011.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201012.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:50 ppt_avg_201101.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201102.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201103.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201104.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201105.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201106.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201107.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201108.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201109.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201110.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201111.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:51 ppt_avg_201112.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201201.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201202.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201203.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201204.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201205.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201206.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201207.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201208.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201209.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201210.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201211.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:52 ppt_avg_201212.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201301.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201302.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201303.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201304.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201305.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201306.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201307.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201308.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201309.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201310.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201311.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:53 ppt_avg_201312.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201401.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201402.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201403.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201404.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201405.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201406.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201407.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201408.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201409.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201410.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:54 ppt_avg_201411.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201412.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201501.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201502.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201503.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201504.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201505.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201506.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201507.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201508.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201509.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201510.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:55 ppt_avg_201511.tif
-rw-r--r-- 1 jupyter-wzell jupyter-wzell 35K Oct 6 21:56 ppt_avg_201512.tif
|
Convolutional Neural Networks/Convolutional Neural Networks Step by Step/Convolution_model_Step_by_Step_v2a.ipynb | ###Markdown
Convolutional Neural Networks: Step by Step

Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation.

**Notation**:
- Superscript $[l]$ denotes an object of the $l^{th}$ layer.
    - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
- Superscript $(i)$ denotes an object from the $i^{th}$ example.
    - Example: $x^{(i)}$ is the $i^{th}$ training example input.
- Subscript $i$ denotes the $i^{th}$ entry of a vector.
    - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.
- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$.
- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$.

We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!

Updates

If you were working on the notebook before this update...
* The current notebook is version "v2a".
* You can find your original work saved in the notebook with the previous version name ("v2")
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.

List of updates
* clarified example used for padding function. Updated starter code for padding function.
* `conv_forward` has additional hints to help students if they're stuck.
* `conv_forward` places code for `vert_start` and `vert_end` within the `for h in range(...)` loop; to avoid redundant calculations. Similarly updated `horiz_start` and `horiz_end`. **Thanks to our mentor Kevin Brown for pointing this out.**
* `conv_forward` breaks down the `Z[i, h, w, c]` single line calculation into 3 lines, for clarity.
* `conv_forward` test case checks that students don't accidentally use n_H_prev instead of n_H, use n_W_prev instead of n_W, and don't accidentally swap n_H with n_W
* `pool_forward` properly nests calculations of `vert_start`, `vert_end`, `horiz_start`, and `horiz_end` to avoid redundant calculations.
* `pool_forward` has two new test cases that check for a correct implementation of stride (the height and width of the previous layer's activations should be large enough relative to the filter dimensions so that a stride can take place).
* `conv_backward`: initialize `Z` and `cache` variables within unit test, to make it independent of unit testing that occurs in the `conv_forward` section of the assignment.
* **Many thanks to our course mentor, Paul Mielke, for proposing these test cases.**

1 - Packages

Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import numpy as np
import h5py
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - Outline of the AssignmentYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:- Convolution functions, including: - Zero Padding - Convolve window - Convolution forward - Convolution backward (optional)- Pooling functions, including: - Pooling forward - Create mask - Distribute value - Pooling backward (optional) This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. 3 - Convolutional Neural NetworksAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. 3.1 - Zero-PaddingZero-padding adds zeros around the border of an image: **Figure 1** : **Zero-Padding** Image (3 channels, RGB) with a padding of 2. The main benefits of padding are the following:- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer. - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:```pythona = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), mode='constant', constant_values = (0,0))```
###Code
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
### START CODE HERE ### (≈ 1 line)
X_pad = np.pad(X, ((0,0), (pad, pad), (pad, pad), (0,0)), mode='constant', constant_values = (0,0))
### END CODE HERE ###
return X_pad
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =\n", x.shape)
print ("x_pad.shape =\n", x_pad.shape)
print ("x[1,1] =\n", x[1,1])
print ("x_pad[1,1] =\n", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
###Output
x.shape =
(4, 3, 3, 2)
x_pad.shape =
(4, 7, 7, 2)
x[1,1] =
[[ 0.90085595 -0.68372786]
[-0.12289023 -0.93576943]
[-0.26788808 0.53035547]]
x_pad[1,1] =
[[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]]
###Markdown
**Expected Output**:```x.shape = (4, 3, 3, 2)x_pad.shape = (4, 7, 7, 2)x[1,1] = [[ 0.90085595 -0.68372786] [-0.12289023 -0.93576943] [-0.26788808 0.53035547]]x_pad[1,1] = [[ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.]]``` 3.2 - Single step of convolution In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: - Takes an input volume - Applies a filter at every position of the input- Outputs another volume (usually of different size) **Figure 2** : **Convolution operation** with a filter of 3x3 and a stride of 1 (stride = amount you move the window each time you slide) In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html). **Note**: The variable b will be passed in as a numpy array. If we add a scalar (a float or integer) to a numpy array, the result is a numpy array. In the special case when a numpy array contains a single value, we can cast it as a float to convert it to a scalar.
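As a quick numeric illustration of this step (toy values chosen only for demonstration, not part of the graded exercise), the element-wise product, the sum, and the bias cast work like this:

```python
import numpy as np

# Toy 2x2x1 slice, filter and bias (illustrative values only)
a_slice_toy = np.array([[[1.0], [2.0]],
                        [[3.0], [4.0]]])
W_toy = np.array([[[0.5], [-1.0]],
                  [[0.25], [1.0]]])
b_toy = np.array([[[0.1]]])                          # bias has shape (1, 1, 1)

Z_toy = np.sum(a_slice_toy * W_toy) + float(b_toy)   # element-wise product, sum, then add the bias as a scalar
print(Z_toy)                                         # 0.5 - 2.0 + 0.75 + 4.0 + 0.1 = 3.35
```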
###Code
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, the result of convolving the sliding window (W, b) on a slice x of the input data
"""
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice_prev and W. Do not add the bias yet.
s = a_slice_prev * W
# Sum over all entries of the volume s.
Z = np.sum(s)
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
Z = float(Z + b)
### END CODE HERE ###
return Z
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
###Output
Z = -6.999089450680221
###Markdown
**Expected Output**: **Z** -6.99908945068 3.3 - Convolutional Neural Networks - Forward passIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: **Exercise**: Implement the function below to convolve the filters `W` on an input activation `A_prev`. This function takes the following inputs:* `A_prev`, the activations output by the previous layer (for a batch of m inputs); * Weights are denoted by `W`. The filter window size is `f` by `f`.* The bias vector is `b`, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. **Hint**: 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:```pythona_slice_prev = a_prev[0:2,0:2,:]```Notice how this gives a 3D slice that has height 2, width 2, and depth 3. Depth is the number of channels. This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find out how each of the corner can be defined using h, w, f and s in the code below. **Figure 3** : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** This figure shows only a single channel. **Reminder**:The formulas relating the output shape of the convolution to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_C = \text{number of filters used in the convolution}$$For this exercise, we won't worry about vectorization, and will just implement everything with for-loops. Additional Hints if you're stuck* You will want to use array slicing (e.g.`varname[0:1,:,3:5]`) for the following variables: `a_prev_pad` ,`W`, `b` Copy the starter code of the function and run it outside of the defined function, in separate cells. Check that the subset of each array is the size and dimension that you're expecting. * To decide how to get the vert_start, vert_end; horiz_start, horiz_end, remember that these are indices of the previous layer. Draw an example of a previous padded layer (8 x 8, for instance), and the current (output layer) (2 x 2, for instance). The output layer's indices are denoted by `h` and `w`. * Make sure that `a_slice_prev` has a height, width and depth.* Remember that `a_prev_pad` is a subset of `A_prev_pad`. Think about which one should be used within the for loops.
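Following the hint about experimenting outside the function, here is a small scratch check (illustrative only) of the output-shape formula and the slice corners for the configuration used in the test cell below (a 5x7 input, f = 3, pad = 1, stride = 2):

```python
import numpy as np

# Scratch check of the shape formula and the slice corners (not part of the graded function)
n_H_prev, n_W_prev, f, pad, stride = 5, 7, 3, 1, 2

n_H = int((n_H_prev - f + 2 * pad) / stride) + 1   # floor((5 - 3 + 2) / 2) + 1 = 3
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1   # floor((7 - 3 + 2) / 2) + 1 = 4
print(n_H, n_W)                                    # 3 4

# Corners of the padded-input slice that produces output row h
for h in range(n_H):
    vert_start = h * stride
    vert_end = vert_start + f
    print(h, (vert_start, vert_end))               # (0, 3), (2, 5), (4, 7) -- all inside the padded height 5 + 2*pad = 7
```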
###Code
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
"""
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer,
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
"""
### START CODE HERE ###
# Retrieve dimensions from A_prev's shape (≈1 line)
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape (≈1 line)
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters" (≈2 lines)
stride = hparameters["stride"]
pad = hparameters["pad"]
# Compute the dimensions of the CONV output volume using the formula given above.
# Hint: use int() to apply the 'floor' operation. (≈2 lines)
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1
# Initialize the output volume Z with zeros. (≈1 line)
Z = np.zeros((m, n_H, n_W, n_C))
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i, :, :, :] # Select ith training example's padded activation
for h in range(n_H): # loop over vertical axis of the output volume
# Find the vertical start and end of the current "slice" (≈2 lines)
vert_start = h * stride
vert_end = h * stride + f
for w in range(n_W): # loop over horizontal axis of the output volume
# Find the horizontal start and end of the current "slice" (≈2 lines)
horiz_start = w * stride
horiz_end = w * stride + f
for c in range(n_C): # loop over channels (= #filters) of the output volume
# Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
# Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈3 line)
weights = W[:,:,:,c]
biases = b[:,:,:,c]
Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)
### END CODE HERE ###
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
np.random.seed(1)
A_prev = np.random.randn(10,5,7,4)
W = np.random.randn(3,3,4,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 1,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =\n", np.mean(Z))
print("Z[3,2,1] =\n", Z[3,2,1])
print("cache_conv[0][1][2][3] =\n", cache_conv[0][1][2][3])
###Output
Z's mean =
0.692360880758
Z[3,2,1] =
[ -1.28912231 2.27650251 6.61941931 0.95527176 8.25132576
2.31329639 13.00689405 2.34576051]
cache_conv[0][1][2][3] =
[-1.1191154 1.9560789 -0.3264995 -1.34267579]
###Markdown
**Expected Output**:```Z's mean = 0.692360880758Z[3,2,1] = [ -1.28912231 2.27650251 6.61941931 0.95527176 8.25132576 2.31329639 13.00689405 2.34576051]cache_conv[0][1][2][3] = [-1.1191154 1.9560789 -0.3264995 -1.34267579]``` Finally, CONV layer should also contain an activation, in which case we would add the following line of code:```python Convolve the window to get back one output neuronZ[i, h, w, c] = ... Apply activationA[i, h, w, c] = activation(Z[i, h, w, c])```You don't need to do it here. 4 - Pooling layer The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the $f \times f$ window you would compute a *max* or *average* over. 4.1 - Forward PoolingNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.**Reminder**:As there's no padding, the formulas binding the output shape of the pooling to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$$$ n_C = n_{C_{prev}}$$
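As a quick check of these formulas (illustrative only), the two pooling test cases used below give the output heights and widths one would expect:

```python
def pool_output_shape(n_H_prev, n_W_prev, f, stride):
    # No-padding pooling output size, as in the reminder above
    n_H = int(1 + (n_H_prev - f) / stride)
    n_W = int(1 + (n_W_prev - f) / stride)
    return n_H, n_W

print(pool_output_shape(5, 5, f=3, stride=1))   # (3, 3) -> matches A.shape == (2, 3, 3, 3) in test case 1
print(pool_output_shape(5, 5, f=3, stride=2))   # (2, 2) -> matches A.shape == (2, 2, 2, 3) in test case 2
```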
###Code
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
### START CODE HERE ###
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
# Find the vertical start and end of the current "slice" (≈2 lines)
vert_start = h * stride
vert_end = h * stride + f
for w in range(n_W): # loop on the horizontal axis of the output volume
# Find the vertical start and end of the current "slice" (≈2 lines)
horiz_start = w * stride
horiz_end = w * stride + f
for c in range (n_C): # loop over the channels of the output volume
# Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
# Compute the pooling operation on the slice.
# Use an if statement to differentiate the modes.
# Use np.max and np.mean.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.mean(a_prev_slice)
### END CODE HERE ###
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
# Case 1: stride of 1
np.random.seed(1)
A_prev = np.random.randn(2, 5, 5, 3)
hparameters = {"stride" : 1, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A.shape = " + str(A.shape))
print("A =\n", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A.shape = " + str(A.shape))
print("A =\n", A)
###Output
mode = max
A.shape = (2, 3, 3, 3)
A =
[[[[ 1.74481176 0.90159072 1.65980218]
[ 1.74481176 1.46210794 1.65980218]
[ 1.74481176 1.6924546 1.65980218]]
[[ 1.14472371 0.90159072 2.10025514]
[ 1.14472371 0.90159072 1.65980218]
[ 1.14472371 1.6924546 1.65980218]]
[[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.6924546 2.18557541]]]
[[[ 1.19891788 0.84616065 0.82797464]
[ 0.69803203 0.84616065 1.2245077 ]
[ 0.69803203 1.12141771 1.2245077 ]]
[[ 1.96710175 0.84616065 1.27375593]
[ 1.96710175 0.84616065 1.23616403]
[ 1.62765075 1.12141771 1.2245077 ]]
[[ 1.96710175 0.86888616 1.27375593]
[ 1.96710175 0.86888616 1.23616403]
[ 1.62765075 1.12141771 0.79280687]]]]
mode = average
A.shape = (2, 3, 3, 3)
A =
[[[[ -3.01046719e-02 -3.24021315e-03 -3.36298859e-01]
[ 1.43310483e-01 1.93146751e-01 -4.44905196e-01]
[ 1.28934436e-01 2.22428468e-01 1.25067597e-01]]
[[ -3.81801899e-01 1.59993515e-02 1.70562706e-01]
[ 4.73707165e-02 2.59244658e-02 9.20338402e-02]
[ 3.97048605e-02 1.57189094e-01 3.45302489e-01]]
[[ -3.82680519e-01 2.32579951e-01 6.25997903e-01]
[ -2.47157416e-01 -3.48524998e-04 3.50539717e-01]
[ -9.52551510e-02 2.68511000e-01 4.66056368e-01]]]
[[[ -1.73134159e-01 3.23771981e-01 -3.43175716e-01]
[ 3.80634669e-02 7.26706274e-02 -2.30268958e-01]
[ 2.03009393e-02 1.41414785e-01 -1.23158476e-02]]
[[ 4.44976963e-01 -2.61694592e-03 -3.10403073e-01]
[ 5.08114737e-01 -2.34937338e-01 -2.39611830e-01]
[ 1.18726772e-01 1.72552294e-01 -2.21121966e-01]]
[[ 4.29449255e-01 8.44699612e-02 -2.72909051e-01]
[ 6.76351685e-01 -1.20138225e-01 -2.44076712e-01]
[ 1.50774518e-01 2.89111751e-01 1.23238536e-03]]]]
###Markdown
** Expected Output**```mode = maxA.shape = (2, 3, 3, 3)A = [[[[ 1.74481176 0.90159072 1.65980218] [ 1.74481176 1.46210794 1.65980218] [ 1.74481176 1.6924546 1.65980218]] [[ 1.14472371 0.90159072 2.10025514] [ 1.14472371 0.90159072 1.65980218] [ 1.14472371 1.6924546 1.65980218]] [[ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.6924546 2.18557541]]] [[[ 1.19891788 0.84616065 0.82797464] [ 0.69803203 0.84616065 1.2245077 ] [ 0.69803203 1.12141771 1.2245077 ]] [[ 1.96710175 0.84616065 1.27375593] [ 1.96710175 0.84616065 1.23616403] [ 1.62765075 1.12141771 1.2245077 ]] [[ 1.96710175 0.86888616 1.27375593] [ 1.96710175 0.86888616 1.23616403] [ 1.62765075 1.12141771 0.79280687]]]]mode = averageA.shape = (2, 3, 3, 3)A = [[[[ -3.01046719e-02 -3.24021315e-03 -3.36298859e-01] [ 1.43310483e-01 1.93146751e-01 -4.44905196e-01] [ 1.28934436e-01 2.22428468e-01 1.25067597e-01]] [[ -3.81801899e-01 1.59993515e-02 1.70562706e-01] [ 4.73707165e-02 2.59244658e-02 9.20338402e-02] [ 3.97048605e-02 1.57189094e-01 3.45302489e-01]] [[ -3.82680519e-01 2.32579951e-01 6.25997903e-01] [ -2.47157416e-01 -3.48524998e-04 3.50539717e-01] [ -9.52551510e-02 2.68511000e-01 4.66056368e-01]]] [[[ -1.73134159e-01 3.23771981e-01 -3.43175716e-01] [ 3.80634669e-02 7.26706274e-02 -2.30268958e-01] [ 2.03009393e-02 1.41414785e-01 -1.23158476e-02]] [[ 4.44976963e-01 -2.61694592e-03 -3.10403073e-01] [ 5.08114737e-01 -2.34937338e-01 -2.39611830e-01] [ 1.18726772e-01 1.72552294e-01 -2.21121966e-01]] [[ 4.29449255e-01 8.44699612e-02 -2.72909051e-01] [ 6.76351685e-01 -1.20138225e-01 -2.44076712e-01] [ 1.50774518e-01 2.89111751e-01 1.23238536e-03]]]]```
###Code
# Case 2: stride of 2
np.random.seed(1)
A_prev = np.random.randn(2, 5, 5, 3)
hparameters = {"stride" : 2, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A.shape = " + str(A.shape))
print("A =\n", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A.shape = " + str(A.shape))
print("A =\n", A)
###Output
mode = max
A.shape = (2, 2, 2, 3)
A =
[[[[ 1.74481176 0.90159072 1.65980218]
[ 1.74481176 1.6924546 1.65980218]]
[[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.6924546 2.18557541]]]
[[[ 1.19891788 0.84616065 0.82797464]
[ 0.69803203 1.12141771 1.2245077 ]]
[[ 1.96710175 0.86888616 1.27375593]
[ 1.62765075 1.12141771 0.79280687]]]]
mode = average
A.shape = (2, 2, 2, 3)
A =
[[[[-0.03010467 -0.00324021 -0.33629886]
[ 0.12893444 0.22242847 0.1250676 ]]
[[-0.38268052 0.23257995 0.6259979 ]
[-0.09525515 0.268511 0.46605637]]]
[[[-0.17313416 0.32377198 -0.34317572]
[ 0.02030094 0.14141479 -0.01231585]]
[[ 0.42944926 0.08446996 -0.27290905]
[ 0.15077452 0.28911175 0.00123239]]]]
###Markdown
**Expected Output:** ```mode = maxA.shape = (2, 2, 2, 3)A = [[[[ 1.74481176 0.90159072 1.65980218] [ 1.74481176 1.6924546 1.65980218]] [[ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.6924546 2.18557541]]] [[[ 1.19891788 0.84616065 0.82797464] [ 0.69803203 1.12141771 1.2245077 ]] [[ 1.96710175 0.86888616 1.27375593] [ 1.62765075 1.12141771 0.79280687]]]]mode = averageA.shape = (2, 2, 2, 3)A = [[[[-0.03010467 -0.00324021 -0.33629886] [ 0.12893444 0.22242847 0.1250676 ]] [[-0.38268052 0.23257995 0.6259979 ] [-0.09525515 0.268511 0.46605637]]] [[[-0.17313416 0.32377198 -0.34317572] [ 0.02030094 0.14141479 -0.01231585]] [[ 0.42944926 0.08446996 -0.27290905] [ 0.15077452 0.28911175 0.00123239]]]]``` Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. The remainder of this notebook is optional, and will not be graded. 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we will briefly present them below. 5.1 - Convolutional layer backward pass Let's start by implementing the backward pass for a CONV layer. 5.1.1 - Computing dA:This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:$$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. In code, inside the appropriate for-loops, this formula translates into:```pythonda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]``` 5.1.2 - Computing dW:This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:$$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. 
In code, inside the appropriate for-loops, this formula translates into:```pythondW[:,:,:,c] += a_slice * dZ[i, h, w, c]``` 5.1.3 - Computing db:This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:$$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. In code, inside the appropriate for-loops, this formula translates into:```pythondb[:,:,:,c] += dZ[i, h, w, c]```**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
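As a small cross-check of formula (3) (illustrative only, not the required loop implementation), the per-filter bias gradient is simply the sum of dZ over examples, heights and widths, so a vectorized sum must agree with the loop version:

```python
import numpy as np

np.random.seed(0)
dZ_toy = np.random.randn(2, 3, 3, 4)              # toy (m, n_H, n_W, n_C) gradient, for illustration only

# Loop version of formula (3)
db_loop = np.zeros((1, 1, 1, 4))
for i in range(2):
    for h in range(3):
        for w in range(3):
            for c in range(4):
                db_loop[:, :, :, c] += dZ_toy[i, h, w, c]

# Vectorized cross-check: sum over examples, heights and widths for each filter
db_vec = dZ_toy.sum(axis=(0, 1, 2)).reshape(1, 1, 1, 4)
print(np.allclose(db_loop, db_vec))               # True
```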
###Code
def conv_backward(dZ, cache):
"""
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
"""
### START CODE HERE ###
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = cache
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters["stride"]
pad = hparameters["pad"]
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = dZ.shape
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = np.zeros_like(A_prev)
dW = np.zeros_like(W)
db = np.zeros_like(b)
# Pad A_prev and dA_prev
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = A_prev_pad[i]
da_prev_pad = dA_prev_pad[i]
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = h * stride   # mirror the forward pass: the slice for output (h, w) starts at h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
# Use the corners to define the slice from a_prev_pad
a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
# Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
### END CODE HERE ###
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
# We'll run conv_forward to initialize the 'Z' and 'cache_conv",
# which we'll use to test the conv_backward function
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
# Test conv_backward
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
###Output
dA_mean = 1.45243777754
dW_mean = 1.72699145831
db_mean = 7.83923256462
###Markdown
** Expected Output: ** **dA_mean** 1.45243777754 **dW_mean** 1.72699145831 **db_mean** 7.83923256462 5.2 Pooling layer - backward passNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. 5.2.1 Max pooling - backward pass Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: $$ X = \begin{bmatrix}1 && 3 \\4 && 2\end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}0 && 0 \\1 && 0\end{bmatrix}\tag{4}$$As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. Hints:- [np.max()]() may be helpful. It computes the maximum of an array.- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:```A[i,j] = True if X[i,j] = xA[i,j] = False if X[i,j] != x```- Here, you don't need to consider cases where there are several maxima in a matrix.
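For a quick sanity check (toy values, mirroring formula (4) above), the comparison trick from the hint produces exactly the mask shown there:

```python
import numpy as np

X = np.array([[1, 3],
              [4, 2]])
mask = (X == np.max(X))   # True only at the position of the maximum
print(mask)
# [[False False]
#  [ True False]]
```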
###Code
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
### START CODE HERE ### (≈1 line)
mask = (x == np.max(x))
### END CODE HERE ###
return mask
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
###Output
x = [[ 1.62434536 -0.61175641 -0.52817175]
[-1.07296862 0.86540763 -2.3015387 ]]
mask = [[ True False False]
[False False False]]
###Markdown
**Expected Output:** **x =**[[ 1.62434536 -0.61175641 -0.52817175] [-1.07296862 0.86540763 -2.3015387 ]] **mask =**[[ True False False] [False False False]] Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost. 5.2.2 - Average pooling - backward pass In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}1/4 && 1/4 \\1/4 && 1/4\end{bmatrix}\tag{5}$$This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
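A short illustration of formula (5) (toy values only): distributing dz = 1 over a 2x2 window divides it evenly among the four entries:

```python
import numpy as np

dz, n_H, n_W = 1.0, 2, 2
a = (dz / (n_H * n_W)) * np.ones((n_H, n_W))
print(a)
# [[0.25 0.25]
#  [0.25 0.25]]
```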
###Code
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
### START CODE HERE ###
# Retrieve dimensions from shape (≈1 line)
(n_H, n_W) = shape
# Compute the value to distribute on the matrix (≈1 line)
average = dz / (n_H * n_W)
# Create a matrix where every entry is the "average" value (≈1 line)
a = average * np.ones((n_H, n_W))
### END CODE HERE ###
return a
a = distribute_value(2, (2,2))
print('distributed value =', a)
###Output
distributed value = [[ 0.5 0.5]
[ 0.5 0.5]]
###Markdown
**Expected Output**: distributed_value =[[ 0.5 0.5] [ 0.5 0.5]] 5.2.3 Putting it together: Pooling backward You now have everything you need to compute backward propagation on a pooling layer.**Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dA.
###Code
def pool_backward(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
### START CODE HERE ###
# Retrieve information from cache (≈1 line)
(A_prev, hparameters) = cache
# Retrieve hyperparameters from "hparameters" (≈2 lines)
stride = hparameters["stride"]
f = hparameters["f"]
# Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
# Initialize dA_prev with zeros (≈1 line)
dA_prev = np.zeros_like(A_prev)
for i in range(m): # loop over the training examples
# select training example from A_prev (≈1 line)
a_prev = A_prev[i]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
# Find the corners of the current "slice" (≈4 lines)
vert_start = h * stride   # mirror pool_forward: the pooled window for output (h, w) starts at h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
# Compute the backward propagation in both modes.
if mode == "max":
# Use the corners and "c" to define the current slice from a_prev (≈1 line)
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
# Create the mask from a_prev_slice (≈1 line)
mask = create_mask_from_window(a_prev_slice)
# Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += mask * dA[i, h, w, c]
elif mode == "average":
# Get the value a from dA (≈1 line)
da = dA[i, h, w, c]
# Define the shape of the filter as fxf (≈1 line)
shape = (f, f)
# Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)
### END CODE ###
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
###Output
mode = max
mean of dA = 0.145713902729
dA_prev[1,1] = [[ 0. 0. ]
[ 5.05844394 -1.68282702]
[ 0. 0. ]]
mode = average
mean of dA = 0.145713902729
dA_prev[1,1] = [[ 0.08485462 0.2787552 ]
[ 1.26461098 -0.25749373]
[ 1.17975636 -0.53624893]]
|
notebooks/Parameterizing the probability distribution of k-tours.ipynb | ###Markdown
TSP Trajectory generation using node placementsThis is Algorithm 4.7.2 from Rubinstein and Kroese, *The Cross-Entropy Method: A Unified Approach to Combinatorial Optimization, Monte-Carlo Simulation, and Machine Learning* (2004). Given $n$ points in the plane $\mathbb{R}^2$, we want to construct a TSP tour. Consider the point labels $[0,1,\dots,n-1]$. It suffices to construct the tour on these labels. The idea from the book is as follows: suppose we have a discrete-time Markov chain on the labels with a transition probability matrix $P$. Starting from state $0$, draw the next state according to the distribution $P_{0, :}$. But in order to make sure that we don't accidentally choose $0$ again (invalid as a TSP tour), we negate the column $0$ and renormalize the entire matrix $P$. Then we proceed by drawing the next state. In general, given that we are at state $j$, we negate the $j$ column and renormalize $P$. Then we draw the next state from $P_{j,:}$. After we have done this $n-1$ times, we are finished, because we then must return to state $0$. In pseudocode, it resembles:1. Define $P^{(0)} = P$. Let $j = 0$.2. Generate $X_j$ from the distribution formed by the $j$th row of $P^{(j)}$. Obtain the matrix $P^{(j+1)}$ from $P^{(j)}$ by first setting the $X_j$th column of $P^{(j)}$ to $0$ and then normalizing the rows to sum up to $1$.3. If $j = n-1$, stop. Otherwise, set $j = j + 1$ and repeat 2. RelevanceFor the Cross-Entropy method, we need to create a way to parameterize the class of probability distributions over the space of tours. While there is a natural way to do so when we assume at the outset that all tours are equally useful---and hence probable---for CE to work we need to be able to adjust the distribution so that it emphasizes the better-scoring tours. Introducing the transition probability matrix $P$ gives us exactly this feature. Now we can adjust the likelihood of drawing a given tour depending on how successful that particular tour shows itself to be during the simulation. Here is an implementation of the TSP trajectory generation technique:
###Code
def normalize_nonnegative_matrix(matrix, axis):
"""
Given a nonnegative matrix P and an axis (either 0 -- for normalizing along columns -- or 1 -- for normalizing along rows), normalize the matrix.
This is an inplace transformation: it modifies the original input matrix.
"""
# Keep the reduced axis so broadcasting divides along the intended dimension (rows for axis=1, columns for axis=0)
matrix /= np.expand_dims(np.sum(matrix, axis=axis), axis=axis)
def draw_from(distribution, size=1):
"""
Given a finite distribution [p0, p1, ... pn-1] (here pj is the probability of drawing j) such that sum(pj for pj in distribution) == 1,
draw a random variable X in [0, 1, ... n-1] which has the pmf of the distribution.
The way it works is as follows. First, the cumulative distribution function is computed. Then, a uniform random variate U ~ U(0,1) is drawn.
We find the largest j such that U < cdf(j), and return it.
"""
# The actual code involves some abuse of NumPy
# return np.argmax(1 - (np.cumsum(distribution) < np.random.rand()))
return np.random.choice(np.arange(len(distribution)), size=size, p=distribution)
def generate_trajectory(transition_matrix):
"""
Generate a trajectory on the points [0, 1, ..., n-1] in accordance with the transition matrix of a Markov chain on these points. This method follows
the algorithm in (Rubinstein and Kroese 2004, Algorithm 4.7.2)
"""
n = transition_matrix.shape[0]
matrix = transition_matrix.copy()
trajectory = [0]
for j in range(len(matrix)-1):
matrix[:,trajectory[j]] = 0.
normalize_nonnegative_matrix(matrix, 1)
trajectory.append(np.asscalar(draw_from(matrix[trajectory[j],:].flatten())))
return trajectory
# Demonstration
n = 12
P = np.ones((n,n)) #np.random.rand(n,n) # P[i,j] ~ Unif(0,1)
normalize_nonnegative_matrix(P, axis=1)
generate_trajectory(P)
###Output
_____no_output_____
###Markdown
$k$-Drone Trajectory generation using node placementsThe challenge for us is to generalize this model for $k$-drone tours. Again, for CE to work well, we need to be able to adjust the distribution over the space of $k$-drone tours so that as the simulation proceeds, better $k$-drone tours are given higher preference. We would like a similar parameterization to the TSP ($1$-drone tour) case above, but that incorporates the added flexibility of multiple tours.Here's what I propose to be the new algorithm. Given input of a transition probability matrix $P$ *and* a probabilty distribution on $[0, 1, \dots, n-1]$ denoted $[p_0, \dots, p_{k-1}]$, we proceed with the trajectory genreation but with a modification. First, we select $k$ starting positions according to the probabilitiy distribution on the labels. Next, before we determine the next step in tours, we roll a $k$-sided die. Whichever drone $i$ the die lands on, we perform Step (2) from the original algorithm. Then we proceed as before.1. Define $P^{(0)} = P$. Let $j = 0$. Set $X_{0,0}, \dots, X_{k-1,0}$ as a simple random sample according to the distribution $[p_0, \dots, p_{k-1}]$.2. Roll a $k$-sided die. Let $i$ be the result.3. Generate $X_{i,-1}$ from the distribution formed by the $i$th row of $P^{(j)}$. Obtain the matrix $P^{(j+1)}$ from $P^{(j)}$ by first setting the $X_{i,-1}$th column of $P^{(j)}$ to $0$ and then normalizing the rows to sum up to $1$.4. If $j = n-1$, stop. Otherwise, set $j = j + 1$ and repeat 2.
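Before the implementation, here is a tiny standalone illustration (toy numbers only) of the column-elimination-and-renormalization update that both the 1-drone and the k-drone generators rely on:

```python
import numpy as np

# Hypothetical 3-node transition matrix whose rows already sum to 1
P_toy = np.array([[0.2, 0.5, 0.3],
                  [0.4, 0.4, 0.2],
                  [0.1, 0.6, 0.3]])

# Suppose node 0 has just been visited: forbid transitions into 0, then renormalize each row
P_next = P_toy.copy()
P_next[:, 0] = 0.0
P_next /= P_next.sum(axis=1, keepdims=True)
print(P_next[0])   # row 0 becomes [0.0, 0.625, 0.375]; the next node is drawn from this row
```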
###Code
def generate_k_trajectory(transition_matrix, start_dist, k):
"""
Generates a k trajectory according to a specified transition probability matrix for a discrete-time Markov chain, a probability distribution over the starting sites, and a specified number of drones.
It works more or less as before in the 1-drone example, but now we have control over how many drones are in play as well as their initial locations.
"""
n = transition_matrix.shape[0]
matrix = transition_matrix.copy()
# Draw k distinct start nodes (a simple random sample, i.e. without replacement) according to start_dist
starts = np.random.choice(np.arange(len(start_dist)), size=k, replace=False, p=start_dist)
trajectories = {"Drone {0:02d}".format(i): [start] for (i, start) in enumerate(starts)}
for (i, start) in enumerate(starts):
matrix[:, start] = 0.
normalize_nonnegative_matrix(matrix, 1)
for j in range(len(matrix)-k):
i = "Drone {0:02d}".format(np.random.randint(0, k))
trajectories[i].append(np.asscalar(draw_from(matrix[trajectories[i][-1], :].flatten())))
if j < len(matrix) - k-1:
matrix[:, trajectories[i][-1]] = 0.
normalize_nonnegative_matrix(matrix, 1)
return trajectories
# Demonstration
n = 12
P = np.random.rand(n, n)
starts = np.random.rand(n)
starts /= np.sum(starts)
normalize_nonnegative_matrix(P, 1)
generate_k_trajectory(P, starts, 4)
###Output
_____no_output_____ |
Autocorrelation_Simulation.ipynb | ###Markdown
Autocorrelation Simulation Experiment Econometrics 2 Alen Rožac, 19121848 ([email protected])
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
**Static regression** - data generating process (DGP):$$ y_t = \alpha + \beta x_t + u_t $$Compare the case where $u_t$ is white noise and the case where:$$ u_t = \rho u_{t-1} + v_t $$Parameters of DGP: $\alpha=1, \beta=1, \rho= 0.5$*****AR(1) model** - DGP$$ y_t = \beta y_{t-1} + u_t $$Parameters of DGP: $\beta=0.5, \rho=0.5$
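As a quick sanity check of the error process (illustrative, separate from the experiment below), simulating $u_t = \rho u_{t-1} + v_t$ with $\rho = 0.5$ and computing the lag-1 sample autocorrelation of $u_t$ should return a value close to 0.5:

```python
import numpy as np

np.random.seed(0)
rho, T = 0.5, 100000
v = np.random.normal(0, 0.5, T)
u = np.zeros(T)
for t in range(1, T):
    u[t] = rho * u[t-1] + v[t]

r1 = np.corrcoef(u[1:], u[:-1])[0, 1]   # lag-1 sample autocorrelation
print(round(r1, 3))                     # approximately 0.5
```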
###Code
# Generate noise
def noise(T=1000, mean=0, std=0.5):
return (np.random.normal(loc=mean, scale=std, size=T))
# Generate random from normal distribution
def normal(T=1000, mean=1, std=2):
return (np.random.normal(loc=mean, scale=std, size=T))
# Calculate slope (b)
from scipy.stats import linregress
def slope(y, x):
slope, intercept,_,_,_ = linregress(x, y)
return(slope)
# Calculate no constant slope(b)
import statsmodels.api as sm
def slope2(y, x):
result = sm.OLS(y, x).fit()
return(float(result.params))
# Functions to return regression variables. Each DGP returns y and x (x = y_{t-1} in the AR(1) cases)
# Static DGP - White Noise
def StaticDGP_wn(alpha=1, beta=1):
x = normal()
u_wn = noise()
y_wn = alpha + beta*x + u_wn
return(y_wn, x)
# Static DGP - Residual Autocorrelation
def StaticDGP_ac(alpha=1, beta=1, rho=0.5, T=1000):
x = normal()
ut_ac = np.zeros(T)
for t in range(1,T):
ut_ac[t] = rho * ut_ac[t-1] + noise(1)
y_ac = alpha + beta*x + ut_ac
return(y_ac, x)
# Dynamic DGP - White Noise
def DynamicDGP_wn(beta=0.5, T=1000):
yt_wn = np.ones(T)
for t in range(1,T):
yt_wn[t] = beta * yt_wn[t-1] + noise(1)
return(yt_wn, np.roll(yt_wn, 1))  # np.roll(y, 1)[t] = y[t-1], so the second array is the lagged series
# Dynamic DGP - Residual Autocorrelation
def DynamicDGP_ac(beta=0.5, rho=0.5, T=1000):
ut_ac, yt_ac = np.zeros(T), np.ones(T)
for t in range(1,T):
ut_ac[t] = rho * ut_ac[t-1] + noise(1)
yt_ac[t] = beta * yt_ac[t-1] + ut_ac[t]
return(yt_ac, np.roll(yt_ac, 1))  # lagged series y_{t-1}
###Output
_____no_output_____
###Markdown
Simulation 1: Static Regression
###Code
simulations = 5000
b_wn, b_ac = [],[]
for n in range(simulations):
y_wn, x_wn = StaticDGP_wn()
y_ac, x_ac = StaticDGP_ac()
b_wn.append(slope(y_wn, x_wn))
b_ac.append(slope(y_ac, x_ac))
# Plot Histograms
plt.hist(b_wn, bins=40, alpha=0.5, color="b", label="White noise")
plt.hist(b_ac, bins=40, alpha=0.5, color="r", label="Resid. Autocorr.")
plt.title("Static Regression - True beta=1")
plt.legend();
###Output
_____no_output_____
###Markdown
Simulation 2: AR(1) Model
###Code
simulations = 5000
bar_wn, bar_ac = [],[]
for n in range(simulations):
yt_wn, yt1_wn = DynamicDGP_wn()
yt_ac, yt1_ac = DynamicDGP_ac()
bar_wn.append(slope2(yt_wn, yt1_wn))
bar_ac.append(slope2(yt_ac, yt1_ac))
# Plot Histograms:
plt.hist(bar_wn, bins=30, alpha=0.5, color="b", label="White noise")
plt.hist(bar_ac, bins=30, alpha=0.5, color="r", label="Resid. Autocorr.")
plt.title("AR(1) Model - True beta=0.5")
plt.legend();
###Output
_____no_output_____ |
Stacking Classifier.ipynb | ###Markdown
Stacking Classifier SVM Model
###Code
Cs = [0.001, 0.01, 0.1, 1, 10]
gammas = [0.001, 0.01, 0.1, 1]
param_grid = {'C': Cs, 'gamma' : gammas}
grid_search_svm = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=10)
grid_search_svm.fit(X_tr, y_train)
best_param=grid_search_svm.best_params_
print("Best Hyperparameter: ",best_param)
from sklearn.metrics import roc_curve, auc
SVM_model= SVC(kernel='rbf',C=best_param['C'],gamma=best_param['gamma'],probability=True)
#DT = DecisionTreeClassifier(max_depth=50,min_samples_split=5)
###Output
_____no_output_____
###Markdown
LR Model
###Code
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] }
classifier = GridSearchCV(LogisticRegression(), param_grid,cv=10,scoring='roc_auc',return_train_score=True)
classifier.fit(X_tr, y_train)
best_param=classifier.best_params_
print("Best Hyperparameter: ",best_param)
p_C=best_param['C']
from sklearn.metrics import roc_curve, auc
Log_model = LogisticRegression(C=p_C)
###Output
C:\Users\ARAVIND NACHIAPPAN\Anaconda3\lib\site-packages\sklearn\linear_model\_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
(the same ConvergenceWarning is repeated for each remaining cross-validation fit)
###Markdown
Decision Tree Model
###Code
min_sample_leaf_val=[1,2,3,4,5,6,7,8,9,10]
criterion_val=['entropy','gini']
max_depth=[1,2,3,4,5,6,7,8,9,10]
min_samples_split=[10,100,150,200,250]
param_grid = {'max_depth':max_depth,'criterion':criterion_val,'min_samples_leaf':min_sample_leaf_val,'min_samples_split':min_samples_split}
DT_model=DecisionTreeClassifier()
clf = GridSearchCV(estimator=DT_model, param_grid=param_grid, cv=10)
clf.fit(X_tr,y_train)
best_param=clf.best_params_
print("Best Hyperparameter: ",best_param)
max_depth_DT=best_param['max_depth']
min_samples_split_DT=best_param['min_samples_split']
min_samples_leaf_DT=best_param['min_samples_leaf']
criterion_DT=best_param['criterion']
from sklearn.metrics import roc_curve, auc
DT_model= DecisionTreeClassifier(max_depth=max_depth_DT,min_samples_leaf=min_samples_leaf_DT,criterion=criterion_DT,min_samples_split=min_samples_split_DT)
#DT = DecisionTreeClassifier(max_depth=50,min_samples_split=5)
###Output
_____no_output_____
###Markdown
RF Model
###Code
n_estimator_val = [100,150,300,500,1000]
n_sample_leaf_val = [1,2,3,4,5,6]
max_feature_val=["auto","sqrt",None,0.9]
param_grid = {'n_estimators': n_estimator_val, 'min_samples_leaf' : n_sample_leaf_val,'max_features':max_feature_val}
RF_model=RandomForestClassifier()
grid_search_RF = GridSearchCV(estimator = RF_model,param_grid=param_grid, cv=10,scoring='roc_auc',return_train_score=True)
grid_search_RF.fit(X_tr, y_train)
best_param=grid_search_RF.best_params_
print("Best Hyperparameter: ",best_param)
from sklearn.metrics import roc_curve, auc
RF_model= RandomForestClassifier(n_estimators=best_param['n_estimators'],min_samples_leaf=best_param['min_samples_leaf'],max_features=best_param['max_features'])
#DT = DecisionTreeClassifier(max_depth=50,min_samples_split=5)
###Output
_____no_output_____
###Markdown
NB Model
###Code
from sklearn.naive_bayes import MultinomialNB
NB = MultinomialNB()
param_grid = {'alpha': [0.00001,0.0005, 0.0001,0.005,0.001,0.05,0.01,0.1,0.5,1,5,10,50,100],'class_prior': [None,[0.5,0.5], [0.1,0.9],[0.2,0.8]]}
clf = GridSearchCV(NB, param_grid=param_grid, cv=10, scoring='roc_auc',return_train_score=True)
clf.fit(X_tr, y_train)
results = pd.DataFrame.from_dict(clf.cv_results_)
results = results.sort_values(['param_alpha'])
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
A = results['param_alpha']
plt.plot(A, train_auc, label='Train AUC')
# this code is copied from here: https://stackoverflow.com/a/48803361/4084039
# plt.gca().fill_between(K, train_auc - train_auc_std,train_auc + train_auc_std,alpha=0.2,color='darkblue')
plt.plot(A, cv_auc, label='CV AUC')
# this code is copied from here: https://stackoverflow.com/a/48803361/4084039
# plt.gca().fill_between(K, cv_auc - cv_auc_std,cv_auc + cv_auc_std,alpha=0.2,color='darkorange')
plt.scatter(A, train_auc, label='Train AUC points')
plt.scatter(A, cv_auc, label='CV AUC points')
plt.xscale('log')
plt.legend()
plt.xlabel("Alpha: hyperparameter")
plt.ylabel("AUC")
plt.title("Hyper parameter Vs AUC plot")
plt.grid()
plt.show()
best_param=clf.best_params_
print("Best Hyperparameter: ",best_param)
from sklearn.metrics import roc_curve, auc
NB = MultinomialNB(alpha=best_param['alpha'],class_prior=best_param['class_prior'])
###Output
_____no_output_____
###Markdown
KNN Model
###Code
n_neighbors_val=[5,10,20,30,40,50]
KNN_model = KNeighborsClassifier()
param_grid={'n_neighbors':n_neighbors_val}
clf=GridSearchCV(estimator=KNN_model,param_grid=param_grid,cv=10,scoring='roc_auc',return_train_score=True)
clf.fit(X_tr,y_train)
results = pd.DataFrame.from_dict(clf.cv_results_)
results = results.sort_values(['param_n_neighbors'])
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
A = results['param_n_neighbors']
plt.plot(A, train_auc, label='Train AUC')
# this code is copied from here: https://stackoverflow.com/a/48803361/4084039
# plt.gca().fill_between(K, train_auc - train_auc_std,train_auc + train_auc_std,alpha=0.2,color='darkblue')
plt.plot(A, cv_auc, label='CV AUC')
# this code is copied from here: https://stackoverflow.com/a/48803361/4084039
# plt.gca().fill_between(K, cv_auc - cv_auc_std,cv_auc + cv_auc_std,alpha=0.2,color='darkorange')
plt.scatter(A, train_auc, label='Train AUC points')
plt.scatter(A, cv_auc, label='CV AUC points')
plt.xscale('log')
plt.legend()
plt.xlabel("Neighbor: hyperparameter")
plt.ylabel("AUC")
plt.title("Hyper parameter Vs AUC plot")
plt.grid()
plt.show()
best_param=clf.best_params_
print("Best Hyperparameter: ",best_param)
Neighbor=best_param['n_neighbors']
from sklearn.metrics import roc_curve, auc
Knn = KNeighborsClassifier(n_neighbors=best_param['n_neighbors'])
###Output
_____no_output_____
###Markdown
Stacking models
###Code
from mlxtend.classifier import StackingClassifier
import copy
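# Stack the tuned base learners; Log_model (the logistic regression model fit earlier in this notebook) serves as the meta-classifier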
sclf = StackingClassifier(classifiers=[Knn,NB,SVM_model,DT_model,RF_model], meta_classifier=Log_model)
sclf.fit(X_tr,y_train)
y_train_pred = sclf.predict_proba(X_tr)
y_test_pred = sclf.predict_proba(X_te)
train_fpr, train_tpr, tr_thresholds = roc_curve(y_train, y_train_pred[:,1])
test_fpr, test_tpr, te_thresholds = roc_curve(y_test, y_test_pred[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("AUC ROC Curve")
plt.grid()
plt.show()
y_test_predict=sclf.predict(X_te)
print("Recall for Stacked model:",metrics.recall_score(y_test,y_test_predict))
print("Precision for Stacked model:",metrics.precision_score(y_test,y_test_predict))
print("Accuracy for Stacked model:",metrics.accuracy_score(y_test,y_test_predict))
print("F-score for Stacked model:",metrics.f1_score(y_test,y_test_predict))
print("Log-loss for Stacked model:",metrics.log_loss(y_test,y_test_predict))
###Output
Recall for Stacked model: 0.0
Precision for Stacked model: 0.0
Accuracy for Stacked model: 0.6896551724137931
F-score for Stacked model: 0.0
Log-loss for Stacked model: 10.718930605317109
|
Score Prediction/Supervised_machine_learning.ipynb | ###Markdown
Score Predictor: Supervised Machine Learning. Task: predict the percentage of marks that a student is expected to score based upon the number of hours they studied. Approach: a simple linear regression model, since only two variables (hours and scores) are involved. The dataset used is available [here](https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv). Data Preparation
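For reference (a standard simple linear regression sketch, not specific to this dataset): the score $y$ is modelled as $y = \beta_0 + \beta_1 x$ with $x$ the hours studied, and ordinary least squares estimates $\hat{\beta}_1 = \sum_i (x_i - \bar{x})(y_i - \bar{y}) \, / \, \sum_i (x_i - \bar{x})^2$ and $\hat{\beta}_0 = \bar{y} - \hat{\beta}_1 \bar{x}$.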
###Code
### import all required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
## import the data
url = "http://bit.ly/w-data"
df = pd.read_csv(url)
print('Data read successfully!!')
## display the first 5 rows of dataframe
df.head()
###Output
_____no_output_____
###Markdown
Exploring the dataset
###Code
## display columns
df.columns
###Output
_____no_output_____
###Markdown
* There are only two columns: Hours and Scores
###Code
## check for null values
df.isnull().sum()
###Output
_____no_output_____
###Markdown
* There are no null values in either column
###Code
## print the shape of dataset
df.shape
###Output
_____no_output_____
###Markdown
* It's a very small dataset
###Code
## check for datatype
df.dtypes
df.describe()
###Output
_____no_output_____
###Markdown
Graph
###Code
## plot the graph
df.plot(x = 'Hours', y = 'Scores', style = 'x')
plt.title('Hours of study v/s score obtained')
plt.xlabel('Hours')
plt.ylabel('Score')
plt.show()
###Output
_____no_output_____
###Markdown
Splitting of data
###Code
X = df.iloc[:,:-1].values
y = df.iloc[:,1].values
print(X)
print(y)
#Importing train_test_split function from sklearn package
from sklearn.model_selection import train_test_split
#Splitting dataset into training and testing
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2, random_state=0)
print(X_train)
print(X_test)
print(y_train)
print(y_test)
###Output
[[3.8]
[1.9]
[7.8]
[6.9]
[1.1]
[5.1]
[7.7]
[3.3]
[8.3]
[9.2]
[6.1]
[3.5]
[2.7]
[5.5]
[2.7]
[8.5]
[2.5]
[4.8]
[8.9]
[4.5]]
[[1.5]
[3.2]
[7.4]
[2.5]
[5.9]]
[35 24 86 76 17 47 85 42 81 88 67 30 25 60 30 75 21 54 95 41]
[20 27 69 30 62]
###Markdown
Train the model
###Code
from sklearn.linear_model import LinearRegression
#Building linear regression function
regr = LinearRegression()
#Training the linear regression model
regr.fit(X_train,y_train)
print("Training model is completed")
###Output
Training model is completed
###Markdown
Make Predictions
###Code
#Making predictions for testing data
print(X_test)
y_pred = regr.predict(X_test)
print(y_pred)
###Output
[[1.5]
[3.2]
[7.4]
[2.5]
[5.9]]
[16.88414476 33.73226078 75.357018 26.79480124 60.49103328]
###Markdown
Compare actual and predicted values
###Code
comp = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
comp
# simple line graph
comp.plot();
###Output
_____no_output_____
###Markdown
Train v/s Test data
###Code
plt.scatter(x = X_train, y = y_train)
plt.plot(X_test, y_pred, color = 'red')
plt.title('Train v/s Test')
plt.xlabel('Hours studied')
plt.ylabel('Score obtained')
plt.show()
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
#Model evaluation using metrics function
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:',metrics.mean_squared_error(y_test,y_pred))
print('R-Squared Score:', metrics.r2_score(y_test,y_pred))
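# Note: LinearRegression.score() returns the R^2 coefficient, which is what is reported below as "accuracy"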
print("Training accuracy: {} %".format(regr.score(X_train,y_train)*100))
print("Testing accuracy : {} %".format(regr.score(X_test,y_test)*100))
###Output
Mean Absolute Error: 4.183859899002975
Mean Squared Error: 21.598769307217406
R-Squared Score: 0.9454906892105355
Training accuracy: 95.15510725211553 %
Testing accuracy : 94.54906892105355 %
###Markdown
Question 1: What will be the predicted score if a student studies for 9.25 hrs in a day?
###Code
hours = 9.25
new_pred = regr.predict([[hours]])
print("No of Hours studied = {}".format(hours))
print("Predicted Score = {}".format(int(round(new_pred[0]))))
###Output
No of Hours studied = 9.25
Predicted Score = 94
###Markdown
Question 2: What will be the predicted score if a student studies for 5 hrs in a day?
###Code
hours = 5.0
new_pred = regr.predict([[hours]])
print("No of Hours studied = {}".format(hours))
print("Predicted Score = {}".format(int(round(new_pred[0]))))
# save the model
import pickle
filename = 'student_score.h5'
pickle.dump(regr,open(filename, 'wb'))
# load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
hour = 10
print('Prediction : ', int(round(loaded_model.predict([[hour]])[0])))
###Output
Prediction : 101
|
CropCNN_Demo.ipynb | ###Markdown
EE244: Final Project Demo
###Code
# Import Libaries
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data import Subset
import torchvision
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from torch.optim import lr_scheduler
import time
import os
import copy
from torch.utils.data.sampler import WeightedRandomSampler
import seaborn as sn
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
###Output
_____no_output_____
###Markdown
Load the Model
###Code
# Connect to Google Drive
from google.colab import drive
drive.mount('/content/gdrive')
# Load the Best Saved Model
path_to_saved_model = '/content/gdrive/MyDrive/EE244/output/'
model_trained = torch.load(path_to_saved_model+"model-best.pth")
model_trained.eval() # Convert to evaluation only, this is faster
# Label Names
labels = ["Crop","Weed1","Weed2","Weed3","Weed4","Weed5","Weed6","Weed7","Weed8","Weed9"]
###Output
Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount("/content/gdrive", force_remount=True).
###Markdown
Load Sample Data
###Code
# Path to the Demo Data
path_to_data = '/content/gdrive/MyDrive/EE244/data/Demo'
# Set up the same image transforms
transform = transforms.Compose(
[
transforms.Resize([224,224]),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) #MEAN & STDDEV for ResNet
])
# Check GPU or CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load the Images from the Image Folder
dataset = datasets.ImageFolder(root=path_to_data, transform=transform)
# Build a Data Loader
dataloader = DataLoader(dataset,batch_size=1, num_workers=2,shuffle=True)
###Output
_____no_output_____
###Markdown
Demo the Model
###Code
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean # Add back the mean and std for visualization
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
def runDemo(model,dataloader,label_names,show_img=True,verbose=True):
"""Calculates the Confusion Matrix for the given dataloader"""
# Fill out the confusion Matrix
y_label = np.array([])
y_predict = np.array([])
with torch.no_grad():
for i, (inputs, classes) in enumerate(dataloader):
inputs = inputs.to(device)
classes = classes.to(device)
outputs = model(inputs)
# Gets the predicted label
_, preds = torch.max(outputs, 1)
temp_labels = classes.view(1,-1).cpu().detach().numpy()
temp_preds = preds.view(1,-1).cpu().detach().numpy()
if verbose:
print("Labels:",temp_labels)
print("Predictions:",temp_preds)
y_label = np.hstack([y_label,temp_labels]) if y_label.size else temp_labels
y_predict = np.hstack([y_predict,temp_preds]) if y_predict.size else temp_preds
if show_img:
name_actual = label_names[int(temp_labels.flatten())]
name_predict = label_names[int(temp_preds.flatten())]
if verbose:
print(name_actual)
print(name_predict)
title = "Predicted: " + name_predict + " | Actual: " + name_actual
out = torchvision.utils.make_grid(inputs.cpu().detach())
imshow(out,title)
runDemo(model_trained,dataloader,labels,True,False)
###Output
_____no_output_____ |
manuscript/scrna/scrna.ipynb | ###Markdown
###Code
!git clone https://github.com/KrishnaswamyLab/MAGIC
!git clone https://github.com/zsteve/wtf
!pip install magic-impute
!pip install tensorly
!pip install scanpy
!pip install pykeops[colab]
!git clone https://github.com/ComputationalSystemsBiology/ot-scOmics
import magic
import scprep
import copy
import numpy as np
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
PLT_CELL = 2.5
X = pd.read_csv('/content/ot-scOmics/data/liu_scrna_preprocessed.csv.gz', index_col=0)
X_orig = copy.copy(X)
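# Simulate dropout noise: randomly zero out ~90% of the entries (X_orig keeps the clean copy)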
X[np.random.uniform(size = X.shape) < 0.9] = 0
clusters = np.array([col.split('_')[-1] for col in X.columns])
clusters_id = np.unique(clusters, return_inverse = True)[1]
import umap
reducer = umap.UMAP()
embedding = reducer.fit_transform(X.T)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id)
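# Column-normalize so that each cell's expression profile sums to 1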
X_norm = np.array(X/X.sum(0))
import torch
import tensorly as tl
from tensorly import tenalg, decomposition, cp_tensor
from tensorly.contrib.sparse import tensor as sptensor
tl.set_backend("pytorch")
torch.set_default_tensor_type(torch.DoubleTensor)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tl_dtype = tl.float64
import sys
sys.path.insert(0, "/content/wtf/src")
import wtf
genenames = X.index.values
x = tl.tensor(np.array(X), dtype = tl_dtype).to(device)
expressed_tot = x.sum(1)
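# Filter lowly expressed genes: keep those whose total counts exceed the 25th percentile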
thresh = np.quantile(expressed_tot.cpu(), 0.25)
expressed_idx = (x.sum(1) > thresh).cpu()
# expressed_idx = expressed_idx | ((genenames == 'VIM') | (genenames == 'CDH1') | (genenames == 'ZEB1'))
import scipy
from scipy import stats
plt.scatter(*scipy.stats.probplot(expressed_tot.cpu())[0])
expressed_idx = expressed_idx.bool()
x.shape
import pykeops
from pykeops.torch import LazyTensor
x = x[expressed_idx, :]
x = x/x.sum(0)
gene_means = x.mean(1).reshape(-1, 1)
gene_dev = torch.sqrt(((x - gene_means)**2).sum(1)).reshape(-1, 1)
x_std = (x - gene_means)/gene_dev
x_i = LazyTensor(x_std.view(1, x.shape[0], x.shape[1]))
x_j = LazyTensor(x_std.view(x.shape[0], 1, x.shape[1]))
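# Ground cost between genes: rows of x_std are centered and unit-norm, so x_std @ x_std.T is the Pearson correlation matrix and C_ij = sqrt(1 - corr_ij + 1e-9)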
C = (1 - (x_std @ x_std.T) + 1e-9).sqrt()
# C = 1 - (x_i * x_j).sum(2)
import sklearn
from sklearn import decomposition
import copy
r_nmf = [10, ]*2
S_nmf = tl.zeros(r_nmf).to(device)
for i in range(r_nmf[0]):
S_nmf[i, i] = 1
X0_nmf = (x.T).to(device)
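# A rough NMF (NNDSVD init, a single iteration) is used only to initialize the OT-regularized factorization (wtf) below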
nmf_model = sklearn.decomposition.NMF(n_components = r_nmf[0], init = "nndsvd", max_iter = 1)
U_nmf = torch.Tensor(nmf_model.fit_transform(X0_nmf.cpu()))
V_nmf = torch.Tensor(nmf_model.components_)
U_nmf = (U_nmf.T/U_nmf.sum(1)).T
V_nmf = (V_nmf.T/V_nmf.sum(1)).T
A_nmf = copy.deepcopy([U_nmf, V_nmf.T])
A_nmf = [a.to(device) for a in A_nmf]
params_nmf = {"n_iter" : 10}
params_nmf['lr'] = np.ones(params_nmf['n_iter'])*1
params_nmf['lamda'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*10
params_nmf['optim_modes'] = [0, ]
params_nmf['rho'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*0.01
params_nmf['eps'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*0.05
import importlib
importlib.reload(wtf)
max_iter, print_inter, check_iter, unbal = (100, 10, 10, False)
tol = 1e-2
mode = "lbfgs"
for i in range(params_nmf['n_iter']):
print("Block iteration ", i)
print("Mode 0")
m0 = wtf.FactorsModel(X0_nmf, 0, [C, ], S_nmf, A_nmf, params_nmf['rho'][i, :], params_nmf['eps'][i, :], params_nmf['lamda'][i, :],
ot_mode = "slice", U_init = None, device = device, unbal = unbal, norm = "row")
wtf.solve(m0, lr = params_nmf['lr'][i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol)
A_nmf[0] = m0.compute_primal_variable().detach()
print("Mode 1")
m1 = wtf.FactorsModel(X0_nmf, 1, [C, ], S_nmf, A_nmf, params_nmf['rho'][i, :], params_nmf['eps'][i, :], params_nmf['lamda'][i, :],
ot_mode = "slice", U_init = None, device = device, unbal = unbal, norm = "col")
wtf.solve(m1, lr = params_nmf['lr'][i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol)
A_nmf[1] = m1.compute_primal_variable().detach()
X_hat_nmf = tl.tenalg.multi_mode_dot(S_nmf, A_nmf ).cpu()
nmf_model = sklearn.decomposition.NMF(n_components = r_nmf[0], init = "nndsvd")
U_nmf = torch.Tensor(nmf_model.fit_transform(X0_nmf.cpu()))
V_nmf = torch.Tensor(nmf_model.components_)
X_nmf = U_nmf @ V_nmf
plt.subplot(1, 2, 1)
plt.imshow(A_nmf[0].cpu(), interpolation = "nearest")
plt.axis("auto")
plt.subplot(1, 2, 2)
plt.imshow(A_nmf[1].cpu(), interpolation = "nearest", vmax = 1e-3)
plt.axis("auto")
plt.subplot(1, 2, 1)
plt.imshow(U_nmf, interpolation = "nearest")
plt.axis("auto")
plt.subplot(1, 2, 2)
plt.imshow(V_nmf.T, interpolation = "nearest")
plt.axis("auto")
plt.imshow(X_hat_nmf, interpolation = 'nearest', vmax = 0.005)
plt.axis("auto")
plt.imshow(x.cpu().T, interpolation = 'nearest', vmax = 0.005)
plt.axis('auto')
pca = sklearn.decomposition.PCA(n_components = 2)
X_pca = pca.fit_transform(X)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c = clusters_id)
embedding = reducer.fit_transform(A_nmf[0].cpu())
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id)
plt.title("WNMF")
embedding = reducer.fit_transform(U_nmf)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id)
plt.title("NMF")
import sklearn.manifold
embed_umap = umap.UMAP()
embedding_orig = embed_umap.fit_transform(X_orig.T)
embedding = embed_umap.fit_transform(X_norm.T)
embedding_wnmf = embed_umap.fit_transform(X_hat_nmf)
embedding_nmf = embed_umap.fit_transform(X_nmf)
plt.figure(figsize = (4*PLT_CELL, 1*PLT_CELL))
plt.subplot(1, 4, 1)
plt.scatter(embedding_orig[:, 0], embedding_orig[:, 1], c = clusters_id, alpha = 0.5, s = 16)
plt.title("Original")
plt.subplot(1, 4, 2)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id, alpha = 0.5, s = 16)
plt.title("Noisy")
plt.subplot(1, 4, 3)
plt.scatter(embedding_wnmf[:, 0], embedding_wnmf[:, 1], c = clusters_id, alpha = 0.5, s = 16)
plt.title("WNMF")
plt.subplot(1, 4, 4)
plt.scatter(embedding_nmf[:, 0], embedding_nmf[:, 1], c = clusters_id, alpha = 0.5, s = 16)
plt.title("NMF")
plt.tight_layout()
plt.savefig("liu_scrna_noisy0.9.pdf")
import sklearn.manifold
embed_spec = sklearn.manifold.SpectralEmbedding()
plt.figure(figsize = (10, 2.5))
plt.subplot(1, 4, 1)
embedding = embed_spec.fit_transform(X_orig.T)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id, alpha = 0.5)
plt.title("Original")
plt.subplot(1, 4, 2)
embedding = embed_spec.fit_transform(X.T)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id, alpha = 0.5)
plt.title("Noisy")
plt.subplot(1, 4, 3)
embedding = embed_spec.fit_transform(X_hat_nmf)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id, alpha = 0.5)
plt.title("WNMF")
plt.subplot(1, 4, 4)
embedding = embed_spec.fit_transform(X_nmf)
plt.scatter(embedding[:, 0], embedding[:, 1], c = clusters_id, alpha = 0.5)
plt.title("NMF")
plt.figure(figsize = (15, 5))
plt.subplot(1, 3, 1)
plt.scatter(X_nmf[:, genenames[expressed_idx] == 'VIM'], X_nmf[:, genenames[expressed_idx] == 'CDH1'],
c = X_nmf[:, genenames[expressed_idx] == 'ZEB1'])
plt.xlabel('vim')
plt.ylabel('cdh1')
plt.subplot(1, 3, 2)
plt.scatter(X_hat_nmf[:, genenames[expressed_idx] == 'VIM'], X_hat_nmf[:, genenames[expressed_idx] == 'CDH1'],
c = X_hat_nmf[:, genenames[expressed_idx] == 'ZEB1'])
plt.xlabel('vim')
plt.ylabel('cdh1')
plt.subplot(1, 3, 3)
plt.scatter(X0_nmf[:, genenames[expressed_idx] == 'VIM'].cpu(), X0_nmf[:, genenames[expressed_idx] == 'CDH1'].cpu(),
c = X0_nmf[:, genenames[expressed_idx] == 'ZEB1'].cpu())
plt.xlabel('vim')
plt.ylabel('cdh1')
###Output
_____no_output_____ |