path (string, 7–265 chars) | concatenated_notebook (string, 46 chars–17M)
---|---
class4/model_save.ipynb | ###Markdown
###Code
import tensorflow as tf
import os
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['sparse_categorical_accuracy'])
checkpoint_save_path = "./checkpoint/mnist.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
print('-------------load the model-----------------')
model.load_weights(checkpoint_save_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
save_weights_only=True,
save_best_only=True)
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
callbacks=[cp_callback])
model.summary()
###Output
_____no_output_____ |
Big-Data-Clusters/CU6/Public/content/install/sop054-install-azdata.ipynb | ###Markdown
SOP054 - Install azdata CLI (using pip)
=======================================

Steps
-----

Common functions

Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("sop054-install-azdata.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'python': [], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)']}
error_hints = {'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb']], 'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb']]}
install_hint = {'python': [], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
###Output
_____no_output_____
###Markdown
Install azdata CLI
###Code
run(f'python --version')
run(f'python -m pip install -r https://aka.ms/azdata')
###Output
_____no_output_____
###Markdown
Display azdata version
###Code
run("azdata --version")
###Output
_____no_output_____
###Markdown
Related (SOP063, SOP054)
###Code
print('Notebook execution complete.')
###Output
_____no_output_____ |
notebooks/in_progress/part_3_1_tf_idf_random_forest.ipynb | ###Markdown
Detecting and Classifying Toxic Comments Part 3-1: TF*IDF & Random Forest Classifiers

It may be possible to employ sequential binary models in order to get better results with rarer cases. If we first classify Toxic and Not Toxic, we could further process only the Toxic results against models that had been trained only to recognise sub-classes of toxic comments.

Python Library Imports
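As a rough illustration of that cascade (not code from this project), the sketch below wires up two binary random-forest stages on synthetic stand-in data; the arrays `X`, `y_toxic` and `y_obscene` are invented placeholders for the featurized comments and their labels.

```python
# Hedged sketch of the sequential / cascaded binary idea on synthetic stand-in data.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(42)
X = rng.rand(200, 5)                 # placeholder for TF*IDF features
y_toxic = rng.randint(0, 2, 200)     # stage 1 target: toxic vs not toxic
y_obscene = rng.randint(0, 2, 200)   # stage 2 target: one toxic sub-class

# Stage 1: toxic vs not toxic, trained on every comment
stage_1 = RandomForestClassifier(n_estimators=100, random_state=42)
stage_1.fit(X, y_toxic)

# Stage 2: a sub-class model trained only on the comments labelled toxic
toxic_rows = y_toxic == 1
stage_2 = RandomForestClassifier(n_estimators=100, random_state=42)
stage_2.fit(X[toxic_rows], y_obscene[toxic_rows])

# At prediction time, stage 2 is only consulted for rows stage 1 flags as toxic
flagged = stage_1.predict(X) == 1
sub_class_pred = stage_2.predict(X[flagged])
```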
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Import spaCy
###Code
import spacy
from spacy.lang.en import English
spacy_stopwords = spacy.lang.en.stop_words.STOP_WORDS
from spacy.tokens import Doc
# import custom trained spaCy model
nlp = spacy.load("../models/spacy_2/")
###Output
_____no_output_____
###Markdown
Import nltk
###Code
# nltk imports
import nltk
from nltk.corpus import stopwords
###Output
_____no_output_____
###Markdown
Import Custom Functions
###Code
import sys
# add src folder to path
sys.path.insert(1, '../src')
# from text_prep import tidy_series, uppercase_proportion_column
from spacy_helper import doc_check
###Output
_____no_output_____
###Markdown
Getting info from preserved spaCy docs

I've had a little difficulty with getting the doc properties to un-pickle and maintain the ability to further process them later. Docs seem to depend on some vocab properties of the model that are not saved within the doc itself.
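One workaround that I believe addresses this (a hedged sketch, not code used in this project) is to serialize docs with `spacy.tokens.DocBin` and re-hydrate them against the pipeline's vocab, which is exactly the dependency described above; the blank English pipeline and the file name here are stand-ins.

```python
# Hedged sketch: persist spaCy Docs with DocBin instead of pickling them directly.
import spacy
from spacy.tokens import DocBin

nlp_demo = spacy.blank("en")                       # stand-in for the custom model above
docs = [nlp_demo("example toxic comment"), nlp_demo("another comment")]

doc_bin = DocBin(store_user_data=True)
for doc in docs:
    doc_bin.add(doc)

with open("docs_demo.spacy", "wb") as f:           # serialize to bytes on disk
    f.write(doc_bin.to_bytes())

# Later (or in another session): re-hydrate against the same pipeline's vocab
with open("docs_demo.spacy", "rb") as f:
    restored = list(DocBin().from_bytes(f.read()).get_docs(nlp_demo.vocab))
```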
###Code
%%time
'''
CPU times: user 3min 44s, sys: 33.5 s, total: 4min 17s
Wall time: 4min 32s
'''
X_train = pd.read_pickle('../data/basic_df_split/X_train_2-1.pkl')
# load y_train
! ls ../data/basic_df_split/
y_train = pd.read_pickle('../data/basic_df_split/basic_y_train.pkl')
X_test = pd.read_pickle('../data/basic_df_split/basic_X_test.pkl')
y_test = pd.read_pickle('../data/basic_df_split/basic_y_test.pkl')
X_train.columns
###Output
_____no_output_____
###Markdown
Create list of lemmas, less nltk stopwords
###Code
stopw_set = set(stopwords.words('english'))
%%time
# remove lemmas that appear in nltk stopword list
X_train['lemmas_less'] = X_train['lemmas'].apply(lambda row: [lemma for lemma in row if lemma not in stopw_set])
###Output
CPU times: user 1.48 s, sys: 993 ms, total: 2.47 s
Wall time: 3.03 s
###Markdown
Further reduce lemmas by min_length & max_length

Exploration of the corpus vocabulary suggests that lemmas of 2 or fewer characters are likely not very useful and can be removed to reduce features. Lemmas longer than 20 characters are often run-on words (where spaces have been omitted). Although a few of them have words hidden within them that may be considered toxic, their rarity and non-standard format make them unlikely to be generalizable.
###Code
%%time
min_l = 3
max_l = 20
X_train['lemmas_less'] = X_train['lemmas_less'].apply(lambda row: [lemma for lemma in row if min_l <= len(lemma) <= max_l])  # keep only lemmas between min_l and max_l characters
###Output
CPU times: user 663 ms, sys: 77.1 ms, total: 740 ms
Wall time: 817 ms
###Markdown
TF*IDF

Scikit Learn Imports
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
%%time
tfidf_sklearn = TfidfVectorizer(ngram_range = (1,3),
min_df = 2)
# return sparse matrix
# join list into individual strings
tfidf_values = tfidf_sklearn.fit_transform(X_train['lemmas_less'].apply(lambda x: " ".join(x)))
'''
last values (with min_df as 1):
<106912x4089577 sparse matrix of type '<class 'numpy.float64'>'
with 8002628 stored elements in Compressed Sparse Row format>
'''
tfidf_values
%%time
'''
CPU times: user 11.9 s, sys: 8.27 s, total: 20.2 s
Wall time: 23 s
'''
lemmas_less_tfidf = pd.DataFrame(tfidf_values.toarray(),
columns=tfidf_sklearn.get_feature_names())
lemmas_less_tfidf.shape
y_train['toxic'].shape
# sum(lemmas_less_tfidf['jerk'])
# search_term = 'jerk'
# # running this portion will crash the kernel
# bool_mask = lemmas_less_tfidf.sort_values(search_term, ascending=False)[search_term][:10]
# bool_mask
###Output
_____no_output_____
###Markdown
Toxic: Random Forest Classifier

Resources:
- [Explanation of Warm Start for RFC (not what you may think)](https://stackoverflow.com/questions/42757892/how-to-use-warm-start/42763502)
- [TfidfVectorizer Docs](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer)
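As the first link explains, `warm_start=True` on a random forest is about growing the ensemble on refit rather than mini-batch learning. A hedged toy illustration on synthetic data (not this project's features):

```python
# Hedged sketch: warm_start on a RandomForestClassifier adds trees on refit.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X_demo = rng.rand(300, 4)
y_demo = rng.randint(0, 2, 300)

rf = RandomForestClassifier(n_estimators=50, warm_start=True, random_state=0)
rf.fit(X_demo, y_demo)          # ensemble has 50 trees
rf.n_estimators += 50
rf.fit(X_demo, y_demo)          # the first 50 trees are kept, 50 new ones are added
print(len(rf.estimators_))      # -> 100
```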
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
toxic_rfc = RandomForestClassifier(n_estimators=100,
max_depth = 10,
oob_score=True,
n_jobs=-1,
random_state=42,
warm_start=True)
X = lemmas_less_tfidf
y = y_train
toxic_logistic = LogisticRegression(warm_start=True,
random_state=42,
verbose=True,
solver='sag',
multi_class='ovr',
max_iter=100,
n_jobs=-1)
y.columns
%%time
toxic_logistic.fit(X[:10],y['obscene'][:10])
def batch_train(X, y, model, batch_size=1000, verbose=False, start=0):
    # Fit `model` on consecutive slices of X/y of size `batch_size`.
    remaining = len(X)
    if batch_size > remaining:
        batch_size = remaining
    b = start
    e = batch_size
    while e <= remaining:
        model.fit(X[b:e], y[b:e])
        if verbose:
            print(f'trained rows {b}:{e}')
        b += batch_size   # advance the window so the loop terminates
        e += batch_size
    return model
#
# naive_toxic = BernoulliNB
# print(vect_X_train.shape)
# print(y_train.shape)
# print(X_train.shape)
# vect_X_train.iloc[0]
# naive_toxic.fit(vect_X_train, y_train)
%load_ext autoreload
%autoreload 2
import sys
# add src folder to path
sys.path.insert(1, '../tokens')
# from text_prep import tidy_series, uppercase_proportion_column
from twitter_tok import twitter_api_key, api_secret_key, bearer_token, eml, password
import requests
# import sys
# class ListStream:
# def __init__(self):
# self.data = []
# def write(self, s):
# self.data.append(s)
# sys.stdout = x = ListStream()
# for i in range(2):
# print ('i = ', i)
# sys.stdout = sys.__stdout__
# print(x.data)
###Output
_____no_output_____
###Markdown
https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/master/Sampled-Stream/sampled-stream.py
https://developer.twitter.com/en/docs/twitter-api/tweets/sampled-stream/introduction
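Following those two links, a hedged sketch of hitting the v2 sampled-stream endpoint directly with `requests`; `bearer_token` is the value imported from the tokens module earlier, and everything else here is illustrative rather than this notebook's streaming code.

```python
# Hedged sketch of the v2 sampled-stream endpoint, adapted from the linked sample code.
import json
import requests

def sample_stream_texts(bearer_token, max_tweets=10):
    url = "https://api.twitter.com/2/tweets/sample/stream"
    headers = {"Authorization": f"Bearer {bearer_token}"}
    texts = []
    with requests.get(url, headers=headers, stream=True) as resp:
        resp.raise_for_status()
        for raw_line in resp.iter_lines():
            if not raw_line:          # skip keep-alive newlines
                continue
            tweet = json.loads(raw_line)
            texts.append(tweet["data"]["text"])
            if len(texts) >= max_tweets:
                break
    return texts

# texts = sample_stream_texts(bearer_token, max_tweets=10)
```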
###Code
%load_ext autoreload
%autoreload 2
sys.path.insert(1, '../tokens')
from twitter_stream import *
import requests
import os
import json
import os
import tweepy as tw
import pandas as pd
###Output
_____no_output_____
###Markdown
Had little luck with GetOldTweets3
https://pypi.org/project/GetOldTweets3/
###Code
import twitter_rules
# twitter_rules.main()
list_ = list()
twitter_rules.main(text_list=list_, total_tweets=10)
print(len(list_))
list_
###Output
10
|
Web_Crawling_Project10_Celebrity_Collection.ipynb | ###Markdown
Crawl the list of Korean celebrity names from sections 3 through 17 of the page
###Code
driver = webdriver.Chrome()
name_list_url = 'https://namu.wiki/w/%EC%97%B0%EC%98%88%EC%9D%B8/%EB%B3%B8%EB%AA%85%20%EB%B0%8F%20%EC%98%88%EB%AA%85'
driver.get(name_list_url)
def preprocess(name):
name = re.sub('\(.+\)', '', name)
name = name.strip()
return name
name_list = []
error_list = []
for i in range(3,18):
class_ = driver.find_elements_by_class_name('wiki-heading-content')[i]
raw_name_list = class_.find_elements_by_css_selector('ul.wiki-list li')
for raw_name in raw_name_list:
split_name = raw_name.text.split('→')
stage_name = preprocess(split_name[0])
try:
real_name = re.search('[가-힣]+', split_name[1])[0].strip()
except Exception as e:
error_list.append(raw_name.text)
continue
new_name = stage_name + ' ' + real_name
name_list.append(new_name)
print(f'{len(name_list)} persons returned')
print(f'{len(error_list)} persons got error')
print(error_list)
###Output
1602 persons returned
3 persons got error
['동호(前 유키스) - 신동호☆', '마크(NCT) → Mark Lee☆', '유정(라붐) → → 김유정☆']
###Markdown
Preprocessing

Fix the names that were formatted differently from the others because of typos.
###Code
stage_name = preprocess(error_list[0].split('-')[0])
real_name = re.search('[가-힣]+', error_list[0].split('-')[1])[0]
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
stage_name = preprocess(error_list[1].split('→')[0])
real_name = re.search('[가-힣A-Za-z ]+', error_list[1].split('→')[1])[0].strip()
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
stage_name = preprocess(error_list[2].split('→')[0])
real_name = re.search('[가-힣A-Za-z ]+', error_list[2].split('→')[-1])[0].strip()
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
###Output
동호 신동호
마크 Mark Lee
유정 김유정
###Markdown
Save the name list
###Code
name_list = name_list[6:]
with open('celebrity/celebrity_name.pkl', 'wb') as f:
pickle.dump(name_list, f)
with open('celebrity/celebrity_name.pkl', 'rb') as f:
celebrity_name_list = pickle.load(f)
len(celebrity_name_list)
###Output
_____no_output_____
###Markdown
Conditions for face crawling

Set conditions so that, among the many face images, only the ones best suited for embedding are crawled:
- the face can be detected and embedded
- exclude images where sunglasses are worn
- exclude images where a mask is worn
- exclude images where the face is turned to the left or right
- exclude images smaller than 340x340 (too many images were being excluded, so the size threshold was later adjusted)

Sunglasses detection

Detect whether sunglasses are worn using the pixel values around the eyes:
- left eye: landmarks 36–41
- right eye: landmarks 42–47
- brightness is measured using luminance
###Code
example = fr.load_image_file('celebrity/GRAY 이성화.jpg')
example_show = example.copy()
landmarks = fr.face_landmarks(example)
landmarks = []
for k, v in fr.face_landmarks(example)[0].items():
landmarks.extend(v)
for number, landmark in enumerate(landmarks):
cv2.circle(example_show, landmark, 2, (0, 255, 255), -1)
plt.imshow(example_show)
def get_brightness_around_eye(image):
try:
landmarks = []
for k, v in fr.face_landmarks(image)[0].items():
landmarks.extend(v)
left1, right1 = landmarks[36][0], landmarks[39][0]
top1, bottom1 = landmarks[37][1], landmarks[41][1]
left2, right2 = landmarks[42][0], landmarks[45][0]
top2, bottom2 = landmarks[43][1], landmarks[47][1]
image[top1:bottom1, left1:right1] = np.nan
image[top2:bottom2, left2:right2] = np.nan
left_glass = image[top1-10:bottom1+10, left1-5:right1+5]
right_glass = image[top2-10:bottom2+10, left2-5:right2+5]
luminance_left = np.nanmean(0.2126*left_glass[:,:,0] + 0.7152*left_glass[:,:,1] + 0.0722*left_glass[:,:,2])
luminance_right = np.nanmean(0.2126*right_glass[:,:,0] + 0.7152*right_glass[:,:,1] + 0.0722*right_glass[:,:,2])
#luminance_left2 = np.nanmean(left_glass[:,:,0] + left_glass[:,:,1] + left_glass[:,:,2])
#luminance_right2 = np.nanmean(right_glass[:,:,0] + right_glass[:,:,1] + right_glass[:,:,2])
return luminance_left, luminance_right
except:
return None
###Output
_____no_output_____
###Markdown
Mask detection

Exclude faces that are wearing a mask:
- detect mask wearing using the pixel values around the mouth
- uses landmarks 2, 6, 11 and 14
###Code
# 랜드마크 2, 6, 11, 14
def get_brightness_around_mouse(image):
landmarks = []
for k, v in fr.face_landmarks(image)[0].items():
landmarks.extend(v)
left1, right1 = landmarks[6][0], landmarks[11][0]
top1, bottom1 = landmarks[2][1], landmarks[6][1]
mask = image[top1:bottom1, left1:right1]
mask_luminance = np.nanmean(0.2126*mask[:,:,0] + 0.7152*mask[:,:,1] + 0.0722*mask[:,:,2])
return mask_luminance
###Output
_____no_output_____
###Markdown
Measuring the face angle

Measure how far the face is turned to the left or right and exclude images that are not frontal:
- estimate the rotation from the difference between the left and right cheek lengths
- uses landmarks 2, 14 and 30
###Code
def ratio_of_face_rotate(image):
landmarks = []
for k, v in fr.face_landmarks(image)[0].items():
landmarks.extend(v)
left = np.linalg.norm(np.array(landmarks[30]) - np.array(landmarks[2]))
right = np.linalg.norm(np.array(landmarks[30]) - np.array(landmarks[14]))
ratio = min(left, right) / max(left, right)
return ratio
###Output
_____no_output_____
###Markdown
Image size condition

Rows: 404.16
Columns: 340.0

Image crawling

Helper functions used during crawling are defined separately.

Saving images

Download each image using the image URL obtained from the site.
###Code
def save_image_from_url(image_url):
with urlopen(image_url.get_attribute('src')) as f:
with open(f'celebrity/{name}.jpg', 'wb') as file_name:
img = f.read()
file_name.write(img)
return img, file_name
###Output
_____no_output_____
###Markdown
Download the images
###Code
delay=2
error_name = []
driver = webdriver.Chrome()
for name in name_list:
driver.get(f'https://search.naver.com/search.naver?where=image&sm=tab_jum&query={name}')
time.sleep(0.2)
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.TAG_NAME, 'section')))
driver.find_element_by_tag_name('html').send_keys(Keys.END)
image_url_list = driver.find_elements_by_css_selector('div.tile_item._item img._image._listImage')[:50]
is_succeed = 0
for image_url in image_url_list:
img, file_name = save_image_from_url(image_url)
error_message = ''
try:
image = fr.load_image_file(file_name.name)
if (image.shape[0] < 200) or (image.shape[1] < 200):
print(name, 'Too Small')
continue
locations = fr.face_locations(image)
if len(locations) != 1:
print(name, 'No Face')
continue
top, right, bottom, left = locations[0]
face_cropped = image[top:bottom, left:right]
if (face_cropped.shape[0] < 30) or (face_cropped.shape[1] < 30):
print(name, 'Cropped Too Small')
continue
face_embedding = fr.face_encodings(face_cropped)
if len(face_embedding) != 1:
print(name, 'No Embedding')
continue
ratio = ratio_of_face_rotate(image)
if ratio < 0.85:
print(name, 'Rotated')
continue
left_eye, right_eye = get_brightness_around_eye(image)
if (left_eye < 60) & (right_eye < 60):
print(name, 'Maybe Sunglasses')
continue
mask = get_brightness_around_mouse(image)
if mask > 220:
print(name, 'Maybe Mask?')
is_succeed = 1
print(name, 'Succeed')
break
except Exception as ex:
print(name, 'failed detection', ex)
continue
if is_succeed == 0:
os.remove(file_name.name)
error_name.append(name)
print(name, 'collecting Failed')
except Exception as ex:
print(name, ex)
error_name.append(name)
###Output
_____no_output_____ |
05_Custom_Data_Image/04_Images_With_Labels_In_A_CSV_File/01_Images_With_Labels_In_a_csv_file.ipynb | ###Markdown
Loading images with labels in a csv file.

In this notebook we will load images with their respective labels from a `csv` file. All the images are mixed together in the same folder, so the folder structure looks as follows:

```
mnist/
  - 0_1.png
  - 0_2.png
  ...
  - train.csv
```

So we will load the images of this nature as follows.
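Before that, note that the loading code assumes `train.csv` has a `file_name` column and a `label` column; the rows built here are purely hypothetical, just to show the expected shape.

```python
# Hypothetical illustration of the train.csv layout assumed by the code below.
import pandas as pd

example_rows = pd.DataFrame({
    "file_name": ["0_1.png", "0_2.png", "1_1.png"],
    "label": [0, 0, 1],
})
print(example_rows)
```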
###Code
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
import pandas as pd
root_path = "mnist/"
df = pd.read_csv(os.path.join(root_path, "train.csv"))
df.head()
file_names = df["file_name"].values
labels = df["label"].values
###Output
_____no_output_____
###Markdown
> Reading the `data`.
###Code
ds_train = tf.data.Dataset.from_tensor_slices((file_names, labels))
ds_train
###Output
_____no_output_____
###Markdown
> Define the function that will read the ``image``.
###Code
def read_image(path, label):
image = tf.io.read_file(root_path + path)
image = tf.image.decode_image(image, channels=1, dtype=tf.float32)
return image, label
###Output
_____no_output_____
###Markdown
> Define a function that will perform some data `augmentation`.
###Code
def data_argumentation(image, label):
image = tf.image.random_brightness(image, 0.05)
return image, label
ds_train = ds_train.map(read_image).map(data_argumentation).batch(4)
###Output
_____no_output_____
###Markdown
> Creating a model that will train on our dataset.
###Code
model_1 = keras.Sequential([
keras.layers.Input(shape=(28, 28, 1)),
keras.layers.Flatten(),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(10, activation='softmax')
] ,name="model_1")
model_1.compile(
loss = keras.losses.SparseCategoricalCrossentropy(),
optimizer = "adam",
metrics=["accuracy"]
)
model_1.fit(ds_train, epochs=2, verbose=1)
###Output
Epoch 1/2
13/13 [==============================] - 1s 6ms/step - loss: 2.3528 - accuracy: 0.0987
Epoch 2/2
13/13 [==============================] - 0s 6ms/step - loss: 1.9329 - accuracy: 0.6079
|
models_regression/simple/simple_machine_learning.ipynb | ###Markdown
Simple machine learning model
###Code
#load data
alldata_15G=np.loadtxt('../../mddata/15grid_shuffled.dat')
alldata = alldata_15G
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
def linear_models_with_regularizations(X_train, X_test, y_train, y_test, alpha_ridge, alpha_lasso):
"""
Parameters
--------------
X_train, X_test: numpy matrix
y_train, y_test: numpy array
ridge: boolean
set ridge = True for including Ridge regression
Return: float
r2_score
"""
logTrans = False
if logTrans is True:
y_test = np.log(y_test)
y_train = np.log(y_train)
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
y_pred_regr = regr.predict(X_test)
#accuracy_score(Y_test, Y_pred)
# The coefficients
#print('Coefficients: \n', regr.coef_)
print("Mean squared error Linear Regression: %.2f" % mean_squared_error(y_test, y_pred_regr))
# Explained variance score: 1 is perfect prediction
#ac1 = r2_score(y_test, y_pred)
print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_regr))/len(y_test)))
print('r2_score: %.2f' % r2_score(y_test, y_pred_regr))
ysorted= np.sort(y_test)
xx = np.linspace(ysorted[0], ysorted[-1], len(ysorted))
plt.plot(xx, xx, 'r')
plt.plot(y_pred_regr, y_test, 'bo', alpha=0.5)
plt.xlabel('Predicted yield stress') #change the name here stress/strain
plt.ylabel('True yield stress')
plt.title('OLS with polynomial degree=2')
#plt.ylim(0, 1.2)
#plt.xlim(0, 1.2)
#plt.show()
#yy = y_test.reshape((len(y_test), 1))
plt.show()
ridge = linear_model.Ridge(alpha=alpha_ridge)
ridge.fit(X_train, y_train)
y_pred_ridge=ridge.predict(X_test)
#accuracy_score(Y_test, Y_pred)
# The coefficients
#print('Coefficients: \n', clf.coef_)
print("Mean squared error Ridge Regression: %.2f" % mean_squared_error(y_test, y_pred_ridge))
# Explained variance score: 1 is perfect prediction
print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_ridge))/len(y_test)))
print('r2_score: %.2f' % r2_score(y_test, y_pred_ridge))
#ac_ridge = r2_score(y_test, y_pred)
#plt.plot(y_pred, y_test, 'bo', alpha=0.5)
#plt.xlabel('y_test (fracture strain)')
#plt.ylabel('y_pred (fracture strain)')
#plt.title('Ridge Regression')
lasso = linear_model.Lasso(alpha=alpha_lasso)
lasso.fit(X_train, y_train)
y_pred_lasso=lasso.predict(X_test)
#accuracy_score(Y_test, Y_pred)
# The coefficients
#print('Coefficients: \n', clf.coef_)
print("Mean squared error LASSO: %.2f" % mean_squared_error(y_test, y_pred_lasso))
# Explained variance score: 1 is perfect prediction
print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_lasso))/len(y_test)))
print('r2_score: %.2f' % r2_score(y_test, y_pred_lasso))
#ac_lasso = r2_score(y_test, y_pred)
#plt.plot(y_test, y_pred, 'o')
#plt.xlabel('y_test (fracture strain)')
#plt.ylabel('y_pred (fracture strain)')
#plt.title('LASSO Regression')
#plt.show()
return y_pred_regr, y_pred_ridge, y_pred_lasso, regr.coef_, ridge.coef_, lasso.coef_
###Output
_____no_output_____
###Markdown
Training

You can choose how many features to train on.
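To make the feature count concrete, here is a quick toy example (illustrative only, not this notebook's data) of what `PolynomialFeatures(2, interaction_only=True, include_bias=True)` does to two inputs:

```python
# Toy illustration: with interaction_only=True and degree 2,
# [a, b] expands to [1, a, b, a*b] (no squared terms).
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

toy = np.array([[2.0, 3.0]])
print(PolynomialFeatures(2, interaction_only=True, include_bias=True).fit_transform(toy))
# -> [[1. 2. 3. 6.]]
```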
###Code
#split data into training and test set
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": .5})
#np.random.shuffle(alldata)
x, y=create_matrix(alldata, False, 2, 0.3, 15)
x = (x-.5)*2
X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
#choose polynomial degrees
poly = PolynomialFeatures(2, interaction_only=True, include_bias=True)
#poly = PolynomialFeatures(2)
X_train2 = poly.fit_transform(X_train)
print("Number of features: %d" %len(X_train2[0]))
X_test2 = poly.fit_transform(X_test)
#linear_models(X_train2, X_test2, y_train, y_test, ridge=True)
#y_train = (y_train-0.45937603178269587)/0.22056868516982353
#y_test = (y_test-0.45937603178269587)/0.22056868516982353
alpha=0.1
y_pred_regr, y_pred_ridge, y_pred_lasso, coef_regr, coef_ridge, coef_lasso = linear_models_with_regularizations(X_train2, X_test2, y_train, y_test, 10, 0.1)
#split data into training and test set
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": .5})
#np.random.shuffle(alldata)
x, y=create_matrix(alldata, False, 0, 0.3, 15)
x = (x-.5)*2
X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
#choose polynomial degrees
poly = PolynomialFeatures(3, interaction_only=True, include_bias=True)
#poly = PolynomialFeatures(2)
X_train2 = poly.fit_transform(X_train)
print("Number of features: %d" %len(X_train2[0]))
X_test2 = poly.fit_transform(X_test)
#linear_models(X_train2, X_test2, y_train, y_test, ridge=True)
#y_train = (y_train-0.45937603178269587)/0.22056868516982353
#y_test = (y_test-0.45937603178269587)/0.22056868516982353
alpha=0.1
y_pred_regr, y_pred_ridge, y_pred_lasso, coef_regr, coef_ridge, coef_lasso = linear_models_with_regularizations(X_train2, X_test2, y_train, y_test, 10, 0.1)
def NN_regressor(alldata, hl, obj, transform):
nn_regr = MLPRegressor(solver='lbfgs', alpha=1e-2, hidden_layer_sizes=hl, activation='relu', random_state=1)
#sorted_data = alldata[alldata[:,15].argsort()] #index 18 prob bad design, small -> goode design
np.random.shuffle(alldata)
#0nly fit top 20%
#sorted_data = sorted_data[int(0.8*len(sorted_data)):]
#np.random.shuffle(sorted_data)
#cutoff = sorted_data[int(len(alldata)/2), 17]
#x, y=create_matrix(sorted_data, True, 2, 30, NCcell_x*NCcell_y)
x, y=create_matrix(alldata, False, obj, 0.375, 15)
X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
#poly = PolynomialFeatures(1, interaction_only=True, include_bias=False)
#poly = PolynomialFeatures(interaction_only=True)
#X_train2 = X_train
#poly.fit_transform(X_train)
#x2 = poly.fit_transform(x)
#print("Number of features: %d" %len(X_train2[0]))
#X_test2 = poly.fit_transform(X_test)
if (transform is True):
poly = PolynomialFeatures(2, interaction_only=True, include_bias=False)
#poly = PolynomialFeatures(interaction_only=True)
X_train2 = poly.fit_transform(X_train)
#x2 = poly.fit_transform(x)
#print("Number of features: %d" %len(X_train2[0]))
X_test2 = poly.fit_transform(X_test)
else:
X_train2 = X_train
X_test2 = X_test
nn_regr.fit(X_train2, y_train)
y_pred_nn= nn_regr.predict(X_test2)
ysorted= np.sort(y_test)
xx = np.linspace(ysorted[0], ysorted[-1], len(ysorted))
plt.plot(xx, xx, 'r')
plt.plot(y_pred_nn, y_test, 'bo', alpha=0.5)
plt.xlabel('Predicted yield stress')
plt.ylabel('True yield strain')
plt.title('Neural Network')
print("Mean squared error: %lf" % mean_squared_error(y_test, y_pred_nn))
print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_nn))/len(y_test)))
# Explained variance score: 1 is perfect prediction
print('r2_score: %.2f' % r2_score(y_test, y_pred_nn))
return hl[0], np.sqrt(np.sum(np.square(y_test-y_pred_nn))/len(y_test)), r2_score(y_test, y_pred_nn), y_test, y_pred_nn
hl, rmse, ac, y_test, y_pred=NN_regressor(alldata, (1024, ), 0, False)
###Output
Number of good designs 12353 out of total 29791
Mean squared error: 0.003286
RMSE: 0.057324
r2_score: 0.89
|
notebooks/evaluation_test.ipynb | ###Markdown
Evaluation Metric Testing

The point of this notebook is to walk through an evaluation metric taken from one of the kernels posted on [Kaggle](https://www.kaggle.com/wcukierski/example-metric-implementation) to ensure that it was functioning correctly and to gain a deeper understanding of the [IoU](https://www.kaggle.com/c/data-science-bowl-2018#evaluation) metric.
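For reference, the metric described on the linked competition page scores each image by averaging a thresholded precision over a sweep of IoU thresholds:

$$\mathrm{IoU}(A,B) = \frac{|A \cap B|}{|A \cup B|}, \qquad \mathrm{score} = \frac{1}{|T|} \sum_{t \in T} \frac{TP(t)}{TP(t) + FP(t) + FN(t)}, \qquad T = \{0.5, 0.55, \ldots, 0.95\}$$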
###Code
!pwd
import skimage
import importlib
import numpy as np
from scipy import stats
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib inline
from utils import imaging
from utils import evaluate
importlib.reload(imaging)
importlib.reload(evaluate)
###Output
_____no_output_____
###Markdown
Notes on method below

When we are calculating the intersection of objects between images we use a 2D histogram of the two images. This takes the two images flattened and compares the pixel values at each location. It returns an $\mathbf{n}$ $\times$ $\mathbf{m}$ matrix where $\mathbf{n}$ is the number of true objects and $\mathbf{m}$ is the number of predicted objects. The values of this matrix are counts of the paired pixel values between the two images. So if row 1, column 2 = 50, this means that 50 pixels labelled 1 in the true image were labelled 2 in the predicted image.

$\textbf{Note:}$ It doesn't matter if the pixel value of the predicted mask is different than the ground truth mask (unless it is 0). All we care about is that a predicted object has pixels that overlap with a ground truth object.

Evaluate a single image

Evaluate a single image to check that the result of our evaluation metric is reasonable.
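For reference, a hedged sketch of the 2D-histogram bookkeeping described in the notes above (adapted from the linked kernel, not the project's `evaluate` module; it assumes each mask labels objects with consecutive integers and uses 0 for background):

```python
# Hedged sketch of the intersection/union computation described above.
import numpy as np

def iou_matrix(true_mask, pred_mask):
    n_true = len(np.unique(true_mask))   # number of true labels, background included
    n_pred = len(np.unique(pred_mask))   # number of predicted labels, background included
    # counts of co-occurring (true label, predicted label) pixel pairs
    intersection = np.histogram2d(true_mask.flatten(), pred_mask.flatten(),
                                  bins=(n_true, n_pred))[0]
    area_true = np.histogram(true_mask, bins=n_true)[0][:, None]
    area_pred = np.histogram(pred_mask, bins=n_pred)[0][None, :]
    union = area_true + area_pred - intersection
    return intersection[1:, 1:] / union[1:, 1:]   # drop the background row/column
```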
###Code
image_id = '0a7d30b252359a10fd298b638b90cb9ada3acced4e0c0e5a3692013f432ee4e9'
gt_path = imaging.get_path('output_train_1_lab_gt')
seg_path = imaging.get_path('output_train_1_lab_seg')
gt_image_1 = skimage.io.imread(gt_path + image_id + '.png' )
seg_image_1 = skimage.io.imread(seg_path + image_id + '.png' )
f, axarr = plt.subplots(1,2,figsize=(15,15))
axarr[0].imshow(gt_image_1)
axarr[0].set_title('Ground Truth')
axarr[1].imshow(seg_image_1)
axarr[1].set_title('Segmented')
###Output
_____no_output_____
###Markdown
Evaluate test cases

We examine how the evaluation metric performs in a few scenarios:
1. Perfect overlap with non-matching class labels.
2. Not predicting one of the ground truth objects (false negative).
3. How a 50% overlap performs with a threshold of .5.
4. Two predicted objects that lay over the ground truth object.
###Code
n = 1000 # matrices will be nxn
gt = np.zeros((n,n))
gt[300:700,300:700] = 1
gt[800:850,800:850] = 2
t1 = np.zeros((n,n))
t2 = np.zeros((n,n))
t3 = np.zeros((n,n))
t4 = np.zeros((n,n))
# perfect prediction
t1[300:700,300:700] = 2
t1[800:850,800:850] = 1
# different labels
t2[300:700,300:700] = 20
# 50% overlap
t3[300:700,500:900] = 1
# Having to small sub regions where the 1 truth region is.
t4[300:500,300:700] = 1 # creating first small sub region
t4[500:700,300:700] = 2 # creating second small sub region
test_cases = [t1,t2,t3,t4]
f, axarr = plt.subplots(1,5,figsize=(15,15))
axarr[0].imshow(gt)
axarr[0].set_title('gt')
axarr[1].imshow(t1)
axarr[1].set_title('t1')
axarr[2].imshow(t2)
axarr[2].set_title('t2')
axarr[3].imshow(t3)
axarr[3].set_title('t3')
axarr[4].imshow(t4)
axarr[4].set_title('t4')
f.tight_layout()
###Output
_____no_output_____
###Markdown
Test case 1
###Code
evaluate.evaluate_image(gt, t1)
###Output
# true nuclei: 2
# predicted pred: 2
thresh tp fp fn p
0.500 2 0 0 1.000
0.550 2 0 0 1.000
0.600 2 0 0 1.000
0.650 2 0 0 1.000
0.700 2 0 0 1.000
0.750 2 0 0 1.000
0.800 2 0 0 1.000
0.850 2 0 0 1.000
0.900 2 0 0 1.000
0.950 2 0 0 1.000
###Markdown
As we would hope, inverting the labels has no effect on the evaluation.

Test case 2
###Code
evaluate.evaluate_image(gt, t2)
###Output
# true nuclei: 2
# predicted pred: 1
thresh tp fp fn p
0.500 1 0 1 0.500
0.550 1 0 1 0.500
0.600 1 0 1 0.500
0.650 1 0 1 0.500
0.700 1 0 1 0.500
0.750 1 0 1 0.500
0.800 1 0 1 0.500
0.850 1 0 1 0.500
0.900 1 0 1 0.500
0.950 1 0 1 0.500
###Markdown
Since there is one correctly predicted object and one missed object (1 TP and 1 FN), the average precision is .5 as expected.

Test case 3
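Before running it, the expected IoU for two equal squares that overlap by half their area works out to fall below every threshold in the sweep:

$$\mathrm{IoU} = \frac{|A \cap B|}{|A \cup B|} = \frac{0.5\,|A|}{|A| + |A| - 0.5\,|A|} = \frac{0.5}{1.5} = \tfrac{1}{3} \approx 0.33 < 0.5$$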
###Code
evaluate.evaluate_image(gt, t3)
###Output
# true nuclei: 2
# predicted pred: 1
thresh tp fp fn p
0.500 0 1 2 0.000
0.550 0 1 2 0.000
0.600 0 1 2 0.000
0.650 0 1 2 0.000
0.700 0 1 2 0.000
0.750 0 1 2 0.000
0.800 0 1 2 0.000
0.850 0 1 2 0.000
0.900 0 1 2 0.000
0.950 0 1 2 0.000
###Markdown
The predicted object has a 50$\%$ overlap with the ground truth object, so the IoU is only about 0.33 (1/3), which gives 0 TPs for all thresholds used.

Test case 4
###Code
evaluate.evaluate_image(gt, t4)
###Output
# true nuclei: 2
# predicted pred: 2
thresh tp fp fn p
0.500 0 2 2 0.000
0.550 0 2 2 0.000
0.600 0 2 2 0.000
0.650 0 2 2 0.000
0.700 0 2 2 0.000
0.750 0 2 2 0.000
0.800 0 2 2 0.000
0.850 0 2 2 0.000
0.900 0 2 2 0.000
0.950 0 2 2 0.000
###Markdown
Neither predicted object has an IoU with the ground truth object that satisifies any of the thresholds so there are 0 TPs for each threshold. Evaluate all images Evaluate all images in stage 1 to test the `evaluate_images` function and see the distribution of scores.
###Code
scores = evaluate.evaluate_images(stage_num=1)
scores.head()
f, axarr = plt.subplots(1,2,figsize=(15,5))
axarr[0].hist(scores.score, bins=50)
axarr[0].set_title('Histogram of scores')
axarr[0].set_xlabel('score')
axarr[0].set_ylabel('# of images')
axarr[1].boxplot(scores.score, 0, 'rs', 0)
axarr[1].set_title('Box plot of scores')
axarr[1].set_xlabel('score')
f.tight_layout()
###Output
_____no_output_____ |
DATA 512 A4.ipynb | ###Markdown
Data Acquisition
###Code
import pandas as pd
from datetime import datetime
def revertStrToDate(date_str_list):
datelist = []
for date_str in date_str_list:
date_temp = datetime.strptime(date_str, '%m/%d/%y').date()
datelist.append(date_temp)
return datelist
us_confirm_data = pd.read_csv('data/RAW_us_confirmed_cases.csv')
mask_mandates_data = pd.read_csv('data/U.S._State_and_Territorial_Public_Mask_Mandates_From_April_10__2020_through_August_15__2021_by_County_by_Day.csv')
mask_use_data = pd.read_csv('data/mask-use-by-county.csv')
us_confirm_data.head(10)
mask_mandates_data.head(10)
mask_use_data.head(10)
###Output
_____no_output_____
###Markdown
Data Cleaning
###Code
county_confirm_data = pd.DataFrame(us_confirm_data[us_confirm_data['Admin2'] =='Essex'])
county_confirm_data = pd.DataFrame(county_confirm_data[county_confirm_data['Province_State'] == 'Massachusetts'])
county_confirm_data
county_confirm_data = county_confirm_data.drop(['Province_State', 'Admin2','UID','iso2' ,'iso3', 'code3','FIPS','Country_Region','Lat','Long_','Combined_Key'], axis=1)
date = list(county_confirm_data.columns)
datelist1 = revertStrToDate(date)
cum_cases = county_confirm_data.values.tolist()[0]
con_cases = []
i = 0
for i in range(len(cum_cases) - 1):
con = cum_cases[i + 1] - cum_cases[i]
con_cases.append(con)
i = i + 1
con_cases.append(0)
new_cases = []
i = 0
for i in range(len(con_cases) - 1):
new = con_cases[i + 1] - con_cases[i]
new_cases.append(new)
i = i + 1
new_cases.append(0)
county_confirm = pd.DataFrame(datelist1, columns=['date'])
county_confirm.insert(loc=1, column='confirmed_cases', value = con_cases)
county_confirm.insert(loc=2, column='new_cases', value = new_cases)
county_mask_mandate_data = pd.DataFrame(mask_mandates_data[mask_mandates_data['County_Name'].str.contains('Essex')])
county_mask_mandate_data = pd.DataFrame(county_mask_mandate_data[county_mask_mandate_data['State_Tribe_Territory']=='MA'])
county_mask_mandate_data.head()
county_mask_mandate_data = county_mask_mandate_data.drop(['State_Tribe_Territory','County_Name','FIPS_State','FIPS_County','order_code', 'Source_of_Action','URL','Citation'], axis = 1)
county_mask_mandate_data = county_mask_mandate_data.fillna(0)
county_mask_mandate_data['Face_Masks_Required_in_Public'] = county_mask_mandate_data['Face_Masks_Required_in_Public'].replace({'No': 0, 'Yes': 1})
county_mask_mandate_data.head()
date2 = county_mask_mandate_data['date'].tolist()
cvt_date = []
for temp in date2:
d = temp[0:-4] + temp[-2:]
cvt_date.append(d)
datelist2 = revertStrToDate(cvt_date)
county_mask_mandate_data['date'] = datelist2
county_mask_use_data = pd.DataFrame(mask_use_data[mask_use_data['COUNTYFP'] == 25009])
county_mask_use_data
county_confirm.head(10)
county_mask_mandate_data.head(10)
print(county_mask_mandate_data['Face_Masks_Required_in_Public'].tolist())
mask_require = pd.DataFrame(county_mask_mandate_data[county_mask_mandate_data['Face_Masks_Required_in_Public'] ==1])
mask_require.head()
mask_require.tail()
county_covid_df = county_confirm
county_covid_df = county_covid_df.set_index('date')
###Output
_____no_output_____
###Markdown
Visualization
###Code
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
# Plot the visualization
plt.figure(figsize=(14,8))
plt.title('Covid Confirmed Cases in Essex, MA from Feb,2020 to Oct,2021')
plt.plot(county_covid_df['confirmed_cases'],label = 'confirmed cases')
plt.axvspan(date2num(datetime(2020,5,6)), date2num(datetime(2021,5,28)),
label="Mask Require",color="green", alpha=0.2)
plt.xlabel('Year')
plt.ylabel('Confirmed Cases')
plt.legend()
plt.savefig('covid confirmed cases vs time.png')
plt.show
###Output
C:\Users\SophiaShao\Anaconda3\lib\site-packages\pandas\plotting\_matplotlib\converter.py:103: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters.
To register the converters:
>>> from pandas.plotting import register_matplotlib_converters
>>> register_matplotlib_converters()
warnings.warn(msg, FutureWarning)
|
utils/example_plots/methods_paper_plots/fig_8_initial_core_final_mass_relations/make_fig_8.ipynb | ###Markdown
Initial-Core-Final Mass Relation Plot

COMPAS methods paper Figure 8

A notebook for reproducing the initial-core-final mass relation plot in the COMPAS methods paper.
###Code
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
import astropy.constants as consts
import matplotlib
import astropy.units as u
# make the plots pretty
%config InlineBackend.figure_format = 'retina'
plt.rc('font', family='serif')
fs = 24
params = {'legend.fontsize': fs,
'axes.labelsize': fs,
'xtick.labelsize':0.7*fs,
'ytick.labelsize':0.7*fs}
plt.rcParams.update(params)
###Output
_____no_output_____
###Markdown
Get the stellar types

First we can import the stellar types array to use the same colour palette as the other plots.
###Code
import sys
sys.path.append("../")
from stellar_types import stellar_types
###Output
_____no_output_____
###Markdown
Get the data
###Code
def get_COMPAS_vars(file, group, var_list):
if isinstance(var_list, str):
return file[group][var_list][...]
else:
return [file[group][var][...] for var in var_list]
###Output
_____no_output_____
###Markdown
Top panel: Solar metallicity default prescription
###Code
def core_remnant_mass_comparison(file, fig=None, ax=None, show=True):
if fig is None or ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
with h5.File(file, "r") as compas:
m_ZAMS, m_final, m_co_core_atCO, Z, stellar_type = get_COMPAS_vars(compas, "SSE_System_Parameters",
["Mass@ZAMS",
"Mass",
"Mass_CO_Core@CO",
"Metallicity",
"Stellar_Type"])
# only plot things for solar metallicity
solar = Z == 0.01416
uni_types = np.unique(stellar_type[solar])
# annotate the plot with the stellar types
for i in range(len(uni_types)):
ax.annotate(stellar_types[uni_types[i]]["short"], xy=(0.02, 0.93 - 0.05 * i),
xycoords="axes fraction", color=plt.get_cmap("tab10")(i / 10), fontsize=0.7*fs, weight="bold")
for i in range(len(uni_types)):
# plot the final white dwarf mass for WDs
if uni_types[i] in [10, 11, 12]:
ax.loglog(m_ZAMS[solar][stellar_type[solar] == uni_types[i]],
m_final[solar][stellar_type[solar] == uni_types[i]],
lw=3, color=plt.get_cmap("tab10")(i / 10))
# plot CO core mass at CO formation for NSs and BHs
elif uni_types[i] in [13, 14]:
ax.loglog(m_ZAMS[solar][stellar_type[solar] == uni_types[i]],
m_co_core_atCO[solar][stellar_type[solar] == uni_types[i]],
lw=3, color=plt.get_cmap("tab10")(i / 10))
# annotate with solar metallicity
ax.annotate(r"$Z = Z_{\rm \odot}$", xy=(0.97, 0.04), xycoords="axes fraction", fontsize=0.7*fs, ha="right")
ax.set_ylabel(r"Core Mass $[\rm M_{\odot}]$")
if show:
plt.show()
return fig, ax
###Output
_____no_output_____
###Markdown
Middle panel: Illustrate effect of metallicity on core masses
###Code
def remnant_mass_across_metallicity(file, fig=None, ax=None, show=True):
if fig is None or ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
with h5.File(file, "r") as compas:
m_ZAMS, m_final, m_co_core_atCO, Z, stellar_type = get_COMPAS_vars(compas, "SSE_System_Parameters",
["Mass@ZAMS",
"Mass",
"Mass_CO_Core@CO",
"Metallicity",
"Stellar_Type"])
# create an inset axis for the linear version
inset_ax = ax.inset_axes([0.05, 0.55, 0.53, 0.425])
inset_ax.tick_params(labelsize=0.5*fs)
# plot three different metallicities
for Z_match, style in [(0.01, "-"), (0.001, "--"), (0.0001, "dotted")]:
matching_Z = Z == Z_match
uni_types = np.unique(stellar_type[matching_Z])
for i in range(len(uni_types)):
# plot in same way as top panel
matching_type = stellar_type[matching_Z] == uni_types[i]
y_quantity = m_final if uni_types[i] in [10, 11, 12] else m_co_core_atCO
ax.loglog(m_ZAMS[matching_Z][matching_type], y_quantity[matching_Z][matching_type],
lw=2, linestyle=style, markevery=25, color=plt.get_cmap("tab10")(i / 10),
label=r"$Z = {{{}}}$".format(Z_match) if i == len(uni_types) - 1 else None)
# for black holes also plot in the inset axis
if uni_types[i] == 14:
inset_ax.plot(m_ZAMS[matching_Z][matching_type], y_quantity[matching_Z][matching_type],
lw=2, linestyle=style, markevery=25, color=plt.get_cmap("tab10")(i / 10))
ax.legend(loc="lower right", fontsize=0.7 * fs)
ax.set_ylabel(r"Core Mass $[\rm M_{\odot}]$")
if show:
plt.show()
return fig, ax
###Output
_____no_output_____
###Markdown
Bottom panel: Demonstrate how remnant mass prescriptions differ at solar metallicity
###Code
def remnant_mass_prescription_comparison(prescriptions, fig=None, ax=None, show=True):
if fig is None or ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
for file, style, label in prescriptions:
with h5.File("COMPAS_Output_{}/COMPAS_Output_{}.h5".format(file, file), "r") as compas:
m_ZAMS, m_final, Z, stellar_type = get_COMPAS_vars(compas, "SSE_System_Parameters",
["Mass@ZAMS",
"Mass",
"Metallicity",
"Stellar_Type"])
solar = Z == 0.01416
uni_types = np.unique(stellar_type[solar])
for i in range(len(uni_types)):
# only plot the NSs and BHs
if uni_types[i] >= 13:
# use scatter points for the Mandel & Mueller prescription
if file == "MM20":
ax.scatter(m_ZAMS[solar][stellar_type[solar] == uni_types[i]],
m_final[solar][stellar_type[solar] == uni_types[i]],
s=0.2, alpha=0.5, color=plt.get_cmap("tab10")(i / 10),
label=label if i == len(uni_types) - 1 else None)
# use lines for the other ones
else:
ax.loglog(m_ZAMS[solar][stellar_type[solar] == uni_types[i]],
m_final[solar][stellar_type[solar] == uni_types[i]],
lw=2, linestyle=style, color=plt.get_cmap("tab10")(i / 10),
label=label if i == len(uni_types) - 1 else None, zorder=10)
ax.set_xscale("log")
ax.set_yscale("log")
leg = ax.legend(fontsize=0.7 * fs, loc="lower right", markerscale=25, title=r"$Z = Z_{\rm \odot}$")
leg.get_title().set_fontsize(0.7 * fs)
ax.set_ylabel(r"Remnant Mass $[\rm M_{\odot}]$")
if show:
plt.show()
return fig, ax
###Output
_____no_output_____
###Markdown
Create the whole plot!
###Code
fig, axes = plt.subplots(3, figsize=(10, 24))
fig, axes[0] = core_remnant_mass_comparison("COMPAS_Output_default/COMPAS_Output_default.h5",
fig=fig, ax=axes[0], show=False)
fig, axes[1] = remnant_mass_across_metallicity("COMPAS_Output_default/COMPAS_Output_default.h5",
fig=fig, ax=axes[1], show=False)
fig, axes[2] = remnant_mass_prescription_comparison([("default", "dotted", "Fryer+2012 Delayed"),
("rapid", "--", "Fryer+2012 Rapid"),
("MM20", None, "Mandel & Mueller 2020")],
fig=fig, ax=axes[2], show=False)
for ax, xticks, yticks in zip(axes,
[[0.1, 1, 10, 100], [0.1, 1, 10, 100], [10.0, 100.0]],
[[0.1, 1, 10], [0.1, 1, 10], [1.0, 10.0]]):
ax.set_xticks(xticks)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xlabel(r"Initial Mass $[\rm M_{\rm \odot}]$")
ax.set_yticks(yticks)
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.tick_params(which="major", length=7)
ax.tick_params(which="minor", length=4)
plt.savefig("initial_core_final_mass_relations.pdf", format="pdf", bbox_inches="tight")
plt.show()
###Output
_____no_output_____ |
ml-models/pacs008/linear-learner/pacs008_linear_learner_inference_pipeline.ipynb | ###Markdown
SageMaker Inference Pipeline with Scikit Learn and Linear Learner

ISO20022 pacs.008 inference pipeline notebook. This notebook uses the training dataset to train a model with SageMaker Linear Learner. The problem is defined as a `binary classification` problem of accepting or rejecting a pacs.008 message.

Amazon SageMaker provides a very rich set of [builtin algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algorithms-choose.html) for model training and development. This notebook uses the [Amazon SageMaker Linear Learner Algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html) on the training dataset to perform model training. The Amazon SageMaker linear learner algorithm provides a solution for both classification and regression problems. With the SageMaker algorithm, you can simultaneously explore different training objectives and choose the best solution from a validation set. You can also explore a large number of models and choose the best. The best model optimizes either of the following:

* Continuous objectives, such as mean square error, cross entropy loss, absolute error (regression models).
* Discrete objectives suited for classification, such as F1 measure, precision, recall, or accuracy (classification models).

ML model development is an iterative process with several tasks that data scientists go through to produce an effective model that can solve a business problem. The process typically involves:

* Data exploration and analysis
* Feature engineering
* Model development
* Model training and tuning
* Model deployment

We provide the accompanying notebook [pacs008_xgboost_local.ipynb](./pacs008_xgboost_local.ipynb) which demonstrates data exploration, analysis and feature engineering, focussing on text feature engineering. This notebook uses the results of the analysis in [pacs008_xgboost_local.ipynb](./pacs008_xgboost_local.ipynb) to create a feature engineering pipeline using [SageMaker Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html).

Here we define the ML problem to be a `binary classification` problem, that of predicting whether a pacs.008 XML message will be processed successfully or lead to exception processing. The model predicts `Success` i.e. 1 or `Failure` i.e. 0.

**Feature Engineering**

Data pre-processing and featurizing the dataset by incorporating standard techniques or prior knowledge is a standard mechanism to make the dataset meaningful for training. Once data has been pre-processed and transformed, it can finally be used to train an ML model using an algorithm. However, when the trained model is used for processing real time or batch prediction requests, the model receives data in a format which needs to be pre-processed (e.g. featurized) before it can be passed to the algorithm. In this notebook, we will demonstrate how you can build your ML pipeline leveraging the SageMaker Scikit-learn container and the SageMaker Linear Learner algorithm.
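As a hedged illustration only (this is not the notebook's actual deployment code), the sketch below shows how such a two-container pipeline is typically assembled with SageMaker Python SDK v2; the S3 artifact URIs, the `featurizer.py` entry point, and the scikit-learn framework version are placeholder assumptions.

```python
# Hedged sketch (SDK v2 assumed): chaining an SKLearn featurizer and a Linear Learner
# model behind a single endpoint. Artifact locations and script names are placeholders.
import boto3
import sagemaker
from sagemaker import get_execution_role, image_uris
from sagemaker.model import Model
from sagemaker.pipeline import PipelineModel
from sagemaker.sklearn.model import SKLearnModel

session = sagemaker.Session()
role = get_execution_role()
region = boto3.session.Session().region_name

preprocessor = SKLearnModel(
    model_data="s3://<bucket>/<prefix>/sklearn-featurizer/model.tar.gz",  # placeholder
    role=role,
    entry_point="featurizer.py",                                          # placeholder script
    framework_version="0.23-1",
    sagemaker_session=session,
)

linear_learner = Model(
    image_uri=image_uris.retrieve("linear-learner", region),
    model_data="s3://<bucket>/<prefix>/linear-learner/model.tar.gz",      # placeholder
    role=role,
    sagemaker_session=session,
)

pipeline_model = PipelineModel(
    name="pacs008-inference-pipeline",
    role=role,
    models=[preprocessor, linear_learner],  # containers are invoked in this order
    sagemaker_session=session,
)

# predictor = pipeline_model.deploy(initial_instance_count=1, instance_type="ml.m5.large")
```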
After a model is trained, we deploy the Pipeline (data preprocessing and Linear Learner) as an **Inference Pipeline** behind a **single Endpoint** for real time inference and for **batch inferences** using Amazon SageMaker Batch Transform.We use the pacs.008 XML text element `InstrForNxtAgt` for feature engineering, i.e. we featurize its text into new numeric features that can be used in making predictions.Since we featurize `InstrForNxtAgt` into numeric representations during training, we have to apply the same pre-processing to transform text into numeric features before using the trained model to make predictions.**Inference Pipeline**The diagram below shows how Amazon SageMaker Inference Pipeline works. It is used to deploy multi-container endpoints.**Inference Endpoint** The diagram below shows the places in the cross-border payment message flow where a call to ML inference endpoint can be injected to get inference from the ML model. The inference result can be used to take additional actions, including corrective actions before sending the message downstream.**Further Reading:** For information on Amazon SageMaker Linear Learner algorithm and SageMaker Inference Pipeline visit the following references: [SageMaker Linear Learner Algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html) [SageMaker Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html) Basic SetupIn this step we do basic setup needed for the rest of the notebook:* Amazon SageMaker API client using boto3* Amazon SageMaker session object* AWS region* AWS IAM role
###Code
import os
import boto3
import sagemaker
from sagemaker import get_execution_role
sm_client = boto3.Session().client('sagemaker')
sm_session = sagemaker.Session()
region = boto3.session.Session().region_name
role = get_execution_role()
print ("Notebook is running with assumed role {}".format (role))
print("Working with AWS services in the {} region".format(region))
###Output
_____no_output_____
###Markdown
Provide S3 Bucket Name
###Code
# Working directory for the notebook
WORKDIR = os.getcwd()
BASENAME = os.path.dirname(WORKDIR)
print(f"WORKDIR: {WORKDIR}")
print(f"BASENAME: {BASENAME}")
# Create a directory storing local data
iso20022_data_path = 'iso20022-data'
if not os.path.exists(iso20022_data_path):
# Create a new directory because it does not exist
os.makedirs(iso20022_data_path)
# Store all prototype assets in this bucket
s3_bucket_name = 'iso20022-prototype-t3'
s3_bucket_uri = 's3://' + s3_bucket_name
# Prefix for all files in this prototype
prefix = 'iso20022'
pacs008_prefix = prefix + '/pacs008'
raw_data_prefix = pacs008_prefix + '/raw-data'
labeled_data_prefix = pacs008_prefix + '/labeled-data'
training_data_prefix = pacs008_prefix + '/training-data'
training_headers_prefix = pacs008_prefix + '/training-headers'
test_data_prefix = pacs008_prefix + '/test-data'
training_job_output_prefix = pacs008_prefix + '/training-output'
print(f"Training data with headers will be uploaded to {s3_bucket_uri + '/' + training_headers_prefix}")
print(f"Training data will be uploaded to {s3_bucket_uri + '/' + training_data_prefix}")
print(f"Test data will be uploaded to {s3_bucket_uri + '/' + test_data_prefix}")
print(f"Training job output will be stored in {s3_bucket_uri + '/' + training_job_output_prefix}")
labeled_data_location = s3_bucket_uri + '/' + labeled_data_prefix
training_data_w_headers_location = s3_bucket_uri + '/' + training_headers_prefix
training_data_location = s3_bucket_uri + '/' + training_data_prefix
test_data_location = s3_bucket_uri + '/' + test_data_prefix
print(f"Raw labeled data location = {labeled_data_location}")
print(f"Training data with headers location = {training_data_w_headers_location}")
print(f"Training data location = {training_data_location}")
print(f"Test data location = {test_data_location}")
###Output
_____no_output_____
###Markdown
Prepare Training Dataset 1. Select training dataset from raw labeled dataset.1. Split labeled dataset to training and test datasets.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
from sklearn.model_selection import train_test_split
from sklearn import ensemble, metrics, model_selection, naive_bayes
color = sns.color_palette()
%matplotlib inline
###Output
_____no_output_____
###Markdown
Download raw labeled dataset
###Code
# Download labeled raw dataset from S3
s3_client = boto3.client('s3')
s3_client.download_file(s3_bucket_name, labeled_data_prefix + '/labeled_data.csv', 'iso20022-data/labeled_data.csv')
# Read the train and test dataset and check the top few lines ##
labeled_raw_df = pd.read_csv("iso20022-data/labeled_data.csv")
labeled_raw_df.head()
###Output
_____no_output_____
###Markdown
Select features for training
###Code
# Training features
fts=[
'y_target',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Dbtr_PstlAdr_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Cdtr_PstlAdr_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_DbtCdtRptgInd',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Authrty_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Dtls_Cd',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_InstrForNxtAgt_InstrInf',
]
# New data frame with selected features
selected_df = labeled_raw_df[fts]
selected_df.head()
# Rename columns
selected_df = selected_df.rename(columns={
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Dbtr_PstlAdr_Ctry': 'Dbtr_PstlAdr_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Cdtr_PstlAdr_Ctry': 'Cdtr_PstlAdr_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_DbtCdtRptgInd': 'RgltryRptg_DbtCdtRptgInd',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Authrty_Ctry': 'RgltryRptg_Authrty_Ctry',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Dtls_Cd': 'RgltryRptg_Dtls_Cd',
'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_InstrForNxtAgt_InstrInf': 'InstrForNxtAgt',
})
selected_df.head()
from sklearn.preprocessing import LabelEncoder
# Assign Pandas data types.
categorical_fts=[
'Dbtr_PstlAdr_Ctry',
'Cdtr_PstlAdr_Ctry',
'RgltryRptg_DbtCdtRptgInd',
'RgltryRptg_Authrty_Ctry',
'RgltryRptg_Dtls_Cd'
]
integer_fts=[
]
numeric_fts=[
]
text_fts=[
# Leave text as object
# 'InstrForNxtAgt'
]
# Categorical features to categorical data type.
for col in categorical_fts:
selected_df[col] = selected_df[col].astype(str).astype('category')
# Integer features to int64 data type.
for col in integer_fts:
selected_df[col] = selected_df[col].astype(str).astype('int64')
# Numeric features to float64 data type.
for col in numeric_fts:
selected_df[col] = selected_df[col].astype(str).astype('float64')
# Text features to string data type.
for col in text_fts:
selected_df[col] = selected_df[col].astype(str).astype('string')
label_encoder = LabelEncoder()
selected_df['y_target'] = label_encoder.fit_transform(selected_df['y_target'])
selected_df.dtypes
selected_df.info()
selected_df
X_train_df, X_test_df, y_train_df, y_test_df = train_test_split(selected_df, selected_df['y_target'], test_size=0.20, random_state=299, shuffle=True)
print("Number of rows in train dataset : ",X_train_df.shape[0])
print("Number of rows in test dataset : ",X_test_df.shape[0])
X_train_df
X_test_df
## Save training and test datasets to CSV
train_data_w_headers_output_path = 'iso20022-data/train_data_w_headers.csv'
print(f'Saving training data with headers to {train_data_w_headers_output_path}')
X_train_df.to_csv(train_data_w_headers_output_path, index=False)
train_data_output_path = 'iso20022-data/train_data.csv'
print(f'Saving training data without headers to {train_data_output_path}')
X_train_df.to_csv(train_data_output_path, header=False, index=False)
test_data_output_path = 'iso20022-data/test_data.csv'
print(f'Saving test data without headers to {test_data_output_path}')
X_test_df.to_csv(test_data_output_path, header=False, index=False)
###Output
_____no_output_____
###Markdown
Upload training and test datasets to S3 for training
###Code
train_input_data_location = sm_session.upload_data(
path=train_data_w_headers_output_path,
bucket=s3_bucket_name,
key_prefix=training_headers_prefix,
)
print(f'Uploaded training data with headers to: {train_input_data_location}')
train_input_data_location = sm_session.upload_data(
path=train_data_output_path,
bucket=s3_bucket_name,
key_prefix=training_data_prefix,
)
print(f'Uploaded training data without headers to: {train_input_data_location}')
test_input_data_location = sm_session.upload_data(
path=test_data_output_path,
bucket=s3_bucket_name,
key_prefix=test_data_prefix,
)
print(f'Uploaded test data without headers to: {test_input_data_location}')
###Output
_____no_output_____
###Markdown
Feature Engineering Create a Scikit-learn script to train with To run Scikit-learn on SageMaker, we use the `SKLearn` Estimator with a script as an entry point. The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:* SM_MODEL_DIR: A string representing the path to the directory to write model artifacts to. These artifacts are uploaded to S3 for model hosting.* SM_OUTPUT_DIR: A string representing the filesystem path to write output artifacts to. Output artifacts may include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed and uploaded to S3 to the same S3 prefix as the model artifacts.Supposing two input channels, 'train' and 'test', were used in the call to the SKLearn estimator's fit() method, the following will be set, following the format SM_CHANNEL_[channel_name]:* SM_CHANNEL_TRAIN: A string representing the path to the directory containing data in the 'train' channel* SM_CHANNEL_TEST: Same as above, but for the 'test' channel.A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to model_dir so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an argparse.ArgumentParser instance. Create SageMaker Scikit Estimator To run our Scikit-learn training script on SageMaker, we construct a `sagemaker.sklearn.estimator.SKLearn` estimator, which accepts several constructor arguments:* __entry_point__: The path to the Python script SageMaker runs for training and prediction.* __role__: Role ARN* __framework_version__: Scikit-learn version you want to use for executing your model training code.* __instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, SageMaker Scikit-learn does not currently support training on GPU instance types.* __sagemaker_session__ *(optional)*: The session used to train on SageMaker.
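For reference, here is a minimal, illustrative sketch of what such an entry-point script typically looks like. This is *not* the actual `pacs008_sklearn_featurizer.py` used below; the column names, transformer choice and file handling are assumptions for illustration only:

```python
# Illustrative outline of a SageMaker Scikit-learn entry-point script (not the real featurizer).
import argparse
import os

import joblib
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # SageMaker injects these locations through environment variables.
    parser.add_argument("--model-dir", default=os.environ.get("SM_MODEL_DIR"))
    parser.add_argument("--train", default=os.environ.get("SM_CHANNEL_TRAIN"))
    args = parser.parse_args()

    # Load whatever CSV file SageMaker copied into the 'train' channel directory.
    csv_files = [f for f in os.listdir(args.train) if f.endswith(".csv")]
    train_df = pd.read_csv(os.path.join(args.train, csv_files[0]))

    # Hypothetical featurizer: one-hot encode two of the categorical columns.
    featurizer = ColumnTransformer(
        [("cat", OneHotEncoder(handle_unknown="ignore"),
          ["Dbtr_PstlAdr_Ctry", "Cdtr_PstlAdr_Ctry"])],
        remainder="drop",
    )
    featurizer.fit(train_df.drop("y_target", axis=1, errors="ignore"))

    # Anything written to SM_MODEL_DIR is packaged as the model artifact for hosting.
    joblib.dump(featurizer, os.path.join(args.model_dir, "model.joblib"))
```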
###Code
from sagemaker.sklearn.estimator import SKLearn
preprocessing_job_name = 'pacs008-preprocessor-ll'
print('data preprocessing job name: ' + preprocessing_job_name)
FRAMEWORK_VERSION = "0.23-1"
source_dir = "../sklearn-transformers"
script_file = "pacs008_sklearn_featurizer.py"
sklearn_preprocessor = SKLearn(
entry_point=script_file,
source_dir=source_dir,
role=role,
framework_version=FRAMEWORK_VERSION,
instance_type="ml.c4.xlarge",
sagemaker_session=sm_session,
base_job_name=preprocessing_job_name,
)
sklearn_preprocessor.fit({"train": train_input_data_location})
###Output
_____no_output_____
###Markdown
Batch transform our training data Now that our preprocessor is fitted, let's use batch transform to preprocess the raw training data and store the result directly back in S3.
###Code
# Define a SKLearn Transformer from the trained SKLearn Estimator
transformer = sklearn_preprocessor.transformer(
instance_count=1,
instance_type="ml.m5.xlarge",
assemble_with="Line",
accept="text/csv",
)
# Preprocess training input
transformer.transform(train_input_data_location, content_type="text/csv")
print("Waiting for transform job: " + transformer.latest_transform_job.job_name)
transformer.wait()
preprocessed_train = transformer.output_path
###Output
_____no_output_____
###Markdown
Train a Linear Learner Model Fit a LinearLearner Model with the preprocessed data Let's take the preprocessed training data and fit a LinearLearner Model. Sagemaker provides prebuilt algorithm containers that can be used with the Python SDK. The previous Scikit-learn job preprocessed the labeled raw pacs.008 dataset into useable training data that we can now use to fit a binary classifier Linear Learner model.For more on Linear Learner see: https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html
###Code
from sagemaker.image_uris import retrieve
ll_image = retrieve("linear-learner", boto3.Session().region_name)
# Set job name
training_job_name = 'pacs008-ll-training'
print('Linear Learner training job name: ' + training_job_name)
# S3 bucket for storing model artifacts
training_job_output_location = s3_bucket_uri + '/' + training_job_output_prefix + '/ll_model'
ll_estimator = sagemaker.estimator.Estimator(
ll_image,
role,
instance_count=1,
instance_type="ml.m4.2xlarge",
volume_size=20,
max_run=3600,
input_mode="File",
output_path=training_job_output_location,
sagemaker_session=sm_session,
base_job_name=training_job_name,
)
# binary_classifier_model_selection_criteria: accuracy is default
# - accuracy | f_beta | precision_at_target_recall |recall_at_target_precision | loss_function
# feature_dim=auto, # auto or actual number, default is auto
# epochs=15, default is 15
# learning_rate=auto or actual number 0.05 or 0.005
# loss=logistic | auto |hinge_loss, default is logistic
# mini_batch_size=32, default is 1000
# num_models=auto, or a number
# optimizer=auto or sgd | adam | rmsprop
ll_estimator.set_hyperparameters(
predictor_type="binary_classifier",
binary_classifier_model_selection_criteria="accuracy",
epochs=15,
mini_batch_size=32)
ll_train_data = sagemaker.inputs.TrainingInput(
preprocessed_train, # set after preprocessing job completes
distribution="FullyReplicated",
content_type="text/csv",
s3_data_type="S3Prefix",
)
data_channels = {"train": ll_train_data}
ll_estimator.fit(inputs=data_channels, logs=True)
###Output
_____no_output_____
###Markdown
Serial Inference Pipeline with Scikit preprocessor and Linear Learner Set up the inference pipeline Setting up a Machine Learning pipeline can be done with the Pipeline Model. This sets up a list of models in a single endpoint. We configure our pipeline model with the fitted Scikit-learn inference model (data preprocessing/feature engineering model) and the fitted Linear Learner model. Deploying the model follows the standard ```deploy``` pattern in the SageMaker Python SDK.
###Code
from sagemaker.model import Model
from sagemaker.pipeline import PipelineModel
import boto3
from time import gmtime, strftime
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# The two SageMaker Models: one for data preprocessing, and second for inference
scikit_learn_inference_model = sklearn_preprocessor.create_model()
linear_learner_model = ll_estimator.create_model()
model_name = "pacs008-ll-inference-pipeline-" + timestamp_prefix
endpoint_name = "pacs008-ll-inference-pipeline-ep-" + timestamp_prefix
sm_model = PipelineModel(
    name=model_name, role=role, models=[scikit_learn_inference_model, linear_learner_model]
)
sm_model.deploy(initial_instance_count=1, instance_type="ml.c4.xlarge", endpoint_name=endpoint_name)
###Output
_____no_output_____
###Markdown
Store Model Name and Endpoint Name in Notebook Magic StoreThese notebook magic store values are used in the example batch transform notebook.
###Code
%store model_name
%store endpoint_name
###Output
_____no_output_____
###Markdown
Make a request to our pipeline endpoint The diagram below shows the places in the cross-border payment message flow where a call to ML inference endpoint can be injected to get inference from the ML model. The inference result can be used to take additional actions, including corrective actions before sending the message downstream.Here we send a few hand-crafted CSV payloads that follow the same field ordering as the training data (you'll notice that the inference python script is very particular about the ordering of the inference request data). The ```ContentType``` field configures the first container, while the ```Accept``` field configures the last container. You can also specify each container's ```Accept``` and ```ContentType``` values using environment variables.We make our request with the payload in ```'text/csv'``` format, since that is what our script currently supports. If other formats need to be supported, this would have to be added to the ```output_fn()``` method in our entry point. Note that we set the ```Accept``` to ```application/json```, since Linear Learner does not support ```text/csv``` ```Accept```. The inference output in this case is trying to predict `Success` or `Failure` of an ISO20022 pacs.008 payment message using only the subset of message XML elements in the message, i.e. the features on which the model was trained.
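For completeness, here is a small sketch of the same kind of request made with the low-level boto3 SageMaker runtime client; the response shape shown in the comment is indicative of a binary-classifier Linear Learner response, not guaranteed:

```python
# Sketch of the same request using the low-level boto3 runtime client.
import boto3

runtime = boto3.client("sagemaker-runtime")
response = runtime.invoke_endpoint(
    EndpointName=endpoint_name,      # endpoint deployed above
    ContentType="text/csv",          # consumed by the Scikit-learn featurizer container
    Accept="application/json",       # returned by the Linear Learner container
    Body="MX,GB,,,,",
)
print(response["Body"].read().decode())
# e.g. {"predictions": [{"score": 0.97, "predicted_label": 1}]}
```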
###Code
from sagemaker.predictor import Predictor
from sagemaker.serializers import CSVSerializer
# payload_1, expect: Failure
#payload_1 = "US,GB,,,,/SVC/It is to be delivered in three days. Greater than three days penalty add 2bp per day"
payload_1 = "MX,GB,,,,/SVC/It is to be delivered in four days. Greater than four days penalty add 2bp per day"
# payload_2, expect: Success
payload_2 = "MX,GB,,,,"
#payload_2 = "US,IE,,,,/TRSY/Treasury Services Platinum Customer"
# payload_3, expect: Failure
payload_3 = "TH,US,,,,/SVC/It is to be delivered in four days. Greater than four days penalty add 2bp per day"
#payload_3 = "CA,US,,,,/SVC/It is to be delivered in three days. Greater than three days penalty add 2bp per day"
# payload_4, expect: Success
payload_4 = "IN,CA,DEBT,IN,00.P0006,"
# payload_5, expect: Success
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0003 FDI in Transportation"
# Failure
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0009 FDI in Agriculture "
# Failure
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0004 retail"
# payload_6, expect: Failure
payload_6 = "IE,IN,CRED,IN,0,/REG/99.C34698"
#payload_6 = "MX,IE,,,,/TRSY/eweweww"
# endpoint_name = 'pacs008-ll-inference-pipeline-ep-2021-11-25-00-58-52'  # override only if reusing an already-deployed endpoint
predictor = Predictor(
endpoint_name=endpoint_name, sagemaker_session=sm_session, serializer=CSVSerializer()
)
print(f"1. Expect Failure i.e. 0, {predictor.predict(payload_1)}")
print(f"2. Expect Success i.e. 1, {predictor.predict(payload_2)}")
print(f"3. Expect Failure i.e. 0, {predictor.predict(payload_3)}")
print(f"4. Expect Success i.e. 1, {predictor.predict(payload_4)}")
print(f"5. Expect Success i.e. 1, {predictor.predict(payload_5)}")
print(f"6. Expect Failure i.e. 0, {predictor.predict(payload_6)}")
###Output
_____no_output_____
###Markdown
Delete EndpointOnce we are finished with the endpoint, we clean up the resources!
###Code
sm_client = sm_session.boto_session.client("sagemaker")
sm_client.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____ |
CEK_problems/equilibrium_04.ipynb | ###Markdown
The hydrolysis of Sucrose into Glucose and Fructose is catalysed by the enzyme Invertase.\begin{equation}Sucrose + Invertase + \mathrm{H_2O} \to Glucose + Fructose\end{equation}There are however several substances that can inhibit the efficacy of the catalyst.Imagine performing a series of experiments using different initial concentrations of Sucrose where you measure the rate of formation of Glucose. The results of your experiments are affected by the presence of a contaminating substance that interferes with the catalytic reaction. Although you can somewhat control the concentration of the contaminant, you cannot completely eliminate it.1. Determine whether the contaminating substance inhibits the catalytic reaction and the type of the inhibition mechanism, *e.g.* Competitive, Uncompetitive, Non-competitive or Mixed.2. Determine the maximum rate achieved by the reaction, $V_{max}$, and the Michaelis constant, $K_M$, in the case where you could completely eliminate the contaminant. Tips:- Note that every time you restart the experiment the type of the inhibition mechanism may change. Instructions:- Use the input fields below to set the initial concentrations of Sucrose and of the inhibitor for the virtual experiment, - Click `Perform measurement` to run the virtual experiment and obtain the result of the experiment,- Click `Download CSV` to export the complete data set for all the experiments as a CSV file.
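For reference, below is a minimal, non-authoritative analysis sketch showing one way to estimate $V_{max}$ and $K_M$ from the exported CSV with a Lineweaver-Burk fit, $1/v = (K_M/V_{max})\,(1/[S]) + 1/V_{max}$. The file path and column names are assumptions based on how `results.csv` is written by the code below:

```python
# Sketch only: Lineweaver-Burk estimate of Vmax and K_M from the exported results.csv.
import numpy as np
import pandas as pd

df = pd.read_csv("../results.csv")   # where the experiment code writes its results
s_col, i_col, v_col = "[S] [mol/L]", "[I] [mol/L]", "Reaction Rate [mol/L·min]"

subset = df[df[i_col] == df[i_col].min()]   # measurements at the lowest inhibitor level
slope, intercept = np.polyfit(1.0 / subset[s_col], 1.0 / subset[v_col], 1)

Vmax = 1.0 / intercept
Km = slope * Vmax
print(f"Estimated Vmax ~ {Vmax:.3g} mol/L·min, K_M ~ {Km:.3g} mol/L")
# Repeating the fit at several inhibitor concentrations and watching how the apparent
# Vmax and K_M change helps distinguish competitive, uncompetitive,
# non-competitive and mixed inhibition.
```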
###Code
# define path to results.csv file
respath = os.path.join(os.getcwd(), "..", "results.csv")
# delete existing result file and setup rng
if os.path.exists(respath):
os.remove(respath)
class system:
def __init__(self, vol=0, conc=0, press=0):
self.vol = vol
self.conc = conc
self.press = press
self.inhibition = 0
self.seed = 0
self.Vm = 0
self.Km = 0
self.Ki = 0
self.Kip= 0
class data:
def __init__(self, start=-1, error=0, label='none', units='pure', value=0,
minval=-1, maxval=3, text='none'):
self.start = start
self.minval = minval
self.maxval = maxval
self.error = error
self.label = label
self.units = units
self.value = value
self.text = text
# Experiment setup (+ hidden paramters)
system = system()
def initialiseExperiment():
global n
global system
global columns_list
global scatter
scatter = 0.01
n = []
columns_list = []
n.append(len(args)) # number of input adjustable parameters
n.append(len(result)) # number of results for the experiment
for i in range(0, n[0]):
columns_list.append(f"{args[i].label} [{args[i].units}]")
for i in range(0, n[1]):
columns_list.append(f"{result[i].label} [{result[i].units}]")
# Random number seed
t = int( time.time() * 1000.0 )
system.seed = ((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) +((t & 0x0000ff00) << 8) +((t & 0x000000ff) << 24)
random.seed(system.seed)
# Random inhibition type
rnd = random.random()
system.inhibition = int(5 * rnd)
if (system.inhibition > 4):
system.inhibition = 4
system.Vm = params["Vm"] * (1 + random.random()/2)
system.Km = params["Km"] * (1 + random.random()/2)
system.Ki = system.Km * random.random()
system.Kip= system.Km * random.random()
# Adjustable input parameters
def initialiseVariables():
global logScale
logScale = True
global args
args = []
args.append(
data(
label = "[S]",
minval = -3,
maxval = 1,
start = 0.001,
units = "mol/L",
value = 0.
)
)
args.append(
data(
label = "[I]",
minval = -3,
maxval = 0,
start = 0.001,
units = "mol/L",
value = 0.
)
)
# Results
def initialiseResults():
global result
result = []
result.append(
data(
label = "Reaction Rate",
start = 0.,
error = random.random() / 10.,
units = "mol/L·min"
)
)
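# measure() evaluates a Michaelis-Menten rate law with inhibition factors:
#   v = (ap * adp) * Vm * [S] / ((a * adp) * Km + [S])
# where a and adp depend on the inhibitor concentration and on the (hidden)
# inhibition type drawn in initialiseExperiment().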
def measure():
concS = float(args[0].text.value)
    concI = float(args[1].text.value)
Vm = system.Vm
Km = system.Km
Ki = system.Ki
Kip= system.Kip
    # no inhibition (default): all correction factors are 1
    a  = 1
    ap = 1
    adp = 1
# competitive
if (system.inhibition == 1):
a = 1 + concI / Ki
ap = 1
adp = 1
    # non-competitive: Vmax is reduced, Km is unchanged
    elif (system.inhibition == 4):
        a   = 1 + concI / Ki
        ap  = 1
        adp = 1. / (1 + concI / Ki)
# un-competitive
elif (system.inhibition == 2):
a = 1
ap = 1
adp = 1. / (1 + concI / Kip)
# mixed
elif (system.inhibition == 3):
a = 1 + concI / Ki
ap = 1
adp = 1. / (1 + concI / Kip)
res = (ap * adp) * Vm * concS / ((a * adp)*Km + concS)
return res
initialiseVariables()
out_P = ipw.Output()
out_L = ipw.Output()
out_X = ipw.Output()
with out_L:
display(Markdown("[Download CSV](../results.csv)"))
def calc(btn):
out_P.clear_output()
# Measurement result
result[0].value = measure()
# Random error
result[0].error = result[0].value * scatter * (0.5 - random.random()) * 2
# Output result
out_R[0].value = f"{result[0].value + result[0].error:.3e}"
# Read previous lines
res = pd.read_csv(respath)
var_list = []
for i in range(0, n[0]):
var_list.append(args[i].text.value)
for i in range(0, n[1]):
var_list.append(result[i].value + result[i].error)
# Append result
res.loc[len(res)] = var_list
res.to_csv(respath, index=False)
with out_P:
display(res.tail(50))
def reset(btn):
if os.path.exists(respath):
os.remove(respath)
initialiseResults()
initialiseExperiment()
res = pd.DataFrame(columns=columns_list)
res.to_csv(respath, index=False)
with out_P:
out_P.clear_output()
display(res.tail(1))
with out_X:
out_X.clear_output()
btn_reset = ipw.Button(description="Restart Laboratory", layout=ipw.Layout(width="150px"))
btn_reset.on_click(reset)
btn_calc = ipw.Button(description="Perform measurement", layout=ipw.Layout(width="150px"))
btn_calc.on_click(calc)
# ---
rows = []
reset(btn_reset)
args[0].text = ipw.Text(str(args[0].start))
rows.append(ipw.HBox([ipw.Label('Initial concentration of ' + args[0].label + ' : '),args[0].text]))
args[1].text = ipw.Text(str(args[1].start))
rows.append(ipw.HBox([ipw.Label('Initial concentration of ' + args[1].label + ' : '),args[1].text]))
out_R = []
for i in range(0, n[1]):
out_R.append(ipw.Label(value=""))
rows.append(ipw.HBox([ipw.Label(value=f"Measured {result[i].label} [{result[i].units}]:",
layout=ipw.Layout(width="250px")),
out_R[i]]))
rows.append(ipw.HBox([btn_reset, btn_calc, out_L]))
def calc2(btn):
    # Use the inhibition type chosen in initialiseExperiment() rather than re-deriving it
    iType = system.inhibition
with out_X:
out_X.clear_output()
if (iType == 1):
display(Markdown(r'Competitive inhibition'))
elif (iType == 2):
display(Markdown(r'Un-Competitive inhibition'))
elif (iType == 3):
display(Markdown(r'Mixed inhibition'))
elif (iType == 4):
display(Markdown(r'Non-Competitive inhibition'))
else:
display(Markdown(r'No inhibition'))
display(Markdown(r'$K_M$ = 'rf'{system.Km:7.5}'))
        display(Markdown(r'$V_{max}$ = 'rf'{system.Vm:7.5}'))
if (iType == 1) or (iType == 3) or (iType == 4):
display(Markdown(r'$K_i$ = 'rf'{system.Ki:7.5}'))
if (iType == 2) or (iType == 3) or (iType == 4):
display(Markdown(r'$K_i^\prime$ = 'rf'{system.Kip:7.5}'))
display(out_X)
btn_calc2 = ipw.Button(description="Check Inhibition Type", layout=ipw.Layout(width="150px"))
btn_calc2.on_click(calc2)
rows.append(ipw.HBox([btn_calc2]))
rows.append(ipw.HBox([out_P]))
ipw.VBox(rows)
###Output
_____no_output_____ |
notebooks/0_4_AV_model_pca_features.ipynb | ###Markdown
Model with PCA- Same baseline model just with some PCA on features Imports
###Code
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
import xgboost as xgb
from sklearn.preprocessing import minmax_scale
from sklearn.decomposition import PCA
###Output
_____no_output_____
###Markdown
Colab Kaggle Init
###Code
import os.path
if not os.path.exists('~/.kaggle/'):
print("Kaggle Folder doesn't exist yet")
from google.colab import files
print("Please click on button an upload your kaggle.json api file")
files.upload()
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!ls ~/.kaggle
!pip install -q kaggle
!pip install -q kaggle-cli
!kaggle competitions download -c ieee-fraud-detection
!unzip \*.zip
from IPython.display import clear_output
clear_output()
print("DONE!")
else:
print("Data already exists")
###Output
DONE!
###Markdown
Functions
###Code
## Function to reduce the DF size
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
###Output
_____no_output_____
###Markdown
Data
###Code
# Read in datasets
df_train_ident = pd.read_csv('train_identity.csv', index_col='TransactionID')
df_test_ident = pd.read_csv('test_identity.csv', index_col='TransactionID')
df_train_trans = pd.read_csv('train_transaction.csv', index_col='TransactionID')
df_test_trans = pd.read_csv('test_transaction.csv', index_col='TransactionID')
df_sample_submission = pd.read_csv('sample_submission.csv', index_col='TransactionID')
# Merge datasets
df_train = df_train_trans.merge(df_train_ident, how='left', left_index=True, right_index=True)
df_test = df_test_trans.merge(df_test_ident, how='left', left_index=True, right_index=True)
# Print shapes
print(df_train.shape)
print(df_test.shape)
###Output
(590540, 433)
(506691, 432)
###Markdown
Preprocessing
###Code
df_train = reduce_mem_usage(df_train)
df_test = reduce_mem_usage(df_test)
emails = {'gmail': 'google', 'att.net': 'att', 'twc.com': 'spectrum',
'scranton.edu': 'other', 'optonline.net': 'other', 'hotmail.co.uk': 'microsoft',
'comcast.net': 'other', 'yahoo.com.mx': 'yahoo', 'yahoo.fr': 'yahoo',
'yahoo.es': 'yahoo', 'charter.net': 'spectrum', 'live.com': 'microsoft',
'aim.com': 'aol', 'hotmail.de': 'microsoft', 'centurylink.net': 'centurylink',
'gmail.com': 'google', 'me.com': 'apple', 'earthlink.net': 'other', 'gmx.de': 'other',
'web.de': 'other', 'cfl.rr.com': 'other', 'hotmail.com': 'microsoft',
'protonmail.com': 'other', 'hotmail.fr': 'microsoft', 'windstream.net': 'other',
'outlook.es': 'microsoft', 'yahoo.co.jp': 'yahoo', 'yahoo.de': 'yahoo',
'servicios-ta.com': 'other', 'netzero.net': 'other', 'suddenlink.net': 'other',
'roadrunner.com': 'other', 'sc.rr.com': 'other', 'live.fr': 'microsoft',
'verizon.net': 'yahoo', 'msn.com': 'microsoft', 'q.com': 'centurylink',
'prodigy.net.mx': 'att', 'frontier.com': 'yahoo', 'anonymous.com': 'other',
'rocketmail.com': 'yahoo', 'sbcglobal.net': 'att', 'frontiernet.net': 'yahoo',
'ymail.com': 'yahoo', 'outlook.com': 'microsoft', 'mail.com': 'other',
'bellsouth.net': 'other', 'embarqmail.com': 'centurylink', 'cableone.net': 'other',
'hotmail.es': 'microsoft', 'mac.com': 'apple', 'yahoo.co.uk': 'yahoo', 'netzero.com': 'other',
'yahoo.com': 'yahoo', 'live.com.mx': 'microsoft', 'ptd.net': 'other', 'cox.net': 'other',
'aol.com': 'aol', 'juno.com': 'other', 'icloud.com': 'apple'}
us_emails = ['gmail', 'net', 'edu']
# https://www.kaggle.com/c/ieee-fraud-detection/discussion/100499#latest-579654
for c in ['P_emaildomain', 'R_emaildomain']:
df_train[c + '_bin'] = df_train[c].map(emails)
df_test[c + '_bin'] = df_test[c].map(emails)
df_train[c + '_suffix'] = df_train[c].map(lambda x: str(x).split('.')[-1])
df_test[c + '_suffix'] = df_test[c].map(lambda x: str(x).split('.')[-1])
df_train[c + '_suffix'] = df_train[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us')
df_test[c + '_suffix'] = df_test[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us')
df_train.head(10)
# Encode all categorical features
for f in df_train.drop('isFraud', axis=1).columns:
if df_train[f].dtype=='object' or df_test[f].dtype=='object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df_train[f].values) + list(df_test[f].values))
df_train[f] = lbl.transform(list(df_train[f].values))
df_test[f] = lbl.transform(list(df_test[f].values))
# Some feature engineering
df_train['Trans_min_mean'] = df_train['TransactionAmt'] - df_train['TransactionAmt'].mean()
df_train['Trans_min_std'] = df_train['Trans_min_mean'] / df_train['TransactionAmt'].std()
df_test['Trans_min_mean'] = df_test['TransactionAmt'] - df_test['TransactionAmt'].mean()
df_test['Trans_min_std'] = df_test['Trans_min_mean'] / df_test['TransactionAmt'].std()
df_train['TransactionAmt_to_mean_card1'] = df_train['TransactionAmt'] / df_train.groupby(['card1'])['TransactionAmt'].transform('mean')
df_train['TransactionAmt_to_mean_card4'] = df_train['TransactionAmt'] / df_train.groupby(['card4'])['TransactionAmt'].transform('mean')
df_train['TransactionAmt_to_std_card1'] = df_train['TransactionAmt'] / df_train.groupby(['card1'])['TransactionAmt'].transform('std')
df_train['TransactionAmt_to_std_card4'] = df_train['TransactionAmt'] / df_train.groupby(['card4'])['TransactionAmt'].transform('std')
df_test['TransactionAmt_to_mean_card1'] = df_test['TransactionAmt'] / df_test.groupby(['card1'])['TransactionAmt'].transform('mean')
df_test['TransactionAmt_to_mean_card4'] = df_test['TransactionAmt'] / df_test.groupby(['card4'])['TransactionAmt'].transform('mean')
df_test['TransactionAmt_to_std_card1'] = df_test['TransactionAmt'] / df_test.groupby(['card1'])['TransactionAmt'].transform('std')
df_test['TransactionAmt_to_std_card4'] = df_test['TransactionAmt'] / df_test.groupby(['card4'])['TransactionAmt'].transform('std')
df_train.head()
df_train['TransactionAmt'] = np.log(df_train['TransactionAmt'])
df_test['TransactionAmt'] = np.log(df_test['TransactionAmt'])
###Output
_____no_output_____
###Markdown
PCA
###Code
df_test['isFraud'] = 'test'
df = pd.concat([df_train, df_test], axis = 0, sort=False)
df = df.reset_index()
#df = df.drop('index', axis=1)
df.head()
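# PCA_change: fit PCA on the selected columns, drop them from the frame,
# and append the principal components as new columns named <prefix>0 .. <prefix>(n_components-1).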
def PCA_change(df, cols, n_components, prefix='PCA_', rand_seed=4):
from sklearn.preprocessing import minmax_scale
from sklearn.decomposition import PCA
pca = PCA(n_components=n_components, random_state=rand_seed)
principalComponents = pca.fit_transform(df[cols])
principalDf = pd.DataFrame(principalComponents)
df.drop(cols, axis=1, inplace=True)
principalDf.rename(columns=lambda x: str(prefix)+str(x), inplace=True)
df = pd.concat([df, principalDf], axis=1)
return df
# Get list with all V columns
mas_v = list(filter(lambda x: str.startswith(x, 'V'), list(df_train.columns)))
len(mas_v)
for col in mas_v:
# Fill nas with minimum - 2
df[col] = df[col].fillna((df[col].min() - 2))
# Scale feature
df[col] = (minmax_scale(df[col], feature_range=(0,1)))
df = PCA_change(df, mas_v, prefix='PCA_V_', n_components=30)
df.head()
df.dtypes
# Encode all categorical features
for f in df.drop('isFraud', axis=1).columns:
if df[f].dtype=='object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df[f].values))
df[f] = lbl.transform(list(df[f].values))
df_train, df_test = df[df['isFraud'] != 'test'], df[df['isFraud'] == 'test'].drop('isFraud', axis=1)
df_train.shape
X_train = df_train.sort_values('TransactionDT').drop(['isFraud',
'TransactionDT',
#'Card_ID'
],
axis=1)
y_train = df_train.sort_values('TransactionDT')['isFraud'].astype(bool)
X_test = df_test.sort_values('TransactionDT').drop(['TransactionDT',
#'Card_ID'
],
axis=1)
del df_train
df_test = df_test[["TransactionDT"]]
X_train.dtypes
% time
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
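# Note: EPOCHS is used as the number of cross-validation folds below; each fold's
# test-set probabilities are averaged into y_preds.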
EPOCHS = 3
kf = KFold(n_splits = EPOCHS, shuffle = True)
y_preds = np.zeros(df_sample_submission.shape[0])
y_oof = np.zeros(X_train.shape[0])
i = 0
for tr_idx, val_idx in kf.split(X_train, y_train):
i += 1
print("Split {}".format(i))
clf = xgb.XGBClassifier(
n_estimators=500,
max_depth=9,
learning_rate=0.05,
subsample=0.9,
colsample_bytree=0.9,
tree_method='gpu_hist'
)
X_tr, X_vl = X_train.iloc[tr_idx, :], X_train.iloc[val_idx, :]
y_tr, y_vl = y_train.iloc[tr_idx], y_train.iloc[val_idx]
clf.fit(X_tr, y_tr)
y_pred_train = clf.predict_proba(X_vl)[:,1]
y_oof[val_idx] = y_pred_train
print('ROC AUC {}'.format(roc_auc_score(y_vl, y_pred_train)))
y_preds+= clf.predict_proba(X_test)[:,1] / EPOCHS
X_test_preds = X_test.assign(isFraud = y_preds)
X_test_preds.head()
X_test_preds = X_test_preds[['TransactionID', 'isFraud']]
X_test_preds.head()
X_test_preds.set_index('TransactionID', inplace=True)
X_test_preds.head()
X_test_preds.to_csv('preds.csv')
pd.read_csv('preds.csv').head()
#!kaggle competitions submit -c ieee-fraud-detection -f preds.csv -m "PCA"
###Output
_____no_output_____
###Markdown
Hyperparam search
###Code
from sklearn.model_selection import KFold,TimeSeriesSplit, StratifiedKFold
from sklearn.metrics import roc_auc_score
from xgboost import plot_importance
from sklearn.metrics import make_scorer
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe, space_eval
import time
import gc
def objective(params):
time1 = time.time()
params = {
'max_depth': int(params['max_depth']),
'gamma': "{:.3f}".format(params['gamma']),
'subsample': "{:.2f}".format(params['subsample']),
'reg_alpha': "{:.3f}".format(params['reg_alpha']),
'reg_lambda': "{:.3f}".format(params['reg_lambda']),
'learning_rate': "{:.3f}".format(params['learning_rate']),
'num_leaves': '{:.3f}'.format(params['num_leaves']),
'colsample_bytree': '{:.3f}'.format(params['colsample_bytree']),
'min_child_samples': '{:.3f}'.format(params['min_child_samples']),
'feature_fraction': '{:.3f}'.format(params['feature_fraction']),
'bagging_fraction': '{:.3f}'.format(params['bagging_fraction'])
}
print("\n############## New Run ################")
print(f"params = {params}")
FOLDS = 7
count=1
skf = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=42)
tss = TimeSeriesSplit(n_splits=FOLDS)
y_preds = np.zeros(df_sample_submission.shape[0])
y_oof = np.zeros(X_train.shape[0])
score_mean = 0
for tr_idx, val_idx in tss.split(X_train, y_train):
clf = xgb.XGBClassifier(
n_estimators=600, random_state=4, verbose=True,
tree_method='gpu_hist',
**params
)
X_tr, X_vl = X_train.iloc[tr_idx, :], X_train.iloc[val_idx, :]
y_tr, y_vl = y_train.iloc[tr_idx], y_train.iloc[val_idx]
clf.fit(X_tr, y_tr)
#y_pred_train = clf.predict_proba(X_vl)[:,1]
#print(y_pred_train)
score = make_scorer(roc_auc_score, needs_proba=True)(clf, X_vl, y_vl)
# plt.show()
score_mean += score
print(f'{count} CV - score: {round(score, 4)}')
count += 1
time2 = time.time() - time1
print(f"Total Time Run: {round(time2 / 60,2)}")
gc.collect()
print(f'Mean ROC_AUC: {score_mean / FOLDS}')
del X_tr, X_vl, y_tr, y_vl, clf, score
return -(score_mean / FOLDS)
space = {
# The maximum depth of a tree, same as GBM.
# Used to control over-fitting as higher depth will allow model
# to learn relations very specific to a particular sample.
# Should be tuned using CV.
# Typical values: 3-10
'max_depth': hp.quniform('max_depth', 7, 23, 1),
# reg_alpha: L1 regularization term. L1 regularization encourages sparsity
# (meaning pulling weights to 0). It can be more useful when the objective
# is logistic regression since you might need help with feature selection.
'reg_alpha': hp.uniform('reg_alpha', 0.01, 0.4),
# reg_lambda: L2 regularization term. L2 encourages smaller weights, this
# approach can be more useful in tree-models where zeroing
# features might not make much sense.
'reg_lambda': hp.uniform('reg_lambda', 0.01, .4),
# eta: Analogous to learning rate in GBM
# Makes the model more robust by shrinking the weights on each step
# Typical final values to be used: 0.01-0.2
'learning_rate': hp.uniform('learning_rate', 0.01, 0.2),
# colsample_bytree: Similar to max_features in GBM. Denotes the
# fraction of columns to be randomly samples for each tree.
# Typical values: 0.5-1
'colsample_bytree': hp.uniform('colsample_bytree', 0.3, .9),
# A node is split only when the resulting split gives a positive
# reduction in the loss function. Gamma specifies the
# minimum loss reduction required to make a split.
# Makes the algorithm conservative. The values can vary depending on the loss function and should be tuned.
'gamma': hp.uniform('gamma', 0.01, .7),
# more increases accuracy, but may lead to overfitting.
# num_leaves: the number of leaf nodes to use. Having a large number
# of leaves will improve accuracy, but will also lead to overfitting.
'num_leaves': hp.choice('num_leaves', list(range(20, 250, 10))),
# specifies the minimum samples per leaf node.
# the minimum number of samples (data) to group into a leaf.
# The parameter can greatly assist with overfitting: larger sample
# sizes per leaf will reduce overfitting (but may lead to under-fitting).
'min_child_samples': hp.choice('min_child_samples', list(range(100, 250, 10))),
# subsample: represents a fraction of the rows (observations) to be
# considered when building each subtree. Tianqi Chen and Carlos Guestrin
# in their paper A Scalable Tree Boosting System recommend
'subsample': hp.choice('subsample', [0.2, 0.4, 0.5, 0.6, 0.7, .8, .9]),
# randomly select a fraction of the features.
# feature_fraction: controls the subsampling of features used
# for training (as opposed to subsampling the actual training data in
# the case of bagging). Smaller fractions reduce overfitting.
'feature_fraction': hp.uniform('feature_fraction', 0.4, .8),
# randomly bag or subsample training data.
'bagging_fraction': hp.uniform('bagging_fraction', 0.4, .9)
# bagging_fraction and bagging_freq: enables bagging (subsampling)
# of the training data. Both values need to be set for bagging to be used.
# The frequency controls how often (iteration) bagging is used. Smaller
# fractions and frequencies reduce overfitting.
}
# Set algoritm parameters
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=27)
# Print best parameters
best_params = space_eval(space, best)
###Output
_____no_output_____ |
percepcion/magistrales/deteccion_bordes.ipynb | ###Markdown
High-pass filtering. Edges are abrupt changes of intensity between nearby pixels and therefore correspond to high-frequency components. With the Sobel and Prewitt filters we filter out the low-frequency components.
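As a rough sketch of what these filters do (assuming `image` is the grayscale array used in the cells below; the exact normalisation in `skimage` may differ):

```python
# Rough sketch: Sobel edge response as two directional convolutions plus a magnitude.
import numpy as np
from scipy import ndimage as ndi

kx = np.array([[1, 0, -1],
               [2, 0, -2],
               [1, 0, -1]], dtype=float)   # horizontal derivative kernel
ky = kx.T                                  # vertical derivative kernel

gx = ndi.convolve(image.astype(float), kx)
gy = ndi.convolve(image.astype(float), ky)
edge_magnitude = np.hypot(gx, gy)          # large only where intensity changes abruptly
```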
###Code
imsobel = filters.sobel(image)
io.imshow(imsobel)
io.show()
imprewitt = filters.prewitt(image)
io.imshow(imprewitt)
io.show()
imcanny = feature.canny(image)
io.imshow(imcanny)
io.show()
###Output
_____no_output_____
###Markdown
Segmentation. Once the edges have been filtered we perform the segmentation.1. Binarize the image: apply a threshold to the image histogram.2. Apply morphological operators to clean up the binary image.
###Code
plt.hist(imprewitt.flatten(), bins=256)
plt.show()
umbral = 0.05 # Manual threshold
# Automatic threshold using Otsu's method
# umbral = filters.threshold_otsu(imprewitt)
print('Umbral:',umbral)
binarizacion = imprewitt > umbral
io.imshow(binarizacion)
io.show()
###Output
Umbral: 0.05
###Markdown
Morphological operators: dilation and erosion* Dilation: expands the white (foreground) regions.* Erosion: expands the black (background) regions.
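A tiny, self-contained illustration of the two operators on a toy binary array (independent of the image used below):

```python
# Toy example: dilation grows the True (white) foreground, erosion shrinks it.
import numpy as np
from skimage import morphology

toy = np.zeros((5, 5), dtype=bool)
toy[2, 1:4] = True
toy[1:4, 2] = True   # a small plus sign

print(morphology.dilation(toy, morphology.square(3)).astype(int))
print(morphology.erosion(toy, morphology.square(3)).astype(int))
```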
###Code
im0 = morphology.remove_small_objects(binarizacion, 9)
im1 = morphology.dilation(image=im0, selem=disk(2))
im2 = morphology.erosion(image=im1, selem=disk(1))
im3 = ndi.binary_fill_holes(im2)
im4 = morphology.erosion(image=im3, selem=disk(5))
im5 = morphology.dilation(image=im4, selem=disk(4))
f, axes = plt.subplots(2, 4, figsize=(16, 8))
axes[0, 0].imshow(binarizacion, cmap='gray', aspect='equal')
axes[0, 0].set_title('Binary image')
axes[0, 1].imshow(im0, cmap='gray', aspect='equal')
axes[0, 1].set_title('Small objects remove')
axes[0, 2].imshow(im1, cmap='gray', aspect='equal')
axes[0, 2].set_title('After dilation')
axes[0, 3].imshow(im2, cmap='gray', aspect='equal')
axes[0, 3].set_title('After erosion')
axes[1, 0].imshow(im3, cmap='gray', aspect='equal')
axes[1, 0].set_title('Holes filled')
axes[1, 1].imshow(im4, cmap='gray', aspect='equal')
axes[1, 1].set_title('After erosion')
axes[1, 2].imshow(im5, cmap='gray', aspect='equal')
axes[1, 2].set_title('After dilation')
axes[1, 3].imshow(image, cmap='gray', aspect='equal')
axes[1, 3].set_title('Imagen original')
###Output
_____no_output_____ |
Term_Deposit_Hypothesis_Testing_Module1_Prj.ipynb | ###Markdown
###Code
#Import right libraries
import scipy.stats as stats
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
#Call required libraries
# To time processes
import time
#To suppress warnings
import warnings
#Data manipulation
import numpy as np
import pandas as pd
#For graphics
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# For scaling dataset
from sklearn.preprocessing import StandardScaler
#For clustering
from sklearn.cluster import KMeans, AgglomerativeClustering, AffinityPropagation
#For GMM clustering
from sklearn.mixture import GaussianMixture
# For os related operations
import os
import sys
#load the dataset
bank=pd.read_csv('/content/raw_data.csv')
#preview
bank
#preview the columns
bank.columns
#preview the data types
bank.dtypes
# visualization styling code
sns.set(rc={'figure.figsize':(13, 7.5)})
sns.set_context('talk')
#Turning off warnings
import warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
bank.count()
###Output
_____no_output_____
###Markdown
i. **Normality Test**
###Code
# Normality test
from scipy.stats import shapiro
def shapiro_test(bank, col_list):
for x in col_list:
print(x)
data = bank[x]
stat, p = shapiro(data)
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpret
alpha = 0.05
if p > alpha:
print('Sample looks Gaussian (fail to reject H0)')
else:
print('Sample does not look Gaussian (reject H0)')
print('\n')
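# Example usage of the helper above (left commented out so recorded outputs are unchanged):
# shapiro_test(bank, ['age', 'duration'])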
#use a probability plot to check for normality of the age variable
import matplotlib.pyplot as plt
stats.probplot(bank['age'], plot= plt)
plt.show()
# Shapiro-Wilk test
#check normality of the age column
stat, p = shapiro(bank['age'])
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpreting
alpha = 0.05
if p > alpha:
print('Sample looks Gaussian')
else:
print('Sample does not look Gaussian')
#use a probability plot to check for normality of the duration variable
import matplotlib.pyplot as plt
stats.probplot(bank['duration'], plot= plt)
plt.show()
#check normality of the duration column
stat, p = shapiro(bank['duration'])
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpreting
alpha = 0.05
if p > alpha:
print('Sample looks Gaussian')
else:
print('Sample does not look Gaussian')
###Output
Statistics=0.722, p=0.000
Sample does not look Gaussian
###Markdown
ii. **Sampling and Hypothesis Testing** **Relationship between term_deposit and marital**
###Code
#Drop the unknown in marital columns
bank.drop(bank.index[bank['marital'] == 'unknown'], inplace = True)
#preview the column for marital
bank['marital']
# Stratified sample
df_marital= bank.groupby('marital', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_marital
table_marital= pd.crosstab(bank['marital'], bank['term_deposit'])
table_marital
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_marital.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
###Output
Propability: 0.95, Critical value: 5.991464547107979, Test statistic: 121.60529977368248
Alpha: 0.050000000000000044, p-value: 3.924141589479533e-27
###Markdown
p-value less than alpha, thus significant evidence to reject null hypothesis
###Code
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_marital, annot=True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
**Relationship between term_deposit and education**
###Code
#Relationship between Term deposit and Education
#Drop the unknown in marital columns
bank.drop(bank.index[bank['education'] == 'unknown'], inplace = True)
#preview the column for education
bank['education']
# Stratified sample
df_education= bank.groupby('education', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_education
#education vs term_deposit
table_edu= pd.crosstab(bank['education'], bank['term_deposit'])
table_edu
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_edu.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
###Output
Propability: 0.95, Critical value: 12.591587243743977, Test statistic: 175.63060241800403
Alpha: 0.050000000000000044, p-value: 2.8727636814613635e-35
###Markdown
p-value less than alpha, thus significant evidence to reject null hypothesis
###Code
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_edu, annot=True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
**Relationship between term_deposit and job**
###Code
#Drop the unknown in marital columns
bank.drop(bank.index[bank['job'] == 'unknown'], inplace = True)
#preview job column
bank['job']
#job vs term_deposit
table_job= pd.crosstab(bank['job'], bank['term_deposit'])
table_job
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_job.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
###Output
Propability: 0.95, Critical value: 18.307038053275146, Test statistic: 819.5833643464897
Alpha: 0.050000000000000044, p-value: 1.2706458353488479e-169
###Markdown
p-value less than alpha, thus significant evidence to reject null hypothesis
###Code
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_job, annot=True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
Relationship between Loan and Term_deposit
###Code
#Drop the unknown in marital columns
bank.drop(bank.index[bank['loan'] == 'unknown'], inplace = True)
#preview the loan column
bank['loan']
# Stratified sample
df_loan= bank.groupby('loan', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_loan
#loan vs term_deposit
table_loan= pd.crosstab(bank['loan'], bank['term_deposit'])
table_loan
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_loan.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
###Output
Propability: 0.95, Critical value: 18.307038053275146, Test statistic: 819.5833643464897
Alpha: 0.050000000000000044, p-value: 1.2706458353488479e-169
###Markdown
p-value less than alpha, thus significant evidence to reject null hypothesis
###Code
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_loan, annot=True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
**Relationship between term_deposit and housing**
###Code
#Drop the unknown in housing columns
bank.drop(bank.index[bank['housing'] == 'unknown'], inplace = True)
#preview the housing column
bank['housing']
#housing vs bank
table_housing= pd.crosstab(bank['housing'], bank['term_deposit'])
table_housing
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_housing.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
###Output
Propability: 0.95, Critical value: 3.841458820694124, Test statistic: 3.758158182973599
Alpha: 0.050000000000000044, p-value: 0.0525504337154611
###Markdown
The p-value is greater than alpha, so we fail to reject the null hypothesis (no significant association between housing and term deposit)
###Code
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_housing, annot=True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
**Relationship between Term_deposit Account and Age**
###Code
#sample 40 records in age
table_age= bank[['age', 'term_deposit']].sample(n=40, random_state=1)
#preview the sample
table_age
#convert term_deposit column into numerical
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
label_encoder.fit(bank['term_deposit'])
bank['term_deposit'] = label_encoder.transform(bank[['term_deposit']])
bank
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
label_encoder.fit(table_age['term_deposit'])
table_age['term_deposit'] = label_encoder.transform(table_age['term_deposit'])
table_age
#population mean of age
a = bank['age'].mean()
a
#population mean of term_deposit account
b= bank['term_deposit'].mean()
b
#sample mean of age
c=table_age['age'].mean()
c
#sample mean of term_deposit account
d=table_age['term_deposit'].mean()
d
#sample standard deviation
e= table_age['age'].std()
e
#sample standard deviation
f=table_age['term_deposit'].std()
f
# point estimation
# population.mean() - sample.mean()
bank['age'].mean() - table_age['age'].mean()
# Perform a two sample z test
from statsmodels.stats.weightstats import ztest
zscore, p = ztest(x1 = table_age[table_age['term_deposit']==1]['age'].values, x2=table_age[table_age['term_deposit']==0]['age'].values)
print(f'Test statistic: {zscore}, p-value: {p}')
#interpretation of the p value
# alpha value is 0.05 or 5%
if p < 0.05:
print(" we are rejecting null hypothesis")
else:
print("we fail to reject null hypothesis")
import math
sample_mean = table_age['term_deposit'].mean()
# Get the z-critical value
z_critical = stats.norm.ppf(q = 0.975)
# Check the z-critical value
print("z-critical value:")
print(z_critical)
# Get the population standard deviation
pop_stdev = bank['age'].std()
margin_of_error = z_critical * (pop_stdev/math.sqrt(296))
confidence_interval = (sample_mean - margin_of_error,
sample_mean + margin_of_error)
print("Confidence interval:")
print(confidence_interval)
###Output
z-critical value:
1.959963984540054
Confidence interval:
(-1.0721845906295608, 1.272184590629561)
###Markdown
Relationship between term_deposit and call duration
###Code
#sample 40 records of duration
table_duration= bank[['duration', 'term_deposit']].sample(n=40, random_state=1)
#preview the sample
table_duration
#population mean of duration
a = bank['duration'].mean()
a
#population mean of term_deposit account
b= bank['term_deposit'].mean()
b
#sample mean of duration
c=table_duration['duration'].mean()
c
#sample mean of term_deposit account
d=table_duration['term_deposit'].mean()
d
#sample standard deviation
e= table_duration['duration'].std()
e
#sample standard deviation
f=table_duration['term_deposit'].std()
f
# population.mean() - sample.mean()
bank['duration'].mean() - table_duration['duration'].mean()
# Perform a two sample z test
from statsmodels.stats.weightstats import ztest
zscore, p = ztest(x1 = table_duration[table_duration['term_deposit']==1]['duration'].values, x2=table_duration[table_duration['term_deposit']==0]['duration'].values)
print(f'Test statistic: {zscore}, p-value: {p}')
#interpretation of the p value
# alpha value is 0.05 or 5%
if p < 0.05:
print(" we are rejecting null hypothesis")
else:
print("we fail to reject null hypothesis")
import math
sample_mean = table_duration['term_deposit'].mean()
# Get the z-critical value
z_critical = stats.norm.ppf(q = 0.975)
# Check the z-critical value
print("z-critical value:")
print(z_critical)
# Get the population standard deviation
pop_stdev = bank['duration'].std()
margin_of_error = z_critical * (pop_stdev/math.sqrt(296))
confidence_interval = (sample_mean - margin_of_error,
sample_mean + margin_of_error)
print("Confidence interval:")
print(confidence_interval)
###Output
z-critical value:
1.959963984540054
Confidence interval:
(-29.4957313333587, 29.695731333358705)
|
udacity_cs344_hw3.ipynb | ###Markdown
###Code
# Homework 3 for Udacity CS344 Course, Intro to Parallel Programming
# clone the code repo,
!git clone https://github.com/depctg/udacity-cs344-colab
!pip install git+https://github.com/depctg/nvcc4jupyter.git
# load cuda plugin
%config NVCCPluginV2.static_dir = True
%config NVCCPluginV2.relative_dir = "udacity-cs344-colab/src/HW3"
%load_ext nvcc_plugin
# change to work directory, generate makefiles
!mkdir udacity-cs344-colab/build
%cd udacity-cs344-colab/build
!cmake ../src
%%cuda --name student_func.cu
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include <limits.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
__global__
void reduce_minmax_kernel(const float* d_in, float* d_out, const size_t size, int minmax){
extern __shared__ float shared[];
int mid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if (mid < size){
shared[tid] = d_in[mid];
} else {
if (minmax == 0)
shared[tid] = FLT_MAX;
else
shared[tid] = - FLT_MAX;
}
__syncthreads();
if (mid >= size){
if (tid == 0){
if (minmax == 0)
d_out[blockIdx.x] = FLT_MAX;
else
d_out[blockIdx.x] = -FLT_MAX;
}
return;
}
for (unsigned int s = blockDim.x/2; s > 0; s /= 2){
if (tid < s){
if (minmax == 0){
shared[tid] = min(shared[tid], shared[tid + s]);
} else {
shared[tid] = max(shared[tid], shared[tid + s]);
}
}
__syncthreads();
}
if (tid == 0){
d_out[blockIdx.x] = shared[0];
}
}
__global__
void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size){
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size)
return;
float lum_range = lum_max - lum_min;
int bin = ((d_in[mid] - lum_min) / lum_range) * bin_count;
bin = bin == bin_count ? bin_count - 1 : bin; //max value bin is the last of the histo
atomicAdd(&d_bins[bin], 1);
}
__global__
void scan_kernel(unsigned int* d_bins, int size){
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size)
return;
for (int s = 1; s <= size; s *= 2){
int spot = mid - s;
unsigned int val = 0;
if (spot >= 0)
val = d_bins[spot];
__syncthreads();
if (spot >= 0)
d_bins[mid] += val;
__syncthreads();
}
}
int get_max_size(int n, int d){
return (int)ceil((float)n/float(d));
}
float reduce_minmax(const float* const d_in, const size_t size, int minmax){
int BLOCK_SIZE = 32;
size_t curr_size = size;
float* d_curr_in;
checkCudaErrors(cudaMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(cudaMemcpy(d_curr_in, d_in, sizeof(float) * size, cudaMemcpyDeviceToDevice));
float* d_curr_out;
dim3 thread_dim(BLOCK_SIZE);
const int shared_mem_size = sizeof(float) * BLOCK_SIZE;
while (1){
checkCudaErrors(cudaMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE)));
        dim3 block_dim(get_max_size(curr_size, BLOCK_SIZE)); // use curr_size: using the original size would launch extra blocks that write past the end of d_curr_out on later iterations
reduce_minmax_kernel<<<block_dim, thread_dim, shared_mem_size>>>(d_curr_in, d_curr_out, curr_size, minmax);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_curr_in));
d_curr_in = d_curr_out;
if (curr_size < BLOCK_SIZE)
break;
curr_size = get_max_size(curr_size, BLOCK_SIZE);
}
float h_out;
cudaMemcpy(&h_out, d_curr_out, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_curr_out);
return h_out;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
const size_t size = numRows * numCols;
min_logLum = reduce_minmax(d_logLuminance, size, 0);
max_logLum = reduce_minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int) * numBins;
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
histogram_kernel<<<hist_block_dim, thread_dim>>>(d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
unsigned int h_out[1024];
/*cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*1024, cudaMemcpyDeviceToHost);
for (int i = 0; i < 1024; i++)
printf("hist out %d\n", h_out[i]);*/
dim3 scan_block_dim(get_max_size(numBins, thread_dim.x));
scan_kernel<<<scan_block_dim, thread_dim>>>(d_bins, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/*cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*1024, cudaMemcpyDeviceToHost);
for (int i = 0; i < 1024; i++)
printf("cdf out %d\n", h_out[i]);*/
cudaMemset(d_cdf, 0, histo_size);
cudaMemcpy(d_cdf+1, d_bins, histo_size-sizeof(unsigned int), cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaFree(d_bins));
}
# make the cuda project
!make HW3
print("\n====== RESULT OF HW3 =======\n")
!bin/HW3 ../src/HW3/memorial_png_large.gold
# plot output images
import matplotlib.pyplot as plt
_,ax = plt.subplots(2,2, dpi=150)
ax[0][0].imshow(plt.imread("../src/HW3/memorial_raw_large.png"))
ax[0][0].set_title("original")
ax[0][0].grid(False)
ax[0][1].imshow(plt.imread("HW3_output.png"))
ax[0][1].set_title("output")
ax[0][1].grid(False)
ax[1][0].imshow(plt.imread("HW3_reference.png"))
ax[1][0].set_title("reference")
ax[1][0].grid(False)
ax[1][1].imshow(plt.imread("HW3_differenceImage.png"))
ax[1][1].set_title("difference")
ax[1][1].grid(False)
plt.show()
###Output
_____no_output_____ |
demos/ivp_odes/Stiffness.ipynb | ###Markdown
Stiffness in Initial Value ProblemsCopyright (C) 2020 Andreas KloecknerMIT LicensePermission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included inall copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS INTHE SOFTWARE.
###Code
import numpy as np
import matplotlib.pyplot as pt
###Output
_____no_output_____
###Markdown
Consider $y'=-100y+100t + 101$.Exact solution: $y(t)=1+t+ce^{-100t}$.Exact solution derivative: $y'(t)=1-100ce^{-100t}$.
###Code
def f(t, y):
return -100*y+100*t + 101
t_end = 0.2
def plot_solution(t0, y0):
c = (y0-1-t0)/np.exp(-100*t0)
t_mesh = np.linspace(t0, t_end, 1000)
solution = 1+t_mesh+c*np.exp(-100*t_mesh)
pt.plot(t_mesh, solution, label="exact")
pt.plot(t0, y0, "ko")
plot_solution(t0=0, y0=1)
plot_solution(t0=0, y0=1.2)
plot_solution(t0=0, y0=-0.5)
plot_solution(t0=0.05, y0=-0.5)
###Output
/usr/local/lib/python3.5/dist-packages/IPython/core/formatters.py:92: DeprecationWarning: DisplayFormatter._ipython_display_formatter_default is deprecated: use @default decorator instead.
def _ipython_display_formatter_default(self):
/usr/local/lib/python3.5/dist-packages/IPython/core/formatters.py:669: DeprecationWarning: PlainTextFormatter._singleton_printers_default is deprecated: use @default decorator instead.
def _singleton_printers_default(self):
###Markdown
Here's a helper function that uses a time stepper in the form of a `step_function` to numerically solve an ODE and plot the numerical solution:
###Code
def integrate_ode(step_function, t0, y0, h):
times = [t0]
ys = [y0]
while times[-1] <= t_end + 1e-14:
t = times[-1]
ys.append(step_function(t, ys[-1], h))
times.append(t + h)
pt.plot(times, ys, label=step_function.__name__)
pt.xlim([t0, t_end])
pt.ylim([-1, 2])
pt.legend(loc="best")
###Output
_____no_output_____
###Markdown
Using an Explicit Method First, implement `forward_euler_step(tk, yk, h)`:
###Code
#clear
def forward_euler_step(tk, yk, h):
return yk + h*f(tk, yk)
t0 = 0.05
y0 = -0.5
h = 0.008 # start this at 0.001, then grow
plot_solution(t0=t0, y0=y0)
integrate_ode(forward_euler_step, t0=t0, y0=y0, h=h)
###Output
_____no_output_____
###Markdown
* What's the main challenge here? Using an Implicit Method Next, implement `backward_euler_step(tk, yk, h)`:
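A short aside (added reasoning): for this ODE the forward Euler error is multiplied by $(1-100h)$ at every step, so the explicit method is only stable for $|1-100h|\le 1$, i.e. $h\le 0.02$; the step size is dictated by stability rather than accuracy, which is exactly the difficulty with stiff problems. Backward Euler instead solves $y_{k+1}=y_k+h\,f(t_{k+1},y_{k+1})$ for $y_{k+1}$; with $f(t,y)=-100y+100t+101$ this yields $y_{k+1}=\dfrac{y_k+h\,(100\,t_{k+1}+101)}{1+100h}$, which is the update implemented in the next cell and remains stable for any $h>0$.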
###Code
#clear
def backward_euler_step(tk, yk, h):
tkp1 = tk+h
return (yk + h*(100*tkp1 + 101))/(1+100*h)
t0 = 0.05
y0 = -0.5
h = 0.05 # start this at 0.001, then grow
plot_solution(t0=t0, y0=y0)
integrate_ode(backward_euler_step, t0=t0, y0=y0, h=h)
pt.xlim([t0, t_end])
pt.ylim([-1, 2])
pt.legend()
###Output
_____no_output_____ |
WEEK_1/RepMLA_Lab_1_7.ipynb | ###Markdown
**Artificial Intelligence - MSc** CS6501 - MACHINE LEARNING AND APPLICATIONS. **Business Analytics - MSc** ET5003 - MACHINE LEARNING APPLICATIONS. ***Annual Repeat***. Instructor: Enrique Naredo. RepMLA_Lab-1.7. Student ID: 0427845. Student name: Alison O'Connor. Pandas exercise
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Problem 1 Create a pandas series from each of the items below: a list, a numpy array and a dictionary
###Code
# Input
import numpy as np
a_list = list("abcdefg")
numpy_array = np.arange(1, 10)
dictionary = {"A": 0, "B":1, "C":2, "D":3, "E":5}
#Solutions (different ways)
series1 = pd.Series(a_list)
print(series1)
series2 = pd.Series(numpy_array)
print(series2)
series3 = pd.Series(dictionary)
print(series3)
###Output
0 a
1 b
2 c
3 d
4 e
5 f
6 g
dtype: object
0 1
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
dtype: int32
A 0
B 1
C 2
D 3
E 5
dtype: int64
###Markdown
Problem 2 Convert the series `ser` into a dataframe with its index as another column on the dataframe.
###Code
# input
mylist = list('abcedfghijklmnopqrstuvwxyz')
myarr = np.arange(26)
mydict = dict(zip(mylist, myarr))
ser = pd.Series(mydict)
print(ser[:5])
###Output
a 0
b 1
c 2
e 3
d 4
dtype: int64
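###Markdown
A possible solution for Problem 2 (one idiomatic approach; `reset_index()` promotes the index to a regular column):
###Code
# Solution sketch for Problem 2
df = ser.to_frame().reset_index()
print(df.head())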
###Markdown
Problem 3 Combine ser1 and ser2 to form a dataframe with two columns.
###Code
# input
ser1 = pd.Series(list('abcedfghijklmnopqrstuvwxyz'))
ser2 = pd.Series(np.arange(26))
###Output
_____no_output_____
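###Markdown
A possible solution for Problem 3 (a sketch; concatenating along `axis=1` is one common way to build a two-column dataframe):
###Code
# Solution sketch for Problem 3
df = pd.concat([ser1, ser2], axis=1)
df.columns = ['col1', 'col2']
print(df.head())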
###Markdown
Problem 4 Get all items of ser1 and ser2 not common to both.
###Code
# input
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
###Output
_____no_output_____
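###Markdown
A possible solution for Problem 4 (and Problem 5 below, which states the same task): keep the union of the two series minus their intersection, i.e. the symmetric difference.
###Code
# Solution sketch: items not common to both ser1 and ser2
ser_u = pd.Series(np.union1d(ser1, ser2))      # union
ser_i = pd.Series(np.intersect1d(ser1, ser2))  # intersection
print(ser_u[~ser_u.isin(ser_i)])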
###Markdown
Problem 5 Get all items of ser1 and ser2 not common to both.
###Code
# input
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
###Output
_____no_output_____ |
examples/6_p_scale_test_Dorogokupets2007_MgO.ipynb | ###Markdown
For high dpi displays.
###Code
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
0. General note This example compares pressure calculated from `pytheos` and original publication for the MgO scale by Dorogokupets 2007. 1. Global setup
###Code
import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos
###Output
_____no_output_____
###Markdown
3. Compare
###Code
eta = np.linspace(1., 0.6, 9)
print(eta)
dorogokupets2007_mgo = eos.periclase.Dorogokupets2007()
help(dorogokupets2007_mgo)
dorogokupets2007_mgo.print_equations()
dorogokupets2007_mgo.print_equations()
dorogokupets2007_mgo.print_parameters()
v0 = 74.698
dorogokupets2007_mgo.three_r
v = v0 * (eta)
temp = 3000.
p = dorogokupets2007_mgo.cal_p(v, temp * np.ones_like(v))
###Output
_____no_output_____
###Markdown
###Code
print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
print("{0: .3f} {1: .2f}".format(eta_i, p_i))
v = dorogokupets2007_mgo.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
###Output
[0. 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 ]
|
examples/tutorial_particle_physics/3b_score_ew_w_wt_az-1M.ipynb | ###Markdown
MadMiner particle physics tutorial Part 3b: Training a score estimatorJohann Brehmer, Felix Kling, Irina Espejo, and Kyle Cranmer 2018-2019 In part 3a of this tutorial we will finally train a neural network to estimate likelihood ratios. We assume that you have run part 1 and 2a of this tutorial. If, instead of 2a, you have run part 2b, you just have to load a different filename later. Preparations Make sure you've run the first tutorial before executing this notebook!
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
%matplotlib inline
from madminer.sampling import SampleAugmenter
from madminer import sampling
from madminer.ml import ScoreEstimator
# MadMiner output
logging.basicConfig(
format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',
datefmt='%H:%M',
level=logging.INFO
)
# Output of all other modules (e.g. matplotlib)
for key in logging.Logger.manager.loggerDict:
if "madminer" not in key:
logging.getLogger(key).setLevel(logging.WARNING)
###Output
_____no_output_____
###Markdown
1. Make (unweighted) training and test samples with augmented data At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators:First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear.Second, `SampleAugmenter` calculates all the augmented data ("gold") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework.
###Code
# sampler = SampleAugmenter('data/lhe_data_shuffled.h5')
sampler = SampleAugmenter('/data_CMS/cms/cortinovis/ewdim6/data_ew_1M_az/delphes_data_shuffled.h5')
###Output
20:12 madminer.analysis.da INFO Loading data from /data_CMS/cms/cortinovis/ewdim6/data_ew_1M_az/delphes_data_shuffled.h5
20:12 madminer.analysis.da INFO Found 2 parameters
20:12 madminer.analysis.da INFO Did not find nuisance parameters
20:12 madminer.analysis.da INFO Found 6 benchmarks, of which 6 physical
20:12 madminer.analysis.da INFO Found 18 observables
20:12 madminer.analysis.da INFO Found 89591 events
20:12 madminer.analysis.da INFO 34607 signal events sampled from benchmark sm
20:12 madminer.analysis.da INFO 17041 signal events sampled from benchmark w
20:12 madminer.analysis.da INFO 15914 signal events sampled from benchmark neg_w
20:12 madminer.analysis.da INFO 11005 signal events sampled from benchmark ww
20:12 madminer.analysis.da INFO 11024 signal events sampled from benchmark neg_ww
20:12 madminer.analysis.da INFO Found morphing setup with 6 components
20:12 madminer.analysis.da INFO Did not find nuisance morphing setup
###Markdown
The relevant `SampleAugmenter` function for local score estimators is `extract_samples_train_local()`. As in part 3a of the tutorial, for the argument `theta` you can use the helper functions `sampling.benchmark()`, `sampling.benchmarks()`, `sampling.morphing_point()`, `sampling.morphing_points()`, and `sampling.random_morphing_points()`.
###Code
x, theta, t_xz, _ = sampler.sample_train_local(
theta=sampling.benchmark('sm'),
n_samples=500000,
folder='/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples',
filename='train_score'
)
###Output
10:53 madminer.sampling.sa INFO Extracting training sample for local score regression. Sampling and score evaluation according to sm
10:53 madminer.sampling.sa INFO Starting sampling serially
10:53 madminer.sampling.sa INFO Sampling from parameter point 1 / 1
10:53 madminer.sampling.sa INFO Effective number of samples: mean 41734.99999999999, with individual thetas ranging from 41734.999999999985 to 41734.999999999985
###Markdown
We can use the same data as in part 3a, so you only have to execute this if you haven't gone through tutorial 3a:
###Code
_ = sampler.sample_test(
theta=sampling.benchmark('sm'),
n_samples=1000,
folder='/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples',
filename='test'
)
###Output
10:53 madminer.sampling.sa INFO Extracting evaluation sample. Sampling according to sm
10:53 madminer.sampling.sa INFO Starting sampling serially
10:53 madminer.sampling.sa INFO Sampling from parameter point 1 / 1
10:53 madminer.sampling.sa INFO Effective number of samples: mean 13770.999999999998, with individual thetas ranging from 13770.999999999998 to 13770.999999999998
###Markdown
2. Train score estimator It's now time to build a neural network. Only this time, instead of the likelihood ratio itself, we will estimate the gradient of the log likelihood with respect to the theory parameters -- the score. To be precise, the output of the neural network is an estimate of the score at some reference parameter point, for instance the Standard Model. A neural network that estimates this "local" score can be used to calculate the Fisher information at that point. The estimated score can also be used as a machine learning version of Optimal Observables, and likelihoods can be estimated based on density estimation in the estimated score space. This method for likelihood ratio estimation is called SALLY, and there is a closely related version called SALLINO. Both are explained in ["Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00013) and ["A Guide to Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00020).The central object for this is the `madminer.ml.ScoreEstimator` class:
###Code
estimator = ScoreEstimator(n_hidden=(30,30))
estimator.train(
method='sally',
x='/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/x_train_score.npy',
t_xz='/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/t_xz_train_score.npy',
)
estimator.save('/data_CMS/cms/cortinovis/ewdim6/models_ew_2M_az/sally')
###Output
10:53 madminer.ml.score INFO Starting training
10:53 madminer.ml.score INFO Batch size: 128
10:53 madminer.ml.score INFO Optimizer: amsgrad
10:53 madminer.ml.score INFO Epochs: 50
10:53 madminer.ml.score INFO Learning rate: 0.001 initially, decaying to 0.0001
10:53 madminer.ml.score INFO Validation split: 0.25
10:53 madminer.ml.score INFO Early stopping: True
10:53 madminer.ml.score INFO Scale inputs: True
10:53 madminer.ml.score INFO Shuffle labels False
10:53 madminer.ml.score INFO Samples: all
10:53 madminer.ml.score INFO Loading training data
10:53 madminer.utils.vario INFO Loading /data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/x_train_score.npy into RAM
10:53 madminer.utils.vario INFO Loading /data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/t_xz_train_score.npy into RAM
10:53 madminer.ml.score INFO Found 500000 samples with 2 parameters and 18 observables
10:53 madminer.ml.base INFO Setting up input rescaling
10:53 madminer.ml.score INFO Creating model
10:53 madminer.ml.score INFO Training model
10:53 madminer.utils.ml.tr INFO Training on CPU with single precision
10:53 madminer.utils.ml.tr INFO Epoch 2: train loss 0.31973 (mse_score: 0.320)
10:53 madminer.utils.ml.tr INFO val. loss 0.26984 (mse_score: 0.270)
10:54 madminer.utils.ml.tr INFO Epoch 4: train loss 0.23662 (mse_score: 0.237)
10:54 madminer.utils.ml.tr INFO val. loss 0.21506 (mse_score: 0.215)
10:55 madminer.utils.ml.tr INFO Epoch 6: train loss 0.20567 (mse_score: 0.206)
10:55 madminer.utils.ml.tr INFO val. loss 0.19369 (mse_score: 0.194)
10:55 madminer.utils.ml.tr INFO Epoch 8: train loss 0.18572 (mse_score: 0.186)
10:55 madminer.utils.ml.tr INFO val. loss 0.17514 (mse_score: 0.175)
10:56 madminer.utils.ml.tr INFO Epoch 10: train loss 0.17016 (mse_score: 0.170)
10:56 madminer.utils.ml.tr INFO val. loss 0.16176 (mse_score: 0.162)
10:57 madminer.utils.ml.tr INFO Epoch 12: train loss 0.15872 (mse_score: 0.159)
10:57 madminer.utils.ml.tr INFO val. loss 0.15332 (mse_score: 0.153)
10:58 madminer.utils.ml.tr INFO Epoch 14: train loss 0.15017 (mse_score: 0.150)
10:58 madminer.utils.ml.tr INFO val. loss 0.14669 (mse_score: 0.147)
10:58 madminer.utils.ml.tr INFO Epoch 16: train loss 0.14309 (mse_score: 0.143)
10:58 madminer.utils.ml.tr INFO val. loss 0.14009 (mse_score: 0.140)
10:59 madminer.utils.ml.tr INFO Epoch 18: train loss 0.13711 (mse_score: 0.137)
10:59 madminer.utils.ml.tr INFO val. loss 0.13725 (mse_score: 0.137)
11:00 madminer.utils.ml.tr INFO Epoch 20: train loss 0.13240 (mse_score: 0.132)
11:00 madminer.utils.ml.tr INFO val. loss 0.13283 (mse_score: 0.133)
11:00 madminer.utils.ml.tr INFO Epoch 22: train loss 0.12773 (mse_score: 0.128)
11:00 madminer.utils.ml.tr INFO val. loss 0.12882 (mse_score: 0.129)
11:01 madminer.utils.ml.tr INFO Epoch 24: train loss 0.12444 (mse_score: 0.124)
11:01 madminer.utils.ml.tr INFO val. loss 0.12419 (mse_score: 0.124)
11:02 madminer.utils.ml.tr INFO Epoch 26: train loss 0.12079 (mse_score: 0.121)
11:02 madminer.utils.ml.tr INFO val. loss 0.12303 (mse_score: 0.123)
11:03 madminer.utils.ml.tr INFO Epoch 28: train loss 0.11794 (mse_score: 0.118)
11:03 madminer.utils.ml.tr INFO val. loss 0.11950 (mse_score: 0.119)
11:03 madminer.utils.ml.tr INFO Epoch 30: train loss 0.11558 (mse_score: 0.116)
11:03 madminer.utils.ml.tr INFO val. loss 0.11684 (mse_score: 0.117)
11:04 madminer.utils.ml.tr INFO Epoch 32: train loss 0.11358 (mse_score: 0.114)
11:04 madminer.utils.ml.tr INFO val. loss 0.11610 (mse_score: 0.116)
11:05 madminer.utils.ml.tr INFO Epoch 34: train loss 0.11195 (mse_score: 0.112)
11:05 madminer.utils.ml.tr INFO val. loss 0.11484 (mse_score: 0.115)
11:05 madminer.utils.ml.tr INFO Epoch 36: train loss 0.11045 (mse_score: 0.110)
11:05 madminer.utils.ml.tr INFO val. loss 0.11369 (mse_score: 0.114)
11:06 madminer.utils.ml.tr INFO Epoch 38: train loss 0.10930 (mse_score: 0.109)
11:06 madminer.utils.ml.tr INFO val. loss 0.11236 (mse_score: 0.112)
11:07 madminer.utils.ml.tr INFO Epoch 40: train loss 0.10807 (mse_score: 0.108)
11:07 madminer.utils.ml.tr INFO val. loss 0.11138 (mse_score: 0.111)
11:08 madminer.utils.ml.tr INFO Epoch 42: train loss 0.10706 (mse_score: 0.107)
11:08 madminer.utils.ml.tr INFO val. loss 0.11061 (mse_score: 0.111)
11:08 madminer.utils.ml.tr INFO Epoch 44: train loss 0.10623 (mse_score: 0.106)
11:08 madminer.utils.ml.tr INFO val. loss 0.11042 (mse_score: 0.110)
11:09 madminer.utils.ml.tr INFO Epoch 46: train loss 0.10550 (mse_score: 0.105)
11:09 madminer.utils.ml.tr INFO val. loss 0.10964 (mse_score: 0.110)
11:10 madminer.utils.ml.tr INFO Epoch 48: train loss 0.10475 (mse_score: 0.105)
11:10 madminer.utils.ml.tr INFO val. loss 0.10860 (mse_score: 0.109)
11:10 madminer.utils.ml.tr INFO Epoch 50: train loss 0.10418 (mse_score: 0.104)
11:10 madminer.utils.ml.tr INFO val. loss 0.10824 (mse_score: 0.108)
11:10 madminer.utils.ml.tr INFO Early stopping did not improve performance
11:10 madminer.utils.ml.tr INFO Training time spend on:
11:10 madminer.utils.ml.tr INFO initialize model: 0.00h
11:10 madminer.utils.ml.tr INFO ALL: 0.30h
11:10 madminer.utils.ml.tr INFO check data: 0.00h
11:10 madminer.utils.ml.tr INFO make dataset: 0.00h
11:10 madminer.utils.ml.tr INFO make dataloader: 0.00h
11:10 madminer.utils.ml.tr INFO setup optimizer: 0.00h
11:10 madminer.utils.ml.tr INFO initialize training: 0.00h
11:10 madminer.utils.ml.tr INFO set lr: 0.00h
11:10 madminer.utils.ml.tr INFO load training batch: 0.12h
11:10 madminer.utils.ml.tr INFO fwd: move data: 0.00h
11:10 madminer.utils.ml.tr INFO fwd: check for nans: 0.02h
11:10 madminer.utils.ml.tr INFO fwd: model.forward: 0.02h
11:10 madminer.utils.ml.tr INFO fwd: calculate losses: 0.01h
11:10 madminer.utils.ml.tr INFO training forward pass: 0.04h
11:10 madminer.utils.ml.tr INFO training sum losses: 0.00h
11:10 madminer.utils.ml.tr INFO opt: zero grad: 0.00h
11:10 madminer.utils.ml.tr INFO opt: backward: 0.03h
11:10 madminer.utils.ml.tr INFO opt: clip grad norm: 0.00h
11:10 madminer.utils.ml.tr INFO opt: step: 0.05h
11:10 madminer.utils.ml.tr INFO optimizer step: 0.08h
11:10 madminer.utils.ml.tr INFO load validation batch: 0.04h
11:10 madminer.utils.ml.tr INFO validation forward pass: 0.01h
11:10 madminer.utils.ml.tr INFO validation sum losses: 0.00h
11:10 madminer.utils.ml.tr INFO early stopping: 0.00h
11:10 madminer.utils.ml.tr INFO report epoch: 0.00h
11:10 madminer.ml.base INFO Saving model to /data_CMS/cms/cortinovis/ewdim6/models_ew_2M_az/sally
###Markdown
3. Evaluate score estimator Let's evaluate the SM score on the test data
###Code
estimator.load('/data_CMS/cms/cortinovis/ewdim6/models_ew_2M_az/sally')
t_hat = estimator.evaluate_score(
x = '/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/x_test.npy'
)
###Output
11:10 madminer.ml.base INFO Loading model from /data_CMS/cms/cortinovis/ewdim6/models_ew_2M_az/sally
11:10 madminer.utils.vario INFO Loading /data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/x_test.npy into RAM
###Markdown
Let's have a look at the estimated score and how it is related to the observables:
###Code
x = np.load('/data_CMS/cms/cortinovis/ewdim6/data_ew_2M_az/samples/x_test.npy')
fig = plt.figure(figsize=(10,4))
for i in range(2):
ax = plt.subplot(1,2,i+1)
sc = plt.scatter(x[:,0], x[:,1], c=t_hat[:,i], s=25., cmap='viridis', vmin=-1., vmax=1.)
cbar = plt.colorbar(sc)
cbar.set_label(r'$\hat{t}_' + str(i) + r'(x | \theta_{ref})$')
plt.xlabel(r'$p_{T,j1}$ [GeV]')
plt.ylabel(r'$\Delta \phi_{jj}$')
plt.xlim(10.,300.)
plt.ylim(-3.15,3.15)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
hocor2020/notebook/session-5.4.ipynb | ###Markdown
Step 1. Load the files
###Code
# Download the word embeddings for the Gossiping board, one model every five years from 2005 to 2020
!gdown --id "1gEL4v3wGgvqJnpWspISZvLeIL3GQZLB1" -O "Gossiping_2005.model" # Gossiping board, 2005
!gdown --id "1yB9WPVDJVmmLLxbEHZroZP_cYMP0JUpC" -O "Gossiping_2010.model" # Gossiping board, 2010
!gdown --id "1Vh8meq6hdte02nQ2-djclgpEKxFUC0YU" -O "Gossiping_2015.model" # Gossiping board, 2015
!gdown --id "1EiDgWcnDDSOy1bu_aRjbBk4JGIENNoGk" -O "Gossiping_2020.model" # Gossiping board, 2020
# Download the word embeddings for the WomenTalk board, one model every five years from 2005 to 2020
!gdown --id "18rhI6VBnBXBji0YRplcL9bF31K2gFH9R" -O "WomenTalk_2005.model" # WomenTalk board, 2005
!gdown --id "19XZ-SeZNUu515TZS3lW9kHASk_P6CYQJ" -O "WomenTalk_2010.model" # WomenTalk board, 2010
!gdown --id "1CQtZ_5Tu8ML24es2vYfQcCoGcCTadzCp" -O "WomenTalk_2015.model" # WomenTalk board, 2015
!gdown --id "1PqqW_5TyNKDU3WPubypIBED2GnlfFGTE" -O "WomenTalk_2020.model" # WomenTalk board, 2020
###Output
Downloading...
From: https://drive.google.com/uc?id=18rhI6VBnBXBji0YRplcL9bF31K2gFH9R
To: /content/WomenTalk_2005.model
15.0MB [00:00, 69.7MB/s]
Downloading...
From: https://drive.google.com/uc?id=19XZ-SeZNUu515TZS3lW9kHASk_P6CYQJ
To: /content/WomenTalk_2010.model
17.0MB [00:00, 77.1MB/s]
Downloading...
From: https://drive.google.com/uc?id=1CQtZ_5Tu8ML24es2vYfQcCoGcCTadzCp
To: /content/WomenTalk_2015.model
31.0MB [00:00, 97.8MB/s]
Downloading...
From: https://drive.google.com/uc?id=1PqqW_5TyNKDU3WPubypIBED2GnlfFGTE
To: /content/WomenTalk_2020.model
57.7MB [00:00, 139MB/s]
###Markdown
Step 2. Select the PTT boards and years
###Code
board_lst = ['Gossiping', 'WomenTalk']
year_lst = ['2005', '2010', '2015', '2020']
###Output
_____no_output_____
###Markdown
Step 3. Diachronic word embeddings (and nearest neighbors)
###Code
import gensim # for loading the word embeddings
# Build a class to hold everything related to the word embeddings
class Embedding:
    def __init__(self, board, year_lst):
        self.board = board # the chosen PTT board, as a string
        self.year_lst = year_lst # the chosen years, as a list
        self.path_lst = [f'{board}_{year}.model' for year in self.year_lst] # embedding file paths for each year of this board
        self.model_lst = [gensim.models.Word2Vec.load(path) for path in self.path_lst] # load a model from each path
# TO-DO
# Build the embedding class for the Gossiping board, years 2005 and 2015
embed_2005_2015 = Embedding('Gossiping', ['2005', '2015'])
# TO-DO
# Inspect the model_lst of embed_2005_2015
embed_2005_2015.model_lst
# TO-DO
# Find the nearest neighbors of '台灣' (Taiwan) in model_lst[0]
embed_2005 = embed_2005_2015.model_lst[0]
embed_2005.wv.most_similar('台灣')
# TO-DO
# Find the top 35 nearest neighbors of '台灣' in model_lst[0]
embed_2005.wv.most_similar('台灣', topn=35)
# TO-DO
# Get the word vector of '台灣' in model_lst[0]
embed_2005['台灣']
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Step 4. Visualization
###Code
import numpy as np
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
%matplotlib inline
# Download a Chinese font
!wget -O taipei_sans_tc_beta.ttf https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download
# Configure matplotlib to use the Chinese font
matplotlib.font_manager.fontManager.addfont('taipei_sans_tc_beta.ttf')
matplotlib.rc('font', family = 'Taipei Sans TC Beta')
# Set the figure resolution
plt.rcParams['figure.dpi'] = 300
# source: https://github.com/sismetanin/word2vec-tsne
def tsne_plot_similar_words(labels, embedding_clusters, word_clusters, n1):
    plt.figure(figsize=(9, 9)) # set up a blank canvas
    colors = cm.Accent(np.linspace(0, 1, len(labels))) # one color per label
# source: https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
arrow_lst = []
for label, embeddings, words, color in zip(labels, embedding_clusters, word_clusters, colors):
x = embeddings[:, 0]
y = embeddings[:, 1]
        arrow_lst.append((x[0], y[0])) # point 0 is the keyword itself; record its x, y in arrow_lst
        # plot a point for the keyword
        plt.scatter(x[:1], y[:1], c=color, alpha=1, label=label)
        for i, word in enumerate(words):
            # the keyword itself
            if i == 0:
                a = 1 # opacity
                size = 28 # font size
            # split the neighbors into tiers, adjusting opacity and font size
elif i >= 1 and i <= n1:
a = 0.85
size = 16
else:
a = 0.35
size = 16
            # annotate the word
plt.annotate(word, alpha=a, xy=(x[i], y[i]), xytext=(1, 1),
textcoords='offset points', ha='right', va='bottom', size=size, c=color)
for c, i in zip(colors, range(len(arrow_lst))):
try:
            # draw an arrow from one time point to the next
plt.annotate('', xy=(arrow_lst[i+1][0], arrow_lst[i+1][1]), xytext=(arrow_lst[i][0], arrow_lst[i][1]),
arrowprops=dict(facecolor=c, edgecolor=c, width=5, shrink=0.01, alpha=0.5))
# source: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.annotate.html
except:
pass
plt.legend(loc=4)
plt.grid(True)
plt.axis('off')
plt.show()
class PlotTemporalData(Embedding): # extend the Embedding class with additional functionality
def __init__(self, board, year_lst):
super().__init__(board, year_lst)
        # self.vocab_lst = [model.wv.vocab for model in self.model_lst] # the vocabulary of each embedding
    # extract the data points from the embeddings
def create_datapoints(self, keyword, n1=10, n2=15):
        error_log = {} # keep track of error messages
        labels = [] # word_year labels
        word_clusters = [] # words
        embedding_clusters = [] # vectors
        # first-level for loop: over the years
        for year, model in zip(self.year_lst, self.model_lst): # pair self.year_lst with self.model_lst one-to-one
label = f'{keyword}({year})'
            try: # if anything goes wrong, catch it (Exception as e) and store the message in the error_log dictionary
                # the keyword itself
                words = [label]
                embeddings = [model[keyword]]
                # second-level for loop: nearest neighbors for this year
                # nearest neighbors (the top n1+n2)
for similar_word, _ in model.wv.most_similar(keyword, topn=n1+n2):
words.append(similar_word)
embeddings.append(model[similar_word])
embedding_clusters.append(embeddings)
word_clusters.append(words)
labels.append(label)
except Exception as e:
error_log[label] = e
print(error_log)
self.error_log = error_log
self.keyword = keyword
self.labels = labels
self.n1 = n1
self.n2 = n2
self.embedding_clusters = embedding_clusters
self.word_clusters = word_clusters
    # run the collected points through t-SNE
def tsne(self):
embedding_clusters = np.array(self.embedding_clusters)
n, m, k = embedding_clusters.shape
tsne_model_en_2d = TSNE(perplexity=15, n_components=2, init='pca', n_iter=3500, random_state=32)
embeddings_en_2d = np.array(tsne_model_en_2d.fit_transform(embedding_clusters.reshape(n * m, k))).reshape(n, m, 2)
self.embeddings_en_2d = embeddings_en_2d
    # visualize the processed points
def tsne_plot(self):
tsne_plot_similar_words(self.labels, self.embeddings_en_2d, self.word_clusters, self.n1)
###Output
_____no_output_____
###Markdown
Step 5. Choose the word to examine
###Code
keyword = '台灣'
for board in board_lst:
data = PlotTemporalData(board, year_lst)
data.create_datapoints(keyword, n1=5, n2=5)
#data.create_datapoints(keyword)
data.tsne()
data.tsne_plot()
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:23: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.
if np.issubdtype(vec.dtype, np.int):
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:29: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
|
ML_with_Graphs/CS224W_Colab_0.ipynb | ###Markdown
**CS224W - Colab 0**Colab 0 **will not be graded**, so you don't need to hand in this notebook. That said, we highly recommend you to run this notebook, so you can get familiar with the basic concepts of graph mining and Graph Neural Networks.In this Colab, we will introduce two packages, [NetworkX](https://networkx.org/documentation/stable/) and [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/).For the PyTorch Geometric section, you don't need to understand all the details already. Concepts and implementations of graph neural network will be covered in future lectures and Colabs.Please make a copy before you proceed. New Section NetworkX TutorialNetworkX is one of the most frequently used Python packages to create, manipulate, and mine graphs.Main parts of this tutorial are adapted from https://colab.research.google.com/github/jdwittenauer/ipython-notebooks/blob/master/notebooks/libraries/NetworkX.ipynbscrollTo=zA1OO6huHeV6 Setup
###Code
# Import the NetworkX package
import networkx as nx
###Output
_____no_output_____
###Markdown
GraphNetworkX provides several classes to store different types of graphs, such as directed and undirected graph. It also provides classes to create multigraphs (both directed and undirected).For more information, please refer to [NetworkX graph types](https://networkx.org/documentation/stable/reference/classes/index.html).
###Code
# Create an undirected graph G
G = nx.Graph()
print(G.is_directed())
# Create a directed graph H
H = nx.DiGraph()
print(H.is_directed())
# Add graph level attribute
G.graph["Name"] = "Bar"
print(G.graph)
###Output
False
True
{'Name': 'Bar'}
###Markdown
NodeNodes (with attributes) can be easily added to NetworkX graphs.
###Code
# Add one node with node level attributes
G.add_node(0, feature=0, label=0)
# Get attributes of the node 0
node_0_attr = G.nodes[0]
print("Node 0 has the attributes {}".format(node_0_attr))
# Add multiple nodes with attributes
G.add_nodes_from([
(1, {"feature": 1, "label": 1}),
(2, {"feature": 2, "label": 2})
])
# Loop through all the nodes
# Set data=True will return node attributes
for node in G.nodes(data=True):
print(node)
# Get number of nodes
num_nodes = G.number_of_nodes()
print("G has {} nodes".format(num_nodes))
###Output
(0, {'feature': 0, 'label': 0})
(1, {'feature': 1, 'label': 1})
(2, {'feature': 2, 'label': 2})
G has 3 nodes
###Markdown
EdgeSimilar to nodes, edges (with attributes) can also be easily added to NetworkX graphs.
###Code
# Add one edge with edge weight 0.5
G.add_edge(0, 1, weight=0.5)
# Get attributes of the edge (0, 1)
edge_0_1_attr = G.edges[(0, 1)]
print("Edge (0, 1) has the attributes {}".format(edge_0_1_attr))
# Add multiple edges with edge weights
G.add_edges_from([
(1, 2, {"weight": 0.3}),
(2, 0, {"weight": 0.1})
])
# Loop through all the edges
# Here there is no data=True, so only the edge will be returned
for edge in G.edges():
print(edge)
# Get number of edges
num_edges = G.number_of_edges()
print("G has {} edges".format(num_edges))
###Output
(0, 1)
(0, 2)
(1, 2)
G has 3 edges
###Markdown
Visualization
###Code
# Draw the graph
nx.draw(G, with_labels = True)
###Output
_____no_output_____
###Markdown
Node Degree and Neighbor
###Code
node_id = 1
# Degree of node 1
print("Node {} has degree {}".format(node_id, G.degree[node_id]))
# Get neighbor of node 1
for neighbor in G.neighbors(node_id):
print("Node {} has neighbor {}".format(node_id, neighbor))
###Output
Node 1 has degree 2
Node 1 has neighbor 0
Node 1 has neighbor 2
###Markdown
Other FunctionalitiesNetworkX also provides plenty of useful methods to study graphs.Here is an example to get [PageRank](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_analysis.pagerank_alg.pagerank.htmlnetworkx.algorithms.link_analysis.pagerank_alg.pagerank) of nodes (we will talk about PageRank in one of the future lectures).
###Code
num_nodes = 4
# Create a new path like graph and change it to a directed graph
G = nx.DiGraph(nx.path_graph(num_nodes))
nx.draw(G, with_labels = True)
# Get the PageRank
pr = nx.pagerank(G, alpha=0.8)
pr
###Output
_____no_output_____
###Markdown
Documentation You can explore more NetworkX functions through its [documentation](https://networkx.org/documentation/stable/). PyTorch Geometric TutorialPyTorch Geometric (PyG) is an extension library for PyTorch. It provides useful primitives to develop Graph Deep Learning models, including various graph neural network layers and a large number of benchmark datasets.Don't worry if you don't understand some concepts such as `GCNConv` -- we will cover all of them in the future lectures :)This tutorial is adapted from https://colab.research.google.com/drive/1h3-vJGRVloF5zStxL5I0rSy4ZUPNsjy8?usp=sharingscrollTo=ci-LpZWhRJoI by [Matthias Fey](https://rusty1s.github.io//)
###Code
import torch
print("PyTorch has version {}".format(torch.__version__))
###Output
PyTorch has version 1.8.1+cu101
###Markdown
SetupThe installation of PyG on Colab can be a little bit tricky. Execute the cell below -- in case of issues, more information can be found on the [PyG's installation page](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html).
###Code
# Install torch geometric
!pip install -q torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
!pip install -q torch-sparse -f https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
!pip install -q torch-geometric
###Output
[K |████████████████████████████████| 2.6MB 9.5MB/s
[K |████████████████████████████████| 1.5MB 9.8MB/s
[K |████████████████████████████████| 215kB 10.9MB/s
[K |████████████████████████████████| 235kB 27.4MB/s
[K |████████████████████████████████| 2.2MB 28.6MB/s
[K |████████████████████████████████| 51kB 3.7MB/s
[?25h Building wheel for torch-geometric (setup.py) ... [?25l[?25hdone
###Markdown
Visualization
###Code
# Helper function for visualization.
%matplotlib inline
import torch
import networkx as nx
import matplotlib.pyplot as plt
# Visualization function for NX graph or PyTorch tensor
def visualize(h, color, epoch=None, loss=None):
plt.figure(figsize=(7,7))
plt.xticks([])
plt.yticks([])
if torch.is_tensor(h):
h = h.detach().cpu().numpy()
plt.scatter(h[:, 0], h[:, 1], s=140, c=color, cmap="Set2")
if epoch is not None and loss is not None:
plt.xlabel(f'Epoch: {epoch}, Loss: {loss.item():.4f}', fontsize=16)
else:
nx.draw_networkx(G, pos=nx.spring_layout(G, seed=42), with_labels=False,
node_color=color, cmap="Set2")
plt.show()
###Output
_____no_output_____
###Markdown
IntroductionRecently, deep learning on graphs has emerged to one of the hottest research fields in the deep learning community.Here, **Graph Neural Networks (GNNs)** aim to generalize classical deep learning concepts to irregular structured data (in contrast to images or texts) and to enable neural networks to reason about objects and their relations.This tutorial will introduce you to some fundamental concepts regarding deep learning on graphs via Graph Neural Networks based on the **[PyTorch Geometric (PyG) library](https://github.com/rusty1s/pytorch_geometric)**.PyTorch Geometric is an extension library to the popular deep learning framework [PyTorch](https://pytorch.org/), and consists of various methods and utilities to ease the implementation of Graph Neural Networks.Following [Kipf et al. (2017)](https://arxiv.org/abs/1609.02907), let's dive into the world of GNNs by looking at a simple graph-structured example, the well-known [**Zachary's karate club network**](https://en.wikipedia.org/wiki/Zachary%27s_karate_club). This graph describes a social network of 34 members of a karate club and documents links between members who interacted outside the club. Here, we are interested in detecting communities that arise from the member's interaction. DatasetPyTorch Geometric provides an easy access to the dataset via the [`torch_geometric.datasets`](https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.htmltorch_geometric.datasets) subpackage:
###Code
from torch_geometric.datasets import KarateClub
dataset = KarateClub()
print(f'Dataset: {dataset}:')
print('======================')
print(f'Number of graphs: {len(dataset)}')
print(f'Number of features: {dataset.num_features}')
print(f'Number of classes: {dataset.num_classes}')
###Output
Dataset: KarateClub():
======================
Number of graphs: 1
Number of features: 34
Number of classes: 4
###Markdown
After initializing the [`KarateClub`](https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.htmltorch_geometric.datasets.KarateClub) dataset, we first can inspect some of its properties.For example, we can see that this dataset holds exactly **one graph**, and that each node in this dataset is assigned a **34-dimensional feature vector** (which uniquely describes the members of the karate club).Furthermore, the graph holds exactly **4 classes**, which represent the community each node belongs to.Let's now look at the underlying graph in more detail:
###Code
data.keys
data = dataset[0] # Get the first graph object.
print(data)
print('==============================================================')
# Gather some statistics about the graph.
print(f'Number of nodes: {data.num_nodes}')
print(f'Number of edges: {data.num_edges}')
print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')
print(f'Number of training nodes: {data.train_mask.sum()}')
print(f'Training node label rate: {int(data.train_mask.sum()) / data.num_nodes:.2f}')
print(f'Contains isolated nodes: {data.contains_isolated_nodes()}')
print(f'Contains self-loops: {data.contains_self_loops()}')
print(f'Is undirected: {data.is_undirected()}')
###Output
Data(edge_index=[2, 156], train_mask=[34], x=[34, 34], y=[34])
==============================================================
Number of nodes: 34
Number of edges: 156
Average node degree: 4.59
Number of training nodes: 4
Training node label rate: 0.12
Contains isolated nodes: False
Contains self-loops: False
Is undirected: True
###Markdown
Data Each graph in PyTorch Geometric is represented by a single [`Data`](https://pytorch-geometric.readthedocs.io/en/latest/modules/data.htmltorch_geometric.data.Data) object, which holds all the information to describe its graph representation.We can print the data object anytime via `print(data)` to receive a short summary about its attributes and their shapes:```Data(edge_index=[2, 156], x=[34, 34], y=[34], train_mask=[34])```We can see that this `data` object holds 4 attributes:(1) The `edge_index` property holds the information about the **graph connectivity**, *i.e.*, a tuple of source and destination node indices for each edge.PyG further refers to (2) **node features** as `x` (each of the 34 nodes is assigned a 34-dim feature vector), and to (3) **node labels** as `y` (each node is assigned to exactly one class).(4) There also exists an additional attribute called `train_mask`, which describes for which nodes we already know their community assigments.In total, we are only aware of the ground-truth labels of 4 nodes (one for each community), and the task is to infer the community assignment for the remaining nodes.The `data` object also provides some **utility functions** to infer some basic properties of the underlying graph.For example, we can easily infer whether there exists isolated nodes in the graph (*i.e.* there exists no edge to any node), whether the graph contains self-loops (*i.e.*, $(v, v) \in \mathcal{E}$), or whether the graph is undirected (*i.e.*, for each edge $(v, w) \in \mathcal{E}$ there also exists the edge $(w, v) \in \mathcal{E}$).
###Code
from IPython.display import Javascript # Restrict height of output cell.
display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 300})'''))
edge_index = data.edge_index
print(edge_index.t())
###Output
_____no_output_____
###Markdown
Edge Index By printing `edge_index`, we can further understand how PyG represents graph connectivity internally.We can see that for each edge, `edge_index` holds a tuple of two node indices, where the first value describes the node index of the source node and the second value describes the node index of the destination node of an edge.This representation is known as the **COO format (coordinate format)** commonly used for representing sparse matrices.Instead of holding the adjacency information in a dense representation $\mathbf{A} \in \{ 0, 1 \}^{|\mathcal{V}| \times |\mathcal{V}|}$, PyG represents graphs sparsely, which refers to only holding the coordinates/values for which entries in $\mathbf{A}$ are non-zero.We can further visualize the graph by converting it to the `networkx` library format, which implements, in addition to graph manipulation functionalities, powerful tools for visualization:
###Code
from torch_geometric.utils import to_networkx
G = to_networkx(data, to_undirected=True)
visualize(G, color=data.y)
###Output
_____no_output_____
###Markdown
Implementing Graph Neural NetworksAfter learning about PyG's data handling, it's time to implement our first Graph Neural Network!For this, we will use one of the most simple GNN operators, the **GCN layer** ([Kipf et al. (2017)](https://arxiv.org/abs/1609.02907)).PyG implements this layer via [`GCNConv`](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.htmltorch_geometric.nn.conv.GCNConv), which can be executed by passing in the node feature representation `x` and the COO graph connectivity representation `edge_index`.With this, we are ready to create our first Graph Neural Network by defining our network architecture in a `torch.nn.Module` class:
###Code
import torch
from torch.nn import Linear
from torch_geometric.nn import GCNConv
class GCN(torch.nn.Module):
def __init__(self):
super(GCN, self).__init__()
torch.manual_seed(12345)
self.conv1 = GCNConv(dataset.num_features, 4)
self.conv2 = GCNConv(4, 4)
self.conv3 = GCNConv(4, 2)
self.classifier = Linear(2, dataset.num_classes)
def forward(self, x, edge_index):
h = self.conv1(x, edge_index)
h = h.tanh()
h = self.conv2(h, edge_index)
h = h.tanh()
h = self.conv3(h, edge_index)
h = h.tanh() # Final GNN embedding space.
# Apply a final (linear) classifier.
out = self.classifier(h)
return out, h
model = GCN()
print(model)
###Output
GCN(
(conv1): GCNConv(34, 4)
(conv2): GCNConv(4, 4)
(conv3): GCNConv(4, 2)
(classifier): Linear(in_features=2, out_features=4, bias=True)
)
###Markdown
Here, we first initialize all of our building blocks in `__init__` and define the computation flow of our network in `forward`.We first define and stack **three graph convolution layers**, which corresponds to aggregating 3-hop neighborhood information around each node (all nodes up to 3 "hops" away).In addition, the `GCNConv` layers reduce the node feature dimensionality to $2$, *i.e.*, $34 \rightarrow 4 \rightarrow 4 \rightarrow 2$. Each `GCNConv` layer is enhanced by a [tanh](https://pytorch.org/docs/stable/generated/torch.nn.Tanh.html?highlight=tanhtorch.nn.Tanh) non-linearity.After that, we apply a single linear transformation ([`torch.nn.Linear`](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html?highlight=lineartorch.nn.Linear)) that acts as a classifier to map our nodes to 1 out of the 4 classes/communities.We return both the output of the final classifier as well as the final node embeddings produced by our GNN.We proceed to initialize our final model via `GCN()`, and printing our model produces a summary of all its used sub-modules.
###Code
model = GCN()
_, h = model(data.x, data.edge_index)
print(f'Embedding shape: {list(h.shape)}')
visualize(h, color=data.y)
###Output
Embedding shape: [34, 2]
###Markdown
Remarkably, even before training the weights of our model, the model produces an embedding of nodes that closely resembles the community-structure of the graph.Nodes of the same color (community) are already closely clustered together in the embedding space, although the weights of our model are initialized **completely at random** and we have not yet performed any training so far!This leads to the conclusion that GNNs introduce a strong inductive bias, leading to similar embeddings for nodes that are close to each other in the input graph. Training on the Karate Club NetworkBut can we do better? Let's look at an example on how to train our network parameters based on the knowledge of the community assignments of 4 nodes in the graph (one for each community):Since everything in our model is differentiable and parameterized, we can add some labels, train the model and observe how the embeddings react.Here, we make use of a semi-supervised or transductive learning procedure: We simply train against one node per class, but are allowed to make use of the complete input graph data.Training our model is very similar to any other PyTorch model.In addition to defining our network architecture, we define a loss critertion (here, [`CrossEntropyLoss`](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)) and initialize a stochastic gradient optimizer (here, [`Adam`](https://pytorch.org/docs/stable/optim.html?highlight=adamtorch.optim.Adam)).After that, we perform multiple rounds of optimization, where each round consists of a forward and backward pass to compute the gradients of our model parameters w.r.t. to the loss derived from the forward pass.If you are not new to PyTorch, this scheme should appear familar to you. Otherwise, the PyTorch docs provide [a good introduction on how to train a neural network in PyTorch](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.htmldefine-a-loss-function-and-optimizer).Note that our semi-supervised learning scenario is achieved by the following line:```loss = criterion(out[data.train_mask], data.y[data.train_mask])```While we compute node embeddings for all of our nodes, we **only make use of the training nodes for computing the loss**.Here, this is implemented by filtering the output of the classifier `out` and ground-truth labels `data.y` to only contain the nodes in the `train_mask`.Let us now start training and see how our node embeddings evolve over time (best experienced by explicitely running the code):
###Code
import time
from IPython.display import Javascript # Restrict height of output cell.
display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 430})'''))
model = GCN()
criterion = torch.nn.CrossEntropyLoss() # Define loss criterion.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01) # Define optimizer.
def train(data):
optimizer.zero_grad() # Clear gradients.
out, h = model(data.x, data.edge_index) # Perform a single forward pass.
loss = criterion(out[data.train_mask], data.y[data.train_mask]) # Compute the loss solely based on the training nodes.
loss.backward() # Derive gradients.
optimizer.step() # Update parameters based on gradients.
return loss, h
for epoch in range(401):
loss, h = train(data)
# Visualize the node embeddings every 10 epochs
if epoch % 10 == 0:
visualize(h, color=data.y, epoch=epoch, loss=loss)
time.sleep(0.3)
###Output
_____no_output_____ |
Sesion14_Diccionarios.ipynb | ###Markdown
COMPUTER PROGRAMMING UNIVERSIDAD EAFIT, MEDELLÍN - COLOMBIA. Session 14 - Dictionaries. Instructor: > *Carlos Alberto Álvarez Henao, I.C. Ph.D.* Dictionaries > A dictionary is a data structure and a data type in *Python* with special characteristics that lets us store any type of value: integers, strings, lists and even other functions. Dictionaries also let us identify each element by a *key*. > To define a dictionary, the list of values is enclosed in curly braces ({}). *Key*-*value* pairs are separated by commas, and each *key* is separated from its *value* by a colon (:). > Dictionaries (called *associative arrays* or *hash tables* in other languages) are a very powerful data structure that associates a value with a key. > Keys must be of an immutable type; values can be of any type. > Dictionaries are not position-indexed collections: they can be traversed, but code should not rely on element order (for reference, Python 3.7+ does preserve insertion order).
###Code
diccionario = {'nombre' : 'Carlos', 'edad' : 48, 'cursos': ['Python','Fortran','Matlab'] }
###Output
_____no_output_____
###Markdown
We can access an element of a dictionary through that element's *key*, as we will see next:
###Code
print(diccionario['nombre']) #Carlos
print(diccionario['edad']) #48
print(diccionario['cursos']) #['Python','Fortran','Matlab']
###Output
_____no_output_____
###Markdown
It is also possible to store a list inside a dictionary. To access each of the courses we use its index:
###Code
print(diccionario['cursos'][0:2])#Python
print(diccionario['cursos'][1])#Fortran
print(diccionario['cursos'][2])#Matlab
###Output
_____no_output_____
###Markdown
To iterate over the whole dictionary, we can use a for loop:
###Code
for a in diccionario:
print(a, ":", diccionario[a])
###Output
_____no_output_____
###Markdown
Dictionary methods *dict*() - Takes a representation of a dictionary (for example, keyword arguments) and, if feasible, returns a dictionary.
###Code
dic = dict(nombre='Carlos', apellido='Alvarez', edad=48)
print(dic)
###Output
_____no_output_____
###Markdown
*zip*() - Takes two iterables as parameters (a string, a list or a tuple). Both should have the same number of elements. Combined with dict(), it returns a dictionary pairing the $i$-th element of one iterable with the $i$-th element of the other.
###Code
dic = dict(zip('abcd',["z","y","x","w"]))
print(dic)
###Output
_____no_output_____
###Markdown
*items*() - Returns a view of tuples, where each tuple consists of two elements: the first is the *key* and the second its *value*.
###Code
dic = {'a' : 1, 'b': 2, 'c' : 3 , 'd' : 4}
items = dic.items()
print(items)
###Output
_____no_output_____
###Markdown
*keys*() - Returns a view containing the *keys* of the dictionary.
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
keys= dic.keys()
print(keys)
###Output
_____no_output_____
###Markdown
*values*() - Returns a view containing the *values* of the dictionary.
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
values= dic.values()
print(values)
###Output
_____no_output_____
###Markdown
*clear*() - Removes all items from the dictionary, leaving it empty.
###Code
dic1 = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
dic1.clear()
print(dic1)
###Output
_____no_output_____
###Markdown
*copy*() - Returns a (shallow) copy of the original dictionary.
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
dic1 = dic.copy()
print(dic1)
###Output
_____no_output_____
###Markdown
*fromkeys*() - Takes an iterable and a value as parameters and returns a dictionary whose keys are the elements of the iterable, all mapped to the given value. If no value is supplied, every key maps to None (a sketch of that default case follows below).
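A minimal sketch of the default case mentioned above (no second argument passed, so every key maps to None):
```
dic = dict.fromkeys(['a', 'b', 'c', 'd'])  # no value supplied
print(dic)  # {'a': None, 'b': None, 'c': None, 'd': None}
```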
###Code
dic = dict.fromkeys(['a','b','c','d'],1)
print(dic)
###Output
_____no_output_____
###Markdown
*get*() - Takes a key as a parameter and returns its value. If the key is not found, it returns None instead of raising an error.
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
valor = dic.get('z')
print(valor)
###Output
_____no_output_____
###Markdown
*pop*() - Takes a key as a parameter, removes it from the dictionary and returns its value. If the key is not found, it raises a KeyError (unless a default is passed as a second argument).
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
valor = dic.pop('c')
print(valor)
print(dic)
###Output
_____no_output_____
###Markdown
*setdefault*() - Works in two ways. In the first, it behaves like get():
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
valor = dic.setdefault('b')
print(valor)
###Output
_____no_output_____
###Markdown
And in the second form, it lets us add a new element to our dictionary (only when the key does not exist yet).
###Code
dic = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
valor = dic.setdefault('e',5)
print(dic)
print(valor)
###Output
_____no_output_____
###Markdown
*update*() - Takes another dictionary as a parameter. For keys present in both, it updates the value of the repeated key; key-value pairs with new keys are added to the dictionary.
###Code
dic1 = {'a' : 1, 'b' : 2, 'c' : 3 , 'd' : 4}
dic2 = {'c' : 6, 'b' : 5, 'e' : 9 , 'f' : 10}
dic2.update(dic1)
print(dic2)
###Output
_____no_output_____
###Markdown
> Dictionaries are a very versatile tool. A dictionary can be used, for example, to count how many occurrences of each word there are in a text, or how many occurrences of each letter (a small sketch of the word-count idea follows below).> A dictionary can also be used as an address book where the key is the person's name and the value is a list with that person's data.> A dictionary could also be used to keep the records of the students enrolled in a course, with the ID as the key and a list of all the grades of that student as the value.> In general, dictionaries are useful for building very simple databases in which the key is the identifier of an element and the value holds all the data of that element.> Another possible use of a dictionary is translation, where the key would be the word in the original language and the value the word in the target language. This application is of limited value, though, since word-by-word translation is very poor. Example: We want to create a dictionary with the list of players of the Spanish national football team that won the 2010 World Cup in South Africa and run a series of queries on it. (*Example taken from the blog [Jarroba.com](https://jarroba.com/diccionario-python-ejemplos/)*)
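The word-counting use case mentioned above takes only a few lines; a minimal sketch (the sample sentence is made up for illustration):
```
text = "to be or not to be"
counts = {}
for word in text.split():
    counts[word] = counts.get(word, 0) + 1
print(counts)  # {'to': 2, 'be': 2, 'or': 1, 'not': 1}
```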
###Code
futbolistas = dict()
futbolistas = {
1 : "Casillas", 15 : "Ramos",
3 : "Pique", 5 : "Puyol",
11 : "Capdevila", 14 : "Xabi Alonso",
16 : "Busquets", 8 : "Xavi Hernandez",
18 : "Pedrito", 6 : "Iniesta",
7 : "Villa"
}
###Output
_____no_output_____
###Markdown
Iterating over each element of the dictionary and printing the result
###Code
for k,v in futbolistas.items():
print("el jugador # {0} es {1} ".format(k,v))
#print("el jugador #", k, "es ", v, "mas", 10.000)
###Output
el jugador # 1 es Casillas
el jugador # 15 es Ramos
el jugador # 3 es Pique
el jugador # 5 es Puyol
el jugador # 11 es Capdevila
el jugador # 14 es Xabi Alonso
el jugador # 16 es Busquets
el jugador # 8 es Xavi Hernandez
el jugador # 18 es Pedrito
el jugador # 6 es Iniesta
el jugador # 7 es Villa
###Markdown
Let's determine the number of elements in the dictionary
###Code
numElem = len(futbolistas)
print("Numero de elementos del diccionario len(futbolistas) = {0}".format(numElem))
###Output
Numero de elementos del diccionario len(futbolistas) = 11
###Markdown
Now we want to look at the keys and the values of the dictionary separately
###Code
# Imprimimos una lista con las claves del diccionario
keys = futbolistas.keys();
print("Las claves del diccionario son \n {0}".format(keys))
# Imprimimos en una lista los valores del diccionario
values = futbolistas.values()
print("\nLos valores del diccionario son \n {0}".format(values))
###Output
Las claves del diccionario son
dict_keys([1, 15, 3, 5, 11, 14, 16, 8, 18, 6, 7])
Los valores del diccionario son
dict_values(['Casillas', 'Ramos', 'Pique', 'Puyol', 'Capdevila', 'Xabi Alonso', 'Busquets', 'Xavi Hernandez', 'Pedrito', 'Iniesta', 'Villa'])
###Markdown
If we want to know the value associated with a given key, we use the `get(key)` method (it also accepts an optional default, as sketched below):
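As a hedged aside, `get()` also accepts a default value to return when the key is missing, which avoids checking for None (the shirt number 99 below is just an illustrative missing key):
```
print(futbolistas.get(99, 'unassigned'))  # prints 'unassigned' because 99 is not a key
```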
###Code
elem = futbolistas.get(6)
print("el nombre del futbolista que tiene el número '6' es {0}".format(elem))
###Output
el nombre del futbolista que tiene el número '6' es Iniesta
###Markdown
Next we will look at two ways of inserting elements into the dictionary. The first is the simplest (as if it were an associative array): pass the key between square brackets and assign it a value:
###Code
# Añadimos un nuevo elemento a la lista
futbolistas[22] = 'Navas'
print("\nDiccionario tras añadir un elemento: \n {0}".format(futbolistas))
numElem = len(futbolistas)
print("Numero de elementos del diccionario len(futbolistas) = {0}".format(numElem))
###Output
Numero de elementos del diccionario len(futbolistas) = 12
###Markdown
The second way to insert an element is with the `setdefault(key, default=value)` method, which takes a key and a value as parameters. Its peculiarity is that it only inserts the element into the dictionary if no element with that key exists yet. If an element with that key already exists, it performs no insertion:
###Code
# Insertamos un elemento en el array. Si la clave ya existe no inserta el elemento
elem2 = futbolistas.setdefault(10,'Cesc')
print("\nInsertamos un elemento en el diccionario (Si la clave existe no lo inserta): {0}".format(elem2))
numElem = len(futbolistas)
print("Numero de elementos del diccionario len(futbolistas) = {0}".format(numElem))
###Output
Numero de elementos del diccionario len(futbolistas) = 13
###Markdown
The next method, `pop(key)`, removes from the dictionary the element whose key we pass as a parameter. For example, let's remove the element with *key = 22*:
###Code
# Eliminamos un elemento del diccionario dada su clave
futbolistas.pop(22)
print("\nDiccionario tras eliminar un elemento: {0}".format(futbolistas))
numElem = len(futbolistas)
print("Numero de elementos del diccionario len(futbolistas) = {0}".format(numElem))
###Output
Numero de elementos del diccionario len(futbolistas) = 12
###Markdown
To make a copy of a dictionary, use the `copy()` method:
###Code
# Hacemos una copia del diccionario
futbolistasCopy = futbolistas.copy();
print("\nRealizamos una copia del diccionario: \n {0}".format(futbolistasCopy))
###Output
Realizamos una copia del diccionario:
{1: 'Casillas', 15: 'Ramos', 3: 'Pique', 5: 'Puyol', 11: 'Capdevila', 14: 'Xabi Alonso', 16: 'Busquets', 8: 'Xavi Hernandez', 18: 'Pedrito', 6: 'Iniesta', 7: 'Villa', 10: 'Cesc'}
###Markdown
To remove the contents (the elements) of a dictionary we use the `clear()` method:
###Code
# Eliminamos los elementos de un diccionario
futbolistasCopy.clear()
print("\nEliminamos los elementos de un diccionario: {0}".format(futbolistasCopy))
###Output
Eliminamos los elementos de un diccionario: {}
###Markdown
With the `fromkeys(listKey, default=value)` method we create a dictionary whose keys are the ones passed in as a list. If we pass a second parameter, that parameter is used as the value of every key. Let's see an example:
###Code
# Creamos un diccionario a partir de una lista con las claves
keys = ['nombre', 'apellidos', 'edad']
dictList = dict.fromkeys(keys, 'nada')
print("Creamos un diccionario a partir de una lista {0}".format(dictList))
###Output
Creamos un diccionario a partir de una lista {'nombre': 'nada', 'apellidos': 'nada', 'edad': 'nada'}
###Markdown
To check whether a key exists or not, Python 2 offered the `has_key(key)` method; it was removed in Python 3, where the `in` operator is used instead. Let's see an example:
###Code
# Check whether a key exists or not (has_key() was removed in Python 3, so we use the `in` operator)
exit2 = 2 in futbolistas
exit8 = 8 in futbolistas
print("\nCheck whether elements 2 and 8 exist: {0}, {1}".format(exit2, exit8))
###Output
_____no_output_____ |
notebooks/tspec_full.ipynb | ###Markdown
Transmission spectra: full Setup
###Code
%load_ext autoreload
%autoreload 2
import glob as glob
import matplotlib as mpl
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
import corner
import json
import pathlib
import pickle
import utils
import warnings
from astropy import constants as const
from astropy import units as uni
from astropy.io import ascii, fits
from astropy.time import Time
from mpl_toolkits.axes_grid1 import ImageGrid
# Default figure dimensions
FIG_WIDE = (11, 5)
FIG_LARGE = (8, 11)
# Figure style
sns.set(style="ticks", palette="colorblind", color_codes=True, context="talk")
params = utils.plot_params()
plt.rcParams.update(params)
###Output
_____no_output_____
###Markdown
[Download data](https://www.dropbox.com/sh/ngr81uubrouo8nk/AACrisaNEnLdxDwwosdX1Edva?dl=1) Unzip this into a folder named `data` at the same level as this notebook (a hedged extraction sketch is given below). Plot
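Before the plot, a hedged sketch of the download/extract step described above, in case you prefer doing it from Python (the local archive name `tspec_data.zip` is an assumption, not part of this notebook):
```
import pathlib
import urllib.request
import zipfile

url = "https://www.dropbox.com/sh/ngr81uubrouo8nk/AACrisaNEnLdxDwwosdX1Edva?dl=1"
archive = pathlib.Path("tspec_data.zip")       # assumed local file name
urllib.request.urlretrieve(url, archive)       # fetch the Dropbox archive
pathlib.Path("data").mkdir(exist_ok=True)
with zipfile.ZipFile(archive) as zf:
    zf.extractall("data")                      # unpack into ./data
```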
###Code
base_dir = "data/tspec_full"
fig, ax = plt.subplots(figsize=FIG_WIDE)
utils.plot_tspec_IMACS(ax, base_dir)
# Save
fig.tight_layout()
fig.set_size_inches(FIG_WIDE)
#utils.savefig(f"../paper/figures/tspec_full/tspec_full.pdf")
base_dir = "data/tspec_full"
fig, ax = plt.subplots(figsize=FIG_WIDE)
utils.plot_tspec_IMACS(ax, base_dir)
# Save
fig.tight_layout()
fig.set_size_inches(FIG_WIDE)
utils.savefig(f"../paper/figures/tspec_full/tspec_full.pdf")
###Output
offsets: [-381.66382639 -250.19806149 604.72304087 238.023367 -134.88932493]
offsets (% mean wlc depth): [-2.94346912 -1.92957838 4.66374719 1.83568466 -1.04029393]
Saving tspec to: data/tspec_full/tspec_c.csv
mean WLC depth: 12966.462729191497 228.463440286341
Rp (Rj): 1.2765205003564386 jupiterRad
Rs (Rsun): 1.152 solRad
gp (m/s^2): 2920.5267508681527 cm / s2
###Markdown
Table
###Code
pd.read_csv("data/tspec_full/tspec_c.csv")#.to_clipboard(index=False)
###Output
_____no_output_____ |
midterm/.ipynb_checkpoints/CSE445.1_Midterm_Ferdous Zeaul Islam_1731136042-checkpoint.ipynb | ###Markdown
Midterm Answer Script**Name**: Ferdous Zeaul Islam **ID**: 173 1136 042 **Course**: CSE445 (Machine Learning) **Faculty**: Dr. Sifat Momen (Sfm1) **Section**: 01 **Semester**: Spring 2021 N.B- please put the diabetes.csv dataset in the same directory as the ipynb file.
###Code
# only need this line in jupyter
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
(a) Read the dataset (which is in the csv format) using panda's dataframe.
###Code
diabetes_df = pd.read_csv('./diabetes.csv')
diabetes_df.shape
###Output
_____no_output_____
###Markdown
(b) Find out the number of instances and the number of features (including the target class) in the dataset.
###Code
print('Number of instances in the dataset =', diabetes_df.shape[0])
print('Number of features in the dataset =', diabetes_df.shape[1])
###Output
Number of features in the dataset = 9
###Markdown
(c) Does the dataset have any missing entries? Show your workings.
###Code
diabetes_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 768 entries, 0 to 767
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Pregnancies 768 non-null int64
1 Glucose 768 non-null int64
2 BloodPressure 768 non-null int64
3 SkinThickness 768 non-null int64
4 Insulin 768 non-null int64
5 BMI 768 non-null float64
6 DiabetesPedigreeFunction 768 non-null float64
7 Age 768 non-null int64
8 Outcome 768 non-null int64
dtypes: float64(2), int64(7)
memory usage: 54.1 KB
###Markdown
Explanation: We can observe from the command on the previous line that all columns/features of the dataset have a non-null count equal to the total number of instances that we found in Question (b). Therefore, we can state that **to the naked eye there are no missing entries in this dataset.** (d) Here “Outcome” is the target class and contains values zeros or ones. Determine how many instances have the outcome values zeroes and how many have the outcome values ones. Hence or otherwise, comment on whether this dataset suffers from class imbalance problem.
###Code
outcome_freq = diabetes_df.Outcome.value_counts()
outcome_freq
num_total_instances = diabetes_df.shape[0]
num_outcome_zero = outcome_freq[0]
num_outcome_one = outcome_freq[1]
outcome_zero_data_percentage = round((num_outcome_zero*100)/num_total_instances, 3)
print('Percentage of data with outcome zero =', outcome_zero_data_percentage)
outcome_one_data_percentage = round((num_outcome_one*100)/num_total_instances, 3)
print('Percentage of data with outcome one =', outcome_one_data_percentage)
###Output
Percentage of data with outcome one = 34.896
###Markdown
Explanation: With respect to "Outcome" we see that **65.104% of the instances have the value zero** and the remaining **34.896% have the value one**. Clearly, **the dataset suffers from class imbalance.** (e) Show the first 5 and the last 5 instances of the dataset.
###Code
diabetes_df.head()
diabetes_df.tail()
###Output
_____no_output_____
###Markdown
(f) Often, in many datasets, it may appear that there exists no missing entries. However, when you look at the dataset closely, it is often found that the missing entries are replaced by a zero (0). Check if this dataset has this issue or not. Show and explain your workings.
###Code
diabetes_df[30:35]
diabetes_df[342:347]
diabetes_df[706:711]
diabetes_df[(diabetes_df['DiabetesPedigreeFunction'] == 0)].shape[0]
diabetes_df[(diabetes_df['Age'] == 0)].shape[0]
###Output
_____no_output_____
###Markdown
Explanation- Apart from the 'Pregnancies' and 'Outcome' columns, a value of 0 in any other column is nonsensical. By printing various segments of the data we see that some instances have a 0 value in the columns 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin' and 'BMI'. So we can state that **there is missing data replaced with 0 in this dataset.** Further calculations are shown below (a per-column tally first, then the combined count),
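A hedged per-column check (the column list is taken from the explanation above) that counts how many zero entries each suspect column contains:
```
suspect_cols = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
print((diabetes_df[suspect_cols] == 0).sum())  # zero count per column
```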
###Code
missing_data_count = diabetes_df[ (diabetes_df['Glucose']==0) | (diabetes_df['BloodPressure']==0) | (diabetes_df['BMI']==0)
| (diabetes_df['Insulin']==0) | (diabetes_df['SkinThickness']==0) ].shape[0]
print('A total of', missing_data_count, 'instances have missing data (one or more columns invalidly contain zero).')
###Output
A total of 376 instances have missing data (one or more columns invalidly contain zero).
###Markdown
(g) Draw a histogram for each numerical features. You may use the hist() function of the panda's dataframe. Documentation on this can be found at https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.hist.html In order to make the histograms for each features visually appealing, you are advised to tweak bins and figsize parameters.
###Code
diabetes_df.hist(bins = 9, figsize = (15, 15))
plt.show()
###Output
_____no_output_____
###Markdown
(h) One of the ways to visualize how each attribute is correlated with other attributes is by drawing a seaborn correlation heatmap. Read the documentation on how to generate correlation heatmap using the seaborn library. The following link provides a quick overview on how to do this: https://www.geeksforgeeks.org/how-to-create-a-seaborn-correlation-heatmap-in-python/ I strongly suggest you to adjust the figure size before using the heatmap. For instance, you can write the code plt.figure (figsize = (a,b)) before using the seaborn's heatmap [Here a and b are appropriate choices for the figure size that you need to decide on].
###Code
import seaborn
# help taken from ->
# https://medium.com/@szabo.bibor/how-to-create-a-seaborn-correlation-heatmap-in-python-834c0686b88e
plt.figure(figsize=(15, 8))
corr_matrix = diabetes_df.corr()
# mask to hide the upper triangle of the symmetric corr-matrix
# mask = np.triu(np.ones_like(corr_matrix, dtype=np.bool))
heatmap = seaborn.heatmap(
# correlation matrix
corr_matrix,
# mask the top triangle of the matrix
# mask=mask,
# two-contrast color, different color for + -
cmap="PiYG",
# color map range
vmin=-1, vmax=1,
# show corr values in the cells
annot=True
)
# set a title
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':20}, pad=16);
plt.show()
###Output
_____no_output_____
###Markdown
(i) If this dataset has the issue discussed in (f), you are now required to write a function in python that will replace each zeros by the corresponding median value of the features. Note that you may require to use the numpy library. We saw in (f) that there were some invalid zeroes in the columns- **'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin' and 'BMI'**.
###Code
column_with_invalid_zeroes = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
for column in column_with_invalid_zeroes:
# extract the column from original dataframe
column_data = diabetes_df[column]
# replace zero values with np.NaN
column_data = column_data.replace(0, np.NaN)
# replace np.NaN values with the median
column_data = column_data.fillna(column_data.median())
# put the column in the original dataframe
diabetes_df[column] = column_data
###Output
_____no_output_____
###Markdown
Now if we run the same code as we did in (f) to count missing values (i.e. instances containing an invalid zero),
###Code
missing_data_count = diabetes_df[ (diabetes_df['Glucose']==0) | (diabetes_df['BloodPressure']==0)
| (diabetes_df['BMI']==0) | (diabetes_df['Insulin']==0)
| (diabetes_df['SkinThickness']==0) ].shape[0]
print('A total of', missing_data_count, 'instances have missing data (one or more columns invalidly contain zero).')
###Output
A total of 0 instances have missing data (one or more columns invalidly contain zero).
###Markdown
**Therefore we can safely assume that the invalid zeroes have been replaced by their columns' median values.** (j) Split the dataset into X and y where X contains all the predictors and y contains only the entries in the target class.
###Code
X = diabetes_df.drop(columns=['Outcome'])
y = diabetes_df['Outcome']
diabetes_df.head()
X.head()
y.head()
###Output
_____no_output_____
###Markdown
(k) Use the train_test_split function to split the dataset into train set and test set in the ratio 80:20.
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
train_data_percentage = round((X_train.shape[0]/X.shape[0])*100, 2)
test_data_percentage = round((X_test.shape[0]/X.shape[0])*100, 2)
print("Test size = " + str(test_data_percentage) + "%" + " Train size = " + str(train_data_percentage) + "%")
###Output
Test size = 20.05% Train size = 79.95%
###Markdown
(l) Write a code to implement the zeroR classifier (i.e. a baseline classifier) on this dataset. Determine the precision, recall, F1 score, train accuracy and the test accuracy.
###Code
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
# ZeroR classifier
model = DummyClassifier(strategy = 'most_frequent', random_state = 42)
# Dataset is trained and a model is created
model.fit(X_train,y_train)
y_train_predictions = model.predict(X_train)
y_test_predictions = model.predict(X_test)
print('For the train predictions:\n', classification_report(y_train, y_train_predictions))
print()
print('For the test predictions:\n', classification_report(y_test, y_test_predictions))
###Output
For the train predictions:
precision recall f1-score support
0 0.65 1.00 0.79 400
1 0.00 0.00 0.00 214
accuracy 0.65 614
macro avg 0.33 0.50 0.39 614
weighted avg 0.42 0.65 0.51 614
For the test predictions:
precision recall f1-score support
0 0.65 1.00 0.79 100
1 0.00 0.00 0.00 54
accuracy 0.65 154
macro avg 0.32 0.50 0.39 154
weighted avg 0.42 0.65 0.51 154
###Markdown
(m) Apply the KNN classifier with the euclidean distance as the distance metric on this dataset. You need to determine a suitable value of the hyperparameter, k. One way to do this is to apply the KNN classifier with different values of k and determine the train and test accuracies. Plot a graph of train and test accuracy with respect to k and determine the value of k for which the difference between the train and the test accuracy is minimum. You may require to do feature scaling before using the KNN classifier. Before we begin applying the KNN algorithm we need to scale our dataset. **We must scale both the train and test segments of the dataset using the same per-column min and max values, taken from the training data** (a sketch of the usual scikit-learn idiom for this is shown below).
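As a hedged sketch, the rule above can be expressed with the standard scikit-learn pattern: fit the scaler on the training split only and reuse its fitted min/max to transform the test split (this mirrors what the manual implementation below does):
```
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_train_s = scaler.fit_transform(X_train)  # learns per-column min/max from the train split
X_test_s = scaler.transform(X_test)        # reuses those statistics on the test split
```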
###Code
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
X_train.head()
scaler = MinMaxScaler()
X_train_scaled_using_library = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
X_train_scaled_using_library.hist(bins = 9, figsize = (15, 15))
plt.show()
columns = X.columns
X_train_col_min = []
X_train_col_max = []
col_idx = 0
for column in columns:
X_train_col_max.append(X_train[column].max())
X_train_col_min.append(X_train[column].min())
X_train_scaled = X_train.copy()
# MUST MAKE COLUMNS INTO FLOAT DATATYPE, UNLESS SCALING WILL NOT WORK
# spent 3hs for this (:
X_train_scaled[list(columns)] = X_train_scaled[list(columns)].astype(float)
for (row_idx, data) in X_train.iterrows():
col_idx = 0
for val in data:
column = columns[col_idx]
scaled_val = (val - X_train_col_min[col_idx]) / (X_train_col_max[col_idx] - X_train_col_min[col_idx])
X_train_scaled.at[row_idx, column] = float(scaled_val)
col_idx += 1
X_train_scaled.hist(bins = 9, figsize = (15, 15))
plt.show()
###Output
_____no_output_____
###Markdown
Of the two scalings above, the first was done using sklearn's MinMaxScaler() and the second by implementing the scaling manually. From the two sets of histograms for each column above we can conclude that our manual scaling is as accurate as sklearn's MinMaxScaler(). Now we can proceed to manually scale the test set, **using the minimum and maximum values of the train dataset**,
###Code
X_test_scaled = X_test.copy()
X_test_scaled[list(columns)] = X_test_scaled[list(columns)].astype(float)
for (row_idx, data) in X_test_scaled.iterrows():
col_idx = 0
for val in data:
column = columns[col_idx]
scaled_val = (val - X_train_col_min[col_idx]) / (X_train_col_max[col_idx] - X_train_col_min[col_idx])
X_test_scaled.at[row_idx, column] = scaled_val
col_idx += 1
X_test_scaled.head()
###Output
_____no_output_____
###Markdown
Now we implement a function that applies the KNN classifier for k values in the range provided as a function parameter,
###Code
def check_k_in_range(left, right):
k_values = []
for i in range (left, right):
k_values.append(i)
train_accuracies = []
test_accuracies = []
for k in k_values:
# k-nn classifier witk k neighbours and euclidian distance
model = KNeighborsClassifier(n_neighbors=k, metric='minkowski', p=2)
# train model
model.fit(X_train_scaled, y_train)
# train predictions
y_train_predictions = model.predict(X_train_scaled)
# train accuracy for current k value
train_accuracies.append(accuracy_score(y_train, y_train_predictions))
# test predictions
y_test_predictions = model.predict(X_test_scaled)
# test accuracy for current k value
test_accuracies.append(accuracy_score(y_test, y_test_predictions))
# plot the Test-Accuracy, Training-Accuracy VS K-value
plt.figure(figsize=(15, 8))
plt.title('Train accuracy, Test accuracy vs K-values')
plt.plot(k_values, train_accuracies, 'ro-', k_values, test_accuracies,'bv--')
plt.legend(['Training Accuracy','Test Accuracy'])
plt.xlabel('K values')
plt.ylabel('Accuracy')
min_k = 1
max_k = int(X_train.shape[0]/5)
print('Minimum k = ', min_k, 'Maximum k = ', max_k)
check_k_in_range(min_k, max_k)
###Output
Minimum k = 1 Maximum k = 122
###Markdown
Explanation- From the figure we can observe that an optimal k-value lies in the range 10 to 20, because in this range the train and test accuracies are relatively close, which means a reduced chance of the model becoming too complex and overfitting. Let's test k values in the range 10 to 20 now.
###Code
check_k_in_range(10, 20)
###Output
_____no_output_____
###Markdown
From the above three graphs we can state that **k=17 should be the optimal choice for our K nearest neighbour classifier.** (n) Apply the decision tree classifier with the “gini” criterion on this dataset. One of the hyperparameters of the decision tree classifier is max_depth. Apply the decision tree classifier with different values of max_depth and find the train and test accuracies. Plot a graph showing how the train and test accuracy varies with max_depth. Determine the most suitable value of max_depth. For a suitable value of max_depth, draw the decision tree.
###Code
from sklearn import tree
def check_decision_tree_max_depth_in_range(left, right):
max_depths = []
for i in range (left, right):
max_depths.append(i)
train_accuracies = []
test_accuracies = []
for depth in max_depths:
# decision tree classifier with max_depth impurity measure 'gini'
model = tree.DecisionTreeClassifier(criterion='gini',max_depth=depth)
# train model
model.fit(X_train, y_train)
# train predictions
y_train_predictions = model.predict(X_train)
# train accuracy for current k value
train_accuracies.append(accuracy_score(y_train, y_train_predictions))
# test predictions
y_test_predictions = model.predict(X_test)
# test accuracy for current k value
test_accuracies.append(accuracy_score(y_test, y_test_predictions))
# plot the Test-Accuracy, Training-Accuracy VS K-value
plt.figure(figsize=(15, 8))
plt.title('Train accuracy, Test accuracy vs Max-Depths')
plt.plot(max_depths, train_accuracies, 'ro-', max_depths, test_accuracies,'bv--')
plt.legend(['Training Accuracy','Test Accuracy'])
plt.xlabel('Max Depths')
plt.ylabel('Accuracy')
check_decision_tree_max_depth_in_range(1, 50)
###Output
_____no_output_____
###Markdown
It appears that our desired max_depth is somewhere in the range from 1 to 10. Let's find out,
###Code
check_decision_tree_max_depth_in_range(1, 10)
###Output
_____no_output_____
###Markdown
From the graph we can state that **max_depth = 4 is the optimal choice.** Now let's draw the decision tree for max_depth=4 with gini as the impurity measure,
###Code
import pydotplus
from IPython.display import Image
# decision tree classifier with max_depth = 4 and impurity measure 'gini'
model = tree.DecisionTreeClassifier(criterion='gini',max_depth=4)
# train model
model.fit(X_train, y_train)
dot_data = tree.export_graphviz(model, feature_names=X_train.columns, class_names=['non-diabetic','diabetic'],
filled=True, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
###Output
_____no_output_____
###Markdown
(o) Read the article “How to configure k-fold cross validation” and apply 10-fold cross validation using the classifiers in m and n. Determine the performance of the classifiers (accuracy, precision, recall, f1-score and the area under the curve of the ROC curve) on this dataset. Link to the article: https://machinelearningmastery.com/how-to-configure-k-fold-cross-validation/ We have to initialize the 10-fold cross validator and our classifiers.
###Code
from sklearn.model_selection import StratifiedKFold, cross_val_score
# 10-fold cross validation
cv = StratifiedKFold(n_splits = 10, random_state = 42, shuffle = True)
###Output
_____no_output_____
###Markdown
Let's find out the accuracy, precision, recall and area under the ROC curve for the decision tree classifier. To choose the max_depth hyperparameter for the decision tree we will pick the value with the highest average accuracy under 10-fold cross-validation.
###Code
max_depths = []
for i in range (1, 25):
max_depths.append(i)
accuracies = []
for depth in max_depths:
model = tree.DecisionTreeClassifier(criterion='gini',max_depth=depth)
accuracie_segments = cross_val_score(model, X, y, scoring = 'accuracy', cv = cv, n_jobs = 1)
accuracies.append(np.mean(accuracie_segments))
plt.figure(figsize=(15, 8))
plt.title('Avg accuracy vs Max depths')
plt.plot(max_depths, accuracies,'bv--')
plt.xlabel('Max depths')
plt.ylabel('Avg accuracy')
plt.show()
###Output
_____no_output_____
###Markdown
So, **max_depth = 5** gives the highest accuracy.
###Code
# decision tree classifier with max_depth=5 impurity measure 'gini'
model_decision_tree = tree.DecisionTreeClassifier(criterion='gini',max_depth=5)
accuracies = cross_val_score(model_decision_tree, X, y, scoring = 'accuracy', cv = cv, n_jobs = 1)
precisions = cross_val_score(model_decision_tree, X, y, scoring = 'precision', cv = cv, n_jobs = 1)
recalls = cross_val_score(model_decision_tree, X, y, scoring = 'recall', cv = cv, n_jobs = 1)
f1s = cross_val_score(model_decision_tree, X, y, scoring = 'f1', cv = cv, n_jobs = 1)
aucs = cross_val_score(model_decision_tree, X, y, scoring = 'roc_auc', cv = cv, n_jobs = 1)
accuracy_decision_tree = np.mean(accuracies)
precision_decision_tree = np.mean(precisions)
recall_decision_tree = np.mean(recalls)
f1_decision_tree = np.mean(f1s)
auc_decision_tree = np.mean(aucs)
print('For the Decision Tree classifier:')
print('accuracy =', round(accuracy_decision_tree, 2)
, 'precision =', round(precision_decision_tree, 2)
, 'recall =', round(recall_decision_tree, 2)
, 'f1-score =', round(f1_decision_tree, 2)
, 'AUC =', round(auc_decision_tree, 2))
###Output
For the Decision Tree classifier:
accuracy = 0.76 precision = 0.68 recall = 0.65 f1-score = 0.64 AUC = 0.79
###Markdown
Let's find out the accuracy, precision, recall and area under the ROC curve for the K-NN classifier. To choose the hyperparameter k we will pick the value with the highest average accuracy under 10-fold cross-validation.
###Code
X_scaled = pd.DataFrame(MinMaxScaler().fit_transform(X), columns=X.columns)
k_values = []
for i in range (1, 25):
k_values.append(i)
accuracies = []
for k in k_values:
model = KNeighborsClassifier(n_neighbors=k, metric='minkowski', p=2)
accuracy_segments = cross_val_score(model, X_scaled, y, scoring = 'accuracy', cv = cv, n_jobs = 1)
accuracies.append(np.mean(accuracy_segments))
plt.figure(figsize=(15, 8))
plt.title('Avg accuracy vs K-values')
plt.plot(k_values, accuracies,'bv--')
plt.xlabel('K values')
plt.ylabel('Avg accuracy')
plt.show()
###Output
_____no_output_____
###Markdown
So, **k=17(or 15)** gives the highest average accuracy.
###Code
# k-nn classifier witk k=17 neighbours and euclidian distance
model_knn = KNeighborsClassifier(n_neighbors=17, metric='minkowski', p=2)
accuracies = cross_val_score(model_knn, X_scaled, y, scoring = 'accuracy', cv = cv, n_jobs = 1)
precisions = cross_val_score(model_knn, X_scaled, y, scoring = 'precision', cv = cv, n_jobs = 1)
recalls = cross_val_score(model_knn, X_scaled, y, scoring = 'recall', cv = cv, n_jobs = 1)
f1s = cross_val_score(model_knn, X_scaled, y, scoring = 'f1', cv = cv, n_jobs = 1)
aucs = cross_val_score(model_knn, X_scaled, y, scoring = 'roc_auc', cv = cv, n_jobs = 1)
accuracy_knn = np.mean(accuracies)
precision_knn = np.mean(precisions)
recall_knn = np.mean(recalls)
f1_knn = np.mean(f1s)
auc_knn = np.mean(aucs)
print('For the K-NN classifier:')
print('accuracy =', round(accuracy_knn, 2)
, ', precision =', round(precision_knn, 2)
, ', recall =', round(recall_knn, 2)
, ', f1-score =', round(f1_knn, 2)
, ', AUC =', round(auc_knn, 2))
###Output
For the K-NN classifier:
accuracy = 0.77 , precision = 0.7 , recall = 0.59 , f1-score = 0.64 , AUC = 0.83
###Markdown
For comparison of performance let's draw a bar graph of evaluation metrics for the two classifiers.
###Code
labels = ['accuracy', 'precision', 'recall', 'f1', 'auc']
decision_tree_evaluation_metrics = [accuracy_decision_tree, precision_decision_tree, recall_decision_tree,
f1_decision_tree, auc_decision_tree]
knn_evaluation_metrics = [accuracy_knn, precision_knn, recall_knn, f1_knn, auc_knn]
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
ax.bar(x - width/2, decision_tree_evaluation_metrics, width, label='decision tree')
ax.bar(x + width/2, knn_evaluation_metrics, width, label='k-nn')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Score')
ax.set_title('Decision Tree vs KNN comparison')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
plt.show()
###Output
_____no_output_____ |
2.Analysis Using Pandas.ipynb | ###Markdown
Making decisions with pandas quantile analysis: random data Quintile analysis is a common framework for evaluating the efficacy of security factors. What is a factor? A factor is a method for scoring/ranking sets of securities. For a particular point in time and for a particular set of securities, a factor can be represented as a pandas series where the index is an array of the security identifiers and the values are the scores or ranks. Quintiles/Buckets If we take factor scores over time, we can, at each point in time, split the set of securities into 5 equal buckets, or quintiles, based on the order of the factor scores. There is nothing particularly sacred about the number 5. We could have used 3 or 10. But we use 5 often. Finally, we track the performance of each of the five buckets to determine if there is a meaningful difference in the returns. We tend to focus more intently on the difference in returns of the bucket with the highest rank relative to that of the lowest rank. Generating time series data for explanation Returns: generate random returns for a specified number of securities and periods. Signals: generate random signals for a specified number of securities and periods and with a prescribed level of correlation with Returns. In order for a factor to be useful, there must be some information or correlation between the scores/ranks and subsequent returns. If there weren't any correlation, we would see that in the results. A good exercise for the reader would be to duplicate this analysis with random data generated with 0 correlation (see the sketch below).
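The zero-correlation exercise suggested above only requires changing the off-diagonal terms of the covariance matrix; a minimal sketch (sizes chosen to match the setup cell below):
```
import numpy as np

num_periods, num_securities = 1000, 1000
means = [0, 0]
covariance_zero = [[1., 0.],
                   [0., 1.]]  # zero off-diagonal -> no correlation between returns and signals
m0 = np.random.multivariate_normal(means, covariance_zero, (num_periods, num_securities)).T
```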
###Code
import pandas as pd
import numpy as np
num_securities = 1000
num_periods = 1000
period_frequency = 'W'
start_date = "2000-12-31"
np.random.seed([3,1415])
means = [0,0]
covariance = [[1.,5e-3],
[5e-3,1.]]
#generating a set of data [0] and m[1] with ~0.005 correlation
m = np.random.multivariate_normal(means, covariance,
(num_periods, num_securities)).T
# generating index
ids = pd.Index(['s{:05d}'.format(s) for s in range(num_securities)])
tidx = pd.date_range(start=start_date, periods=num_periods, freq=period_frequency)
###Output
_____no_output_____
###Markdown
I divide m[0] by 25 to scale down to something that looks like stock returns. I also add 1e-7 to give a modest positive mean return.
###Code
security_returns = pd.DataFrame(m[0] / 25 + 1e-7, tidx, ids)
security_signals = pd.DataFrame(m[1], tidx, ids)
###Output
_____no_output_____
###Markdown
pd.qcut - Create Quintile Buckets
###Code
def qcut(s, q=5):
labels = ['q{}'.format(i) for i in range(1, 6)]
return pd.qcut(s, q, labels=labels)
cut = security_signals.stack().groupby(level=0).apply(qcut)
#Use these cuts as an index on our returns
returns_cut = security_returns.stack().rename('returns') \
.to_frame().set_index(cut, append=True) \
.swaplevel(2, 1).sort_index().squeeze() \
.groupby(level=[0, 1]).mean().unstack()
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15, 5))
ax1 = plt.subplot2grid((1,3), (0,0))
ax2 = plt.subplot2grid((1,3), (0,1))
ax3 = plt.subplot2grid((1,3), (0,2))
# Cumulative Returns
returns_cut.add(1).cumprod() \
.plot(colormap='jet', ax=ax1, title="Cumulative Returns")
leg1 = ax1.legend(loc='upper left', ncol=2, prop={'size': 10}, fancybox=True)
leg1.get_frame().set_alpha(.8)
# Rolling 50 Week Return
returns_cut.add(1).rolling(50).apply(lambda x: x.prod()) \
.plot(colormap='jet', ax=ax2, title="Rolling 50 Week Return")
leg2 = ax2.legend(loc='upper left', ncol=2, prop={'size': 10}, fancybox=True)
leg2.get_frame().set_alpha(.8)
# Return Distribution
returns_cut.plot.box(vert=False, ax=ax3, title="Return Distribution")
fig.autofmt_xdate()
plt.show()
###Output
_____no_output_____
###Markdown
Visualize Quintile Correlation with scatter_matrix
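The heading above mentions `scatter_matrix`, while the next cell moves on to drawdowns; a minimal sketch of the scatter-matrix view of the quintile returns built earlier (`returns_cut`) might look like this:
```
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt

scatter_matrix(returns_cut, figsize=(10, 10), diagonal='kde')
plt.show()
```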
###Code
def max_dd(returns):
"""returns is a series"""
r = returns.add(1).cumprod()
dd = r.div(r.cummax()).sub(1)
mdd = dd.min()
end = dd.argmin()
start = r.loc[:end].argmax()
return mdd, start, end
def max_dd_df(returns):
"""returns is a dataframe"""
series = lambda x: pd.Series(x, ['Draw Down', 'Start', 'End'])
return returns.apply(max_dd).apply(series)
#max_dd_df(returns_cut)
draw_downs = max_dd_df(returns_cut)
fig, axes = plt.subplots(5, 1, figsize=(10, 8))
for i, ax in enumerate(axes[::-1]):
returns_cut.iloc[:, i].add(1).cumprod().plot(ax=ax)
sd, ed = draw_downs[['Start', 'End']].iloc[i]
ax.axvspan(sd, ed, alpha=0.1, color='r')
ax.set_ylabel(returns_cut.columns[i])
fig.suptitle('Maximum Draw Down', fontsize=18)
fig.tight_layout()
plt.subplots_adjust(top=.95)
###Output
_____no_output_____ |
module_4/challenge/main.ipynb | ###Markdown
Challenge 3 In this challenge we will practice our knowledge of probability distributions. To do so, we will split this challenge into two parts: 1. The first part has 3 questions about an artificial *data set* with samples from a normal and a binomial distribution. 2. The second part is about analysing the distribution of one variable of the [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2) _data set_, and has 2 questions.> Note: please do not change the names of the answer functions. General _setup_
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
from statsmodels.distributions.empirical_distribution import ECDF
#%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
###Output
_____no_output_____
###Markdown
Part 1 _Setup_ for part 1
###Code
np.random.seed(42)
dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000),
"binomial": sct.binom.rvs(100, 0.2, size=10000)})
###Output
_____no_output_____
###Markdown
Start your analysis of part 1 here
###Code
# Sua análise da parte 1 começa aqui.
dataframe.head()
dataframe.info()
dataframe.describe()
###Output
_____no_output_____
###Markdown
Question 1 What is the difference between the quartiles (Q1, Q2 and Q3) of the `normal` and `binomial` variables of `dataframe`? Answer as a tuple of three elements rounded to three decimal places. In other words, let `q1_norm`, `q2_norm` and `q3_norm` be the quantiles of the `normal` variable and `q1_binom`, `q2_binom` and `q3_binom` the quantiles of the `binom` variable; what is the difference `(q1_norm - q1_binom, q2_norm - q2_binom, q3_norm - q3_binom)`?
###Code
def q1():
quantiles = dataframe.quantile([.25, .5, .75])
quantiles_diff = quantiles['normal'] - quantiles['binomial']
return tuple(quantiles_diff.round(3).to_list())
q1()
###Output
_____no_output_____
###Markdown
Food for thought:* Did you expect values of this magnitude?* Can you explain how apparently very different distributions (discrete vs. continuous, for example) end up giving these values? Question 2 Consider the interval $[\bar{x} - s, \bar{x} + s]$, where $\bar{x}$ is the sample mean and $s$ is the standard deviation. What is the probability within this interval, computed with the empirical cumulative distribution function (empirical CDF) of the `normal` variable? Answer as a single scalar rounded to three decimal places.
###Code
def q2():
inferior = dataframe.normal.mean() - dataframe.normal.std()
superior = dataframe.normal.mean() + dataframe.normal.std()
ecdf = ECDF(dataframe.normal)
return np.float(round(ecdf(superior) - ecdf(inferior), 3))
q2()
###Output
_____no_output_____
###Markdown
Food for thought:* Is this value close to the theoretical expectation?* Also try the intervals $[\bar{x} - 2s, \bar{x} + 2s]$ and $[\bar{x} - 3s, \bar{x} + 3s]$ (a sketch of this experiment is given below). Question 3 What is the difference between the means and the variances of the `binomial` and `normal` variables? Answer as a tuple of two elements rounded to three decimal places. In other words, let `m_binom` and `v_binom` be the mean and variance of the `binomial` variable, and `m_norm` and `v_norm` the mean and variance of the `normal` variable. What are the differences `(m_binom - m_norm, v_binom - v_norm)`?
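A hedged sketch of the experiment suggested above, reusing `dataframe` and `ECDF` from the earlier cells and looping over 1, 2 and 3 standard deviations:
```
ecdf = ECDF(dataframe.normal)
mean, std = dataframe.normal.mean(), dataframe.normal.std()
for k in (1, 2, 3):
    prob = ecdf(mean + k * std) - ecdf(mean - k * std)
    print(k, round(float(prob), 3))  # compare with the theoretical ~0.683, 0.954, 0.997
```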
###Code
def q3():
mean_std = dataframe.describe()[1:3]
mean_std.loc['std'] **= 2
mean_std_diff = mean_std['binomial'] - mean_std['normal']
return tuple(mean_std_diff.round(3).to_list())
q3()
###Output
_____no_output_____
###Markdown
Food for thought:* Did you expect values of this magnitude?* What is the effect of increasing or decreasing $n$ (currently 100) on the distribution of the `binomial` variable? Part 2 _Setup_ for part 2
###Code
stars = pd.read_csv("pulsar_stars.csv")
stars.rename({old_name: new_name
for (old_name, new_name)
in zip(stars.columns,
["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"])
},
axis=1, inplace=True)
stars.loc[:, "target"] = stars.target.astype(bool)
###Output
_____no_output_____
###Markdown
Start your analysis of part 2 here
###Code
stars.head()
stars.info()
stars.describe()
###Output
_____no_output_____
###Markdown
Question 4 Considering the `mean_profile` variable of `stars`: 1. Filter only the `mean_profile` values where `target == 0` (i.e. where the star is not a pulsar). 2. Standardize the filtered `mean_profile` variable to have mean 0 and variance 1. We will call the resulting variable `false_pulsar_mean_profile_standardized`. Find the theoretical quantiles of a normal distribution with mean 0 and variance 1 for 0.80, 0.90 and 0.95 using the `norm.ppf()` function available in `scipy.stats`. What are the probabilities associated with these quantiles according to the empirical CDF of `false_pulsar_mean_profile_standardized`? Answer as a tuple of three elements rounded to three decimal places.
###Code
def standardization(x):
return (x - x.mean()) / x.std()
def q4():
false_pulsar_mean_profile = stars.loc[stars['target'] == False]['mean_profile']
false_pulsar_mean_profile_standardized = standardization(false_pulsar_mean_profile)
ecdf = ECDF(false_pulsar_mean_profile_standardized)
ppf = pd.Series(ecdf(sct.norm.ppf([0.80, 0.90, 0.95])), [0.80, 0.90, 0.95])
return tuple(ppf.round(3).to_list())
q4()
###Output
_____no_output_____
###Markdown
Food for thought:* Do the values found make sense?* What might this say about the distribution of the `false_pulsar_mean_profile_standardized` variable? Question 5 What is the difference between the Q1, Q2 and Q3 quantiles of `false_pulsar_mean_profile_standardized` and the same theoretical quantiles of a normal distribution with mean 0 and variance 1? Answer as a tuple of three elements rounded to three decimal places.
###Code
def standardization(x):
return (x - x.mean()) / x.std()
def q5():
false_pulsar_mean_profile = stars.loc[stars['target'] == False]['mean_profile']
false_pulsar_mean_profile_standardized = standardization(false_pulsar_mean_profile)
ppf = pd.Series(sct.norm.ppf([0.25, 0.50, 0.75]), [0.25, 0.50, 0.75])
quantiles = false_pulsar_mean_profile_standardized.quantile([0.25, 0.50, 0.75])
return tuple((quantiles - ppf).round(3).to_list())
q5()
###Output
_____no_output_____ |
test_mosaic_cube_briggsbwtaper.ipynb | ###Markdown
Certify and install required python modules.
###Code
import os
import sys
import subprocess
import pkg_resources
__require = {'casatools', 'casatasks', 'casatestutils', 'panel', 'astropy', 'matplotlib', 'numpy'}
__installed = {pkg.key for pkg in pkg_resources.working_set}
__missing = __require - __installed
if len(__missing) > 0:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', *__missing], stdout=subprocess.DEVNULL)
!python3 -m casatools --update-user-data
###Output
_____no_output_____
###Markdown
Utility Functions
###Code
def clean_data():
import os
os.system('rm -rf mosaic_cube* *.log *.png *.html')
###Output
_____no_output_____
###Markdown
Stakeholder Datasets ```Datasets (MOUS): E2E6.1.00034.S (uid://A002/Xcff05c/X1ec) Test list: 9c. Mosaic cube with pcwdT+briggsbwtaper - E2E6.1.00034.S Each test stores reference values in dictionaries for the metrics to be tested, and these dictionaries are stored in a single nested dictionary in a json file located in the casatestdata repository. The path of the json file is stored in the variable self.expdict_jsonfile in test_tclean_base.setUp(). * NOTE for updating the tests and fiducial values in the json file * When the json file is updated and its 'casa_version' could also be updated, then self.refversion in setUp() needs to be updated to match the 'casa_version' defined in the json file; otherwise almastkteestutils.read_testcase_expdicts() prints an error message. The fiducial metric values for a specific image are stored with the following keys. For the standard tests, the default sets are: exp_im_stats, exp_mask_stats, exp_pb_stats, exp_psf_stats, exp_model_stats, exp_resid_stats, exp_sumwt_stats. For mosaic tests, the ones above and exp_wt_stats (for mosaic). Additionally, for cube imaging (if self.parallel=True): exp_bmin_dict, exp_bmaj_dict, exp_pa_dict. And for mtmfs: exp_im1_stats, exp_model1_stats, exp_resid1_stats, exp_sumwt1_stats``` Load Stakeholder Data
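Once the download cells below have fetched the JSON into `data/`, a quick way to peek at the expected-value dictionaries described above is a plain `json.load` (assuming the file is an ordinary nested dictionary keyed by test case, which is an assumption about its layout, not something stated here):
```
import json

with open('data/test_stk_alma_pipeline_imaging_exp_dicts.json') as f:
    exp_dicts = json.load(f)
print(list(exp_dicts.keys())[:5])  # first few top-level keys
```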
###Code
!wget -r -np -nH --cut-dirs=4 --reject "index.html*" https://www.cv.nrao.edu/~jhoskins/E2E6.1.00034.S_tclean.ms.tar
!tar -xvf E2E6.1.00034.S_tclean.ms.tar
os.system('mv E2E6.1.00034.S_tclean.ms data/')
!wget -r -np -nH --cut-dirs=4 --reject "index.html*" https://www.cv.nrao.edu/~jhoskins/test_stk_alma_pipeline_imaging_exp_dicts.json
os.system('mv test_stk_alma_pipeline_imaging_exp_dicts.json ./data/')
###Output
_____no_output_____
###Markdown
Enable mpi
###Code
parallel = False
###Output
_____no_output_____
###Markdown
Import Required Dependencies
###Code
import casatasks
import casatools
import panel as pn
from astropy.io import fits
from astropy.wcs import WCS
import pylab as pl
import numpy as np
import scripts.test_mosaic_cube_briggsbwtaper as stk
###Output
_____no_output_____
###Markdown
User Stakeholder TestThe unit test script is broken into **three** core parts within the notebook.- Setup- Modifiable Tests- Metric Checks and Reporting Setup The setup section handles setting up the unit test and general class instantiation. In general the user shouldn't have to change this. For those who are developing their own stakeholder test, the setup section would include all the functions required to run the unit tests along with any user-defined utility functions specific to their tests. TestingThe testing section makes the stakeholder test code available to the user so that they can modify parameters, check their diagnostics and rerun the tests. The tests in the notebook are presented as they are defined in the unit testing script, including their default values. **It is recommended that the user restart the kernel and run all after each change** Metric Checks and TestingThis section runs the unit test's built-in metric code as well as allowing the user to write their own diagnostic code. In addition, the user can access and tweak the values in the 'expected metrics' JSON using the setter/getter functionality.**Getting**`new_dict = standard.exp_dict`**Modifying**`new_dict['exp_im_stats']['im_rms'][1] = `**Setting**`standard.exp_dict = new_dict`The user can now rerun the metric test functions and the new dictionary will be used. This will not be the case if the kernel or the testing class is reinstantiated.
###Code
standard = stk.Test_standard()
standard.set_file_path(path=os.getcwd()+"/data/")
standard.setUp()
standard.test_mosaic_cube_briggsbwtaper()
###Output
_____no_output_____
###Markdown
TestingThe header(footer) markers are used with `nbsync.py`, which allows the user to sync changes either from `notebook` --> `script` or `script` --> `notebook`. The formatting of the headers(footers) is important: there must be a single whitespace after the header and before the footer. Any changes made between the header(footer) will be synced with `nbsync.py`.
###Code
msfile = standard.data_path + '/E2E6.1.00034.S_tclean.ms'
file_name = standard.file_name
# %% test_mosaic_cube_briggsbwtaper_tclean_1 start @
casatasks.tclean(vis=msfile, field='SMIDGE_NWCloud', spw=['0'], \
antenna=['0,1,2,3,4,5,6,7,8'], scan=['8,12,16'], \
intent='OBSERVE_TARGET#ON_SOURCE', datacolumn='data', \
imagename=file_name+'0', imsize=[108, 108], cell=['1.1arcsec'], \
phasecenter='ICRS 00:45:54.3836 -073.15.29.413', stokes='I', \
specmode='cube', nchan=508, start='220.2526743594GHz', \
width='0.2441741MHz', outframe='LSRK', \
perchanweightdensity=True, gridder='mosaic', \
mosweight=True, usepointing=False, pblimit=0.2, \
deconvolver='hogbom', restoration=False, restoringbeam='common', \
pbcor=False, weighting='briggsbwtaper', robust=0.5, npixels=0, niter=0, \
threshold='0.0mJy', interactive=0, usemask='auto-multithresh', \
sidelobethreshold=1.25, noisethreshold=5.0, \
lownoisethreshold=2.0, negativethreshold=0.0, minbeamfrac=0.1, \
growiterations=75, dogrowprune=True, minpercentchange=1.0, \
fastnoise=False, savemodel='none', parallel=parallel,
verbose=True)
# %% test_mosaic_cube_briggsbwtaper_tclean_1 end @
print('Copying iter0 files to iter1')
standard.copy_products(file_name+'0', file_name+'1')
casatasks.imstat(file_name + '0.psf')
# %% test_mosaic_cube_briggsbwtaper_tclean_2 start @
casatasks.tclean(vis=msfile, field='SMIDGE_NWCloud', spw=['0'], \
antenna=['0,1,2,3,4,5,6,7,8'],scan=['8,12,16'], \
intent='OBSERVE_TARGET#ON_SOURCE', datacolumn='data', \
imagename=file_name+'1', imsize=[108, 108], \
cell=['1.1arcsec'], phasecenter='ICRS 00:45:54.3836'
' -073.15.29.413', stokes='I', specmode='cube', nchan=508, \
start='220.2526743594GHz', width='0.2441741MHz', \
outframe='LSRK', perchanweightdensity=True, \
gridder='mosaic', mosweight=True, \
usepointing=False, pblimit=0.2, deconvolver='hogbom', \
restoration=True, restoringbeam='common', \
pbcor=True, weighting='briggsbwtaper', robust=0.5,\
npixels=0, niter=20000, threshold='0.354Jy', nsigma=0.0, \
interactive=0, usemask='auto-multithresh', \
sidelobethreshold=1.25, noisethreshold=5.0, \
lownoisethreshold=2.0, negativethreshold=0.0, \
minbeamfrac=0.1, growiterations=75, dogrowprune=True, \
minpercentchange=1.0, fastnoise=False, restart=True, \
savemodel='none', calcres=False, calcpsf=False, \
parallel=parallel, verbose=True)
# %% test_mosaic_cube_briggsbwtaper_tclean_2 end @
###Output
_____no_output_____
###Markdown
Produce Standard Cube Report
###Code
standard.standard_cube_report()
###Output
_____no_output_____
###Markdown
Clean DataUncomment this if you want to clean the working files out of the directory.
###Code
#clean_data()
###Output
_____no_output_____
###Markdown
Display Weblog
###Code
from IPython.display import IFrame
IFrame(src='./test_tclean_alma_pipeline_weblog.html', width=900, height=600)
###Output
_____no_output_____ |
sig-peak-dia.ipynb | ###Markdown
Jupyter Notebook to analyze significant peaks in an MZML file as found in an OSW file. This notebook should work with the following generated files:- Post-processed MZML experiments (total of 162) binned and bound to significant peaks- Merged OSW table outlining significant peaks (QVALUE < 0.01) and total peaks (QVALUE < 1)
###Code
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
BASE_DIR = Path().resolve()
TSV_DIR = BASE_DIR / "data" / "tsv"
MZML_DIR = BASE_DIR / "data" / "mzml"
MZML_MAPPED_TSV_DIR = TSV_DIR / "mapped_mzml_to_osw_qval_01_mz_window_20ppm"
# Significant QVALUE OSW datapath
sig_qvalue_osw = TSV_DIR / "20220127_run_1330_0_sig_qval_null_feature_ftrans_trans_score_ms2.tsv"
# Non-significant QVALUE OSW datapath
nonsig_qvalue_osw = TSV_DIR / "20220127_qval_null_feature_ftrans_trans_score_ms2.tsv"
# Significant 1/162 MZML
# mzml_1_162_sig_qvalue = MZML_MAPPED_TSV_DIR / '20220124_frame=25081_scan=452_qvalue_01.tsv'
# mzml_1_162_sig_qvalue = (
# TSV_DIR
# / "mapped_mzml_to_osw_qval_01_mz_window_20ppm"
# / "merged_20220126_frame=22786_scan=452_qvalue_01.tsv"
# )
mzml_1_162_sig_qvalue = TSV_DIR / "merged_20220128_frame=22786_scan=452_qvalue_01.tsv"
# Load significant MZML 1/162 dataframe
mzml_1_162_df = pd.read_csv(mzml_1_162_sig_qvalue, sep="\t")
# mzml_1_162_df = next(mzml_1_162_df) ## only for chunking
# Load chunk of significant OSW filepath (1,000,000 rows)
# sig_qvalue_osw_df = next(pd.read_csv(sig_qvalue_osw, sep='\t', chunksize=1_000_000))
sig_qvalue_osw_df = pd.read_csv(sig_qvalue_osw, sep="\t").drop(columns=["FEATURE_ID"])
print(mzml_1_162_df.shape, list(mzml_1_162_df))
print(sig_qvalue_osw_df.shape, list(sig_qvalue_osw_df))
merged_df = mzml_1_162_df.merge(sig_qvalue_osw_df, on="TRANSITION_ID")
im_range = 0.05
merged_df["EXP_IM_LEFT"] = merged_df["EXP_IM"] - im_range
merged_df["EXP_IM_RIGHT"] = merged_df["EXP_IM"] + im_range
# sig_qvalue_osw_df.head(10)[['FEATURE_ID', 'TRANSITION_ID']]
mzml_1_162_df.head(10)[["FEATURE_ID", "TRANSITION_ID"]]
rt = mzml_1_162_df["RT"][0]
rt
###Output
_____no_output_____
###Markdown
Attempt to find percentage of significant peaks found by MSExperiment object
###Code
import pyopenms as pms
mzml_filepath = (
MZML_DIR
/ "Rost_DIApy3_SP2um_90min_250ngK562_100nL_1_Slot1-5_1_1330_6-28-2021_0_2400_to_2700_swath_700.mzML"
)
def get_msexperiment_obj(mzml_filepath):
# Read corresponding mzML file and load it into pyopenms object
msexperiment = pms.MSExperiment()
pms.MzMLFile().load(str(mzml_filepath), msexperiment)
return msexperiment
exps = get_msexperiment_obj(mzml_filepath)
from collections import Counter
def get_perc_peaks_found_in_mzml(reduced_df, reduced_df_filename, merged_df, mzml_exps):
# MUST CONVERT FLOAT TO STR WITH ROUNDING TO ALLOW SEARCH OF MZML EXP WITH SIG DATA
def round_(x):
return str(x)[:8]
exp_name_tsv = "frame" + str(reduced_df_filename).split("frame")[-1].split("_qvalue")[0]
for mz_idx, exp in enumerate(mzml_exps):
if exp_name_tsv == exp.getNativeID():
mz, int_ = exp.get_peaks()
rt = exp.getRT()
im = exp.getFloatDataArrays()[0].get_data()[mz_idx]
peak_count = map(round_, mz)
break
peak_count_map = dict(Counter(peak_count))
# Find just sig MZ
found_mz_peaks = set(map(round_, reduced_df["MZ"]))
count = 0
for peak in found_mz_peaks:
count += peak_count_map[peak]
mz_perc = count / len(mz)
# Find sig MZ in RT window
rt_window_df = merged_df[(merged_df["LEFT_WIDTH"] < rt) & (merged_df["RIGHT_WIDTH"] > rt)]
found_mz_rt_peaks = set(map(round_, rt_window_df["MZ"]))
count = 0
for peak in found_mz_rt_peaks:
count += peak_count_map[peak]
mz_rt_perc = count / len(mz)
# Find sig MZ in RT window AND in IM window
rt_im_window_df = rt_window_df[
(rt_window_df["EXP_IM_LEFT"] < im) & (rt_window_df["EXP_IM_RIGHT"] > im)
]
found_mz_rt_im_peaks = set(map(round_, rt_im_window_df["MZ"]))
count = 0
for peak in found_mz_rt_im_peaks:
count += peak_count_map[peak]
mz_rt_im_perc = count / len(mz)
return mz_perc, mz_rt_perc, mz_rt_im_perc, exp.getNativeID()
im_range = 0.05
merged_df["EXP_IM_LEFT"] = merged_df["EXP_IM"] - im_range
merged_df["EXP_IM_RIGHT"] = merged_df["EXP_IM"] + im_range
perc = get_perc_peaks_found_in_mzml(mzml_1_162_df, mzml_1_162_sig_qvalue, merged_df, exps)
perc
def read_chunk_tsv(input_path, chunksize=100_000):
# DON'T USE IF YOU ARE PLANNING TO MERGE TABLES ON A KEY
print(f"Reading {input_path} as TSV chunk")
with pd.read_csv(input_path, sep="\t", chunksize=chunksize) as reader:
yield from reader
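# (added note) The warning above is presumably because a chunked read splits rows
# sharing a merge key across different chunks, so any per-chunk merge or
# de-duplication can silently miss matches that live in another chunk.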
def fetch_product_mz(tsv_chunk, mz_range=0.001, ppm=None):
for idx, chunk in enumerate(tsv_chunk):
if ppm is not None and isinstance(ppm, (int, float)):
ppm_range = (mz_to_ppm(mz, ppm) for mz in chunk["PRODUCT_MZ"])
chunk["PRODUCT_MZ_LEFT"], chunk["PRODUCT_MZ_RIGHT"] = zip(*ppm_range)
else:
# Give a small width to the PRODUCT_MZ vals - Do 0.001 Da for now
chunk["PRODUCT_MZ_LEFT"] = chunk["PRODUCT_MZ"] - mz_range
chunk["PRODUCT_MZ_RIGHT"] = chunk["PRODUCT_MZ"] + mz_range
print(f"Reading chunk #{idx}", end="\r")
yield chunk
def mz_to_ppm(mz, ppm):
err_da = mz / (1_000_000 / ppm)
return (mz - err_da, mz + err_da)
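# Worked example of the ppm window used below (added for clarity): at m/z 500,
# a 20 ppm tolerance is 500 / (1_000_000 / 20) = 0.01 Da, so the window is
# mz_to_ppm(500, 20) == (499.99, 500.01).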
def find_peak_in_mzml_to_osw(mzml_exp, osw_df):
# Iterate through MZML experiments to find significant peaks
for exp_idx, exp in enumerate(mzml_exp):
non_found = 0
found = 0
# MZ and intensity arrays
mz_array, int_array = exp.get_peaks()
# Iterate through MZ values in MZML experiment
for mz_idx, mz in enumerate(exp.get_peaks()[0]):
# Find MZ that lie within a MZ threshold as defined in OSW file
filtered_osw_df = osw_df[
(mz > osw_df["PRODUCT_MZ_LEFT"]) & (mz < osw_df["PRODUCT_MZ_RIGHT"])
]
if filtered_osw_df.shape[0] != 0:
# Peak(s) found - record data to output file
found += 1
else:
non_found += 1
# print(f"Found: {found} | Not Found: {non_found}", end="\r")
perc_sig_peaks = round((found / (non_found + found)) * 100, 2)
print(perc_sig_peaks, exp.getNativeID())
df = pd.concat(fetch_product_mz(read_chunk_tsv(sig_qvalue_osw), ppm=20))
find_peak_in_mzml_to_osw(exps, df)
# FETCH SIGNIFICANT PEAKS VIA FUNC
# THERE IS DISCREPANCY BETWEEN WHAT IS REPORTED AS SIG FROM MERGE FUNC AND ALG FUNC - KEEP ALG FUNC
import os
sig_qvalue_osw = TSV_DIR / "20220127_run_1330_0_sig_qval_null_feature_ftrans_trans_score_ms2.tsv"
# sig_qvalue_osw_df = pd.read_csv(sig_qvalue_osw, sep="\t").drop(columns=["FEATURE_ID"])
mzml_perc_list = []
im_range = 0.05
for filepath in os.listdir(MZML_MAPPED_TSV_DIR):
if filepath.endswith(".tsv"):
filepath = MZML_MAPPED_TSV_DIR / filepath
mz_df = pd.read_csv(filepath, sep="\t")
merged_df = mz_df.merge(sig_qvalue_osw_df, on="TRANSITION_ID")
merged_df["EXP_IM_LEFT"] = merged_df["EXP_IM"] - im_range
merged_df["EXP_IM_RIGHT"] = merged_df["EXP_IM"] + im_range
mz_perc, mz_rt_perc, mz_rt_im_perc, exp_id = get_perc_peaks_found_in_mzml(
mz_df, filepath, merged_df, exps
)
print(mz_perc, mz_rt_perc, mz_rt_im_perc, exp_id)
mzml_perc_list.append(tuple([mz_perc, mz_rt_perc, mz_rt_im_perc, exp_id]))
# GET ALL PERC FEATURES AND RT - SORT BY RT - PREP FOR PLOTTING
def get_exp_rt_from_frame_id(exps, frame_id):
for exp in exps:
if exp.getNativeID() == frame_id:
return exp.getRT()
summary = []
for run in mzml_perc_list:
frame_id = run[-1]
run = list(run)
run.append(get_exp_rt_from_frame_id(exps, frame_id))
summary.append(run)
summary = sorted(summary, key=lambda x: x[-1])
mz_perc, mz_rt_perc, mz_rt_im_perc, exp_id, exp_rt = zip(*summary)
import matplotlib.pyplot as plt
# plt.plot(exp_rt, mz_perc, label="mz_perc")
plt.plot(exp_rt, mz_rt_perc, label="mz_rt_perc")
plt.plot(exp_rt, mz_rt_im_perc, label="mz_rt_im_perc")
plt.show()
print(sorted(zip(mz_rt_im_perc, exp_rt)))
x = []
for i in range(len(summary)):
x.append(summary[i][0] - summary[i][1])
plt.plot(exp_rt, x)
plt.show()
cum_json = []
for item in summary:
export_json = {
"rt": "",
"exp_id": "",
"mz_perc_identified": "",
"mz_perc_rt_identified": "",
"mz_perc_rt_im_identified": "",
}
export_json["rt"] = item[-1]
export_json["exp_id"] = item[-2]
export_json["mz_perc_identified"] = item[0]
export_json["mz_perc_rt_identified"] = item[1]
export_json["mz_perc_rt_im_identified"] = item[2]
cum_json.append(export_json)
import json
with open("perc_peaks_identified_1330_0.json", "w") as f:
json.dump(cum_json, f)
# mzml_perc_list_backup = mzml_perc_list.copy()
# Select features within window
window_rt_df = merged_df[(merged_df["LEFT_WIDTH"] <= rt) & (merged_df["RIGHT_WIDTH"] >= rt)]
window_rt_df
list(mzml_1_162_df["PERC_SIG_PEAKS"])[0]
###Output
_____no_output_____
###Markdown
END perc sig peak find
###Code
# Demo block - check which columns to keep
cols = list(merged_df)
# RM from first half: PVALUE, PEP, UNCHARGED_MASS
first_half = cols[: int(len(cols) / 2)]
# RM from second half: DETECTING, IDENTIFYING, QUANTIFYING, DECOY
second_half = cols[int(len(cols) / 2) :]
# merged_df[second_half].head(30)
# Keeping useful columns from merged DF
cols_to_rm = ["PVALUE", "PEP", "UNCHARGED_MASS", "DETECTING", "IDENTIFYING", "QUANTIFYING", "DECOY"]
merged_df_tidy = merged_df.drop(columns=cols_to_rm)
# Drop duplicate rows
merged_df_tidy = merged_df_tidy.drop_duplicates()
print(merged_df_tidy.shape)
# merged_df_tidy.head(30)
# What if we joined 'FEATURE_ID' with 'MZ' to create a unique key and keep unique cols
merged_df_tidy["FEATURE_ID_MZ"] = merged_df_tidy["FEATURE_ID"].astype(str) + merged_df_tidy[
"MZ"
].astype(str)
# This is the number of found m/z
print(len(set(merged_df_tidy["FEATURE_ID_MZ"])))
len(set(merged_df_tidy["FEATURE_ID"]))
merged_df_tidy.dtypes
def unique(arr):
unique_arr = tuple(set(arr))
if len(unique_arr) == 1:
return unique_arr[0]
return unique_arr
# Aggregate data (attempt) using pivot table
# merged_df_test = merged_df_tidy[:1000]
merged_df_pivot = pd.pivot_table(
window_rt_df.reset_index(),
index=["TRANSITION_ID", "MZ"],
aggfunc={
"IM": lambda x: unique(x),
"INTENSITY": lambda x: unique(x),
"QVALUE": lambda x: unique(x),
"NORM_RT": lambda x: unique(x),
"EXP_RT": lambda x: unique(x),
"EXP_IM": lambda x: unique(x),
"FEATURE_ID": lambda x: list(x),
# NOTE: you can use np.unique (more performant) but you CAN run into aggregation error...
},
).reset_index()
# merged_df_pivot['UNIQUE_IM'] = merged_df_pivot['IM'] / 6
# merged_df_pivot['UNIQUE_IM'] = merged_df_pivot['UNIQUE_IM'].astype('int')
# set([len(x) for x in merged_df_pivot['FEATURE_ID']])
merged_df_pivot["FEATURE_ID"] = merged_df_pivot["FEATURE_ID"].apply(unique)
merged_df_pivot
merged_df_pivot[merged_df_pivot["FEATURE_ID"] == -8312574784079845815]
merged_df_tidy[merged_df_tidy["FEATURE_ID"] == -8312574784079845815]
test = pd.pivot_table(
merged_df_pivot,
index=["TRANSITION_ID"],
aggfunc={
"FEATURE_ID": lambda x: unique(x),
"IM": lambda x: unique(x),
"MZ": lambda x: unique(x),
"INTENSITY": lambda x: unique(x),
"QVALUE": lambda x: unique(x),
"NORM_RT": lambda x: unique(x),
"EXP_RT": lambda x: unique(x),
"EXP_IM": lambda x: unique(x),
},
).reset_index()
test
merged_df_pivot.head(15)
# func to compress nested list dynamically
def compress_list(l):
concat = []
for item in l:
if hasattr(item, "__iter__"):
concat.extend(item)
else:
concat.append(item)
return concat
compress_list([[1, 2, 3], [1, 3, 10], 1, 2, [1, 2]])
# import itertools
# pd.pivot_table(
# test,
# index=["FEATURE_ID"],
# aggfunc={'IM': lambda x:list(itertools.chain.from_iterable(x))}
# )
test["IM_FLAT"] = test["IM"].apply(lambda x: sorted(compress_list(x)))
test["IM_COUNT"] = test["IM"].apply(lambda x: len(compress_list(x)))
test["MZ_COUNT"] = test["MZ"].apply(lambda x: len(compress_list(x)))
test["INTENSITY_FLAT"] = test["INTENSITY"].apply(lambda x: compress_list(x))
test["INTENSITY_COUNT"] = test["INTENSITY"].apply(lambda x: len(compress_list(x)))
test.head(30)
# Get ratio of IM per MZ reads PER feature ID
test["IM/MZ"] = test["IM_COUNT"] / test["MZ_COUNT"]
labels, counts = zip(*test["IM/MZ"].value_counts().iteritems())
print(labels, counts)
plt.pie(counts, labels=labels)
plt.show()
# Get MZ reads PER feature ID
labels, counts = zip(*test["MZ_COUNT"].value_counts().iteritems())
print(labels, counts)
plt.pie(counts, labels=labels)
plt.show()
# What percentage of features had exactly one MZ?
print(len(test[test["MZ_COUNT"] == 1]) / len(test["MZ_COUNT"]))
# Get IM reads PER feature ID
labels, counts = zip(*test["IM_COUNT"].value_counts().iteritems())
print(labels, counts)
plt.pie(counts, labels=labels)
plt.show()
mz = merged_df_tidy["MZ"]
plt.hist(mz, bins=100)
plt.title(f"Significant Spectra (N={merged_df_tidy.shape[0]}) from 1/168 MZML Exp.")
plt.xlabel("MZ")
plt.xticks(np.arange(200, max(mz) + 1, 100.0))
plt.ylabel("Count")
plt.show()
# Check if intensity correlates with MZ count
zip_mz_int = zip(merged_df_tidy["MZ"], merged_df_tidy["INTENSITY"])
sorted(list(set(zip_mz_int)), key=lambda x: x[1], reverse=True)[:20]
# CONCLUSION - THOSE PEAKS WITH HIGH INTENSITY ARE PICKED UP AS SIGNIFICANT BY OPENSWATH
# Fetch MZ as written in summarized file
mz_arr_flat = compress_list(test["MZ"])
print(len(mz_arr_flat))
print(len(merged_df_tidy["MZ"]))
np.array_split(merged_df_tidy, 100)
# Double check how many features are assigned to the same MZ spectrum
x = merged_df_tidy[merged_df_tidy["MZ"] > 240.134317]
y = x[x["MZ"] < 240.134319]
y
# ANSWER THE QUESTION :: CAN A PEAK BE ASSIGNED TO MORE THAN ONE FEATURE?
mz_centric = pd.pivot_table(
merged_df_tidy, index=["MZ", "FEATURE_ID"], aggfunc={"FEATURE_ID": np.unique, "MZ": np.unique}
).rename(columns={"FEATURE_ID": "ID", "MZ": "_MZ"})
mz_centric
# mz_centric_2 = pd.pivot_table(
# mz_centric,
# index=['MZ'],
# aggfunc={'ID': lambda x:list(x)}
# ).rename(columns={'ID': 'FEATURE_ID'})
# mz_centric_2['FI_COUNT'] = mz_centric_2['FEATURE_ID'].apply(lambda x: len(compress_list(x)))
# mz_centric_2['MZ'] = mz_centric_2.index
# mz_centric_2
# plt.bar(mz_centric_2['MZ'], mz_centric_2['FI_COUNT'])
# plt.show()
## Get Features per MZ
# labels, counts = zip(*mz_centric_2['FI_COUNT'].value_counts().iteritems())
# print(labels, counts)
# plt.pie(counts, labels = labels)
# plt.show()
len(mz_centric_2[mz_centric_2["FI_COUNT"] == 1]) / mz_centric_2.shape[0]
merged_df_tidy[merged_df_tidy["FEATURE_ID"] == 7516195912228066749]["PRODUCT_MZ"]
# Is there a correlation with intensity and QVALUE?
merged_df_tidy["EXP_RT"].corr(merged_df_tidy["MZ"])
from collections import Counter
x = Counter(merged_df_tidy["MZ"])
dict(sorted(x.items(), key=lambda item: item[1], reverse=True))
pd.pivot_table(merged_df_pivot, index=["ID"], aggfunc={"_MZ": len, "IM": np.size})
print(merged_df_tidy[merged_df_tidy["FEATURE_ID"] == -9223234016384753589])
# Check how many peaks are associated with features
# features = set(merged_df_tidy['FEATURE_ID'])
# for feature in features:
# print(feature, '---', len(merged_df[merged_df['FEATURE_ID'] == feature]))
mzml_1_162_df
print(sig_qvalue_osw_df["PRODUCT_MZ"][0], mzml_1_162_df["MZ"][0])
one_feature = list(set(merged_df["FEATURE_ID"]))[15]
set(merged_df[merged_df["FEATURE_ID"] == one_feature]["IM"])
# # Print IM's found per feature identified
# for feature in list(set(merged_df['FEATURE_ID'])):
# print(set(merged_df[merged_df['FEATURE_ID'] == feature]['IM']))
select_cols = [
"FEATURE_ID",
"MZ",
"INTENSITY",
"TIC",
"IM",
"MS_LEVEL",
"RT",
"UNCHARGED_MASS",
"PERC_SIG_PEAKS",
"QVALUE",
"TRANSITION_ID",
"AREA_INTENSITY",
"TOTAL_AREA_INTENSITY",
"APEX_INTENSITY",
"PRECURSOR_ID",
"EXP_RT",
"EXP_IM",
"NORM_RT",
"DELTA_RT",
"LEFT_WIDTH",
"RIGHT_WIDTH",
"PRODUCT_MZ",
"CHARGE",
]
# merged_df[select_cols].head(50)
# 99.9983% of rows are unique.
print(merged_df.shape)
print(merged_df.drop_duplicates().shape)
merged_df[:3000].to_csv("test.csv")
# Drop cols: DETECTING, IDENTIFYING, QUANTIFYING, DECOY
print(merged_df.shape)
for col in list(merged_df):
print(col, len(set(merged_df[col])))
filepath = TSV_DIR / "20220201_frame=25523_scan=452_agg_data_qval_01.tsv"
agg_data = pd.read_csv(filepath, sep="\t")
agg_data.head(5)
from ast import literal_eval
def f(x):
try:
return literal_eval(x)
except:
return x
agg_data["TRANSITION_ID_UNIQUE"] = agg_data["TRANSITION_ID_UNIQUE"].apply(f)
print(len(set(agg_data["FEATURE_ID"])))
print(len(set(agg_data.explode("TRANSITION_ID_UNIQUE")["FEATURE_ID"])))
exploded = agg_data.explode("TRANSITION_ID_UNIQUE", ignore_index=True)
# HOW MANY FEATURES ARE BOUND TO A TRANSITION? MAJORITY ARE ONE
from collections import Counter
transition_counts_map = dict(Counter(list(exploded["TRANSITION_ID_UNIQUE"])))
transition_counts = list(transition_counts_map.values())
# Sort by key
transition_counts_counter = dict(
sorted(dict(Counter(transition_counts)).items(), key=lambda item: item[0])
)
labels = list(transition_counts_counter.keys())
counts = list(transition_counts_counter.values())
perc_1 = f"{round(transition_counts_counter[1] / len(transition_counts) * 100, 1)}%"
perc_2 = f"{round(transition_counts_counter[2] / len(transition_counts) * 100, 1)}%"
perc_3 = f"{round(transition_counts_counter[3] / len(transition_counts) * 100, 1)}%"
fig = plt.figure()
fig.set_size_inches(5, 5)
plt.pie(counts, labels=[perc_1, perc_2, perc_3, "", ""])
plt.title("No. transitions per feature ID")
plt.legend(labels=labels, bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.0)
fig.tight_layout()
plt.show()
x = dict(Counter(transition_counts))
dict(sorted(x.items(), key=lambda item: item[0]))
mzml_osw_qval_001_mz_window_001_dir = TSV_DIR / "mapped_mzml_to_osw_qval_01_mz_window_20ppm"
x = pd.read_csv(
mzml_osw_qval_001_mz_window_001_dir / "merged_20220127_frame=22786_scan=452_qvalue_01.tsv",
sep="\t",
)
exploded.head(15)
min(exploded["TRANSITION_ID_UNIQUE"])
agg_data["MZ"] = agg_data["MZ"].apply(f)
mz_explode = agg_data.explode("MZ", ignore_index=True)
plt.hist(mz_explode["MZ"], bins=300)
plt.show()
agg_data["EXP_RT"] = agg_data["EXP_RT"].apply(f)
mz_explode = agg_data.explode("EXP_RT", ignore_index=True)
plt.hist(mz_explode["EXP_RT"], bins=300)
plt.show()
agg_data["IM_FLAT"] = agg_data["IM_FLAT"].apply(f)
mz_explode = agg_data.explode("IM_FLAT", ignore_index=True)
plt.hist(mz_explode["IM_FLAT"], bins=142)
plt.show()
agg_data["INTENSITY_FLAT"] = agg_data["INTENSITY_FLAT"].apply(f)
mz_explode = agg_data.explode("INTENSITY_FLAT", ignore_index=True)
intensities = [item for item in mz_explode["INTENSITY_FLAT"] if item < 150 and item > 11]
plt.hist(intensities, bins=138)
plt.show()
# sorted(mz_explode["INTENSITY_FLAT"])[:-20000]
list(agg_data)
###Output
_____no_output_____ |
scikit-learn/More-Scikit-Learn/Untitled.ipynb | ###Markdown
Loading a Scikit-Learn dataset
###Code
# loading
from sklearn import datasets
# get digits dataset
digits = datasets.load_digits()
# Create a feature matrix
X = digits.data
# create a target vector
y = digits.target
# View the feature matrix
X[0]
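# The digits data is 1797 flattened 8x8 grayscale images, so X is expected to
# have shape (1797, 64) and y holds the digit labels 0-9.
print(X.shape, y.shape)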
###Output
_____no_output_____ |
notebooks/bigquery:vegvar.standardized.maalestasjoner.ipynb | ###Markdown
This query computes the average, minimum and maximum temperature per measuring station and date, from 1 to 7 January 2022. Note that it uses both `maaledata` and `maalestasjoner`.
###Code
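# NOTE (added sketch): the queries in this notebook reference `client` and
# `project`, which are presumably created in an earlier setup cell not shown
# here. A minimal setup, assuming the google-cloud-bigquery package and
# default application credentials, could look like:
# from google.cloud import bigquery
# client = bigquery.Client()
# project = client.project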
query = f"""
SELECT
navn,
DATE(md.maaletidspunkt, "Europe/Oslo") dato,
ROUND(AVG(lufttemperaturCelsius), 1) gjennomsnittLufttemperaturCelsius,
MIN(lufttemperaturCelsius) minimumLufttemperaturCelsius,
MAX(lufttemperaturCelsius) maksimumLufttemperaturCelsius
FROM `{project}.standardized.maaledata` md
JOIN `{project}.standardized.maalestasjoner` ms
ON ms.id = md.maalestasjonId
AND ms.versjon = md.maalestasjonVersjon
WHERE DATE(md.maaletidspunkt, "Europe/Oslo") BETWEEN "2022-01-01" AND "2022-01-07"
GROUP BY navn, dato
ORDER BY navn, dato
LIMIT 30
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____
###Markdown
This query shows the name and coordinates of the 10 warmest measuring stations on 1 January 2022. Note that it uses both `maaledata` and `maalestasjoner`.
###Code
query = f"""
SELECT
ANY_VALUE(navn) navn,
ANY_VALUE(lokasjon) lokasjon,
MAX(lufttemperaturCelsius) maksTemperatur,
DATE(maaletidspunkt, "Europe/Oslo") dato
FROM `{project}.standardized.maaledata` md
JOIN `{project}.standardized.maalestasjoner` ms
ON md.maalestasjonId = ms.id AND md.maalestasjonVersjon = ms.versjon
WHERE DATE(maaletidspunkt, "Europe/Oslo") = "2022-01-01"
GROUP BY ms.id, ms.versjon, dato
ORDER BY maksTemperatur DESC
LIMIT 10
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____
###Markdown
This query fetches the id, version, name and location for the latest version of every measuring station.
###Code
query = f"""
SELECT * EXCEPT (row_number) FROM (
SELECT id, versjon, navn, lokasjon, row_number() OVER (
PARTITION BY id
ORDER BY CAST(versjon AS INT) DESC
) row_number
FROM `{project}.standardized.maalestasjoner`
)
WHERE row_number = 1
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____
###Markdown
This query fetches all measuring stations that have measurement data from the last 24 hours, sorted by station name.
###Code
query = f"""
SELECT id, versjon, ANY_VALUE(navn) navn, ANY_VALUE(lokasjon) lokasjon
FROM `{project}.standardized.maalestasjoner` ms
JOIN `{project}.standardized.maaledata` md
ON id = maalestasjonId AND versjon = maalestasjonVersjon
WHERE md.maaletidspunkt >= TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 24 HOUR)
GROUP BY id, versjon
ORDER BY navn
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____ |
100days/day 39 - 4sum.ipynb | ###Markdown
algorithm
###Code
from collections import defaultdict
from itertools import combinations, product
import numpy as np
def sum4(data):
# store 2-sums
sum_of_2 = defaultdict(list)
for i, j in combinations(range(len(data)), 2):
k = data[i] + data[j]
sum_of_2[k].append((i, j))
# match pairs of 2-sums
sum_of_4 = set()
for k in sum_of_2:
if k >= 0 and -k in sum_of_2:
for i, j in product(sum_of_2[k], sum_of_2[-k]):
index = tuple(sorted(set(i + j)))
if len(index) == 4:
sum_of_4.add(index)
return sum_of_4
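# Complexity note (added): all O(n^2) index pairs are bucketed by their sum,
# then each sum k is matched with -k, so building the map costs O(n^2) time
# and space; the pairing step can still be quadratic in the number of stored
# pairs for highly repetitive inputs.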
###Output
_____no_output_____
###Markdown
run
###Code
n = 10
data = np.random.randint(-n, n, n)
data
for index in sum4(data):
print(index, data[list(index)])
###Output
(0, 1, 4, 8) [ 5 1 4 -10]
(1, 2, 6, 7) [ 1 -9 7 1]
(1, 3, 4, 7) [ 1 -6 4 1]
(0, 3, 6, 9) [ 5 -6 7 -6]
(0, 4, 7, 8) [ 5 4 1 -10]
(1, 4, 7, 9) [ 1 4 1 -6]
|
_notebooks/2021-09-03-hotel_booking_cancel.ipynb | ###Markdown
Let's find out what causes hotel bookings to be cancelled. This analysis is excerpted from a case study on the DoWhy library site and uses the hotel booking dataset of Antonio, Almeida and Nunes (2019). The data is available from rfordatascience/tidytuesday on GitHub. There can be many reasons why a hotel booking gets cancelled. For example, - 1. the customer makes a request the hotel finds hard to satisfy (e.g. the hotel is short on parking and the customer asks for a parking space) and cancels after the request is declined, or - 2. the customer cancels the hotel booking because they cancelled their travel plans. In case 1 the hotel can take additional measures (e.g. secure parking space at another facility), whereas in case 2 there is nothing the hotel can do. Either way, our goal is to understand the causes that drive booking cancellations in more detail. The best way to discover this would be an experiment such as an RCT (Randomized Controlled Trial): to quantify the effect of providing parking on cancellations, split customers into two groups, allocate parking space to one group and not the other, and compare the cancellation rates between the groups. Of course, if word of such an experiment got out, the hotel's business would be finished. With only historical data and hypotheses, how should we find the answer?
###Code
%reload_ext autoreload
%autoreload 2
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'': {
'level': 'INFO',
},
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
# Disabling warnings output
import warnings
# !pip install sklearn
from sklearn.exceptions import DataConversionWarning, ConvergenceWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=UserWarning)
# !pip install dowhy
import dowhy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# pd.options.plotting.backend = 'plotly'
dataset = pd.read_csv('https://raw.githubusercontent.com/Sid-darthvader/DoWhy-The-Causal-Story-Behind-Hotel-Booking-Cancellations/master/hotel_bookings.csv')
# dataset = pd.read_csv('hotel_bookings.csv')
dataset.head()
dataset.columns
dataset[['is_canceled']].plot()
###Output
_____no_output_____
###Markdown
Feature Engineering. Now let's create meaningful features to reduce the number of dimensions. **Total Stay** = **stays_in_week_nights** + **stays_in_weekend_nights**  **Guests** = **adults** + **children** + **babies**  **Different_room_assigned** = 1 if a room different from the reserved one was assigned, otherwise 0
###Code
# Total stay in nights
dataset['total_stay'] = dataset['stays_in_week_nights']+dataset['stays_in_weekend_nights']
# Total number of guests
dataset['guests'] = dataset['adults']+dataset['children'] +dataset['babies']
# Creating the different_room_assigned feature
dataset['different_room_assigned']=0
slice_indices = dataset['reserved_room_type']!=dataset['assigned_room_type']
dataset.loc[slice_indices,'different_room_assigned']=1
# Deleting older features
dataset = dataset.drop(['stays_in_week_nights','stays_in_weekend_nights','adults','children','babies'
,'reserved_room_type','assigned_room_type'],axis=1)
###Output
_____no_output_____
###Markdown
Columns with many missing values or many unique values will rarely be used in this analysis, so we delete them. For country, we fill the missing values with the most frequently occurring country. **distribution_channel** overlaps heavily with **market_segment**, so we delete it as well.
###Code
dataset.isnull().sum() # Country,Agent,Company contain 488,16340,112593 missing entries
dataset = dataset.drop(['agent','company'],axis=1)
# Replacing missing countries with most freqently occuring countries
dataset['country']= dataset['country'].fillna(dataset['country'].mode()[0])
dataset = dataset.drop(['reservation_status','reservation_status_date','arrival_date_day_of_month'],axis=1)
dataset = dataset.drop(['arrival_date_year'],axis=1)
dataset = dataset.drop(['distribution_channel'], axis=1)
# Replacing 1 by True and 0 by False for the experiment and outcome variables
dataset['different_room_assigned']= dataset['different_room_assigned'].replace(1,True)
dataset['different_room_assigned']= dataset['different_room_assigned'].replace(0,False)
dataset['is_canceled']= dataset['is_canceled'].replace(1,True)
dataset['is_canceled']= dataset['is_canceled'].replace(0,False)
dataset.dropna(inplace=True)
print(dataset.columns)
dataset.iloc[:, 5:20].head(100)
dataset = dataset[dataset.deposit_type=="No Deposit"]
dataset.groupby(['deposit_type','is_canceled']).count()
dataset_copy = dataset.copy(deep=True)
###Output
_____no_output_____
###Markdown
Calculating Expected Count. Let's set up a hypothesis: - *A customer cancels the booking when assigned a room different from the one reserved.* We can split the data into the group that matches the hypothesis and the group that does not, and compute the *number of observations in the matching group*. Because **is_canceled** and **different_room_assigned** are highly imbalanced (different_room_assigned=0: 104,469 rows, different_room_assigned=1: 14,917 rows), we randomly sample 1,000 observations and - count how often **different_room_assigned** and **is_canceled** take the same value (i.e. case 1: "assigned a different room" & "booking cancelled", and case 2: "assigned the reserved room" & "booking kept"). Repeating this process (sample and count) 10,000 times gives the expected count of the matching group. When computed, the expected count of the *matching group* is close to 50% (i.e. the probability that the two variables randomly take the same value). Put differently, if a random customer is assigned a room other than the one booked, they may or may not cancel, so statistically there is no clear conclusion at this stage.
###Code
counts_sum=0
for i in range(1,10000):
counts_i = 0
rdf = dataset.sample(1000)
counts_i = rdf[rdf["is_canceled"]== rdf["different_room_assigned"]].shape[0]
# counts_i = rdf.loc[(rdf["is_canceled"]==1)&(rdf["different_room_assigned"]==1)].shape[0]
counts_sum+= counts_i
counts_sum/10000
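# Added sanity check (illustrative sketch, not part of the original analysis):
# the agreement expected purely by chance follows directly from the marginals.
p_cancel = dataset["is_canceled"].mean()
p_diff = dataset["different_room_assigned"].mean()
print(1000 * (p_cancel * p_diff + (1 - p_cancel) * (1 - p_diff)))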
###Output
_____no_output_____
###Markdown
Now let's check the expected count of the matching group among bookings with zero booking changes.
###Code
# Expected Count when there are no booking changes
counts_sum=0
for i in range(1,10000):
counts_i = 0
rdf = dataset[dataset["booking_changes"]==0].sample(1000)
counts_i = rdf[rdf["is_canceled"]== rdf["different_room_assigned"]].shape[0]
counts_sum+= counts_i
counts_sum/10000
###Output
_____no_output_____
###Markdown
As a second case, let's check the expected count of the matching group among bookings with one or more booking changes.
###Code
# Expected Count when there are booking changes = 66.4%
counts_sum=0
for i in range(1,10000):
counts_i = 0
rdf = dataset[dataset["booking_changes"]>0].sample(1000)
counts_i = rdf[rdf["is_canceled"]== rdf["different_room_assigned"]].shape[0]
counts_sum+= counts_i
counts_sum/10000
###Output
_____no_output_____
###Markdown
The expected count when there is at least one booking change (about 600) is much larger than when there are no booking changes (about 500). From this we can see that the **Booking Changes** column is a confounding variable (it affects both X and Y and distorts the size of the causal relationship between them). But is **Booking Changes** the only confounding variable? If there is a confounding variable among the columns that we have not identified, can we still make the same claim as before? Step-1. Create a Causal Graph. Let's first express the prior knowledge related to the prediction model as a causal graph. You don't need to draw the full graph, so don't worry. The assumptions we express are: * The **Market Segment** column has two values: **TA** means Travel Agent and **TO** means Tour Operator. Market segment will affect **Lead Time** (the time from booking to check-in). For details on TA/TO see: https://www.tenontours.com/the-difference-between-tour-operators-and-travel-agents/ * **Country** helps determine whether a customer books early or late and which meal the customer prefers. * **Lead Time** affects the size of **Days in Waitlist** (book late and fewer rooms remain). * The size of **Days in Waitlist**, **Total Stay in nights** and **Guests** affects whether the booking is cancelled or kept (with many guests and a long stay, finding another hotel is not easy). * **Previous Booking Retentions** affects whether a customer is a Repeated Guest, and both variables also affect whether the booking is cancelled (for example, a customer who kept several previous bookings is likely to keep the next one, and a customer who frequently cancelled is likely to cancel again). * **Booking Changes** (as we saw earlier) affects whether the customer is assigned a different room and also the cancellation. * Finally, it is empirically unlikely that **Booking Changes** is the *only* variable confounding the causal variable we care about (different room assigned).
###Code
import pygraphviz
causal_graph = """digraph {
different_room_assigned[label="Different Room Assigned"];
is_canceled[label="Booking Cancelled"];
booking_changes[label="Booking Changes"];
previous_bookings_not_canceled[label="Previous Booking Retentions"];
days_in_waiting_list[label="Days in Waitlist"];
lead_time[label="Lead Time"];
market_segment[label="Market Segment"];
country[label="Country"];
U[label="Unobserved Confounders"];
is_repeated_guest;
total_stay;
guests;
meal;
hotel;
U->different_room_assigned; U->is_canceled;U->required_car_parking_spaces;
market_segment -> lead_time;
lead_time->is_canceled; country -> lead_time;
different_room_assigned -> is_canceled;
country->meal;
lead_time -> days_in_waiting_list;
days_in_waiting_list ->is_canceled;
previous_bookings_not_canceled -> is_canceled;
previous_bookings_not_canceled -> is_repeated_guest;
is_repeated_guest -> is_canceled;
total_stay -> is_canceled;
guests -> is_canceled;
booking_changes -> different_room_assigned; booking_changes -> is_canceled;
hotel -> is_canceled;
required_car_parking_spaces -> is_canceled;
total_of_special_requests -> is_canceled;
country->{hotel, required_car_parking_spaces,total_of_special_requests,is_canceled};
market_segment->{hotel, required_car_parking_spaces,total_of_special_requests,is_canceled};
}"""
###Output
_____no_output_____
###Markdown
The treatment is whether the customer was assigned the room they chose when booking (**different_room_assigned**). The outcome is whether the booking gets cancelled (**is_canceled**). A common cause is a variable that affects both the treatment and the outcome; **Booking Changes** and **Unobserved Confounders** (confounders we have not identified) are the two common causes. If we do not specify the graph explicitly (not recommended!), the confounders are supplied as parameters instead.
###Code
model= dowhy.CausalModel(
data = dataset,
graph=causal_graph.replace("\n", " "),
treatment='different_room_assigned',
outcome='is_canceled')
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
Step2. Identify the Causal Effect. If changes in the treatment variable bring about only changes in the outcome variable, we can say that the treatment affected the outcome. In this step we identify that causal effect.
###Code
import statsmodels
model= dowhy.CausalModel(
data = dataset,
graph=causal_graph.replace("\n", " "),
treatment="different_room_assigned",
outcome='is_canceled')
#Identify the causal effect
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
###Output
Estimand type: nonparametric-ate
### Estimand : 1
Estimand name: backdoor
Estimand expression:
d
──────────────────────────(Expectation(is_canceled|hotel,days_in_waiting_list,
d[different_room_assigned]
booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,coun
try,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeate
d_guest,total_stay))
Estimand assumption 1, Unconfoundedness: If U→{different_room_assigned} and U→is_canceled then P(is_canceled|different_room_assigned,hotel,days_in_waiting_list,booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,country,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeated_guest,total_stay,U) = P(is_canceled|different_room_assigned,hotel,days_in_waiting_list,booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,country,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeated_guest,total_stay)
### Estimand : 2
Estimand name: iv
No such variable found!
### Estimand : 3
Estimand name: frontdoor
No such variable found!
###Markdown
Step3. Estimate the identified estimand
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_stratification",target_units="ate")
# ATE = Average Treatment Effect
# ATT = Average Treatment Effect on Treated (i.e. those who were assigned a different room)
# ATC = Average Treatment Effect on Control (i.e. those who were not assigned a different room)
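# (added, hedged sketch of the DoWhy API) ATT/ATC can be estimated by rerunning
# the same call with a different target_units argument, e.g.:
# estimate_att = model.estimate_effect(identified_estimand,
#     method_name="backdoor.propensity_score_stratification", target_units="att")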
print(estimate)
###Output
*** Causal Estimate ***
## Identified estimand
Estimand type: nonparametric-ate
### Estimand : 1
Estimand name: backdoor
Estimand expression:
d
──────────────────────────(Expectation(is_canceled|hotel,days_in_waiting_list,
d[different_room_assigned]
booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,coun
try,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeate
d_guest,total_stay))
Estimand assumption 1, Unconfoundedness: If U→{different_room_assigned} and U→is_canceled then P(is_canceled|different_room_assigned,hotel,days_in_waiting_list,booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,country,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeated_guest,total_stay,U) = P(is_canceled|different_room_assigned,hotel,days_in_waiting_list,booking_changes,market_segment,previous_bookings_not_canceled,meal,guests,country,total_of_special_requests,required_car_parking_spaces,lead_time,is_repeated_guest,total_stay)
## Realized estimand
b: is_canceled~different_room_assigned+hotel+days_in_waiting_list+booking_changes+market_segment+previous_bookings_not_canceled+meal+guests+country+total_of_special_requests+required_car_parking_spaces+lead_time+is_repeated_guest+total_stay
Target units: ate
## Estimate
Mean value: -0.2509265086102207
###Markdown
Quite an interesting result. Looking at the effect computed by the library, when a room different from the reservation is assigned (**different_room_assigned** = 1), the booking is *less* likely to be cancelled. Thinking one step further... is this really the correct causal effect? Can assigning a different room when the reserved one is unavailable have a positive effect on the customer? There may be another mechanism: assigning a different room only happens at check-in, which means the customer has already arrived at the hotel, so the chance of cancelling is low. If this mechanism is right, the graph we assumed contains no information about *when* the different-room assignment happens. If we knew when that event occurs, it could help improve the analysis. The earlier association analysis found a positive correlation between is_canceled and different_room_assigned, but estimating the causal effect with the DoWhy library gives a different result. This means that a decision by the hotel to reduce the number of "assign a different room" events could be counterproductive. Step4. Refute result. Causation does not come from the data itself; the data is merely used for statistical estimation. In other words, it is important to check whether our hypothesis ("a customer cancels the booking when assigned a room different from the one reserved") is valid. What if there is yet another common cause? What if the effect of the treatment variable is due to a placebo effect? Method-1 **Random Common Cause:** add a randomly drawn covariate to the data, rerun the analysis, and check whether the causal estimate changes. If our assumption was correct, the causal estimate should not change much.
###Code
refute1_results=model.refute_estimate(identified_estimand, estimate,
method_name="random_common_cause")
print(refute1_results)
###Output
Refute: Add a Random Common Cause
Estimated effect:-0.2509265086102207
New effect:-0.24891037769504973
###Markdown
Method-2 **Placebo Treatment Refuter:** assign a random covariate to the treatment variable and rerun the analysis. If our assumption was correct, the new estimate should be 0.
###Code
refute2_results=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter")
print(refute2_results)
###Output
Refute: Use a Placebo Treatment
Estimated effect:-0.2509265086102207
New effect:0.0003681316319065167
p value:0.43
###Markdown
Method-3 **Data Subset Refuter:** create subsets of the data (as in cross-validation) and run the analysis on each subset, checking how much the estimate varies. If our assumption was correct, the estimate should not change much.
###Code
refute3_results=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter")
print(refute3_results)
###Output
Refute: Use a subset of data
Estimated effect:-0.2509265086102207
New effect:-0.24911870944918946
p value:0.19
|
Project1/notebooks/simulation.ipynb | ###Markdown
Elliptical Trap Simulation
###Code
import sys
sys.path.append("../analysis/")
import numpy as np
import matplotlib.pyplot as plt
import analysis as src
from multiprocessing import Process, Queue
import pandas as pd
import time
from tqdm import tqdm
#plt.gcf().subplots_adjust(bottom=0.15)
plt.style.use("../lib/rapport.mplstyle")
%load_ext autoreload
%autoreload 2
def saveto(fig, path):
lgd = fig.legend(loc='lower left',# mode='expand',-
ncol=2,
bbox_to_anchor=(0.1, 1.02, 1, 0.2))
fig.savefig(f"../figures/{path}.pdf", bbox_inches='tight')
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
10 Particles
###Code
conf = src.config()
cutoff = 500
conf["directory"] = "elliptical_10_interacting"
conf["threads"] = 4
conf["numPart"] = 10
conf["numDim"] = 3
conf["numSteps"] = 2**20 + cutoff
conf["stepLength"] = 0.5
conf["importanceSampling"] = 1
conf["alpha"] = 0.5
conf["a"] = 0.0043
conf["InitialState"] = "HardshellInitial"
conf["Wavefunction"] = "EllipticalHardshellWavefunction"
conf["Hamiltonian"] = "EllipticalOscillator"
###Output
_____no_output_____
###Markdown
Gradient descent
###Code
mu = 0.01
for i in range(5):
src.runner(conf)
localEnergies, _, psiGrad, acceptanceRate = src.readData(conf)
gradient = src.calculateGradient(localEnergies, psiGrad)
conf["alpha"] -= mu*gradient
print(f"gradient: {gradient:.5f}. alpha: {conf['alpha']:.5f}. acceptance rate: {acceptanceRate[0]:.5f}.")
###Output
gradient: 0.25912. alpha: 0.49741. acceptance rate: 0.62735.
gradient: -0.00247. alpha: 0.49743. acceptance rate: 0.62978.
gradient: 0.00085. alpha: 0.49743. acceptance rate: 0.62976.
gradient: -0.00100. alpha: 0.49744. acceptance rate: 0.62977.
gradient: 0.00098. alpha: 0.49743. acceptance rate: 0.62976.
###Markdown
Using optimal alpha
###Code
conf["numSteps"] = 2**20 + cutoff
conf["alpha"] = 0.49752
src.runner(conf, verbose = True)
# localEnergies, _, psiGrad, acceptanceRate = src.readData(conf, cutoff, readPos = False)
#localEnergies = np.concatenate(localEnergies)
bins = np.linspace(0, 3, 200)
densityInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
conf["directory"] = "elliptical_10_noninteracting"
conf["a"] = 0
src.runner(conf, verbose = True)
bins = np.linspace(0, 3, 200)
densityNonInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
###Output
100%|██████████| 10490760/10490760 [00:12<00:00, 836497.52it/s]
93%|█████████▎| 9807413/10490760 [00:12<00:00, 729700.87it/s]]
100%|██████████| 10490760/10490760 [00:12<00:00, 826853.98it/s]
100%|██████████| 10490760/10490760 [00:12<00:00, 815504.06it/s]
100%|██████████| 10490760/10490760 [00:12<00:00, 812735.23it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 799394.90it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 794229.90it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 790797.40it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 799509.52it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 784127.73it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 787103.65it/s]
100%|██████████| 10490760/10490760 [00:13<00:00, 782260.81it/s]
###Markdown
Estimation of energy and uncertainty
###Code
E = np.mean(localEnergies)
Var = src.blocking(localEnergies, 18)
plt.plot(Var)
plt.show()
print(f"<E> = {E} +- {np.sqrt(Var[9])}")
###Output
<E> = 24.398477790886265 +- 0.0011886200428013818
###Markdown
Radial onebody density
###Code
fig = plt.figure()
plt.plot(bins, densityNonInteracting, label="Non-Interacting")
plt.plot(bins, densityInteracting, "--", label="Interacting")
plt.xlabel("R")
plt.ylabel("number of particles per R")
plt.show()
saveto(fig, "density10")
#fig.savefig("figures/density10.pdf", bbox_inches = "tight")
###Output
_____no_output_____
###Markdown
50 particles
###Code
conf = src.config()
cutoff = 2000
conf["threads"] = 12
conf["numPart"] = 50
conf["numDim"] = 3
conf["numSteps"] = 2**20 + cutoff
conf["stepLength"] = 0.5
conf["importanceSampling"] = 1
conf["alpha"] = 0.49752
conf["a"] = 0.0043
conf["InitialState"] = "HardshellInitial"
conf["Wavefunction"] = "EllipticalHardshellWavefunction"
conf["Hamiltonian"] = "EllipticalOscillator"
mu = 0.001
for i in range(5):
src.runner(conf)
localEnergies, _, psiGrad, acceptanceRate = src.readData(conf, cutoff, readPos = False)
gradient = src.calculateGradient(localEnergies, psiGrad)
conf["alpha"] -= mu*gradient
print(f"gradient: {gradient:.5f}. alpha: {conf['alpha']:.5f}. acceptance rate: {acceptanceRate[0]:.5f}.")
###Output
gradient: 4.12064. alpha: 0.49340. acceptance rate: 0.64611.
gradient: 2.27719. alpha: 0.49112. acceptance rate: 0.64988.
gradient: 0.93448. alpha: 0.49019. acceptance rate: 0.65196.
gradient: 1.08126. alpha: 0.48911. acceptance rate: 0.65279.
gradient: 0.07514. alpha: 0.48903. acceptance rate: 0.65379.
###Markdown
Using optimal alpha
###Code
conf["directory"] = "elliptical_50_interacting"
conf["alpha"] = 0.48903
src.runner(conf, verbose = True)
localEnergies, _, psiGrad, acceptanceRate = src.readData(conf, cutoff, readPos = False)
localEnergies = np.concatenate(localEnergies)
bins = np.linspace(0, 3, 200)
conf["threads"] = 6 #downscale, to avoid using too much memory
densityInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
conf["directory"] = "elliptical_50_noninteracting"
conf["alpha"] = 0.48903
conf["a"] = 0
src.runner(conf, verbose = True)
bins = np.linspace(0, 3, 200)
densityNonInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
###Output
100%|██████████| 52528800/52528800 [00:52<00:00, 998134.09it/s]]
100%|██████████| 52528800/52528800 [00:52<00:00, 999670.98it/s]
100%|██████████| 52528800/52528800 [00:53<00:00, 990613.62it/s]
100%|██████████| 52528800/52528800 [00:53<00:00, 985571.48it/s]
100%|██████████| 52528800/52528800 [00:54<00:00, 962114.46it/s]
100%|██████████| 52528800/52528800 [00:54<00:00, 959578.33it/s]
###Markdown
Estimation of energy and uncertainty
###Code
E = np.mean(localEnergies)
Var = src.blocking(localEnergies, 18)
plt.plot(Var)
plt.show()
print(f"<E> = {E} +- {np.sqrt(Var[13])}")
###Output
<E> = 127.37989862150846 +- 0.12304662071136907
###Markdown
Radial onebody density
###Code
fig = plt.figure()
plt.plot(bins, densityNonInteracting)
plt.plot(bins, densityInteracting, "--")
plt.xlabel("R")
plt.ylabel("number of particles per R")
plt.grid()
plt.show()
saveto(fig, "density50")
#fig.savefig("figures/density50.pdf", bbox_inches = "tight")
###Output
_____no_output_____
###Markdown
100 Particles
###Code
conf = src.config()
cutoff = 2000
conf["threads"] = 12
conf["numPart"] = 100
conf["numDim"] = 3
conf["numSteps"] = 2**20 + cutoff
conf["stepLength"] = 0.5
conf["importanceSampling"] = 1
conf["alpha"] = 0.48903
conf["a"] = 0.0043
conf["InitialState"] = "HardshellInitial"
conf["Wavefunction"] = "EllipticalHardshellWavefunction"
conf["Hamiltonian"] = "EllipticalOscillator"
mu = 0.001
for i in range(5):
src.runner(conf)
localEnergies, _, psiGrad, acceptanceRate = src.readData(conf, cutoff, readPos = False)
gradient = src.calculateGradient(localEnergies, psiGrad)
conf["alpha"] -= mu*gradient
print(f"gradient: {gradient:.5f}. alpha: {conf['alpha']:.5f}. acceptance rate: {acceptanceRate[0]:.5f}.")
###Output
gradient: 8.97470. alpha: 0.48006. acceptance rate: 0.66570.
gradient: -2.12743. alpha: 0.48218. acceptance rate: 0.67375.
gradient: 0.93860. alpha: 0.48124. acceptance rate: 0.67187.
gradient: 2.76470. alpha: 0.47848. acceptance rate: 0.67276.
gradient: -3.11632. alpha: 0.48160. acceptance rate: 0.67520.
###Markdown
Using optimal alpha
###Code
conf["directory"] = "elliptical_100_interacting"
conf["alpha"] = 0.48160
src.runner(conf, verbose = True)
localEnergies, _, psiGrad, acceptanceRate = src.readData(conf, cutoff, readPos = False)
localEnergies = np.concatenate(localEnergies)
bins = np.linspace(0, 3, 200)
conf["threads"] = 3 #downscale, to avoid using too much memory
densityInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
conf["directory"] = "elliptical_100_noninteracting"
conf["alpha"] = 0.48160
conf["a"] = 0
src.runner(conf, verbose = True)
bins = np.linspace(0, 3, 200)
densityNonInteracting = src.densityParallel(conf, bins)/conf["numSteps"]
###Output
100%|██████████| 105057600/105057600 [01:41<00:00, 1032170.78it/s]
100%|██████████| 105057600/105057600 [01:43<00:00, 1014826.16it/s]
100%|██████████| 105057600/105057600 [01:44<00:00, 1009077.33it/s]
###Markdown
Estimation of Energy and Uncertainty
###Code
E = np.mean(localEnergies)
Var = src.blocking(localEnergies, 18)
plt.plot(Var)
print(f"<E> = {E} +- {np.sqrt(Var[15])}")
###Output
<E> = 265.5433074826718 +- 0.4914128178006312
###Markdown
Radial Onebody Density
###Code
fig = plt.figure()
plt.plot(bins, densityNonInteracting)
plt.plot(bins, densityInteracting, "--")
plt.xlabel("R")
plt.ylabel("number of particles per R")
plt.grid()
plt.show()
fig.savefig("figures/density100.pdf", bbox_inches = "tight")
###Output
_____no_output_____ |
final_project/Riiid_ANN_Final.ipynb | ###Markdown

Riiid is a company whose goal is to improve the quality of education using AI.
Riiid wants to make personalised education better for every student using AI. Riiid Labs, an AI solutions provider delivering creative disruption to the education market, empowers global education players to rethink traditional ways of learning leveraging AI. With a strong belief in equal opportunity in education, Riiid launched an AI tutor based on deep-learning algorithms in 2017 that attracted more than one million South Korean students. This year, the company released EdNet, the world’s largest open database for AI education containing more than 100 million student interactions.
In this competition, your challenge is to create algorithms for "Knowledge Tracing," the modeling of student knowledge over time. The goal is to accurately predict how students will perform on future interactions. We will pair your machine learning skills with Riiid’s EdNet data.
Overview of the dataset
###Code
import os
os.environ['KAGGLE_USERNAME']='karanchoudhary103'
os.environ['KAGGLE_KEY']='3e84e0a4f94d7b95f0e24ff242c5aa45'
#Api command to load data in colab from kaggle
! kaggle competitions download -c riiid-test-answer-prediction
# unzipping the train.csv file; the rest of the files are already unzipped
! unzip train.csv.zip
#importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#reading the data train.csv,questions.csv,lectures.csv
data=pd.read_csv('train.csv',nrows=10000000)
data.columns
data.head()
data.shape
data.info()
#checking the null value in the dataset
data.isna().sum()
#finding the unique values in the dataset
data.nunique()
data.describe().style.applymap(lambda x:"background-color:yellow")
###Output
_____no_output_____
###Markdown
CORRELATION PLOT
--finding the correlation between independent variables
###Code
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
# Store heatmap object in a variable to easily access it when you want to include more features (such as title).
# Set the range of values to be displayed on the colormap from -1 to 1, and set the annotation to True to display the correlation values on the heatmap.
heatmap = sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=True)
# Give a title to the heatmap. Pad defines the distance of the title from the top of the heatmap.
heatmap.set_title('Correlation Heatmap for Independent variables in the dataset', fontdict={'fontsize':12}); #scale of -1 to +1
import seaborn as sns
plt.figure(figsize=(8, 12))
heatmap = sns.heatmap(data.corr()[['answered_correctly']].sort_values(by='answered_correctly', ascending=False), vmin=-1, vmax=1, annot=True, cmap='BrBG')
heatmap.set_title('Features Correlating with answered_correctly', fontdict={'fontsize':10}, pad=10); #min and max value [-1, +1]
import numpy as np
# fill the numeric elapsed-time column with its mean and the boolean explanation flag with False
data['prior_question_elapsed_time'] = data['prior_question_elapsed_time'].replace(np.nan, data['prior_question_elapsed_time'].mean())
data['prior_question_had_explanation'] = data['prior_question_had_explanation'].replace(np.nan, False)
data.isna().sum()
###Output
_____no_output_____
###Markdown
Checking the datatypes of the columns
###Code
data.dtypes
#to make the entire data in int and bool
data['prior_question_had_explanation'] = data['prior_question_had_explanation'].astype('bool')
data['prior_question_had_explanation'].dtypes
# check if there is any null or any other value besides 0 and 1
print('Unique value of content_type_id -> \t',data.content_type_id.unique())
print('count of unique value 0 and 1 are->\n',data.content_type_id.value_counts()) #to see the lectures(1) attend and the question asked
#TARGET VARIABLE ANSWERED CORRECTLY
#-1 lecture
# 0 incorrect
# 1 is correct
print(data.answered_correctly.value_counts())
def countplot(column):
plt.figure(dpi=100)
sns.countplot(data[column])
plt.show()
countplot('user_answer')
countplot('content_type_id')
countplot('answered_correctly')
#plot graph of target variable
import matplotlib.pyplot as plt
correct = data[data.answered_correctly != -1].answered_correctly.value_counts()
fig = plt.figure(figsize=(10,4))
correct.plot.bar()
plt.title("Questions answered correctly")
plt.ylabel('count ')
plt.xlabel('value')
plt.show()
pq = data[data.answered_correctly != -1].groupby(['prior_question_had_explanation']).agg({'answered_correctly': ['mean']})
fig = plt.figure(figsize=(12,4))
pq.plot.bar(legend=None)
plt.title("Answered_correctly versus Prior Question had explanation")
plt.xlabel("Percent answered correctly")
plt.ylabel("Prior question had explanation")
plt.xticks(rotation=0)
plt.show()
data[([ 'timestamp', 'user_id', 'content_id', 'content_type_id',
'task_container_id', 'user_answer', 'answered_correctly'])].hist(figsize=(15,10),color="#7504ab")
data.columns
# row_id, timestamp, user_id and task_container_id are not important columns for our target variable in this dataset
''' now we will be implementing an algorithm based on the columns prior_question_elapsed_time, prior_question_had_explanation,
and answered_correctly'''
# now we will be implementing algorithms and making predictions accordingly, based on decision tree and naive bayes
''' now we will be implementing a join operation on train.csv, lectures.csv and questions.csv, extracting the important features and building a model on them'''
###Output
_____no_output_____
###Markdown
Now we will implement a join operation on train.csv, lectures.csv and questions.csv, extracting the important features and building a model on them.
###Code
#dataset loading
train_data=pd.read_csv('train.csv',nrows=10000000)
lectures = pd.read_csv('lectures.csv')
questions = pd.read_csv('questions.csv')
questions.head()
lectures.head()
###Output
_____no_output_____
###Markdown
Now we will perform merge operations on the tables. We will select the following columns and use them to build a model:
question stats - question_success_rate, part_success_rate
user general stats - user_success_rate, user_part_success_rate, user_relative_success_rate,
user current stats - lectures_watched, prior_question_elapsed_time, prior_question_had_explanation, prior_group_answers_correct
###Code
import numpy as np
user_general_stats = train_data[train_data['content_type_id']==0][['user_id', 'answered_correctly']].groupby('user_id').agg({'answered_correctly':
['count', np.sum]})
user_general_stats.columns = user_general_stats.columns.droplevel()
user_general_stats = user_general_stats.reset_index().rename(columns={'sum': 'correct_answers', 'count': 'total_questions'})
user_general_stats['user_success_rate'] = user_general_stats['correct_answers']/user_general_stats['total_questions']
user_general_stats = user_general_stats[['user_id', 'user_success_rate']]
user_general_stats.head()
question_stats = train_data[train_data['content_type_id']==0][['content_id', 'answered_correctly']].groupby('content_id').agg({'answered_correctly':
['count', np.sum]})
question_stats.columns = question_stats.columns.droplevel()
question_stats = question_stats.reset_index().rename(columns={'sum': 'correct_answers', 'count': 'total_questions'})
question_stats['question_success_rate'] = question_stats['correct_answers']/question_stats['total_questions']
question_stats = question_stats[['content_id', 'question_success_rate']]
question_stats.head()
training = pd.merge(train_data[train_data['content_type_id']==0], user_general_stats, on='user_id', how='left')
training = pd.merge(training, question_stats, on='content_id', how='left')
training.head()
final = training[['user_success_rate', 'question_success_rate', 'answered_correctly']]
final= final.groupby('answered_correctly').agg('mean').reset_index()
final
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense , Dropout
model = keras.Sequential()
model.add(Dense(16, input_shape=(2,), activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(8, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) #auc
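# (added, hedged) The competition metric is AUC (hence the "#auc" note above);
# an alternative compile call using Keras' built-in AUC metric would be:
# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=[keras.metrics.AUC(name='auc')])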
X = training[['user_success_rate', 'question_success_rate']].values
y = training[['answered_correctly']].values
print(X.shape)
print(y.shape)
model.fit(
x=X,
y=y,
epochs=2
)
###Output
_____no_output_____ |
ml/SVM/SVM.ipynb | ###Markdown
importing libraries :
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
Data Pre-processing Step : Reading the Data:
###Code
dataset = pd.read_csv('Social_Network_Ads.csv')
###Output
_____no_output_____
###Markdown
Visulaizing the Data:
###Code
dataset.shape
dataset.head()
###Output
_____no_output_____
###Markdown
Defining the features and the Target:
###Code
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:,4].values
###Output
_____no_output_____
###Markdown
Splitting the dataset into training and test data:
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25,random_state=0)
###Output
_____no_output_____
###Markdown
Feature Scaling:
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)  # use the scaler fitted on the training set, don't refit on test data
###Output
_____no_output_____
###Markdown
for linear kernel: Fitting classifier to the Training Set:
###Code
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state=0)
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the Test Set Resutls:
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Making the Confusion Matrix:
###Code
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
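# (added suggestion) Per-class precision/recall complement the raw counts:
# from sklearn.metrics import classification_report
# print(classification_report(y_test, y_pred))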
###Output
[[63 5]
[ 7 25]]
###Markdown
Model Score:
###Code
score = classifier.score(X_test,y_test)
print(score)
###Output
0.88
###Markdown
Visualising the Training set results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
Visualizing the Test Set Results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test  # use the test split for the test-set plot
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
for rbf kernel: Fitting classifier to the Training Set:
###Code
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state=0)
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the Test Set Resutls:
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Making the Confusion Matrix:
###Code
from sklearn.metrics import confusion_matrix
cm1 = confusion_matrix(y_test, y_pred)
print(cm1)
###Output
[[64 4]
[ 3 29]]
###Markdown
Model Score:
###Code
score1 = classifier.score(X_test,y_test)
print(score1)
###Output
0.93
###Markdown
Visualising the Training set results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
Visualizing the Test Set Results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test  # use the test split for the test-set plot
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
for poly kernel: Fitting classifier to the Training Set:
###Code
from sklearn.svm import SVC
classifier = SVC(kernel = 'poly', random_state=0)
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the Test Set Results:
###Code
y_pred = classifier.predict(X_test)
###Output
_____no_output_____
###Markdown
Making the Confusion Matrix:
###Code
from sklearn.metrics import confusion_matrix
cm2 = confusion_matrix(y_test, y_pred)
print(cm2)
###Output
[[64 4]
[12 20]]
###Markdown
Model Score:
###Code
score2 = classifier.score(X_test,y_test)
print(score2)
###Output
0.84
###Markdown
Visualising the Training set results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
Visualizing the Test Set Results:
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test  # use the held-out test split for the test-set plot
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
|
notebooks/semisupervised/cifar10/learned-metric/not-augmented-Y-thresh0.8/cifar10-aug-4ex-learned-nothresh-Y-not-augmented.ipynb | ###Markdown
Choose GPU
###Code
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=3
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
###Output
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
###Markdown
Load packages
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
import tensorflow_addons as tfa
import pickle
###Output
/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
" (e.g. in jupyter console)", TqdmExperimentalWarning)
###Markdown
parameters
###Code
dataset = "cifar10"
labels_per_class = 4 # 'full'
n_latent_dims = 1024
confidence_threshold = 0.8 # minimum confidence to include in UMAP graph for learned metric
learned_metric = True # whether to use a learned metric, or Euclidean distance between datapoints
augmented = False #
min_dist= 0.001 # min_dist parameter for UMAP
negative_sample_rate = 5 # how many negative samples per positive sample
batch_size = 128 # batch size
optimizer = tf.keras.optimizers.Adam(1e-3) # the optimizer to train
optimizer = tfa.optimizers.MovingAverage(optimizer)
label_smoothing = 0.2 # how much label smoothing to apply to categorical crossentropy
max_umap_iterations = 500 # how many times, maximum, to recompute UMAP
max_epochs_per_graph = 10 # how many epochs maximum each graph trains for (without early stopping)
graph_patience = 10 # how many times without improvement to train a new graph
min_graph_delta = 0.0025 # minimum improvement on validation acc to consider an improvement for training
from datetime import datetime
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(confidence_threshold)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_umap_augmented'
)
print(datestring)
###Output
cifar10_0.8_4____2020_08_19_23_00_09_641532_umap_augmented
###Markdown
Load dataset
###Code
from tfumap.semisupervised_keras import load_dataset
(
X_train,
X_test,
X_labeled,
Y_labeled,
Y_masked,
X_valid,
Y_train,
Y_test,
Y_valid,
Y_valid_one_hot,
Y_labeled_one_hot,
num_classes,
dims
) = load_dataset(dataset, labels_per_class)
###Output
_____no_output_____
###Markdown
load architecture
###Code
from tfumap.semisupervised_keras import load_architecture
encoder, classifier, embedder = load_architecture(dataset, n_latent_dims)
###Output
_____no_output_____
###Markdown
load pretrained weights
###Code
from tfumap.semisupervised_keras import load_pretrained_weights
encoder, classifier = load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier)
###Output
WARNING: Logging before flag parsing goes to stderr.
W0819 23:00:16.805668 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e41e198> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e41e630>).
W0819 23:00:16.808525 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e42c080> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e422c88>).
W0819 23:00:16.836151 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e8ca588> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e3a9710>).
W0819 23:00:16.841773 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e3a9710> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e2f5c50>).
W0819 23:00:16.850290 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e222588> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e1d8240>).
W0819 23:00:16.856026 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e1d8240> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e1d85c0>).
W0819 23:00:16.862545 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42ea9e320> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42ea9e630>).
W0819 23:00:16.866960 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42ea9e630> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42ea9e7b8>).
W0819 23:00:16.878203 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e382fd0> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e239f60>).
W0819 23:00:16.882596 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e239f60> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e3b3320>).
W0819 23:00:16.889384 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42ea10ac8> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42ea14128>).
W0819 23:00:16.893776 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42ea14128> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42ea14390>).
W0819 23:00:16.900657 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e584e10> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e08e160>).
W0819 23:00:16.905045 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e08e160> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e08e3c8>).
W0819 23:00:16.915707 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7fe42e3c4550> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e3c4b70>).
W0819 23:00:16.920092 140622372112128 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7fe42e3c4b70> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7fe42e3c4e10>).
###Markdown
compute pretrained accuracy
###Code
# test current acc
pretrained_predictions = classifier.predict(encoder.predict(X_test, verbose=True), verbose=True)
pretrained_predictions = np.argmax(pretrained_predictions, axis=1)
pretrained_acc = np.mean(pretrained_predictions == Y_test)
print('pretrained acc: {}'.format(pretrained_acc))
###Output
313/313 [==============================] - 3s 8ms/step
313/313 [==============================] - 0s 1ms/step
pretrained acc: 0.217
###Markdown
get a, b parameters for embeddings
###Code
from tfumap.semisupervised_keras import find_a_b
a_param, b_param = find_a_b(min_dist=min_dist)
###Output
_____no_output_____
###Markdown
build network
###Code
from tfumap.semisupervised_keras import build_model
model = build_model(
batch_size=batch_size,
a_param=a_param,
b_param=b_param,
dims=dims,
encoder=encoder,
classifier=classifier,
negative_sample_rate=negative_sample_rate,
optimizer=optimizer,
label_smoothing=label_smoothing,
embedder = embedder,
)
###Output
_____no_output_____
###Markdown
build labeled iterator
###Code
from tfumap.semisupervised_keras import build_labeled_iterator
labeled_dataset = build_labeled_iterator(X_labeled, Y_labeled_one_hot, augmented, dims)
###Output
_____no_output_____
###Markdown
training
###Code
from livelossplot import PlotLossesKerasTF
from tfumap.semisupervised_keras import get_edge_dataset
from tfumap.semisupervised_keras import zip_datasets
###Output
_____no_output_____
###Markdown
callbacks
###Code
# plot losses callback
groups = {'accuracy': ['classifier_accuracy', 'val_classifier_accuracy'], 'loss': ['classifier_loss', 'val_classifier_loss']}
plotlosses = PlotLossesKerasTF(groups=groups)
history_list = []
current_validation_acc = 0
batches_per_epoch = np.floor(len(X_train)/batch_size).astype(int)
epochs_since_last_improvement = 0
current_umap_iterations = 0
current_epoch = 0
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder / 'test_loss.npy')
for cui in tqdm(np.arange(current_epoch, max_umap_iterations)):
if len(history_list) > graph_patience+1:
previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]
best_of_patience = np.max(previous_history[-graph_patience:])
best_of_previous = np.max(previous_history[:-graph_patience])
if (best_of_previous + min_graph_delta) > best_of_patience:
print('Early stopping')
break
# make dataset
edge_dataset = get_edge_dataset(
model,
augmented,
classifier,
encoder,
X_train,
Y_masked,
batch_size,
confidence_threshold,
labeled_dataset,
dims,
learned_metric = learned_metric
)
# zip dataset
zipped_ds = zip_datasets(labeled_dataset, edge_dataset, batch_size)
# train dataset
history = model.fit(
zipped_ds,
epochs= current_epoch + max_epochs_per_graph,
initial_epoch = current_epoch,
validation_data=(
(X_valid, tf.zeros_like(X_valid), tf.zeros_like(X_valid)),
{"classifier": Y_valid_one_hot},
),
callbacks = [plotlosses],
max_queue_size = 100,
steps_per_epoch = batches_per_epoch,
#verbose=0
)
current_epoch+=len(history.history['loss'])
history_list.append(history)
# save score
class_pred = classifier.predict(encoder.predict(X_test))
class_acc = np.mean(np.argmax(class_pred, axis=1) == Y_test)
np.save(save_folder / 'test_loss.npy', (np.nan, class_acc))
# save weights
encoder.save_weights((save_folder / "encoder").as_posix())
classifier.save_weights((save_folder / "classifier").as_posix())
# save history
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump([i.history for i in history_list], file_pi)
current_umap_iterations += 1
previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]
best_of_patience = np.max(previous_history[-graph_patience:])
best_of_previous = np.max(previous_history[:-graph_patience])
if (best_of_previous + min_graph_delta) > best_of_patience:
print('Early stopping')
plt.plot(previous_history)
save_folder
###Output
_____no_output_____
###Markdown
save embedding
###Code
z = encoder.predict(X_train)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
np.save(save_folder / 'train_embedding.npy', embedding)
###Output
_____no_output_____ |
Linear-01-Linear Regression with Python.ipynb | ###Markdown
Linear Regression with Python** This is mostly just code for reference. Please watch the video lecture for more info behind all of this code.**Your neighbor is a real estate agent and wants some help predicting housing prices for regions in the USA. It would be great if you could somehow create a model for her that allows her to put in a few features of a house and returns back an estimate of what the house would sell for.She has asked you if you could help her out with your new data science skills. You say yes, and decide that Linear Regression might be a good path to solve this problem!Your neighbor then gives you some information about a bunch of houses in regions of the United States,it is all in the data set: USA_Housing.csv.The data contains the following columns:* 'Avg. Area Income': Avg. Income of residents of the city house is located in.* 'Avg. Area House Age': Avg Age of Houses in same city* 'Avg. Area Number of Rooms': Avg Number of Rooms for Houses in same city* 'Avg. Area Number of Bedrooms': Avg Number of Bedrooms for Houses in same city* 'Area Population': Population of city house is located in* 'Price': Price that the house sold at* 'Address': Address for the house **Let's get started!** Check out the dataWe've been able to get some data from your neighbor for housing prices as a csv set, let's get our environment ready with the libraries we'll need and then import the data! Import Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
!pip install seaborn
###Output
Requirement already satisfied: seaborn in c:\users\avina\appdata\roaming\python\python38\site-packages (0.11.0)
Requirement already satisfied: numpy>=1.15 in d:\programdata\anaconda3\lib\site-packages (from seaborn) (1.19.5)
Requirement already satisfied: scipy>=1.0 in c:\users\avina\appdata\roaming\python\python38\site-packages (from seaborn) (1.5.3)
Requirement already satisfied: pandas>=0.23 in c:\users\avina\appdata\roaming\python\python38\site-packages (from seaborn) (1.1.3)
Requirement already satisfied: matplotlib>=2.2 in c:\users\avina\appdata\roaming\python\python38\site-packages (from seaborn) (3.3.2)
Requirement already satisfied: pytz>=2017.2 in c:\users\avina\appdata\roaming\python\python38\site-packages (from pandas>=0.23->seaborn) (2020.1)
Requirement already satisfied: python-dateutil>=2.7.3 in c:\users\avina\appdata\roaming\python\python38\site-packages (from pandas>=0.23->seaborn) (2.8.1)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in c:\users\avina\appdata\roaming\python\python38\site-packages (from matplotlib>=2.2->seaborn) (2.4.7)
Requirement already satisfied: certifi>=2020.06.20 in c:\users\avina\appdata\roaming\python\python38\site-packages (from matplotlib>=2.2->seaborn) (2020.6.20)
###Markdown
Check out the Data
###Code
USAhousing = pd.read_csv('USA_Housing.csv')
USAhousing.head()
USAhousing.shape
USAhousing.info()
USAhousing.describe().T
#USAhousing.describe()
USAhousing.columns
###Output
_____no_output_____
###Markdown
EDALet's create some simple plots to check out the data!
###Code
sns.pairplot(USAhousing)
sns.distplot(USAhousing['Price'])
sns.heatmap(USAhousing.corr())
###Output
_____no_output_____
###Markdown
Training a Linear Regression ModelLet's now begin to train out regression model! We will need to first split up our data into an X array that contains the features to train on, and a y array with the target variable, in this case the Price column. We will toss out the Address column because it only has text info that the linear regression model can't use. X and y arrays
###Code
X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
'Avg. Area Number of Bedrooms', 'Area Population']]
y = USAhousing['Price']
X.shape
###Output
_____no_output_____
###Markdown
Train Test SplitNow let's split the data into a training set and a testing set. We will train out model on the training set and then use the test set to evaluate the model.
###Code
from sklearn.model_selection import train_test_split
#train_test_split(X,y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=17,shuffle =True)
X_test.shape
X_test.head()
###Output
_____no_output_____
###Markdown
Creating and Training the Model
###Code
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
?lm.fit
lm.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Model EvaluationLet's evaluate the model by checking out its coefficients and how we can interpret them.
###Code
# print the intercept
print(lm.intercept_)
lm.coef_
X.columns
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
coeff_df
X['Avg. Area Number of Rooms'].unique()
###Output
_____no_output_____
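###Markdown
 To see how these coefficients turn into a prediction, here is a minimal sketch (assuming the fitted `lm`, the `X_test` split, and `np` from the cells above): a prediction is simply the intercept plus the dot product of the coefficient vector with a row of features.
###Code
# Reproduce lm.predict() by hand for the first test row.
manual_pred = lm.intercept_ + np.dot(X_test.iloc[0].values, lm.coef_)
sklearn_pred = lm.predict(X_test.iloc[[0]])[0]
# The two values should agree up to floating-point error.
predictions_match = np.isclose(manual_pred, sklearn_pred)
###Output
_____no_output_____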
###Markdown
Interpreting the coefficients:- Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.52 **.- Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.28 **.- Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Rooms** is associated with an **increase of \$122368.67 **.- Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.80 **.- Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.15 **.Does this make sense? Probably not because I've fudged the data i.e. me - Avinash - made up this data. If you want real data to repeat this sort of analysis, check out the [boston dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html): from sklearn.datasets import load_boston boston = load_boston() print(boston.DESCR) boston_df = boston.data Predictions from our ModelLet's grab predictions off our test set and see how well it did!
###Code
preds = lm.predict(X_test)
preds
plt.scatter(y_test,preds)
?plt.scatter
AN = pd.DataFrame()
AN['actuals'] = y_test
AN['Pred'] = preds
#AN
AN['mape'] = (abs((AN['Pred'] - AN.actuals) / AN.actuals)) * 100
AN['mape'].mean()
AN['Accuracy'] = 100 - AN.mape
AN
AN.mape.mean()
from datetime import datetime
a = datetime.now().date()
a
AN.to_excel('housing_predictions_%s.xlsx' %a)
###Output
_____no_output_____
###Markdown
**Residual Histogram**
###Code
sns.distplot((y_test-preds),bins=50);
###Output
C:\Users\avina\AppData\Roaming\Python\Python38\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Regression Evaluation MetricsHere are three common evaluation metrics for regression problems:**Mean Absolute Error** (MAE) is the mean of the absolute value of the errors:$$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$**Mean Squared Error** (MSE) is the mean of the squared errors:$$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$**Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors:$$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$Comparing these metrics:- **MAE** is the easiest to understand, because it's the average error.- **MSE** is more popular than MAE, because MSE "punishes" larger errors, which tends to be useful in the real world.- **RMSE** is even more popular than MSE, because RMSE is interpretable in the "y" units.All of these are **loss functions**, because we want to minimize them.
###Code
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, preds))
print('MSE:', metrics.mean_squared_error(y_test, preds))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, preds)))
###Output
MAE: 83060.31483553258
MSE: 10597642180.090357
RMSE: 102944.85018732291
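###Markdown
 As a cross-check, the same numbers can be reproduced directly from the formulas above with plain NumPy (a minimal sketch, assuming `y_test` and `preds` from the cells above):
###Code
errors = y_test - preds
mae = np.mean(np.abs(errors))   # mean absolute error
mse = np.mean(errors**2)        # mean squared error
rmse = np.sqrt(mse)             # root mean squared error
###Output
_____no_output_____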
|
notebook/Tutorial-Reference_Metric.ipynb | ###Markdown
NRPy+'s Reference Metric Interface Author: Zach Etienne Formatting improvements courtesy Brandon Clark NRPy+ Source Code for this module: [reference_metric.py](../edit/reference_metric.py) Introduction: Why use a reference metric? Benefits of choosing the best coordinate system for the problemWhen solving a partial differential equation on the computer, it is useful to first pick a coordinate system well-suited to the geometry of the problem. For example, if we are modeling a spherically-symmetric star, it would be hugely wasteful to model the star in 3-dimensional Cartesian coordinates ($x$,$y$,$z$). This is because in Cartesian coordinates, we would need to choose high sampling in all three Cartesian directions. If instead we chose to model the star in spherical coordinates ($r$,$\theta$,$\phi$), so long as the star is centered at $r=0$, we would not need to model the star with more than one point in the $\theta$ and $\phi$ directions!A similar argument holds for stars that are *nearly* spherically symmetric. Such stars may exhibit density distributions that vary slowly in $\theta$ and $\phi$ directions (e.g., isolated neutron stars or black holes). In these cases the number of points needed to sample the angular directions will still be much smaller than in the radial direction.Thus choice of an appropriate reference metric may directly mitigate the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality). Table of Contents$$\label{toc}$$This notebook is organized as follows:1. [Step 1](define_ref_metric): Defining a reference metric, [`reference_metric.py`](../edit/reference_metric.py)1. [Step 2](define_geometric): Defining geometric quantities, **`ref_metric__hatted_quantities()`**1. [Step 3](prescribed_ref_metric): Prescribed reference metrics in [`reference_metric.py`](../edit/reference_metric.py) 1. [Step 3.a](sphericallike): Spherical-like coordinate systems 1. [Step 3.a.i](spherical): **`reference_metric::CoordSystem = "Spherical"`** 1. [Step 3.a.ii](sinhspherical): **`reference_metric::CoordSystem = "SinhSpherical"`** 1. [Step 3.a.iii](sinhsphericalv2): **`reference_metric::CoordSystem = "SinhSphericalv2"`** 1. [Step 3.b](cylindricallike): Cylindrical-like coordinate systems 1. [Step 3.b.i](cylindrical): **`reference_metric::CoordSystem = "Cylindrical"`** 1. [Step 3.b.ii](sinhcylindrical): **`reference_metric::CoordSystem = "SinhCylindrical"`** 1. [Step 3.b.iii](sinhcylindricalv2): **`reference_metric::CoordSystem = "SinhCylindricalv2"`** 1. [Step 3.c](cartesianlike): Cartesian-like coordinate systems 1. [Step 3.c.i](cartesian): **`reference_metric::CoordSystem = "Cartesian"`** 1. [Step 3.d](prolatespheroidal): Prolate spheroidal coordinates 1. [Step 3.d.i](symtp): **`reference_metric::CoordSystem = "SymTP"`** 1. [Step 3.d.ii](sinhsymtp): **`reference_metric::CoordSystem = "SinhSymTP"`**1. [Step 4](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Defining a reference metric, [`reference_metric.py`](../edit/reference_metric.py) \[Back to [top](toc)\]$$\label{define_ref_metric}$$***Note that currently only orthogonal reference metrics of dimension 3 or fewer are supported. This can be extended if desired.***NRPy+ assumes all curvilinear coordinate systems map directly from a uniform, Cartesian numerical grid with coordinates $(x,y,z)$=(`xx[0]`,`xx[1]`,`xx[2]`). 
Thus when defining reference metrics, all defined coordinate quantities must be in terms of the `xx[]` array. As we will see, this adds a great deal of flexibilityFor example, [**reference_metric.py**](../edit/reference_metric.py) requires that the *orthogonal coordinate scale factors* be defined. As described [here](https://en.wikipedia.org/wiki/Curvilinear_coordinates), the $i$th scale factor is the positive root of the metric element $g_{ii}$. In ordinary spherical coordinates $(r,\theta,\phi)$, with line element $ds^2 = g_{ij} dx^i dx^j = dr^2+ r^2 d \theta^2 + r^2 \sin^2\theta \ d\phi^2$, we would first define* $r = xx_0$* $\theta = xx_1$* $\phi = xx_2$,so that the scale factors are defined as* `scalefactor_orthog[0]` = $1$* `scalefactor_orthog[1]` = $r$* `scalefactor_orthog[2]` = $r \sin \theta$Here is the corresponding code:
###Code
import sympy as sp
import NRPy_param_funcs as par
import reference_metric as rfm
r = rfm.xx[0]
th = rfm.xx[1]
ph = rfm.xx[2]
rfm.scalefactor_orthog[0] = 1
rfm.scalefactor_orthog[1] = r
rfm.scalefactor_orthog[2] = r*sp.sin(th)
# Notice that the scale factor will be given
# in terms of the fundamental Cartesian
# grid variables, and not {r,th,ph}:
print("r*sin(th) = "+str(rfm.scalefactor_orthog[2]))
###Output
r*sin(th) = xx0*sin(xx1)
###Markdown
Next suppose we wish to modify our radial coordinate $r(xx_0)$ to be an exponentially increasing function, so that our numerical grid $(xx_0,xx_1,xx_2)$ will map to a spherical grid with radial grid spacing ($\Delta r$) that *increases* with $r$. Generally we will find it useful to define $r(xx_0)$ to be an odd function, so let's choose$$r(xx_0) = a \sinh(xx_0/s),$$where $a$ is an overall radial scaling factor, and $s$ denotes the scale (in units of $xx_0$) over which exponential growth will take place. In our implementation below, note that we use the relation$$\sinh(x) = \frac{e^x - e^{-x}}{2},$$as SymPy finds it easier to evaluate exponentials than hyperbolic trigonometric functions.
###Code
a,s = sp.symbols('a s',positive=True)
xx0_rescaled = rfm.xx[0] / s
r = a*(sp.exp(xx0_rescaled) - sp.exp(-xx0_rescaled))/2
# Must redefine the scalefactors since 'r' has been updated!
rfm.scalefactor_orthog[0] = 1
rfm.scalefactor_orthog[1] = r
rfm.scalefactor_orthog[2] = r*sp.sin(th)
print(rfm.scalefactor_orthog[2])
###Output
a*(exp(xx0/s) - exp(-xx0/s))*sin(xx1)/2
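###Markdown
 As a quick sanity check on the identity quoted above, here is a minimal sketch using the `r`, `a`, and `s` symbols just defined: rewriting $\sinh$ in terms of exponentials and subtracting should leave zero.
###Code
# sinh(xx0/s), rewritten via exponentials, should match the expression we built for r.
sinh_form = a*sp.sinh(rfm.xx[0]/s).rewrite(sp.exp)
check_zero = sp.simplify(r - sinh_form) # expected to simplify to 0
###Output
_____no_output_____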
###Markdown
Often we will find it useful to also define the appropriate mappings from (`xx[0]`,`xx[1]`,`xx[2]`) to Cartesian coordinates (for plotting purposes) and ordinary spherical coordinates (e.g., in case initial data when solving a PDE are naturally written in spherical coordinates). For this purpose, reference_metric.py also declares lists **`xxCart[]`** and **`xxSph[]`**, which in this case are defined as
###Code
rfm.xxSph[0] = r
rfm.xxSph[1] = th
rfm.xxSph[2] = ph
rfm.xxCart[0] = r*sp.sin(th)*sp.cos(ph)
rfm.xxCart[1] = r*sp.sin(th)*sp.sin(ph)
rfm.xxCart[2] = r*sp.cos(th)
# Here we show off SymPy's pretty_print()
# and simplify() functions. Nice, no?
sp.pretty_print(sp.simplify(rfm.xxCart[0]))
###Output
⎛xx₀⎞
a⋅sin(xx₁)⋅cos(xx₂)⋅sinh⎜───⎟
⎝ s ⎠
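###Markdown
 These mappings are ordinary SymPy expressions, so they can also be evaluated numerically at any grid point. A minimal sketch, using illustrative values $a=1$, $s=1$ at the grid point $(xx_0,xx_1,xx_2)=(1,\pi/2,0)$:
###Code
# Substitute sample values into the Cartesian x-coordinate; this reduces to sinh(1), roughly 1.1752.
x_sample = rfm.xxCart[0].subs([(rfm.xx[0], 1), (rfm.xx[1], sp.pi/2), (rfm.xx[2], 0), (a, 1), (s, 1)])
###Output
_____no_output_____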
###Markdown
Step 2: Define geometric quantities, `ref_metric__hatted_quantities()` \[Back to [top](toc)\]$$\label{define_geometric}$$Once `scalefactor_orthog[]` has been defined, the function **`ref_metric__hatted_quantities()`** within [reference_metric.py](../edit/reference_metric.py) can be called to define a number of geometric quantities useful for solving PDEs in curvilinear coordinate systems. Adopting the notation of [Baumgarte, Montero, Cordero-Carrión, and Müller, PRD 87, 044026 (2012)](https://arxiv.org/abs/1211.6632), geometric quantities related to the reference metric are named "hatted" quantities, . For example, the reference metric is defined as $\hat{g}_{ij}$=`ghatDD[i][j]`:
###Code
rfm.ref_metric__hatted_quantities()
sp.pretty_print(sp.Matrix(sp.simplify(rfm.ghatDD)))
###Output
⎡1 0 0 ⎤
⎢ ⎥
⎢ 2 ⎥
⎢ ⎛ xx₀ -xx₀ ⎞ ⎥
⎢ ⎜ ─── ─────⎟ ⎥
⎢ 2 ⎜ s s ⎟ ⎥
⎢ a ⋅⎝ℯ - ℯ ⎠ ⎥
⎢0 ─────────────────── 0 ⎥
⎢ 4 ⎥
⎢ ⎥
⎢ 2 ⎥
⎢ ⎛ xx₀ -xx₀ ⎞ ⎥
⎢ ⎜ ─── ─────⎟ ⎥
⎢ 2 ⎜ s s ⎟ 2 ⎥
⎢ a ⋅⎝ℯ - ℯ ⎠ ⋅sin (xx₁)⎥
⎢0 0 ─────────────────────────────⎥
⎣ 4 ⎦
###Markdown
In addition to $\hat{g}_{ij}$, **`ref_metric__hatted_quantities()`** also provides:* The rescaling "matrix" `ReDD[i][j]`, used for separating singular (due to chosen coordinate system) pieces of smooth rank-2 tensor components from the smooth parts, so that the smooth parts can be used within temporal and spatial differential operators.* Inverse reference metric: $\hat{g}^{ij}$=`ghatUU[i][j]`.* Reference metric determinant: $\det\left(\hat{g}_{ij}\right)$=`detgammahat`.* First and second derivatives of the reference metric: $\hat{g}_{ij,k}$=`ghatDD_dD[i][j][k]`; $\hat{g}_{ij,kl}$=`ghatDD_dDD[i][j][k][l]`* Christoffel symbols associated with the reference metric, $\hat{\Gamma}^i_{jk}$ = `GammahatUDD[i][j][k]` and their first derivatives $\hat{\Gamma}^i_{jk,l}$ = `GammahatUDD_dD[i][j][k][l]`For example, the Christoffel symbol $\hat{\Gamma}^{xx_1}_{xx_2 xx_2}=\hat{\Gamma}^1_{22}$ is given by `GammahatUDD[1][2][2]`:
###Code
sp.pretty_print(sp.simplify(rfm.GammahatUDD[1][2][2]))
###Output
-sin(2⋅xx₁)
────────────
2
###Markdown
Given the trigonometric identity $2\sin(x)\cos(x) = \sin(2x)$, notice that the above expression is equivalent to Eq. 18 of [Baumgarte, Montero, Cordero-Carrión, and Müller, PRD 87, 044026 (2012)](https://arxiv.org/abs/1211.6632). This is expected since the sinh-radial spherical coordinate system is equivalent to ordinary spherical coordinates in the angular components. Step 3: Prescribed reference metrics in [`reference_metric.py`](../edit/reference_metric.py) \[Back to [top](toc)\]$$\label{prescribed_ref_metric}$$One need not manually define scale factors or other quantities for reference metrics, as a number of prescribed reference metrics are already defined in [reference_metric.py](../edit/reference_metric.py). These can be accessed by first setting the parameter **reference_metric::CoordSystem** to one of the following, and then calling the function **`rfm.reference_metric()`**.
###Code
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
# Step 0a: Initialize parameters
thismodule = __name__
par.initialize_param(par.glb_param("char", thismodule, "CoordSystem", "Spherical"))
# Step 0b: Declare global variables
xx = gri.xx
xxCart = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
Cart_to_xx = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
Cartx,Carty,Cartz = sp.symbols("Cartx Carty Cartz", real=True)
Cart = [Cartx,Carty,Cartz]
xxSph = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
scalefactor_orthog = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
have_already_called_reference_metric_function = False
CoordSystem = par.parval_from_str("reference_metric::CoordSystem")
M_PI,M_SQRT1_2 = par.Cparameters("#define",thismodule,["M_PI","M_SQRT1_2"],"")
global xxmin
global xxmax
global UnitVectors
UnitVectors = ixp.zerorank2(DIM=3)
###Output
_____no_output_____
###Markdown
We will find the following plotting function useful for analyzing coordinate systems in which the radial coordinate is rescaled.
###Code
def create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0):
import matplotlib.pyplot as plt
plt.clf()
Nr = 20
dxx0 = 1.0 / float(Nr)
xx0s = []
rs = []
deltars = []
rprimes = []
for i in range(Nr):
xx0 = (float(i) + 0.5)*dxx0
xx0s.append(xx0)
rs.append( sp.sympify(str(r_of_xx0 ).replace("xx0",str(xx0))))
rprimes.append(sp.sympify(str(rprime_of_xx0).replace("xx0",str(xx0))))
if i>0:
deltars.append(sp.log(rs[i]-rs[i-1],10))
else:
deltars.append(sp.log(2*rs[0],10))
# fig, ax = plt.subplots()
fig = plt.figure(figsize=(12,12)) # 8 in x 8 in
ax = fig.add_subplot(221)
ax.set_title('$r(xx_0)$ for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
ax.set_ylabel('$r(xx_0)$',fontsize='x-large')
ax.plot(xx0s, rs, 'k.', label='Spacing between\nadjacent gridpoints')
# legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
# legend.get_frame().set_facecolor('C1')
ax = fig.add_subplot(222)
ax.set_title('Grid spacing for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
ax.set_ylabel('$\log_{10}(\Delta r)$',fontsize='x-large')
ax.plot(xx0s, deltars, 'k.', label='Spacing between\nadjacent gridpoints\nin $r(xx_0)$ plot')
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
ax = fig.add_subplot(223)
ax.set_title('$r\'(xx_0)$ for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
ax.set_ylabel('$r\'(xx_0)$',fontsize='x-large')
ax.plot(xx0s, rprimes, 'k.', label='Nr=96')
# legend = ax.legend(loc='upper left', shadow=True, fontsize='x-large')
# legend.get_frame().set_facecolor('C1')
plt.tight_layout(pad=2)
plt.show()
###Output
_____no_output_____
###Markdown
Step 3.a: Spherical-like coordinate systems \[Back to [top](toc)\]$$\label{sphericallike}$$ Step 3.a.i: **`reference_metric::CoordSystem = "Spherical"`** \[Back to [top](toc)\]$$\label{spherical}$$Standard spherical coordinates, with $(r,\theta,\phi)=(xx_0,xx_1,xx_2)$
###Code
if CoordSystem == "Spherical":
# Adding assumption real=True can help simplify expressions involving xx[0] & xx[1] below.
xx[0] = sp.symbols("xx0", real=True)
xx[1] = sp.symbols("xx1", real=True)
RMAX = par.Cparameters("REAL", thismodule, ["RMAX"],10.0)
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [ RMAX, M_PI, M_PI]
r = xx[0]
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
Cart_to_xx[1] = sp.acos(Cartz / Cart_to_xx[0])
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
###Output
_____no_output_____
###Markdown
Now let's analyze $r(xx_0)$ for **"Spherical"** coordinates.
###Code
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "Spherical"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
RMAX = 10.0
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("RMAX",str(RMAX)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("RMAX",str(RMAX)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
###Output
_____no_output_____
###Markdown
Step 3.a.ii: **`reference_metric::CoordSystem = "SinhSpherical"`** \[Back to [top](toc)\]$$\label{sinhspherical}$$Spherical coordinates, but with $$r(xx_0) = \text{AMPL} \frac{\sinh\left(\frac{xx_0}{\text{SINHW}}\right)}{\sinh\left(\frac{1}{\text{SINHW}}\right)}.$$SinhSpherical uses two parameters: `AMPL` and `SINHW`. `AMPL` sets the outer boundary distance; and `SINHW` sets the focusing of the coordinate points near $r=0$, where a small `SINHW` ($\sim 0.125$) will greatly focus the points near $r=0$ and a large `SINHW` will look more like an ordinary spherical polar coordinate system.
###Code
if CoordSystem == "SinhSpherical":
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
# Set SinhSpherical radial coordinate by default; overwrite later if CoordSystem == "SinhSphericalv2".
r = AMPL * (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) / \
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW))
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = SINHW*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)*sp.sinh(1/SINHW)/AMPL)
Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
###Output
_____no_output_____
###Markdown
Now we explore $r(xx_0)$ for `SinhSpherical` assuming `AMPL=10.0` and `SINHW=0.2`:
###Code
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "SinhSpherical"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
AMPL = 10.0
SINHW = 0.2
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
###Output
_____no_output_____
###Markdown
Step 3.a.iii: **`reference_metric::CoordSystem = "SinhSphericalv2"`** \[Back to [top](toc)\]$$\label{sinhsphericalv2}$$The same as SinhSpherical coordinates, but with an additional `AMPL*const_dr*xx_0` term:$$r(xx_0) = \text{AMPL} \left[\text{const_dr}\ xx_0 + \frac{\sinh\left(\frac{xx_0}{\text{SINHW}}\right)}{\sinh\left(\frac{1}{\text{SINHW}}\right)}\right].$$
###Code
if CoordSystem == "SinhSphericalv2":
# SinhSphericalv2 adds the parameter "const_dr", which allows for a region near xx[0]=0 to have
# constant radial resolution of const_dr, provided the sinh() term does not dominate near xx[0]=0.
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
const_dr = par.Cparameters("REAL",thismodule,["const_dr"],0.0625)
r = AMPL*( const_dr*xx[0] + (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) /
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW)) )
th = xx[1]
ph = xx[2]
# NO CLOSED-FORM EXPRESSION FOR RADIAL INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
# Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
###Output
_____no_output_____
###Markdown
Now we explore $r(xx_0)$ for `SinhSphericalv2` assuming `AMPL=10.0`, `SINHW=0.2`, and `const_dr=0.05`. Notice that the `const_dr` term significantly increases the grid spacing near $xx_0=0$ relative to `SinhSpherical` coordinates.
###Code
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "SinhSphericalv2"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
AMPL = 10.0
SINHW = 0.2
const_dr = 0.05
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)).replace("const_dr",str(const_dr)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)).replace("const_dr",str(const_dr)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
###Output
_____no_output_____
###Markdown
Step 3.b: Cylindrical-like coordinate systems \[Back to [top](toc)\]$$\label{cylindricallike}$$ Step 3.b.i: **`reference_metric::CoordSystem = "Cylindrical"`** \[Back to [top](toc)\]$$\label{cylindrical}$$Standard cylindrical coordinates, with $(\rho,\phi,z)=(xx_0,xx_1,xx_2)$
###Code
if CoordSystem == "Cylindrical":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
RHOMAX,ZMIN,ZMAX = par.Cparameters("REAL",thismodule,["RHOMAX","ZMIN","ZMAX"],[10.0,-10.0,10.0])
xxmin = [sp.sympify(0), -M_PI, ZMIN]
xxmax = [ RHOMAX, M_PI, ZMAX]
RHOCYL = xx[0]
PHICYL = xx[1]
ZCYL = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = Cartz
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
###Output
_____no_output_____
###Markdown
Next let's plot **"Cylindrical"** coordinates.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
R = np.linspace(0, 2, 24)
h = 2
u = np.linspace(0, 2*np.pi, 24)
x = np.outer(R, np.cos(u))
y = np.outer(R, np.sin(u))
z = h * np.outer(np.ones(np.size(u)), np.ones(np.size(u)))
r = np.arange(0,2,0.25)
theta = 2*np.pi*r*0
fig = plt.figure(figsize=(12,12)) # 8 in x 8 in
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1 = plt.axes(projection='polar')
ax1.set_rmax(2)
ax1.set_rgrids(r,labels=[])
thetas = np.linspace(0,360,24, endpoint=True)
ax1.set_thetagrids(thetas,labels=[])
# ax.grid(True)
ax1.grid(True,linewidth='1.0')
ax1.set_title("Top Down View")
plt.show()
ax2 = plt.axes(projection='3d', xticklabels=[], yticklabels=[], zticklabels=[])
#ax2.plot_surface(x,y,z, alpha=.75, cmap = 'viridis') # z in case of disk which is parallel to XY plane is constant and you can directly use h
x=np.linspace(-2, 2, 100)
z=np.linspace(-2, 2, 100)
Xc, Zc=np.meshgrid(x, z)
Yc = np.sqrt(4-Xc**2)
rstride = 10
cstride = 10
ax2.plot_surface(Xc, Yc, Zc, alpha=1.0, rstride=rstride, cstride=cstride, cmap = 'viridis')
ax2.plot_surface(Xc, -Yc, Zc, alpha=1.0, rstride=rstride, cstride=cstride, cmap = 'viridis')
ax2.set_title("Standard Cylindrical Grid in 3D")
ax2.grid(False)
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Step 3.b.ii" **`reference_metric::CoordSystem = "SinhCylindrical"`** \[Back to [top](toc)\]$$\label{sinhcylindrical}$$Cylindrical coordinates, but with$$\rho(xx_0) = \text{AMPLRHO} \frac{\sinh\left(\frac{xx_0}{\text{SINHWRHO}}\right)}{\sinh\left(\frac{1}{\text{SINHWRHO}}\right)}$$and $$z(xx_2) = \text{AMPLZ} \frac{\sinh\left(\frac{xx_2}{\text{SINHWZ}}\right)}{\sinh\left(\frac{1}{\text{SINHWZ}}\right)}$$
###Code
if CoordSystem == "SinhCylindrical":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL",thismodule,
["AMPLRHO","SINHWRHO","AMPLZ","SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
# Set SinhCylindrical radial & z coordinates by default; overwrite later if CoordSystem == "SinhCylindricalv2".
RHOCYL = AMPLRHO * (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO))
# phi coordinate remains unchanged.
PHICYL = xx[1]
ZCYL = AMPLZ * (sp.exp(xx[2] / SINHWZ) - sp.exp(-xx[2] / SINHWZ)) / (sp.exp(1 / SINHWZ) - sp.exp(-1 / SINHWZ))
Cart_to_xx[0] = SINHWRHO*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2)*sp.sinh(1/SINHWRHO)/AMPLRHO)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = SINHWZ*sp.asinh(Cartz*sp.sinh(1/SINHWZ)/AMPLZ)
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
###Output
_____no_output_____
###Markdown
Next let's plot **"SinhCylindrical"** coordinates.
###Code
fig=plt.figure()
plt.clf()
fig = plt.figure()
ax = plt.subplot(1,1,1, projection='polar')
ax.set_rmax(2)
Nr = 20
xx0s = np.linspace(0,2,Nr, endpoint=True) + 1.0/(2.0*Nr)
rs = []
AMPLRHO = 1.0
SINHW = 0.4
for i in range(Nr):
rs.append(AMPLRHO * (np.exp(xx0s[i] / SINHW) - np.exp(-xx0s[i] / SINHW)) / \
(np.exp(1.0 / SINHW) - np.exp(-1.0 / SINHW)))
ax.set_rgrids(rs,labels=[])
thetas = np.linspace(0,360,25, endpoint=True)
ax.set_thetagrids(thetas,labels=[])
# ax.grid(True)
ax.grid(True,linewidth='1.0')
plt.show()
###Output
_____no_output_____
###Markdown
Step 3.b.iii: **`reference_metric::CoordSystem = "SinhCylindricalv2"`** \[Back to [top](toc)\]$$\label{sinhcylindricalv2}$$Cylindrical coordinates, but with$$\rho(xx_0) = \text{AMPLRHO} \left[\text{const_drho}\ xx_0 + \frac{\sinh\left(\frac{xx_0}{\text{SINHWRHO}}\right)}{\sinh\left(\frac{1}{\text{SINHWRHO}}\right)}\right]$$and $$z(xx_2) = \text{AMPLZ} \left[\text{const_dz}\ xx_2 + \frac{\sinh\left(\frac{xx_2}{\text{SINHWZ}}\right)}{\sinh\left(\frac{1}{\text{SINHWZ}}\right)}\right]$$
###Code
if CoordSystem == "SinhCylindricalv2":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
# SinhCylindricalv2 adds the parameters "const_drho", "const_dz", which allows for regions near xx[0]=0
# and xx[2]=0 to have constant rho and z resolution of const_drho and const_dz, provided the sinh() terms
# do not dominate near xx[0]=0 and xx[2]=0.
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL",thismodule,
["AMPLRHO","SINHWRHO","AMPLZ","SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
const_drho, const_dz = par.Cparameters("REAL",thismodule,["const_drho","const_dz"],[0.0625,0.0625])
RHOCYL = AMPLRHO * ( const_drho*xx[0] + (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO)) )
PHICYL = xx[1]
ZCYL = AMPLZ * ( const_dz *xx[2] + (sp.exp(xx[2] / SINHWZ ) - sp.exp(-xx[2] / SINHWZ )) / (sp.exp(1 / SINHWZ ) - sp.exp(-1 / SINHWZ )) )
# NO CLOSED-FORM EXPRESSION FOR RADIAL OR Z INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.atan2(Carty, Cartx)
# Cart_to_xx[2] = "NewtonRaphson"
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
###Output
_____no_output_____
###Markdown
For example, let's set up **`SinhCylindricalv2`** coordinates and output the Christoffel symbol $\hat{\Gamma}^{xx_2}_{xx_2 xx_2}$, or more simply $\hat{\Gamma}^2_{22}$:
###Code
par.set_parval_from_str("reference_metric::CoordSystem","SinhCylindricalv2")
rfm.reference_metric()
sp.pretty_print(sp.simplify(rfm.GammahatUDD[2][2][2]))
###Output
⎛ 2⋅xx₂ ⎞ 1
⎜ ────── ⎟ ──────
⎜ SINHWZ ⎟ SINHWZ
-⎝ℯ - 1⎠⋅ℯ
────────────────────────────────────────────────────────────────────────
⎛ ⎛ 2 ⎞ xx₂ ⎛ 2⋅xx₂ ⎞ 1 ⎞
⎜ ⎜ ────── ⎟ ────── ⎜ ────── ⎟ ──────⎟
⎜ ⎜ SINHWZ ⎟ SINHWZ ⎜ SINHWZ ⎟ SINHWZ⎟
SINHWZ⋅⎝- SINHWZ⋅const_dz⋅⎝ℯ - 1⎠⋅ℯ - ⎝ℯ + 1⎠⋅ℯ ⎠
###Markdown
As we will soon see, defining these "hatted" quantities will be quite useful when expressing hyperbolic ([wave-equation](https://en.wikipedia.org/wiki/Wave_equation)-like) PDEs in non-Cartesian coordinate systems. Step 3.c: Cartesian-like coordinate systems \[Back to [top](toc)\]$$\label{cartesianlike}$$ Step 3.c.i: **`reference_metric::CoordSystem = "Cartesian"`** \[Back to [top](toc)\]$$\label{cartesian}$$Standard Cartesian coordinates, with $(x,y,z)=$ `(xx0,xx1,xx2)`
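As a quick illustration of why Cartesian coordinates are the trivial case, the same calls used for SinhCylindricalv2 above can be repeated here; every hatted Christoffel symbol then simplifies to zero. (A minimal sketch, assuming the `par` and `rfm` modules are already set up as elsewhere in this tutorial.)
```
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
for i in range(3):
    for j in range(3):
        for k in range(3):
            assert sp.simplify(rfm.GammahatUDD[i][j][k]) == 0
```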
###Code
if CoordSystem == "Cartesian":
xmin, xmax, ymin, ymax, zmin, zmax = par.Cparameters("REAL",thismodule,
["xmin","xmax","ymin","ymax","zmin","zmax"],
[ -10.0, 10.0, -10.0, 10.0, -10.0, 10.0])
xxmin = ["xmin", "ymin", "zmin"]
xxmax = ["xmax", "ymax", "zmax"]
xxCart[0] = xx[0]
xxCart[1] = xx[1]
xxCart[2] = xx[2]
xxSph[0] = sp.sqrt(xx[0] ** 2 + xx[1] ** 2 + xx[2] ** 2)
xxSph[1] = sp.acos(xx[2] / xxSph[0])
xxSph[2] = sp.atan2(xx[1], xx[0])
Cart_to_xx[0] = Cartx
Cart_to_xx[1] = Carty
Cart_to_xx[2] = Cartz
scalefactor_orthog[0] = sp.sympify(1)
scalefactor_orthog[1] = sp.sympify(1)
scalefactor_orthog[2] = sp.sympify(1)
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sympify(1), sp.sympify(0), sp.sympify(0)],
[sp.sympify(0), sp.sympify(1), sp.sympify(0)],
[sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.clf()
fig = plt.figure()
ax = fig.gca()
Nx = 16
ax.set_xticks(np.arange(0, 1., 1./Nx))
ax.set_yticks(np.arange(0, 1., 1./Nx))
# plt.scatter(x, y)
ax.set_aspect('equal')
plt.grid()
# plt.savefig("Cartgrid.png",dpi=300)
plt.show()
# plt.close(fig)
###Output
_____no_output_____
###Markdown
Step 3.d: [Prolate spheroidal](https://en.wikipedia.org/wiki/Prolate_spheroidal_coordinates)-like coordinate systems \[Back to [top](toc)\]$$\label{prolatespheroidal}$$ Step 3.d.i: **`reference_metric::CoordSystem = "SymTP"`** \[Back to [top](toc)\]$$\label{symtp}$$Symmetric TwoPuncture coordinates, with $(\rho,\phi,z)=(xx_0\sin(xx_1), xx_2, \sqrt{xx_0^2 + \text{bScale}^2}\cos(xx_1))$
###Code
if CoordSystem == "SymTP":
var1, var2= sp.symbols('var1 var2',real=True)
bScale, AW, AMAX, RHOMAX, ZMIN, ZMAX = par.Cparameters("REAL",thismodule,
["bScale","AW","AMAX","RHOMAX","ZMIN","ZMAX"],
[0.5, 0.2, 10.0, 10.0, -10.0, 10.0])
# Assuming xx0, xx1, and bScale
# are positive makes nice simplifications of
# unit vectors possible.
xx[0],xx[1] = sp.symbols("xx0 xx1", real=True)
xxmin = [sp.sympify(0), sp.sympify(0),-M_PI]
xxmax = [ AMAX, M_PI, M_PI]
AA = xx[0]
if CoordSystem == "SinhSymTP":
AA = (sp.exp(xx[0]/AW)-sp.exp(-xx[0]/AW))/2
var1 = sp.sqrt(AA**2 + (bScale * sp.sin(xx[1]))**2)
var2 = sp.sqrt(AA**2 + bScale**2)
RHOSYMTP = AA*sp.sin(xx[1])
PHSYMTP = xx[2]
ZSYMTP = var2*sp.cos(xx[1])
xxCart[0] = AA *sp.sin(xx[1])*sp.cos(xx[2])
xxCart[1] = AA *sp.sin(xx[1])*sp.sin(xx[2])
xxCart[2] = ZSYMTP
xxSph[0] = sp.sqrt(RHOSYMTP**2 + ZSYMTP**2)
xxSph[1] = sp.acos(ZSYMTP / xxSph[0])
xxSph[2] = PHSYMTP
rSph = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
thSph = sp.acos(Cartz / rSph)
phSph = sp.atan2(Carty, Cartx)
# Mathematica script to compute Cart_to_xx[]
# AA = x1;
# var2 = Sqrt[AA^2 + bScale^2];
# RHOSYMTP = AA*Sin[x2];
# ZSYMTP = var2*Cos[x2];
# Solve[{rSph == Sqrt[RHOSYMTP^2 + ZSYMTP^2],
# thSph == ArcCos[ZSYMTP/Sqrt[RHOSYMTP^2 + ZSYMTP^2]],
# phSph == x3},
# {x1, x2, x3}]
Cart_to_xx[0] = sp.sqrt(-bScale**2 + rSph**2 +
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2))*M_SQRT1_2 # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
# The sign() function in the following expression ensures the correct root is taken.
Cart_to_xx[1] = sp.acos(sp.sign(Cartz)*(
sp.sqrt(1 + rSph**2/bScale**2 -
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2)/bScale**2)*M_SQRT1_2)) # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
Cart_to_xx[2] = phSph
###Output
_____no_output_____
###Markdown
Step 3.d.ii: **`reference_metric::CoordSystem = "SinhSymTP"`** \[Back to [top](toc)\]$$\label{sinhsymtp}$$Symmetric TwoPuncture coordinates, but with $$xx_0 \to \sinh(xx_0/\text{AW})$$
###Code
if CoordSystem == "SinhSymTP":
var1, var2= sp.symbols('var1 var2',real=True)
    # Note: the sinh width parameter used below is SINHWAA, so it is declared here.
    bScale, SINHWAA, AMAX, RHOMAX, ZMIN, ZMAX = par.Cparameters("REAL",thismodule,
                                                                ["bScale","SINHWAA","AMAX","RHOMAX","ZMIN","ZMAX"],
                                                                [0.5, 0.2, 10.0, 10.0, -10.0, 10.0])
# Assuming xx0, xx1, and bScale
# are positive makes nice simplifications of
# unit vectors possible.
xx[0],xx[1] = sp.symbols("xx0 xx1", real=True)
xxmin = [sp.sympify(0), sp.sympify(0),-M_PI]
xxmax = [ AMAX, M_PI, M_PI]
AA = xx[0]
if CoordSystem == "SinhSymTP":
# With xxmax[0] == AMAX, sinh(xx0/AMAX) will evaluate to a number between 0 and 1.
# Similarly, sinh(xx0/(AMAX*SINHWAA)) / sinh(1/SINHWAA) will also evaluate to a number between 0 and 1.
# Then AA = AMAX*sinh(xx0/(AMAX*SINHWAA)) / sinh(1/SINHWAA) will evaluate to a number between 0 and AMAX.
        AA = AMAX * (sp.exp(xx[0] / (AMAX*SINHWAA)) - sp.exp(-xx[0] / (AMAX*SINHWAA))) / (sp.exp(1 / SINHWAA) - sp.exp(-1 / SINHWAA))
var1 = sp.sqrt(AA**2 + (bScale * sp.sin(xx[1]))**2)
var2 = sp.sqrt(AA**2 + bScale**2)
RHOSYMTP = AA*sp.sin(xx[1])
PHSYMTP = xx[2]
ZSYMTP = var2*sp.cos(xx[1])
xxCart[0] = AA *sp.sin(xx[1])*sp.cos(xx[2])
xxCart[1] = AA *sp.sin(xx[1])*sp.sin(xx[2])
xxCart[2] = ZSYMTP
xxSph[0] = sp.sqrt(RHOSYMTP**2 + ZSYMTP**2)
xxSph[1] = sp.acos(ZSYMTP / xxSph[0])
xxSph[2] = PHSYMTP
scalefactor_orthog[0] = sp.diff(AA,xx[0]) * var1 / var2
scalefactor_orthog[1] = var1
scalefactor_orthog[2] = AA * sp.sin(xx[1])
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sin(xx[1]) * sp.cos(xx[2]) * var2 / var1,
sp.sin(xx[1]) * sp.sin(xx[2]) * var2 / var1,
AA * sp.cos(xx[1]) / var1],
[AA * sp.cos(xx[1]) * sp.cos(xx[2]) / var1,
AA * sp.cos(xx[1]) * sp.sin(xx[2]) / var1,
-sp.sin(xx[1]) * var2 / var1],
[-sp.sin(xx[2]), sp.cos(xx[2]), sp.sympify(0)]]
###Output
_____no_output_____
###Markdown
Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Reference_Metric.pdf](Tutorial-Reference_Metric.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-Reference_Metric.ipynb
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-Reference_Metric.ipynb to latex
[NbConvertApp] Support files will be in Tutorial-Reference_Metric_files/
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Writing 132807 bytes to Tutorial-Reference_Metric.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
|
Interview Preparation Kit/12. Linked Lists/Linked Lists_Detect a Cycle.ipynb | ###Markdown
Linked Lists: Detect a Cycle
###Code
"""
Detect a cycle in a linked list. Note that the head pointer may be 'None' if the list is empty.
A Node is defined as:
class Node(object):
def __init__(self, data = None, next_node = None):
self.data = data
self.next = next_node
"""
def has_cycle(head):
    # Track visited nodes by identity (id), not by their data values,
    # so lists that contain duplicate data are not mistaken for cycles.
    if not head:
        return 0
    visited = set()
    curr = head
    visited.add(id(curr))
    while curr.next:
        nxt = curr.next
        if id(nxt) in visited:
            return 1
        visited.add(id(nxt))
        curr = nxt
    return 0
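# Alternative sketch (not part of the submitted solution): Floyd's tortoise-and-hare
# detects a cycle with O(1) extra memory by advancing two pointers at different speeds;
# they can only meet if the list loops back on itself.
def has_cycle_floyd(head):
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return 1
    return 0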
###Output
_____no_output_____ |
tips/cloudwatch_metrics/keras_tensorflow_mnist.ipynb | ###Markdown
Writing training-progress scores to CloudWatch Metrics — Overview: This notebook walks through how to write the scores produced while training on Amazon SageMaker out to CloudWatch Metrics and visualize them. Uploading the dataset to S3 - Use keras.datasets to download the MNIST data and save it in npz format. - Upload the saved npz files to S3 with the SageMaker Python SDK.
###Code
import os
import keras
import numpy as np
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
os.makedirs("./data", exist_ok = True)
np.savez('./data/train', image=x_train, label=y_train)
np.savez('./data/test', image=x_test, label=y_test)
import sagemaker
sagemaker_session = sagemaker.Session()
bucket_name = sagemaker_session.default_bucket()
input_data = sagemaker_session.upload_data(path='./data', bucket=bucket_name, key_prefix='dataset/mnist')
print('Training data is uploaded to: {}'.format(input_data))
###Output
_____no_output_____
###Markdown
Defining the metrics — When creating the Estimator object, you can specify metric_definitions in JSON form. By describing each metric as a regular expression, matching values are pulled out of the job's standard output and written to CloudWatch Metrics automatically. Because this example runs a Keras job, the logs look like the following; let's extract the loss value on both the training data and the validation data at every epoch as metrics.```59600/60000 [============================>.] - ETA: 0s - loss: 0.2289 - acc: 0.929859800/60000 [============================>.] - ETA: 0s - loss: 0.2286 - acc: 0.929960000/60000 [==============================] - 28s 460us/step - loss: 0.2282 - acc: 0.9300 - val_loss: 0.1047 - val_acc: 0.9671Epoch 2/100 100/60000 [..............................] - ETA: 28s - loss: 0.1315 - acc: 0.9500 300/60000 [..............................] - ETA: 25s - loss: 0.1260 - acc: 0.9600 500/60000 [..............................] - ETA: 25s - loss: 0.1209 - acc: 0.9620```Defining the metrics as follows extracts the training and validation loss values from logs in the format above.``` metric_definitions=[ { "Name": "train:loss", "Regex": ".*step\\s-\\sloss:\\s(\\S+).*" }, { "Name": "val:loss", "Regex": ".*\\sval_loss:\\s(\\S+).*" } ],``` Training on SageMaker — Create the TensorFlow object with the metric definitions described above and run it; the metrics are then emitted as well. To confirm, open "Training jobs" in the left-hand menu of the management console and select the job in question. The "View algorithm metrics" link in the "Monitor" section near the bottom of the detail page takes you to graphs of the defined metrics.
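As a quick sanity check (not part of the original notebook), the regular expressions above can be tried locally against a sample log line with Python's re module before launching a training job:
```
import re

sample = "60000/60000 [====] - 28s 460us/step - loss: 0.2282 - acc: 0.9300 - val_loss: 0.1047 - val_acc: 0.9671"
train_loss_re = r".*step\s-\sloss:\s(\S+).*"
val_loss_re = r".*\sval_loss:\s(\S+).*"

print(re.match(train_loss_re, sample).group(1))  # -> 0.2282
print(re.match(val_loss_re, sample).group(1))    # -> 0.1047
```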
###Code
from sagemaker.tensorflow import TensorFlow
from sagemaker import get_execution_role
role = get_execution_role()
mnist_estimator = TensorFlow(entry_point = "./src/keras_mlp_mnist.py",
role=role,
train_instance_count=1,
train_instance_type="ml.m4.xlarge",
framework_version="1.11.0",
py_version='py3',
script_mode=True,
metric_definitions=[
{ "Name": "train:loss", "Regex": ".*step\\s-\\sloss:\\s(\\S+).*" },
{ "Name": "val:loss", "Regex": ".*\\sval_loss:\\s(\\S+).*" }
],
hyperparameters={'batch_size': 64,
'n_class': 10,
'epochs': 15})
mnist_estimator.fit(input_data)
###Output
_____no_output_____ |
python_ds4b/01_exploration/00_data_visualization/seaborn_grids.ipynb | ###Markdown
GridsGrids are general types of plots that allow you to map plot types to rows and columns of a grid; this helps you create similar plots separated by features.
###Code
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
iris = sns.load_dataset('iris')
iris.head()
###Output
_____no_output_____
###Markdown
PairGridPairgrid is a subplot grid for plotting pairwise relationships in a dataset.
###Code
# Just the Grid
sns.PairGrid(iris);
# Then you map to the grid
g = sns.PairGrid(iris)
g.map(plt.scatter);
# Map to upper,lower, and diagonal
g = sns.PairGrid(iris)
g.map_diag(plt.hist)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot);
###Output
_____no_output_____
###Markdown
pairplotpairplot is a simpler version of PairGrid that you'll use quite often.
###Code
sns.pairplot(iris);
sns.pairplot(iris,hue='species',palette='rainbow');
###Output
_____no_output_____
###Markdown
Facet GridFacetGrid is the general way to create grids of plots based off of a feature:
###Code
tips = sns.load_dataset('tips')
tips.head()
# Just the Grid
g = sns.FacetGrid(tips, col="time", row="smoker");
g = sns.FacetGrid(tips, col="time", row="smoker")
g = g.map(plt.hist, "total_bill");
g = sns.FacetGrid(tips, col="time", row="smoker",hue='sex')
# Notice hwo the arguments come after plt.scatter call
g = g.map(plt.scatter, "total_bill", "tip").add_legend();
###Output
_____no_output_____
###Markdown
JointGridJointGrid is the general version for jointplot() type grids, for a quick example:
###Code
g = sns.JointGrid(x="total_bill", y="tip", data=tips);
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = g.plot(sns.regplot, sns.distplot);
###Output
_____no_output_____ |
.ipynb_checkpoints/continuous-checkpoint.ipynb | ###Markdown
Synthetic data: Continuous variables
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from synthesize_data import synthesize_data
import expectation_reflection as ER
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)
def inference(X_train,y_train,X_test,y_test,method='expectation_reflection'):
if method == 'expectation_reflection':
h0,w = ER.fit(X_train,y_train,niter_max=100,regu=0.)
y_pred = ER.predict(X_test,h0,w)
else:
if method == 'logistic_regression':
model = LogisticRegression(solver='liblinear')
if method == 'naive_bayes':
model = GaussianNB()
if method == 'random_forest':
model = RandomForestClassifier(criterion = "gini", random_state = 1,
max_depth=3, min_samples_leaf=5,n_estimators=100)
if method == 'decision_tree':
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test,y_pred)
return accuracy
def compare_inference(X,y,train_size):
npred = 100
accuracy = np.zeros((len(list_methods),npred))
for ipred in range(npred):
X, y = shuffle(X, y)
X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)
idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False)
X_train,y_train = X_train0[idx_train],y_train0[idx_train]
for i,method in enumerate(list_methods):
accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method)
return accuracy.mean(axis=1),accuracy.std(axis=1)
l = 10000 ; n = 40 ; g = 4.
X,y = synthesize_data(l,n,g,data_type='continuous')
np.unique(y,return_counts=True)
list_train_size = [0.8,0.6,0.4,0.2,0.1]
list_methods=['logistic_regression','naive_bayes','random_forest','decision_tree','expectation_reflection']
acc = np.zeros((len(list_train_size),len(list_methods)))
acc_std = np.zeros((len(list_train_size),len(list_methods)))
for i,train_size in enumerate(list_train_size):
acc[i,:],acc_std[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
acc_std
df = pd.DataFrame(acc,columns = list_methods)
df.insert(0, "train_size",list_train_size, True)
df
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')
plt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')
plt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest')
plt.plot(list_train_size,acc[:,4],'k-',marker='o',label='Expectation Reflection')
plt.xlabel('train size')
plt.ylabel('accuracy mean')
plt.legend()
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc_std[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')
plt.plot(list_train_size,acc_std[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')
plt.plot(list_train_size,acc_std[:,2],'r--',marker='^',mfc='none',label='Random Forest')
plt.plot(list_train_size,acc_std[:,4],'k-',marker='o',label='Expectation Reflection')
plt.xlabel('train size')
plt.ylabel('accuracy standard deviation')
plt.legend()
###Output
_____no_output_____ |
Symbolic Math Toolbox/Create Plots.ipynb | ###Markdown
**Create Plots** **Plot with Symbolic Plotting Functions** MATLAB® provides many techniques for plotting numerical data. Graphical capabilities of MATLAB include plotting tools, standard plotting functions, graphic manipulation and data exploration tools, and tools for printing and exporting graphics to standard formats. Symbolic Math Toolbox™ expands these graphical capabilities and lets you plot symbolic functions using: - fplot to create 2-D plots of symbolic expressions, equations, or functions in Cartesian coordinates.- fplot3 to create 3-D parametric plots.- ezpolar to create plots in polar coordinates.- fsurf to create surface plots.- fcontour to create contour plots.- fmesh to create mesh plots. Plot the symbolic expression $sin(6x)$ by using **fplot**. By default, **fplot** uses the range $−5<x<5$.
###Code
from sympy import *
x = symbols('x')
plot(sin(6*x),(x,-5,5))
###Output
_____no_output_____
###Markdown
Plot a symbolic expression or function in polar coordinates $r$ (radius) and $\theta$ (polar angle) by using **ezpolar**. By default, **ezpolar** plots a symbolic expression or function over the interval $0<\theta<2\pi$.Plot the symbolic expression $sin(6t)$ in polar coordinates.
###Code
#syms t
#ezpolar(sin(6*t))
import matplotlib.pyplot as plt
import numpy as np
t = symbols('t')
eqf = lambdify(t,sin(6*t))
angle = np.arange(0,2*np.pi,1/100)
plt.polar(angle,np.abs(eqf(angle)))
plt.title('$r=sin(6t)$')
###Output
_____no_output_____
###Markdown
**Plot Functions Numerically** As an alternative to plotting expressions symbolically, you can substitute symbolic variables with numeric values by using **subs**. Then, you can use these numeric values with plotting functions in MATLAB™.In the following expressions **u** and **v**, substitute the symbolic variables **x** and **y** with the numeric values defined by **meshgrid**.
###Code
x,y = symbols('x y')
u = sin(x**2+y**2)
v = cos(x*y)
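# Quick illustration (added as a hedged example): subs() evaluates the symbolic expressions
# at a single numeric point, which is the substitution idea described above; the next cell
# uses lambdify to do the same thing efficiently over a whole meshgrid.
print(u.subs({x: 1.0, y: 2.0}))         # sin(1.0**2 + 2.0**2) = sin(5.0) ~ -0.959
print(float(v.subs({x: 1.0, y: 2.0})))  # cos(1.0*2.0) ~ -0.416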
###Output
_____no_output_____
###Markdown
Now, you can plot **U** and **V** by using standard MATLAB plotting functions.Create a plot of the vector field defined by the functions $U(X,Y)$ and $V(X,Y)$ by using the MATLAB **quiver** function.
###Code
eqfU = lambdify((x,y),u)
eqfV = lambdify((x,y),v)
X,Y = np.meshgrid(np.arange(-1,1,0.1),np.arange(-1,1,0.1))
plt.quiver(X,Y,eqfU(X,Y),eqfV(X,Y))
###Output
_____no_output_____
###Markdown
**Plot Multiple Symbolic Functions in One Graph** Plot several functions on one graph by adding the functions sequentially. After plotting the first function, add successive functions by using the **hold** on command. The **hold on** command keeps the existing plots. Without the **hold on** command, each new plot replaces any existing plot. After the **hold on** command, each new plot appears on top of existing plots. Switch back to the default behavior of replacing plots by using the **hold off** command.Plot $f=e^x sin(20x)$ using **fplot**. Show the bounds of **f** by superimposing plots of $e^x$ and $-e^x$ as dashed red lines. Set the title by using the **DisplayName** property of the object returned by **fplot**.
###Code
x,y = symbols('x y')
f = exp(x)*sin(20*x)
###Output
_____no_output_____
###Markdown
$f=sin(20x)e^x$
###Code
p1 = plot(f,exp(x),-exp(x),(x,0,3))
###Output
_____no_output_____
###Markdown
**Plot Multiple Symbolic Functions in One Figure** Display several functions side-by-side in one figure by dividing the figure window into several subplots using **subplot**. The command **subplot(m,n,p)** divides the figure into a **m** by **n** matrix of subplots and selects the subplot **p**. Display multiple plots in separate subplots by selecting the subplot and using plotting commands. Plotting into multiple subplots is useful for side-by-side comparisons of plots.Compare plots of $sin\left(\left(x^2+y^2\right)/a\right)$ for $a=10,20,50,100$ by using subplot to create side-by-side subplots.
###Code
import mpl_toolkits.mplot3d
x,y,a = symbols('x y a')
eqf3 = lambdify((x,y,a),sin((x**2+y**2)/a))
X,Y = np.meshgrid(np.arange(-5,5,0.1),np.arange(-5,5,0.1))
fig = plt.figure(constrained_layout=True)
ax0 = fig.add_subplot(2,2,1,projection='3d')
ax0.plot_surface(X,Y,eqf3(X,Y,10),cmap=plt.cm.viridis) # use the viridis colormap
ax0.set_title('$a=10$',loc='left')
ax1 = fig.add_subplot(2,2,2,projection='3d')
ax1.plot_surface(X,Y,eqf3(X,Y,20),cmap=plt.cm.viridis) # use the viridis colormap
ax1.set_title('$a=20$',loc='left')
ax2 = fig.add_subplot(2,2,3,projection='3d')
ax2.plot_surface(X,Y,eqf3(X,Y,50),cmap=plt.cm.viridis) # use the viridis colormap
ax2.set_title('$a=50$',loc='left')
ax3 = fig.add_subplot(2,2,4,projection='3d')
ax3.plot_surface(X,Y,eqf3(X,Y,100),cmap=plt.cm.viridis) # use the viridis colormap
ax3.set_title('$a=100$',loc='left')
###Output
_____no_output_____
###Markdown
**Combine Symbolic Function Plots and Numeric Data Plots** Plot numeric and symbolic data on the same graph by using MATLAB and Symbolic Math Toolbox functions together.For numeric values of **x** between $[−5,5]$, return a noisy sine curve by finding $y=sin(x)$ and adding random values to **y**. View the noisy sine curve by using **scatter** to plot the points $(x1,y1),(x2,y2),⋯$.
###Code
x = np.arange(-5,5,1/10)
y = np.sin(x)+((-1)*np.random.randint(10,size=100)*np.random.rand(100))/8
fig,ax = plt.subplots()
ax.scatter(x,y,c='w',edgecolors='#1f77b4')
###Output
_____no_output_____
###Markdown
Show the underlying structure in the points by superimposing a plot of the sine function. First, use **hold on** to retain the **scatter** plot. Then, use **fplot** to plot the sine function.
###Code
#hold on
#syms t
#fplot(sin(t))
#hold off
t = symbols('t')
eqft = lambdify(t,sin(t))
fig,ax = plt.subplots()
ax.scatter(x,y,c='w',edgecolors='#1f77b4')
ax.plot(x,eqft(x))
###Output
_____no_output_____
###Markdown
**Combine Numeric and Symbolic Plots in 3-D** Combine symbolic and numeric plots in 3-D by using MATLAB and Symbolic Math Toolbox plotting functions. Symbolic Math Toolbox provides these 3-D plotting functions: - fplot3 creates 3-D parameterized line plots.- fsurf creates 3-D surface plots.- fmesh creates 3-D mesh plots. Create a spiral plot by using **fplot3** to plot the parametric line$$ x=(1-t)sin(100t)$$$$ y=(1-t)cos(100t)$$$$ z=\sqrt{1-x^2-y^2}$$
###Code
t = symbols('t')
x = (1-t)*sin(100*t)
y = (1-t)*cos(100*t)
z = sqrt(1-x**2-y**2)
eqfx = lambdify(t,x)
eqfy = lambdify(t,y)
eqfz = lambdify(t,z)
X = eqfx(np.arange(0,1,1/1000))
Y = eqfy(np.arange(0,1,1/1000))
Z = eqfz(np.arange(0,1,1/1000))
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(X,Y,Z,linewidth=0.6)
ax.set_title('Symbolic 3-D Parametric Line')
###Output
_____no_output_____
###Markdown
Superimpose a plot of a sphere with radius 1 and center at $(0, 0, 0)$. Find points on the sphere numerically by using **sphere**. Plot the sphere by using **mesh**. The resulting plot shows the symbolic parametric line wrapped around the top hemisphere.
###Code
#hold on
#[X,Y,Z] = sphere;
#mesh(X, Y, Z)
#colormap(gray)
#title('Symbolic Parametric Plot and a Sphere')
#hold off
theta,phi = np.meshgrid(np.linspace(0,2*np.pi,30),np.linspace(0,np.pi,30))
X_sphere = np.sin(phi)*np.cos(theta)
Y_sphere = np.sin(phi)*np.sin(theta)
Z_sphere = np.cos(phi)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot_wireframe(X_sphere,Y_sphere,Z_sphere,linewidth=0.2,color='black')
ax.plot(X,Y,Z)
###Output
_____no_output_____ |
notebooks/dev/generalization.ipynb | ###Markdown
Load trained model and test on +4K data
###Code
mref = keras.models.load_model(
'/export/home/srasp/repositories/CBRAIN-CAM/saved_models/D004_fbp_engy_ess_fullyear_max_rs_deep.h5')
mref.summary()
dref_ref = get_diag(mref, 'fullyear')
dref_ref.compute_stats()
dref_ref.mean_stats(10)
dref_4k = get_diag(mref, '4k'); dref_4k.compute_stats()
dref_4k.mean_stats(10)
dref_4k.plot_double_yz(10, 15, 'TPHYSTND', cmap='bwr', vmin=-7e-4, vmax=7e-4);
###Output
_____no_output_____
###Markdown
Train new models Standard model
###Code
train_gen = DataGenerator(
PREPROC_DIR,
'fbp_engy_ess_train_sample1_shuffle_features.nc',
'fbp_engy_ess_train_sample1_shuffle_targets.nc',
1024,
'fbp_engy_ess_train_fullyear_norm.nc',
'feature_means', 'max_rs', None, 'target_conv',
shuffle=True,
)
mstd = fc_model(
94,
65,
[256,256,256,256,256,256,256,256,256],
1e-2,
'mse',
batch_norm=False,
activation='LeakyReLU',
dr=None,
l2=None,
)
mstd.fit_generator(
train_gen.return_generator(),
train_gen.n_batches/5,
epochs=3,
workers=8,
max_queue_size=50,
callbacks=[LearningRateScheduler(get_lr_sched(1e-2, 5, 1))],
)
dstd_ref = get_diag(mstd, 'fullyear'); dstd_ref.compute_stats()
dstd_ref.mean_stats(10)
dstd_4k = get_diag(mstd, '4k'); dstd_4k.compute_stats(); dstd_4k.mean_stats(10)
dstd_4k.plot_double_yz(10, 15, 'TPHYSTND', cmap='bwr', vmin=-7e-4, vmax=7e-4);
###Output
_____no_output_____
###Markdown
Batch norm
###Code
mbn = fc_model(
94,
65,
[256,256,256,256,256,256,256,256,256],
1e-2,
'mse',
batch_norm=True,
activation='LeakyReLU',
dr=None,
l2=None,
)
mbn.fit_generator(
train_gen.return_generator(),
train_gen.n_batches/5,
epochs=3,
workers=8,
max_queue_size=50,
callbacks=[LearningRateScheduler(get_lr_sched(1e-2, 5, 1))],
)
evaluate(mbn)
###Output
100%|██████████| 2868/2868 [02:19<00:00, 20.51it/s]
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:249: RuntimeWarning: divide by zero encountered in true_divide
self.stats['r2'] = 1. - (self.stats['mse'] / self.stats['true_var'])
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:255: RuntimeWarning: divide by zero encountered in true_divide
self.stats['hor_r2'] = 1 - (self.stats['hor_mse'] / self.stats['hor_tvar'])
1%| | 3/287 [00:00<00:13, 20.55it/s]
###Markdown
Convolution
###Code
@threadsafe_generator
def data_generator_convo(data_dir, feature_fn, target_fn, shuffle=True,
batch_size=512, feature_norms=None, target_norms=None, noise=None):
"""Works on pre-stacked targets with truely random batches
Hard coded right now for
features = [TBP, QBP, VBP, PS, SOLIN, SHFLX, LHFLX]
and lev = 30
"""
# Open files
feature_file = h5py.File(data_dir + feature_fn, 'r')
target_file = h5py.File(data_dir + target_fn, 'r')
# Determine sizes
n_samples = feature_file['features'].shape[0]
n_batches = int(np.floor(n_samples / batch_size))
# Create ID list
idxs = np.arange(0, n_samples, batch_size)
if shuffle:
np.random.shuffle(idxs)
# generate
while True:
for i in range(n_batches):
batch_idx = idxs[i]
x = feature_file['features'][batch_idx:batch_idx + batch_size, :]
if feature_norms is not None: x = (x - feature_norms[0]) / feature_norms[1]
x1 = x[:, :90].reshape((x.shape[0], 30, -1))
x2 = x[:, 90:]
y = target_file['targets'][batch_idx:batch_idx + batch_size, :]
if target_norms is not None: y = (y - target_norms[0]) * target_norms[1]
if noise is not None:
x += np.random.normal(0, noise, x.shape)
yield [x1, x2], y
conv_gen = data_generator_convo(train_gen.data_dir, train_gen.feature_fn, train_gen.target_fn,
train_gen.shuffle, train_gen.batch_size, train_gen.feature_norms,
train_gen.target_norms, train_gen.noise)
mconv = conv_model((30, 3), 4, 65, [32, 64, 128], [256, 256], 1e-2, 'mse', activation='LeakyReLU',
padding='valid', stride=2)
mconv.summary()
mconv.fit_generator(
conv_gen,
train_gen.n_batches/5,
epochs=3,
workers=8,
max_queue_size=50,
callbacks=[LearningRateScheduler(get_lr_sched(1e-2, 5, 1))],
)
evaluate(mconv, convo=True)
###Output
100%|██████████| 2868/2868 [02:02<00:00, 23.50it/s]
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:254: RuntimeWarning: divide by zero encountered in true_divide
self.stats['r2'] = 1. - (self.stats['mse'] / self.stats['true_var'])
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:260: RuntimeWarning: divide by zero encountered in true_divide
self.stats['hor_r2'] = 1 - (self.stats['hor_mse'] / self.stats['hor_tvar'])
1%| | 3/287 [00:00<00:12, 23.63it/s]
###Markdown
Convolution with batch norm
###Code
mconvbn = conv_model((30, 3), 4, 65, [16, 32], [256, 256], 1e-2, 'mse', activation='LeakyReLU', batch_norm=True)
mconvbn.summary()
mconvbn.fit_generator(
conv_gen,
train_gen.n_batches/5,
epochs=3,
workers=8,
max_queue_size=50,
callbacks=[LearningRateScheduler(get_lr_sched(1e-2, 5, 1))],
)
evaluate(mconvbn, convo=True)
###Output
100%|██████████| 2868/2868 [02:34<00:00, 18.61it/s]
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:254: RuntimeWarning: divide by zero encountered in true_divide
self.stats['r2'] = 1. - (self.stats['mse'] / self.stats['true_var'])
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:260: RuntimeWarning: divide by zero encountered in true_divide
self.stats['hor_r2'] = 1 - (self.stats['hor_mse'] / self.stats['hor_tvar'])
1%| | 2/287 [00:00<00:15, 18.91it/s]
###Markdown
Convolution with tiles
###Code
@threadsafe_generator
def data_generator_convo_tile(data_dir, feature_fn, target_fn, shuffle=True,
batch_size=512, feature_norms=None, target_norms=None, noise=None):
"""Works on pre-stacked targets with truely random batches
Hard coded right now for
features = [TBP, QBP, VBP, PS, SOLIN, SHFLX, LHFLX]
and lev = 30
"""
# Open files
feature_file = h5py.File(data_dir + feature_fn, 'r')
target_file = h5py.File(data_dir + target_fn, 'r')
# Determine sizes
n_samples = feature_file['features'].shape[0]
n_batches = int(np.floor(n_samples / batch_size))
# Create ID list
idxs = np.arange(0, n_samples, batch_size)
if shuffle:
np.random.shuffle(idxs)
# generate
while True:
for i in range(n_batches):
batch_idx = idxs[i]
x = feature_file['features'][batch_idx:batch_idx + batch_size, :]
if feature_norms is not None: x = (x - feature_norms[0]) / feature_norms[1]
x = np.concatenate(
[
x[:, :90].reshape((x.shape[0], 30, -1)),
np.rollaxis(np.tile(x[:, 90:], (30, 1, 1)), 0, 2)
],
axis=-1,
)
y = target_file['targets'][batch_idx:batch_idx + batch_size, :]
if target_norms is not None: y = (y - target_norms[0]) * target_norms[1]
if noise is not None:
x += np.random.normal(0, noise, x.shape)
yield x, y
conv_gen_tile = data_generator_convo_tile(train_gen.data_dir, train_gen.feature_fn, train_gen.target_fn,
train_gen.shuffle, train_gen.batch_size, train_gen.feature_norms,
train_gen.target_norms, train_gen.noise)
mconvtile = conv_model((30, 7), None, 65, [16, 32, 64], [512], 1e-2, 'mse', activation='LeakyReLU', tile=True,
padding='valid', stride=2, dr=0.2)
mconvtile.summary()
mconvtile.fit_generator(
conv_gen_tile,
train_gen.n_batches/5,
epochs=3,
workers=8,
max_queue_size=50,
callbacks=[LearningRateScheduler(get_lr_sched(1e-2, 5, 1))],
)
evaluate(mconvtile, convo_tile=True)
###Output
100%|██████████| 2868/2868 [03:10<00:00, 15.09it/s]
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:259: RuntimeWarning: divide by zero encountered in true_divide
self.stats['hor_tvar'] = self.stats['hor_tsqmean'] - self.stats['hor_tmean'] ** 2
/data11/home/srasp/repositories/CBRAIN-CAM/notebooks/dev/cbrain/model_diagnostics.py:265: RuntimeWarning: divide by zero encountered in true_divide
columns=list(self.stats.keys()))
1%|▏ | 4/287 [00:00<00:08, 31.59it/s] |
tutorials/example_horse_collar.ipynb | ###Markdown
Horse collar data explorationThis notebook presents a systematic movement data exploration workflow. The proposed workflow consists of five main steps:1. **Establishing an overview** by visualizing raw input data records2. **Putting records in context** by exploring information from consecutive movement data records (such as: time between records, speed, and direction)3. **Extracting trajectories, locations & events** by dividing the raw continuous tracks into individual trajectories, locations, and events4. **Exploring patterns** in trajectory and event data by looking at groups of the trajectories or events5. **Analyzing outliers** by looking at potential outliers and how they may challenge preconceived assumptions about the dataset characteristicsThe workflow is demonstrated using horse collar tracking data provided by Prof. Lene Fischer (University of Copenhagen) and the Center for Technology & Environment of Guldborgsund Municiplaity in Denmark but should be generic enough to be applied to other tracking datasets.The workflow is implemented in Python using Pandas, GeoPandas, and MovingPandas (http://movingpandas.org).For an interactive version of this notebook visit https://mybinder.org/v2/gh/anitagraser/movingpandas/master. Setup
###Code
%matplotlib inline
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import urllib
import os
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame, read_file
from datetime import datetime, timedelta
from pyproj import CRS
import sys
sys.path.append("..")
import movingpandas as mpd
import warnings
warnings.simplefilter("ignore")
import hvplot.pandas # seems to be necessary for the following import to work
from holoviews import opts
opts.defaults(opts.Overlay(active_tools=['wheel_zoom']))
###Output
_____no_output_____
###Markdown
Raw data import
###Code
df = read_file('data/demodata_horse_collar.gpkg')
df['t'] = pd.to_datetime(df['timestamp'])
df = df.set_index('t').tz_localize(None)
print("This dataset contains {} records.\nThe first lines are:".format(len(df)))
df.head()
df.columns
df = df.drop(columns=['LMT_Date', 'LMT_Time',
'Origin', 'SCTS_Date', 'SCTS_Time', 'Latitude [?]', 'Longitude [?]',
'FixType', 'Main [V]', 'Beacon [V]', 'Sats', 'Sat',
'C/N', 'Sat_1', 'C/N_1', 'Sat_2', 'C/N_2', 'Sat_3', 'C/N_3', 'Sat_4',
'C/N_4', 'Sat_5', 'C/N_5', 'Sat_6', 'C/N_6', 'Sat_7', 'C/N_7', 'Sat_8',
'C/N_8', 'Sat_9', 'C/N_9', 'Sat_10', 'C/N_10', 'Sat_11', 'C/N_11',
'Easting', 'Northing',], axis=1)
df.head()
collar_id = df['CollarID'].unique()[0]
print("There is only one collar with ID {}.".format(collar_id))
df['Activity'].unique()
original_crs = df.crs
original_crs
###Output
_____no_output_____
###Markdown
1. Establishing an overviewThe first step in our proposed EDA workflow can be performed directly on rawinput data since it does not require temporally ordered data. It is therefore suitableas a first exploratory step when dealing with new data. Q1.1 Geographic extent: Is the geographical extent as expected and are there holes in the spatial coverage?
###Code
df.to_crs({'init': 'epsg:4326'}).hvplot(title='Geographic extent of the dataset', geo=True, tiles='OSM', width=500, height=500)
###Output
_____no_output_____
###Markdown
The main area (the horse's pasture?) is located south of Nykobing Strandhuse. However, we also find two records on the road north-west of the main area. Both points were recorded on 2018-11-14, which is the first day of the dataset.
###Code
pd.DataFrame(df).sort_values('lat').tail(2)
###Output
_____no_output_____
###Markdown
A potential hypothesis for the origin of these two records is that the horse (or the collar) was transported on 2018-11-14, taking the road from Nykobing Falster south to the pasture. If we remove these first two records from the dataset, the remainder of the records are located in a small area:
###Code
df = df[2:].to_crs({'init': 'epsg:4326'})
( df.hvplot(title='OSM showing paths and fences', size=2, geo=True, tiles='OSM', width=500, height=500) +
df.hvplot(title='Imagery showing land cover details', size=2, color='red', geo=True, tiles='EsriImagery', width=500, height=500) )
###Output
_____no_output_____
###Markdown
It looks like the horse generally avoids areas without green vegetation, since point patterns in these areas appear sparser than in other areas.
###Code
temp = df.to_crs(CRS(25832))
temp['geometry'] = temp['geometry'].buffer(5)
total_area = temp.dissolve(by='CollarID').area
total_area = total_area[collar_id]/10000
print('The total area covered by the data is: {:,.2f} ha'.format(total_area))
###Output
_____no_output_____
###Markdown
Q1.2 Temporal extent: Is the temporal extent as expected and are there holes in the temporal coverage?
###Code
print("The dataset covers the time between {} and {}.".format(df.index.min(), df.index.max()))
print("That's {}".format(df.index.max() - df.index.min()))
df['No'].resample('1d').count().hvplot(title='Number of records per day')
###Output
_____no_output_____
###Markdown
On most days there are 48 (+/- 1) records per day. However, there are some days with more records (in Nov 2018 and later between Mai and August 2019). There is one gap: On 2019-10-18 there are no records in the dataset and the previous day only contains 37 and the following day 27 records. Q1.3 Spatio-temporal gaps: Does the geographic extent vary over time or do holes appear during certain times? Considering that the dataset covers a whole year, it may be worthwhile to look at the individual months using small multiples map plots, for example:
###Code
df['Y-M'] = df.index.to_period('M')
a = None
for i in df['Y-M'].unique():
plot = df[df['Y-M']==i].hvplot(title=str(i), size=2, geo=True, tiles='OSM', width=300, height=300)
if a: a = a + plot
else: a = plot
a
###Output
_____no_output_____
###Markdown
The largest change between months seems to be that the southernmost part of the pasture wasn't used in August and September 2019. 2. Putting records in contextThe second exploration step puts movement records in their temporal and geographiccontext. The exploration includes information based on consecutive movement datarecords, such as time between records (sampling intervals), speed, and direction.Therefore, this step requires temporally ordered data. Q2.1 Sampling intervals: Is the data sampled at regular or irregular intervals?For example, tracking data of migratory animals is expected to exhibit seasonal changes. Such changes in vehicle tracking systems however may indicate issues with data collection .
###Code
t = df.reset_index().t
df = df.assign(delta_t=t.diff().values)
df['delta_t'] = df['delta_t'].dt.total_seconds()/60
pd.DataFrame(df).hvplot.hist('delta_t', title='Histogram of intervals between consecutive records (in minutes)', bins=60, bin_range=(0, 60))
###Output
_____no_output_____
###Markdown
The time delta between consecutive records is usually around 30 minutes. However, it seems that sometimes the intererval has been decreased to around 15 minutes. This would explain why some days have more than the usual 48 records. Q2.2 Speed values: Are there any unrealistic movements? For example: Does the data contain unattainable speeds?
###Code
tc = mpd.TrajectoryCollection(df, 'CollarID')
traj = tc.trajectories[0]
traj.add_speed()
max_speed = traj.df.speed.max()
print("The highest computed speed is {:,.2f} m/s ({:,.2f} km/h)".format(max_speed, max_speed*3600/1000))
###Output
_____no_output_____
###Markdown
Q2.3 Movement patterns: Are there any patterns in movement direction or speed?
###Code
pd.DataFrame(traj.df).hvplot.hist('speed', title='Histogram of speeds (in meters per second)', bins=90)
###Output
_____no_output_____
###Markdown
The speed distribution shows no surprising patterns.
###Code
traj.add_direction(overwrite=True)
pd.DataFrame(traj.df).hvplot.hist('direction', title='Histogram of directions', bins=90)
###Output
_____no_output_____
###Markdown
There is some variation in movement directions but no directions stand out in the histogram.Let's look at spatial patterns of direction and speed! Q2.4 Temporal context: Does the movement make sense in its temporal context? For example: Do nocturnal animal tracks show movement at night?
###Code
pd.DataFrame(traj.df).hvplot.heatmap(title='Mean speed by hour of day and month of year',
x='t.hour', y='t.month', C='speed', reduce_function=np.mean)
###Output
_____no_output_____
###Markdown
The movement speed by hour of day shows a clear pattern throughout the year with earlier and longer fast movements during the summer months and later and slower movements during the winter months. Temperature contextIn addition to time, the dataset also contains temperature information for each record:
###Code
traj.df['n'] = 1
pd.DataFrame(traj.df).hvplot.heatmap(title='Record count by temperature and month of year',
x='Temp [?C]', y='t.month', C='n', reduce_function=np.sum)
pd.DataFrame(traj.df).hvplot.heatmap(title='Mean speed by temperature and month of year',
x='Temp [?C]', y='t.month', C='speed', reduce_function=np.mean)
###Output
_____no_output_____
###Markdown
Q2.5 Geographic context: Does the movement make sense in its geographic context? For example: Do vessels follow traffic separation schemes defined in maritime maps? Are there any ship trajectories crossing land?
###Code
traj.df['dir_class'] = ((traj.df['direction']-22.5)/45).round(0)
a = None
temp = traj.df
for i in sorted(temp['dir_class'].unique()):
plot = temp[temp['dir_class']==i].hvplot(geo=True, tiles='OSM', size=2, width=300, height=300, title=str(int(i*45))+"°")
if a: a = a + plot
else: a = plot
a
###Output
_____no_output_____
###Markdown
There are no obvious spatial movement direction patterns.
###Code
traj.df['speed_class'] = (traj.df['speed']*2).round(1)
a = None
temp = traj.df
for i in sorted(temp['speed_class'].unique()):
filtered = temp[temp['speed_class']==i]
if len(filtered) < 10:
continue
plot = filtered.hvplot(geo=True, tiles='EsriImagery', color='red', size=2, width=300, height=300, title=str(i/2)) # alpha=max(0.05, 50/len(filtered)),
if a: a = a + plot
else: a = plot
a
###Output
_____no_output_____
###Markdown
Low speed records (classes 0.0 and 0.05 m/s) are distributed over the whole area with many points on the outline (fence?) of the area. Medium speed records (classes 0.1 and 0.15 m/s) seem to be more common along paths and channels. 3. Extracting trajectories & locations / eventsThe third exploration step looks at individual trajectories. It therefore requires thatthe continuous tracks are split into individual trajectories. Analysis results depend onhow the continuous streams are divided into trajectories, locations, and events. 3.1 Trajectory lines: Do the trajectory lines look plausible or are there indications of out of sequence positions or other unrealistic location jumps?
###Code
tc.hvplot()
###Output
_____no_output_____
###Markdown
Due to the 30 minute reporting interval, the trajectories are rather sparse. The trajectories mostly stay within the (fenced?) area. However, there are a few cases of positions outside the area. Movement during week 1
###Code
daily = mpd.TemporalSplitter(tc).split(mode='day')
a = None
for i in range(0,7):
if a: a = a + daily.trajectories[i].hvplot(title=daily.trajectories[i].id, c='speed', line_width=2, cmap='RdYlBu', width=300, height=300)
else: a = daily.trajectories[i].hvplot(title=daily.trajectories[i].id, c='speed', line_width=2, cmap='RdYlBu', width=300, height=300)
a
###Output
_____no_output_____
###Markdown
3.2 Home/depot locations: Do day trajectories start and end at the same home (for human and animal movement) or depot (for logistics applications) location?
###Code
daily_starts = daily.get_start_locations()
daily_starts['month'] = daily_starts.index.month
daily_starts.hvplot(c='month', geo=True, tiles='EsriImagery', cmap='autumn', width=500, height=500)
###Output
_____no_output_____
###Markdown
There is no clear preference for a certain home location where the horse would tend to spend the night. Instead of splitting by date, we can also specify a minimum movement speed and then split the continuous observation when this minimum speed is not reached for a certain time:
###Code
moving = mpd.TrajectoryCollection(traj.df[traj.df['speed'] > 0.05], 'CollarID')
moving = mpd.ObservationGapSplitter(moving).split(gap=timedelta(minutes=70))
moving.get_start_locations().hvplot(c='month', geo=True, tiles='EsriImagery', color='red', width=500, height=500)
###Output
_____no_output_____
###Markdown
3.3 Trajectory length
###Code
daily_lengths = [traj.get_length() for traj in daily]
daily_t = [traj.get_start_time() for traj in daily]
daily_lengths = pd.DataFrame(daily_lengths, index=daily_t, columns=['length'])
daily_lengths.hvplot(title='Daily trajectory length')
###Output
_____no_output_____
###Markdown
The length of the daily trajectories varies between 1.6 and 6.2 km. (It is worth noting that this has to be considered a lower bound of the movement due to the sparseness of the tracking data.)The seasonal trends agree well with the previously discovered seasonal movement speed patterns: winter trajectories tend to be shorter than summer trajectories. 3.4 Covered area Method 1: Convex hulls around trajectory
###Code
daily_areas = [(traj.id, traj.to_crs(CRS(25832)).to_linestring().convex_hull.area/10000) for traj in daily]
daily_areas = pd.DataFrame(daily_areas, index=daily_t, columns=['id', 'area'])
daily_areas.hvplot(title='Daily covered area [ha]', y='area')
###Output
_____no_output_____
###Markdown
Method 2: Buffered trajectory
###Code
daily_areas = [(traj.id, traj.to_crs(CRS(25832)).to_linestring().buffer(15).area/10000) for traj in daily]
daily_areas = pd.DataFrame(daily_areas, index=daily_t, columns=['id', 'area'])
daily_areas.hvplot(title='Daily covered area [ha]', y='area')
###Output
_____no_output_____
###Markdown
The ten smallest areas are:
###Code
daily_areas.sort_values(by='area')[:10]
###Output
_____no_output_____
###Markdown
The days with the smallest covered areas include the first and the last observation day (since they are only partially recorded). We can remove those:
###Code
daily_areas = daily_areas.drop(datetime(2018,11,14,12,30,8))
daily_areas = daily_areas.drop(datetime(2019,11,7,0,0,9))
###Output
_____no_output_____
###Markdown
The smallest area for a complete day was observed on 2018-11-19 with only 1.2 ha:
###Code
a = None
for i in daily_areas.sort_values(by='area')[:3].id:
traj = daily.get_trajectory(i)
if a: a = a + traj.hvplot(title=i, c='speed', line_width=2, cmap='RdYlBu', width=300, height=300)
else: a = traj.hvplot(title=i, c='speed', line_width=2, cmap='RdYlBu', width=300, height=300)
a
###Output
_____no_output_____
###Markdown
3.5 Stop detectionInstead of splitting the continuous track into daily trajectories, an alternative approach is to split it at stops. Stops can be defined as parts of the track where the moving object stays within a small area for a certain duration. Let's have a look at movement of one day and how stop detection parameter settings affect the results:
###Code
MAX_DIAMETER = 100
MIN_DURATION = timedelta(hours=3)
one_day = daily.get_trajectory('30788_2018-11-17 00:00:00')
one_day_stops = mpd.TrajectoryStopDetector(one_day).get_stop_segments(
min_duration=MIN_DURATION, max_diameter=MAX_DIAMETER)
( one_day.hvplot(title='Stops in Trajectory {}'.format(one_day.id), line_width=7.0, color='slategray', width=500) *
one_day_stops.hvplot(size=200, line_width=7, tiles=None, color='deeppink') *
one_day_stops.get_start_locations().hvplot(geo=True, size=200, color='deeppink') )
###Output
_____no_output_____
###Markdown
Let's apply stop detection to the whole dataset:
###Code
%%time
stops = mpd.TrajectoryStopDetector(tc).get_stop_segments(min_duration=MIN_DURATION, max_diameter=MAX_DIAMETER)
len(stops)
###Output
_____no_output_____
###Markdown
The spatial distribution reveals preferred stop locations:
###Code
stops.get_start_locations().hvplot(geo=True, tiles='OSM', color='deeppink', size=MAX_DIAMETER, alpha=0.2, width=500)
stop_durations = [stop.get_duration().seconds/3600.0 for stop in stops]
stop_durations = pd.DataFrame(stop_durations, columns=['duration'])
stop_durations.hvplot.hist(title='Stop duration histogram', xlabel='Duration [hours]', ylabel='n', bins=15)
###Output
_____no_output_____ |
notebooks/Key Features Examples.ipynb | ###Markdown
Establish the location of the results filepystops assumes results are being read from the standard FTA STOPS folder structure: STOPS |- Districts |- Inputs |- OutputData |- Reports |- Skims
###Code
report_file = os.path.join('..', 'data', 'STOPS', 'Reports', 'AC_m19-s19-d19#m19-s19-d19#m20-s19-d19-alt2_20_STOPSY2019Results.prn')
###Output
_____no_output_____
###Markdown
Read Skim Files - scenario = ('exist', 'nobuild', 'build') - mode = ('bs', 'fg', 'tr'), where bs = bus only, fg = fixed guideway, tr = all transit - access = ('walk', 'pnr', 'knr') - period = ('op', 'pk')
###Code
pystops.read_skim(report_file, scenario='nobuild', mode='fg', access='pnr', period='pk').head()
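# Hedged sketch (not in the original notebook): the same call can be looped over the
# option values listed above to pull several skims at once; only a few combinations
# are read here since each read can be slow.
skims = {}
for scenario in ('nobuild', 'build'):
    for period in ('pk', 'op'):
        skims[(scenario, period)] = pystops.read_skim(
            report_file, scenario=scenario, mode='fg', access='pnr', period=period)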
###Output
_____no_output_____
###Markdown
Example Table Reads 1.02: Station Listing
###Code
pystops.parse_table(report_file, '1.02').head(3)
###Output
_____no_output_____
###Markdown
2.04: Station Group Boardings Prior To Adjustment
###Code
pystops.parse_table(report_file, '2.04').head()
###Output
_____no_output_____
###Markdown
4.02: Weekday Incremental Linked Dist-to-Dist Transit Trips, Build, All Trips
###Code
pystops.parse_table(report_file, '4.02')
###Output
_____no_output_____
###Markdown
9.01: Average Weekday Station Boardings by Mode of Access
###Code
pystops.parse_table(report_file, '9.01').head()
###Output
_____no_output_____
###Markdown
10.01: Average Weekday Route Boardings by Zone (Production-End) Access Type
###Code
pystops.parse_table(report_file, '10.01').head()
###Output
_____no_output_____
###Markdown
345.01: Existing WEEKDAY LINKED TRANSIT TRIPS (All Transit/All car HH)
###Code
pystops.parse_table(report_file, '345.01').head()
###Output
_____no_output_____
###Markdown
Summarize Access Mode Percentages
###Code
pystops.summarize_access_modes(report_file, percentage=True)
###Output
_____no_output_____ |
Instacart_EDA_Categories__General.ipynb | ###Markdown
Things to note: * product_id seems to increase proportionally with department_id | aisle_id (when sorted, low product_ids first, high product_ids last), which might be useful later
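A quick, hedged way to sanity-check that observation (assuming the products frame still carries the numeric product_id, aisle_id, and department_id columns alongside the labels used below):
```
products.sort_values('product_id')[['product_id', 'department_id', 'aisle_id']].head()
products[['product_id', 'department_id', 'aisle_id']].corr()
```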
###Code
# Let's list the Category and Aisle counts, grouped alphabetically, arranged hierarchically.
data = {'Product_Count' : products['Category'].value_counts(),
'_Product_Count' : products['Sub_Category'].value_counts()
}
counts_df = pd.DataFrame(data=data).fillna(value='--')
counts_df.index.names = ['Category']
counts_df.sort_index()
# products categorized as 'missing' or 'other' account for 1,806 products,
# roughly 3.634% of the 49,688 total products. Not exactly negligible.
counts_df.sort_index()[99:103]
f, ax = plt.subplots(figsize=(14,10), ncols=1)
products['Category'].value_counts().plot(kind='bar')
_= ax.set_title('Categories by Total Products', size=22)
_= ax.set_ylabel('Count', size=20)
_= ax.tick_params(labelsize=16)
plt.xticks(ha='right', rotation=55);
# Let's plot sub-category (aisle) as a proportion of category (department)
#sns.barplot(x = stacked_bar_data.Group
#products.pivot('','')[].plot(kind='bar', stacked=True)
data1 = {'_Product_Count' : products['Sub_Category'].value_counts()
}
data2 = {'Product_Count' : products['Category'].value_counts()
}
d1_counts_df = pd.DataFrame(data=data1)
d2_counts_df = pd.DataFrame(data=data2)
data1['_Product_Count'].value_counts().sum()
d2_counts_df['Product_Count']['other']
data3 = { 'No. of Sub-Categories' : {},
'Products in Sub-Category' : {},
'Parent' : {},
'Products in Parent Category' : {}
}
for i in range(len(d1_counts_df['_Product_Count'])):
#print counts_df.index[i], counts_df['_Product_Count'][i]
match = re.findall(r'([\w ]+)( >> )([\w ]+)', d1_counts_df.index[i])
parent, child = match[0][0], match[0][2]
if parent in data3['No. of Sub-Categories']:
data3['No. of Sub-Categories'][parent] += 1
data3['Products in Sub-Category'][child] = d1_counts_df['_Product_Count'][i]
else:
data3['No. of Sub-Categories'][parent] = 1
data3['Products in Parent Category'][parent] = d2_counts_df['Product_Count'][parent]
data3['Products in Sub-Category'][child] = d1_counts_df['_Product_Count'][i]
if child not in data3['Parent']:
data3['Parent'][child] = parent
data3['Products in Parent Category'][child] = d2_counts_df['Product_Count'][parent]
stacked_category_data = pd.DataFrame(data=data3)
stacked_category_data['Parent'
] = stacked_category_data['Parent'
].fillna('None')
stacked_category_data['Products in Sub-Category'
] = stacked_category_data['Products in Sub-Category'
].fillna('N/A')
stacked_category_data['No. of Sub-Categories'
] = stacked_category_data['No. of Sub-Categories'
].fillna('None')
stacked_category_data.sort_values(by=['Parent', 'Products in Parent Category'])[21:40]
# Let's plot Sub_Category product groups (aisle)
# as a proportion of total products in Parent Category (department)
newstacked = stacked_category_data.groupby(['Parent',
stacked_category_data.index,
'Products in Sub-Category'
]).size()[21:40]
newstacked
aisles_only = stacked_category_data.sort_values(by=
['Parent',
'Products in Parent Category'])[21:]
Top15 = aisles_only.sort_values(by='Products in Sub-Category',
ascending=False)[:15]
Bottom15 = aisles_only.sort_values(by='Products in Sub-Category',
ascending=False)[119:]
# Plotting top and Bottom 15 Sub-Categories by Product Count
f, ax = plt.subplots(figsize=(14,5))
Top15['Products in Sub-Category'].plot(kind='bar')
_= ax.set_title('Top 15 Sub-Categories by Count', size=22)
_= ax.set_ylabel('Count', size=20)
_= ax.tick_params(labelsize=16)
plt.xticks(ha='right', rotation=55);
# Notice that the larger Sub-Categories reflect more vague grouping criteria,
# e.g. "missing" isn't exactly specific
# which suggests less helpful groupings for making predictions
# as opposed to the smaller sub-categories below
f, ax = plt.subplots(figsize=(14,5))
Bottom15['Products in Sub-Category'].plot(kind='bar')
_= ax.set_title('Bottom 15 Sub-Categories by Count', size=22)
_= ax.set_ylabel('Count', size=20)
_= ax.tick_params(labelsize=16)
plt.xticks(ha='right', rotation=55);
# Now let's try to find some category trends. First, let's check out the alcohol category.
master_set = pd.merge(pd.merge(prior_set, orders, on='order_id', how='right'), products,
on='product_id',
how='left')
master_set.head()
# Let's plot a heatmap of the count of products ordered in the alcohol category by hour and day
category_five = master_set[master_set['department_id'] == 5.0]
cat_heatmap_data = pd.DataFrame(category_five[['order_dow', 'order_hour_of_day']].groupby(['order_dow',
'order_hour_of_day']
).size()
).reset_index()
cat_heatmap_data = cat_heatmap_data.pivot(index='order_hour_of_day',
columns='order_dow',
values=0)
f, ax = plt.subplots(figsize=(16,12))
_= ax.set_title('Alcohol Category:\n Time-Series Trend', size=22)
_= ax.set_ylabel('Hour of Day', size=20, labelpad=15)
_= ax.set_xlabel('Day of Week', size=20, labelpad=15)
_= ax.tick_params(labelsize=16)
sns.heatmap(cat_heatmap_data, ax=ax, annot=True, fmt="d");
bananas = master_set[master_set['product_id'] == 24852]
cat_heatmap_data = pd.DataFrame(bananas[['order_dow', 'order_hour_of_day']].groupby(['order_dow',
'order_hour_of_day']
).size()
).reset_index()
cat_heatmap_data = cat_heatmap_data.pivot(index='order_hour_of_day',
columns='order_dow',
values=0)
f, ax = plt.subplots(figsize=(16,12))
_= ax.set_title('Organic Bananas:\n Time-Series Trend', size=22)
_= ax.set_ylabel('Hour of Day', size=20, labelpad=15)
_= ax.set_xlabel('Day of Week', size=20, labelpad=15)
_= ax.tick_params(labelsize=16)
sns.heatmap(cat_heatmap_data, ax=ax, annot=True, fmt="d");
###Output
_____no_output_____ |
note/template.ipynb | ###Markdown
Main Title Preliminaries Imports
###Code
import os
import sys
import numpy as np
import pprint
from os.path import dirname
from networkx.drawing.nx_pydot import to_pydot
# Import morpheus
note_dir = dirname(os.getcwd())
root_dir = dirname(note_dir)
src_dir = os.path.join(root_dir, "src")
sys.path.append(src_dir)
import morpheus
from morpheus import Morpheus
from morpheus.tests import (default_dataset,
default_m_list_for_mercs,
random_m_list_for_mercs)
# Visuals
from morpheus.graph import to_dot
# Ipython things
from IPython.display import Image, display
from IPython.core.display import HTML
# Pretty Printer
pp = pprint.PrettyPrinter(indent=4)
###Output
/cw/dtailocal/Repos
|
P2-Image_captioning/0_Dataset.ipynb | ###Markdown
Computer Vision Nanodegree Project: Image Captioning---The Microsoft **C**ommon **O**bjects in **CO**ntext (MS COCO) dataset is a large-scale dataset for scene understanding. The dataset is commonly used to train and benchmark object detection, segmentation, and captioning algorithms. You can read more about the dataset on the [website](http://cocodataset.org/home) or in the [research paper](https://arxiv.org/pdf/1405.0312.pdf).In this notebook, you will explore this dataset, in preparation for the project. Step 1: Initialize the COCO APIWe begin by initializing the [COCO API](https://github.com/cocodataset/cocoapi) that you will use to obtain the data.
###Code
import os
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO
# initialize COCO API for instance annotations
dataDir = '/opt/cocoapi'
dataType = 'val2014'
instances_annFile = os.path.join(dataDir, 'annotations/instances_{}.json'.format(dataType))
coco = COCO(instances_annFile)
# initialize COCO API for caption annotations
captions_annFile = os.path.join(dataDir, 'annotations/captions_{}.json'.format(dataType))
coco_caps = COCO(captions_annFile)
# get image ids
ids = list(coco.anns.keys())
###Output
loading annotations into memory...
Done (t=6.31s)
creating index...
index created!
loading annotations into memory...
Done (t=0.48s)
creating index...
index created!
###Markdown
Step 2: Plot a Sample ImageNext, we plot a random image from the dataset, along with its five corresponding captions. Each time you run the code cell below, a different image is selected. In the project, you will use this dataset to train your own model to generate captions from images!
###Code
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
%matplotlib inline
# pick a random image and obtain the corresponding URL
ann_id = np.random.choice(ids)
img_id = coco.anns[ann_id]['image_id']
img = coco.loadImgs(img_id)[0]
url = img['coco_url']
# print URL and visualize corresponding image
print(url)
I = io.imread(url)
plt.axis('off')
plt.imshow(I)
plt.show()
# load and display captions
annIds = coco_caps.getAnnIds(imgIds=img['id']);
anns = coco_caps.loadAnns(annIds)
coco_caps.showAnns(anns)
###Output
http://images.cocodataset.org/val2014/COCO_val2014_000000100203.jpg
|
TensorFlow-2.0/03-TensorFlow-Keras/01-tf.keras_classification_model.ipynb | ###Markdown
1. Prepare the data
###Code
# Imports assumed for this notebook (the original import cell is not included in this excerpt)
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers
import matplotlib.pyplot as plt
import pandas as pd

(x_train_all, y_train_all), (x_test, y_test) = datasets.fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
def show_single_image(img_arr):
plt.imshow(img_arr, cmap='binary')
show_single_image(x_train[0])
def show_imgs(n_rows, n_cols, x_data, y_data, class_names):
assert len(x_data) == len(y_data)
assert n_rows * n_cols < len(x_data)
plt.figure(figsize=(n_cols*1.6, n_rows*2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(x_data[index], cmap='binary', interpolation='nearest')
plt.axis('off')
plt.title(class_names[y_data[index]])
class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress',
'Coat', 'Sandal', 'Shirt', 'Sneaker',
'Bag', 'Ankle boot']
show_imgs(3, 6, x_train, y_train, class_names)
# plt.subplot?
###Output
_____no_output_____
###Markdown
2. Build the model
###Code
model = keras.models.Sequential([
layers.Flatten(input_shape=(28, 28)),
layers.Dense(300, activation='relu'),
layers.Dense(100, activation='relu'),
layers.Dense(10, activation='softmax')
])
# relu: y = max(0, x)
# softmax: turns a vector into a probability distribution. x = [x1, x2, x3],
# y = [e^x1/sum, e^x2/sum, e^x3/sum], sum = e^x1 + e^x2 + e^x3
optimizer = optimizers.SGD(learning_rate=0.001)
# reason for "sparse": y is given as integer class indices, not one-hot vectors;
# sparse_categorical_crossentropy handles the index -> one-hot conversion internally
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer,
metrics=["accuracy"])
# model.compile?
model.layers
model.summary()
# [None, 784] * W + b -> [None, 300] W.shape [784, 300], b = [300]
history = model.fit(x_train, y_train, epochs=7, validation_data=(x_valid, y_valid))
type(history)
history.history
def plot_learning_curves(history):
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plot_learning_curves(history)
model.evaluate(x_test, y_test, verbose=0)
###Output
_____no_output_____ |
Bloque 3 - Machine Learning/01_Supervisado/0_RESUMEN/Pasos_Modelos.ipynb | ###Markdown
Resumen Machine LearningEn este notebook resumimos cómo usar los diferentes algoritmos de ML, así como alguna cosita más que pueda ayudarnos.*Nota: Este resumen está abierto a sugerencias ;)* Índice:Basic EDA -->1. [Basic EDA](Basic_EDA) 1. [Generación de puntos](Generacion_de_puntos) 2. [Gráfico de líneas](Grafico_de_lineas) 3. [Gráfico de dispersión](Grafico_de_dispersion) 4. [Matriz de correlación](Matriz_de_correlacion)1. [Mínimo tratamiento de datos](Minimo_Tratamiento_de_Datos) 1. [Mapear](Mapear) 2. [One Hot Encoding (dummies)](One_Hot_Encoding) 3. [Escalado](Escalado)1. [Separación Train/Test](Separacion_Train_Test) 1. [Tratamiento aplicado a la separación train/test](Separacion_Tratamiento)2. [Algoritmos Supervisados](Algoritmos_Supervisados) 1. [Regresión](Regresion) 1. [Regresión Lineal](Regresion_Lineal) 2. [Regularización](Regularizacion) 1. [Ridge](Ridge) 2. [Lasso](Lasso) 3. [Elastic Net](Elastic_Net) 3. [Árboles de decisión](Regresion_Arboles_de_decision) 4. [Support Vector Regressor (SVR)](Support_Vector_Regressor) 1. [SVR Lineal](SVR_Lineal) 1. [SVR con kernel](SVR_kernel) 2. [Clasificación](Clasificacion) 1. [Regresión Logística](Regresion_Logistica) 2. [Árboles de decisión](Clasificacion_Arboles_de_decision) 3. [K Nearest Neighbors](Clasificacion_KNN) 4. [Support Vector Classifier (SVC)](Support_Vector_Classifier) 1. [SVC Lineal](SVC_Lineal) 1. [SVC con kernel](SVC_kernel)1. [Métricas](Metricas) 1. [Regresión](Metricas_Regresion) 1. [MAE](Metricas_MAE) 2. [MSE](Metricas_MSE) 3. [RMSE](Metricas_RMSE) 4. [$R^2$](Metricas_R2) 2. [Clasificación](Metricas_Clasificacion) 1. [Matriz de confusión](Metricas_Matriz_Confusion) 2. [Porcentaje de acierto (Accuracy)](Metricas_Accuracy) 3. [Precision](Metricas_Precision) 4. [Recall](Metricas_Recall) 6. [F1](Metricas_F1) 7. [AUC y Curva ROC](Metricas_AUC)1. [Ejemplos](Ejemplos) 1. [Regresión](Ejemplos_Regresion) 1. [Regresión Lineal](Ejemplos_Regresion_Lineal) 2. [Regularización](Ejemplos_Regularizacion) 1. [Ridge](Ejemplos_Ridge) 1. [Lasso](Ejemplos_Lasso) 1. [Elastic Net](Ejemplos_Elastic_Net) 3. [Árboles de decisión](Ejemplos_Regresion_Arboles_de_decision) 4. [Support Vector Regressor (SVR)](Ejemplos_Support_Vector_Regressor) 1. [SVR Lineal](Ejemplos_SVR_Lineal) 1. [SVR con kernel](Ejemplos_SVR_kernel) 2. [Clasificación](Ejemplos_Clasificacion) 1. [Regresión Logística](Ejemplos_Regresion_Logistica) 2. [Árboles de decisión](Ejemplos_Clasificacion_Arboles_de_decision) 3. [K Nearest Neighbors](Ejemplos_Clasificacion_KNN) 4. [Support Vector Classifier (SVC)](Ejemplos_Support_Vector_Classifier) 1. [SVC Lineal](Ejemplos_SVC_Lineal) 1. [SVC con kernel](Ejemplos_SVC_kernel)
###Code
# Cosas básicas para ejecutar el notebook
import numpy as np
import pandas as pd
# Para ayudarnos a lo largo de las demostraciones, vamos a ayudarnos del siguiente DataFrame:
np.random.seed(10)
df = pd.DataFrame({'num_1': np.random.rand(100),
'cat_1': np.random.choice(['A', 'B', 'C', 'D'], size=100),
'num_2': np.random.randint(100, size=100)
})
df['num_3'] = df['num_1'] + 2*df['num_2']*np.random.rand(100)
df['num_4'] = (df['num_3'] + np.random.randint(-40, 40, size=100) > 50).astype(int)
df
###Output
_____no_output_____
###Markdown
1. Basic EDALo primero que debemos hacer es un buen análisis de datos. Como esto es un resumen, simplemente voy a dejarte algunas funciones básicas de representación. Nada del otro mundo. Tras haber visto algo tan elemental como la generación, pasaremos a ver las gráficas más básicas 1.1 Generación de puntos
###Code
# Punto inicial, punto final y cuántos puntos quieres en total:
punto_inicial = 0
punto_final = 10
n_puntos = 11
x = np.linspace(punto_inicial, punto_final, n_puntos)
x
# Punto inicial, punto final (al que no se va a llegar) y distancia entre puntos:
punto_inicial = 0
punto_final = 10
paso_numeros = 2
x = np.arange(punto_inicial, punto_final, paso_numeros)
x
###Output
_____no_output_____
###Markdown
Tras haber visto algo tan elemental como la generación, pasaremos a ver las gráficas más básicas 1.2 Gráfico de líneas
###Code
# Generamos puntos para los ejes X e Y (tú tendrás otras variables, claro).
# Por ejemplo, vamos a representar la función x^2 frente a x**2 + ((x**1.5)*np.sin(x))
x = np.linspace(0, 100, 1001)
y = x**2
y2 = x**2 + ((x**1.5)*np.sin(x))
# Importamos la librería así (hacer siempre al principio del notebook, al importar el resto de cosas):
import matplotlib.pyplot as plt
%matplotlib inline
# Y representamos:
plt.plot(x, y, color='blue', label='x^2')
plt.plot(x, y2, color='red', label='x^2 + ((x^1.5)*np.sin(x))')
# Añadimos rejilla:
plt.grid();
# Añadimos leyenda:
plt.legend();
###Output
_____no_output_____
###Markdown
1.3 Gráfico de dispersión
###Code
# Generamos puntos para los ejes X e Y (tú tendrás otras variables, claro).
# Por ejemplo, vamos a representar la función x^2 frente a x**2 + ((x**1.5)*np.sin(x))
x = np.linspace(0, 100, 51)
y = x**2
y2 = x**2 + ((x**1.5)*np.sin(x))
# Importamos la librería así (hacer siempre al principio del notebook, al importar el resto de cosas):
import matplotlib.pyplot as plt
%matplotlib inline
# Y representamos:
plt.scatter(x, y, marker='.', color='blue', label='x^2')
plt.scatter(x, y2, marker='.', color='red', label='x^2 + ((x^1.5)*np.sin(x))')
# Añadimos rejilla:
plt.grid();
# Añadimos leyenda:
plt.legend();
###Output
_____no_output_____
###Markdown
1.4 Matriz de correlaciónOtra de las visualizaciones que nos pueden ser de mucha utilidad es la matriz de correlación, ya que nos indicará la relación lineal entre las diferentes variables que tenemos.Recuerda: Si está cerca de 1 o de -1 tendrá mucha relación lineal (ya sea positiva, es decir, que crezcan en el mismo sentido; o negativa, es decir, que cuando una crece la otra decrece). En cambio, si está cercana a 0, significará que no hay nada de relación.Para considerar que están fuertemente correlacionadas se suele poner el umbral en ``valor > 0.7`` o ``valor < -0.7``.
###Code
# Vamos a crearnos un DataFrame con datos generados ahora mismo, aunque tú ya tendrás un DataFrame con sus datos y todo)
x = np.linspace(0, 100, 51)
df_ejemplo = pd.DataFrame({'x': x, 'y': x**2, 'y2': -x * np.random.rand(len(x)), 'y3': x**2 * ((x**1.5)*np.sin(x))})
# Importamos la librería seaborn:
import seaborn as sns
# Nos creamos la matriz de correlación:
matriz_correlacion = df_ejemplo.corr()
# Y representamos con u mapa de calor:
sns.heatmap(matriz_correlacion, annot=True, square=True, vmin=-1, vmax=1)
###Output
_____no_output_____
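###Markdown
A small follow-up sketch that lists, from the `matriz_correlacion` computed above, the variable pairs whose absolute correlation exceeds the 0.7 threshold mentioned in the text:
###Code
# Pairs of variables with |correlation| > 0.7
umbral = 0.7
mascara_superior = np.triu(np.ones(matriz_correlacion.shape, dtype=bool), k=1)  # upper triangle, no diagonal
pares = matriz_correlacion.where(mascara_superior).stack()
print(pares[pares.abs() > umbral])
###Output
_____no_output_____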
###Markdown
2. Mínimo tratamiento de datosAntes de continuar, lo primero que se debe de hacer es convertir las variables a numéricas para que podamos utilizarlas en los modelos. 2.1 Conversión de categóricasBásicamente, el modelo no entiende letras, solo números. ASí que tenemos que convertir las palabras a números. Tenemos 2 opciones: "mapear" (cambiar las palabras por números, siendo el mismo número para la misma palabra) o crearse variables "dummy" (crearse una columna por cada valor que pueda tomar la variable): 2.1.1 MapearIdea: sacamos los valores distintos de esa variable categórica. Le asignamos un número distinto a cada palabra. Sustituimos las palabras por los números que hemos deicidido antes.Problema: estamos asignando un orden a las palabras, por lo que puede no resultar la mejor idea, ya que si estamos convirtiendo, por ejemplo, colores, no tendría mucho sentido decir que un ``verde`` es mejor que un ``morado``, y que este sea peor que un ``azul``.
###Code
# Seleccionamos variable a mapear:
# En este caso, será df['cat_1']
# Hacemos una copia para no cambiar el df para el resto de ejercicios:
df_ejemplo = df.copy()
# Y vemos sus valores distintos:
df_ejemplo['cat_1'].unique()
# Nos creamos un diccionario para hacer la conversión:
map_cat_1 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3
}
# Y aplicamos el mapeo, guardándolo en una nueva variable:
# df_ejemplo['cat_1'] = df_ejemplo['cat_1'].map(map_cat_1)
df_ejemplo['num_cat_1'] = df_ejemplo['cat_1'].map(map_cat_1)
# Comprobamos:
df_ejemplo[['cat_1', 'num_cat_1']]
# Y nos eliminamos la anterior:
df_ejemplo = df_ejemplo.drop('cat_1', axis=1)
# Mostramos nuestro nuevo dataframe:
df_ejemplo
###Output
_____no_output_____
###Markdown
2.1.2 One Hot Encoding (dummies)Idea: nos creamos una nueva columna por cada valor diferente de la/s variable/s categóricas. La función se encarga de convertir todas aquellas categóricas, eliminando las categóricas originales y manteniendo las numéricas originales. Se encarga de todo por nosotros.*IMPORTANTE*: Cuando queramos aplicarlo a un proyecto, debemos de tener en cuenta si es train o test, [aquí](Separacion_Tratamiento).
###Code
# Hacemos una copia para no cambiar el df para el resto de ejercicios:
df_ejemplo = df.copy()
# Hacemos su conversión:
df_ejemplo = pd.get_dummies(df_ejemplo)
# Mostramos:
df_ejemplo
###Output
_____no_output_____
###Markdown
2.1.3 EscaladoIdea: reducir todas las variables al mismo rango. La función se encarga de quitar la media y escalar a la varianza unidad.Para utilizarlo, necesitamos un DataFrame con variables numéricas. Al transformar, nos devolverá un array de 2D (no es un DataFrame, pero guarda la misma estructura que el DataFrame original).*Nota: Es opcional pero ÚTIL en la mayoría de modelos, salvo árboles de decisión, que no le afecta**IMPORTANTE*: Cuando queramos aplicarlo a un proyecto, debemos de tener en cuenta si es train o test, [aquí](Separacion_Tratamiento).
###Code
# DataFrame numérico:
df_ejemplo = df.copy()
df_ejemplo = df_ejemplo[['num_1', 'num_2', 'num_3', 'num_4']]
# Importamos el objeto:
from sklearn.preprocessing import StandardScaler
# Nos creamos el objeto:
scaler = StandardScaler()
# Entrenamos:
scaler.fit(df_ejemplo)
# Y transformamos:
df_ejemplo = scaler.transform(df_ejemplo)
# Mostramos:
df_ejemplo
###Output
_____no_output_____
###Markdown
3. Separación Train/TestTras convertir nuestras variables, antes de utilizar los modelos, es bueno separar en train y test. ¿Por qué? Porque si entreno sobre train y pruebo los patrones extraídos sobre test, me sirve para comprobar si he generalizado bien. Si no lo hiciésemos, puede que esté "aprendiendo demasiado" de los datos, y cuando vaya a predecir algo de verdad, pues devuelva valores erróneos, haciendo que nuestro modelo no sirva para casi nada.
###Code
# Nos copiamos el df para no modificarlo, y lo convertimos para tener solo variables numéricas:
df_ejemplo = df.copy()
df_ejemplo = pd.get_dummies(df_ejemplo)
# Importamos objeto a utilizar:
from sklearn.model_selection import train_test_split
# Separamos las variables que podemos utilizar para predecir de la que queremos predecir:
y_col = 'num_3' # vamos a predecir 'num_3'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
# Y lo utilizamos:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Ahora ya podemos acceder a las variables de train/test:
# X_train: variables predictoras con las que entrenar el modelo
# y_train: variable a predecir (etiquetas de los datos) con la que entrenar el modelo para que extraiga sus patornes en base a X_train
# X_test: variables predictoras con las que probar el modelo
# y_test: variable a predecir con la que probar el modelo
###Output
_____no_output_____
###Markdown
3.1 Tratamiento aplicado a la separación train/testSi queremos aplicar cualquier tratamiento de datos al modelo, como puede ser el relleno de nulos, escalado o conversión de variables categóricas, es necesario hacerlo de forma ordenada. En primer lugar, se tiene que hacer el tratamiento sobre ``train``, y luego aplicarlo sobre ``test``, ya que no podemos utilizar nada de información de test sobre train, pues de hacerlo estaríamos haciendo trampa.Los objetos de ``sklearn`` para el tratamiento de datos, como el escalado, están pensados para este tipo de casos: entrenamos sobre train para que extraiga los valores necesarios para su cáculo (como la media y la desviación típica, en el caso del escalado) y transformamos sobre train y sobre test (sí, será con los valores que hemos sacado de train).A continuación, se presenta un ejemplo de tratamiento de datos donde aplicamos conversión de variables y escalado
###Code
# Nos copiamos el df para no modificarlo, y lo convertimos para tener solo variables numéricas:
df_ejemplo = df.copy()
# Importamos los objetos a utilizar:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Separamos las variables que podemos utilizar para predecir de la que queremos predecir:
y_col = 'num_3' # vamos a predecir 'num_3'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
# Y lo utilizamos para separar los datos:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Convertimos las variables categóricas:
# Tratamiento categóricas:
X_train = pd.get_dummies(X_train)
X_test = pd.get_dummies(X_test)
for col in X_train.columns:
if col not in X_test.columns:
X_test[col] = 0
X_test = X_test[X_train.columns]
# Nos creamos el objeto para escalar:
scaler = StandardScaler()
# Entrenamos con train:
scaler.fit(X_train)
# Y transformamos:
# En train:
X_train_scaled = scaler.transform(X_train)
# Y en test:
X_test_scaled = scaler.transform(X_test)
# # Mostramos:
# X_train_scaled
# OJO: Lo que nos devuelve el scaler son arrays de numpy. Nos sirve para utilizarlo en el modelo, pero por si quisiéramos tratarlos después,
#podemos volver a convertirlos a DataFrame, ya que tienen el mismo orden:
X_train_scaled_df = pd.DataFrame(X_train_scaled, columns = X_train.columns)
X_test_scaled_df = pd.DataFrame(X_test_scaled, columns = X_test.columns)
# Mostramos:
X_train_scaled_df
###Output
_____no_output_____
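###Markdown
As a note on the design choice: the same fit-on-train / transform-on-both logic can be wrapped in a sklearn `Pipeline` + `ColumnTransformer`, which prevents test-set leakage by construction. The sketch below assumes the toy `df` defined at the beginning of this notebook (columns `cat_1`, `num_1`, `num_2`, `num_4` as predictors of `num_3`):
###Code
# Sketch: one-hot encoding + scaling + linear regression inside a single Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

X_pipe = df.drop(columns='num_3')
y_pipe = df['num_3']
X_tr, X_te, y_tr, y_te = train_test_split(X_pipe, y_pipe, test_size=0.3, random_state=0)

preprocess = ColumnTransformer([
    ('cat', OneHotEncoder(handle_unknown='ignore'), ['cat_1']),
    ('num', StandardScaler(), ['num_1', 'num_2', 'num_4']),
])
pipe = Pipeline([('pre', preprocess), ('model', LinearRegression())])
pipe.fit(X_tr, y_tr)        # all fitting uses only the training split
pipe.score(X_te, y_te)      # test is transformed with the training statistics
###Output
_____no_output_____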
###Markdown
4. Algoritmos Supervisados 4.1 RegresiónLos algoritmos de regresión nos servirán para predecir valores numéricos. Por ejemplo, predecir cuánto cuesta un producto, cuántas personas van a asistir a un evento, cuánto cobrará una persona... Son valores numéricos, con sus decimales y todo lo que ello supone.Antes de nada, tenemos que decidir qué vamos a predecir y separar los datos en train y test. En este caso, predeciremos 'num_3', que es numérica:
###Code
# Nos copiamos el df para no modificarlo, y lo convertimos para tener solo variables numéricas:
df_ejemplo = df.copy()
df_ejemplo = pd.get_dummies(df_ejemplo)
# Primero debemos separar en train y test (tal como hemos visto antes):
from sklearn.model_selection import train_test_split
y_col = 'num_3' # vamos a predecir 'num_3'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
###Output
_____no_output_____
###Markdown
4.1.1 Regresión Lineal
###Code
# Importamos el objeto:
from sklearn.linear_model import LinearRegression
# Creamos el modelo:
model = LinearRegression()
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.2 RegularizaciónPodemos aplicar penalizaciones a las regresiones lineales para generalizar más: 4.1.2.1 Ridge
###Code
# Importamos el objeto:
from sklearn.linear_model import Ridge
# Creamos el modelo:
model = Ridge(alpha = 1)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.2.2 Lasso
###Code
# Importamos el objeto:
from sklearn.linear_model import Lasso
# Creamos el modelo:
model = Lasso(alpha = 1)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.2.3 Elastic Net``l1_ratio``
###Code
# Importamos el objeto:
from sklearn.linear_model import ElasticNet
# Creamos el modelo:
model = ElasticNet(alpha = 1.5, l1_ratio=1)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.3 Árboles de decisión (Regresión)
###Code
# Importamos el objeto:
from sklearn.tree import DecisionTreeRegressor
# Creamos el modelo:
model = DecisionTreeRegressor(max_depth=3)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.4 Support Vector Regressor (SVR) 4.1.4.1 SVR Lineal
###Code
# Importamos el objeto:
from sklearn.svm import LinearSVR
# Creamos el modelo:
model = LinearSVR(epsilon=10)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.1.4.2 SVR con kernel
###Code
# Importamos el objeto:
from sklearn.svm import SVR
# Creamos el modelo:
model = SVR(kernel='poly', epsilon=10)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
###Output
_____no_output_____
###Markdown
4.2 ClasificaciónLos algoritmos de clasificación nos servirán para predecir valores finitos de una variable, es decir, clases. Por ejemplo, predecir a qué conjunto de la población pertenece un individuo (niño, adolescente, adulto o jubilado), predecir si una persona comprará un producto o no, predecir si una persona es diestra o zurda... Son valores finitos que indican categorías. Normalmente, no tienen relación de orden entre ellas.Antes de nada, al igual que antes, tenemos que decidir qué vamos a predecir y separar los datos en train y test. En este caso, predeciremos 'num_4', que es una variable que solo toma 2 valores (1/0). Pese a ser números como tal, hacen referencia a una clase u otra:
###Code
# Nos copiamos el df para no modificarlo, y lo convertimos para tener solo variables numéricas:
df_ejemplo = df.copy()
df_ejemplo = pd.get_dummies(df_ejemplo)
# Primero debemos separar en train y test (tal como hemos visto antes):
from sklearn.model_selection import train_test_split
y_col = 'num_4' # vamos a predecir 'num_4'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
###Output
_____no_output_____
###Markdown
4.2.1 Regresión Logística
###Code
# Importamos el objeto:
from sklearn.linear_model import LogisticRegression
# Creamos el modelo:
model = LogisticRegression()
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, accuracy):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 1]])
# En el caso de los modelos de clasificación, podemos obtener no solo el valor predicho sino la probabilidad de que sea ese valor para el algoritmo:
model.predict_proba([[0.303063, 2, 1, 0, 0, 0, 1]])
###Output
_____no_output_____
###Markdown
4.2.2 Árboles de decisión
###Code
### Importamos el objeto:
from sklearn.tree import DecisionTreeClassifier
# Creamos el modelo:
model = DecisionTreeClassifier(max_depth=2)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, accuracy):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 1]])
# En el caso de los modelos de clasificación, podemos obtener no solo el valor predicho sino la probabilidad de que sea ese valor para el algoritmo:
model.predict_proba([[0.303063, 2, 1, 0, 0, 0, 1]])
###Output
_____no_output_____
###Markdown
4.2.3 K Nearest Neighbor (KNN)
###Code
### Importamos el objeto:
from sklearn.neighbors import KNeighborsClassifier
# Creamos el modelo:
model = KNeighborsClassifier(n_neighbors=5)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, accuracy):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 1]])
# En el caso de los modelos de clasificación, podemos obtener no solo el valor predicho sino la probabilidad de que sea ese valor para el algoritmo:
model.predict_proba([[0.303063, 2, 1, 0, 0, 0, 1]])
###Output
_____no_output_____
###Markdown
Pues con esto, ya podemos utilizar nuestros modelos: 4.2.4 Support Vector Classifier (SVC) 4.2.4.1 SVC Lineal
###Code
# Importamos el objeto:
from sklearn.svm import LinearSVC
# Creamos el modelo:
model = LinearSVC(C=10)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 1]])
# Este modelo no tiene predict_proba
###Output
_____no_output_____
###Markdown
4.2.4.2 SVR con kernel
###Code
# Importamos el objeto:
from sklearn.svm import SVC
# Creamos el modelo:
model = SVC(kernel='poly', C=10)
# Entrenamos:
model.fit(X_train, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
model.score(X_test, y_test)
# También podemos predecir, siempre y cuando le pasemos objetos con la misma estructura que lo que hemos utilizado para entrenar:
model.predict([[0.303063, 2, 1, 0, 0, 0, 0]])
# Este modelo no tiene predict_proba
###Output
_____no_output_____
###Markdown
5. MétricasEn función del tipo de problema, regresión o clasificación, tendremos unas métricas u otras.*IMPORTANTE*: Las métricas dependen de la distribución de la variable objetivo: - Regresión: si tenemos muchos datos en torno a cierta región, puede que los adivinemos muy bien a costa de fallar otros por bastante y que apenas se note en el resultado final. - Clasificación: el desbalanceo en clasificación es mucho menos evidente, y podemos pensar que está bien, pero si tenemos muchos de una clase, podemos decir que todos son de esa clase y obtener unas métricas altísimas, ya que estaríamos acertando la mayoría pero no sabríamos diferenciar una de otra. 5.1 RegresiónLas métricas de regresión se basan en calcular errores de los valores predichos frente a los reales.
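A quick self-contained illustration of the point above (toy numbers, not from the dataset): a single large error barely moves the MAE but inflates the RMSE, so the choice of metric decides how much a few badly-predicted points matter.
###Code
# Toy illustration: effect of a single large error on MAE vs RMSE
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_real = np.array([10, 12, 11, 13, 12, 11, 10, 12])
y_pred_ok = y_real + 1                      # small error on every point
y_pred_outlier = y_pred_ok.copy()
y_pred_outlier[0] = y_real[0] + 20          # one prediction is badly off

for name, y_pred in [('small errors', y_pred_ok), ('one big error', y_pred_outlier)]:
    mae = mean_absolute_error(y_real, y_pred)
    rmse = mean_squared_error(y_real, y_pred) ** 0.5
    print(f"{name}: MAE={mae:.2f}  RMSE={rmse:.2f}")
###Output
_____no_output_____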
###Code
# Para evaluar las métricas de regresión, utilizaremos los resultados de un algoritmo de regresión:
y_col = 'num_3' # vamos a predecir 'num_3'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
model = LinearRegression()
model.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
5.1.1 MAE (Mean Absolute Error)
###Code
### Importamos el objeto:
from sklearn.metrics import mean_absolute_error
# Calculamos la métrica:
mae = mean_absolute_error(y_test, model.predict(X_test))
mae
###Output
_____no_output_____
###Markdown
5.1.2 MSE (Mean Squared Error)
###Code
### Importamos el objeto:
from sklearn.metrics import mean_squared_error
# Calculamos la métrica:
mse = mean_squared_error(y_test, model.predict(X_test))
mse
###Output
_____no_output_____
###Markdown
5.1.3 RMSE (Root Mean Squared Error)
###Code
### Importamos el objeto:
from sklearn.metrics import mean_squared_error
# Calculamos la métrica:
rmse = mean_squared_error(y_test, model.predict(X_test))**(1/2)
rmse
###Output
_____no_output_____
###Markdown
Dependiendo de la magnitud de las unidades que estemos utilizando, en este caso las ``y``, este valor nos permitirá conocer si es bueno o malo. Ejemplo: si nos sale 20 y la ``y`` se mueve en torno a 50.000, es un error pequeño. En cambio, si se mueve en torno a 20, pues... tenemos problemas. 5.1.4 $R^2$
###Code
### Importamos el objeto:
from sklearn.metrics import r2_score
# Calculamos la métrica:
r_2 = r2_score(y_test, model.predict(X_test))
r_2
###Output
_____no_output_____
###Markdown
5.2 ClasificaciónLas métricas de clasificación se basan en calcular la matriz de confusión, donde se reflejan los aciertos frente a los fallos, ya que ahora no hay una distancia de error, sino que o aciertas la clase o fallas.En este apartado es especialmente interesante el caso en el que tengamos desbalanceos, pues una métrica global como el accuracy puede resultar muy alta simplemente prediciendo siempre la clase mayoritaria.
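A self-contained sketch of why this matters: with 95% of the samples in one class, a "model" that always predicts the majority class reaches 95% accuracy while never detecting the minority class.
###Code
# Toy illustration: accuracy looks great on imbalanced data even for a useless model
import numpy as np
from sklearn.metrics import accuracy_score, recall_score

y_real = np.array([0] * 95 + [1] * 5)   # 95% class 0, 5% class 1
y_pred = np.zeros_like(y_real)          # always predict the majority class

print("accuracy:", accuracy_score(y_real, y_pred))        # 0.95, looks good
print("recall (class 1):", recall_score(y_real, y_pred))  # 0.0, the minority class is never found
###Output
_____no_output_____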
###Code
# Nos copiamos el df para no modificarlo, y lo convertimos para tener solo variables numéricas:
df_ejemplo = df.copy()
df_ejemplo = pd.get_dummies(df_ejemplo)
# Primero debemos separar en train y test (tal como hemos visto antes):
from sklearn.model_selection import train_test_split
y_col = 'num_4' # vamos a predecir 'num_4'
X_cols = [col for col in df_ejemplo.columns if col != y_col]
X = df_ejemplo[X_cols]
y = df_ejemplo[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
model = LogisticRegression(random_state=42)
model.fit(X_train, y_train)
###Output
C:\Users\TheBridge\anaconda3\lib\site-packages\sklearn\linear_model\_logistic.py:762: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
5.2.1 Matriz de confusión
###Code
### Importamos el objeto:
from sklearn.metrics import confusion_matrix
import seaborn as sns
# Calculamos la métrica:
cm = confusion_matrix(y_test, model.predict(X_test))
sns.heatmap(cm, annot=True)
###Output
_____no_output_____
###Markdown
5.2.2 Accuracy (Porcentaje de acierto)
###Code
### Importamos el objeto:
from sklearn.metrics import accuracy_score
# Calculamos la métrica:
acu = accuracy_score(y_test, model.predict(X_test))
acu
###Output
_____no_output_____
###Markdown
5.2.3 Precision
###Code
### Importamos el objeto:
from sklearn.metrics import precision_score
# Calculamos la métrica:
precision = precision_score(y_test, model.predict(X_test))
precision
###Output
_____no_output_____
###Markdown
5.2.4 Recall
###Code
### Importamos el objeto:
from sklearn.metrics import recall_score
# Calculamos la métrica:
recall = recall_score(y_test, model.predict(X_test))
recall
###Output
_____no_output_____
###Markdown
5.2.5 F1-score
###Code
### Importamos el objeto:
from sklearn.metrics import f1_score
# Calculamos la métrica:
f1 = f1_score(y_test, model.predict(X_test))
f1
###Output
_____no_output_____
###Markdown
5.2.5 AUC y Curva ROC
###Code
### Importamos el objeto:
from sklearn.metrics import roc_auc_score, plot_roc_curve
# Calculamos la métrica:
auc = roc_auc_score(y_test, model.predict(X_test))
auc
plot_roc_curve(model, X_test, y_test)
### Importamos el objeto:
from sklearn.metrics import roc_curve
# Calculamos la métrica:
fpr, tpr, thresholds = roc_curve(y_test, model.predict_proba(X_test)[:, 1])
plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'red')
plt.plot(fpr, tpr)
plt.grid();
###Output
_____no_output_____
###Markdown
6. EjemplosEste apartado vas a ser tú quien lo complete. Vas a disponer de un apartado para cada uno de los modelos que hemos visto. Tendremos un dataset para regresión y otro de clasificación. 6.1 RegresiónEn este caso, utilizaremos el dataset del Titanic e intentaremos predecir el precio del billete (``Fare``): Antes de nada, transforma los datos y divídelos en train/test:
###Code
# Importamos librerías:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Leemos df del csv:
df = pd.read_csv("0_RESUMEN/data/titanic.csv", sep='\t')
# Seleccionamos las columnas que nos interesan y separamos en train/test:
y_col = 'Fare' # vamos a predecir 'Fare'
X_cols = [col for col in df.columns if col not in ['PassengerId', 'Name', 'Ticket', 'Cabin', y_col]]
X = df[X_cols]
y = df[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Vamos a rellenar los nulos con las modas de train, para lo que podemos sacar un diccionario con las modas antes de convertir:
modas = {col: val for col, val in zip(X_train.columns, X_train.mode().values[0])}
# Convertimos:
X_train = X_train.fillna(modas)
X_test = X_test.fillna(modas)
# Convertimos las variables categóricas:
cat_cols = ['Pclass', 'SibSp', 'Parch', 'Sex', 'Embarked']
# Tratamiento categóricas:
X_train = pd.get_dummies(X_train, columns=cat_cols)
X_test = pd.get_dummies(X_test, columns=cat_cols)
for col in X_train.columns:
if col not in X_test.columns:
X_test[col] = 0
X_test = X_test[X_train.columns]
# Nos creamos el objeto para escalar:
scaler = StandardScaler()
# Entrenamos con train:
scaler.fit(X_train)
# Y transformamos:
# En train:
X_train_scaled = scaler.transform(X_train)
# Y en test:
X_test_scaled = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
6.1.1 Regresión Lineal
###Code
# Importamos el objeto:
from sklearn.linear_model import LinearRegression
# Creamos el modelo:
model = LinearRegression()
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.668119510699658
Test (R^2): 0.3731378883534461
###Markdown
6.1.2 Regularización 6.1.2.1 Ridge
###Code
# Importamos el objeto:
from sklearn.linear_model import Ridge
# Creamos el modelo:
model = Ridge(alpha=1.5)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.6680431403225132
Test (R^2): 0.374834575679182
###Markdown
6.1.2.2 Lasso
###Code
# Importamos el objeto:
from sklearn.linear_model import Lasso
# Creamos el modelo:
model = Lasso(alpha=1.75)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.6524498168276089
Test (R^2): 0.37452205733986454
###Markdown
6.1.2.3 Elastic Net
###Code
# Importamos el objeto:
from sklearn.linear_model import ElasticNet
# Creamos el modelo:
model = ElasticNet(alpha=1.75, l1_ratio=0.85)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.6265479890025998
Test (R^2): 0.3810600195106405
###Markdown
6.1.3 Árboles de decisión (Regresión)
###Code
# Importamos el objeto:
from sklearn.tree import DecisionTreeRegressor
# Creamos el modelo:
model = DecisionTreeRegressor(max_depth=1)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.3969494915517149
Test (R^2): 0.43086277141639007
###Markdown
6.1.4 Support Vector Regressor (SVR) 6.1.4.1 SVR Lineal
###Code
# Importamos el objeto:
from sklearn.svm import LinearSVR
# Creamos el modelo:
model = LinearSVR(C=1500)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.4057255580867013
Test (R^2): 0.3645595007710828
###Markdown
6.1.4.2 SVR con kernel
###Code
# Importamos el objeto:
from sklearn.svm import SVR
# Creamos el modelo:
model = SVR(C=80, epsilon=0.05)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (R^2): {train_score}")
print(f"Test (R^2): {test_score}")
###Output
Train (R^2): 0.6852575139950674
Test (R^2): 0.34423969324149495
###Markdown
6.2 ClasificaciónEn este caso, vamos a basarnos en el mismo dataset, el del Titanic, pero vamos a predecir el tipo de embarque de los pasajeros (``Embarked``): Al igual que antes, transforma los datos y divídelos en train/test:
###Code
# Importamos librerías:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Leemos df del csv:
df = pd.read_csv("0_RESUMEN/data/titanic.csv", sep='\t')
# Quitamos aquellas que no tenemos target:
df = df.dropna(subset=['Embarked'])
# Seleccionamos las columnas que nos interesan y separamos en train/test:
y_col = 'Embarked' # vamos a predecir 'Embarked'
X_cols = [col for col in df.columns if col not in ['PassengerId', 'Name', 'Ticket', 'Cabin', y_col]]
X = df[X_cols]
y = df[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Vamos a rellenar los nulos con las modas de train, para lo que podemos sacar un diccionario con las modas antes de convertir:
modas = {col: val for col, val in zip(X_train.columns, X_train.mode().values[0])}
# Convertimos:
X_train = X_train.fillna(modas)
X_test = X_test.fillna(modas)
# Convertimos las variables categóricas:
cat_cols = ['Pclass', 'SibSp', 'Parch', 'Sex']
# Tratamiento categóricas:
X_train = pd.get_dummies(X_train, columns=cat_cols)
X_test = pd.get_dummies(X_test, columns=cat_cols)
for col in X_train.columns:
if col not in X_test.columns:
X_test[col] = 0
X_test = X_test[X_train.columns]
# Nos creamos el objeto para escalar:
scaler = StandardScaler()
# Entrenamos con train:
scaler.fit(X_train)
# Y transformamos:
# En train:
X_train_scaled = scaler.transform(X_train)
# Y en test:
X_test_scaled = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
6.2.1 Regresión Logística
###Code
# Importamos el objeto:
from sklearn.linear_model import LogisticRegression
# Creamos el modelo:
model = LogisticRegression()
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (accuracy): {train_score}")
print(f"Test (accuracy): {test_score}")
###Output
Train (accuracy): 0.7037037037037037
Test (accuracy): 0.6808510638297872
###Markdown
6.2.2 Árboles de decisión
###Code
# Importamos el objeto:
from sklearn.tree import DecisionTreeClassifier
# Creamos el modelo:
model = DecisionTreeClassifier(max_depth=4)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (accuracy): {train_score}")
print(f"Test (accuracy): {test_score}")
###Output
Train (accuracy): 0.8148148148148148
Test (accuracy): 0.7021276595744681
###Markdown
6.2.3 K Nearest Neighbor (KNN)
###Code
# Importamos el objeto:
from sklearn.neighbors import KNeighborsClassifier
# Creamos el modelo:
model = KNeighborsClassifier(n_neighbors=5)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (accuracy): {train_score}")
print(f"Test (accuracy): {test_score}")
###Output
Train (accuracy): 0.75
Test (accuracy): 0.7021276595744681
###Markdown
6.2.4 Support Vector Classifier (SVC) 6.2.4.1 SVC Lineal
###Code
# Importamos el objeto:
from sklearn.svm import LinearSVC
# Creamos el modelo:
model = LinearSVC(C=0.005)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (accuracy): {train_score}")
print(f"Test (accuracy): {test_score}")
###Output
Train (accuracy): 0.7222222222222222
Test (accuracy): 0.723404255319149
###Markdown
6.2.4.2 SVC con kernel
###Code
# Importamos el objeto:
from sklearn.svm import SVC
# Creamos el modelo:
model = SVC(C=1.75)
# Entrenamos:
model.fit(X_train_scaled, y_train)
# Ahora ya podemos calcular el score (por defecto, R^2):
train_score = model.score(X_train_scaled, y_train)
test_score = model.score(X_test_scaled, y_test)
print(f"Train (accuracy): {train_score}")
print(f"Test (accuracy): {test_score}")
###Output
Train (accuracy): 0.7407407407407407
Test (accuracy): 0.723404255319149
|
Smit/Sem V/CS/Set_12.ipynb | ###Markdown
Set 12--- **Use Euler's method to solve an ordinary differential equation**
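For reference, the update rule implemented below is the forward Euler step $$y_{n+1} = y_{n} + h\,f(x_{n}, y_{n}),$$ a first-order method whose global error shrinks roughly linearly with the step size $h$; this is what the three choices of $h$ below are meant to illustrate.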
###Code
import numpy as np
import matplotlib.pyplot as plt
def fxy(x,y) :
return -y
y0 = 1
h = [0.2 , 0.1, 0.05]
def func(h,x,y0):
y = np.zeros(len(x))
y[0] = y0
for i in range(1,len(x)):
y[i] = y[i-1] + h*fxy(x[i-1],y[i-1])
return y
x1 = np.linspace(0, 6, 31)   # 31 points so the grid spacing matches h = 0.2
y1 = func(h[0], x1, y0)
x2 = np.linspace(0, 6, 61)   # spacing 0.1
y2 = func(h[1], x2, y0)
x3 = np.linspace(0, 6, 121)  # spacing 0.05
y3 = func(h[2], x3, y0)
plt.plot(x1,y1,color='blue',label='$h = 0.2$')
plt.plot(x2,y2,color='green',label='$h = 0.1$')
plt.plot(x3,y3,color='red',label='$h = 0.05$')
plt.plot(x3,np.exp(-x3),color='purple',label='$y = e^{-x}$')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('y v/s x')
plt.legend()
plt.show()
plt.plot(x1,np.exp(-x1) - y1,color='blue',label='$h = 0.2$')
plt.plot(x2,np.exp(-x2) - y2,color='green',label='$h = 0.1$')
plt.plot(x3,np.exp(-x3) - y3,color='red',label='$h = 0.05$')
plt.xlabel('$x$')
plt.ylabel('Error')
plt.title('Error v/s $x$')
plt.legend()
plt.show()
def fxy(x,y) :
return (y + x**2 - 2) / (x+1)
y0 = 2
h = [0.2 , 0.1, 0.05]
def func(h,x,y0):
y = np.zeros(len(x))
y[0] = y0
for i in range(1,len(x)):
y[i] = y[i-1] + h*fxy(x[i-1],y[i-1])
return y
x1 = np.linspace(0, 6, 31)   # 31 points so the grid spacing matches h = 0.2
y1 = func(h[0], x1, y0)
x2 = np.linspace(0, 6, 61)   # spacing 0.1
y2 = func(h[1], x2, y0)
x3 = np.linspace(0, 6, 121)  # spacing 0.05
y3 = func(h[2], x3, y0)
plt.plot(x1,y1,color='blue',label='$h = 0.2$')
plt.plot(x2,y2,color='green',label='$h = 0.1$')
plt.plot(x3,y3,color='red',label='$h = 0.05$')
plt.plot(x3,x3**2 + 2*x3 +2 - 2*(x3+1)*np.log(x3+1),color='purple',label='$y = x^{2} + 2x + 2 - 2(x+1)ln(x+1)$')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('y v/s x')
plt.legend()
plt.show()
plt.plot(x1,x1**2 + 2*x1 +2 - 2*(x1+1)*np.log(x1+1) - y1,color='blue',label='$h = 0.2$')
plt.plot(x2,x2**2 + 2*x2 +2 - 2*(x2+1)*np.log(x2+1) - y2,color='green',label='$h = 0.1$')
plt.plot(x3,x3**2 + 2*x3 +2 - 2*(x3+1)*np.log(x3+1) - y3,color='red',label='$h = 0.05$')
plt.xlabel('$x$')
plt.ylabel('Error')
plt.title('Error v/s $x$')
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/high_resolution.ipynb | ###Markdown
High-resolution connectomics---Here, we have provided a sample example of using the high-resolution connectomes provided in our data release.In this example we will load the high resolution connectome and plot the distribution of nodal strength for the sample connectomes.---
###Code
# initial imports
import os
import urllib.request
import tempfile
import numpy as np
import seaborn as sns
import scipy.sparse as sparse
# location of a sample file in our remotely available data
file_url = 'https://swift.rc.nectar.org.au/v1/AUTH_ee5989f4c9184ea29012bb124cd3dff0/connectome_storage/HCP_1200/100408/high_resolution/functional_connectivity/100408_test_sparse_resting_functional_connectivity.npz'
tmp_file = 'tmp.npz'
# download from url
urllib.request.urlretrieve(file_url, tmp_file)  # urlretrieve creates the file itself, no need to open it first
# Now load the sparse connectome data
connectome = sparse.load_npz(tmp_file)
# delete the downloaded file
os.remove(tmp_file)
###Output
_____no_output_____
###Markdown
**Note:** Running the previous cell will take a while, as the connectome needs to be downloaded...---Now we can see that the `connectome` object is actually a sparse adjacency matrix:
###Code
connectome
###Output
_____no_output_____
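###Markdown
A quick look at the size and sparsity of the loaded matrix (a small sketch using standard `scipy.sparse` attributes):
###Code
# Basic properties of the sparse connectivity matrix
n_nodes = connectome.shape[0]
n_edges = connectome.nnz
density = n_edges / (n_nodes * n_nodes)
print(f"shape: {connectome.shape}")
print(f"stored (non-zero) entries: {n_edges}")
print(f"density: {density:.4%}")
###Output
_____no_output_____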
###Markdown
---We can create and plot the nodal strength distribution by computing the sum across colums/rows:
###Code
strength = np.asarray(connectome.sum(axis=0)).ravel()  # flatten the (1, N) matrix into a 1-D array for plotting
strength.shape
ax = sns.distplot(strength)
ax.set_xlabel('node strength')
ax.set_ylabel('probability')
ax.set_title('probability distribution function of high-resolution nodal strength')
###Output
_____no_output_____ |
Homework/HW3.ipynb | ###Markdown
Homework 3 1. Implement L1 norm regularization as a custom loss function
###Code
import torch
def lasso_reg(loss, params, l1_lambda):
    """Add an L1 (lasso) penalty on the model parameters to an existing loss."""
    reg_loss = 0.0
    for param in params:
        reg_loss = reg_loss + param.abs().sum()
    return loss + l1_lambda * reg_loss
###Output
_____no_output_____
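###Markdown
A sketch of how the helper above can be used inside a single training step; the tiny linear model and random batch below are placeholders, added only to make the cell runnable.
###Code
# Sketch: one training step with the L1 penalty added to the base loss
import torch
from torch import nn

torch.manual_seed(0)
tiny_model = nn.Linear(4, 2)                     # placeholder model
X_batch = torch.randn(8, 4)                      # placeholder batch
y_batch = torch.randint(0, 2, (8,))
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(tiny_model.parameters(), lr=0.1)

optimizer.zero_grad()
base_loss = loss_fn(tiny_model(X_batch), y_batch)
total_loss = lasso_reg(base_loss, tiny_model.parameters(), l1_lambda=1e-3)
total_loss.backward()
optimizer.step()
print(base_loss.item(), total_loss.item())
###Output
_____no_output_____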
###Markdown
2. The third-to-last paragraph in the notebook concerns early stopping, an "old" regularization technique which involves stopping training earlier than the total number of epochs would suggest. Read the paragraph and download the paper from Prechelt et al. a. Implement early stopping in the $E_{opt}$ specification In the paper, the value $E_{opt}$ is defined to be the lowest validation set error obtained in epochs up to $t$: $$E_{opt}(t) = \min_{t' \le t} E_{va}(t')$$ where $E_{va}$ is the validation error, i.e. the corresponding error on the validation set. As per instructions, I'm going to use the test data as validation.
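Before running the full experiment, here is a compact sketch of what tracking $E_{opt}$ looks like inside a generic training loop; `train_one_epoch` and `evaluate` are placeholders for the helpers defined below, and restoring the best weights at the end is the usual way of exploiting $E_{opt}$.
###Code
# Sketch: early stopping in the E_opt sense -- keep the weights with the lowest validation error
def fit_with_e_opt(model, train_one_epoch, evaluate, num_epochs):
    best_error = float('inf')
    best_state = None
    for epoch in range(num_epochs):
        train_one_epoch(model)
        val_error = 1.0 - evaluate(model)      # E_va(t) = 1 - validation accuracy
        if val_error < best_error:             # E_opt(t) = min over epochs t' <= t of E_va(t')
            best_error = val_error
            best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
    if best_state is not None:
        model.load_state_dict(best_state)      # roll back to the E_opt weights
    return best_error
###Output
_____no_output_____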
###Code
# import in Colab
import sys
sys.path.append('/content')  # directory containing mnist.py and train_utils.py
import mnist
from train_utils import accuracy, AverageMeter
from torch import nn
class MLP(nn.Module):
def __init__(self):
super().__init__()
self.flat = nn.Flatten()
self.h1 = nn.Linear(28*28, 16)
self.h2 = nn.Linear(16, 32)
self.h3 = nn.Linear(32, 24)
self.out = nn.Linear(24, 10)
def forward(self, X, activ_hidden=nn.functional.relu):
out = self.flat(X)
out = activ_hidden(self.h1(out))
out = activ_hidden(self.h2(out))
out = activ_hidden(self.h3(out))
out = self.out(out)
return out
def train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance):
for X, y in dataloader:
optimizer.zero_grad()
y_hat = model(X)
loss = loss_fn(y_hat, y)
loss.backward()
optimizer.step()
acc = performance(y_hat, y)
loss_meter.update(val=loss.item(), n=X.shape[0])
performance_meter.update(val=acc, n=X.shape[0])
def train_model(model, dataloader1, dataloader2, loss_fn, optimizer, num_epochs, performance=accuracy):
model.train()
E = {
"epoch": [],"training perf": [], "validation perf": [], "parameters": [], "optimizer": []
}
    for epoch in range(num_epochs):
        model.train()  # test_model() below switches the model to eval mode, so switch back each epoch
        loss_meter = AverageMeter()
        performance_meter = AverageMeter()
        train_epoch(model, dataloader1, loss_fn, optimizer, loss_meter, performance_meter, performance)
        fin_loss, fin_perf = test_model(model, dataloader2, loss_fn=loss_fn)
        E["epoch"].append(epoch)
        E["training perf"].append(performance_meter)
        E["validation perf"].append(fin_perf)
        # snapshot copies of the weights; appending model.state_dict() directly would make every entry alias the final tensors
        E["parameters"].append({k: v.detach().clone() for k, v in model.state_dict().items()})
        E["optimizer"].append(optimizer.state_dict())
return loss_meter.sum, performance_meter.avg, E
def test_model(model, dataloader, performance=accuracy, loss_fn=None):
# create an AverageMeter for the loss if passed
if loss_fn is not None:
loss_meter = AverageMeter()
performance_meter = AverageMeter()
model.eval()
with torch.no_grad():
for X, y in dataloader:
y_hat = model(X)
loss = loss_fn(y_hat, y) if loss_fn is not None else None
acc = performance(y_hat, y)
if loss_fn is not None:
loss_meter.update(loss.item(), X.shape[0])
performance_meter.update(acc, X.shape[0])
# get final performances
fin_loss = loss_meter.sum if loss_fn is not None else None
fin_perf = performance_meter.avg
return fin_loss, fin_perf
minibatch_size_train = 256
minibatch_size_test = 512
trainloader, testloader, trainset, testset = mnist.get_data(batch_size_train=minibatch_size_train, batch_size_test=minibatch_size_test)
learn_rate = 0.1
num_epochs = 30
model = MLP()
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
train_loss, train_acc, E = train_model(model, trainloader, testloader, loss_fn, optimizer, num_epochs)
###Output
_____no_output_____
###Markdown
Since `Validation_error = 1 - Validation_performance`, minimizing the error is equivalent to maximizing the performance.
###Code
from matplotlib import pyplot as plt
val_list = list(E["validation perf"])
maxval = max(E["validation perf"])
index = val_list.index(max(val_list)) + 1
plt.plot(E["epoch"], E["validation perf"] )
print(f"The best validation performance is {maxval}, obtained at epoch no. {index} out of {num_epochs}.")
###Output
The best validation performance is 0.9701166666666666, obtained at epoch no. 29 out of 30.
###Markdown
b$^*$. Implement early stopping in one of the additional specifications A stopping criterion described in the paper is based on the *generalization loss*: $$ GL (t) = 100 * \big( \frac{E_{va}(t)}{E_{opt}(t)} -1 \big)$$ that is, the validation error over the minimum so far in percent. We should stop as soon as this value exceeds a certain threshold $\alpha$.As reported in the paper, this criterion is used to maximize the probability to find a good solution, as opposed to maximizing the average quality of the solutions.
###Code
alpha = 1
E_opt = 1 - val_list[0]
for i in range(num_epochs):
E_va = 1 - val_list[i]
if E_va < E_opt:
E_opt = E_va
GL = 100 * (E_va/E_opt - 1)
if GL > alpha:
print(f"This stopping criterion halts the computation at epoch {i+1}")
break
###Output
This stopping criterion halts the computation at epoch 6
###Markdown
As we can see, this criterion stops very early, at the first epoch with lower performance. A solution is to add momentum to SGD to minimize oscillations:
###Code
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate, momentum=0.9)
num_epochs = 15
train_loss_m, train_acc_m, E_m = train_model(model, trainloader, testloader, loss_fn, optimizer, num_epochs)
from matplotlib import pyplot as plt
val_list = list(E_m["validation perf"])
maxval = max(E_m["validation perf"])
index = val_list.index(max(val_list)) + 1
plt.plot(E_m["epoch"], E_m["validation perf"] )
print(f"The best validation performance is {maxval}, obtained at epoch no. {index} out of {num_epochs}.")
alpha = 2
E_opt = 1 - val_list[0]
for i in range(num_epochs):
E_va = 1 - val_list[i]
if E_va < E_opt:
E_opt = E_va
GL = 100 * (E_va/E_opt - 1)
if GL > alpha:
print(f"This stopping criterion halts the computation at epoch {i+1}")
break
###Output
This stopping criterion halts the computation at epoch 4
###Markdown
**Homework 3** Problem 1| | True Bird | True Person ||-----------|-----------|-------------|| NN Bird | 45 | 5 || NN Person | 3 | 47 || | True Bird | True Person ||-----------|-----------|-------------|| NA Bird | 47 | 11 || NA Person | 1 | 42 | **a)** Which algorithm makes the fewest mistakes?NN made 8 mistakes, while NA made 12 mistakes. So algorithm NN makes fewer mistakes. **b)** Which algorithm is better for the zoo?NA is the better algorithm for the zoo because it wrongly classifies a bird as a person less often than NN does. Since we do not want birds to be let out of the aviary, we would trade more misclassifications of true people as birds for fewer misclassifications of true birds as people. **c)** Instead of 52% of the photos taken at the aviary door being people, it is now only 1%. Make new truth tables for both algorithms.In order to better show the behaviour of the two algorithms, the truth tables below assume 1000 pictures rather than 100.| | True Bird | True Person ||-----------|-----------|-------------|| NN Bird | 928 | 1 || NN Person | 62 | 9 || | True Bird | True Person ||-----------|-----------|-------------|| NA Bird | 969 | 2 || NA Person | 21 | 8 |These truth tables make the answer to question b much more obvious: when most of the pictures are of birds, algorithm NA makes far fewer mistakes than NN. Problem 2 **a)** What is the pdf of the sum of two identical exponential distributions?$$e(x) = \lambda e^{-\lambda x}$$$$(e*e)(x) = \int_{0}^{x}e(x-z)\,e(z)\,dz = \int_{0}^{x} \lambda e^{-\lambda(x-z)}\,\lambda e^{-\lambda z}\,dz = \lambda^{2} e^{-\lambda x}\int_{0}^{x}dz$$$$(e*e)(x) = \lambda^{2} z e^{-\lambda x}\bigg|_{z = 0}^{z = x} = \lambda^{2} x e^{-\lambda x}$$$\lambda^{2} x e^{-\lambda x}$ is exactly the gamma distribution with $\alpha = 2$ and rate $\lambda$ (i.e. $xe^{-\lambda x}$ is its unnormalized form).
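As a quick sanity check of this result, the cell below compares a Monte Carlo histogram of the sum of two Exp($\lambda$) draws against the Gamma($\alpha = 2$, rate $\lambda$) density; it is a standalone sketch with $\lambda = 1$ assumed.
###Code
# Monte Carlo check: sum of two Exp(lambda=1) samples vs the Gamma(alpha=2, rate=1) pdf
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

samples = stats.expon.rvs(size=100_000) + stats.expon.rvs(size=100_000)
xs = np.linspace(0, 12, 300)
plt.hist(samples, bins=100, density=True, alpha=0.5, label='Exp + Exp samples')
plt.plot(xs, stats.gamma.pdf(xs, a=2), 'r', label=r'Gamma($\alpha$=2) pdf')
plt.legend()
plt.show()
###Output
_____no_output_____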
###Code
import scipy
from scipy import stats, signal
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,10)
x = np.linspace(0,100,1001)
xc = np.linspace(0,200,2001)
e1 = stats.expon.pdf(x)
e2 = stats.expon.pdf(x)
ec = signal.fftconvolve(e1, e2) * (x[1] - x[0])  # multiply by dx so the discrete convolution approximates the continuous pdf
fig, ax = plt.subplots(1)
ax.plot(x,e1)
ax.plot(xc, ec)
ax.set_xlim([-0.25,40])
plt.show()
###Output
_____no_output_____ |
2018_06_30_Boston_Housing_Price_Prediction.ipynb | ###Markdown
- Finding outliers: data points with large residuals (standardized residuals)
###Code
result_boston_outlier = result_boston.resid_pearson
# Visualize
%matplotlib inline
plt.figure(figsize=(10,2))
plt.stem(result_boston.resid_pearson)
plt.show()
###Output
C:\ProgramData\Anaconda3\lib\site-packages\matplotlib\font_manager.py:1320: UserWarning: findfont: Font family ['nanumgothic'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
- Finding high-leverage points
###Code
influence_boston = result_boston.get_influence()
hat = influence_boston.hat_matrix_diag
# Visualize
plt.figure(figsize = (10, 2))
plt.stem(hat)
plt.show()
###Output
C:\ProgramData\Anaconda3\lib\site-packages\matplotlib\font_manager.py:1320: UserWarning: findfont: Font family ['nanumgothic'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
- Cook's Distance- Cook's Distance is a criterion for examining residuals and leverage at the same time.- Its value grows as the leverage or the size of the residual grows.- Fox's Outlier Recommendation is to treat a point as an outlier when its Cook's Distance exceeds the threshold $D_i > 4 / (N - K - 1)$.- To inspect the leverage and residuals of all data points at once, use plot_leverage_resid2 - x-axis: squared standardized residuals - y-axis: leverage values
###Code
cooks_distance, pvals = influence_boston.cooks_distance
fox_recommendation = 4 / (len(dfy) - 2)
idx = np.where(cooks_distance > fox_recommendation)[0]
idx
from statsmodels.graphics import utils  # needed for utils.annotate_axes below

ax = plt.subplot()
plt.scatter(dfy, pred)
plt.scatter(dfy.MEDV[idx], pred[idx], s=300, c="r", alpha=0.5)
utils.annotate_axes(range(len(idx)), idx,
list(zip(dfy.MEDV[idx], pred[idx])), [(-20, 15)] * len(idx), size="small", ax=ax)
plt.show()
sm.graphics.plot_leverage_resid2(result_boston)
plt.show()
idx2 = list(set(range(len(dfX))).difference(idx))
dfX = dfX.iloc[idx2, :].reset_index(drop=True)
dfy = dfy.iloc[idx2, :].reset_index(drop=True)
model_boston2 = sm.OLS(dfy, dfX)
result_boston2 = model_boston2.fit()
print(result_boston2.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: MEDV R-squared: 0.831
Model: OLS Adj. R-squared: 0.827
Method: Least Squares F-statistic: 175.4
Date: Sat, 30 Jun 2018 Prob (F-statistic): 3.07e-169
Time: 11:54:24 Log-Likelihood: -1226.7
No. Observations: 476 AIC: 2481.
Df Residuals: 462 BIC: 2540.
Df Model: 13
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 20.9186 4.002 5.227 0.000 13.054 28.783
CRIM -0.0892 0.027 -3.246 0.001 -0.143 -0.035
ZN 0.0360 0.010 3.660 0.000 0.017 0.055
INDUS -0.0184 0.043 -0.427 0.670 -0.103 0.066
CHAS 1.3678 0.639 2.140 0.033 0.112 2.624
NOX -10.5885 2.700 -3.921 0.000 -15.895 -5.282
RM 4.8245 0.365 13.225 0.000 4.108 5.541
AGE -0.0215 0.010 -2.212 0.027 -0.041 -0.002
DIS -1.1260 0.144 -7.840 0.000 -1.408 -0.844
RAD 0.1987 0.047 4.244 0.000 0.107 0.291
TAX -0.0119 0.003 -4.599 0.000 -0.017 -0.007
PTRATIO -0.7661 0.091 -8.381 0.000 -0.946 -0.586
B 0.0108 0.002 5.644 0.000 0.007 0.015
LSTAT -0.3578 0.041 -8.711 0.000 -0.439 -0.277
==============================================================================
Omnibus: 24.076 Durbin-Watson: 1.127
Prob(Omnibus): 0.000 Jarque-Bera (JB): 28.702
Skew: 0.481 Prob(JB): 5.85e-07
Kurtosis: 3.722 Cond. No. 1.64e+04
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.64e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
|
2018-2019/assignment 6 (VAE)/VAE_Toy_Dataset.ipynb | ###Markdown
Variational Autoencoders (Toy dataset). Skeleton code from https://github.com/tudor-berariu/ann2018 1. Miscellaneous
###Code
import torch
from torch import Tensor
assert torch.cuda.is_available()
import matplotlib.pyplot as plt
from math import ceil
def show_images(X: torch.Tensor, nrows=3):
ncols = int(ceil(len(X) / nrows))
ratio = nrows / ncols
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10 * ratio))
for idx, img in enumerate(X):
r, c = idx // ncols, idx % ncols
axs[r][c].imshow(img[0].numpy(), aspect='equal', vmin=0, vmax=1, cmap='binary')
for row_axs in axs:
for ax in row_axs:
ax.set_aspect('equal', 'box')
ax.set_yticklabels([])
ax.set_xticklabels([])
fig.tight_layout()
###Output
_____no_output_____
###Markdown
2. Our dataset
###Code
def get_dataset(n, idxs):
X = torch.randn(n * 16) * .1
X[idxs] += 1
X = (X - X.min()) / (X.max() - X.min())
X.clamp_(0, 1)
X = X.reshape(n, 1, 4, 4)
return X
n = 15
idxs = [2, 6, 8, 9, 10, 11, 14, 17, 21, 24, 25, 26, 27, 29, 35, 39, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 56, 60, 64, 68, 69, 70, 71, 72, 76, 80, 81,
82, 83, 84, 88, 92, 98, 102, 104, 105, 106, 107, 110, 112, 113, 114,
115, 116, 120, 124, 131, 135, 139, 140, 141, 142, 143, 147, 151, 155,
156, 157, 158, 159, 162, 166, 168, 169, 170, 171, 174, 178, 182, 186,
188, 189, 190, 191, 193, 196, 197, 198, 199, 201, 205, 209, 212, 213,
214, 215, 217, 221, 225, 228, 229, 230, 231, 233, 237]
X = get_dataset(n, idxs)
show_images(X)
print(X.shape)
###Output
torch.Size([15, 1, 4, 4])
###Markdown
3. The Variational Auto-encoder. The encoder computes $q_{\phi}\left(z \mid x\right)$ predicting: - $\mu_{\phi}\left(x\right)$ and - $\log \sigma_{\phi}^2\left(x\right)$. The decoder computes $p_{\theta}\left(x \mid z\right)$.
###Code
import torch.nn as nn
import torch.nn.functional as F
class VAE(nn.Module):
def __init__(self, nz: int = 1) -> None:
super(VAE, self).__init__()
self.nz = nz # The number of dimensions in the latent space
self.encoder = nn.Sequential(nn.Linear(16, 64), nn.ReLU())
self.mean = nn.Linear(64, nz) # predicts the mean of p(z|x)
self.log_var = nn.Linear(64, nz) # predicts the log-variance of p(z|x)
self.decoder = nn.Sequential(nn.Linear(nz, 64), nn.ReLU(),
nn.Linear(64, 16))
def forward(self, x):
x = x.view(-1, 16) # Drop this if you use convolutional encoders
# Encoding x into mu, and log-var of p(z|x)
x = self.encoder(x)
mean = self.mean(x)
log_var = self.log_var(x)
# ----------------------------------------------------------------
# TODO 1: compute z = (eps * std) + mean (reparametrization trick)
std = torch.exp(log_var / 2)
eps = torch.randn_like(std)
noise = eps * std + mean
# ----------------------------------------------------------------
# Decoding z into p(x|z)
x = self.decoder(noise)
x = torch.sigmoid(x)
return x.view(-1, 1, 4, 4), mean, log_var
def generate(self, nsamples: int = None, noise: Tensor = None) -> Tensor:
# Generate some data
with torch.no_grad():
if noise is None:
noise = torch.randn(nsamples, self.nz)
x = self.decoder(noise)
x = torch.sigmoid(x)
return x.view(-1, 1, 4, 4)
###Output
_____no_output_____
###Markdown
4. Training the model. The optimization criterion has two components. - the KL divergence between $q_{\phi}\left(z \mid x\right)$ and $p\left(z\right)$ * both are diagonal gaussians, therefore we have a simple formula for the KL divergence: [wiki](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Examples) - the reconstruction loss computed using the [binary cross entropy](https://pytorch.org/docs/stable/nn.html#binary-cross-entropy)
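For reference, with a diagonal Gaussian posterior $q_{\phi}(z \mid x) = \mathcal{N}(\mu, \sigma^2 I)$ and a standard normal prior $p(z) = \mathcal{N}(0, I)$, the KL term computed in TODO 2 below has the closed form

$$D_{KL}\left(q_{\phi}(z \mid x)\,\|\,p(z)\right) = \frac{1}{2}\sum_{j=1}^{n_z}\left(\sigma_j^2 + \mu_j^2 - \log \sigma_j^2 - 1\right),$$

which matches the `0.5 * torch.sum(std ** 2 + mean ** 2 - log_var - 1)` expression used in the training loop below.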
###Code
import torch.optim as optim
import numpy as np
def train(vae: VAE, X: torch.Tensor, nsteps: int = 200000):
bce_trace, kl_trace = [], []
optimizer = optim.Adam(vae.parameters(), lr=.001)
for step in range(nsteps):
optimizer.zero_grad()
rec, mean, log_var = vae(X + torch.randn_like(X) * .05)
# -----------------------------------------------
# TODO 2: compute the two losses (do not average)
std = torch.exp(log_var / 2)
bce = F.binary_cross_entropy(rec, X, reduction='sum')
kl = 0.5 * torch.sum(std ** 2 + mean ** 2 - log_var - 1)
# -----------------------------------------------
(bce + kl).backward()
optimizer.step()
        # bookkeeping for the progress display
bce_trace.append(bce.item())
kl_trace.append(kl.item())
if (step + 1) % 100 == 0:
print(f"\rStep {step + 1:d}: BCE={np.mean(bce_trace):7.5f} "
f"KL={np.mean(kl_trace):7.5f}", end="")
bce_trace.clear()
kl_trace.clear()
if (step + 1) % 2500 == 0:
print("")
%%time
vae = VAE()
train(vae, X)
###Output
Step 2500: BCE=124.08627 KL=21.40327
Step 5000: BCE=121.51171 KL=22.73478
Step 7500: BCE=118.72833 KL=23.95096
Step 10000: BCE=116.03406 KL=25.23266
Step 12500: BCE=114.52011 KL=25.78964
Step 15000: BCE=113.27066 KL=26.21569
Step 17500: BCE=113.04031 KL=26.38341
Step 20000: BCE=112.53327 KL=26.08687
Step 22500: BCE=112.09835 KL=26.53603
Step 25000: BCE=111.80955 KL=27.05710
Step 27500: BCE=111.38995 KL=27.29390
Step 30000: BCE=111.39255 KL=26.35909
Step 32500: BCE=110.91012 KL=27.30216
Step 35000: BCE=110.99902 KL=27.11818
Step 37500: BCE=111.01553 KL=26.88746
Step 40000: BCE=110.35143 KL=27.28258
Step 42500: BCE=110.58733 KL=27.05585
Step 45000: BCE=110.33013 KL=27.25576
Step 47500: BCE=110.24042 KL=27.01841
Step 50000: BCE=110.44379 KL=27.61993
Step 52500: BCE=110.90212 KL=27.00687
Step 55000: BCE=110.07556 KL=27.86644
Step 57500: BCE=110.43340 KL=27.52413
Step 60000: BCE=110.03051 KL=27.28146
Step 62500: BCE=109.90407 KL=26.81705
Step 65000: BCE=109.87070 KL=27.51446
Step 67500: BCE=109.95827 KL=27.93731
Step 70000: BCE=110.08737 KL=27.70076
Step 72500: BCE=109.79326 KL=27.56118
Step 75000: BCE=110.00367 KL=27.20992
Step 77500: BCE=110.46362 KL=27.01450
Step 80000: BCE=109.26049 KL=27.93689
Step 82500: BCE=109.90259 KL=27.31611
Step 85000: BCE=109.75075 KL=27.47810
Step 87500: BCE=109.51105 KL=27.94054
Step 90000: BCE=110.18476 KL=27.49732
Step 92500: BCE=109.46164 KL=27.56707
Step 95000: BCE=109.36076 KL=27.93134
Step 97500: BCE=109.25126 KL=27.53956
Step 100000: BCE=109.61463 KL=27.86349
Step 102500: BCE=109.71135 KL=27.50679
Step 105000: BCE=109.45155 KL=27.57617
Step 107500: BCE=109.89920 KL=27.94318
Step 110000: BCE=109.82205 KL=27.78237
Step 112500: BCE=109.60374 KL=28.37775
Step 115000: BCE=109.60186 KL=27.50387
Step 117500: BCE=109.21853 KL=27.84297
Step 120000: BCE=109.43723 KL=27.97813
Step 122500: BCE=109.35838 KL=27.22437
Step 125000: BCE=110.14330 KL=27.36789
Step 127500: BCE=109.57189 KL=28.21403
Step 130000: BCE=109.93507 KL=27.91152
Step 132500: BCE=109.06009 KL=27.91721
Step 135000: BCE=109.37779 KL=27.78536
Step 137500: BCE=109.69968 KL=27.87922
Step 140000: BCE=109.00553 KL=27.65940
Step 142500: BCE=109.30359 KL=28.00105
Step 145000: BCE=109.58160 KL=28.22211
Step 147500: BCE=109.41012 KL=28.00669
Step 150000: BCE=108.90445 KL=27.75417
Step 152500: BCE=109.06663 KL=27.81458
Step 155000: BCE=109.87956 KL=27.07044
Step 157500: BCE=110.06372 KL=27.91314
Step 160000: BCE=109.47842 KL=27.67087
Step 162500: BCE=109.33568 KL=27.53336
Step 165000: BCE=108.89237 KL=27.65263
Step 167500: BCE=109.82531 KL=27.76033
Step 170000: BCE=109.74407 KL=27.39008
Step 172500: BCE=109.12641 KL=27.24444
Step 175000: BCE=109.49760 KL=27.66085
Step 177500: BCE=108.95157 KL=27.73946
Step 180000: BCE=109.28451 KL=27.67727
Step 182500: BCE=109.19811 KL=27.29111
Step 185000: BCE=109.12543 KL=28.20673
Step 187500: BCE=109.39921 KL=27.78472
Step 190000: BCE=108.93891 KL=27.52975
Step 192500: BCE=109.47471 KL=28.15572
Step 195000: BCE=109.63515 KL=27.02628
Step 197500: BCE=108.91029 KL=27.89915
Step 200000: BCE=109.12434 KL=27.48106
CPU times: user 11min 7s, sys: 8min 34s, total: 19min 41s
Wall time: 2min 51s
###Markdown
5. Evaluating the model 5.1 Reconstructions
###Code
with torch.no_grad():
recon, _, _ = vae(X)
show_images(recon)
###Output
_____no_output_____
###Markdown
5.2 Samples from the model
###Code
X_gen = vae.generate(nsamples=15)
show_images(X_gen)
###Output
_____no_output_____
###Markdown
5.3 Walk the latent space :)
###Code
N = 36
noise = torch.linspace(-2, 2, N).unsqueeze(1)
X_gen = vae.generate(noise=noise)
show_images(X_gen, nrows=6)
###Output
_____no_output_____ |
inference examples/individual examples/Stochastic Inference.ipynb | ###Markdown
Parameter identification example. Here is a simple toy model that we use to demonstrate the working of the inference package: $\emptyset \xrightarrow[]{k_1(I)} X \; \; \; \; X \xrightarrow[]{d_1} \emptyset$, with $ k_1(I) = \frac{k_1 I^2}{K_R^2 + I^2}$
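As a quick illustration of the production propensity above (a standalone numpy sketch, independent of the bioscrape API), the Hill term is half-maximal at $I = K_R$ and saturates towards $k_1$ for large inducer concentrations:
###Code
import numpy as np

def hill_production_rate(I, k1=50.0, KR=20.0, n=2):
    """Positive Hill propensity k1 * I**n / (KR**n + I**n)."""
    I = np.asarray(I, dtype=float)
    return k1 * I**n / (KR**n + I**n)

# half-maximal at I = KR = 20, approaching k1 = 50 for large I
print(hill_production_rate([5, 10, 20, 100]))
###Output
_____no_output_____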
###Code
%matplotlib inline
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.simulator import py_simulate_model
import numpy as np
import pylab as plt
import pandas as pd
species = ['I','X']
reactions = [(['X'], [], 'massaction', {'k':'d1'}), ([], ['X'], 'hillpositive', {'s1':'I', 'k':'k1', 'K':'KR', 'n':2})]
k1 = 50.0
d1 = 0.5
params = [('k1', k1), ('d1', d1), ('KR', 20)]
initial_condition = {'X':0, 'I':0}
M = Model(species = species, reactions = reactions, parameters = params,
initial_condition_dict = initial_condition)
###Output
_____no_output_____
###Markdown
Generate experimental data for multiple initial conditions:
1. Simulate the bioscrape model
2. Add Gaussian noise of non-zero mean and non-zero variance to the simulation
3. Create appropriate Pandas dataframes
4. Write the data to a CSV file
###Code
num_trajectories = 4 # each with different initial condition
initial_condition_list = [{'I':5},{'I':10},{'I':15},{'I':20}]
timepoints = np.linspace(0,5,100)
result_list = []
for init_cond in initial_condition_list:
M.set_species(init_cond)
result = py_simulate_model(timepoints, Model = M)['X']
result_list.append(result)
plt.plot(timepoints, result, label = 'I =' + str(list(init_cond.values())[0]))
plt.xlabel('Time')
plt.ylabel('[X]')
plt.legend()
plt.show()
exp_data = pd.DataFrame()
exp_data['timepoints'] = timepoints
for i in range(num_trajectories):
exp_data['X' + str(i)] = result_list[i] + np.random.normal(5, 1, size = np.shape(result))
plt.plot(timepoints, exp_data['X' + str(i)], 'r', alpha = 0.3)
plt.plot(timepoints, result_list[i], 'k', linewidth = 3)
plt.xlabel('Time')
plt.ylabel('[X]')
plt.show()
###Output
_____no_output_____
###Markdown
CSV looks like:
###Code
exp_data.to_csv('../data/birth_death_data_multiple_conditions.csv')
exp_data
###Output
_____no_output_____
###Markdown
Run the bioscrape MCMC algorithm to identify parameters from the experimental data
###Code
from bioscrape.inference import py_inference
# Import data from CSV
# Import a CSV file for each experiment run
exp_data = []
for i in range(num_trajectories):
df = pd.read_csv('../data/birth_death_data_multiple_conditions.csv', usecols = ['timepoints', 'X'+str(i)])
df.columns = ['timepoints', 'X']
exp_data.append(df)
prior = {'k1' : ['uniform', 0, 100]}
sampler, pid = py_inference(Model = M, exp_data = exp_data, measurements = ['X'], time_column = ['timepoints'],
nwalkers = 15, init_seed = 0.15, nsteps = 5000, sim_type = 'stochastic',
params_to_estimate = ['k1'], prior = prior, plot_show = False, convergence_check = False)
pid.plot_mcmc_results(sampler, convergence_check = False);
###Output
Parameter posterior distribution convergence plots:
###Markdown
Check mcmc_results.csv for the results of the MCMC procedure and perform your own analysis. OR You can also plot the results as follows
###Code
M_fit = M
timepoints = pid.timepoints[0]
flat_samples = sampler.get_chain(discard=200, thin=15, flat=True)
inds = np.random.randint(len(flat_samples), size=200)
for init_cond in initial_condition_list:
for ind in inds:
sample = flat_samples[ind]
for pi, pi_val in zip(pid.params_to_estimate, sample):
M_fit.set_parameter(pi, pi_val)
M_fit.set_species(init_cond)
plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.6)
# plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0)
for i in range(num_trajectories):
plt.plot(timepoints, list(pid.exp_data[i]['X']), 'b', alpha = 0.1)
plt.plot(timepoints, result, "k", label="original model")
plt.legend(fontsize=14)
plt.xlabel("Time")
plt.ylabel("[X]");
###Output
_____no_output_____
###Markdown
Let us now try to fit all three parameters to see if results improve:
###Code
# prior = {'d1' : ['gaussian', 0, 10, 1e-3], 'k1' : ['gaussian', 0, 50, 1e-4]}
prior = {'d1' : ['uniform', 0.1, 10],'k1' : ['uniform',0,100],'KR' : ['uniform',0,100]}
sampler, pid = py_inference(Model = M, exp_data = exp_data, measurements = ['X'], time_column = ['timepoints'],
nwalkers = 15, init_seed = 0.15, nsteps = 10000, sim_type = 'stochastic',
params_to_estimate = ['d1','k1','KR'], prior = prior, plot_show = True, convergence_check = False)
M_fit = M
timepoints = pid.timepoints[0]
flat_samples = sampler.get_chain(discard=200, thin=15, flat=True)
inds = np.random.randint(len(flat_samples), size=200)
for init_cond in initial_condition_list:
for ind in inds:
sample = flat_samples[ind]
for pi, pi_val in zip(pid.params_to_estimate, sample):
M_fit.set_parameter(pi, pi_val)
M_fit.set_species(init_cond)
plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.6)
# plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0)
for i in range(num_trajectories):
plt.plot(timepoints, list(pid.exp_data[i]['X']), 'b', alpha = 0.2)
plt.plot(timepoints, result_list[i], "k")
# plt.legend(fontsize=14)
plt.xlabel("Time")
plt.ylabel("[X]");
###Output
_____no_output_____ |
_posts/submanifold_learning/Submanifold_Learning_Blog.ipynb | ###Markdown
It would be nice to run this on a larger collection of points, but the way I have written the code, the computation explodes quickly. For visualization's sake, here's what the first two steps would look like with a larger sample size.
###Code
angles=np.arange(0,6.28,1/100)
coords=[(cos(angle),sin(angle)) for angle in angles]
x_noise=np.random.random_sample(len(angles),)
y_noise=np.random.random_sample(len(angles),)
x_noise=[x/10 for x in x_noise]
y_noise=[y/10 for y in y_noise]
X=[coords[i][0]+x_noise[i] for i in range(len(angles))]
Y=[coords[i][1]+y_noise[i] for i in range(len(angles))]
plt.scatter(X,Y, s=1);
for i in range(len(angles)):
    for j in range(i + 1, len(angles)):  # each unordered pair only needs to be checked once
if (((((X[i]-X[j])**2)+((Y[i]-Y[j])**2))**(1/2))<0.06):
pyplot.plot([X[i], X[j]], [Y[i], Y[j]], color='black')
###Output
_____no_output_____ |
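###Markdown
Since the nested pairwise loop above is what makes the computation explode (it checks every pair of points in pure Python), a possible alternative, sketched below under the assumption that `X` and `Y` are the noisy circle coordinates defined above, is to query only the nearby pairs with a spatial index from `scipy`.
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree

pts = np.column_stack([X, Y])
tree = cKDTree(pts)

# all index pairs within distance 0.06 of each other, found without an explicit double loop
pairs = tree.query_pairs(r=0.06)
for i, j in pairs:
    plt.plot([X[i], X[j]], [Y[i], Y[j]], color='black', linewidth=0.5)
plt.scatter(X, Y, s=1)
plt.show()
###Output
_____no_output_____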
DAY 501 ~ 600/DAY507_[BaekJoon] 세 수 (Python).ipynb | ###Markdown
Saturday, October 9, 2021. BaekJoon - Three Numbers (세 수) (Python) Problem: https://www.acmicpc.net/problem/10817 Blog: https://somjang.tistory.com/entry/BaekJoon-10817%EB%B2%88-%EC%84%B8-%EC%88%98-Python Solution
###Code
def three_numbers(numbers):
numbers = list(map(int, numbers.split()))
numbers.sort(reverse=True)
return numbers[1]
if __name__ == "__main__":
numbers = input()
print(three_numbers(numbers))
###Output
_____no_output_____ |
Lumpy Skin Disease Prediction/Model/Lumpy_Skin_Disease_Prediction.ipynb | ###Markdown
Importing the Dataset
###Code
df = pd.read_csv("Lumpy skin disease data.csv")
df.head()
df.describe()
df.info()
df.isna().sum(axis=0)
df.columns
###Output
_____no_output_____
###Markdown
Dropping Unnecessary Columns
###Code
df.drop(columns=['region','country','reportingDate','X5_Ct_2010_Da','X5_Bf_2010_Da'],inplace=True)
df.head()
df.corr()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
plt.figure(figsize=(3,3),dpi=150)
plt.style.use('dark_background')
sns.countplot(x='lumpy', data = df)
plt.xlabel('Lumpiness classes')
plt.ylabel('count of each class')
plt.title('Lumpiness class distribution')
plt.figure(figsize=(15, 15))
heatmap = sns.heatmap(df.corr(), vmin= -1, vmax = 1, annot=True)
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12})
###Output
_____no_output_____
###Markdown
Partitioning the dataset into training and test sets
###Code
X=df.iloc[:,:-1]
y=df.iloc[:,-1]
print("//Independent features//")
print(X.head())
print("\n\n//Dependent feature//")
print(y.head())
###Output
//Independent features//
x y cld dtr ... vap wet elevation dominant_land_cover
0 90.380931 22.437184 41.6 12.8 ... 15.7 0.00 147 2
1 87.854975 22.986757 40.5 13.3 ... 16.3 0.00 145 2
2 85.279935 23.610181 27.3 13.6 ... 13.0 0.98 158 2
3 81.564510 43.882221 45.3 12.8 ... 0.9 4.64 178 2
4 81.161057 43.834976 38.8 13.2 ... 1.2 1.69 185 3
[5 rows x 14 columns]
//Dependent feature//
0 1
1 1
2 1
3 1
4 1
Name: lumpy, dtype: int64
###Markdown
Train Test Split
###Code
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
###Output
_____no_output_____
###Markdown
Feature Scaling
###Code
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
# Logistic Regression
lr=LogisticRegression()
lr_mdl=lr.fit(X_train,y_train)
lr_pred=lr.predict(X_test)
lr_con_matrix=confusion_matrix(y_test,lr_pred)
lr_acc=accuracy_score(y_test,lr_pred)
print("Confusion Matrix",'\n',lr_con_matrix)
print('\n')
print("Accuracy of Logistic Regression: ",lr_acc*100,'\n')
print(classification_report(y_test,lr_pred))
#Random Forest Classifier
rf = RandomForestClassifier()
rf.fit(X_train,y_train)
rf_pred = rf.predict(X_test)
rf_con_matrix = confusion_matrix(y_test, rf_pred)
rf_acc = accuracy_score(y_test, rf_pred)
print("Confusion Matrix\n",rf_con_matrix)
print("\n")
print("Accuracy of Random Forest:",rf_acc*100,'\n')
print(classification_report(y_test,rf_pred))
#DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
dt_pred = dt.predict(X_test)
dt_con_matrix = confusion_matrix(y_test, dt_pred)
dt_acc = accuracy_score(y_test, dt_pred)
print("Confusion Matrix\n",dt_con_matrix)
print("\n")
print("Accuracy of Decision Tree Classifier:",dt_acc*100,'\n')
print(classification_report(y_test,dt_pred))
y_score1 = lr.predict_proba(X_test)[:,1]
y_score2 = rf.predict_proba(X_test)[:,1]
y_score3 = dt.predict_proba(X_test)[:,1]
false_positive_rate1, true_positive_rate1, threshold1 = roc_curve(y_test, y_score1)
false_positive_rate2, true_positive_rate2, threshold2 = roc_curve(y_test, y_score2)
false_positive_rate3, true_positive_rate3, threshold3 = roc_curve(y_test, y_score3)
plt.figure(figsize=(5,5),dpi=150)
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.plot(false_positive_rate1,true_positive_rate1, color='red', label = "Logistic Regression")
plt.plot(false_positive_rate2,true_positive_rate2, color='blue', label = "Random Forest")
plt.plot(false_positive_rate3,true_positive_rate3, color='green', label = "Decision Tree")
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
mdl_evl = pd.DataFrame({'Model': ['Logistic Regression','Random Forest', 'Decision Tree'], 'Accuracy': [lr_acc*100,rf_acc*100,dt_acc*100]})
mdl_evl
pal=['red','blue','green']
fig, ax = plt.subplots(figsize=(20,10))
sns.barplot(x="Model",y="Accuracy",palette=pal,data=mdl_evl)
plt.title('Model Accuracy')
plt.xlabel('Model')
plt.ylabel('Accuracy')
###Output
_____no_output_____ |
49_data_gorilla/FakeGorilla.ipynb | ###Markdown
Gorilla in the data. Reproduce data from this paper: https://www.biorxiv.org/content/10.1101/2020.07.30.228916v1.full
###Code
library(tidyverse)
library(jpeg)
download.file('https://classroomclipart.com/images/gallery/Clipart/Black_and_White_Clipart/Animals/gorilla-waving-cartoon-black-white-outline-clipart-914.jpg', 'gorilla.jpg')
gorilla <- readJPEG("gorilla.jpg")
tidy_gorilla <- gorilla[,,1] %>%
as_tibble %>%
mutate(row=n()-row_number()) %>%
pivot_longer(V1:V412,names_to="column",values_to="intensity") %>%
mutate(column = as.integer(str_remove(column,"V")))
tidy_gorilla %>%
filter(intensity<.4) %>%
ggplot(aes(column, row)) +
geom_point()
tidy_gorilla %>%
filter(intensity<.4) %>%
sample_n(1786) %>%
ggplot(aes(column, row)) +
geom_point()
fake_data <- tidy_gorilla %>%
filter(intensity<.4) %>%
sample_n(1786) %>%
transmute(
bmi = (row/max(row)) * 17 + 15,
steps = 15000-column*15000/max(column)
)
fake_data %>%
ggplot(aes(steps,bmi)) + geom_point()
fake_data <- fake_data %>%
mutate(
i=steps*(1+rnorm(n(),0,10)),
sex=if_else(i<=median(steps),"female","male")
) %>%
select(-i)
fake_data %>%
count(sex)
fake_data %>%
ggplot(aes(steps,bmi,color=sex)) + geom_point()
fake_data %>% filter(sex=="female") %>% select(steps,bmi) %>% write_tsv("data9b_w.txt")
fake_data %>% filter(sex=="male") %>% select(steps,bmi) %>% write_tsv("data9b_m.txt")
###Output
_____no_output_____ |
.ipynb_aml_checkpoints/01 - Get Started with Notebooks-checkpoint2021-2-15-11-56-56.ipynb | ###Markdown
Get Started with Notebooks in Azure Machine Learning. Azure Machine Learning is a cloud-based service for creating and managing machine learning solutions. It's designed to help data scientists and machine learning engineers leverage their existing data processing and model development skills and frameworks, and scale their workloads to the cloud. A lot of data science and machine learning work is accomplished in notebooks like this one. Notebooks consist of *cells*, some of which (like the one containing this text) are used for notes, graphics, and other content usually written using *markdown*; while others (like the cell below this one) contain code that you can run interactively within the notebook. The Azure Machine Learning Python SDK. You can run pretty much any Python code in a notebook, provided the required Python packages are installed in the environment where you're running it. In this case, you're running the notebook in a *Conda* environment on an Azure Machine Learning compute instance. This environment is installed in the compute instance by default, and contains common Python packages that data scientists typically work with. It also includes the Azure Machine Learning Python SDK, which is a Python package that enables you to write code that uses resources in your Azure Machine Learning workspace. Run the cell below to import the **azureml-core** package and check the version of the SDK that is installed.
###Code
import azureml.core
print("Ready to use Azure ML", azureml.core.VERSION)
###Output
Ready to use Azure ML 1.22.0
###Markdown
Connect to your workspace. All experiments and associated resources are managed within your Azure Machine Learning workspace. You can connect to an existing workspace, or create a new one using the Azure Machine Learning SDK. In most cases, you should store workspace connection information in a JSON configuration file. This makes it easier to connect without needing to remember details like your Azure subscription ID. You can download the JSON configuration file from the blade for your workspace in the Azure portal or from the workspace details pane in Azure Machine Learning studio, but if you're using a compute instance within your workspace, the configuration file has already been downloaded to the root folder. The code below uses the configuration file to connect to your workspace. > **Note**: The first time you connect to your workspace in a notebook session, you may be prompted to sign into Azure by clicking the `https://microsoft.com/devicelogin` link, entering an automatically generated code, and signing into Azure. After you have successfully signed in, you can close the browser tab that was opened and return to this notebook.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, "loaded")
###Output
dp100_ml loaded
###Markdown
View Azure Machine Learning resources in the workspace. Now that you have a connection to your workspace, you can work with the resources. For example, you can use the following code to enumerate the compute resources in your workspace.
###Code
print("Compute Resources:")
for compute_name in ws.compute_targets:
compute = ws.compute_targets[compute_name]
print("\t", compute.name, ':', compute.type)
###Output
Compute Resources:
dp100-compute : ComputeInstance
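###Markdown
The same pattern applies to other resources registered in the workspace. As an added sketch (assuming the `datastores` dictionary property of the SDK's `Workspace` object, which is not used in the original notebook), the registered datastores can be listed in the same way:
###Code
print("Datastores:")
for datastore_name, datastore in ws.datastores.items():
    print("\t", datastore_name, ':', type(datastore).__name__)
###Output
_____no_output_____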
|
testing/eastern_cropmask/7_Accuracy_assessment_10m.ipynb | ###Markdown
Validating the 10m Eastern Africa Cropland Mask Description. Previously, in the `6_Accuracy_assessment_20m.ipynb` notebook, we were doing preliminary validations on 20m resolution testing crop-masks. The crop-mask was stored on disk as a geotiff. The final cropland extent mask, produced at 10m resolution, is stored in the datacube and requires a different method for validating. > NOTE: A very big sandbox is required (256GiB RAM) to run this script. This notebook will output a `confusion error matrix` containing Overall, Producer's, and User's accuracy, along with the F1 score for each class. *** Getting started. To run this analysis, run all the cells in the notebook, starting with the "Load packages" cell. Load Packages
###Code
import os
import sys
import glob
import rasterio
import datacube
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
import geopandas as gpd
from sklearn.metrics import f1_score
from rasterstats import zonal_stats
###Output
/env/lib/python3.6/site-packages/geopandas/_compat.py:88: UserWarning: The Shapely GEOS version (3.7.2-CAPI-1.11.0 ) is incompatible with the GEOS version PyGEOS was compiled with (3.9.1-CAPI-1.14.2). Conversions between both will be slow.
shapely_geos_version, geos_capi_version_string
###Markdown
Analysis Parameters
* `product` : name of the crop-mask we're validating
* `band`: the band of the crop-mask we want to load and validate. Can be either `'mask'` or `'filtered'`
* `grd_truth` : a shapefile containing crop/no-crop points to serve as the "ground-truth" dataset
###Code
product = "crop_mask_eastern"
band = 'mask'
grd_truth = 'data/validation_samples.shp'
###Output
_____no_output_____
###Markdown
Load the datasets`the cropland extent mask`
###Code
#connect to the datacube
dc = datacube.Datacube(app='feature_layers')
#load 10m cropmask
ds = dc.load(product=product, measurements=[band]).squeeze()
print(ds)
###Output
/env/lib/python3.6/site-packages/datacube/drivers/postgres/_connections.py:87: SADeprecationWarning: Calling URL() directly is deprecated and will be disabled in a future release. The public constructor for URL is now the URL.create() method.
username=username, password=password,
###Markdown
`Ground truth points`
###Code
#ground truth shapefile
ground_truth = gpd.read_file(grd_truth).to_crs('EPSG:6933')
# rename the class column to 'actual'
ground_truth = ground_truth.rename(columns={'Class':'Actual'})
# reclassifer into int
ground_truth['Actual'] = np.where(ground_truth['Actual']=='non-crop', 0, ground_truth['Actual'])
ground_truth['Actual'] = np.where(ground_truth['Actual']=='crop', 1, ground_truth['Actual'])
ground_truth.head()
###Output
_____no_output_____
###Markdown
Convert points into polygons. When the validation data was collected, 40x40m polygons were evaluated as either crop/non-crop rather than points, so we want to sample the raster using the same small polygons. We'll find the majority or 'mode' statistic within the polygon and use that to compare with the validation dataset.
###Code
#set radius (in metres) around points
radius = 20
#create circle buffer around points, then find envelope
ground_truth['geometry'] = ground_truth['geometry'].buffer(radius).envelope
###Output
_____no_output_____
###Markdown
Calculate zonal statistics. We want to know what the majority pixel value is inside each validation polygon.
###Code
def custom_majority(x):
    # majority vote for a binary (0/1) mask: return 1 if more than half of the
    # valid (unmasked) pixels inside the polygon are classified as crop
    n_valid = np.ma.MaskedArray.count(x)
    crop_fraction = np.sum(x) / n_valid
    if crop_fraction > 0.5:
        return 1
    return 0
#calculate stats
stats = zonal_stats(ground_truth.geometry,
ds[band].values,
affine=ds.geobox.affine,
add_stats={'majority':custom_majority},
nodata=255)
#append stats to grd truth df
ground_truth['Prediction']=[i['majority'] for i in stats]
ground_truth.head()
###Output
_____no_output_____
###Markdown
*** Create a confusion matrix
###Code
confusion_matrix = pd.crosstab(ground_truth['Actual'],
ground_truth['Prediction'],
rownames=['Actual'],
colnames=['Prediction'],
margins=True)
confusion_matrix
###Output
_____no_output_____
###Markdown
Calculate User's and Producer's Accuracy `Producer's Accuracy`
###Code
confusion_matrix["Producer's"] = [confusion_matrix.loc[0, 0] / confusion_matrix.loc[0, 'All'] * 100,
confusion_matrix.loc[1, 1] / confusion_matrix.loc[1, 'All'] * 100,
np.nan]
###Output
_____no_output_____
###Markdown
`User's Accuracy`
###Code
users_accuracy = pd.Series([confusion_matrix[0][0] / confusion_matrix[0]['All'] * 100,
confusion_matrix[1][1] / confusion_matrix[1]['All'] * 100]
).rename("User's")
confusion_matrix = confusion_matrix.append(users_accuracy)
###Output
_____no_output_____
###Markdown
`Overall Accuracy`
###Code
confusion_matrix.loc["User's","Producer's"] = (confusion_matrix.loc[0, 0] +
confusion_matrix.loc[1, 1]) / confusion_matrix.loc['All', 'All'] * 100
###Output
_____no_output_____
###Markdown
`F1 Score`The F1 score is the harmonic mean of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall), and is calculated as:$$\begin{aligned}\text{Fscore} = 2 \times \frac{\text{UA} \times \text{PA}}{\text{UA} + \text{PA}}.\end{aligned}$$Where UA = Users Accuracy, and PA = Producer's Accuracy
###Code
fscore = pd.Series([(2*(confusion_matrix.loc["User's", 0]*confusion_matrix.loc[0, "Producer's"]) / (confusion_matrix.loc["User's", 0]+confusion_matrix.loc[0, "Producer's"])) / 100,
f1_score(ground_truth['Actual'].astype(np.int8), ground_truth['Prediction'].astype(np.int8), average='binary')]
).rename("F-score")
confusion_matrix = confusion_matrix.append(fscore)
###Output
_____no_output_____
###Markdown
Tidy Confusion Matrix* Limit decimal places,* Add readable class names* Remove non-sensical values
###Code
# round numbers
confusion_matrix = confusion_matrix.round(decimals=2)
# rename booleans to class names
confusion_matrix = confusion_matrix.rename(columns={0:'Non-crop', 1:'Crop', 'All':'Total'},
index={0:'Non-crop', 1:'Crop', 'All':'Total'})
#remove the nonsensical values in the table
confusion_matrix.loc["User's", 'Total'] = '--'
confusion_matrix.loc['Total', "Producer's"] = '--'
confusion_matrix.loc["F-score", 'Total'] = '--'
confusion_matrix.loc["F-score", "Producer's"] = '--'
confusion_matrix
###Output
_____no_output_____
###Markdown
Export csv
###Code
confusion_matrix.to_csv('results/Eastern_10m_accuracy_assessment_confusion_matrix.csv')
###Output
_____no_output_____ |
examples/minimal_example/minimal.ipynb | ###Markdown
Minimal example of command execution and logging with CmdInterface Import `CmdInterface`
###Code
from cmdint import CmdInterface
###Output
_____no_output_____
###Markdown
Create instance of `CmdInterface` with the name of the command to be called. Here we simply call the unix command *ls* to list a directory content.
###Code
test = CmdInterface('ls')
###Output
Warning, repo /home/neher/cmdint/cmdint is dirty!
###Markdown
Add keyword based argument to `CmdInterface`.
###Code
test.add_arg(key='-l', arg='/')
###Output
_____no_output_____
###Markdown
Run command and write log to the default file "CmdInterface.json" in the current working directory.
###Code
test.run()
###Output
2019-02-05 08:21:12 >> ls START
2019-02-05 08:21:12 >> ls END
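###Markdown
The resulting log can be inspected afterwards; a small added sketch using only the standard library (the file name `CmdInterface.json` is the default mentioned above, and no particular structure of its contents is assumed here):
###Code
import json

with open('CmdInterface.json') as f:
    log = json.load(f)

# print the first part of the pretty-printed run log
print(json.dumps(log, indent=2)[:500])
###Output
_____no_output_____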
|
sample-code/l23-textrepresent/Basic Sentiment.ipynb | ###Markdown
Using VADER
###Code
## Install if required
# !pip install vaderSentiment
# Import
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
model = SentimentIntensityAnalyzer()
# the polarity_scores method of the SentimentIntensityAnalyzer
# object gives a sentiment dictionary,
# which contains pos, neg, neu, and compound scores.
def show_sentiment_scores(data):
sentiment_dict = model.polarity_scores(data)
print("Overall sentiment dictionary is : ", sentiment_dict)
print("sentence was rated as ", sentiment_dict['neg']*100, "% Negative")
print("sentence was rated as ", sentiment_dict['neu']*100, "% Neutral")
print("sentence was rated as ", sentiment_dict['pos']*100, "% Positive")
print("Sentence overall rated as", end = " ")
# decide sentiment as positive, negative and neutral
if sentiment_dict['compound'] >= 0.05 :
print("Positive")
elif sentiment_dict['compound'] <= - 0.05 :
print("Negative")
else :
print("Neutral")
data = "Vader is amazingly simple to use. What great fun!"
show_sentiment_scores(data)
data = "This is not bad!"
show_sentiment_scores(data)
data = "This was supposed to be bad but did not turn out that way!"
show_sentiment_scores(data)
###Output
Overall sentiment dictionary is : {'neg': 0.175, 'neu': 0.825, 'pos': 0.0, 'compound': -0.3699}
sentence was rated as 17.5 % Negative
sentence was rated as 82.5 % Neutral
sentence was rated as 0.0 % Positive
Sentence overall rated as Negative
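###Markdown
The same analyzer can be applied to a whole list of sentences. A small added sketch (the example sentences below are made up for illustration) that keeps only the compound score for each one:
###Code
sentences = [
    "The plot was dull, but the acting was superb.",
    "I would not recommend this to anyone.",
    "Absolutely loved it!",
]
for sentence in sentences:
    compound = model.polarity_scores(sentence)['compound']
    print(f"{compound:+.3f}  {sentence}")
###Output
_____no_output_____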
|
01_oval_clean.ipynb | ###Markdown
**Learn nbdev !!!!!**
###Code
""" Maths are alright, but the axes length are still weird """
# export
import numpy as np
from matplotlib import pyplot as plt
# export
# the cov matrix
# sig1 = 2, sig2 = 1, rho = 0.5
cov = [[4,1],[1,1]]
# export
# get the points first makes more sense
numpts = 3000
Points = np.random.multivariate_normal([0,0],cov,size=numpts)
# plot the darn thing
plt.scatter(Points[:,0],Points[:,1])
plt.title("Raw points to have a look quickly")
# export
# get vector length
def vlen(v):
return np.sqrt( np.square(v[-2]) + np.square(v[-1]))
# try it
vlen(np.array([1,2])), vlen(np.array([3,4]))
# export
# get the eigvals and vecs
eigvals, eigvecs = np.linalg.eig(np.array(cov))
eigvals, eigvecs
eigvecs.T[0]
# confirm it's an eigenvector
eigvals[0] * eigvecs[0],np.array(cov) @ (eigvecs.T[0])
# export
major = eigvecs.T[0]
major
# export
minor = eigvecs.T[1]
minor
# export
# find the angle of major axis to x axis
# take advantage of high school math: dot product
x_vec = np.array([1,0])
dot = major @ x_vec
ang = np.arccos(dot/(vlen(major)*vlen(x_vec)))
ang * 180 / np.pi
###Output
_____no_output_____
###Markdown
Is the length correct? It should be proportional to sqrt(eigvals). Since std_x = 2 instead of 1, the modified overall std = sqrt(std_x^2 + std_y^2) = sqrt(5), which would require us to scale the std by 1/std = 1/sqrt(5); from the z-score table we get ratio = 0.59
###Code
# export
""" 0.59 meets but i don't know why """ # gets 0.502
rat = 0.59 # 1/sqrt(5) times of standard deviation !?
major_length = 2*eigvals[0]**0.5 * rat
minor_length = 2*eigvals[1]**0.5 * rat
major_length, minor_length
# export
# get the ellipse again use high school math
cos = np.cos(ang)
sin = np.sin(ang)
def transform_x(x,y):
return x*cos + y*sin
def transform_y(x,y):
return x*sin - y*cos
theta = np.arange(0,360,1)*np.pi/180
x = major_length*np.cos(theta)
y = minor_length*np.sin(theta)
x_oval = transform_x(x,y)
y_oval = transform_y(x,y)
""" just checking """
plt.plot(x_oval,y_oval)
plt.show()
# export
# define boundary and calculate inside/total
Points_map = [[transform_x(ele[0],ele[1]),transform_y(ele[0],ele[1])] for ele in Points]
# export
def boundary(point):
return vlen([point[0]*minor_length,point[1]*major_length])
boundary(Points_map[1])
# export
# check if the point is inside the boundary
pts, pts2 = [], []
for i in range(numpts):
if boundary(Points_map[i]) < major_length*minor_length:
pts.append(Points[i])
else:
pts2.append(Points[i])
pts[:10]
# export
ptsn, ptsn2 = np.array(pts), np.array(pts2)
len(ptsn)/numpts
plt.scatter(ptsn[:,0],ptsn[:,1])
plt.scatter(ptsn2[:,0],ptsn2[:,1])
major, minor
# export
# major and minor axis line
xx = np.arange(-8,8,1/numpts)
y1 = xx * major[1]/major[0]
y2 = xx * minor[1]/minor[0]
# orthogonal
major[1]/major[0] * minor[1]/minor[0]
# print ratio
len(ptsn)/numpts
# export
# result
plt.figure(figsize=(9,9))
plt.plot(x_oval,y_oval,"purple",linewidth=5.0)
plt.scatter(ptsn[:,0],ptsn[:,1])
plt.scatter(ptsn2[:,0],ptsn2[:,1])
plt.plot(xx,y1,"black")
plt.plot(xx,y2,"green")
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend(["contour","major","minor","inside","outside"])
plt.title(chr(ord('%')) + "std: %.4f; inside/total: %.4f" % (rat, len(ptsn)/numpts))
# change the filename each time, can use os.scandir() to write the right filename automatically
import os
if not os.path.isfile(os.path.join("multnormal", "output.png")):  # avoid the invalid '\o' escape in the literal path
plt.savefig('output')
plt.show()
# export everything to a python script and save the result
###Output
_____no_output_____
###Markdown
Exploring Chi-squared F(s) = 1 - exp(-s/2) for bivariate normal distribution
###Code
def chi(s):
return 1 - np.exp(-s/2)
t = np.arange(0,7,0.01)
plt.plot(t,chi(t))
plt.title("chi-squared")
plt.show()
###Output
_____no_output_____
###Markdown
It turned out, we need the noncentral chi-squared distribution for a non-standard normal distribution:
$$F(x; k, \lambda) = \sum_{j=0}^{\infty} e^{-\lambda/2}\,\frac{(\lambda/2)^j}{j!}\,\frac{\gamma\!\left(j + \tfrac{k}{2},\, \tfrac{x}{2}\right)}{\Gamma\!\left(j + \tfrac{k}{2}\right)}$$
https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution#Cumulative_distribution_function Would this work?
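One way to sanity-check the inside/total ratio against a chi-squared probability (an added sketch, not part of the original notebook): because the samples here have zero mean, the squared Mahalanobis distance follows a central chi-squared with 2 degrees of freedom, so $F(s) = 1 - e^{-s/2}$ applies directly and the noncentral form above would only be needed for a shifted mean.
###Code
import numpy as np
from scipy import stats

cov = np.array([[4.0, 1.0], [1.0, 1.0]])
pts = np.random.multivariate_normal([0.0, 0.0], cov, size=100000)

# squared Mahalanobis distance of each sample
inv_cov = np.linalg.inv(cov)
d2 = np.einsum('ij,jk,ik->i', pts, inv_cov, pts)

# fraction of points inside the contour {d2 < s}, compared with the chi2(2) CDF = 1 - exp(-s/2)
s = 1.0
print("empirical fraction:", np.mean(d2 < s))
print("chi2(2) cdf       :", stats.chi2.cdf(s, df=2))
###Output
_____no_output_____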
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_core.ipynb.
Converted index.ipynb.
Converted oval_clean.ipynb.
|
projectile_motion.ipynb | ###Markdown
Projectile motion 
###Code
import numpy as np
import matplotlib.pyplot as plt
# input
u = 40 # initial velocity in m/s
g = 9.81 # gravitational acceleration m/s^2
theta1 = 45 # angle of projectile
theta2 = 60 # angle of projectile
ux1 = u*np.cos(theta1*np.pi/180) # velocity in x direction
uy1 = u*np.sin(theta1*np.pi/180) # velocity in y direction
ux2 = u*np.cos(theta2*np.pi/180) # velocity in x direction
uy2 = u*np.sin(theta2*np.pi/180) # velocity in y direction
t_total_1 = 2*uy1/g
t_total_2 = 2*uy2/g
t1 = np.linspace(0,t_total_1,100)
t2 = np.linspace(0,t_total_2,100)
x1 = ux1*t1
y1 = (uy1*t1)-(0.5*g*t1**2)
x2 = ux2*t2
y2= (uy2*t2)-(0.5*g*t2**2)
plt.figure(figsize=(10,7)) # set graph size
plt.margins(x=0) # set x axis margin
plt.title('Projectile motion')
plt.plot(x1,y1,label = r'$\theta$ = 45$\degree$')
plt.plot(x2,y2,label = r'$\theta$ = 60$\degree$',color='red')
plt.legend()
plt.show()
###Output
_____no_output_____ |
0.17/_downloads/5d2190fa4d2e36b882cc913879fb6f3b/plot_decoding_time_generalization_conditions.ipynb | ###Markdown
Decoding sensor space data with generalization across time and conditions. This example runs the analysis described in [1]_. It illustrates how one can fit a linear classifier to identify a discriminatory topography at a given time instant and subsequently assess whether this linear model can accurately predict all of the time samples of a second set of conditions. References ---------- .. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental representations: the Temporal Generalization method', Trends In Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.
###Code
# Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import GeneralizingEstimator
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals
events = mne.read_events(events_fname)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
'Visual/Left': 3, 'Visual/Right': 4}
tmin = -0.050
tmax = 0.400
decim = 2 # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,
proj=True, picks=picks, baseline=None, preload=True,
reject=dict(mag=5e-12), decim=decim)
###Output
_____no_output_____
###Markdown
We will train the classifier on all left visual vs auditory trials and test on all right visual vs auditory trials.
###Code
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1,
verbose=True)
# Fit classifiers on the epochs where the stimulus was presented to the left.
# Note that the experimental condition y indicates auditory or visual
time_gen.fit(X=epochs['Left'].get_data(),
y=epochs['Left'].events[:, 2] > 2)
###Output
_____no_output_____
###Markdown
Score on the epochs where the stimulus was presented to the right.
###Code
scores = time_gen.score(X=epochs['Right'].get_data(),
y=epochs['Right'].events[:, 2] > 2)
###Output
_____no_output_____
###Markdown
Plot
###Code
fig, ax = plt.subplots(1)
im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower',
extent=epochs.times[[0, -1, 0, -1]])
ax.axhline(0., color='k')
ax.axvline(0., color='k')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Generalization across time and condition')
plt.colorbar(im, ax=ax)
plt.show()
###Output
_____no_output_____ |
housing-ex.ipynb | ###Markdown
Functions to download and load the data This notebook has a lot of different cells showing examples of different techniques. However, once you've run through them a few times, you might want to skip to training models at the end. In order to do that, the minimum steps/cells that need to be run are labeled below as:* run 1* run 2* run 3* run 4* run 5* run 6* run 7* run 8* run 9* run 10* run 11* run 12* run 13* run 14* run 15
###Code
# run 1
import os
import tarfile
import urllib
import pandas as pd
import numpy as np
from zlib import crc32
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
os.makedirs(housing_path, exist_ok=True)
tgz_path = os.path.join(housing_path,"housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
def split_train_test(data, test_ratio):
shuffled_indicies = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indicies = shuffled_indicies[:test_set_size]
train_indicies = shuffled_indicies[test_set_size:]
return data.iloc[train_indicies], data.iloc[test_indicies]
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
def split_train_test_by_id(data, test_ratio, id_column):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
return data.loc[~in_test_set], data.loc[in_test_set]
# display the scores
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard Deviation:", scores.std())
###Output
_____no_output_____
###Markdown
Download the data
###Code
fetch_housing_data()
###Output
_____no_output_____
###Markdown
Load and look at the data
###Code
# run 2
housing = load_housing_data()
housing.head()
housing.info()
housing["ocean_proximity"].value_counts()
housing.describe()
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
plt.show()
###Output
_____no_output_____
###Markdown
Split test set from training data
###Code
#using a function we wrote
train_set_a, test_set_a = split_train_test(housing, 0.2)
print("train set:" + str(len(train_set_a)))
print("test set:" + str(len(test_set_a)))
#using a function that hashes an id column
housing_with_id = housing.reset_index()
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set_b, test_set_b = split_train_test_by_id(housing_with_id, 0.2, "id")
print("train set:" + str(len(train_set_b)))
print("test set:" + str(len(test_set_b)))
#using sklearn
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print("train set:" + str(len(train_set)))
print("test set:" + str(len(test_set)))
# run 3
#creating an income category to perform stratified sampling
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0,1.5,3.0,4.5,6.0, np.inf], labels=[1,2,3,4,5])
housing["income_cat"].hist()
# run 4
#stratified splitting
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts()/len(strat_test_set)
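# (added sketch) compare with the full dataset's proportions to confirm the stratified
# split preserved the income_cat distribution; the two Series should be nearly identical
housing["income_cat"].value_counts() / len(housing)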
# run 5
#dropping the column we used to stratify
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Discovering and visualizing the data
###Code
# run 6
%matplotlib inline
import matplotlib.pyplot as plt
#make a copy of the data
housing = strat_train_set.copy()
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,s=housing["population"]/100, label="population",
figsize=(10,7), c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)
plt.legend()
#correlation matrix
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income","total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)
###Output
_____no_output_____
###Markdown
Experimenting with Attribute Combinations
###Code
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# run 7
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
housing.head(5)
housing_labels.head(5)
###Output
_____no_output_____
###Markdown
Data Cleaning
###Code
# drop missing values
housing_dropna = housing.copy()
housing_dropna.dropna(subset=["total_bedrooms"])
#drop the attribute with missing values
housing_drop = housing.drop("total_bedrooms",axis=1)
#replace missing values with median
median = housing["total_bedrooms"].median()
housing_fillna = housing.copy()
housing_fillna["total_bedrooms"].fillna(median, inplace=True)
# run 8
# skikit-learn simple imputer for missing values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
# drop non numeric field
housing_num = housing.drop("ocean_proximity", axis=1)
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
# transform the set by replacing missing numbers with their median (output is a Numpy array)
X = imputer.transform(housing_num)
# put the array back into a data frame
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing_num.index)
###Output
_____no_output_____
###Markdown
Text and categorical attributes
###Code
# run 9
housing_cat = housing[["ocean_proximity"]]
housing_cat.head(10)
# transform "ocean_proximity" into categorical columns using scikit-learn
# the resulting array corresponds to the data above... where the number in the housing_cat_encoded array is the position of the category in the ordinal_encoder.categories_ array below
# 0=<1H OCEAN... 4=NEAR OCEAN... 1=INLAND (which all kind of makes sense considering it is 'ordinal' encoding right?)
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
housing_cat_encoded[:10]
ordinal_encoder.categories_
# run 10
# One Hot Encoding
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot
housing_cat_1hot.toarray()
cat_encoder.categories_
###Output
_____no_output_____
###Markdown
Custom Transformers
###Code
# run 11
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, households_ix = 3,4,5,6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room=True):
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix]/X[:, households_ix]
population_per_household = X[:, population_ix]/X[:,households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix]/X[:,rooms_ix]
return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs
###Output
_____no_output_____
###Markdown
Transformation Pipelines
###Code
# run 12
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler())
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
housing_num_tr
# run 13
from sklearn.compose import ColumnTransformer
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", OneHotEncoder(), cat_attribs)
])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
###Output
_____no_output_____
###Markdown
Training and Evaluating on the Training Set
###Code
# run 14
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predications", lin_reg.predict(some_data_prepared))
print("Labels", list(some_labels))
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# run 15
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
#overfit model
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
###Output
_____no_output_____
###Markdown
Cross-Validation
###Code
# scikit-learn's k-fold cross-validation feature
# this splits the training data into 10 subsets (folds) then trains and evaluates the model 10 times,
# using each fold once as the evaluation set while training on the remaining 9
# the resultant array is the 10 evaluation scores
# note: scoring="neg_mean_squared_error" returns negative MSE (sklearn expects "greater is better"),
# so we negate it before taking the square root
# here we evaluate it on the previously over-fit tree model
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
display_scores(tree_rmse_scores)
# here we evaluate our linear regression model with cross-validation
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# try a random forest
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_labels)
forest_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, forest_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
print("Forest RMSE:",forest_rmse)
display_scores(forest_rmse_scores)
# how to save and load a model
import joblib
joblib.dump(forest_reg, "rnd-forest-ch-2.pkl")
#loaded_model = joblib.load("rnd-forest-ch-2.pkl")
###Output
_____no_output_____
###Markdown
Fine Tuning Models
###Code
# using scikit-learn GridSearchCV to optimize hyper parameters
# based on the param grid, this will look at 12 (3 X 4) combinations of estimators and features,
# then perform another pass looking at 6 (2 X 3) combinations of estimators and features for a total of 18 combinations of hyper parameter values to find the best ones (best_params_)
from sklearn.model_selection import GridSearchCV
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring="neg_mean_squared_error", return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
grid_search.best_params_
#grid search best estimator
grid_search.best_estimator_
#look at the evaluation scores from grid search
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
cat_encoder = full_pipeline.named_transformers_["cat"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
###Output
_____no_output_____
###Markdown
Evaluate the model on the test set!
###Code
#evaulate the model on the test set
# steps are: get the predictors and labels from the test set
# use the pipeline we created to transform the data (call 'transform()', not 'fit_transform()')
# evaluate the model on the set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis = 1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
#final RMSE
print("Final RMSE:", final_rmse)
#or look at 95% confidence interval to evaluate
from scipy import stats
confidence = 0.95
squared_errors = (final_predictions - y_test) ** 2
np.sqrt(stats.t.interval(confidence, len(squared_errors) - 1,
loc=squared_errors.mean(),
scale=stats.sem(squared_errors)))
joblib.dump(final_model, "final-model-ch-2.pkl")
###Output
_____no_output_____ |
Mathematics/Statistics/Statistics and Probability Python Notebooks/Concepts_Notebooks--probability_and_statistics-notebooks/notebook-a/ar1_process.ipynb | ###Markdown
AR(1) Process
---
The AR(1) process:
$$ x_t = \phi x_{t-1} + \epsilon_t,\quad \epsilon_t\sim\mathrm{Normal}(0,\sigma^2),$$
is a type of time series model that is widely applied in economics, finance and other related fields. The AR(1) process is a Markov chain and its transition kernel is
$$ K(x_{t-1},x_t) = \frac1{\sqrt{2\pi\sigma^2}}\exp\left[-\frac{(x_t-\phi x_{t-1})^2}{2\sigma^2}\right].$$
###Code
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We suppose $\phi=0.9$, $\sigma^2=0.19$ and $x_0 \sim \mathrm{Uniform}\left(-\sqrt{3},\sqrt{3}\right)$. The following figure shows the initial distribution:
###Code
n = 1000000
x = st.uniform.rvs(loc=-np.sqrt(3.0), scale=2.0*np.sqrt(3.0), size=n)
plt.hist(x, density=True, bins=50)
plt.title('Initial Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
The AR(1) process progresses from $t=0$ to $t=1$.
###Code
phi = 0.9
sigma = np.sqrt(0.19)
x = phi * x + st.norm.rvs(scale=sigma, size=n)
plt.hist(x, density=True, bins=50)
plt.title('AR(1) Distribution at t=1')
plt.show()
###Output
_____no_output_____
###Markdown
Then we repeat one more time.
###Code
x = phi * x + st.norm.rvs(scale=sigma, size=n)
plt.hist(x, density=True, bins=50)
plt.title('AR(1) Distribution at t=2')
plt.show()
###Output
_____no_output_____
###Markdown
After we repeat the computation 20 times, we have the following histogram.
###Code
for t in range(18):
x = phi * x + st.norm.rvs(scale=sigma, size=n)
plt.hist(x, density=True, bins=50)
plt.title('AR(1) Distribution at t=20')
xgrid = np.linspace(-4.0, 4.0, 101)
plt.plot(xgrid, st.norm.pdf(xgrid))
plt.legend(['Invariant', 'AR(1)'])
plt.show()
###Output
_____no_output_____ |
training-DOT.ipynb | ###Markdown
Importing
###Code
import pandas as pd
from sklearn import preprocessing
from collections import deque
import numpy as np
import random
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras.regularizers import l1
from tensorflow.keras.regularizers import l2
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
###Output
_____no_output_____
###Markdown
Creating and preparing the dataset
###Code
main_df = pd.DataFrame()
ratio = "DOTUSDT-experiment"
dataset = pd.read_csv(
f"crypto_data/{ratio}.csv")
dataset.rename(
columns={"close": f"{ratio}-close", "volume": f"{ratio}-volume"}, inplace=True
)
dataset.set_index("time", inplace=True)
dataset.drop(["low", "high", "open"], axis=1, inplace=True)
if main_df.empty:
main_df = dataset
else:
main_df = main_df.join(dataset)
main_df.head()
main_df.columns.values
###Output
_____no_output_____
###Markdown
Parameters
###Code
SEQ_LEN = 8
FUTURE_PRED = 4
COIN = ratio #DOT-USDT
VAL_PCT = 0.2
def classify(current, future):
if float(current) <= float(future):
return 1 # when the price is higher we buy
else:
return 0 # when the price is lower we sell
main_df["future"] = main_df[f"{COIN}-close"].shift(-FUTURE_PRED)
main_df[[f'{COIN}-close', 'future']].head()
main_df["target"] = list(
map(classify, main_df[f"{COIN}-close"], main_df["future"])
)
main_df.head()
###Output
_____no_output_____
###Markdown
Splitting the dataset into training and validation sets
###Code
times = main_df.index.values
last_x_pct = main_df.index.values[-int(VAL_PCT * len(times))]
validation_main_df = main_df[main_df.index >= last_x_pct]
main_df = main_df[main_df.index < last_x_pct]
main_df.loc[main_df.index == last_x_pct] #the splitting place
###Output
_____no_output_____
###Markdown
Checking for null values
###Code
main_df.fillna(method="ffill", inplace=True)
validation_main_df.fillna(method="ffill", inplace=True)
main_df.dropna(inplace=True)
validation_main_df.dropna(inplace=True)
main_df.isna().sum(), validation_main_df.isna().sum()
###Output
C:\Users\Claudiu\AppData\Local\Programs\Python\Python38\lib\site-packages\pandas\core\frame.py:4317: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
return super().fillna(
<ipython-input-32-be01dd49188d>:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
main_df.dropna(inplace=True)
<ipython-input-32-be01dd49188d>:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
validation_main_df.dropna(inplace=True)
###Markdown
Preprocessing through normalizing, scaling and balancing
###Code
def preprocessing_df(df):
    df = df.drop("future", axis=1)  # actually drop the future price column so it cannot leak into the features
for col in df.columns:
if col != "target":
df[col] = df[col].pct_change()
df.dropna(inplace=True)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.dropna(inplace=True)
df[col] = preprocessing.scale(df[col])
df.dropna(inplace=True)
sequencial_data = []
prev_days = deque(maxlen=SEQ_LEN)
for row in df.values:
prev_days.append([x for x in row[:-1]])
if len(prev_days) == SEQ_LEN:
#separate the label/target
sequencial_data.append([np.array(prev_days), row[-1]])
random.shuffle(sequencial_data)
buys = []
sells = []
for seq, target in sequencial_data:
if target == 0: # you need to sell
sells.append([seq, target])
elif target == 1: # you need to buy
buys.append([seq, target])
random.shuffle(buys)
random.shuffle(sells)
lower_nr = min(len(buys), len(sells))
buys = buys[:lower_nr]
sells = sells[:lower_nr]
sequencial_data = buys + sells
random.shuffle(sequencial_data)
x = []
Y = []
for seq, target in sequencial_data:
x.append(seq)
Y.append(target)
return np.array(x), np.array(Y)
train_x, train_Y = preprocessing_df(main_df)
test_x, test_Y = preprocessing_df(validation_main_df)
train_x.shape, test_x.shape
print(f"Training data: {len(train_x)}, Validation data: {len(test_x)}")
print(f" Training set Sells: {(train_Y.astype('int32') == 0).sum()}, Buys: {(train_Y.astype('int32') == 1).sum()}")
print(f" Validation set Sells: {(test_Y.astype('int32') == 0).sum()}, Buys: {(test_Y.astype('int32') == 1).sum()}")
###Output
Training data: 5174, Validation data: 1242
Training set Sells: 2587, Buys: 2587
Validation set Sells: 621, Buys: 621
###Markdown
Model architecture
###Code
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
model = Sequential()
model.add(LSTM(128, kernel_regularizer=l2(1e-6), recurrent_regularizer=l2(1e-6), bias_regularizer=l2(1e-6), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LSTM(128, kernel_regularizer=l2(1e-6), recurrent_regularizer=l2(1e-6), bias_regularizer=l2(1e-6), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LSTM(64, kernel_regularizer=l2(1e-6), recurrent_regularizer=l2(1e-6), bias_regularizer=l2(1e-6), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LSTM(64, kernel_regularizer=l2(1e-6), recurrent_regularizer=l2(1e-6), bias_regularizer=l2(1e-6), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LSTM(32, kernel_regularizer=l2(1e-6), recurrent_regularizer=l2(1e-6), bias_regularizer=l2(1e-6)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
opt = Adam(learning_rate=1e-4, decay=1e-7)
LOSS='sparse_categorical_crossentropy'
model.compile(loss=LOSS, optimizer=opt, metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Hyperparameters, tensorboard and checkpoint
###Code
EPOCHS = 20
BATCH_SIZE = 32
NAME = f"DOTUSDT-experiment"
tensorboard = TensorBoard(log_dir=f'logs/{NAME}')
filepath = "Model-final-DOT-{epoch:02d}-{val_accuracy:.3f}"
checkpoint = ModelCheckpoint("models/{}.model".format(filepath), monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')  # saves only the best checkpoints (the keyword arguments must go to ModelCheckpoint, not str.format)
###Output
_____no_output_____
###Markdown
Fitting the model
###Code
history = model.fit(train_x, train_Y,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_x, test_Y),
callbacks=[tensorboard, checkpoint])
###Output
Epoch 1/20
162/162 [==============================] - 38s 143ms/step - loss: 0.9378 - accuracy: 0.5193 - val_loss: 0.6924 - val_accuracy: 0.5314
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-01-0.531.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-01-0.531.model\assets
Epoch 2/20
162/162 [==============================] - 7s 41ms/step - loss: 0.7882 - accuracy: 0.5614 - val_loss: 0.6819 - val_accuracy: 0.5475
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-02-0.548.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-02-0.548.model\assets
Epoch 3/20
162/162 [==============================] - 6s 39ms/step - loss: 0.7368 - accuracy: 0.5705 - val_loss: 0.6423 - val_accuracy: 0.6296
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-03-0.630.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-03-0.630.model\assets
Epoch 4/20
162/162 [==============================] - 7s 46ms/step - loss: 0.7328 - accuracy: 0.5814 - val_loss: 0.5891 - val_accuracy: 0.6997
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-04-0.700.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-04-0.700.model\assets
Epoch 5/20
162/162 [==============================] - 7s 46ms/step - loss: 0.6823 - accuracy: 0.6279 - val_loss: 0.5690 - val_accuracy: 0.7206
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-05-0.721.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-05-0.721.model\assets
Epoch 6/20
162/162 [==============================] - 7s 42ms/step - loss: 0.6550 - accuracy: 0.6315 - val_loss: 0.5351 - val_accuracy: 0.7432
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-06-0.743.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-06-0.743.model\assets
Epoch 7/20
162/162 [==============================] - 10s 62ms/step - loss: 0.6232 - accuracy: 0.6715 - val_loss: 0.5142 - val_accuracy: 0.7399
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-07-0.740.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-07-0.740.model\assets
Epoch 8/20
162/162 [==============================] - 7s 42ms/step - loss: 0.6165 - accuracy: 0.6728 - val_loss: 0.4867 - val_accuracy: 0.7705
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-08-0.771.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-08-0.771.model\assets
Epoch 9/20
162/162 [==============================] - 7s 45ms/step - loss: 0.5746 - accuracy: 0.7056 - val_loss: 0.4560 - val_accuracy: 0.7850
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-09-0.785.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-09-0.785.model\assets
Epoch 10/20
162/162 [==============================] - 7s 42ms/step - loss: 0.5396 - accuracy: 0.7263 - val_loss: 0.4258 - val_accuracy: 0.8100
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-10-0.810.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-10-0.810.model\assets
Epoch 11/20
162/162 [==============================] - 7s 44ms/step - loss: 0.5099 - accuracy: 0.7520 - val_loss: 0.3913 - val_accuracy: 0.8221
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-11-0.822.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-11-0.822.model\assets
Epoch 12/20
162/162 [==============================] - 10s 60ms/step - loss: 0.4774 - accuracy: 0.7735 - val_loss: 0.3530 - val_accuracy: 0.8406
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-12-0.841.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-12-0.841.model\assets
Epoch 13/20
162/162 [==============================] - 7s 45ms/step - loss: 0.4551 - accuracy: 0.7910 - val_loss: 0.3091 - val_accuracy: 0.8655
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-13-0.866.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-13-0.866.model\assets
Epoch 14/20
162/162 [==============================] - 8s 47ms/step - loss: 0.4256 - accuracy: 0.8088 - val_loss: 0.2903 - val_accuracy: 0.8784
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-14-0.878.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-14-0.878.model\assets
Epoch 15/20
162/162 [==============================] - 7s 44ms/step - loss: 0.4011 - accuracy: 0.8190 - val_loss: 0.2701 - val_accuracy: 0.8849
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-15-0.885.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-15-0.885.model\assets
Epoch 16/20
162/162 [==============================] - 7s 46ms/step - loss: 0.3938 - accuracy: 0.8241 - val_loss: 0.2660 - val_accuracy: 0.8873
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-16-0.887.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-16-0.887.model\assets
Epoch 17/20
162/162 [==============================] - 7s 45ms/step - loss: 0.3740 - accuracy: 0.8404 - val_loss: 0.2362 - val_accuracy: 0.9026
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-17-0.903.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-17-0.903.model\assets
Epoch 18/20
162/162 [==============================] - 7s 43ms/step - loss: 0.3368 - accuracy: 0.8605 - val_loss: 0.2208 - val_accuracy: 0.9018
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-18-0.902.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-18-0.902.model\assets
Epoch 19/20
162/162 [==============================] - 8s 47ms/step - loss: 0.3182 - accuracy: 0.8708 - val_loss: 0.2181 - val_accuracy: 0.9018
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-19-0.902.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-19-0.902.model\assets
Epoch 20/20
162/162 [==============================] - 7s 46ms/step - loss: 0.3075 - accuracy: 0.8728 - val_loss: 0.2018 - val_accuracy: 0.9130
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
WARNING:absl:Found untraced functions such as lstm_cell_5_layer_call_fn, lstm_cell_5_layer_call_and_return_conditional_losses, lstm_cell_6_layer_call_fn, lstm_cell_6_layer_call_and_return_conditional_losses, lstm_cell_7_layer_call_fn while saving (showing 5 of 25). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\Model-final-DOT-20-0.913.model\assets
INFO:tensorflow:Assets written to: models\Model-final-DOT-20-0.913.model\assets
###Markdown
Evaluating and saving the model
###Code
# Scoring
score = model.evaluate(test_x, test_Y, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Saving
model.save(f"models/{NAME}")
model.summary()
train_x.shape, test_x.shape
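# Note (sketch only, not executed here): the saved model can be reloaded later for inference with
#   loaded = tf.keras.models.load_model(f"models/{NAME}")
#   loaded.predict(test_x)   # per-sample probabilities for the [sell, buy] classes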
###Output
_____no_output_____ |
Data-Science-HYD-2k19/Topic-Wise/OOP/POLYMORPHISM.ipynb | ###Markdown
Python uses dynamic typing. Unlike C and C++, in Python there is no need to declare a data type before a variable or a method parameter.
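A minimal illustration of dynamic typing (a separate toy snippet, in addition to the example below): the same name can simply be rebound to values of different types.

```python
x = 5            # x currently refers to an int
x = "five"       # now x refers to a str; no type declaration is needed
print(type(x))   # <class 'str'>
```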
###Code
'''Ability to use the same syntax for objects of different types:'''
def summer(a,b):
return a+b
print(summer(1,1))
print(summer(['a','b','c'],['d','e']))
print(summer("abc","de"))
###Output
abcde
|
notebooks/0_calc_wbt_dunne.ipynb | ###Markdown
Calculate wet bulb temperature. Empirical formula for wet bulb temperature used in [Dunne et al (2013)](https://www.nature.com/articles/nclimate1827?proof=trueMay%252F), taken from [Davies-Jones (2008)](https://journals.ametsoc.org/mwr/article/136/7/2764/68177/An-Efficient-and-Accurate-Method-for-Computing-the), accurate between 0 and 100$^\circ C$.
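The cell below actually calls the project's own `td.calc_wbt_from_tref_sh_p` helper with `method='Stull'`. As a rough standalone reference (an illustrative sketch of the Stull (2011) regression, not the `pei` implementation, and assuming relative humidity in percent has already been derived from specific humidity and pressure):

```python
import numpy as np

def wbt_stull(t_c, rh_pct):
    """Approximate wet bulb temperature (degC) from air temperature (degC) and
    relative humidity (%), using the Stull (2011) empirical fit (near sea-level
    pressure, RH above roughly 5%)."""
    return (t_c * np.arctan(0.151977 * np.sqrt(rh_pct + 8.313659))
            + np.arctan(t_c + rh_pct)
            - np.arctan(rh_pct - 1.676331)
            + 0.00391838 * rh_pct**1.5 * np.arctan(0.023101 * rh_pct)
            - 4.686035)

print(wbt_stull(20.0, 50.0))  # approximately 13.7 degC
```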
###Code
%load_ext autoreload
%autoreload 2
import xarray as xr
import numpy as np
from dask.diagnostics import ProgressBar
from pei import thermodynamics as td
# Point to model data
rootdir = '/local/ss23/GFDL_LEs/ATM/RCP85/'
suffix = '.rcp85.ens109.1950_2100.nc'
variables = ['sphum_k24','ps','t_ref']
ds = xr.Dataset()
for var in variables:
print(var)
ds[var] = xr.open_dataarray(rootdir+var+suffix,chunks={'time':1000}).squeeze()
wbt = td.calc_wbt_from_tref_sh_p(ds['t_ref'], ds['sphum_k24'], ds['ps']/100, method='Stull')
wbt.name = 'WBT'
wbt.attrs = {'units':'degC',
'long_name':'Wet bulb temperature at 2m, from daily mean absolute temperature, specific humidity and pressure, calculated using Stull (2011) J. Appl. Meteor. Climatol.'}
newdir = '../data/processed/GFDL/WBT/'
with ProgressBar():
wbt.to_netcdf(newdir+'wbt_mean_stull'+suffix)
ds['sp']
# Load CESM2 data for comparison
rootdir = '/local/ss23/CESM2_LE/ATM/RCP85/WBT/'
filename = 'b.e21.BHISTcmip6.f09_g17.LE2-1231.001.clm2.h7.WBT.1980010100-1990010100.nc'
ds_cesm = xr.open_dataset(rootdir+filename)
ds_cesm = ds_cesm.sel(time=slice('1980-01-01','1981-01-01'))
ds_cesm['WBT'].mean('time').plot(vmin=-40,vmax=40,cmap='RdBu_r')
ds['sphum_k24']
_calc_wbt_from_tref_rh_p(303.15,50,1000)
###Output
_____no_output_____ |
Phase_02.ipynb | ###Markdown
Table of Contents: 1 Setup | 2 Semantics | 2.1 Motivation | 2.2 Term-Document | 2.2.1 Bag-of-Words | 2.2.2 TF-IDF | 2.3 Term-Context | 3 Word2Vec | 3.1 Continuous Bag of Words | 3.2 Skip-gram | 3.3 Example | 4 Doc2Vec | 4.1 Doc2Vec, the most powerful extension of word2vec | 4.2 Distributed Memory (DM) | 4.3 Distributed Bag of Words (DBOW) | 5 Exercises
###Code
reset -f -s
def pip_install(*packages):
"""
Install packages using pip
Alternatively just use command line
pip install package_name
"""
try:
import pip
for package in packages:
pip.main(["install", "--upgrade", package, "--user"])
except Exception as e:
print("Unable to install {} using pip.".format(package))
print("Exception:", e)
pip_install('gensim', 'nltk')
import nltk
nltk.download('gutenberg')
nltk.download('reuters')
nltk.download('punkt')       # used later by nltk.word_tokenize
nltk.download('stopwords')   # used later by nltk.corpus.stopwords
import os
ROOTDIR = os.path.abspath(os.path.dirname('__file__'))
DATADIR = os.path.join(ROOTDIR, 'data')
###Output
_____no_output_____
###Markdown
Semantics--- Motivation If we want to be able to categorize text, we need to be able to generate features for articles, paragraphs, sentences and other bodies of text, based on the information they contain and what they represent. There are a number of ways to achieve this and we will go over 3 approaches. Term-Document Bag-of-Words One of the simplest ways to extract features from text is to just count how many times a word appears in a body of text. In this model, the order of words does not matter and only the number of occurrences of each unique term for each document is taken into account.
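As a minimal illustration (on a made-up sentence, before switching to the real dataset below), a bag of words is nothing more than a count of tokens with word order ignored:

```python
from collections import Counter

tokens = "the movie was great and the acting was great".split()
print(Counter(tokens))
# e.g. Counter({'the': 2, 'was': 2, 'great': 2, 'movie': 1, 'and': 1, 'acting': 1})
```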
###Code
import pandas as pd
#Load movie reviews dataset
df = pd.read_csv(os.path.join(DATADIR, 'movie_reviews.csv'), nrows=100000)
texts = df.text.values #pd.Series -> np.ndarray
import nltk
# Transform each review string as a list of token strings. May take a few seconds
tokenized = [nltk.word_tokenize(review) for review in texts]
n = 10 #arbitrary pick
print('Example review:\n Raw: {} \n\n Tokenized: {}'.format(texts[n], [i for i in tokenized[n]]))
def clean_text(tokenized_list):
import string
sw = nltk.corpus.stopwords.words('english')
new_list = [[token.lower() for token in tlist if token not in string.punctuation and token.lower() not in sw] for tlist in tokenized_list]
return new_list
# Remove punctuations and stopwords
cleaned = clean_text(tokenized)
from gensim import corpora
# Create a dictionary from list of documents
dictionary = corpora.Dictionary(cleaned)
# Create a Corpus based on BOW Format.
corpus = [dictionary.doc2bow(text) for text in cleaned]
print('Example review featurized in Bag of Words :\n {}'.format([(dictionary[i[0]], i[1]) for i in corpus[n]]))
###Output
Example review featurized in Bag of Words :
[('still', 1), ('charm', 1), ('concept', 1), ('could', 1), ('design', 1), ('engulfed', 1), ('execution', 1), ('ingenious', 1), ('postage', 1), ('screen', 1), ('stamp-sized', 1), ('watch', 1)]
###Markdown
Note that when we use this model to featurize text:- The length of each feature vector will be the size of the vocabulary in the corpus- Thus each body of text will have a lot of zeroes TF-IDF __Term Frequency__: Number of occurrences of a word in a document __Inverse Document Frequency__: A weight that shrinks as more documents in the corpus contain the word __Term Frequency - Inverse Document Frequency__: (Number of occurrences of word $w$ in text $T$) * $log$(Number of documents in the corpus/Number of documents containing word $w$) Let's check out the TF-IDF scores of the previous movie review we examined.
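To make the formula concrete, here is a small hand computation on a toy corpus (a sketch only; gensim's `TfidfModel` used below additionally normalizes each document vector, so its scores differ from these raw values):

```python
import math

docs = [["the", "movie", "was", "great"],
        ["the", "movie", "was", "boring"],
        ["the", "ending", "was", "great"]]

def tf_idf(term, doc, corpus):
    tf = doc.count(term)                      # term frequency in this document
    df = sum(1 for d in corpus if term in d)  # number of documents containing the term
    return tf * math.log(len(corpus) / df)    # tf * idf

print(tf_idf("boring", docs[1], docs))  # rare term -> highest weight, log(3) ~ 1.10
print(tf_idf("great", docs[0], docs))   # in 2 of 3 docs -> log(3/2) ~ 0.41
print(tf_idf("the", docs[0], docs))     # in every doc -> idf = log(1) = 0
```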
###Code
from gensim import corpora, models
#Create a TFIDF Model for the corpus
tfidf = models.TfidfModel(corpus)
print('Example review featurized with TF-IDF scores : \n{}'.format([(dictionary[i[0]], round(i[1],3)) for i in tfidf[corpus[n]]]))
###Output
collecting document frequencies
PROGRESS: processing document #0
PROGRESS: processing document #10000
PROGRESS: processing document #20000
PROGRESS: processing document #30000
PROGRESS: processing document #40000
PROGRESS: processing document #50000
PROGRESS: processing document #60000
PROGRESS: processing document #70000
PROGRESS: processing document #80000
PROGRESS: processing document #90000
calculating IDF weights for 100000 documents and 67733 features (1111018 matrix non-zeros)
Example review featurized with TF-IDF scores :
[('still', 0.152), ('charm', 0.206), ('concept', 0.244), ('could', 0.159), ('design', 0.25), ('engulfed', 0.43), ('execution', 0.259), ('ingenious', 0.293), ('postage', 0.414), ('screen', 0.176), ('stamp-sized', 0.458), ('watch', 0.191)]
###Markdown
Looks much more like a feature vector that we can use for text categorization! Note that in the TF-IDF model:- If a term frequently occurs in the corpus (i.e. stopwords, the term $like$), it is scaled to a lower score- Rarer terms will generally have higher scores. They tend to be more "informative" and descriptive.- A term that occurs frequently in a small number of documents within the corpus will have the highest scores. Term-Context The vast majority of NLP work regards words as atomic symbols: king, queen, book, etc. In vector space terms, each such vector has a single $1$ and a lot of zeros. $king = [1, 0, 0, 0, 0, 0, 0, 0, 0]$ $queen = [0, 1, 0, 0, 0, 0, 0, 0, 0]$ $book = [0, 0, 1, 0, 0, 0, 0, 0, 0]$ It is called a "one-hot" encoding representation. It is a common way to represent categories in models. However, it is very sparse (as we saw from the BOW model); each row is mostly 0s. You can get more value by representing a word by its neighbors. Instead of using entire documents, we can use small contexts around a term:- Paragraphs- Sentences- A window of a sequence of consecutive terms In this way, a word is defined over counts of context words. The assumption is that two words that appear in similar contexts are similar themselves. But count-based models have disadvantages:- vector sizes become huge, equal to vocabulary size - sparsity - curse of dimensionality - computationally expensive Word2Vec Word2Vec is an unsupervised neural network model that maximizes similarity between contextual neighbors while minimizing similarity for unseen contexts. Initial vectors are generated randomly and converge as the model is trained on the corpus through a sliding window. Target vector sizes are set at the beginning of the training process, so the vectors are dense and do not need dimensionality reduction techniques. Continuous Bag of Words Training objective is to maximize the probability of observing the correct target word $w_t$ given context words $w_{c1}, w_{c2}, ... w_{cj}$ $$ C = -\log p(w_t | w_{c1} ... w_{cj}) $$ The prediction vector is set as an average of all the context word vectors. Skip-gram Training objective is to maximize the probability of observing the correct context words $w_{ci}$ given target word $w_{t}$ $$ C = -\sum^{j}_{i=1}\log p(w_{ci} | w_{t}) $$ In this case, the prediction vector is the target word vector. Example Now let's try training our own word embeddings and looking at what we can do with them. Word2Vec- `size`: Number of dimensions for the word embedding model- `window`: Number of context words to observe in each direction- `min_count`: Minimum frequency for words included in model- `sg` (Skip-Gram): '0' indicates CBOW model; '1' indicates Skip-Gram- `alpha`: Learning rate (initial); prevents model from over-correcting, enables finer tuning- `iter`: Number of passes through the dataset- `batch_words`: Number of words to sample from data during each pass
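Before training Word2Vec, here is a minimal sketch of the term-context (co-occurrence) counting described above, assuming a toy three-sentence corpus and a symmetric window of one word:

```python
from collections import defaultdict

sentences = [["i", "like", "deep", "learning"],
             ["i", "like", "nlp"],
             ["i", "enjoy", "flying"]]

window = 1
cooc = defaultdict(int)
for sent in sentences:
    for i, word in enumerate(sent):
        for j in range(max(0, i - window), min(len(sent), i + window + 1)):
            if j != i:
                cooc[(word, sent[j])] += 1   # count how often the pair shares a context

print(cooc[("i", "like")])    # 2 -> "i" and "like" co-occur in two sentences
print(cooc[("deep", "nlp")])  # 0 -> these never share a context in this tiny corpus
```

Word2Vec learns dense vectors instead of these sparse counts.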
###Code
from nltk.corpus import gutenberg
from gensim import models
# Training word2vec model on Gutenberg corpus. This may take a few minutes.
model = models.Word2Vec(gutenberg.sents(), size = 300, window = 5, min_count =5, sg = 0, alpha = 0.025, iter=10, batch_words = 10000)
###Output
collecting all words and their counts
PROGRESS: at sentence #0, processed 0 words, keeping 0 word types
PROGRESS: at sentence #10000, processed 256693 words, keeping 9207 word types
PROGRESS: at sentence #20000, processed 567137 words, keeping 13603 word types
PROGRESS: at sentence #30000, processed 918759 words, keeping 17616 word types
PROGRESS: at sentence #40000, processed 1236460 words, keeping 19579 word types
PROGRESS: at sentence #50000, processed 1508401 words, keeping 22623 word types
PROGRESS: at sentence #60000, processed 1715918 words, keeping 27910 word types
PROGRESS: at sentence #70000, processed 1915119 words, keeping 30708 word types
PROGRESS: at sentence #80000, processed 2144824 words, keeping 35822 word types
PROGRESS: at sentence #90000, processed 2415166 words, keeping 44470 word types
collected 51134 word types from a corpus of 2621785 raw words and 98552 sentences
Loading a fresh vocabulary
min_count=5 retains 17011 unique words (33% of original 51134, drops 34123)
min_count=5 leaves 2565427 word corpus (97% of original 2621785, drops 56358)
deleting the raw counts dictionary of 51134 items
sample=0.001 downsamples 53 most-common words
downsampling leaves estimated 1802467 word corpus (70.3% of prior 2565427)
estimated required memory for 17011 words and 300 dimensions: 49331900 bytes
resetting layer weights
training model with 3 workers on 17011 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=5
PROGRESS: at 1.12% examples, 194674 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 2.15% examples, 213024 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 3.36% examples, 222702 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 4.47% examples, 228070 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 5.82% examples, 223539 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 7.21% examples, 217486 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 8.43% examples, 215687 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 9.65% examples, 210600 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 10.59% examples, 207928 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 11.64% examples, 206963 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 12.67% examples, 210723 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 13.82% examples, 212895 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 15.02% examples, 214139 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 16.10% examples, 209783 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 17.07% examples, 203605 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 17.93% examples, 198907 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 18.72% examples, 195887 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 19.71% examples, 192156 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 20.47% examples, 189817 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 21.17% examples, 186500 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 21.73% examples, 182641 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 22.44% examples, 182300 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 23.50% examples, 183143 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 24.61% examples, 185696 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 25.89% examples, 185833 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 27.23% examples, 185820 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 28.38% examples, 185893 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 29.68% examples, 186204 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 30.76% examples, 186886 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 31.83% examples, 187691 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 32.84% examples, 189637 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 34.00% examples, 191259 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 35.09% examples, 191383 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 36.41% examples, 191058 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 37.49% examples, 189599 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 38.45% examples, 189001 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 39.65% examples, 188520 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 40.56% examples, 188195 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 41.46% examples, 187546 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 42.35% examples, 188374 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 43.38% examples, 188618 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 44.39% examples, 189420 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 45.45% examples, 188932 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 46.11% examples, 186869 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 47.23% examples, 186033 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 48.38% examples, 186113 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 49.60% examples, 185852 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 50.56% examples, 185847 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 51.26% examples, 184525 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 52.00% examples, 184074 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 52.84% examples, 184396 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 53.83% examples, 184642 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 54.88% examples, 185039 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 56.03% examples, 184491 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 57.23% examples, 184132 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 58.24% examples, 183656 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 58.98% examples, 182995 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 59.90% examples, 181814 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 60.85% examples, 181748 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 61.88% examples, 182035 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 62.63% examples, 181982 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 63.44% examples, 181482 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 64.15% examples, 181199 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 65.24% examples, 181383 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 66.58% examples, 181488 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 67.90% examples, 181628 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 69.08% examples, 182047 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 70.19% examples, 181830 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 71.30% examples, 182072 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 72.31% examples, 182869 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 73.47% examples, 183439 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 74.58% examples, 184332 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 75.89% examples, 184431 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 77.08% examples, 184215 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 78.24% examples, 184162 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 79.08% examples, 183734 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 80.11% examples, 183378 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 80.96% examples, 183023 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 81.74% examples, 182573 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 82.59% examples, 182912 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 83.63% examples, 183130 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 84.58% examples, 183446 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 85.94% examples, 183593 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 87.23% examples, 183549 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 88.34% examples, 183548 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 89.25% examples, 183138 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 90.22% examples, 182933 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 91.18% examples, 182727 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 92.00% examples, 182662 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 92.89% examples, 182983 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 93.91% examples, 183278 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 95.09% examples, 183597 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 96.36% examples, 183570 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 97.67% examples, 183501 words/s, in_qsize 0, out_qsize 0
PROGRESS: at 98.78% examples, 183801 words/s, in_qsize 0, out_qsize 0
worker thread finished; awaiting finish of 2 more threads
PROGRESS: at 99.98% examples, 183696 words/s, in_qsize 1, out_qsize 1
###Markdown
 The word vectors are directions in space and can encode relationships between words. The proximity of words to each other can be calculated through their cosine similarity.
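As a quick check, the cosine similarity behind these comparisons can be computed directly from the learned vectors (a minimal sketch using words queried elsewhere in this notebook):

```python
import numpy as np

def cosine(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

print(cosine(model.wv['boy'], model.wv['food']))
print(model.wv.similarity('boy', 'food'))  # gensim's helper computes the same quantity
```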
###Code
model.wv.most_similar(positive=['boy'])
model.wv.most_similar(positive=['food'])
model.wv.most_similar(positive=['she','her','hers','herself'], negative=['he','him','his','himself'])
# she + her + hers + herself - he - him - his - himself
# Let's limit ourselves to top 50 words that related to food to visualize how they relate in vector space
f_tokens = [token for token,weight in model.wv.most_similar(positive=['food'], topn=50)]
from sklearn.metrics import pairwise
vectors = [model.wv[word] for word in f_tokens]
dist_matrix = pairwise.pairwise_distances(vectors, metric='cosine')
from sklearn.manifold import MDS
mds = MDS(n_components = 2, dissimilarity='precomputed')
embeddings = mds.fit_transform(dist_matrix)
import matplotlib.pyplot as plt
%matplotlib inline
_, ax = plt.subplots(figsize=(14,10))
ax.scatter(embeddings[:,0], embeddings[:,1], alpha=0)
for i in range(len(vectors)):
ax.annotate(f_tokens[i], ((embeddings[i,0], embeddings[i,1])))
###Output
_____no_output_____
###Markdown
**What kind of clusters of food-themed terms can you notice?** Doc2Vec ---Doc2Vec, the most powerful extension of word2vec---Doc2vec (aka paragraph2vec or sentence embeddings) extrapolates the word2vec algorithm to larger blocks of text, such as sentences, paragraphs or entire documents. Every paragraph is mapped to a unique vector, represented by a column in matrix D and every word is also mapped to a unique vector, represented by a column in matrix W . The paragraph vector and word vectors are averaged or concatenated to predict the next word in a context. Each additional context does not have be a fixed length (because it is vectorized and projected into the same space).Additional parameters but the updates are sparse thus still efficent.__2 architectures__:1. Distrubted Memory (DM)2. Distrubted Bag of Words (DBOW) Distrubted Memory (DM)__Highlights__:- Assign and randomly initialize paragraph vector for each doc- Predict next word using context words and paragraph vector- Slide context window across doc but keep paragraph vector fixed (hence: Distrubted Memory)- Update weights via SGD and backprop Distrubted Bag of Words (DBOW)__Highlights__:- ONLY use paragraph vectors (no word vectors)- Take a window of words in a paragraph and randomly sample which ones to predict using paragraph vector- Simpler, more memory effecient Let's try building our own Doc2Vec model with Gensim Doc2Vec Parameters- `size`: Number of dimensions for the embedding model- `window`: Number of context words to observe in each direction within a document- `min_count`: Minimum frequency for words included in model- `dm` (distributed memory): '0' indicates DBOW model; '1' indicates DM- `alpha`: Learning rate (initial); prevents model from over-correcting, enables finer tuning- `iter`: Number of iterations through corpus
###Code
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from nltk.corpus import reuters
# Tokenize Reuters corpus
tokenized_docs = [nltk.word_tokenize(reuters.raw(fileid)) for fileid in reuters.fileids()]
# Convert tokenized documents to TaggedDocuments
tagged_docs = [TaggedDocument(doc, tags=[idx]) for idx, doc in enumerate(tokenized_docs)]
# Create and train the doc2vec model. May take a few seconds
doc2vec = Doc2Vec(size=300, window=5, min_count=5, dm = 1, iter=10)
# Build the word2vec model from the corpus
doc2vec.build_vocab(tagged_docs)
###Output
collecting all words and their counts
PROGRESS: at example #0, processed 0 words (0/s), 0 word types, 0 tags
PROGRESS: at example #10000, processed 1431895 words (4396683/s), 60881 word types, 10000 tags
collected 63348 word types and 10788 unique tags from a corpus of 10788 examples and 1548468 words
Loading a fresh vocabulary
min_count=5 retains 15255 unique words (24% of original 63348, drops 48093)
min_count=5 leaves 1472770 word corpus (95% of original 1548468, drops 75698)
deleting the raw counts dictionary of 63348 items
sample=0.001 downsamples 45 most-common words
downsampling leaves estimated 1118059 word corpus (75.9% of prior 1472770)
estimated required memory for 15255 words and 300 dimensions: 57185100 bytes
resetting layer weights
###Markdown
You can also fortify your Doc2Vec models with pre-trained Word2Vec models. Let's try re-training with GoogleNews-trained word vectors. Download [here](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing) (size is ~1.5 GB)
###Code
#This may take a few minutes to run
w2v_loc = "GoogleNews-vectors-negative300.bin.gz"  # placeholder: replace with your saved location of the GoogleNews vectors
doc2vec.intersect_word2vec_format(w2v_loc, binary=True)
doc2vec.train(tagged_docs, epochs=10, total_examples=doc2vec.corpus_count)
###Output
training model with 3 workers on 15255 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=5
PROGRESS: at 4.56% examples, 501506 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 9.25% examples, 518806 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 14.56% examples, 541083 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 19.44% examples, 545440 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 24.75% examples, 553349 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 28.82% examples, 537311 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 32.49% examples, 519493 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 36.55% examples, 511619 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 40.50% examples, 504437 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 45.47% examples, 507292 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 50.50% examples, 513053 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 55.35% examples, 514623 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 59.87% examples, 515344 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 64.39% examples, 512104 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 68.21% examples, 506879 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 73.00% examples, 507863 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 78.15% examples, 512007 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 83.46% examples, 516562 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 88.90% examples, 521565 words/s, in_qsize 6, out_qsize 0
PROGRESS: at 94.39% examples, 525719 words/s, in_qsize 5, out_qsize 0
PROGRESS: at 99.62% examples, 528600 words/s, in_qsize 5, out_qsize 0
worker thread finished; awaiting finish of 2 more threads
worker thread finished; awaiting finish of 1 more threads
worker thread finished; awaiting finish of 0 more threads
training on 15484680 raw words (11287240 effective words) took 21.4s, 528654 effective words/s
|
src/WSK Week1.ipynb | ###Markdown
Truc Huynh - 1/11/2022 - 7:58 PM - Workshop turn it in Workshop 1 This notebook will cover the following topics: 1. Basic Input/Output and formatting 2. Decision Structure and Boolean Logic 3. Basic Loop Structures 4. Data Structures 1.1 Basic Input/Output and formatting (Follow): **Learning Objectives:** 1. Perform basic input and output for a user 2. Format input and output 3. Perform basic math on input and output 4. Understand different datatypes
###Code
# Add comments below
# This is a normal string print
print ("This is a print statement")
# This is an f-string print with a number
print (f"Try printing some different things including number {5}")
# Get input from user and store in input_string
input_string = input("Enter a short string here:")
# Display the user input
print ("The string you input is: ", input_string)
# define float_number value 2
float_number = 2
# print the float number with 2 decimal place format
print("This will print a float number with 2 decimal places: ", format(float_number, '.2f'))
num1 = 5
num2 = 10
print(f"This will multiply {num1} times {num2} and display the result: {num1 * num2}")
###Output
This is a print statement
Try printing some different things including number 5
Enter a short string here:234
The string you input is: 234
This will print a float number with 2 decimal places: 2.00
This will multiply 5 times 10 and display the result: 50
###Markdown
1.1 Basic Input/Output and formatting (Group): 1. Create a small program that will take a Celsius temperature from the user and convert it to Fahrenheit. 2. Print the output result to 3 decimal places. 3. Print a degree sign after the output. 4. Try to figure out how to create the program in the fewest lines possible. Fahrenheit = (9/5)(Degrees Celsius) + 32
###Code
def calculate_fahrenheit(celcius_degree):
return (f"The degree in Fahrenheit is {format((float(celcius_degree)*9)/5+32,'.3f')} \N{DEGREE SIGN}F")
cel_input = input('Please enter a degree in Celsius: ')
print(calculate_fahrenheit(cel_input))
###Output
Please enter a degree in Celsius: 23
The degree in Fahrenheit is 73.400 °F
###Markdown
1.2 Decision Structure and Boolean Logic (Follow): **Learning Objectives:** 1. Understand boolean logic and logical operators 2. Understand If/Else statements and program flow
###Code
#Add comments below
day = int(input('Enter a number (1-7) for the day of the week: '))
if day == 1:
print('Monday')
elif day == 2:
print('Tuesday')
elif day == 3:
print('Wednesday')
elif day == 4:
print ('Thursday')
elif day == 5:
print ('Friday')
elif day == 6:
print ('Saturday')
elif day == 7:
print ('Sunday')
else:
print ('Error: Please enter a number in the range 1-7.')
###Output
Enter a number (1-7) for the day of the week: 1
Monday
###Markdown
After running this program, think about and answer these questions: 1. What is the program doing? 2. Does every line in the code execute, explain? 3. Could you find a way to break it? Answer: 1. What is the program doing: The program prints out the day of the week for the corresponding number (starting with 1 for Monday and 7 for Sunday). 2. Does every line in the code execute: Yes, every line of code executes; the program takes the input and then goes through the if-elif-else conditions (the syntax and the if/elif/else branches are in the correct order and connected). 3. Could you find a way to break it: Yes, just enter a string and it will break. I include a solution below that fixes this by validating the input inside the function instead of parsing it at the input.
###Code
def day_of_week(number):
if number.isdigit() and int(number) in range(1,8):
week_days=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday','Saturday','Sunday']
print(week_days[int(number)-1])
else:
print('Please enter a number from 1 to 7 only!')
day = input('Enter a number (1-7) for the day of the week: ')
day_of_week(day)
#Add comments below
RED = 'red'
BLUE = 'blue'
YELLOW = 'yellow'
color1 = input('Enter the first primary color in lower case letters: ')
color2 = input('Enter the second primary color in lower case letters: ')
if color1 != RED and color1 != BLUE and color1 != YELLOW:
print('Error: The first color you entered is invalid.')
elif color2 != RED and color2 != BLUE and color2 != YELLOW:
print('Error: The second color you entered is invalid.')
elif color1 == color2:
print('Error: The two colors you entered are the same.')
else:
if color1 == RED:
if color2 == BLUE:
print('purple')
else:
print('orange')
elif color1 == BLUE:
if color2 == RED:
print('purple')
else:
print('green')
else:
if color2 == RED:
print('orange')
else:
print('green')
###Output
Enter the first primary color in lower case letters: dsfus9df8s9dsdf
Enter the second primary color in lower case letters: sdf
Error: The first color you entered is invalid.
###Markdown
After running this program, think about and answer these questions: 1. What is the program doing? 2. Does every line in the code execute, explain? 3. Could you find a way to break it? 4. Replace the 'ands' with 'ors' and discuss how the program executes now. Answer: 1. What is the program doing: The program takes two colors from the user (out of the three primary colors red, blue and yellow) and outputs the secondary color obtained by mixing them. The program also validates the user input. 2. Does every line in the code execute, explain: Yes, every line can execute. The code follows an if-elif-else structure with nested ifs and correct indentation, so each branch is reachable. 3. Could you find a way to break it? No, it is well structured and hard to break. The only missing case is when both color 1 and color 2 are invalid; it would be nice to report both errors. 4. Replace the 'ands' with 'ors' and discuss how the program executes now: With 'or' the program no longer runs correctly; we get a logic error. For example, in the first if statement, even if the user enters a valid color such as 'blue' or 'yellow', the invalid-color message is still printed, because the 'or' condition is satisfied as soon as the color is not 'red' (see the short sketch below). 1.2 Decision Structure and Boolean Logic (Group): Develop a program that will do the following: 1. Take input from the user for length and width for two different sets of numbers. Four total inputs. 2. Multiply the length and width together and find the area for the two different sets of numbers. 3. Compare the areas and print which one is larger. 4. Be sure to handle non-numeric inputs.
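To see point 4 concretely, here is a minimal sketch (not part of the original exercise) of why the 'or' version of the validation rejects every input: any value differs from at least one of the three colors, so the condition is always True.
```
color1 = 'blue'
# with 'or', this condition is True for every possible value of color1
if color1 != 'red' or color1 != 'blue' or color1 != 'yellow':
    print('Error: The first color you entered is invalid.')  # printed even though 'blue' is valid
```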
###Code
def compare_area(length1, width1, length2, width2):
    # validate that all four inputs are whole numbers before converting them
    if length1.isdigit() and length2.isdigit() and width1.isdigit() and width2.isdigit():
        area1 = int(length1) * int(width1)
        area2 = int(length2) * int(width2)
        print(f"Area of rectangle 1 is: {area1}")
        print(f"Area of rectangle 2 is: {area2}")
        print("Rectangle 1 has a bigger area than Rectangle 2"
              if area1 > area2 else "Rectangle 2 has a bigger area than Rectangle 1")
    else:
        print("This application only accepts whole numbers")
length1 = input("Please enter the length of object 1: ")
width1 = input("Please enter the width of object 1: ")
length2 = input("Please enter the length of object 2: ")
width2 = input("Please enter the width of object 2: ")
compare_area(length1, width1, length2, width2)
###Output
_____no_output_____
###Markdown
1.3 Basic loop structures (Follow):**Learning Objectives:** 1. Understanding repetative structures 2. Understanding while loops 3. Understanding for loops 4. Nested loops and order N run time
###Code
#Add comments below
number = 1.0
total = 0.0
# Keep looping while the last number entered is positive; a zero or negative entry ends the loop
while number > 0:
number = float(input('Enter a positive number (negative to quit): '))
if number > 0:
total = total + number
print (f'Total = {total:.2f}')
###Output
Enter a positive number (negative to quit): 4
Enter a positive number (negative to quit): -5
Total = 4.00
###Markdown
After running this program, think about and answer these questions: 1. What is the program doing? 2. How many times does each line in the code execute, explain? 3. Could you find a way to break it? 4. Try to rewrite the program using a for loop (one possible rewrite is sketched below).
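One possible answer to question 4, as a sketch (not part of the original notebook): the sentinel-controlled while loop can be written with an unbounded for loop that breaks once the sentinel (a non-positive number) is entered.
```
from itertools import count

total = 0.0
for _ in count():  # loop until we break out
    number = float(input('Enter a positive number (negative to quit): '))
    if number <= 0:
        break
    total = total + number
print(f'Total = {total:.2f}')
```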
###Code
# Add comments to the program
caloriesPerMinute = 4.2
caloriesBurned = 0.0
print ('Minutes\t\tCalories Burned')
print ('-------------------------------')
# for miniute in range 10 to 30, increase by 5
for minutes in range(10, 31, 5):
caloriesBurned = caloriesPerMinute * minutes
print (minutes, "\t\t", caloriesBurned)
###Output
Minutes Calories Burned
-------------------------------
10 42.0
15 63.0
20 84.0
25 105.0
30 126.0
###Markdown
After running this program, think about and answer these questions: 1. What is the program doing? 2. How many times does each line in the code execute, explain? 3. Could you find a way to break it? 4. Try to rewrite the program using a while loop (one possible rewrite is sketched below).
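One possible answer to question 4, as a sketch (not part of the original notebook): the same calories table can be produced with a while loop that advances the minute counter manually.
```
caloriesPerMinute = 4.2
minutes = 10
print('Minutes\t\tCalories Burned')
print('-------------------------------')
while minutes <= 30:
    print(minutes, '\t\t', caloriesPerMinute * minutes)
    minutes = minutes + 5
```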
###Code
###### Add comments to the program
totalRainfall = 0.0
monthRainfall = 0.0
averageRainfall = 0.0
years = 0
totalMonths = 0
years = int(input('Enter the number of years: '))
for year in range(years):
print (f'For year {year + 1}:')
for month in range(12):
monthRainfall = float(input('Enter the rainfall amount for the month: '))
totalMonths += 1
totalRainfall += monthRainfall
averageRainfall = totalRainfall / totalMonths
print(f'For {totalMonths} months')
print(f'Total rainfall: {totalRainfall:,.2f} inches')
print(f'Average monthly rainfall: {averageRainfall:,.2f} inches')
###Output
Enter the number of years:
###Markdown
After running this program, think about and answer these questions:1. What is the program doing?2. How many time does each line in the code execute, explain?3. Could you find a way to break it?4. Compare the number of executuions for the inner for loop to the outer for loop.5. What order of magnitude larger are the inner instructions executing? 1.3 Basic loop structures (Group):Develop a program that will:1. Take input from the user asking how many organisms to start with.2. Take input from the user asking the average daily increase for the organisms.3. Take input from the user asking the number of days to multiply.4. Make sure the daily increase was entered as a percentage, if not correct it.5. Print each day and the increase in organsisms for each day. 6. Make sure to format the printing so that it is readable.
###Code
organism_size = int(input('please enter your organisms to start with here: '))
percentage = float(input('please enter your percentage here (format: 00.00): '))
day_count = int(input('please enter your number of days to multiply: '))
for i in range (day_count):
organism_size += organism_size*percentage
print(f"On day {i+1}, the {organism_size}" )
###Output
please enter your organisms to start with here: 123
please enter your percentage here (format: 00.00): .12
please enter your number of days to multiply: 23
On day 1, the 137.76
On day 2, the 154.2912
On day 3, the 172.80614400000002
On day 4, the 193.54288128000002
On day 5, the 216.76802703360002
On day 6, the 242.78019027763202
On day 7, the 271.91381311094784
On day 8, the 304.5434706842616
On day 9, the 341.088687166373
On day 10, the 382.01932962633776
On day 11, the 427.8616491814983
On day 12, the 479.2050470832781
On day 13, the 536.7096527332715
On day 14, the 601.114811061264
On day 15, the 673.2485883886158
On day 16, the 754.0384189952497
On day 17, the 844.5230292746796
On day 18, the 945.8657927876412
On day 19, the 1059.3696879221582
On day 20, the 1186.4940504728172
On day 21, the 1328.8733365295552
On day 22, the 1488.3381369131018
On day 23, the 1666.938713342674
###Markdown
1.4 Data Structures (Follow):**Learning Objectives:** 1. Understand list and tuples 2. Understand dictionaries and sets 3. Understand strings
###Code
# Add comments to the program
total_sales = 0.0
# define the daily sales both as a tuple and as a list
daily_sales_tuple = (7.0, 5.0, 3.2, 1.7, 6.0, 4.9, 1.1)
daily_sales_list = [7.0, 5.0, 3.2, 1.7, 6.0, 4.9, 1.1]
# define days of week list
days_of_week = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday',
'Saturday']
for number in daily_sales_list:
total_sales += number
print (f'Total sales for the week: ${total_sales:,.2f}')
total_sales = 0.0
for number in daily_sales_tuple:
total_sales += number
print (f'Total sales for the week: ${total_sales:,.2f}')
print (f'Sales per day: ')
for i in range(7):
print(f"{days_of_week[i] : <10}", f"{daily_sales_list[i] : >20}")
print(f"{days_of_week[i] : <10}", f"{daily_sales_tuple[i] : >20}")
daily_sales_list[5] = 10.0
#daily_sales_tuple[5] = 10.0 #uncomment this line to see what happens
new_value = 3.2
daily_sales_list.append(new_value)
new_daily_sales_tuple = daily_sales_tuple + (new_value,)
days_of_week.append('Sunday')
print("")
print (f'Sales per day: ')
for i in range(len(days_of_week)):
print(f"{days_of_week[i] : <10}", f"{daily_sales_list[i] : >20}")
print(f"{days_of_week[i] : <10}", f"{new_daily_sales_tuple[i] : >20}")
###Output
_____no_output_____
###Markdown
After running the above code, think about and answer the following questions: 1. What is the program doing? 2. What does the append function do? 3. Which actual entry is changed when setting daily sales[5] to 10? 4. Discuss the similarities and differences between lists and tuples (a small illustration of one key difference follows below).
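A small illustration (not part of the original notebook) of one key difference for question 4: lists are mutable while tuples are not, so item assignment only works on the list.
```
demo_list = [7.0, 5.0, 3.2]
demo_tuple = (7.0, 5.0, 3.2)
demo_list[0] = 10.0        # fine: lists are mutable
try:
    demo_tuple[0] = 10.0   # tuples are immutable
except TypeError as e:
    print(e)               # 'tuple' object does not support item assignment
```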
###Code
# Add comments to the program
import random
capitals = {'Alabama':'Montgomery', 'Alaska':'Juneau',
'Arizona':'Phoenix', 'Arkansas':'Little Rock',
'California':'Sacramento', 'Colorado':'Denver',
'Connecticut':'Hartford', 'Delaware':'Dover',
'Florida':'Tallahassee', 'Georgia':'Atlanta',
'Hawaii':'Honolulu', 'Idaho':'Boise',
'Illinois':'Springfield', 'Indiana':'Indianapolis',
'Iowa':'Des Moines', 'Kansas':'Topeka',
'Kentucky':'Frankfort', 'Louisiana':'Baton Rouge',
'Maine':'Augusta', 'Maryland':'Annapolis',
'Massachusetts':'Boston', 'Michigan':'Lansing',
'Minnesota':'Saint Paul', 'Mississippi':'Jackson',
'Missouri':'Jefferson City', 'Montana':'Helena',
'Nebraska':'Lincoln', 'Nevada':'Carson City',
'New Hampshire':'Concord', 'New Jersey':'Trenton',
'New Mexico':'Santa Fe', 'New York':'Albany',
'North Carolina':'Raleigh', 'North Dakota':'Bismarck',
'Ohio':'Columbus', 'Oklahoma':'Oklahoma City',
'Oregon':'Salem', 'Pennsylvania':'Harrisburg',
'Rhode Island':'Providence', 'South Carolina':'Columbia',
'South Dakota':'Pierre', 'Tennessee':'Nashville',
'Texas':'Austin', 'Utah':'Salt Lake City',
'Vermont':'Montpelier', 'Virginia':'Richmond',
'Washington':'Olympia', 'West Virginia':'Charleston',
'Wisconsin':'Madison', 'Wyoming':'Cheyenne'}
correct = 0
wrong = 0
next_question = True
index = 0
user_solution = ''
while next_question:
state_iterator = iter(capitals)
index = (random.randint(1,50) - 1)
for i in range (index-1):
temp = state_iterator.__next__()
current_state = str(state_iterator.__next__())
user_solution = input(f'What is the capital of {current_state}? 'f'(or enter 0 to quit): ')
if user_solution == '0':
next_question = False
print(f'You had {correct} correct responses and 'f'{wrong} incorrect responses.')
elif user_solution == capitals[current_state]:
correct = correct + 1
print('That is correct.')
else:
wrong = wrong + 1
print('That is incorrect.')
###Output
_____no_output_____
###Markdown
After running the above code, think about and answer the following questions:1. What is the program doing?2. What does a key in a dictionary do? What does the value in a dictionary do?3. Where would you be more likely to use a dictionary versus using a list or tuple?4. How are dictionaries different from tuples and lists?
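A tiny illustration (not from the original notebook) for question 2: the key is what you look an entry up by, and the value is the data stored under that key.
```
capitals_demo = {'Texas': 'Austin', 'Ohio': 'Columbus'}
print(capitals_demo['Texas'])      # look up a value by its key -> Austin
print('Ohio' in capitals_demo)     # membership tests check the keys -> True
for state, capital in capitals_demo.items():
    print(state, '->', capital)    # iterate over key/value pairs
```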
###Code
# add comments to the program
baseball = set(['Jodi', 'Carmen', 'Aida', 'Alicia'])
basketball = set(['Eva', 'Carmen', 'Alicia', 'Sarah'])
print('The following students are on the baseball team:')
for name in baseball:
print(name)
print()
print('The following students are on the basketball team:')
for name in basketball:
print(name)
print()
print('The following students play both baseball and basketball:')
for name in baseball.intersection(basketball):
print(name)
print()
print('The following students play either baseball or basketball:')
for name in baseball.union(basketball):
print(name)
print()
print('The following students play baseball, but not basketball:')
for name in baseball.difference(basketball):
print(name)
print()
print('The following students play basketball, but not baseball:')
for name in basketball.difference(baseball):
print(name)
print()
print('The following students play one sport, but not both:')
for name in baseball.symmetric_difference(basketball):
print(name)
###Output
_____no_output_____
###Markdown
After running the above code, think about and answer the following questions:1. What is the program doing?2. What does a set do and how is it used above?3. Where would you be more likely to use a set versus using a dictionary, list, or tuple?4. How are sets different from dictionaries, tuples, and lists? 1.4 Data Structures (Group):Create a program that does the following:1. Take the two lists provided and count the frequency of each word in each list.2. Create a dictionary with the key equal to each unique word and count as the value.3. Print off a list of words that are in both lists.4. Print off a list of words that are in the first list and not the second.5. Print off a list of words that are in the second list and not in the first.
###Code
list1 = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
list2 = ['jack', 'be', 'nimble', 'jack', 'be', 'quick', 'jack', 'jumps', 'over', 'the', 'candlestick']
def count_character(list_):
    # build a dictionary mapping each unique word to how often it appears
    dictlist = {}
    for words in list_:
        if words not in dictlist:
            dictlist[words] = 1
        else:
            dictlist[words] += 1
    return dictlist
dict1 = count_character(list1)
dict2 = count_character(list2)
set1 = set(list1)
set2 = set(list2)
print('The words in both list:')
for word in set1.intersection(set2):
print(word)
print('words that are in the first list:')
for word in set1.difference(set2):
print(word)
print('words that are in the second list:')
for word in set2.difference(set1):
print(word)
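# Note (added sketch, not in the original solution): the same word counts could also be
# built with the standard library, for example:
#     from collections import Counter
#     dict1 = Counter(list1)
#     dict2 = Counter(list2)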
###Output
_____no_output_____ |
content/lessons/14/Watch-Me-Code/WMC1-Requests-BS4.ipynb | ###Markdown
Watch Me Code 1: Beautiful Soup 4Your boss wants to market a new product to the iSchool Faculty.You have been tasked with creating a contact list containing names, jobs and emails for the marketing department
###Code
# Todo:
# 1. Get html from ischool faculty directory
# 2. Parse html extract faculty contact info
# 3. Write to a csv file for marketing
import requests
from bs4 import BeautifulSoup
import csv
def get_html(url):
# Get html from url
response = requests.get(url)
return response.text
def extract_info(html):
# take html extract faculty info return list of dictionaries
soup = BeautifulSoup(html, "lxml")
faculty = []
for item in soup.select(".faculty-list")[0].select('.media'):
fac = {
"name": item.find("h4").text,
"title": item.find("h5").text,
"email": ""
}
for link in item.find_all("a"):
if "mailto:" in link["href"]:
fac["email"] = link.text
faculty.append(fac)
return faculty
def write_csv(filename, facultylist):
# take dictionaries and create a csv
with open(filename, 'w', newline='') as csvfile:
fieldnames = ['name', 'title', 'email']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for faculty in facultylist:
writer.writerow(faculty)
return "success"
# Follow our steps
webpage = get_html('https://ischool.syr.edu/people/directories/faculty/')
faculty = extract_info(webpage)
filename = "faculty.csv"
result = write_csv(filename, faculty)
if result == "success":
print("File %s written to disk" % (filename))
###Output
File faculty.csv written to disk
|
docs/notebooks/01-electric-motor-production.ipynb | ###Markdown
Use case for "Modeling resilient cyber-physical processes and their composition from digital twins via Markov Decision Processes"In this notebook, we present the implementation of the use cases for the paper "Modeling resilient cyber-physical processes and their composition from digital twins via Markov Decision Processes", submitted at BPM 2022.Table of contents:- [Preliminaries](Preliminaries) - [Available services](Available-services) - [Target specification](Target-specification)- [Economic cost > Quality cost](Economic-cost->-Quality-cost) - [Discussion](Economic-cost->-Quality-cost:-Discussion)- [Quality-cost > Economic cost](Quality-cost->-Economic-cost) - [Discussion](Quality-cost->-Economic-cost:-Discussion)- [Increasing broken probability](Increasing-broken-probability) - [Discussion](Increasing-broken-probability:-Discussion)- [Using Tau Actions](Using-Tau-Actions) - [Discussion](Using-Tau-Actions:-Discussion) Preliminaries Available servicesWe start with the definitions of the available services and the DECLARE constraints of the task.
###Code
# Python imports, put at the top for simplicity
from logaut import ltl2dfa
from mdp_dp_rl.algorithms.dp.dp_analytic import DPAnalytic
from pylogics.parsers import parse_ltl
from docs.notebooks.utils import render_mdp_dfa
from docs.notebooks.utils import render_service, print_policy_data, \
print_value_function, print_q_value_function
from stochastic_service_composition.declare_utils import exactly_once, absence_2, alt_succession, alt_precedence, \
build_declare_assumption, not_coexistence
from stochastic_service_composition.dfa_target import mdp_from_dfa
from stochastic_service_composition.momdp import compute_final_mdp
from stochastic_service_composition.services import build_service_from_transitions, Service
from mdp_dp_rl.processes.det_policy import DetPolicy
from stochastic_service_composition.dfa_target import MdpDfa
# default economic cost value
DEFAULT_REWARD_ECONOMIC_COST = -1.0
# default quality cost value
DEFAULT_REWARD_QUALITY_COST = -1.0
# default reward when the service becomes broken
DEFAULT_BROKEN_REWARD = -2.0
# default reward to repair the device
DEFAULT_REWARD_REPAIR = -1.0
# default probability of being broken after an action
DEFAULT_BROKEN_PROB = 0.05
# default discount factor
GAMMA = 0.99
HIGH_COST = -2.0
LOW_COST = -0.5
# all the atomic actions for the task
BUILD_RETRIEVE_STATOR = "build_retrieve_stator"
BUILD_RETRIEVE_ROTOR = "build_retrieve_rotor"
BUILD_RETRIEVE_INVERTER = "build_retrieve_inverter"
ASSEMBLE_MOTOR = "assemble_motor"
PAINTING = "painting"
RUNNING_IN = "running_in"
ELECTRIC_TEST = "electric_test"
STATIC_TEST = "static_test"
ALL_SYMBOLS = {
BUILD_RETRIEVE_STATOR,
BUILD_RETRIEVE_ROTOR,
BUILD_RETRIEVE_INVERTER,
ASSEMBLE_MOTOR,
PAINTING,
RUNNING_IN,
ELECTRIC_TEST,
STATIC_TEST,
}
def stator_builder_service(
economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST,
quality_cost: float = DEFAULT_REWARD_QUALITY_COST,
broken_prob: float = DEFAULT_BROKEN_PROB,
broken_reward: float = DEFAULT_BROKEN_REWARD,
repair_reward: float = DEFAULT_REWARD_REPAIR
) -> Service:
"""Build the stator builder device."""
assert 0.0 <= broken_prob <= 1.0
success_prob = 1.0 - broken_prob
average_action_reward = economic_cost * (1 - broken_prob) + (economic_cost + broken_reward) * broken_prob
transitions = {
"ready": {
BUILD_RETRIEVE_STATOR: ({"ready": success_prob, "broken": broken_prob}, (average_action_reward, quality_cost)),
},
"broken": {
"restore_stator": ({"ready": 1.0}, (repair_reward, 0.0)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
service_stator_builder = stator_builder_service()
render_service(service_stator_builder)
def stator_warehouse_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the stator warehouse device."""
transitions = {
"ready": {
BUILD_RETRIEVE_STATOR: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
service_stator_warehouse = stator_warehouse_service(economic_cost=-3.0, quality_cost=-3.0)
render_service(service_stator_warehouse)
def rotor_builder_service(
economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST,
quality_cost: float = DEFAULT_REWARD_QUALITY_COST,
broken_prob: float = DEFAULT_BROKEN_PROB,
broken_reward: float = DEFAULT_BROKEN_REWARD,
repair_reward: float = DEFAULT_REWARD_REPAIR
) -> Service:
"""Build the rotor builder device."""
assert 0.0 <= broken_prob <= 1.0
success_prob = 1.0 - broken_prob
average_action_reward = economic_cost * (1 - broken_prob) + (economic_cost + broken_reward) * broken_prob
transitions = {
"ready": {
BUILD_RETRIEVE_ROTOR: ({"ready": success_prob, "broken": broken_prob}, (average_action_reward, quality_cost)),
},
"broken": {
"restore_rotor": ({"ready": 1.0}, (repair_reward, 0.0)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
# high economic cost - low quality cost
service_rotor_builder_1 = rotor_builder_service(economic_cost=HIGH_COST, quality_cost=LOW_COST)
# low economic cost - high quality cost
service_rotor_builder_2 = rotor_builder_service(economic_cost=LOW_COST, quality_cost=HIGH_COST)
render_service(service_rotor_builder_1)
render_service(service_rotor_builder_2)
def rotor_warehouse_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the rotor warehouse device."""
transitions = {
"ready": {
BUILD_RETRIEVE_ROTOR: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
service_rotor_warehouse = rotor_warehouse_service(-1.5, -1.5)
render_service(service_rotor_warehouse)
def inverter_warehouse_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the inverter warehouse device."""
transitions = {
"ready": {
BUILD_RETRIEVE_INVERTER: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
service_inverter_warehouse = inverter_warehouse_service()
render_service(service_inverter_warehouse)
def assembler_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the assembler device."""
transitions = {
"ready": {
ASSEMBLE_MOTOR: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
# high economic cost - low quality cost
service_assembler_1 = assembler_service(quality_cost=LOW_COST)
# low economic cost - high quality cost
service_assembler_2 = assembler_service(quality_cost=HIGH_COST)
render_service(service_assembler_1)
render_service(service_assembler_2)
def painter_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the painter device."""
transitions = {
"ready": {
PAINTING: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
# high economic cost - low quality cost
service_painter_1 = painter_service(economic_cost=HIGH_COST, quality_cost=LOW_COST)
# low economic cost - high quality cost
service_painter_2 = painter_service(economic_cost=LOW_COST, quality_cost=HIGH_COST)
render_service(service_painter_1)
render_service(service_painter_2)
def smart_tester_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the smart tester device."""
transitions = {
"ready": {
RUNNING_IN: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
# high economic cost - low quality cost
service_smart_tester_1 = smart_tester_service(economic_cost=HIGH_COST, quality_cost=LOW_COST)
# low economic cost - high quality cost
service_smart_tester_2 = smart_tester_service(economic_cost=LOW_COST, quality_cost=HIGH_COST)
render_service(service_smart_tester_1)
render_service(service_smart_tester_2)
def mechanical_engineer_service(economic_cost: float = DEFAULT_REWARD_ECONOMIC_COST, quality_cost: float = DEFAULT_REWARD_QUALITY_COST) -> Service:
"""Build the mechanical engineer device."""
transitions = {
"ready": {
ELECTRIC_TEST: ({"ready": 1.0}, (economic_cost, quality_cost)),
STATIC_TEST: ({"ready": 1.0}, (economic_cost, quality_cost)),
},
}
final_states = {"ready"}
initial_state = "ready"
return build_service_from_transitions(transitions, initial_state, final_states) # type: ignore
service_mechanical_engineer = mechanical_engineer_service()
render_service(service_mechanical_engineer)
###Output
_____no_output_____
###Markdown
Target specificationIn this section, we list all the DECLARE constraints, transform each of them in LTLf formulas (according to the paper) and put them in conjunction in order to compute the equivalent automaton.Then, we also show the MDP representation of the DFA.
###Code
declare_constraints = [
exactly_once(BUILD_RETRIEVE_STATOR),
exactly_once(BUILD_RETRIEVE_ROTOR),
exactly_once(BUILD_RETRIEVE_INVERTER),
exactly_once(RUNNING_IN),
exactly_once(ASSEMBLE_MOTOR),
absence_2(ELECTRIC_TEST),
absence_2(PAINTING),
absence_2(STATIC_TEST),
alt_succession(BUILD_RETRIEVE_STATOR, ASSEMBLE_MOTOR),
alt_succession(BUILD_RETRIEVE_ROTOR, ASSEMBLE_MOTOR),
alt_succession(BUILD_RETRIEVE_INVERTER, ASSEMBLE_MOTOR),
alt_succession(ASSEMBLE_MOTOR, RUNNING_IN),
alt_precedence(ASSEMBLE_MOTOR, PAINTING),
alt_precedence(ASSEMBLE_MOTOR, ELECTRIC_TEST),
alt_precedence(ASSEMBLE_MOTOR, STATIC_TEST),
not_coexistence(ELECTRIC_TEST, STATIC_TEST),
build_declare_assumption(ALL_SYMBOLS),
]
formula_str = " & ".join(map(lambda s: f"({s})", declare_constraints))
formula = parse_ltl(formula_str)
automaton = ltl2dfa(formula, backend="lydia")
target_mdp = mdp_from_dfa(automaton, gamma=GAMMA)
render_mdp_dfa(target_mdp)
###Output
_____no_output_____
###Markdown
Here we list all the services.
###Code
all_services = [
stator_builder_service(), # 0
stator_warehouse_service(economic_cost=-3.0, quality_cost=-3.0), # 1
rotor_builder_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 2
rotor_builder_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 3
rotor_warehouse_service(-1.5, -1.5), # 4
inverter_warehouse_service(), # 5
assembler_service(quality_cost=LOW_COST), # 6
assembler_service(quality_cost=HIGH_COST), # 7
painter_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 8
painter_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 9
smart_tester_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 10
smart_tester_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 11
mechanical_engineer_service(), # 12
]
###Output
_____no_output_____
###Markdown
Economic cost > Quality costIn the following, we compute the composition MOMDP by giving more weight to the economic cost (`2.0`) rather than the quality cost (`1.0`). The highest weight is to the target specification (`10.0`). The optimal solution will prefer using services whose economic cost of performing the main action is lower.
###Code
def execute_n_action_of_policy(mdp: MdpDfa, policy: DetPolicy, n, initial_state=None):
    """Roll out n steps of the policy, always moving to the most likely next state."""
    current_state = mdp.initial_state if initial_state is None else initial_state
    for _ in range(n):
        action = policy.get_action_for_state(current_state)
        # use the mdp argument (rather than the global final_mdp) so the helper works for any MDP
        next_state_dist = mdp.transitions[current_state][action]
        reward = mdp.rewards[current_state][action]
        next_state_highest_prob = max(next_state_dist.items(), key=lambda pair: pair[1])[0]
        print(f"action={action}, reward={reward}")
        current_state = next_state_highest_prob
weights = [10.0, 2.0, 1.0]
final_mdp = compute_final_mdp(target_mdp, all_services, weights)
print("Number of states: ", len(final_mdp.all_states))
opn = DPAnalytic(final_mdp, 1e-4)
opt_policy = opn.get_optimal_policy_vi()
value_function = opn.get_value_func_dict(opt_policy)
q_value_function = opn.get_act_value_func_dict(opt_policy)
print_policy_data(opt_policy)
print_value_function(value_function)
execute_n_action_of_policy(final_mdp, opt_policy, 7)
###Output
action=('build_retrieve_inverter', 5), reward=-3.0
action=('build_retrieve_stator', 0), reward=-3.2
action=('build_retrieve_rotor', 3), reward=-3.2
action=('assemble_motor', 6), reward=-2.5
action=('running_in', 11), reward=17.0
action=('painting', 9), reward=17.0
action=('electric_test', 12), reward=17.0
###Markdown
Economic cost > Quality cost: DiscussionIn the above cell you can see the computed optimal plan:```action=('build_retrieve_inverter', 5), reward=-3.0action=('build_retrieve_stator', 0), reward=-3.2action=('build_retrieve_rotor', 3), reward=-3.2action=('assemble_motor', 6), reward=-2.5action=('running_in', 11), reward=17.0action=('painting', 9), reward=17.0action=('electric_test', 12), reward=17.0```You can notice that the optimal plan preferred actions from services with lower economic cost. E.g. to perform `build_retrieve_rotor`, the plan preferred using `service_rotor_builder_2` (service `3`) rather than `service_rotor_builder_1` (service `2`).The same holds for the `service_painter_2` (service `9`) and `service_smart_tester_2` (service `11`).Note however that `service_assembler_1` (service `6`) is preferred over `service_assembler_2` (service `7`) because they have the same economic cost but `service_assembler_1` has a lower quality cost.Moreover, note that the reward of satisfaction of the target specification is given as soon as `running_in` is performed; this is because by the specification `painting` and `electric_test` are optional, and since the reward is given for each prefix that satisfies the constraints then the optimizer realized it was better to execute the `running_in` action as soon as possible. To reward only complete traces, we refer to the "Getting rewards for complete traces only" section in:Brafman, Ronen, Giuseppe De Giacomo, and Fabio Patrizi. "LTLf/LDLf non-markovian rewards." Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 32. No. 1. 2018. Quality cost > Economic costIn the following, we compute the composition MOMDP by giving more weight to the quality cost (`5.0`) rather than the economic cost (`1.0`). The highest weight is to the target specification (`10.0`). The optimal solution will prefer using services whose quality cost of performing the main action is lower.
###Code
weights = [10.0, 1.0, 5.0]
final_mdp = compute_final_mdp(target_mdp, all_services, weights)
print("Number of states: ", len(final_mdp.all_states))
opn = DPAnalytic(final_mdp, 1e-4)
opt_policy = opn.get_optimal_policy_vi()
value_function = opn.get_value_func_dict(opt_policy)
q_value_function = opn.get_act_value_func_dict(opt_policy)
print_policy_data(opt_policy)
print_value_function(value_function)
print_q_value_function(q_value_function)
execute_n_action_of_policy(final_mdp, opt_policy, 7)
###Output
action=('build_retrieve_rotor', 2), reward=-4.6
action=('build_retrieve_inverter', 5), reward=-6.0
action=('build_retrieve_stator', 0), reward=-6.1
action=('assemble_motor', 6), reward=-3.5
action=('running_in', 10), reward=15.5
action=('painting', 8), reward=15.5
action=('electric_test', 12), reward=14.0
###Markdown
Quality cost > Economic cost: DiscussionIn the above cell you can see the computed optimal plan:```action=('build_retrieve_rotor', 2), reward=-4.6action=('build_retrieve_inverter', 5), reward=-6.0action=('build_retrieve_stator', 0), reward=-6.1action=('assemble_motor', 6), reward=-3.5action=('running_in', 10), reward=15.5action=('painting', 8), reward=15.5action=('electric_test', 12), reward=14.0```You can notice that the optimal plan preferred actions from services with lower quality cost. E.g. to perform `build_retrieve_rotor`, the plan preferred using `service_rotor_builder_1` (service `2`) rather than `service_rotor_builder_2` (service `3`).The same holds for `service_painter_1` (service `8`) and `service_smart_tester_1` (service `10`).Note however that, as before, `service_assembler_1` (service `6`) is preferred over `service_assembler_2` (service `7`) because they have the same economic cost but `service_assembler_1` has a lower quality cost. Increasing broken probabilityIn this section, we show how the model is able to choose the right action in the case some parameter of the model changes, e.g. the probability of being broken. For example, we now use rotor builders with probability of being broken after performing the action of 40% (rather than 5%).
###Code
# high economic cost - low quality cost
service_rotor_builder_1 = rotor_builder_service(economic_cost=HIGH_COST, quality_cost=LOW_COST, broken_prob=0.4)
# low economic cost - high quality cost
service_rotor_builder_2 = rotor_builder_service(economic_cost=LOW_COST, quality_cost=HIGH_COST, broken_prob=0.4)
render_service(service_rotor_builder_1)
render_service(service_rotor_builder_2)
all_services = [
stator_builder_service(), # 0
stator_warehouse_service(economic_cost=-3.0, quality_cost=-3.0), # 1
rotor_builder_service(economic_cost=HIGH_COST, quality_cost=LOW_COST, broken_prob=0.4), # 2
rotor_builder_service(economic_cost=LOW_COST, quality_cost=HIGH_COST, broken_prob=0.4), # 3
rotor_warehouse_service(-1.5, -1.5), # 4
inverter_warehouse_service(), # 5
assembler_service(quality_cost=LOW_COST), # 6
assembler_service(quality_cost=HIGH_COST), # 7
painter_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 8
painter_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 9
smart_tester_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 10
smart_tester_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 11
mechanical_engineer_service(), # 12
]
weights = [10.0, 2.0, 1.0]
final_mdp = compute_final_mdp(target_mdp, all_services, weights)
opn = DPAnalytic(final_mdp, 1e-4)
opt_policy = opn.get_optimal_policy_vi()
execute_n_action_of_policy(final_mdp, opt_policy, 7)
###Output
action=('build_retrieve_inverter', 5), reward=-3.0
action=('build_retrieve_stator', 0), reward=-3.2
action=('build_retrieve_rotor', 3), reward=-4.6
action=('assemble_motor', 6), reward=-2.5
action=('running_in', 11), reward=17.0
action=('painting', 9), reward=17.0
action=('electric_test', 12), reward=17.0
###Markdown
Increasing broken probability: DiscussionAs you can see from the optimal policy:```action=('build_retrieve_inverter', 5), reward=-3.0action=('build_retrieve_stator', 0), reward=-3.2action=('build_retrieve_rotor', 3), reward=-4.6action=('assemble_motor', 6), reward=-2.5action=('running_in', 11), reward=17.0action=('painting', 9), reward=17.0action=('electric_test', 12), reward=17.0```Now the policy prefers to use the `service_rotor_warehouse` (service `4`) rather than `service_rotor_builder_X` (neither service `2` nor `3`). Using Tau ActionsIn this section, we see the usefulness of having included the tau actions in our model.In this example, we describe the case in which in the initial state of the production all the services that can be broken start in the "broken" state.
###Code
all_services = [
stator_builder_service(), # 0
stator_warehouse_service(economic_cost=-3.0, quality_cost=-3.0), # 1
rotor_builder_service(economic_cost=HIGH_COST, quality_cost=LOW_COST, broken_prob=0.4), # 2
rotor_builder_service(economic_cost=LOW_COST, quality_cost=HIGH_COST, broken_prob=0.4), # 3
rotor_warehouse_service(-1.5, -1.5), # 4
inverter_warehouse_service(), # 5
assembler_service(quality_cost=LOW_COST), # 6
assembler_service(quality_cost=HIGH_COST), # 7
painter_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 8
painter_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 9
smart_tester_service(economic_cost=HIGH_COST, quality_cost=LOW_COST), # 10
smart_tester_service(economic_cost=LOW_COST, quality_cost=HIGH_COST), # 11
mechanical_engineer_service(), # 12
]
weights = [10.0, 2.0, 1.0]
final_mdp = compute_final_mdp(target_mdp, all_services, weights, with_all_initial_states=True)
opn = DPAnalytic(final_mdp, 1e-4)
opt_policy = opn.get_optimal_policy_vi()
# service_stator_builder = broken
# service_rotor_builder_1 = broken
# service_rotor_builder_2 = broken
execute_n_action_of_policy(final_mdp, opt_policy, 8, initial_state=(('broken', 'ready', 'broken', 'broken', 'ready', 'ready', 'ready', 'ready', 'ready', 'ready', 'ready', 'ready', 'ready'), 0))
###Output
action=('restore_stator', 0), reward=-2.0
action=('build_retrieve_inverter', 5), reward=-3.0
action=('build_retrieve_stator', 0), reward=-3.2
action=('build_retrieve_rotor', 4), reward=-4.5
action=('assemble_motor', 6), reward=-2.5
action=('running_in', 11), reward=17.0
action=('painting', 9), reward=17.0
action=('electric_test', 12), reward=17.0
|
Examples/Arrow/JoinArrow.ipynb | ###Markdown
Converting a join to an arrow ([`R` version](https://github.com/WinVector/rquery/blob/master/Examples/Arrow/JoinArrow.md), [`Python` version](https://github.com/WinVector/data_algebra/blob/master/Examples/Arrow/JoinArrow.md)).
###Code
import pandas
from data_algebra.data_ops import *
from data_algebra.arrow import *
d1 = pandas.DataFrame({
'key': ['a', 'b'],
'x': [1, 2],
})
d1
table_1_description = describe_table(d1, table_name='d1')
d2 = pandas.DataFrame({
'key': ['b', 'c'],
'y': [3, 4],
})
d2
table_2_description = describe_table(d2, table_name='d2')
ops = table_1_description.\
natural_join(
b=table_2_description,
by=['key'],
jointype='FULL')
arrow_1 = DataOpArrow(ops, free_table_key=table_1_description.key)
#arrow_1.fit(d1)
print(arrow_1)
arrow_2 = DataOpArrow(ops, free_table_key=table_2_description.key)
print(arrow_2)
arrow_1.pipeline.eval({
'd1': d1,
'd2': d2,
})
###Output
_____no_output_____ |
examples/using_bmdal.ipynb | ###Markdown
Applying Deep Batch Active Learning to your own learning task In this notebook, we show how our implemented batch mode deep active learning (BMDAL) methods can be applied to a custom NN. We will first change the working directory from the examples subfolder to the main folder, which is required for the imports to work correctly.
###Code
import os
os.chdir('..') # change directory inside the notebook to the main directory
###Output
_____no_output_____
###Markdown
Example with artificial data and custom model We first generate some artificial 2-D training data, which will be plotted below.
###Code
import torch
import torch.nn as nn
n_train = 100
n_pool = 2000
torch.manual_seed(1234)
x = torch.randn(n_train+n_pool, 3)
theta = 3*(x[:, 1] + 0.1 * x[:, 0])
x = (0.2 * x[:, 2] + x[:, 1] + 2)[:, None] * torch.stack([torch.sin(theta), torch.cos(theta)], dim=1)
y = torch.exp(x[:, 0])
y = y[:, None]
x_train = x[:n_train]
y_train = y[:n_train]
x_pool = x[n_train:]
y_pool = y[n_train:]
###Output
_____no_output_____
###Markdown
Note that for the labels, we have used the deterministic function $y = e^{x_1}$. In order to learn this function, it would be better to have more training points on the right of the domain. We can visualize the pool set (in gray) and the train set (in black) without the labels as follows:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(x_pool[:, 0].numpy(), x_pool[:, 1].numpy(), '.', color='#BBBBBB')
plt.plot(x_train[:, 0].numpy(), x_train[:, 1].numpy(), '.', color='k')
plt.show()
###Output
_____no_output_____
###Markdown
Next, we train a NN on the given training data. For this, we use a simple three-layer NN using the standard layers from PyTorch. Since we only have 100 training points, we use all of the data in each step. We optimize the NN using Adam for 256 epochs with fixed learning rate. These hyperparameters are only an example, you may use other values.
###Code
custom_model = nn.Sequential(nn.Linear(2, 100), nn.SiLU(), nn.Linear(100, 100), nn.SiLU(), nn.Linear(100, 1))
opt = torch.optim.Adam(custom_model.parameters(), lr=2e-2)
for epoch in range(256):
y_pred = custom_model(x_train)
loss = ((y_pred - y_train)**2).mean()
train_rmse = loss.sqrt().item()
pool_rmse = ((custom_model(x_pool) - y_pool)**2).mean().sqrt().item()
print(f'train RMSE: {train_rmse:5.3f}, pool RMSE: {pool_rmse:5.3f}')
loss.backward()
opt.step()
opt.zero_grad()
###Output
train RMSE: 5.334, pool RMSE: 7.043
train RMSE: 4.116, pool RMSE: 6.060
train RMSE: 2.640, pool RMSE: 4.933
train RMSE: 2.833, pool RMSE: 4.774
train RMSE: 2.589, pool RMSE: 4.797
train RMSE: 1.809, pool RMSE: 4.461
train RMSE: 1.725, pool RMSE: 4.376
train RMSE: 1.956, pool RMSE: 4.430
train RMSE: 1.917, pool RMSE: 4.374
train RMSE: 1.600, pool RMSE: 4.187
train RMSE: 1.261, pool RMSE: 3.989
train RMSE: 1.320, pool RMSE: 3.916
train RMSE: 1.594, pool RMSE: 3.940
train RMSE: 1.584, pool RMSE: 3.909
train RMSE: 1.325, pool RMSE: 3.840
train RMSE: 1.161, pool RMSE: 3.844
train RMSE: 1.234, pool RMSE: 3.919
train RMSE: 1.332, pool RMSE: 3.976
train RMSE: 1.303, pool RMSE: 3.960
train RMSE: 1.151, pool RMSE: 3.882
train RMSE: 0.988, pool RMSE: 3.794
train RMSE: 0.970, pool RMSE: 3.752
train RMSE: 1.069, pool RMSE: 3.755
train RMSE: 1.098, pool RMSE: 3.754
train RMSE: 0.993, pool RMSE: 3.728
train RMSE: 0.870, pool RMSE: 3.713
train RMSE: 0.868, pool RMSE: 3.737
train RMSE: 0.937, pool RMSE: 3.774
train RMSE: 0.946, pool RMSE: 3.773
train RMSE: 0.872, pool RMSE: 3.729
train RMSE: 0.788, pool RMSE: 3.670
train RMSE: 0.772, pool RMSE: 3.629
train RMSE: 0.796, pool RMSE: 3.608
train RMSE: 0.777, pool RMSE: 3.589
train RMSE: 0.708, pool RMSE: 3.569
train RMSE: 0.657, pool RMSE: 3.562
train RMSE: 0.666, pool RMSE: 3.567
train RMSE: 0.680, pool RMSE: 3.564
train RMSE: 0.643, pool RMSE: 3.534
train RMSE: 0.578, pool RMSE: 3.486
train RMSE: 0.551, pool RMSE: 3.441
train RMSE: 0.562, pool RMSE: 3.411
train RMSE: 0.541, pool RMSE: 3.391
train RMSE: 0.481, pool RMSE: 3.384
train RMSE: 0.448, pool RMSE: 3.394
train RMSE: 0.455, pool RMSE: 3.407
train RMSE: 0.435, pool RMSE: 3.397
train RMSE: 0.387, pool RMSE: 3.366
train RMSE: 0.378, pool RMSE: 3.336
train RMSE: 0.384, pool RMSE: 3.316
train RMSE: 0.348, pool RMSE: 3.306
train RMSE: 0.313, pool RMSE: 3.310
train RMSE: 0.324, pool RMSE: 3.318
train RMSE: 0.314, pool RMSE: 3.308
train RMSE: 0.270, pool RMSE: 3.278
train RMSE: 0.265, pool RMSE: 3.249
train RMSE: 0.278, pool RMSE: 3.232
train RMSE: 0.255, pool RMSE: 3.228
train RMSE: 0.239, pool RMSE: 3.235
train RMSE: 0.250, pool RMSE: 3.242
train RMSE: 0.242, pool RMSE: 3.233
train RMSE: 0.223, pool RMSE: 3.214
train RMSE: 0.228, pool RMSE: 3.199
train RMSE: 0.228, pool RMSE: 3.195
train RMSE: 0.214, pool RMSE: 3.203
train RMSE: 0.215, pool RMSE: 3.215
train RMSE: 0.221, pool RMSE: 3.219
train RMSE: 0.212, pool RMSE: 3.211
train RMSE: 0.208, pool RMSE: 3.197
train RMSE: 0.215, pool RMSE: 3.188
train RMSE: 0.210, pool RMSE: 3.186
train RMSE: 0.203, pool RMSE: 3.192
train RMSE: 0.207, pool RMSE: 3.197
train RMSE: 0.207, pool RMSE: 3.194
train RMSE: 0.200, pool RMSE: 3.184
train RMSE: 0.200, pool RMSE: 3.173
train RMSE: 0.201, pool RMSE: 3.167
train RMSE: 0.196, pool RMSE: 3.168
train RMSE: 0.193, pool RMSE: 3.172
train RMSE: 0.194, pool RMSE: 3.175
train RMSE: 0.191, pool RMSE: 3.171
train RMSE: 0.187, pool RMSE: 3.163
train RMSE: 0.188, pool RMSE: 3.156
train RMSE: 0.187, pool RMSE: 3.154
train RMSE: 0.184, pool RMSE: 3.156
train RMSE: 0.184, pool RMSE: 3.159
train RMSE: 0.183, pool RMSE: 3.158
train RMSE: 0.181, pool RMSE: 3.153
train RMSE: 0.180, pool RMSE: 3.146
train RMSE: 0.180, pool RMSE: 3.142
train RMSE: 0.178, pool RMSE: 3.142
train RMSE: 0.177, pool RMSE: 3.145
train RMSE: 0.177, pool RMSE: 3.146
train RMSE: 0.176, pool RMSE: 3.144
train RMSE: 0.174, pool RMSE: 3.139
train RMSE: 0.174, pool RMSE: 3.136
train RMSE: 0.173, pool RMSE: 3.136
train RMSE: 0.172, pool RMSE: 3.138
train RMSE: 0.172, pool RMSE: 3.140
train RMSE: 0.171, pool RMSE: 3.139
train RMSE: 0.170, pool RMSE: 3.136
train RMSE: 0.169, pool RMSE: 3.132
train RMSE: 0.169, pool RMSE: 3.130
train RMSE: 0.167, pool RMSE: 3.131
train RMSE: 0.167, pool RMSE: 3.131
train RMSE: 0.166, pool RMSE: 3.130
train RMSE: 0.165, pool RMSE: 3.126
train RMSE: 0.164, pool RMSE: 3.123
train RMSE: 0.164, pool RMSE: 3.121
train RMSE: 0.163, pool RMSE: 3.120
train RMSE: 0.162, pool RMSE: 3.120
train RMSE: 0.161, pool RMSE: 3.119
train RMSE: 0.161, pool RMSE: 3.117
train RMSE: 0.160, pool RMSE: 3.114
train RMSE: 0.159, pool RMSE: 3.112
train RMSE: 0.158, pool RMSE: 3.112
train RMSE: 0.158, pool RMSE: 3.111
train RMSE: 0.157, pool RMSE: 3.110
train RMSE: 0.156, pool RMSE: 3.108
train RMSE: 0.155, pool RMSE: 3.105
train RMSE: 0.155, pool RMSE: 3.103
train RMSE: 0.154, pool RMSE: 3.102
train RMSE: 0.153, pool RMSE: 3.101
train RMSE: 0.152, pool RMSE: 3.100
train RMSE: 0.151, pool RMSE: 3.098
train RMSE: 0.151, pool RMSE: 3.096
train RMSE: 0.150, pool RMSE: 3.094
train RMSE: 0.149, pool RMSE: 3.093
train RMSE: 0.148, pool RMSE: 3.092
train RMSE: 0.148, pool RMSE: 3.090
train RMSE: 0.147, pool RMSE: 3.088
train RMSE: 0.146, pool RMSE: 3.086
train RMSE: 0.145, pool RMSE: 3.085
train RMSE: 0.145, pool RMSE: 3.083
train RMSE: 0.144, pool RMSE: 3.082
train RMSE: 0.143, pool RMSE: 3.080
train RMSE: 0.142, pool RMSE: 3.078
train RMSE: 0.141, pool RMSE: 3.076
train RMSE: 0.141, pool RMSE: 3.075
train RMSE: 0.140, pool RMSE: 3.073
train RMSE: 0.139, pool RMSE: 3.072
train RMSE: 0.138, pool RMSE: 3.070
train RMSE: 0.137, pool RMSE: 3.068
train RMSE: 0.137, pool RMSE: 3.066
train RMSE: 0.136, pool RMSE: 3.065
train RMSE: 0.135, pool RMSE: 3.063
train RMSE: 0.134, pool RMSE: 3.061
train RMSE: 0.133, pool RMSE: 3.059
train RMSE: 0.132, pool RMSE: 3.058
train RMSE: 0.132, pool RMSE: 3.056
train RMSE: 0.131, pool RMSE: 3.054
train RMSE: 0.130, pool RMSE: 3.052
train RMSE: 0.129, pool RMSE: 3.050
train RMSE: 0.128, pool RMSE: 3.049
train RMSE: 0.128, pool RMSE: 3.047
train RMSE: 0.127, pool RMSE: 3.045
train RMSE: 0.126, pool RMSE: 3.043
train RMSE: 0.125, pool RMSE: 3.041
train RMSE: 0.124, pool RMSE: 3.039
train RMSE: 0.123, pool RMSE: 3.037
train RMSE: 0.122, pool RMSE: 3.035
train RMSE: 0.122, pool RMSE: 3.034
train RMSE: 0.121, pool RMSE: 3.032
train RMSE: 0.120, pool RMSE: 3.030
train RMSE: 0.119, pool RMSE: 3.028
train RMSE: 0.118, pool RMSE: 3.026
train RMSE: 0.117, pool RMSE: 3.024
train RMSE: 0.117, pool RMSE: 3.022
train RMSE: 0.116, pool RMSE: 3.020
train RMSE: 0.115, pool RMSE: 3.018
train RMSE: 0.114, pool RMSE: 3.016
train RMSE: 0.113, pool RMSE: 3.014
train RMSE: 0.112, pool RMSE: 3.013
train RMSE: 0.111, pool RMSE: 3.011
train RMSE: 0.111, pool RMSE: 3.009
train RMSE: 0.110, pool RMSE: 3.007
train RMSE: 0.109, pool RMSE: 3.005
train RMSE: 0.108, pool RMSE: 3.003
train RMSE: 0.107, pool RMSE: 3.001
train RMSE: 0.106, pool RMSE: 2.999
train RMSE: 0.105, pool RMSE: 2.997
train RMSE: 0.105, pool RMSE: 2.995
train RMSE: 0.104, pool RMSE: 2.993
train RMSE: 0.103, pool RMSE: 2.991
train RMSE: 0.102, pool RMSE: 2.989
train RMSE: 0.101, pool RMSE: 2.987
train RMSE: 0.100, pool RMSE: 2.985
train RMSE: 0.099, pool RMSE: 2.983
train RMSE: 0.099, pool RMSE: 2.981
train RMSE: 0.098, pool RMSE: 2.979
train RMSE: 0.097, pool RMSE: 2.977
train RMSE: 0.096, pool RMSE: 2.975
train RMSE: 0.095, pool RMSE: 2.973
train RMSE: 0.094, pool RMSE: 2.971
train RMSE: 0.093, pool RMSE: 2.969
train RMSE: 0.093, pool RMSE: 2.967
train RMSE: 0.092, pool RMSE: 2.965
train RMSE: 0.091, pool RMSE: 2.963
train RMSE: 0.090, pool RMSE: 2.961
train RMSE: 0.089, pool RMSE: 2.959
train RMSE: 0.088, pool RMSE: 2.957
train RMSE: 0.088, pool RMSE: 2.955
train RMSE: 0.087, pool RMSE: 2.953
train RMSE: 0.086, pool RMSE: 2.952
train RMSE: 0.085, pool RMSE: 2.950
train RMSE: 0.084, pool RMSE: 2.948
train RMSE: 0.084, pool RMSE: 2.946
train RMSE: 0.083, pool RMSE: 2.944
train RMSE: 0.082, pool RMSE: 2.942
train RMSE: 0.081, pool RMSE: 2.940
train RMSE: 0.080, pool RMSE: 2.938
train RMSE: 0.080, pool RMSE: 2.936
train RMSE: 0.079, pool RMSE: 2.934
train RMSE: 0.078, pool RMSE: 2.932
train RMSE: 0.077, pool RMSE: 2.930
train RMSE: 0.077, pool RMSE: 2.928
train RMSE: 0.076, pool RMSE: 2.926
train RMSE: 0.075, pool RMSE: 2.924
train RMSE: 0.074, pool RMSE: 2.923
train RMSE: 0.074, pool RMSE: 2.921
train RMSE: 0.073, pool RMSE: 2.919
train RMSE: 0.072, pool RMSE: 2.917
train RMSE: 0.072, pool RMSE: 2.915
train RMSE: 0.071, pool RMSE: 2.913
train RMSE: 0.070, pool RMSE: 2.911
train RMSE: 0.070, pool RMSE: 2.910
train RMSE: 0.069, pool RMSE: 2.908
train RMSE: 0.068, pool RMSE: 2.906
train RMSE: 0.068, pool RMSE: 2.904
train RMSE: 0.067, pool RMSE: 2.902
train RMSE: 0.066, pool RMSE: 2.901
train RMSE: 0.066, pool RMSE: 2.899
train RMSE: 0.066, pool RMSE: 2.897
train RMSE: 0.067, pool RMSE: 2.895
train RMSE: 0.072, pool RMSE: 2.894
train RMSE: 0.089, pool RMSE: 2.892
train RMSE: 0.128, pool RMSE: 2.894
train RMSE: 0.175, pool RMSE: 2.891
train RMSE: 0.183, pool RMSE: 2.896
train RMSE: 0.095, pool RMSE: 2.884
train RMSE: 0.094, pool RMSE: 2.883
train RMSE: 0.154, pool RMSE: 2.891
train RMSE: 0.094, pool RMSE: 2.879
train RMSE: 0.085, pool RMSE: 2.878
train RMSE: 0.132, pool RMSE: 2.885
train RMSE: 0.072, pool RMSE: 2.876
train RMSE: 0.094, pool RMSE: 2.874
train RMSE: 0.110, pool RMSE: 2.880
train RMSE: 0.058, pool RMSE: 2.872
train RMSE: 0.101, pool RMSE: 2.869
train RMSE: 0.084, pool RMSE: 2.874
train RMSE: 0.066, pool RMSE: 2.870
train RMSE: 0.096, pool RMSE: 2.865
train RMSE: 0.060, pool RMSE: 2.868
train RMSE: 0.078, pool RMSE: 2.868
train RMSE: 0.078, pool RMSE: 2.861
###Markdown
Next, we want to select 50 new points to label using Deep Batch Active Learning. For this, we wrap our training and pool inputs in the `TensorFeatureData` class and call the method `bmdal.algorithms.select_batch()` method with the batch size (50), our trained model, training and pool data as well as our desired configuration of selection method, mode (`sel_with_train`), base kernel and kernel transformations. Here, we use the LCMD-TP method proposed in our paper. For an extensive overview over the available options, we refer to the docstring in the code for `bmdal.algorithms.select_batch()`. Finally, we plot the selected samples from the pool set.Note: If your training / pool data is so large that it does not fit on the device (CPU/GPU), you may need to write a custom `FeatureData` subclass, where the `get_tensor_impl_()` method acts as a data loader. Please contact the library author in this case.The function `select_batch()` supports the computation of gradient features for layers of type `nn.Linear` by default. In order to support gradients for other types of layers, there are two options:- The first option is that the other layers inherit from `bmdal.layer_features.LayerGradientComputation` and implement the corresponding methods. For example, the class `bmdal.layer_features.LinearLayer` does this. In contrast to `nn.Linear`, it supports the factors $\sigma_w/\sqrt{d_l}$ and $\sigma_b$ from the paper, and it is used in our benchmarking code.- The second option is that a matching of the other layer types to corresponding subclasses of `bmdal.layer_features.LayerGradientComputation` is provided via the `layer_grad_dict` argument of `select_batch()`. For more details, we refer to the documentation of `select_batch()`.
###Code
from bmdal.feature_data import TensorFeatureData
from bmdal.algorithms import select_batch
train_data = TensorFeatureData(x_train)
pool_data = TensorFeatureData(x_pool)
new_idxs, _ = select_batch(batch_size=50, models=[custom_model],
data={'train': train_data, 'pool': pool_data}, y_train=y_train,
selection_method='lcmd', sel_with_train=True,
base_kernel='grad', kernel_transforms=[('rp', [512])])
plt.plot(x_pool[:, 0].numpy(), x_pool[:, 1].numpy(), '.', color='#BBBBBB')
plt.plot(x_train[:, 0].numpy(), x_train[:, 1].numpy(), '.', color='k')
plt.plot(x_pool[new_idxs, 0].numpy(), x_pool[new_idxs, 1].numpy(), '.', color='b')
plt.show()
###Output
_____no_output_____
###Markdown
We can observe that the BMDAL method selected more pool samples from the right of the domain. This is desirable since the target function is $y = e^{x_1}$, which is steeper at the right of the domain. This behavior would not arise from a network-independent base kernel like 'linear', 'nngp' or 'laplace'. Example using our benchmark data and training code In the following, we will give a different example, utilizing a data set from our benchmark and our functions for creating and fitting fully-connected NN models. The code is adapted from `ModelTrainer.__call__()` in `train.py`. First, we need to choose a data set. We choose the `road_network` data set since it contains 2D inputs representing locations in north Denmark, which can be nicely visualized. We choose an initial training set size of `n_train=256`. The argument `al_batch_sizes` is not relevant to us here since we will choose the batch size later manually. We then create a random split of the task with `id=0`, which also serves as a random seed for the split. Finally, we convert the index tensor for the respective parts of the data set to PyTorch tensors.To be able to run the following code, it is required to follow the data download instructions from the README.md file in the repository.
###Code
from data import Task, TaskSplit
import torch
task = Task.get_tabular_tasks(n_train=256, al_batch_sizes=[256] * 16, ds_names=['road_network'])[0]
task_split = TaskSplit(task, id=0)
train_idxs = torch.as_tensor(task_split.train_idxs, dtype=torch.int64)
valid_idxs = torch.as_tensor(task_split.valid_idxs, dtype=torch.int64)
pool_idxs = torch.as_tensor(task_split.pool_idxs, dtype=torch.int64)
test_idxs = torch.as_tensor(task_split.test_idxs, dtype=torch.int64)
data = task_split.data
###Output
_____no_output_____
###Markdown
Now, we visualize the pool points (gray) and training points (black). The shape of north Denmark can clearly be recognized.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
x_train = data.tensors['X'][train_idxs]
x_pool = data.tensors['X'][pool_idxs]
y_pool = data.tensors['y'][pool_idxs]
plt.plot(x_pool[:, 0].numpy(), x_pool[:, 1].numpy(), '.', color='#BBBBBB')
plt.plot(x_train[:, 0].numpy(), x_train[:, 1].numpy(), '.', color='k')
plt.show()
###Output
_____no_output_____
###Markdown
Next, we create a fully-connected NN model with NTK Parametrization using the function `create_tabular_model` and train it using `fit_model`. Both functions contain more optional arguments to modify hyperparameters like the width and number of layers, the learning rate, and so on. We refer to the respective implementations for more details.Unlike typical PyTorch code, the two functions mentioned above work with NNs that are vectorized such that multiple ensemble members are trained separately. The number of ensemble members is given by the `n_models` parameter, which we set to $1$. In order to obtain a non-vectorized NN in the end, which is required for BMDAL, we call the function `get_single_model()`.
###Code
from train import fit_model
from models import create_tabular_model
n_models = 1
n_features = data.tensors['X'].shape[1] # will be 2 in our case
vectorized_model = create_tabular_model(n_models=n_models, n_features=n_features)
fit_model(vectorized_model, data, n_models, train_idxs, valid_idxs)
model = vectorized_model.get_single_model(0)
###Output
................................................................................................................................................................................................................................................................
###Markdown
The model structure, printed below, contains some custom layer types. The `ParallelSequential` and `ParallelLayerWrapper` layers come from the vectorization, but for the purpose of BMDAL `ParallelSequential` could be replaced by `nn.Sequential` and `ParallelLayerWrapper` could be omitted. The class `bmdal.layer_features.LinearLayer` implements the abstract class `bmdal.layer_features.LayerGradientComputation` and is therefore automatically used for computing gradient features. Compared to `nn.Linear`, `LinearLayer` supports weight and bias factors $\sigma_w/\sqrt{d_l}$ and $\sigma_b$ from the Neural Tangent Parameterization as used in the paper.
###Code
print(model)
###Output
ParallelSequential(
(layers): ModuleList(
(0): LinearLayer()
(1): ParallelLayerWrapper(
(module): ReLU()
)
(2): LinearLayer()
(3): ParallelLayerWrapper(
(module): ReLU()
)
(4): LinearLayer()
)
)
###Markdown
Now, we apply BMDAL to select a subset of elements from the pool set as in the example above.
###Code
from bmdal.feature_data import TensorFeatureData
from bmdal.algorithms import select_batch
X = TensorFeatureData(data.tensors['X'])
feature_data = {'train': X[train_idxs],
'pool': X[pool_idxs]}
y_train = data.tensors['y'][train_idxs]
new_idxs, al_stats = select_batch(batch_size=128, models=[model], data=feature_data, y_train=y_train,
selection_method='lcmd', sel_with_train=True,
base_kernel='grad', kernel_transforms=[('rp', [512])])
# move new_idxs from the pool set to the training set
# therefore, we first create a boolean array that is True at the indices in new_idxs and False elsewhere
logical_new_idxs = torch.zeros(pool_idxs.shape[-1], dtype=torch.bool)
logical_new_idxs[new_idxs] = True
# We now append the new indices to the training set
train_idxs = torch.cat([train_idxs, pool_idxs[logical_new_idxs]], dim=-1)
# and remove them from the pool set
pool_idxs = pool_idxs[~logical_new_idxs]
###Output
Added 256 train samples to selection
###Markdown
The `al_stats` object contains some information about the execution of BMDAL. Here, the time for building the features and running the selection was about 19 seconds on a CPU, which is not very much considering that the initial pool set contains 198,720 samples! The entry for `selection_status` may contain a warning message in case that the normal selection failed. Usually, the batch is filled up with random samples in this case. Here, we have no warning.
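Since `selection_status` is `None` here, no fallback occurred; a small sketch of how one might guard against the fallback case (field name taken from the `al_stats` dictionary printed below):

```python
# Sketch: warn if the selection had to fall back to random fill-up samples.
status = al_stats.get('selection_status')
if status is not None:
    print('Selection warning (batch may contain random fill-up samples):', status)
```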
###Code
print(al_stats)
###Output
{'kernel_time': {'total': 6.66116532900196, 'process': 53.213278265999996}, 'selection_time': {'total': 12.25830632999714, 'process': 48.66809945300001}, 'selection_status': None}
###Markdown
Next, we plot the selected points:
###Code
plt.plot(x_pool[:, 0].numpy(), x_pool[:, 1].numpy(), '.', color='#BBBBBB')
plt.plot(x_train[:, 0].numpy(), x_train[:, 1].numpy(), '.', color='k')
plt.plot(x_pool[new_idxs, 0].numpy(), x_pool[new_idxs, 1].numpy(), '.', color='b')
plt.show()
###Output
_____no_output_____
###Markdown
The selected points concentrate in some regions of the space. We can see that the labels (which correspond to the elevation of the land in this data set) vary more strongly in these regions:
###Code
plt.scatter(x_pool[:, 0].numpy(), x_pool[:, 1].numpy(), c=y_pool[:, 0].numpy())
plt.gray()
plt.show()
###Output
_____no_output_____ |
2016/tutorial_final/38/Recommendation.ipynb | ###Markdown
Recommendation System Student Name: Dacheng Wen (dachengw) Introduction This tutorial will introduce an approach to building a simple recommendation system. According to the definition from Wikipedia, a recommendation system is a subclass of information filtering system that seeks to predict the "rating" or "preference" that a user would give to an item. A daily example is Amazon's recommendation engine: [](http://netdna.webdesignerdepot.com/uploads/amazon//recommended.jpg) Theoretically, Amazon analyzes users' information (purchase history, browse history and more) to recommend what the users may want to buy. Tutorial content In this tutorial, we will build a simple offline recommendation system to recommend movies. This recommendation system is not practical or sophisticated enough for commercial use, but working through this tutorial can give a sense of how a recommendation system works. We will cover the following topics in this tutorial:- [Expectation](Expectation)- [Downloading and loading data](Downloading-and-loading-data)- [Item-based collaborative filtering](Item-based-collaborative-filtering)- [Recommendation for new users](Recommendation-for-new-users)- [Summary](Summary) Expectation The recommendation system we will build can: 1. Take the existing rating data as input. 2. Recommend to each user at most k (k = 5 for this tutorial) movies that the user hasn't rated yet.
###Code
k = 5
###Output
_____no_output_____
###Markdown
Downloading and loading data We are going to use the open dataset provided by MovieLens (https://movielens.org/). The dataset can be downloaded from http://grouplens.org/datasets/movielens/. For this tutorial, we will use the u.data file from the smallest dataset (100K records). According to the ReadMe (http://files.grouplens.org/datasets/movielens/ml-100k/README), this file contains ratings by 943 users on 1682 items. Each user has rated at least 20 movies. Users and items are numbered consecutively from 1. The data is randomly ordered. Each record is a tab-separated list of: user id | item id | rating | timestamp. Note: 1. An item means a movie, so the item id is the movie id. We consider item and movie interchangeable for this tutorial. 2. For the simple recommendation system we are going to build, we only use the first three fields: user id, item id and rating. That is to say, we ignore the timestamp. The timestamp is indeed valuable information, but we ignore it in this tutorial for simplicity. 3. Ratings range from 1 to 5, with 5 being the best. Although not necessary, it would be nice to be able to get a movie title by its id. Therefore we also download the u.item file. The first two fields of every record in this file are movie id | movie title | ... Let's download these files:
###Code
import requests
def download_file(link_address, filename):
response = requests.get(link_address, stream=True)
if (response.status_code == requests.codes.ok) :
with open(filename, 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
print "Successfully downloaded " + filename
return True
else:
print "Sorry, " + filename + " download failed"
return False
# download user - movie ratings
download_file('http://files.grouplens.org/datasets/movielens/ml-100k/u.data', 'u.data')
# download movie id - movie map
download_file('http://files.grouplens.org/datasets/movielens/ml-100k/u.item', 'u.item')
###Output
Successfully downloaded u.data
Successfully downloaded u.item
###Markdown
Then read the files into memory:
###Code
# read u.data
user_rating_raw = []
with open('u.data') as f:
for line in f:
fields = line.split('\t')
user_rating_raw.append([int(fields[0]),
int(fields[1]),
float(fields[2]),
int(fields[3])])
print "Read u.data, got " + str(len(user_rating_raw)) + " rating records."
print
print "The first 5 records are:"
for row_index in range(5):
print user_rating_raw[row_index]
print
# read u.item
movie_title_map = {};
with open('u.item') as f:
for line in f:
fields = line.split('|')
movie_title_map[int(fields[0])] = fields[1]
print "Read id-title map for " + str(len(movie_title_map)) + " movies."
print
print "The first 5 movies in the map are:"
for movie_id in range(1, 6):
print (movie_id, movie_title_map[movie_id])
print
###Output
Read id-title map for 1682 movies.
The first 5 movies in the map are:
(1, 'Toy Story (1995)')
(2, 'GoldenEye (1995)')
(3, 'Four Rooms (1995)')
(4, 'Get Shorty (1995)')
(5, 'Copycat (1995)')
###Markdown
Item based collaborative filtering Among the many recommendation algorithms, item-based collaborative filtering is one of the most popular. The recommendation algorithms used by Amazon and other websites are based on item-based collaborative filtering (https://en.wikipedia.org/wiki/Item-item_collaborative_filtering). * We are going to implement a simple item-based collaborative filtering approach in this tutorial. The idea of item-based collaborative filtering is to find similar items, and then recommend items based on the items in the user's history. Let's say we found that _Star Wars (1977)_ is similar to _Return of the Jedi (1983)_; we then assume that users who like _Star Wars (1977)_ are going to enjoy _Return of the Jedi (1983)_ too. Therefore, if we find a user who has watched (rated) _Star Wars (1977)_ but hasn't watched (rated) _Return of the Jedi (1983)_, we will recommend _Return of the Jedi (1983)_ to that user. For our MovieLens scenario, we need to: 1. Compute the similarity between movies based on the ratings. 2. For each user, recommend movies which are similar to the movies rated by that user, excluding the movies already rated by that user. Reference: * Linden, G., Smith, B., & York, J. (2003). Amazon.com recommendations: Item-to-item collaborative filtering. IEEE Internet Computing, 7(1), 76-80.
###Code
import numpy as np
# number of movies and number of users,
# these two numbers are from ReadMe (http://files.grouplens.org/datasets/movielens/ml-100k/README)
num_user = 943
num_movie = 1682
movie_user_mat = np.zeros((num_movie, num_user));
for user_rating_record in user_rating_raw:
# minus 1 to convert the index (id) to 0 based
user_index = user_rating_record[0] - 1
movie_index = user_rating_record[1] - 1
rating = user_rating_record[2]
movie_user_mat[movie_index][user_index] = rating
###Output
_____no_output_____
###Markdown
Now that we have the movie-user matrix, we can perform the first step: computing the similarity between movies. We will use the cosine similarity measure that we have learned about (https://en.wikipedia.org/wiki/Cosine_similarity). Because each row represents the ratings for a movie from all users, we treat the rows as the input vectors. Note that the similarity matrix, movie_similarity_mat, is a symmetric matrix (movie_similarity_mat\[i\]\[j\] = movie_similarity_mat\[j\]\[i\]).
###Code
import scipy.spatial as scp
movie_similarity_mat = np.zeros((num_movie, num_movie))
for i in range(num_movie):
movie_i_rating = movie_user_mat[i]
for j in range(i, num_movie):
movie_j_rating = movie_user_mat[j]
cos_similarity = 1.0 - scp.distance.cosine(movie_i_rating, movie_j_rating)
movie_similarity_mat[i][j] = cos_similarity
movie_similarity_mat[j][i] = cos_similarity
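# Note: the double loop above performs on the order of 1682^2 / 2 (about 1.4 million)
# pairwise computations in pure Python, which is slow. An optional vectorized
# alternative (same result up to floating-point error, as long as no movie has an
# all-zero rating row) would be:
#   from sklearn.metrics.pairwise import cosine_similarity
#   movie_similarity_mat = cosine_similarity(movie_user_mat)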
###Output
_____no_output_____
###Markdown
Finally, we can compute which movies should be recommended to the users. In order to achieve this goal, for each user, we need to compute his/her interest in each movie. We represent this interest using a coefficient. The coefficient indicating the j-th user's interest in the i-th movie (a larger coefficient means the user is more interested in that movie) is $$ coefficient[i][j]= \sum_{k=1}^n similarity[k-1][i] * rating[k-1][j]$$ where n is the number of movies, similarity\[k-1\]\[i\] is movie_similarity_mat\[k-1\]\[i\] (the similarity between the k-1 th movie and the i-th movie) and rating\[k-1\]\[j\] is movie_user_mat\[k-1\]\[j\] (the j-th user's rating of the k-1 th movie). Note that this equation is equivalent to $$ coefficient[i][j]= \sum_{k=1}^n similarity[i][k-1] * rating[k-1][j]$$ because movie_similarity_mat is symmetric. It may look confusing, so let's take a small dataset (stored in test_rat) as an example.
###Code
test_rat = np.asarray([[0,1,5],
[1,0,5],
[5,0,0],
[0,5,3]]);
test_simi = np.zeros((4, 4))
for i in range(4):
movie_i_rating = test_rat[i]
for j in range(i, 4):
movie_j_rating = test_rat[j]
cos_similarity = 1.0 - scp.distance.cosine(movie_i_rating, movie_j_rating)
test_simi[i][j] = cos_similarity
test_simi[j][i] = cos_similarity
print "movie-rating:"
print test_rat
print
print "similarities:"
print test_simi
###Output
movie-rating:
[[0 1 5]
[1 0 5]
[5 0 0]
[0 5 3]]
similarities:
[[ 1. 0.96153846 0. 0.67267279]
[ 0.96153846 1. 0.19611614 0.5045046 ]
[ 0. 0.19611614 1. 0. ]
[ 0.67267279 0.5045046 0. 1. ]]
###Markdown
For the first user (the 0-th user), his/her interest in the first movie (the 0-th movie) should be: $$ coefficient[0][0] = rating[0][0] * similarity[0][0] + rating[1][0] * similarity[1][0] + rating[2][0] * similarity[2][0] + rating[3][0] * similarity[3][0] $$$$ coefficient[0][0] = 0 * 1 + 1 * 0.96153846 + 5 * 0 + 0 * 0.67267279 = 0.96153846 $$ and his/her interest in the last movie (the 3rd movie) should be: $$ coefficient[3][0] = 0 * 0.67267279 + 1 * 0.5045046 + 5 * 0 + 0 * 1 = 0.5045046 $$ Because 0.96153846 > 0.5045046, we should recommend the first movie instead of the last movie if we can only recommend one. Note that the equation $$ coefficient[i][j]= \sum_{k=1}^n similarity[i][k-1] * rating[k-1][j]$$ is simply a matrix dot operation: $$coefficient = similarity.dot(rating)$$ The last detail we need to take care of is that we shouldn't recommend a movie that has already been rated. If a user has already rated _Star Wars (1977)_, we should not recommend _Star Wars (1977)_ to this user. We store the coefficients in recommendation_coefficient_mat, and store the ids of the recommended movies for each user in a dictionary, recommendation_per_user.
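As a quick sanity check of the matrix formulation on the small example above, the whole coefficient matrix for the toy data is a single dot product:

```python
# coefficient[i][j] for the toy example; entry [0][0] is ~0.96153846 and
# entry [3][0] is ~0.5045046, matching the hand computation above.
test_coefficients = test_simi.dot(test_rat)
```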
###Code
import heapq
# find n elements with largest values from a dictonary
# http://www.pataprogramming.com/2010/03/python-dict-n-largest/
def dict_nlargest(d,n):
return heapq.nlargest(n,
d,
key = lambda t: d[t])
# num_movie by num_user = (num_movie by num_movie) * (num_movie by num_user)
recommendation_coefficient_mat = movie_similarity_mat.dot(movie_user_mat)
recommendation_per_user = {}
for user_index in range(num_user):
recommendation_coefficient_vector = recommendation_coefficient_mat.T[user_index]
# remove the movies that already been rated
unrated_movie = (movie_user_mat.T[user_index] == 0)
recommendation_coefficient_vector *= unrated_movie
recommendation_coefficient_dict = {movie_id:coefficient
for movie_id, coefficient
in enumerate(recommendation_coefficient_vector)}
recommendation_per_user[user_index] = dict_nlargest(recommendation_coefficient_dict, k)
###Output
_____no_output_____
###Markdown
So the recommended movies for the first user are:
###Code
print "(movie id, title)"
for movie_id in recommendation_per_user[0]:
# movie_id + 1 to convert it backed to 1-based instead of 0-based
print (movie_id, movie_title_map[movie_id + 1])
print
###Output
(movie id, title)
(422, 'E.T. the Extra-Terrestrial (1982)')
(654, 'Stand by Me (1986)')
(567, 'Speed (1994)')
(402, 'Batman (1989)')
(384, 'True Lies (1994)')
###Markdown
Recommendation for new users We mentioned that we can use users' information to recommend movies, but what if we have a new user about whom we have no information? The coefficients for that user will be all zeros, and it is not reasonable to find the top-5 elements in an array of zeros. What movies should we recommend? An option is to recommend the movies that were rated by the largest number of users. This is similar to recommending "best sellers" on Amazon.com to new users.
###Code
import collections
movie_rated_counter = collections.Counter([rating_record[1]
for rating_record in user_rating_raw])
most_rated_movies = movie_rated_counter.most_common(k)
print "The most rated 5 movies are:\n"
for movie_id, rated_count in most_rated_movies:
print (movie_id, movie_title_map[movie_id], rated_count)
print
###Output
The most rated 5 movies are:
(50, 'Star Wars (1977)', 583)
(258, 'Contact (1997)', 509)
(100, 'Fargo (1996)', 508)
(181, 'Return of the Jedi (1983)', 507)
(294, 'Liar Liar (1997)', 485)
|
Regression/Copy of data_preprocessing_tools.ipynb | ###Markdown
Importing the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Data.csv')
x = dataset.iloc[ : , : -1].values
y = dataset.iloc[ : , -1].values
print(x)
print(y)
###Output
['No' 'Yes' 'No' 'No' 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes']
###Markdown
Taking care of missing data
###Code
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')
imputer.fit(x[:,1:3])
x[:,1:3] = imputer.transform(x[:,1:3])
print(x)
###Output
[['France' 44.0 72000.0]
['Spain' 27.0 48000.0]
['Germany' 30.0 54000.0]
['Spain' 38.0 61000.0]
['Germany' 40.0 63777.77777777778]
['France' 35.0 58000.0]
['Spain' 38.77777777777778 52000.0]
['France' 48.0 79000.0]
['Germany' 50.0 83000.0]
['France' 37.0 67000.0]]
###Markdown
Encoding categorical data Encoding the Independent Variable
###Code
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder',OneHotEncoder(),[0])],remainder = 'passthrough')
x = np.array(ct.fit_transform(x))
print(x)
###Output
[[1.0 0.0 0.0 44.0 72000.0]
[0.0 0.0 1.0 27.0 48000.0]
[0.0 1.0 0.0 30.0 54000.0]
[0.0 0.0 1.0 38.0 61000.0]
[0.0 1.0 0.0 40.0 63777.77777777778]
[1.0 0.0 0.0 35.0 58000.0]
[0.0 0.0 1.0 38.77777777777778 52000.0]
[1.0 0.0 0.0 48.0 79000.0]
[0.0 1.0 0.0 50.0 83000.0]
[1.0 0.0 0.0 37.0 67000.0]]
###Markdown
Encoding the Dependent Variable
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
print(y)
###Output
[0 1 0 0 1 1 0 1 0 1]
###Markdown
Splitting the dataset into the Training set and Test set
###Code
from sklearn.model_selection import train_test_split
x_tran,x_test,y_tran,y_test = train_test_split(x,y,test_size = 0.2,random_state = 1)
print(x_tran)
print(y_tran)
print(x_test)
print(y_test)
###Output
[0 1]
###Markdown
Feature Scaling
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_tran[:, 3:] = sc.fit_transform(x_tran[:, 3:])
x_test[:,3:] = sc.transform(x_test[:,3:])
print(x_tran)
print(x_test)
###Output
[[0.0 1.0 0.0 -1.4661817944830124 -0.9069571034860727]
[1.0 0.0 0.0 -0.44973664397484414 0.2056403393225306]]
|
PythonDataScienceHandbook/05.04-Feature-Engineering.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* Feature Engineering The previous sections outline the fundamental ideas of machine learning, but all of the examples assume that you have numerical data in a tidy, ``[n_samples, n_features]`` format.In the real world, data rarely comes in such a form.With this in mind, one of the more important steps in using machine learning in practice is *feature engineering*: that is, taking whatever information you have about your problem and turning it into numbers that you can use to build your feature matrix.In this section, we will cover a few common examples of feature engineering tasks: features for representing *categorical data*, features for representing *text*, and features for representing *images*.Additionally, we will discuss *derived features* for increasing model complexity and *imputation* of missing data.Often this process is known as *vectorization*, as it involves converting arbitrary data into well-behaved vectors. Categorical FeaturesOne common type of non-numerical data is *categorical* data.For example, imagine you are exploring some data on housing prices, and along with numerical features like "price" and "rooms", you also have "neighborhood" information.For example, your data might look something like this:
###Code
data = [
{'price': 850000, 'rooms': 4, 'neighborhood': 'Queen Anne'},
{'price': 700000, 'rooms': 3, 'neighborhood': 'Fremont'},
{'price': 650000, 'rooms': 3, 'neighborhood': 'Wallingford'},
{'price': 600000, 'rooms': 2, 'neighborhood': 'Fremont'}
]
###Output
_____no_output_____
###Markdown
You might be tempted to encode this data with a straightforward numerical mapping:
###Code
{'Queen Anne': 1, 'Fremont': 2, 'Wallingford': 3};
###Output
_____no_output_____
###Markdown
It turns out that this is not generally a useful approach in Scikit-Learn: the package's models make the fundamental assumption that numerical features reflect algebraic quantities.Thus such a mapping would imply, for example, that *Queen Anne < Fremont < Wallingford*, or even that *Wallingford - Queen Anne = Fremont*, which (niche demographic jokes aside) does not make much sense.In this case, one proven technique is to use *one-hot encoding*, which effectively creates extra columns indicating the presence or absence of a category with a value of 1 or 0, respectively.When your data comes as a list of dictionaries, Scikit-Learn's ``DictVectorizer`` will do this for you:
###Code
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer(sparse=False, dtype=int)
vec.fit_transform(data)
###Output
_____no_output_____
###Markdown
Notice that the 'neighborhood' column has been expanded into three separate columns, representing the three neighborhood labels, and that each row has a 1 in the column associated with its neighborhood.With these categorical features thus encoded, you can proceed as normal with fitting a Scikit-Learn model.To see the meaning of each column, you can inspect the feature names:
###Code
vec.get_feature_names()
###Output
_____no_output_____
###Markdown
There is one clear disadvantage of this approach: if your category has many possible values, this can *greatly* increase the size of your dataset.However, because the encoded data contains mostly zeros, a sparse output can be a very efficient solution:
###Code
vec = DictVectorizer(sparse=True, dtype=int)
vec.fit_transform(data)
###Output
_____no_output_____
###Markdown
Many (though not yet all) of the Scikit-Learn estimators accept such sparse inputs when fitting and evaluating models. ``sklearn.preprocessing.OneHotEncoder`` and ``sklearn.feature_extraction.FeatureHasher`` are two additional tools that Scikit-Learn includes to support this type of encoding. Text FeaturesAnother common need in feature engineering is to convert text to a set of representative numerical values.For example, most automatic mining of social media data relies on some form of encoding the text as numbers.One of the simplest methods of encoding data is by *word counts*: you take each snippet of text, count the occurrences of each word within it, and put the results in a table.For example, consider the following set of three phrases:
###Code
sample = ['problem of evil',
'evil queen',
'horizon problem']
###Output
_____no_output_____
###Markdown
For a vectorization of this data based on word count, we could construct a column representing the word "problem," the word "evil," the word "horizon," and so on.While doing this by hand would be possible, the tedium can be avoided by using Scikit-Learn's ``CountVectorizer``:
###Code
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform(sample)
X
###Output
_____no_output_____
###Markdown
The result is a sparse matrix recording the number of times each word appears; it is easier to inspect if we convert this to a ``DataFrame`` with labeled columns:
###Code
import pandas as pd
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
###Output
_____no_output_____
###Markdown
There are some issues with this approach, however: the raw word counts lead to features which put too much weight on words that appear very frequently, and this can be sub-optimal in some classification algorithms.One approach to fix this is known as *term frequency-inverse document frequency* (*TF–IDF*) which weights the word counts by a measure of how often they appear in the documents.The syntax for computing these features is similar to the previous example:
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
X = vec.fit_transform(sample)
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
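# With the default settings, each raw count is weighted by a smoothed inverse document
# frequency, idf(t) = ln((1 + n) / (1 + df(t))) + 1, and each row is then L2-normalized
# (n = number of documents, df(t) = number of documents containing term t).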
###Output
_____no_output_____
###Markdown
For an example of using TF-IDF in a classification problem, see [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb). Image FeaturesAnother common need is to suitably encode *images* for machine learning analysis.The simplest approach is what we used for the digits data in [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb): simply using the pixel values themselves.But depending on the application, such approaches may not be optimal.A comprehensive summary of feature extraction techniques for images is well beyond the scope of this section, but you can find excellent implementations of many of the standard approaches in the [Scikit-Image project](http://scikit-image.org).For one example of using Scikit-Learn and Scikit-Image together, see [Feature Engineering: Working with Images](05.14-Image-Features.ipynb). Derived FeaturesAnother useful type of feature is one that is mathematically derived from some input features.We saw an example of this in [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb) when we constructed *polynomial features* from our input data.We saw that we could convert a linear regression into a polynomial regression not by changing the model, but by transforming the input!This is sometimes known as *basis function regression*, and is explored further in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb).For example, this data clearly cannot be well described by a straight line:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1, 2, 3, 4, 5])
y = np.array([4, 2, 1, 3, 7])
plt.scatter(x, y);
###Output
_____no_output_____
###Markdown
Still, we can fit a line to the data using ``LinearRegression`` and get the optimal result:
###Code
from sklearn.linear_model import LinearRegression
X = x[:, np.newaxis]
model = LinearRegression().fit(X, y)
yfit = model.predict(X)
plt.scatter(x, y)
plt.plot(x, yfit);
###Output
_____no_output_____
###Markdown
It's clear that we need a more sophisticated model to describe the relationship between $x$ and $y$.One approach to this is to transform the data, adding extra columns of features to drive more flexibility in the model.For example, we can add polynomial features to the data this way:
###Code
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=3, include_bias=False)
X2 = poly.fit_transform(X)
print(X2)
###Output
[[ 1. 1. 1.]
[ 2. 4. 8.]
[ 3. 9. 27.]
[ 4. 16. 64.]
[ 5. 25. 125.]]
###Markdown
The derived feature matrix has one column representing $x$, and a second column representing $x^2$, and a third column representing $x^3$.Computing a linear regression on this expanded input gives a much closer fit to our data:
###Code
model = LinearRegression().fit(X2, y)
yfit = model.predict(X2)
plt.scatter(x, y)
plt.plot(x, yfit);
###Output
_____no_output_____
###Markdown
This idea of improving a model not by changing the model, but by transforming the inputs, is fundamental to many of the more powerful machine learning methods.We explore this idea further in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb) in the context of *basis function regression*.More generally, this is one motivational path to the powerful set of techniques known as *kernel methods*, which we will explore in [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb). Imputation of Missing DataAnother common need in feature engineering is handling of missing data.We discussed the handling of missing data in ``DataFrame``s in [Handling Missing Data](03.04-Missing-Values.ipynb), and saw that often the ``NaN`` value is used to mark missing values.For example, we might have a dataset that looks like this:
###Code
from numpy import nan
X = np.array([[ nan, 0, 3 ],
[ 3, 7, 9 ],
[ 3, 5, 2 ],
[ 4, nan, 6 ],
[ 8, 8, 1 ]])
y = np.array([14, 16, -1, 8, -5])
###Output
_____no_output_____
###Markdown
When applying a typical machine learning model to such data, we will need to first replace such missing data with some appropriate fill value.This is known as *imputation* of missing values, and strategies range from simple (e.g., replacing missing values with the mean of the column) to sophisticated (e.g., using matrix completion or a robust model to handle such data).The sophisticated approaches tend to be very application-specific, and we won't dive into them here.For a baseline imputation approach, using the mean, median, or most frequent value, Scikit-Learn provides the ``Imputer`` class:
###Code
from sklearn.preprocessing import Imputer
imp = Imputer(strategy='mean')
X2 = imp.fit_transform(X)
X2
###Output
_____no_output_____
###Markdown
We see that in the resulting data, the two missing values have been replaced with the mean of the remaining values in the column. This imputed data can then be fed directly into, for example, a ``LinearRegression`` estimator:
###Code
model = LinearRegression().fit(X2, y)
model.predict(X2)
###Output
_____no_output_____
###Markdown
Feature PipelinesWith any of the preceding examples, it can quickly become tedious to do the transformations by hand, especially if you wish to string together multiple steps.For example, we might want a processing pipeline that looks something like this:1. Impute missing values using the mean2. Transform features to quadratic3. Fit a linear regressionTo streamline this type of processing pipeline, Scikit-Learn provides a ``Pipeline`` object, which can be used as follows:
###Code
from sklearn.pipeline import make_pipeline
model = make_pipeline(Imputer(strategy='mean'),
PolynomialFeatures(degree=2),
LinearRegression())
###Output
_____no_output_____
###Markdown
This pipeline looks and acts like a standard Scikit-Learn object, and will apply all the specified steps to any input data.
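Under the hood, ``make_pipeline`` simply auto-generates a name for each step; an equivalent explicit form (shown here only as a sketch, using the estimators imported above) would be:

```python
from sklearn.pipeline import Pipeline

model = Pipeline([('imputer', Imputer(strategy='mean')),
                  ('poly', PolynomialFeatures(degree=2)),
                  ('linreg', LinearRegression())])
```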
###Code
model.fit(X, y) # X with missing values, from above
print(y)
print(model.predict(X))
###Output
[14 16 -1 8 -5]
[14. 16. -1. 8. -5.]
|
fossen_depth_roll_pitch_controllers/.ipynb_checkpoints/bluerov_model_v1.7-checkpoint.ipynb | ###Markdown
**Depth, Roll and Pitch Controller** Dynamic model based on the BlueROV2 Heavy, using Fossen's model. **Control Strategy** **Code**
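The control strategy implemented in the code below is a cascade: an outer PID loop acts on the depth, roll and pitch errors and produces body-rate references ($w_{ref}$, $p_{ref}$, $q_{ref}$), while an inner loop converts each rate error into a force/torque that also compensates the modelled damping and restoring terms of the corresponding degree of freedom. For heave, the inner law as implemented in `altitud_controller` is

$$\tau_Z = a_{des} - u_{PID}\,(m - Z_{\dot w}) - \left(Z_w + Z_{w|w|}\,|w|\right) w - (W - B)\cos\theta\cos\phi,$$

saturated to $\pm 160\,\mathrm{N}$, where $u_{PID}$ is the PID output for the rate error $w - w_{ref}$ and $a_{des}$ is a small constant desired acceleration; the roll and pitch controllers follow the same pattern with the corresponding inertia, damping and restoring terms.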
###Code
import numpy as np
import yaml
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
#Global variables
m = 11.5 #kg
W = 112.8 #N
B = 114.8 #N 114.8
Zg = 0.02 #m
Ix = 0.16 #kg*m^2
Iy = 0.16 #kg*m^2
Iz = 0.16 #kg*m^2
##Added Mass
X_du = -5.5 #kg
Y_dv = -12.7 #kg
Z_dw = -14.57 #kg
K_dp = -0.12 #kg*m^2/rad
M_dq = -0.12 #kg*m^2/rad
N_dr = -0.12 #kg*m^2/rad
##Linear Damping
Xu = -4.03 #Ns/m
Yv = -6.22 #Ns/m
Zw = -5.18 #Ns/m previous value -> 5.18
Kp = -0.07 #Ns/rad
Mq = -0.07 #Ns/rad
Nr = -0.07 #Ns/rad
##Quadratic Damping
Xuu = -18.18 #Ns^2/m^2
Yvv = -21.66 #Ns^2/m^2
Zww = -36.99 #Ns^2/m^2 previous value -> 36.99
Kpp = -1.55 #Ns^2/rad^2
Mqq = -1.55 #Ns^2/rad^2
Nrr = -1.55 #Ns^2/rad^2
MAX_Z_FORCE = 160.0
MAX_ROLL_FORCE = 44.48
MAX_PITCH_FORCE = 28.8
class Controller:
"""PID controller."""
def __init__(self, Kp, Ki, Kd, origin_time=None):
if origin_time is None:
origin_time = 0
# Gains for each term
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self.windup_guard = 20
# Corrections (outputs)
self.Cp = 0.0
self.Ci = 0.0
self.Cd = 0.0
self.previous_time = origin_time
self.sp_previous_time = origin_time
self.previous_error = 0.0
self.previous_altitud_error = 0.0
def setWindup(self, windup):
self.windup_guard = windup
def Update(self, error, current_time):
"""Update PID controller."""
dt = current_time - self.previous_time
if dt <= 0.0:
return 0
de = error - self.previous_error
self.Cp = error
self.Ci += error * dt
self.Cd = de / dt
##Anti-windup
if (self.Ci < -self.windup_guard):
self.Ci = -self.windup_guard
elif (self.Ci > self.windup_guard):
self.Ci = self.windup_guard
#Update previous values
self.previous_time = current_time
self.previous_error = error
return (
(self.Kp * self.Cp) # proportional term
+ (self.Ki * self.Ci) # integral term
+ (self.Kd * self.Cd) # derivative term
)
def derivative_error(self, current_altitud_error, dt) :
""" Computes the angular position error from altitude error """
de = current_altitud_error - self.previous_altitud_error
        self.previous_altitud_error = current_altitud_error
return (de/dt)
def altitud_controller(self, error, current_time, x_, v_):
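        # Inner-loop control law for heave: the PID acts on the rate error (w - w_ref),
        # its output is scaled by the effective mass (m - Z_dw), and the modelled
        # damping and weight/buoyancy terms are subtracted so that the commanded
        # force approximately cancels them; the result is saturated to +/-MAX_Z_FORCE.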
desired_acc = 0.001;
##position
phi = x_[3]
theta = x_[4]
##velocity
w = v_[2]
c1 = np.cos(phi)
c2 = np.cos(theta)
control_signal = self.Update(error, current_time)
temp = desired_acc - control_signal*(m - Z_dw)
tau_linear = temp - ((Zw + Zww*np.absolute(w))*w + (W-B)*c2*c1)
if tau_linear > MAX_Z_FORCE:
tau_linear = MAX_Z_FORCE
if tau_linear < -MAX_Z_FORCE:
tau_linear = -MAX_Z_FORCE
return tau_linear
def roll_controller(self, error, current_time, x_, v_): #p_dot_dot
desired_acc = 0.001;
##position
phi = x_[3]
theta = x_[4]
c2 = np.cos(theta)
s1 = np.sin(phi)
##velocity
p = v_[3]
control_signal = self.Update(error, current_time)
temp = desired_acc - control_signal*(Ix - K_dp)
tau_linear = temp - ((Kp + Kpp*np.absolute(p))*p - Zg*W*c2*s1)
if tau_linear > MAX_ROLL_FORCE:
tau_linear = MAX_ROLL_FORCE
if tau_linear < -MAX_ROLL_FORCE:
tau_linear = -MAX_ROLL_FORCE
return tau_linear
    def pitch_controller(self, error, current_time, x_, v_): # q_dot_dot
desired_acc = 0.001;
##velocity
q = v_[4]
##position
theta = x_[4]
s2 = np.sin(theta)
control_signal = self.Update(error, current_time)
        temp = desired_acc - control_signal*(Iy - M_dq)  # pitch axis uses Iy and M_dq (numerically equal to Ix, K_dp here)
tau_linear = temp - ((Mq + Mqq*np.absolute(q))*q - Zg*W*s2)
if tau_linear > MAX_PITCH_FORCE:
tau_linear = MAX_PITCH_FORCE
if tau_linear < -MAX_PITCH_FORCE:
tau_linear = -MAX_PITCH_FORCE
return tau_linear
class Bluerov:
"""Bluerov Class"""
def __init__(self, position, velocity, acceleration, tau):
self.x_ = position
self.v_ = velocity
self.acc = acceleration
self.tau = tau
self.p_dot_prev = np.zeros(9)
self.p_dot_prev = np.array( self.p_dot_prev).reshape(3,3)
self.model(self.x_, self.v_, self.acc, self.tau)
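        # Thruster configuration matrix of the 8-thruster frame: column i maps the
        # force of thruster i into the six generalized forces/moments [X, Y, Z, K, M, N].
        # T_plus (computed below) is its Moore-Penrose pseudo-inverse, used for control
        # allocation, and K_ holds the (assumed linear) thruster gains.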
self.T = np.array([[0.707, 0.707, -0.707, -0.707, 0, 0, 0, 0],
[-0.707, 0.707, -0.707, 0.707, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 1, 1, -1],
[0.06, -0.06, 0.06, -0.06, -0.218, -0.218, 0.218, 0.218],
[0.06, 0.06, -0.06, -0.06, 0.120, -0.120, 0.120, -0.120],
[-0.1888, 0.1888, 0.1888, -0.1888, 0, 0, 0, 0 ]])
T_trans = self.T.transpose()
temp = np.dot(self.T, T_trans)
temp = np.linalg.inv(temp)
self.T_plus = np.dot(T_trans, temp)
K = np.array([40, 40, 40, 40, 40, 40, 40, 40])
self.K_ = np.diag(K)
self.K_inv = np.linalg.inv(self.K_)
def model(self, x_, v_, acc, tau) :
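        # Fossen-style 6-DOF dynamics in the body frame: given the pose x_, body
        # velocities v_, previous accelerations acc and applied forces/moments tau,
        # return the accelerations [u_dot, ..., r_dot] including added mass, linear
        # and quadratic damping, inertial coupling and weight/buoyancy restoring terms.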
##velocity
u = v_[0]
v = v_[1]
w = v_[2]
p = v_[3]
q = v_[4]
r = v_[5]
##position
#x = x_(1); y = x_(2); z = x_(3);
phi = x_[3]
theta = x_[4]
psi = x_[5]
##Force/Torque
X = tau[0]
Y = tau[1]
Z = tau[2]
K = tau[3]
M = tau[4]
N = tau[5]
u_dot = acc[0]
v_dot = acc[1]
w_dot = acc[2]
p_dot = acc[3]
q_dot = acc[4]
r_dot = acc[5]
c1 = np.cos(phi)
c2 = np.cos(theta)
c3 = np.cos(psi)
s1 = np.sin(phi)
s2 = np.sin(theta)
s3 = np.sin(psi)
t2 = np.tan(theta)
##Resulting acceleration
u_dot = ((Xu + Xuu*np.absolute(u))*u -(Z_dw + m)*q*w -(W-B)*s2 - m*Zg*q_dot + X)/(m-X_du)
v_dot = ((Yv + Yvv*np.absolute(v))*v + (Z_dw + m)*p*w + X_du*u*r + (W-B)*c2*s1 + m*Zg*p_dot + Y)/(m - Y_dv)
w_dot = ((Zw + Zww*np.absolute(w))*w - (X_du - m)*q*u + (Y_dv - m)*p*v + (W-B)*c2*c1 + Z)/(m - Z_dw)
p_dot = ((Kp + Kpp*np.absolute(p))*p - (Y_dv - Z_dw)*w*v - (M_dq - N_dr)*r*q -(Iz - Iy)*r*q - Zg*W*c2*s1 + m*Zg*v_dot + K)/(Ix - K_dp)
q_dot = ((Mq + Mqq*np.absolute(q))*q - (Z_dw - X_du)*u*w - (N_dr - K_dp)*p*r - (Ix-Iz)*p*r - Zg*W*s2 - m*Zg*u_dot + M)/(Iy-M_dq) #Iy-M_dq = 0.28
r_dot = ((Nr + Nrr*np.absolute(r))*r - (X_du - Y_dv)*u*v - (K_dp - M_dq)*p*q - (Iy - Ix)*p*q + N)/(Iz - N_dr)
acc_vec = np.array([u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])
return acc_vec
def thruster_system(self, u):
F = np.dot(self.K_, u)
tau = np.dot(self.T, F)
#print(tau)
return tau
def control_allocation(self, tau):
K_inv = np.linalg.inv(self.K_)
temp = np.dot(self.T_plus, tau)
u = np.dot(self.K_inv, temp)
return u
def kinematics(self, v, p) :
""" Given the current velocity and the previous position computes the p_dot """
roll = p[3]
pitch = p[4]
yaw = p[5]
rec = [np.cos(yaw)*np.cos(pitch), -np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll), np.sin(yaw)*np.sin(roll)+np.cos(yaw)*np.cos(roll)*np.sin(pitch),
np.sin(yaw)*np.cos(pitch), np.cos(yaw)*np.cos(roll)+np.sin(roll)*np.sin(pitch)*np.sin(yaw), -np.cos(yaw)*np.sin(roll)+np.sin(pitch)*np.sin(yaw)*np.cos(roll),
-np.sin(pitch), np.cos(pitch)*np.sin(roll), np.cos(pitch)*np.cos(roll)]
rec = np.array(rec).reshape(3,3)
to = [1.0, np.sin(roll)*np.tan(pitch), np.cos(roll)*np.tan(pitch),
0.0, np.cos(roll), -np.sin(roll),
0.0, np.sin(roll)/np.cos(pitch), np.cos(roll)/np.cos(pitch)]
to = np.array(to).reshape(3,3)
p_dot = np.zeros(6)
p_dot[0:3] = np.dot(rec, v[0:3])
p_dot[3:6] = np.dot(to, v[3:6])
return p_dot
def inv_kinematics(self, p_dot, p, a_n, dt) :
""" Given the current velocity and the previous position computes the p_dot """
roll = p[3]
pitch = p[4]
yaw = p[5]
to1 = [1.0, np.sin(roll)*np.tan(pitch), np.cos(roll)*np.tan(pitch),
0.0, np.cos(roll), -np.sin(roll),
0.0, np.sin(roll)/np.cos(pitch), np.cos(roll)/np.cos(pitch)]
to1 = np.array(to1).reshape(3,3)
to_dot_dot = (to1 - self.p_dot_prev)/dt
self.p_dot_prev = to1
to = [1.0, 0.0, -np.sin(pitch),
0.0, np.cos(roll), np.sin(roll)*np.cos(pitch),
0.0, -np.sin(roll), np.cos(roll)*np.cos(pitch)]
to = np.array(to).reshape(3,3)
v = np.zeros(3)
a_b = np.zeros(3)
v[0:3] = np.dot(to_dot_dot, p_dot[3:6])
a_b[0:3] = np.dot(to, (a_n - v[0:3]))
return a_b
# Sensor simulation
def position_sensing(self, p_real):
        pos_uncert_amplitude = 0.5 # up to 0.5 m position error
        orient_uncert_amplitude = 1.0 # up to 1 degree orientation error
p_noisy = np.zeros_like(p_real)
p_noisy[0:3] = p_real[0:3] + np.array([np.random.rand()*pos_uncert_amplitude,
np.random.rand()*pos_uncert_amplitude,
np.random.rand()*pos_uncert_amplitude])
p_noisy[3:7] = p_real[3:7] + np.array([np.random.rand()*np.pi/180.0*orient_uncert_amplitude,
np.random.rand()*np.pi/180.0*orient_uncert_amplitude,
np.random.rand()*np.pi/180.0*orient_uncert_amplitude])
return p_noisy
def velocity_sensing(self, v_real):
        lin_vel_uncert_amplitude = 0.05 # up to 0.05 m/s linear-velocity error
        ang_vel_uncert_amplitude = 0.5 # up to 0.5 deg/s angular-velocity error
v_noisy = np.zeros_like(v_real)
v_noisy[0:3] = v_real[0:3] + np.array([np.random.rand()*lin_vel_uncert_amplitude,
np.random.rand()*lin_vel_uncert_amplitude,
np.random.rand()*lin_vel_uncert_amplitude])
v_noisy[3:7] = v_real[3:7] + np.array([np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude,
np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude,
np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude])
return v_noisy
def integral(self, x_dot, x, t) :
""" Computes the integral o x dt """
x_ = (x_dot * t) + x
#print(x_)
return x_
# The main loop of the simulation
num_actuators = 8
# Settings of the simulation time in seconds
period = 0.005 # simulation time step (200 Hz)
t_max = 300.0
t_max_roll = t_max/5.0
t_max_pitch = t_max/6.0
t_max_altitud = t_max/7.0
time = np.arange(0.0,t_max,period)
time_roll = np.arange(0.0,t_max_roll,period)
time_pitch = np.arange(0.0,t_max_pitch,period)
time_depth = np.arange(0.0,t_max_altitud,period)
##Altitud Setpoint
desired_altitud = 70.0
initcond = [{'p_a':0.0,'d_a':5.0,},
{'p_a':0.0,'d_a':10.0,},
{'p_a':0.0,'d_a':15.0,},
{'p_a':0.0,'d_a':20.0,},
{'p_a':10.0,'d_a':25.0,},
{'p_a':10.0,'d_a':35.0,},
{'p_a':15.0,'d_a':50.0,},
{'p_a':50.0,'d_a':45.0,},
{'p_a':50.0,'d_a':40.0,},
{'p_a':50.0,'d_a':35.0,},
{'p_a':50.0,'d_a':30.0,},
{'p_a':50.0,'d_a':25.0,},
{'p_a':50.0,'d_a':20.0,},
{'p_a':50.0,'d_a':0.0,},]
pltnames = []
for trial in initcond:
# Input on initial conditions:
prev_altitude = trial['p_a']
desired_altitud = trial['d_a']
# Setting initial conditions:
#p = np.array(np.zeros(6))
p = np.array([0.0,0.0,prev_altitude,0.0,0.0,0.0])
v = np.array(np.zeros(6))
acc = np.array(np.zeros(6))
desired_roll = 0.0
desired_pitch = 0.0
# Initialise the position/velocity storage variable to the initial values
p_log = p
v_log = v
depth_log = 0.0
w_velocity_log = 0.0
roll_log = 0.0
p_velocity_log = 0.0
pitch_log = 0.0
q_velocity_log = 0.0
#Here you can change the resulting force for each degree of freedom
tau = np.array([[0], [0], [0], [0], [0], [0]]) # Force vector
p_dot_prev = np.zeros(6)
bluerov = Bluerov(p, v, acc, tau)
#u = bluerov.control_allocation(tau) #This function return the thruster's control input
#z_controller = Controller(1, 0.07, 0.0, 0)
p_head_controller = Controller(40.0, 0.0, 0.0)
p_controller = Controller(1.0, 0.0, 0.9, 0)
q_controller = Controller(2.5, 0.05, 0.9, 0)
q_head_controller = Controller(40.0, 0.0, 0.0)
z_controller = Controller(15, 0.0, 0.1, 0)
z_head_controller = Controller(5.0, 0.0, 5.3, 0)
z_controller.setWindup(20)
z_head_controller.setWindup(20)
count = 0
w_error = 0.0
p_error = 0.0
q_error = 0.0
w_ref = 0.0
p_ref = 0.0
q_ref = 0.0
sample1 = 0
sample2 = 0
sample3 = 0
sample4 = 0
altitude = prev_altitude
for tstep in time[1:]:
# Sensors
p_sens = bluerov.position_sensing(p)
        v_sens = bluerov.velocity_sensing(v)
altitud = p_sens[2]
roll = p_sens[3]
pitch = p_sens[4]
w_velocity = v_sens[2]
p_velocity = v_sens[3]
q_velocity = v_sens[4]
count = count + 1
#External Loop
if (count % 7) == 0:
altitude_error = desired_altitud - altitud
w_ref = z_head_controller.Update(altitude_error, tstep)
p_dot = bluerov.kinematics(v,p)
p = bluerov.integral(p_dot, p, period)
depth_log = np.vstack((depth_log, altitud))
w_velocity_log = np.vstack((w_velocity_log, w_velocity))
        if (count % 6) == 0: # every 6th step of the 200 Hz loop (~33.3 Hz)
pitch_error = desired_pitch - pitch
q_ref = q_head_controller.Update(pitch_error, tstep)
p_dot = bluerov.kinematics(v,p)
p = bluerov.integral(p_dot, p, period)
pitch_log = np.vstack((pitch_log, pitch))
q_velocity_log = np.vstack((q_velocity_log, q_velocity))
sample3 += 1
        if (count % 5) == 0: # every 5th step (40 Hz)
roll_error = desired_roll - roll
p_ref = p_head_controller.Update(roll_error, tstep)
p_dot = bluerov.kinematics(v,p)
p = bluerov.integral(p_dot, p, period)
roll_log = np.vstack((roll_log, roll))
p_velocity_log = np.vstack((p_velocity_log, p_velocity))
sample1 += 1
## Internal Loop
if (count % 4) == 0:
w_error = w_velocity - w_ref
linear_z_tau = z_controller.altitud_controller(w_error, tstep, p_sens, v_sens)
linear_tau = np.array([[0], [0], [linear_z_tau], [0], [0], [0]]) # Force vector
##This function return the thruster's control input
u = bluerov.control_allocation(linear_tau)
##Force/Torque input
tau = bluerov.thruster_system(u)
v_dot = bluerov.model(p, v, acc, tau)
acc = v_dot
v_dot = np.squeeze(np.asarray(v_dot))
v = bluerov.integral(v_dot, v, period)
        if (count % 3) == 0: # every 3rd step (~66.7 Hz)
##Controller
q_error = q_velocity - q_ref
linear_q_tau = q_controller.pitch_controller(q_error, tstep, p_sens, v_sens)
linear_tau = np.array([[0], [0], [0], [0], [linear_q_tau], [0]]) # Force vector
##This function return the thruster's control input
u = bluerov.control_allocation(linear_tau)
##Force/Torque input
tau = bluerov.thruster_system(u)
v_dot = bluerov.model(p, v, acc, tau)
acc = v_dot
v_dot = np.squeeze(np.asarray(v_dot))
v = bluerov.integral(v_dot, v, period)
sample4 += 1
        if (count % 2) == 0: # every 2nd step (100 Hz)
##Controller
p_error = p_velocity - p_ref
linear_p_tau = p_controller.roll_controller(p_error, tstep, p_sens, v_sens)
#linear_q_tau = q_controller.pitch_controller(pitch_error, tstep, p_sens, v_sens)
linear_tau = np.array([[0], [0], [0], [linear_p_tau], [0], [0]]) # Force vector
##This function return the thruster's control input
u = bluerov.control_allocation(linear_tau)
##Force/Torque input
tau = bluerov.thruster_system(u)
v_dot = bluerov.model(p, v, acc, tau)
acc = v_dot
v_dot = np.squeeze(np.asarray(v_dot))
v = bluerov.integral(v_dot, v, period)
#if tstep >= (t_max - period*10):
#print(roll)
sample2 += 1
#print(len(roll_log))
#print(len(p_velocity_log))
#print(len(time_roll))
#print(sample1)
#print(sample2)
#print(sample3)
#print(sample4)
#print(count)
fig, axs = plt.subplots(2,1, sharex='col')
axs[0].plot(time_roll, roll_log, color='magenta')
axs[0].plot([time_roll[0], time_roll[-1]], [desired_roll, desired_roll], color='k', linestyle='--', linewidth=1)
axs[0].set_title('Linear Position')
axs[0].legend(['roll (rad)',], loc="center left")
axs[1].plot(time_roll, p_velocity_log, color='magenta')
axs[1].set_title('Angular Speed')
axs[1].legend(['p (rad/s)'], loc="center left")
plt.show()
# The plotting
fig, axs = plt.subplots(2,1, sharex='col')
axs[0].plot(time_pitch, pitch_log, color='green')
axs[0].plot([time_pitch[0], time_pitch[-1]], [desired_pitch, desired_pitch], color='k', linestyle='--', linewidth=1)
axs[0].set_title('Linear Position')
axs[0].legend(['pitch (rad)',], loc="center left")
axs[1].plot(time_pitch, q_velocity_log, color='green')
axs[1].set_title('Angular Speed')
axs[1].legend(['q (rad/s)'], loc="center left")
plt.show()
# Depth plotting
fig, axs = plt.subplots(2,1, sharex='col')
axs[0].plot(time_depth, depth_log)
axs[0].plot([time_depth[0], time_depth[-1]], [trial['d_a'], trial['d_a']], color='k', linestyle='--', linewidth=1)
axs[0].set_ylim([-10.0,60.0])
axs[0].set_title('Position, start depth=%.2f, desir. depth=%.2f'%(trial['p_a'],trial['d_a']))
axs[0].legend(['z',], loc="center left")
axs[1].plot(time_depth, w_velocity_log)
axs[1].set_ylim([-2.5,2.5])
axs[1].set_title('Speed start depth=%.2f, desir. depth=%.2f'%(trial['p_a'],trial['d_a']))
axs[1].legend(['w'], loc="center left")
plt.show()
#plt.savefig('plot-p_a=%f,d_a=%f.png'%(trial['p_a'],trial['d_a']), dpi=150)
#pltnames.append('plot-p_a=%f,d_a=%f.png'%(trial['p_a'],trial['d_a']))
###Output
_____no_output_____ |
python/figure_notebooks/suppfig8_9.ipynb | ###Markdown
SKJ quotient analysis plots
###Code
ivunitsall = ['°C','°C','m','psu',
'umol/kg','kPa','m',
'log(mg/m$^3$)','m','cm']
ivnicenamesall = ['Sea surface temperature (SST)','T$_{100m}$',
'Thermocline depth (TCD)','Sea surface salinity (SSS)',
'O$_{2,100m}$','PO$_{2,100m}$','Tuna hypoxic depth (THD)',
'log(Chorophyll a)','Mixed layer depth (MLD)',
'Sea surface height anomaly (SSHA)']
ivshortnicenamesall = ['SST','T$_{100m}$','TCD','SSS',
'O$_{2,100m}$','PO$_{2,100m}$','THD',
'log(CHL)','MLD','SSHA']
dvnicenamesall = ['Skipjack CPUE','Skipjack CPUE','Skipjack CPUE','Skipjack CPUE',
'Skipjack CPUE','Skipjack CPUE','Skipjack CPUE',
'Skipjack CPUE','Skipjack CPUE','Skipjack CPUE']
ivall = [iv_sstskjcp, iv_temp100skjcp, iv_tcdskjcp, iv_sssskjcp,
iv_o2100skjcp, iv_po2100skjcp, iv_thdskjcp,
iv_logchlskjcp, iv_mldskjcp, iv_sshaskjcp]
binedgesall = [binedges_sstskjcp, binedges_temp100skjcp, binedges_tcdskjcp, binedges_sssskjcp,
binedges_o2100skjcp, binedges_po2100skjcp, binedges_thdskjcp,
binedges_logchlskjcp, binedges_mldskjcp, binedges_sshaskjcp]
bincentersall = [bincenters_sstskjcp, bincenters_temp100skjcp, bincenters_tcdskjcp, bincenters_sssskjcp,
bincenters_o2100skjcp, bincenters_po2100skjcp, bincenters_thdskjcp,
bincenters_logchlskjcp, bincenters_mldskjcp, bincenters_sshaskjcp]
ivcountsall = [ivcounts_sstskjcp, ivcounts_temp100skjcp, ivcounts_tcdskjcp, ivcounts_sssskjcp,
ivcounts_o2100skjcp, ivcounts_po2100skjcp, ivcounts_thdskjcp,
ivcounts_logchlskjcp, ivcounts_mldskjcp, ivcounts_sshaskjcp]
dvcountsall = [dvcounts_sstskjcp, dvcounts_temp100skjcp, dvcounts_tcdskjcp, dvcounts_sssskjcp,
dvcounts_o2100skjcp, dvcounts_po2100skjcp, dvcounts_thdskjcp,
dvcounts_logchlskjcp, dvcounts_mldskjcp, dvcounts_sshaskjcp]
dvquotall = [dvquot_sstskjcp, dvquot_temp100skjcp, dvquot_tcdskjcp, dvquot_sssskjcp,
dvquot_o2100skjcp, dvquot_po2100skjcp, dvquot_thdskjcp,
dvquot_logchlskjcp, dvquot_mldskjcp, dvquot_sshaskjcp]
qlimsreplaceTall = [qlimsreplaceT_sstskjcp, qlimsreplaceT_temp100skjcp, qlimsreplaceT_tcdskjcp, qlimsreplaceT_sssskjcp,
qlimsreplaceT_o2100skjcp, qlimsreplaceT_po2100skjcp, qlimsreplaceT_thdskjcp,
qlimsreplaceT_logchlskjcp, qlimsreplaceT_mldskjcp, qlimsreplaceT_sshaskjcp]
qlimsreplaceFall = [qlimsreplaceT_sstskjcp, qlimsreplaceT_temp100skjcp, qlimsreplaceT_tcdskjcp, qlimsreplaceT_sssskjcp,
qlimsreplaceT_o2100skjcp, qlimsreplaceT_po2100skjcp, qlimsreplaceT_thdskjcp,
qlimsreplaceT_logchlskjcp, qlimsreplaceT_mldskjcp, qlimsreplaceT_sshaskjcp]
plotbarorhist='bar'; plotlegend=1;
plotqlimsreplaceT=1; plotqlimsreplaceF=0
nrows=5; ncols=2
fig,axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16,18))
isp = 0 # subplot index
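# Loop over the ten environmental variables (one subplot per variable); for each panel
# the helper script helper_scripts/plot_qa.py is exec'd and reads the iv*/dv* variables
# selected above.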
for yax in range(0,nrows):
for xax in range(0,ncols):
ivunits = ivunitsall[isp]
ivnicename = ivnicenamesall[isp]
ivshortnicename = ivshortnicenamesall[isp]
dvnicename = dvnicenamesall[isp]
iv = ivall[isp]
binedges = binedgesall[isp]; bincenters = bincentersall[isp]
ivcounts = ivcountsall[isp]; dvcounts = dvcountsall[isp]
dvquot = dvquotall[isp]
qlimsreplaceT = qlimsreplaceTall[isp]
qlimsreplaceF = qlimsreplaceFall[isp]
if ncols>1 and nrows>1:
ax = axes[yax][xax]
elif ncols==1 and nrows>1:
ax = axes[yax]
elif ncols>1 and nrows==1:
ax = axes[xax]
exec(open('helper_scripts/plot_qa.py').read())
ax.text(-0.03, 1.03, string.ascii_uppercase[isp],
transform=ax.transAxes, size=13, weight='bold')
isp = isp+1
fig.tight_layout()
fig.savefig(figpath + 'S8_fig.pdf',
bbox_inches='tight', pad_inches = 0, dpi = 300)
fig.savefig(figpath + 'S8_fig.png',
bbox_inches='tight', pad_inches = 0, dpi = 300)
###Output
_____no_output_____
###Markdown
BET quotient analysis plots
###Code
ivunitsall = ['°C','°C','m','psu',
'umol/kg','kPa','m',
'log(mg/m$^3$)','m','cm']
ivnicenamesall = ['Sea surface temperature (SST)','T$_{100m}$',
'Thermocline depth (TCD)','Sea surface salinity (SSS)',
'O$_{2,100m}$','PO$_{2,100m}$','Tuna hypoxic depth (THD)',
'log(Chorophyll a)','Mixed layer depth (MLD)',
'Sea surface height anomaly (SSHA)']
ivshortnicenamesall = ['SST','T$_{100m}$','TCD','SSS',
'O$_{2,100m}$','PO$_{2,100m}$','THD',
'log(CHL)','MLD','SSHA']
dvnicenamesall = ['Bigeye CPUE','Bigeye CPUE','Bigeye CPUE','Bigeye CPUE',
'Bigeye CPUE','Bigeye CPUE','Bigeye CPUE',
'Bigeye CPUE','Bigeye CPUE','Bigeye CPUE']
ivall = [iv_sstbetcp, iv_temp100betcp, iv_tcdbetcp, iv_sssbetcp,
iv_o2100betcp, iv_po2100betcp, iv_thdbetcp,
iv_logchlbetcp, iv_mldbetcp, iv_sshabetcp]
binedgesall = [binedges_sstbetcp, binedges_temp100betcp, binedges_tcdbetcp, binedges_sssbetcp,
binedges_o2100betcp, binedges_po2100betcp, binedges_thdbetcp,
binedges_logchlbetcp, binedges_mldbetcp, binedges_sshabetcp]
bincentersall = [bincenters_sstbetcp, bincenters_temp100betcp, bincenters_tcdbetcp, bincenters_sssbetcp,
bincenters_o2100betcp, bincenters_po2100betcp, bincenters_thdbetcp,
bincenters_logchlbetcp, bincenters_mldbetcp, bincenters_sshabetcp]
ivcountsall = [ivcounts_sstbetcp, ivcounts_temp100betcp, ivcounts_tcdbetcp, ivcounts_sssbetcp,
ivcounts_o2100betcp, ivcounts_po2100betcp, ivcounts_thdbetcp,
ivcounts_logchlbetcp, ivcounts_mldbetcp, ivcounts_sshabetcp]
dvcountsall = [dvcounts_sstbetcp, dvcounts_temp100betcp, dvcounts_tcdbetcp, dvcounts_sssbetcp,
dvcounts_o2100betcp, dvcounts_po2100betcp, dvcounts_thdbetcp,
dvcounts_logchlbetcp, dvcounts_mldbetcp, dvcounts_sshabetcp]
dvquotall = [dvquot_sstbetcp, dvquot_temp100betcp, dvquot_tcdbetcp, dvquot_sssbetcp,
dvquot_o2100betcp, dvquot_po2100betcp, dvquot_thdbetcp,
dvquot_logchlbetcp, dvquot_mldbetcp, dvquot_sshabetcp]
qlimsreplaceTall = [qlimsreplaceT_sstbetcp, qlimsreplaceT_temp100betcp, qlimsreplaceT_tcdbetcp, qlimsreplaceT_sssbetcp,
qlimsreplaceT_o2100betcp, qlimsreplaceT_po2100betcp, qlimsreplaceT_thdbetcp,
qlimsreplaceT_logchlbetcp, qlimsreplaceT_mldbetcp, qlimsreplaceT_sshabetcp]
qlimsreplaceFall = [qlimsreplaceT_sstbetcp, qlimsreplaceT_temp100betcp, qlimsreplaceT_tcdbetcp, qlimsreplaceT_sssbetcp,
qlimsreplaceT_o2100betcp, qlimsreplaceT_po2100betcp, qlimsreplaceT_thdbetcp,
qlimsreplaceT_logchlbetcp, qlimsreplaceT_mldbetcp, qlimsreplaceT_sshabetcp]
plotbarorhist='bar'; plotlegend=1;
plotqlimsreplaceT=1; plotqlimsreplaceF=0
nrows=5; ncols=2
fig,axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16,18))
isp = 0 # subplot index
for yax in range(0,nrows):
for xax in range(0,ncols):
ivunits = ivunitsall[isp]
ivnicename = ivnicenamesall[isp]
ivshortnicename = ivshortnicenamesall[isp]
dvnicename = dvnicenamesall[isp]
iv = ivall[isp]
binedges = binedgesall[isp]; bincenters = bincentersall[isp]
ivcounts = ivcountsall[isp]; dvcounts = dvcountsall[isp]
dvquot = dvquotall[isp]
qlimsreplaceT = qlimsreplaceTall[isp]
qlimsreplaceF = qlimsreplaceFall[isp]
if ncols>1 and nrows>1:
ax = axes[yax][xax]
elif ncols==1 and nrows>1:
ax = axes[yax]
elif ncols>1 and nrows==1:
ax = axes[xax]
exec(open('helper_scripts/plot_qa.py').read())
ax.text(-0.03, 1.03, string.ascii_uppercase[isp],
transform=ax.transAxes, size=13, weight='bold')
isp = isp+1
fig.tight_layout()
fig.savefig(figpath + 'S9_fig.pdf',
bbox_inches='tight', pad_inches = 0, dpi = 300)
fig.savefig(figpath + 'S9_fig.png',
bbox_inches='tight', pad_inches = 0, dpi = 300)
###Output
_____no_output_____ |
DataCamp Ensemble Learning blog/Ensemble Learning Comparison.ipynb | ###Markdown
Ensembles in Action
###Code
# Bagged Decision Trees for Classification
from sklearn import model_selection
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
array = normalized_array
X = array[:,0:9]
Y = array[:,9]
seed = 7
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cart = DecisionTreeClassifier()
num_trees = 100
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
# AdaBoost Classification
from sklearn.ensemble import AdaBoostClassifier
seed = 7
num_trees = 30
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = AdaBoostClassifier(n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
# Stochastic Gradient Boosting Classification
from sklearn.ensemble import GradientBoostingClassifier
seed = 7
num_trees = 100
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
# Voting Ensemble for Classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
kfold = model_selection.KFold(n_splits=10, random_state=seed)
# create the sub models
estimators = []
model1 = LogisticRegression()
estimators.append(('logistic', model1))
model2 = DecisionTreeClassifier()
estimators.append(('cart', model2))
model3 = SVC()
estimators.append(('svm', model3))
# create the ensemble model
ensemble = VotingClassifier(estimators)
results = model_selection.cross_val_score(ensemble, X, Y, cv=kfold)
print(results.mean())
###Output
0.962857142857
|
finalCombined.ipynb | ###Markdown
Seed
###Code
import matplotlib.pyplot as plt
from time import time
import matplotlib.pyplot as plt
import pandas as pd
import cv2 as cv
seed_value= 0
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
import random
random.seed(seed_value)
import numpy as np
np.random.seed(seed_value)
import tensorflow as tf
tf.set_random_seed(seed_value)
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Flatten, Dense, Dropout, Convolution2D, Conv2D, MaxPooling2D, Lambda, GlobalMaxPooling2D, GlobalAveragePooling2D, BatchNormalization, Activation, AveragePooling2D, Concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.utils import np_utils
from keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
###Output
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.7/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Using TensorFlow backend.
###Markdown
Read Dataset
###Code
def getOneHot(a):
b = np.zeros((a.size, a.max()+1))
b[np.arange(a.size),a] = 1
return b
def getData(path, folders):
path+='/'
labelNames = folders
labels = []
images = []
temp = []
for i in range(len(folders)):
randomImages = []
imgAddress = path + folders[i]
l = os.listdir(imgAddress)
for img in l:
frame = cv.imread(imgAddress+'/'+img)
temp.append((i,frame))
random.shuffle(temp)
for x in temp:
temp1, temp2 = (x)
labels.append(temp1)
images.append(temp2)
labels = getOneHot(np.asarray(labels))
images = np.asarray(images)
return images, labels
def preprocess(a):
b = []
for action_frame in a:
hsv = cv.cvtColor(action_frame, cv.COLOR_BGR2HSV)
lower_color = np.array([0, 10, 60])
upper_color = np.array([20, 150, 255])
mask = cv.inRange(hsv, lower_color, upper_color)
res = cv.bitwise_and(action_frame,action_frame, mask= mask)
b.append(res)
return b
labelNames = ['next', 'previous', 'stop']
x_train, y_train = getData('completeData', labelNames)
x_train = np.asarray(preprocess(x_train))
###Output
_____no_output_____
###Markdown
Show images
###Code
def showImg(images, labels, labelNames):
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
        n = random.randint(0, len(images) - 1)
plt.imshow(images[n])
plt.xlabel(labelNames[np.argmax(labels[n])])
plt.show()
print("Training data:")
showImg(x_train, y_train, labelNames)
###Output
Training data:
###Markdown
CNN architecture
###Code
def get_model(layer1Filter = 16, layer2Filter = 32, layer3Filter = 64, layer4Output = 400, optim = 'adam'):
x = Input((50, 50, 3))
model = BatchNormalization(axis = 3)(x)
model = Convolution2D(filters = layer1Filter, kernel_size = (3,3), activation='relu')(model)
model = MaxPooling2D()(model)
model = Dropout(0.5, seed = seed_value)(model)
model = BatchNormalization(axis = 3)(model)
model = Convolution2D(filters = layer2Filter, kernel_size = (3,3), activation='relu')(model)
model = MaxPooling2D()(model)
model = Dropout(0.5, seed = seed_value)(model)
model = BatchNormalization(axis = 3)(model)
model = Convolution2D(filters = layer3Filter, kernel_size = (3,3), activation='relu')(model)
model = MaxPooling2D()(model)
model = Dropout(0.5, seed = seed_value)(model)
model = Flatten()(model)
model = Dense(layer4Output , activation = 'relu')(model)
model = Dropout(0.5, seed = seed_value)(model)
model = Dense(3, activation = 'softmax')(model)
model = Model(input = x, output = model)
if optim == 'adam':
opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
else:
opt = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
model.compile(opt, loss='binary_crossentropy', metrics=['accuracy'])
return model
def get_callbacks(name_weights, patience_lr):
mcp_save = ModelCheckpoint(name_weights, save_best_only=True, monitor='loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='loss', factor=0.75, patience=patience_lr, verbose=1, epsilon=1e-4)
return [mcp_save, reduce_lr_loss]
def train_model(x_train, y_train, layer4Output, optim, epochs):
name_weights = "final_model_weights_complete.h5"
model = get_model(layer4Output = layer4Output, optim = optim)
callbacks = get_callbacks(name_weights = name_weights, patience_lr=2)
model.fit(x = x_train, y = y_train, epochs = epochs, callbacks=callbacks)
return model
model = train_model(x_train, y_train, 400, 'adam', 20)
###Output
WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/keras/backend/tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.
###Markdown
Prediction
###Code
cap = cv.VideoCapture(0)
while True:
ret, frame = cap.read()
if not ret:
print("Unable to capture video")
break
frame = cv.flip( frame, 1 )
frame2 = cv.resize(frame, (50, 50))
a = []
a.append(frame2)
a = np.array(preprocess(a))
prob = model.predict(a)
pred = np.argmax(prob)
if prob[0][pred] < 0.9:
s = "Background " + str(prob[0])
else:
s = labelNames[pred] + " " + str(prob[0][pred])
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,s,(40,40),font,0.70,(0,0,255),2)
cv.imshow('frame', frame)
cv.imshow('frame2', a[0])
if cv.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Using trained model
###Code
def load_trained_model(weights_path):
model= get_model()
model.load_weights(weights_path)
return model
model = load_trained_model("/home/ankit/Desktop/Acad/Sem/Sem5/COL780/Assn/4/weightsComplete.h5")
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:23: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor("in..., outputs=Tensor("de...)`
|
BHAH/Tutorial-BlackHolesAtHome-BOINC_applications-Using_the_WrapperApp.ipynb | ###Markdown
BlackHoles@Home Tutorial: Creating a `BOINC` app using the `WrapperApp` Author: Leo Werneck This tutorial notebook demonstrates how to write a program that runs in the `BOINC` infrastructure using the `WrapperApp` **WARNING**: this tutorial notebook is currently incompatible with Windows Introduction:The [BlackHoles@Home](http://blackholesathome.net/) project allows users to volunteer CPU time so a large number of binary black hole simulations can be performed. The objective is to create a large catalog of [gravitational waveforms](https://en.wikipedia.org/wiki/Gravitational_wave), which can be used by observatories such as [LIGO](https://www.ligo.org), [VIRGO](https://www.virgo-gw.eu), and, in the future, [LISA](https://lisa.nasa.gov) in order to infer what was the source of a detected gravitational wave.BlackHoles@Home is destined to run on the [BOINC](https://boinc.berkeley.edu) infrastructure (alongside [Einstein@Home](https://einsteinathome.org/) and [many other great projects](https://boinc.berkeley.edu/projects.php)), enabling anyone with a computer to contribute to the construction of the largest numerical relativity gravitational wave catalogs ever produced. Additional Reading Material:* [BOINC's Wiki page](https://boinc.berkeley.edu/trac/wiki)* [BOINC's WrapperApp Wiki page](https://boinc.berkeley.edu/trac/wiki/WrapperApp) Table of Contents$$\label{toc}$$This tutorial explains how to use the `BOINC` wrapper application to run a simple program. The structure of this notebook is as follows:1. [Step 1](introduction): Introduction1. [Step 2](compiling_wrapper_app): Compiling the `BOINC` wrapper app for your platform1. [Step 3](using_wrapper_app): Using the `BOINC` wrapper app 1. [Step 3.a](the_main_application): The main application 1. [Step 3.b](compiling_the_main_application): Compiling the main application 1. [Step 3.c](job_xml): The `job.xml` file 1. [Step 3.c.i](simple_job_xml): A very simple `job.xml` file 1. [Step 3.c.ii](job_xml_output_redirect_and_zip): Redirecting and zipping output files1. [Step 4](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Introduction \[Back to [top](toc)\]$$\label{introduction}$$The [`WrapperApp`](https://boinc.berkeley.edu/trac/wiki/WrapperApp) is the simplest way of converting an existing program into a `BOINC`-compatible application. The program that will actually be running is the `WrapperApp`, and it will take care of:* Interfacing with the `BOINC` libraries* Running the original program* Handling input/output filesLet us assume a simple `BOINC` application, which is made out of only one program, `bhah_test_app`. The directory of this application should then contain the following files:* The application file `bhah_test_app` with the name format `appname_version_platform`.* The `WrapperApp` file with the name format `WrapperAppname_version_platform`.* The `WrapperApp` configuration file, which we will typically call `appname_version_job.xml`.* The application version file, which is called `version.xml`.We note that the application we will create in this tutorial notebook is analogous to the native `BOINC` application we create in [this tutorial notebook](Tutorial-BlackHolesAtHome-BOINC_applications-Native_applications.ipynb), and thus reading that tutorial notebook is also recommended. 
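For concreteness, a hypothetical app version directory following the naming scheme above (the app name, version number, and platform string here are purely illustrative, not taken from an actual deployment) might contain: `bhah_test_app_1.00_x86_64-pc-linux-gnu` (the main application), `wrapper_1.00_x86_64-pc-linux-gnu` (the wrapper executable), `bhah_test_app_1.00_job.xml` (the wrapper configuration), and `version.xml` (the app version file).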
Step 2: Compiling the `BOINC` wrapper app for your platform \[Back to [top](toc)\]$$\label{compiling_wrapper_app}$$
###Code
# Step 2: Compiling the BOINC wrapper app
# Step 2.a: Load needed Python modules
import os,sys
# Step 2.b: Add NRPy's root directory to the sys.path()
sys.path.append("..")
# Step 2.c: Load NRPy+'s command line helper module
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Step 2.d: Set the path to the BOINC source code
path_to_boinc = "~/bhah/boinc"
current_path = os.getcwd()
# Step 2.e: Check the platform and adjust the compilation command accordingly
if sys.platform == "linux":
wrapper_compile = "make"
elif sys.platform == "darwin":
wrapper_compile = "source BuildMacWrapper.sh"
else:
print("Unsupported platform: "+sys.platform)
sys.exit(1)
# Step 2.f: Compile the wrapper app
!cd $path_to_boinc/samples/wrapper && $wrapper_compile
# Step 2.g: Copy the wrapper app to the current working directory
!cp $path_to_boinc/samples/wrapper/wrapper $current_path
###Output
_____no_output_____
###Markdown
Step 3: Using the `BOINC` wrapper app \[Back to [top](toc)\]$$\label{using_wrapper_app}$$Once we have everything set up, using the wrapper app is as simple as running```bash$: ./wrapper```The following steps will describe how to set the configuration files so that the wrapper app works as you expect it to. Step 3.a: The main application \[Back to [top](toc)\]$$\label{the_main_application}$$Let us start by writing a simple application which we will run using the `BOINC` wrapper app. In order for us to be able to see some additional configuration features of the wrapper app, we will make our main application slightly more complicated than a simple "Hello World!" program.This application takes any number of command line arguments and then prints them to `stdout`, `stderr`, and an output text file.
###Code
%%writefile simple_app.c
// Step 0: Load all the necessary C header files
#include <stdio.h>
#include <stdlib.h>
// Program description: this program is just a slightly
// more complicated version of the
// "Hello World!" program, where
// we will be taking some command
// line inputs and printing them to
// stdout, stderr, and an output file.
int main( int argc, char** argv ) {
// Step 1: Check correct usage
if( argc == 1 ) {
fprintf(stderr,"(ERROR) Correct usage is ./simple_app <command_line_arguments>\n");
exit(1);
}
// Step 2: Print all command line arguments to
// stdout, stderr, and an output file
//
// Step 2.a: Open the output file
// Step 2.a.i: Set the output file name
char filename[100] = "output_file.txt";
// Step 2.a.ii: Open the file
FILE* filept = fopen(filename,"w");
// Step 2.a.iii: Check everything is OK
if( !filept ) {
fprintf(stderr,"(ERROR) Could not open file %s\n.",filename);
exit(1);
}
// Step 2.b: Print an information line
fprintf(stdout,"(INFO) Got the following command line arguments:");
fprintf(stderr,"(INFO) Got the following command line arguments:");
fprintf(filept,"(INFO) Got the following command line arguments:");
// Step 2.c: Loop over the command line arguments, printing
// them to stdout, stderr, and our output file
for(int i=1;i<argc;i++) {
fprintf(stdout," %s",argv[i]);
fprintf(stderr," %s",argv[i]);
fprintf(filept," %s",argv[i]);
}
// Step 2.d: Add a line break to the output
fprintf(stdout,"\n");
fprintf(stderr,"\n");
fprintf(filept,"\n");
// Step 2.d: Close the output file
fclose(filept);
// All done!
return 0;
}
###Output
_____no_output_____
###Markdown
Step 3.b: Compiling the main application \[Back to [top](toc)\]$$\label{compiling_the_main_application}$$We now compile the main application using NRPy+'s `cmdline_helper` module.
###Code
cmd.C_compile("simple_app.c","simple_app")
###Output
_____no_output_____
###Markdown
Step 3.c: The `job.xml` file \[Back to [top](toc)\]$$\label{job_xml}$$Let's see what happens if we try running the wrapper app:
###Code
!rm -f job.xml
cmd.Execute("wrapper")
###Output
_____no_output_____
###Markdown
As can be seen above, the `BOINC` wrapper application requests an input file, `job.xml`, to be present in the current working directory. We will now set up a `job.xml` file for the wrapper app in a way that it works correctly with our `simple_app`. A `job.xml` has the following syntax:```xml ...task_options... ...additional_options...```All the configurations for the wrapper application are enclosed by the `job_desc` environment. To configure the wrapper to work with our specific application, we provide the `task_options`, while `additional_options` can be provided for additional configuration, as we will see. Step 3.c.i: A very simple `job.xml` file \[Back to [top](toc)\]$$\label{simple_job_xml}$$First, let us start with a very basic configuration: let us ask the wrapper to run our simple application using the command line arguments `1 2 3 4 testing hello world 4 3 2 1`. This is achieved with the following `job.xml` file:
###Code
%%writefile job.xml
<job_desc>
<task>
<application>simple_app</application>
<command_line>1 2 3 4 testing hello world 4 3 2 1</command_line>
</task>
</job_desc>
###Output
_____no_output_____
###Markdown
Let us now copy everything into a new, fresh directory and run our wrapper application.
###Code
!rm -rf wrapper_app_test
cmd.mkdir("wrapper_app_test")
!cp wrapper simple_app job.xml wrapper_app_test && cd wrapper_app_test && ./wrapper && ls
###Output
_____no_output_____
###Markdown
Note that after execution, we see the output `(INFO) Got the following command line arguments: 1 2 3 4 testing hello world 4 3 2 1` printed to `stdout`. If we examine the file `output_file.txt`, we will see the same output:
###Code
!cat output_file.txt
###Output
_____no_output_____
###Markdown
The `stderr.txt` file is automatically generated by the `BOINC` wrapper app, and it contains all the output which was sent to `stderr`. We see that while we also have the expected output in it, there is also some additional information which was generated by the wrapper app:
###Code
!cat stderr.txt
###Output
_____no_output_____
###Markdown
Aditionally, we see that the wrapper application has created two additional files: the `boinc_finish_called` and the `wrapper_checkpoint.txt`. For our purposes, the `wrapper_checkpoint.txt` file is irrelevant, so we will ignore it for now. The `boinc_finish_called` file contains the numerical value returned by our program, `simple_app`. As is usual in `C`, if the return value is `0`, then the execution was successful, while a non-zero value indicates an error:
###Code
!cat boinc_finish_called
###Output
_____no_output_____
###Markdown
Step 3.c.ii: Redirecting and zipping output files \[Back to [top](toc)\]$$\label{job_xml_output_redirect_and_zip}$$Now that we have seen the simplest possible case, let us look at something slightly more complicated. The following `job.xml` file asks the wrapper app to perform the following tasks:1. Run the `simple_app` application with command line arguments `1 2 3 4 testing hello world 4 3 2 1`1. Redirect all `stdout` output to the file `simple_app.out`1. Redirect all `stderr` output to the file `simple_app.err`1. Zip all the output files we have seen before into a single file: `output.zip`
###Code
%%writefile job.xml
<job_desc>
<task>
<application>simple_app</application>
<command_line>1 2 3 4 testing hello world 4 3 2 1</command_line>
<stdout_filename>simple_app.out</stdout_filename>
<stderr_filename>simple_app.err</stderr_filename>
</task>
<zip_output>
<zipfilename>output.zip</zipfilename>
<filename>simple_app.out</filename>
<filename>simple_app.err</filename>
<filename>output_file.txt</filename>
<filename>boinc_finish_called</filename>
<filename>wrapper_checkpoint.txt</filename>
</zip_output>
</job_desc>
###Output
_____no_output_____
###Markdown
Now let us see what happens when we run the wrapper app:
###Code
!rm -rf wrapper_app_test
cmd.mkdir("wrapper_app_test")
!cp wrapper simple_app job.xml wrapper_app_test && cd wrapper_app_test && ./wrapper && ls
###Output
_____no_output_____
###Markdown
Notice that we now have the output files `simple_app.out` and `simple_app.err`, as expected. The file `stderr.txt` is still present, by default. We also have all our output files neatly collected into a single zip file, `output.zip`. Note that zipping the output is not done with the goal of reducing the overall size of the output, but because it easier to communicate the output files back to the `BOINC` server. Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-BlackHolesAtHome-BOINC_applications-Using_the_WrapperApp.pdf](Tutorial-BlackHolesAtHome-BOINC_applications-Using_the_WrapperApp.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!cp ../latex_nrpy_style.tplx .
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-BlackHolesAtHome-BOINC_applications-Using_the_WrapperApp")
!rm -f latex_nrpy_style.tplx
###Output
_____no_output_____ |
training/dlscore04_train.ipynb | ###Markdown
Training workflow for DLScore version 3 Changes: With sensoring added. (But I don't think that helps, just to give it a try!)
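As a quick illustration of what the `sensoring` (clipping) step defined in the next cell does — a toy sketch on made-up numbers, not part of the actual training pipeline:

```python
import numpy as np

true = np.array([2.0, 5.0, 7.5])   # observed target values
pred = np.array([1.2, 6.0, 9.3])   # raw model predictions

# Clip predictions to the range of the observed targets (same logic as sensoring())
clipped = np.maximum(np.minimum(pred, true.max()), true.min())
print(clipped)  # [2.  6.  7.5]  -- outliers pulled back into the observed range
```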
###Code
from __future__ import print_function
import numpy as np
import pandas as pd
import keras
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras import backend as K
from keras import regularizers
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.utils.training_utils import multi_gpu_model
from keras.utils import plot_model
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
import random
import os.path
import itertools
import pickle
import json
from tqdm import *
import glob
import re
import csv
import multiprocessing as mp
from tqdm import *
random.seed(12345)
# Sensoring outliers
def sensoring(true, pred):
""" Sensor the predicted data to get rid of outliers"""
mn = np.min(true)
mx = np.max(true)
pred = np.minimum(pred, mx)
pred = np.maximum(pred, mn)
return pred
def split_data(x, y, pdb_ids, valid_size=0.1, test_size=0.1):
"""Converts the pandas dataframe into a matrix.
Splits the data into train, test and validations set.
Returns numpy arrays"""
# Load the indices of the non-zero columns.
# The same indices need to be used during the evaluation of test data
#with open("nonzero_column_indices.pickle", "rb") as f:
# non_zero_columns = pickle.load(f)
# Filter the zero columns out
#data = data[:, non_zero_columns]
pdb_ids = np.array(pdb_ids)
# Validation set
val_count = int(x.shape[0]*valid_size) # Number of examples to take
val_ids = np.random.choice(x.shape[0], val_count) # Select rows randomly
val_x = x[val_ids, :]
val_y = y[val_ids]
# Save the pdb ids of the validation set in disk
with open('val_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[val_ids], f)
# Remove validation set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[val_ids] = False
x = x[mask, :]
y = y[mask]
pdb_ids = pdb_ids[mask]
# Test set
test_count = int(x.shape[0]*test_size)
test_ids = np.random.choice(x.shape[0], test_count)
test_x = x[test_ids, :]
test_y = y[test_ids]
# Save the pdb ids of the test set in disk
with open('test_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[test_ids], f)
# Remove test set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[test_ids] = False
x = x[mask, :]
y = y[mask]
return x, y, val_x, val_y, test_x, test_y
def train_test_split(x, y, pdb_ids, test_size=0.1):
"""Converts the pandas dataframe into a matrix.
Splits the data into train, test and validations set.
Returns numpy arrays"""
# Load the indices of the non-zero columns.
# The same indices need to be used during the evaluation of test data
#with open("nonzero_column_indices.pickle", "rb") as f:
# non_zero_columns = pickle.load(f)
# Filter the zero columns out
#data = data[:, non_zero_columns]
pdb_ids = np.array(pdb_ids)
# Test set
test_count = int(x.shape[0]*test_size)
test_ids = np.random.choice(x.shape[0], test_count)
test_x = x[test_ids, :]
test_y = y[test_ids]
# Save the pdb ids of the test set in disk
with open('test_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[test_ids], f)
# Remove test set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[test_ids] = False
x = x[mask, :]
y = y[mask]
return x, y, test_x, test_y
# Build the model
def get_model(x_size, hidden_layers, dr_rate=0.5, l2_lr=0.01):
model = Sequential()
model.add(Dense(hidden_layers[0], activation="relu", kernel_initializer='normal', input_shape=(x_size,)))
model.add(Dropout(0.2))
for i in range(1, len(hidden_layers)):
model.add(Dense(hidden_layers[i],
activation="relu",
kernel_initializer='normal',
kernel_regularizer=regularizers.l2(l2_lr),
bias_regularizer=regularizers.l2(l2_lr)))
model.add(Dropout(dr_rate))
model.add(Dense(1, activation="linear"))
return(model)
# def get_hidden_layers():
# x = [128, 256, 512, 768, 1024, 2048]
# hl = []
# for i in range(1, len(x)):
# hl.extend([p for p in itertools.product(x, repeat=i+1)])
# return hl
def run(output_dir, serial=0):
if serial:
print('Running in parallel')
else:
print('Running standalone')
# Create the output directory
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Preprocess the data
pdb_ids = []
x = []
y = []
with open('Data_new.csv', 'r') as f:
reader = csv.reader(f)
next(reader, None) # Skip the header
for row in reader:
pdb_ids.append(str(row[0]))
x.append([float(i) for i in row[1:349]])
y.append(float(row[349]))
x = np.array(x, dtype=np.float32)
y = np.array(y, dtype=np.float32)
# Normalize the data
mean = np.mean(x, axis=0)
std = np.std(x, axis=0) + 0.00001
x_n = (x - mean) / std
# Write things down
transform = {}
transform['std'] = std
transform['mean'] = mean
with open(output_dir + 'transform.pickle', 'wb') as f:
pickle.dump(transform, f)
# Read the 'best' hidden layers
with open("best_hidden_layers.pickle", "rb") as f:
hidden_layers = pickle.load(f)
# Determine if running all alone or in parts (if in parts, assuming 8)
if serial:
chunk_size = (len(hidden_layers)//8) + 1
hidden_layers = [hidden_layers[i*chunk_size:i*chunk_size+chunk_size] for i in range(8)][serial-1]
# Network parameters
epochs = 100
batch_size = 128
keras_callbacks = [EarlyStopping(monitor='val_mean_squared_error',
min_delta = 0,
patience=20,
verbose=0)
]
# Split the data into training and test set
train_x, train_y, test_x, test_y = train_test_split(x_n, y, pdb_ids, test_size=0.1)
#train_x, train_y, val_x, val_y, test_x, test_y = split_data(x_n, y, pdb_ids)
pbar = tqdm_notebook(total=len(hidden_layers),
desc='GPU: ' + str(serial))
for i in range(len(hidden_layers)):
if serial:
model_name = 'model_' + str(serial) + '_' + str(i)
else:
model_name = 'model_' + str(i)
# Set dynamic memory allocation in a specific gpu
config = K.tf.ConfigProto()
config.gpu_options.allow_growth = True
if serial:
config.gpu_options.visible_device_list = str(serial-1)
K.set_session(K.tf.Session(config=config))
# Build the model
model = get_model(train_x.shape[1], hidden_layers=hidden_layers[i])
# Save the model
with open(output_dir + model_name + ".json", "w") as json_file:
json_file.write(model.to_json())
if not serial:
# If not running with other instances then use 4 GPUs
model = multi_gpu_model(model, gpus=4)
model.compile(
loss='mean_squared_error',
optimizer=keras.optimizers.Adam(lr=0.001),
metrics=[metrics.mse])
#Save the initial weights
ini_weights = model.get_weights()
# 10 fold cross validation
kf = KFold(n_splits=10)
val_fold_score = 0.0
train_fold_score = 0.0
for _i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
# Reset the weights
model.set_weights(ini_weights)
# Train the model
train_info = model.fit(train_x[train_index], train_y[train_index],
batch_size=batch_size,
epochs=epochs,
shuffle=True,
verbose=0,
validation_split=0.1,
#validation_data=(train_x[valid_index], train_y[valid_index]),
callbacks=keras_callbacks)
current_val_predict = sensoring(train_y[valid_index], model.predict(train_x[valid_index])).flatten()
current_val_r2 = pearsonr(current_val_predict, train_y[valid_index])[0]
# If the current validation score is better then save it
if current_val_r2 > val_fold_score:
val_fold_score = current_val_r2
# Save the predicted values for both the training set
train_predict = sensoring(train_y[train_index], model.predict(train_x[train_index])).flatten()
train_fold_score = pearsonr(train_predict, train_y[train_index])[0]
# Save the training history
with open(output_dir + 'history_' + model_name + '_' + str(_i) + '.pickle', 'wb') as f:
pickle.dump(train_info.history, f)
# Save the results
dict_r = {}
dict_r['hidden_layers'] = hidden_layers[i]
dict_r['pearsonr_train'] = train_fold_score
dict_r['pearsonr_valid'] = val_fold_score
pred = sensoring(test_y, model.predict(test_x)).flatten()
dict_r['pearsonr_test'] = pearsonr(pred, test_y)[0]
#pred = sensoring(test_x, test_y, model.predict(test_x)).flatten()
# Write the result in a file
with open(output_dir + 'result_' + model_name + '.pickle', 'wb') as f:
pickle.dump(dict_r, f)
# Save the model weights
model.save_weights(output_dir + "weights_" + model_name + ".h5")
# Clear the session and the model from the memory
del model
K.clear_session()
pbar.update()
output_dir = 'dl_networks_04/'
jobs = [mp.Process(target=run, args=(output_dir, i)) for i in range(1, 9, 1)]
for j in jobs:
j.start()
###Output
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
###Markdown
Result Analysis
###Code
# Get the network number and Pearson coefficients of the train, test and validation sets in a list (in order)
model_files = sorted(glob.glob(output_dir + 'model_*'))
weight_files = sorted(glob.glob(output_dir + 'weights_*'))
result_files = sorted(glob.glob(output_dir + 'result_*'))
models = []
r2 = []
hidden_layers = []
weights = []
# net_layers = []
for mod, res, w in zip(model_files, result_files, weight_files):
models.append(mod)
weights.append(w)
with open(res, 'rb') as f:
r = pickle.load(f)
coeff = [r['pearsonr_train'], r['pearsonr_test'], r['pearsonr_valid']]
r2.append(coeff)
hidden_layers.append(r['hidden_layers'])
###Output
_____no_output_____
###Markdown
Sort the indices according to the validation result
###Code
r2_ar = np.array(r2)
sorted_indices = list((-r2_ar)[:, 2].argsort())
sorted_r2 = [r2[i] for i in sorted_indices]
sorted_r2[:5]
sorted_models = [models[i] for i in sorted_indices]
sorted_models[:5]
sorted_weights = [weights[i] for i in sorted_indices]
sorted_weights[:5]
###Output
_____no_output_____
###Markdown
Save the lists in the disk
###Code
with open(output_dir + 'sorted_models.pickle', 'wb') as f:
pickle.dump(sorted_models, f)
with open(output_dir + 'sorted_r2.pickle', 'wb') as f:
pickle.dump(sorted_r2, f)
with open(output_dir + 'sorted_weights.pickle', 'wb') as f:
pickle.dump(sorted_weights, f)
###Output
_____no_output_____ |
examples/training/decision_tree/example_allstate.ipynb | ###Markdown
```Copyright 2021 IBM CorporationLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.``` Decision Tree on Allstate Dataset Background The goal of this competition is to predict Bodily Injury Liability Insurance claim payments based on the characteristics of the insured’s vehicle. SourceThe raw dataset can be obtained directly from the [Allstate Claim Prediction Challenge](https://www.kaggle.com/c/ClaimPredictionChallenge).In this example, we download the dataset directly from Kaggle using their API. In order for this to work, you must:1. Log in to Kaggle and accept the [competition rules](https://www.kaggle.com/c/ClaimPredictionChallenge/rules).2. Follow [these instructions](https://www.kaggle.com/docs/api) to install your API token on your machine. GoalThe goal of this notebook is to illustrate how Snap ML can accelerate training of a decision tree model on this dataset. Code
###Code
cd ../../
CACHE_DIR='cache-dir'
import numpy as np
import time
from datasets import Allstate
from sklearn.tree import DecisionTreeClassifier
from snapml import DecisionTreeClassifier as SnapDecisionTreeClassifier
from sklearn.metrics import roc_auc_score as score
dataset = Allstate(cache_dir=CACHE_DIR)
X_train, X_test, y_train, y_test = dataset.get_train_test_split()
print("Number of examples: %d" % (X_train.shape[0]))
print("Number of features: %d" % (X_train.shape[1]))
print("Number of classes: %d" % (len(np.unique(y_train))))
model = DecisionTreeClassifier(max_depth=8)
t0 = time.time()
model.fit(X_train, y_train)
t_fit_sklearn = time.time()-t0
score_sklearn = score(y_test, model.predict_proba(X_test)[:,1])
print("Training time (sklearn): %6.2f seconds" % (t_fit_sklearn))
print("ROC AUC score (sklearn): %.4f" % (score_sklearn))
model = SnapDecisionTreeClassifier(max_depth=8, n_jobs=4)
t0 = time.time()
model.fit(X_train, y_train)
t_fit_snapml = time.time()-t0
score_snapml = score(y_test, model.predict_proba(X_test)[:,1])
print("Training time (snapml): %6.2f seconds" % (t_fit_snapml))
print("ROC AUC score (snapml): %.4f" % (score_snapml))
speed_up = t_fit_sklearn/t_fit_snapml
score_diff = (score_snapml-score_sklearn)/score_sklearn
print("Speed-up: %.1f x" % (speed_up))
print("Relative diff. in score: %.4f" % (score_diff))
###Output
Speed-up: 22.1 x
Relative diff. in score: 0.0050
###Markdown
DisclaimerPerformance results always depend on the hardware and software environment. Information regarding the environment that was used to run this notebook is provided below:
###Code
import utils
environment = utils.get_environment()
for k,v in environment.items():
print("%15s: %s" % (k, v))
###Output
platform: macOS-10.16-x86_64-i386-64bit
cpu_count: 8
cpu_freq_min: 2300
cpu_freq_max: 2300
total_memory: 32.0
snapml_version: 1.7.0
sklearn_version: 0.23.2
###Markdown
Record StatisticsFinally, we record the environment and performance statistics for analysis outside of this standalone notebook.
###Code
import scrapbook as sb
sb.glue("result", {
'dataset': dataset.name,
'n_examples_train': X_train.shape[0],
'n_examples_test': X_test.shape[0],
'n_features': X_train.shape[1],
'n_classes': len(np.unique(y_train)),
'model': type(model).__name__,
'score': score.__name__,
't_fit_sklearn': t_fit_sklearn,
'score_sklearn': score_sklearn,
't_fit_snapml': t_fit_snapml,
'score_snapml': score_snapml,
'score_diff': score_diff,
'speed_up': speed_up,
**environment,
})
###Output
_____no_output_____
###Markdown
```Copyright 2021 IBM CorporationLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.``` Decision Tree on Allstate Dataset Background The goal of this competition is to predict Bodily Injury Liability Insurance claim payments based on the characteristics of the insured’s vehicle. SourceThe raw dataset can be obtained directly from the [Allstate Claim Prediction Challenge](https://www.kaggle.com/c/ClaimPredictionChallenge).In this example, we download the dataset directly from Kaggle using their API. In order for this to work, you must:1. Log in to Kaggle and accept the [competition rules](https://www.kaggle.com/c/ClaimPredictionChallenge/rules).2. Follow [these instructions](https://www.kaggle.com/docs/api) to install your API token on your machine. GoalThe goal of this notebook is to illustrate how Snap ML can accelerate training of a decision tree model on this dataset. Code
###Code
cd ../../
CACHE_DIR='cache-dir'
import numpy as np
import time
from datasets import Allstate
from sklearn.tree import DecisionTreeClassifier
from snapml import DecisionTreeClassifier as SnapDecisionTreeClassifier
from sklearn.metrics import roc_auc_score as score
dataset = Allstate(cache_dir=CACHE_DIR)
X_train, X_test, y_train, y_test = dataset.get_train_test_split()
print("Number of examples: %d" % (X_train.shape[0]))
print("Number of features: %d" % (X_train.shape[1]))
print("Number of classes: %d" % (len(np.unique(y_train))))
model = DecisionTreeClassifier(max_depth=8)
t0 = time.time()
model.fit(X_train, y_train)
t_fit_sklearn = time.time()-t0
score_sklearn = score(y_test, model.predict_proba(X_test)[:,1])
print("Training time (sklearn): %6.2f seconds" % (t_fit_sklearn))
print("ROC AUC score (sklearn): %.4f" % (score_sklearn))
model = SnapDecisionTreeClassifier(max_depth=8, n_jobs=4)
t0 = time.time()
model.fit(X_train, y_train)
t_fit_snapml = time.time()-t0
score_snapml = score(y_test, model.predict_proba(X_test)[:,1])
print("Training time (snapml): %6.2f seconds" % (t_fit_snapml))
print("ROC AUC score (snapml): %.4f" % (score_snapml))
speed_up = t_fit_sklearn/t_fit_snapml
score_diff = (score_snapml-score_sklearn)/score_sklearn
print("Speed-up: %.1f x" % (speed_up))
print("Relative diff. in score: %.4f" % (score_diff))
###Output
Speed-up: 12.4 x
Relative diff. in score: 0.0050
###Markdown
DisclaimerPerformance results always depend on the hardware and software environment. Information regarding the environment that was used to run this notebook is provided below:
###Code
import utils
environment = utils.get_environment()
for k,v in environment.items():
print("%15s: %s" % (k, v))
###Output
platform: Linux-4.15.0-151-generic-x86_64-with-glibc2.10
cpu_count: 40
cpu_freq_min: 800.0
cpu_freq_max: 2101.0
total_memory: 250.58893203735352
snapml_version: 1.8.1
sklearn_version: 1.0.1
xgboost_version: 1.3.3
lightgbm_version: 3.1.1
###Markdown
Record StatisticsFinally, we record the environment and performance statistics for analysis outside of this standalone notebook.
###Code
import scrapbook as sb
sb.glue("result", {
'dataset': dataset.name,
'n_examples_train': X_train.shape[0],
'n_examples_test': X_test.shape[0],
'n_features': X_train.shape[1],
'n_classes': len(np.unique(y_train)),
'model': type(model).__name__,
'score': score.__name__,
't_fit_sklearn': t_fit_sklearn,
'score_sklearn': score_sklearn,
't_fit_snapml': t_fit_snapml,
'score_snapml': score_snapml,
'score_diff': score_diff,
'speed_up': speed_up,
**environment,
})
###Output
/localhome/tpa/anaconda3/envs/snapenv/lib/python3.8/site-packages/papermill/iorw.py:50: FutureWarning: pyarrow.HadoopFileSystem is deprecated as of 2.0.0, please use pyarrow.fs.HadoopFileSystem instead.
from pyarrow import HadoopFileSystem
|
110_0_jupyter_tutorial_pure.ipynb | ###Markdown
Working With Jupyter Notebooks Jupyter notebooks are interactive online notebooks that run directly in your web browser and can both display text and run code (such as Python).In our course we will use Jupyter to provide some interactive content where you can learn new material and practice it at the same time, with instant feedback. The cell structure Every notebook is made up of **cells**. You can navigate between cells using the arrow keys or by clicking on them with the mouse pointer. Cells can either be **code cells** or **text cells** (using a formatting language called *Markdown*). All cells you have seen in this notebook so far have been text cells. They can display text with various formatting, such as *italics* or **bold**, as well as math formulas, for example $\dfrac{x^2-9}{x+3}$. If you double-click a text cell, or select it with the keyboard or your mouse and press `ENTER`, you will see the raw format (markdown) version of the text. If you press > `Shift + Enter`,the text cell will be formatted and display nicely. ---Cells can also be code cells and contain lines of code in a *programming language*, typically *Python*:
###Code
for i in range(10):
print("I love math!")
###Output
_____no_output_____
###Markdown
If you select a code cell and press > `Shift + Enter`,Jupyter will run the code in that cell. Any output that the computation produces will be shown **right beneath the code cell**. Try that with the code cell above. Select it and press `Shift + Enter`. We will use code cells mainly for *two purposes*:1. **to perform mathematical computations**2. **to enter and evaluate answers to problems** Initializing the notebookThe following code cell initializes the next part of the notebook. In particular, it loads the problems that you will be asked to solve. So please go ahead and run this cell using `Shift + Enter`.
###Code
"""
Notebook Initialization
"""
from cyllene import *
%initialize
###Output
_____no_output_____
###Markdown
---<!-- Go ahead and press the 'eye' button to make all code cells invisible.(If you know some Python and you would like to experiment, you can of course make all code sections visible and play around with the code. You can't "break" anything. For example, in the code cell above, change the 10 to a 4 and hit `SHIFT+ENTER`.) --> Computing with Jupyter notebooksYou can use code cells like a powerful calculator. Give it a try.Go ahead and execute (i.e `Shift+Enter`) the cell below.
###Code
2021*(17-6)
###Output
_____no_output_____
###Markdown
As you see, you enter calculations into code cells pretty much like you would enter it into a calculator, using `*` for multiplication.We use `/` for divison and `^` for exponentiation.
###Code
2^7/64
###Output
_____no_output_____
###Markdown
You can also use `**` for exponentiation. (This is actually the standard notation in Python.)
###Code
2**8
###Output
_____no_output_____
###Markdown
As usual, exponentiation binds stronger than the other operations. If you want to change that, you have to use parentheses `(,)`.
###Code
2^(7/64)
###Output
_____no_output_____
###Markdown
Answering problems We also use cells to enter answers to problems. _Answer cells start with the line `%%answer`, followed by the problem name. You input your answer in the line below that and, as usual, press `Shift+Enter`._ Problem 1What is the smallest [*sphenic*](https://en.wikipedia.org/wiki/Sphenic_number) number?
###Code
%%answer Problem 1
30
###Output
_____no_output_____
###Markdown
Try solving the next problem yourself. Problem 2$ \dfrac{2}{3} - \dfrac{1}{2} = $ ?(_Fractions are entered as `a/b`._)
###Code
%%answer Problem 2
###Output
_____no_output_____
###Markdown
Problem 3$ 1.01+0.22 = $ ? (*Decimal fractions are simply entered in the form `X.XXX`.*)
###Code
%%answer Problem 3
###Output
_____no_output_____
###Markdown
--- Entering algebraic expressionsWe will also need to enter algebraic expressions containing variables, such as $x$, along with operations like multiplication and exponentiation. The basic format remains the pretty much the same. When entering answers, as usual in mathematical notation, you may omit the multiplication symbol `*`. Problem 4Simplify: $\qquad 5x - 3x$
###Code
%%answer Problem 4
###Output
_____no_output_____
###Markdown
---The general template to input fractional expressions like $\dfrac{x^2-9}{x+3}$ is `(...)/(...)`. Problem 5Enter the fraction $\qquad \dfrac{x+3}{x^2-1}$.
###Code
%%answer Problem 5
###Output
_____no_output_____
###Markdown
--- Generating new problems with `Shift+Enter`If you come across a problem heading starting with the 🔄 symbol, this means you can generate new versions of this problem as often as you like. Simply select the problem cell and hit `Shift + Enter`. Give it a try below. 🔄 More Practice
###Code
"""
Run this cell to generate a new problem
"""
generate_problem('6')
%%answer Problem 6
###Output
_____no_output_____ |
04_numpy/calculo_vectorial.ipynb | ###Markdown
Lesson 5: Vector Calculus 3D Plots and an Implementation of Gaussian Quadrature Objectives:1. Get to know the basic vector-related tools available in Python.2. Implement Gaussian quadrature using them. 1. Some classic plots from multivariable calculus. 1.1. The Helix.We can plot one of the best-known three-dimensional curves very easily using the powerful libraries available in Python. Let us study the problem of plotting the parametric curve:$$x(t)=\cos(t) \\y(t)=\sin(t) \\z(t)=t$$commonly known as the helix, which is the result of giving the 2-dimensional circle a height component.
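As a direct check of the parametrization above (the next cell plots a conical variant of the spiral for a nicer picture), here is a minimal sketch that draws the plain helix exactly as written:

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection

t = np.linspace(0, 4 * np.pi, 200)
x, y, z = np.cos(t), np.sin(t), t   # x = cos(t), y = sin(t), z = t

ax = plt.axes(projection='3d')
ax.plot(x, y, z, '-b')
plt.show()
```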
###Code
%pylab inline
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = plt.axes(projection='3d')
z = np.linspace(0, 1, 100)
x = z * np.sin(20 * z)
y = z * np.cos(20 * z)
c = z
ax.scatter(x, y, z, c=c)
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot(x, y, z, '-b')
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
1.2. The Cylinder.The cylinder is obtained by sweeping a circle of fixed radius along the z axis. Its locus in Cartesian coordinates is given by:$$x^2+y^2 = r^2 , \quad r>0 $$
###Code
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x=np.linspace(-1, 1, 100)
z=np.linspace(-2, 2, 100)
Xc, Zc=np.meshgrid(x, z)
Yc = np.sqrt(1-Xc**2)
rstride = 20
cstride = 10
ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=rstride, cstride=cstride)
ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=rstride, cstride=cstride)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
###Output
_____no_output_____
###Markdown
1.3. The Lorenz AttractorThe Lorenz attractor is a three-dimensional dynamical system discovered by Edward Lorenz while studying convection in the Earth's atmosphere. The dynamical system that describes it is the following:$$ \frac{dx}{dt} = a ( y - x ) \\\frac{dy}{dt} = x ( b - z ) - y \\\frac{dz}{dt} = xy - cz$$Its plot is shown below:
###Code
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
a, b, c = 10., 28., 8. / 3.
def lorenz_map(X, dt = 1e-2):
X_dt = np.array([a * (X[1] - X[0]),
X[0] * (b - X[2]) - X[1],
X[0] * X[1] - c * X[2]])
return X + dt * X_dt
points = np.zeros((10000, 3))
X = np.array([.1, .0, .0])
for i in range(points.shape[0]):
points[i], X = X, lorenz_map(X)
fig = plt.figure()
ax = fig.gca(projection = '3d')
ax.plot(points[:, 0], points[:, 1], points[:, 2], c = 'k')
plt.show()
###Output
_____no_output_____
###Markdown
1.4. Scalar FieldWe can plot a scalar field quite easily using the matplotlib and mpl_toolkits libraries. Below we plot a 3-dimensional, radially symmetric surface (a sinc function of the distance to the origin).
###Code
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
x = np.linspace(-3, 3, 256)
y = np.linspace(-3, 3, 256)
X, Y = np.meshgrid(x, y)
Z = np.sinc(np.sqrt(X ** 2 + Y ** 2))
fig = plt.figure()
ax = fig.gca(projection = '3d')
ax.plot_surface(X, Y, Z, color='w')
plt.show()
###Output
_____no_output_____
###Markdown
1.5. The Torus.The torus, or donut, is one of the most famous parametric surfaces. The surface is described by the equations:$$x = (R + r \cos\alpha )\cos \beta \\y = (R + r \cos\alpha ) \sin \beta \\z = r\sin \alpha$$where R represents the outer radius, r the inner (tube) radius, $ \alpha $ the latitude with respect to the xz plane and $ \beta $ the rotation angle around the z axis.
###Code
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Generate torus mesh
angle = np.linspace(0, 2 * np.pi, 32)
theta, phi = np.meshgrid(angle, angle)
r, R = .25, 1.
X = (R + r * np.cos(phi)) * np.cos(theta)
Y = (R + r * np.cos(phi)) * np.sin(theta)
Z = r * np.sin(phi)
# Display the mesh
fig = plt.figure()
ax = fig.gca(projection = '3d')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
ax.plot_surface(X, Y, Z, color = 'w', rstride = 1, cstride = 1)
plt.show()
###Output
_____no_output_____
###Markdown
2. Elements of Vector Calculus 2.1. Computing Arc Length.One of the essential elements of vector calculus is the computation of the arc length of a curve in space. For this we can use Python's powerful libraries and carry out the computation quite easily.Let $\vec{r}: [a,b] \rightarrow \mathbb{R}^3$ be a $\mathcal{C}^1$-parametrization of a piecewise-regular curve $\gamma$. Denote by $\mathcal{P}$ a partition of $[a , b]$ given by:$$ \mathcal{P}=\{a=t_0, t_1, t_2, \ldots, t_n = b\} $$Denote also $\Delta t_i = t_i - t_{i-1}$ for $i = 1, 2, \ldots, n$ and $\delta = \delta (\mathcal{P})=\max_{i}(t_i-t_{i-1})$. If $\mathcal{P}_n(\delta)$ is the polygonal obtained by joining the points $\vec{r}(t_i)$ and $\vec{r}(t_{i+1})$, $i=0, 1, \ldots, n-1$, then the smaller $\delta$ is, the better the length of the polygonal $\mathcal{P}_n(\delta)$ approximates $\gamma$:$$ l(\mathcal{P}_n(\delta)) = \sum_{i=1}^n\Vert \vec{r}(t_i)-\vec{r}(t_{i-1})\Vert = \sum_{i=1}^n \left\Vert \frac{\vec{r}(t_i)-\vec{r}(t_{i-1})}{t_i-t_{i-1}}\right\Vert \Delta t_i $$Then, letting $\delta \rightarrow 0$, we obtain the length of the curve $\gamma$:$$ l ( \mathcal{P}_n (\delta) ) \rightarrow l( \gamma ) = \int_{a}^b \left\Vert \frac{d\vec{r}}{dt}(t)\right\Vert dt$$Another way to express this is the following: given a function $f(x)$ continuous on $[a, b]$, the length of the curve from a to b is:$$ L = \int_a^b \sqrt{1+(f'(x))^2}\,dx $$If the curve is given parametrically by $x = x(t), y = y(t)$ with $a = x(\alpha), b = x ( \beta ), t \in [\alpha , \beta]$, then we can make the following change of variable:$$ x = x(t) , \quad dx = x'(t)\,dt $$ $$ f'(x) = \frac{dy}{dx} = \frac{dy}{dt} \frac{dt}{dx} = \frac{dy / dt}{dx / dt} = \frac{y'(t)}{x'(t)} $$$$ \Rightarrow L = \int_{a}^b \sqrt{1 + (f'(x))^2}\,dx = \int_{\alpha}^{\beta} \sqrt{1 + \left( \frac{y'(t)}{x'(t)} \right)^2}\,x'(t)\,dt = \int_{\alpha}^{\beta} \sqrt{(x'(t))^2+(y'(t))^2}\,dt $$ Example:Write a program to compute the arc length of the cycloid:$$ x = k(t-\sin(t)), \quad y = k(1- \cos(t)), \quad t \in [0, 2\pi] $$ with k a positive real constant.
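For this particular curve the derivatives are $x'(t) = k(1-\cos t)$ and $y'(t) = k\sin t$, so the integrand simplifies to $$ \sqrt{(x'(t))^2+(y'(t))^2} = k\sqrt{(1-\cos t)^2 + \sin^2 t} = k\sqrt{2-2\cos t}, $$ and the exact length of one arch is $8k$ (i.e. $9.6$ for $k=1.2$), which the numerical result below should reproduce.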
###Code
from scipy import integrate
import numpy as np

k = 1.2
t0 = 0
tn = 2 * np.pi
# Arc-length integrand sqrt(x'(t)^2 + y'(t)^2) for the cycloid
x2 = lambda z: np.sqrt(k**2 * (1 - np.cos(z))**2 + k**2 * np.sin(z)**2)
integrate.quad(x2, t0, tn)
###Output
_____no_output_____
###Markdown
The integral above was computed with SciPy's powerful integrate library, which evaluates a given integral numerically via Gaussian quadrature. Challenge: Extend the code above into a function that computes the arc length for arbitrary start and end values as well as for the shape parameter k. 2.2. Symbolic integration of line integrals.What can we do if we want to use Python to verify a theoretical rather than a numerical result for a line integral? The answer lies in the SymPy library, which allows symbolic computation in the language. In particular, for integration, the integrate module lets us compute both definite and indefinite integrals. A brief example of how to use this library is shown below. Example: Compute the line integral of the function $f( x, y ) = x^2y^2$ over a circle of unit radius.
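Before the line-integral example, here is a minimal sketch of ordinary symbolic integration with SymPy (one indefinite and one definite integral), just to show the `integrate` interface mentioned above:

```python
from sympy import symbols, integrate, exp, oo

x = symbols("x")
print(integrate(x**2, x))              # x**3/3   (indefinite integral)
print(integrate(exp(-x), (x, 0, oo)))  # 1        (definite integral)
```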
###Code
from sympy import *
t, x, y = symbols("t, x, y")
C = Curve([cos(t), sin(t)], (t, 0, 2 * pi))
line_integrate(x**2 * y**2, C, [x, y])
###Output
_____no_output_____
###Markdown
2.3. Multiple IntegrationTo evaluate integrals in higher dimensions, Python provides powerful extensions of the quadrature routines. The corresponding functions are dblquad and tplquad for the two- and three-dimensional cases, respectively, both contained in SciPy. For an n-dimensional integral the method can be extended through the nquad module, for integrals of the form $\int\dots\int_D f(\vec{x})\,d\vec{x}$ over a suitable domain $D$. For double integration, dblquad can be used to compute integrals of the form $\int_a^b\int_{g(x)}^{h(x)}f(x,y)\,dy\,dx$ with the syntax $dblquad(f,a,b,g,h)$, where $f, g, h$ are functions and $a, b$ constants. Example:Compute the integral of the function $e^{-(x^2+y^2)}$ over the unit square $[0,1]\times[0,1]$.
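As a minimal sketch of the n-dimensional case mentioned above (the integrand and bounds here are chosen purely for illustration), `scipy.integrate.nquad` takes the integrand and one pair of bounds per variable:

```python
import numpy as np
from scipy.integrate import nquad

# Integrate exp(-(x^2 + y^2 + z^2)) over the unit cube [0, 1]^3
def f(x, y, z):
    return np.exp(-(x**2 + y**2 + z**2))

value, error = nquad(f, [[0, 1], [0, 1], [0, 1]])
print(value, error)
```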
###Code
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
def f(x, y):
return np.exp(-x**2-y**2)
fig, ax = plt.subplots(figsize=(6,5))
x = y = np.linspace(-1.25, 1.25, 75)
X, Y = np.meshgrid(x, y)
c = ax.contour(X, Y, f(X,Y), 15, cmap = mpl.cm.RdBu, vmin = -1, vmax = 1)
bound_rect = plt.Rectangle((0, 0), 1, 1, facecolor = "grey")
ax.add_patch(bound_rect)
ax.axis('tight')
ax.set_xlabel('$x$', fontsize = 18)
ax.set_ylabel('$y$', fontsize = 18)
###Output
_____no_output_____
###Markdown
The figure above shows the integration region we will work with. The code that performs the integration is shown below.
###Code
import numpy as np
from scipy.integrate import dblquad
def f(x, y):
return np.exp(-x**2-y**2)
a, b = 0, 1
g = lambda x : 0
h = lambda x : 1
dblquad(f, a, b, g, h)
###Output
_____no_output_____ |
Ejercicios_Python_II.ipynb | ###Markdown
*prepared by Ferran Carrascosa Mallafrè.* __[Open in Colab](https://colab.research.google.com/github/griu/init_python_b1/blob/master/Ejercicios_Python_II.ipynb)__ Setting up the environmentPadawan! When you start a session in Colab, prepare the environment by running the following code.
###Code
if 'google.colab' in str(get_ipython()):
!git clone https://github.com/griu/init_python_b1.git /content/init_python_b1
!git -C /content/init_python_b1 pull
%cd /content/init_python_b1
###Output
_____no_output_____
###Markdown
Exercise 2For exercise 2, we add the planets data to the data from exercise 1.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set() # for the plot style
entidades = ['planets','starships','vehicles','people','species']
entidades_df = {x: pd.read_pickle('www/' + x + '_df.pkl') for x in entidades}
# People data
people_df = entidades_df['people'][["height","mass","birth_year","gender","homeworld"]].dropna()
# planets
planets_df = entidades_df['planets'][["orbital_period","url"]].dropna()
planets_df.head()
###Output
_____no_output_____
###Markdown
Exercise 2.1.Build a function that says "good morning", "good afternoon" or "good night" depending on the time of day.> Hint 1: To test the function, give it an input parameter whose default value is `datetime.now()` (first load `from datetime import datetime`).> Hint 2: You can extract the hour from a datetime with `.hour`.
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.2.In the personajes_df data frame, compute the BMI (IMC) again and create a new variable with the binning of the BMI variable defined in the following table: | Weight status category | BMI interval | | -------------------------- | ----------------------- | | Underweight | < 18.5 | | Normal | >= 18.5 and < 25 | | Overweight | >= 25 and < 30 | | Obese | >= 30 |> Hint: use `pd.cut(..., right=False)` and modify the labels with `.cat.categories`.
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.3.Show the frequencies of the new variable defined in 2.2.
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.4.Now bin the age variable into 5 equally probable groups.Show the counts (frequencies) of the new variable on screen.> Hint: Look up the help for the function [pd.qcut()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.qcut.html)
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.5.Show the cross-tabulation of age bins (ex. 2.4) by BMI bins (ex. 2.2). Which age bin has the largest number of Underweight characters?
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.6.Compute a summary table (data frame) showing the mean BMI for each age bin computed in exercise 2.4.Show the new summary table on screen.
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.7.Show the data from ex. 2.6 as a line chart where the x axis is age and the y axis is the mean BMI.> Hint: As the x axis of the line chart, you can compute in 2.6, within the same mean-BMI calculation, the median age of each age bin.
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.8.Compute the ratio of each character's BMI to the median BMI of their age bin (defined in exercise 2.2) using the groupby(...).apply(...) function.> Hint: first create a function that returns `x / np.nanmedian(x)`.Show the data with a boxplot of the new variable: [pd.boxplot()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html).
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.9.Which planet has the lowest mean BMI among its characters? Which character(s) come from that planet?
###Code
# Solution:
###Output
_____no_output_____
###Markdown
Exercise 2.10.Convert the following strings to datetime using the [datetime.strptime()](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) function from the datetime library (check the help if needed):- "1 january, 2020"- "15-feb.-2017"- "20190701 22:30" (i.e. 1 July 2019)
###Code
# Solution:
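# A possible sketch (hedged); %B/%b month-name parsing assumes an English locale.
from datetime import datetime
print(datetime.strptime("1 january, 2020", "%d %B, %Y"))
print(datetime.strptime("15-feb.-2017", "%d-%b.-%Y"))
print(datetime.strptime("20190701 22:30", "%Y%m%d %H:%M"))  # July 1, 2019, 22:30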
###Output
_____no_output_____ |
Lab9/Exercise_1_VAE.ipynb | ###Markdown
Make sure your GPU is connected to the runtime.
###Code
import keras
from keras import layers
# from keras import backend as K
from tensorflow.keras.models import Model
import numpy as np
import tensorflow.compat.v1.keras.backend as K
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
# Build the encoder
img_shape = (28, 28, 1)
batch_size = 16
latent_dim = 2 # Dimensionality of the latent space: a 2D plane
input_img = keras.Input(shape=img_shape)
x = layers.Conv2D(32, 3, padding='same', activation='relu')(input_img)
x = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2, 2))(x)
x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
shape_before_flattening = K.int_shape(x)
x = layers.Flatten()(x)
x = layers.Dense(32, activation='relu')(x)
# The input image ends up being encoded into these two parameters
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
print(z_mean)
###Output
Tensor("dense_1/BiasAdd:0", shape=(None, 2), dtype=float32)
###Markdown
Next is the code for using `z_mean` and `z_log_var`, the parameters of the statistical distribution assumed to have produced `input_img`, to generate a latent space point `z`. Here, you wrap some arbitrary code (built on top of Keras backend primitives) into a `Lambda` layer. In Keras, everything needs to be a layer, so code that isn’t part of a builtin layer should be wrapped in a `Lambda` (or in a custom layer).
###Code
# Latent-space-sampling function
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.)
return z_mean + K.exp(z_log_var) * epsilon
z = layers.Lambda(sampling)([z_mean, z_log_var])
###Output
_____no_output_____
###Markdown
VAE decoder network, mapping latent space points to images. The following listing shows the decoder implementation. You reshape the vector `z` to the dimensions of an image and then use a few convolution layers to obtain a final image output that has the same dimensions as the original `input_img`.
###Code
decoder_input = layers.Input(K.int_shape(z)[1:]) # Input where you’ll feed z
x = layers.Dense(np.prod(shape_before_flattening[1:]),
activation='relu')(decoder_input)
x = layers.Reshape(shape_before_flattening[1:])(x)
x = layers.Conv2DTranspose(32, 3,padding='same',
activation='relu',strides=(2, 2))(x)
x = layers.Conv2D(1, 3,padding='same',activation='sigmoid')(x)
decoder = Model(decoder_input, x)
z_decoded = decoder(z)
###Output
_____no_output_____
###Markdown
Custom layer used to compute the VAE loss
###Code
class CustomVariationalLayer(keras.layers.Layer):
def vae_loss(self, x, z_decoded):
x = K.flatten(x)
z_decoded = K.flatten(z_decoded)
xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)
kl_loss = -5e-4 * K.mean(
1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
# You implement custom layers by writing a call method.
def call(self, inputs):
x = inputs[0]
z_decoded = inputs[1]
loss = self.vae_loss(x, z_decoded)
self.add_loss(loss, inputs=inputs)
return x # You don't use this output, but the layer must return something.
y = CustomVariationalLayer()([input_img, z_decoded])
###Output
_____no_output_____
###Markdown
Training the VAE
###Code
from keras.datasets import mnist
# import tensorflow.compat.v1.keras.backend as K
# import tensorflow as tf
# tf.compat.v1.disable_eager_execution()
vae = Model(input_img, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape(x_test.shape + (1,))
vae.fit(x=x_train, y=None, shuffle=True, epochs=10, batch_size=batch_size, validation_data=(x_test, None))
import matplotlib.pyplot as plt
from scipy.stats import norm
n = 15
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = decoder.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:2325: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.
warnings.warn('`Model.state_updates` will be removed in a future version. '
|
archived_lectures/Fall_2019/old_lectures/2018/Week_4/lecture10.ipynb | ###Markdown
Cross Validation and Bootstrapping
###Code
%matplotlib inline
import tellurium as te
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
###Output
_____no_output_____
###Markdown
Cross Validation Codes
###Code
def foldGenerator(num_points, num_folds):
"""
:param int num_points:
:param int num_folds:
:return array, array: training indices, test indices
"""
indices = range(num_points)
for remainder in range(num_folds):
test_indices = []
for idx in indices:
if idx % num_folds == remainder:
test_indices.append(idx)
train_indices = np.array(list(set(indices).difference(test_indices)))
test_indices = np.array(test_indices)
yield train_indices, test_indices
#
generator = foldGenerator(10, 5)
for g in generator:
print(g)
###Output
(array([1, 2, 3, 4, 6, 7, 8, 9]), array([0, 5]))
(array([0, 2, 3, 4, 5, 7, 8, 9]), array([1, 6]))
(array([0, 1, 3, 4, 5, 6, 8, 9]), array([2, 7]))
(array([0, 1, 2, 4, 5, 6, 7, 9]), array([3, 8]))
(array([0, 1, 2, 3, 5, 6, 7, 8]), array([4, 9]))
###Markdown
Set up data
###Code
# Detailed simulation model
COLUMN_NAMES = ["[%s]" % x for x in ['A', 'B', 'C']]
def getSimulationData():
te.setDefaultPlottingEngine('matplotlib')
model = """
model test
species A, B, C;
J0: -> A; v0
A -> B; ka*A;
B -> C; kb*B;
J1: C ->; C*kc
ka = 0.4;
v0 = 10
kb = 0.8*ka
kc = ka
end
"""
r = te.loada(model)
return r.simulate(0, 50, 100)
result = getSimulationData()
for col in COLUMN_NAMES:
plt.plot(result['time'], result[col])
plt.xlabel("Time")
plt.ylabel("Concentration")
plt.legend(COLUMN_NAMES)
# Set-up the data
if True:
STD = 5
result = getSimulationData()
length = len(result)
XV = result['time']
XV = XV.reshape(length, 1)
ERRORS = np.array(np.random.normal(0, STD, length))
YV_PURE = result['[B]']
YV = YV_PURE + ERRORS
YV_PURE = YV_PURE.reshape(length, 1)
YV = YV.reshape(length, 1)
###Output
_____no_output_____
###Markdown
Cross Validation
###Code
# Does a polynomial regression of the specified order
def buildMatrix(xv, order):
"""
:param array-of-float xv:
:return matrix:
"""
length = len(xv)
xv = xv.reshape(length)
constants = np.repeat(1, length)
constants = constants.reshape(length)
data = [constants]
for n in range(1, order+1):
data.append(xv*data[-1])
mat = np.matrix(data)
return mat.T
def regress(xv, yv, train, test, order=1):
"""
:param array-of-float xv: predictor values
:param array-of-float yv: response values
:param array-of-int train: indices of training data
:param array-of-int test: indices of test data
:param int order: Order of the polynomial regression
return float, array-of-float, array-of-float, array-of-float: R2, y_test, y_pred, coefficients
"""
regr = linear_model.LinearRegression()
# Train the model using the training sets
mat_train = buildMatrix(xv[train], order)
regr.fit(mat_train, yv[train])
mat_test = buildMatrix(XV[test], order)
y_pred = regr.predict(mat_test)
rsq = r2_score(YV[test], y_pred)
return rsq, yv[test], y_pred, regr.coef_
generator = foldGenerator(100, 4)
for train, test in generator:
rsq, yv_test, yv_pred, coef_ = regress(XV, YV, train, test, order=3)
plt.figure()
plt.scatter(test, yv_pred, color = 'b')
plt.scatter(test, yv_test, color = 'r')
plt.title("RSQ: %2.4f" % rsq)
###Output
_____no_output_____
###Markdown
Bootstrapping
###Code
# Compute residuals
train = range(len(XV))
test = range(len(XV))
rsq, yv_test, yv_pred, _ = regress(XV, YV, train, test, order=3)
residuals = yv_test - yv_pred
plt.scatter(test, residuals)
_ = plt.title("%2.4f" % rsq)
# Generate synthetic data from residuals
def generateData(y_obs, y_fit):
"""
:param np.array y_obs
:param np.array y_fit
:return np.array: bootstrap data
"""
residuals = y_obs - y_fit
length = len(y_obs)
residuals = residuals.reshape(length)
samples = np.random.randint(0, length, length)
result = y_fit + residuals[samples]
result = result.reshape(length)
return result
y_obs = np.array([1, 2, 3])
y_fit = np.array([.9, 2.4, 3.2])
for _ in range(4):
print (generateData(y_obs, y_fit))
train = range(len(XV))
rsq, yv_test, yv_pred, _ = regress(XV, YV, train, train, order=3)
plt.scatter(YV, generateData(YV, yv_pred))
plt.title("Original")
for _ in range(4):
plt.figure()
plt.scatter(YV, generateData(YV, yv_pred))
# Estimate the parameters for each random data set
train = range(len(XV))
coefs = []
_, _, y_fit, _ = regress(XV, YV, train, train, order=3)
for _ in range(10):
yv = generateData(YV, y_fit)
_, _, _, coef_ = regress(XV, yv, train, train, order=3)
coefs.append(coef_)
coefs
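# A possible follow-up (hedged sketch): stack the bootstrap replicates to get a rough
# spread (standard error) for each polynomial coefficient.
coef_matrix = np.vstack([np.asarray(c).ravel() for c in coefs])
print("bootstrap mean:", coef_matrix.mean(axis=0))
print("bootstrap std: ", coef_matrix.std(axis=0))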
###Output
_____no_output_____ |
Mobilenet-StarterKit.ipynb | ###Markdown
DL3 Follow this notebook only if you're new to deep learning and transfer learning. It is an extension of the starter kit given [here](https://github.com/shubham3121/DL-3/blob/master/DL%233_EDA.ipynb). I'll try to keep it simple. Please ignore the typos :) Why use the MobileNet architecture? You might have seen multiple tutorials on VGG16-based transfer learning, but here I'm going to use MobileNet for the following reasons: The number of parameters to train in MobileNet is much smaller than in VGG16. Having fewer parameters shortens training time, so you can run more experiments and your chances of winning become higher. On top of that, MobileNet has similar performance to VGG16 on the ImageNet dataset. Having said that, let's move on to importing the important libs
###Code
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.applications import MobileNet
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import Model
from sklearn.model_selection import train_test_split
import pandas as pd
from tqdm import tqdm
import gc
import cv2 as cv
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
###Output
_____no_output_____
###Markdown
I'm going to use 128x128 images. You can change that if you wish. My folder structure is as follows: DL3 contains starter_kit (with this notebook) and data (with train_img and test_img).
###Code
img_width, img_height = (128, 128)
train_data_dir = '../data/train_img/'
test_data_dir = '../data/test_img/'
epochs = 10
batch_size = 128
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
Mobile_model = MobileNet(include_top=False, input_shape=input_shape)
def get_model():
# add a global spatial average pooling layer
x = Mobile_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(85, activation='sigmoid')(x)
model = Model(inputs=Mobile_model.input, outputs=predictions)
return model
model = get_model()
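# Quick sanity check (hedged) of the parameter-count claim in the intro; the VGG16 import
# is only for this comparison and is not used anywhere else in the notebook.
from keras.applications import VGG16
print('MobileNet-based model params:', model.count_params())
print('VGG16 params (for reference):', VGG16(weights=None).count_params())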
###Output
_____no_output_____
###Markdown
We'll start by training only the head (last layer), since that layer is initialized randomly and we don't want backpropagation to disturb the other layers' weights.
###Code
#train only last layer
for layer in model.layers[:-1]:
layer.trainable = False
model.summary()
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(width_shift_range=0.2, height_shift_range=0.2,
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True, rotation_range = 20)
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train = pd.read_csv('../train.csv', index_col=0)
test = pd.read_csv('../test.csv')
attributes = pd.read_csv('../attributes.txt', delimiter='\t', header=None, index_col=0)
classes = pd.read_csv('../classes.txt', delimiter='\t', header=None, index_col=0)
def get_imgs(src, df, labels = False):
if labels == False:
imgs = []
files = df['Image_name'].values
for file in tqdm(files):
im = cv.imread(os.path.join(src, file))
im = cv.resize(im, (img_width, img_height))
imgs.append(im)
return np.array(imgs)
else:
imgs = []
labels = []
files = os.listdir(src)
for file in tqdm(files):
im = cv.imread(os.path.join(src, file))
im = cv.resize(im, (img_width, img_height))
imgs.append(im)
labels.append(df.loc[file].values)
return np.array(imgs), np.array(labels)
train_imgs, train_labels = get_imgs(train_data_dir, train, True)
#train val split
X_tra, X_val, y_tra, y_val = train_test_split(train_imgs, train_labels, test_size = 3000, random_state = 222)
gc.collect()
train_datagen.fit(X_tra)
val_datagen.fit(X_val)
def fmeasure(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall))
###Output
_____no_output_____
###Markdown
We're going to train our model with SGD and a very low learning rate.
###Code
early_stp = EarlyStopping(patience=3)
model_ckpt = ModelCheckpoint('mobilenet_1_layer.h5', save_weights_only=True)
opt = optimizers.SGD(lr=0.001, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(opt, loss = 'binary_crossentropy', metrics=['accuracy', fmeasure])
model.fit_generator(train_datagen.flow(X_tra, y_tra, batch_size=batch_size),
steps_per_epoch=len(X_tra) / batch_size, epochs=5,
validation_data=val_datagen.flow(X_val, y_val, batch_size=batch_size),
validation_steps = len(X_val)/batch_size, callbacks=[early_stp, model_ckpt], workers = 10, max_queue_size=20)
model = get_model()
#train only last 10 layer
for layer in model.layers:
layer.trainable = True
opt = optimizers.SGD(lr=0.001, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(opt, loss = 'binary_crossentropy', metrics=['accuracy', fmeasure])
model.summary()
early_stp = EarlyStopping(patience=3)
model_ckpt = ModelCheckpoint('mobilenet_all_layers.h5', save_weights_only=True)
model.load_weights('mobilenet_1_layer.h5')
model.fit_generator(train_datagen.flow(X_tra, y_tra, batch_size=batch_size),
steps_per_epoch=len(X_tra) / batch_size, epochs=10,
validation_data=val_datagen.flow(X_val, y_val, batch_size=batch_size),
validation_steps = len(X_val)/batch_size, callbacks=[early_stp, model_ckpt], workers = 10, max_queue_size=20)
test_imgs = get_imgs(test_data_dir, test)
test_datagen.fit(test_imgs)
pred = model.predict_generator(test_datagen.flow(test_imgs, batch_size=512, shuffle=False), verbose=1, workers=8)
sub = pd.read_csv('../sample_submission.csv')
sub.iloc[:, 1:] = pred.round().astype(int)
sub.head()
sub.to_csv('submission.csv', index=False)
sub.shape
###Output
_____no_output_____ |
00_misogyny_tweet_sampling/01_EnglishFilter.ipynb | ###Markdown
Filter Non-English Tweets. Here, we remove tweets from our sample that aren't in English. We should have done this in the previous step but didn't, so we rely on TextBlob here.
###Code
%matplotlib inline
import pandas as pd
import json
import matplotlib.pyplot as plt
from textblob import TextBlob
tweet_list = []
with open("full_diverse_sample.json", "r") as in_file:
tweet_list = json.load(in_file)
x = TextBlob(tweet_list[0])
x.detect_language()
eng_tweets = list(filter(lambda x: TextBlob(x).detect_language() == "en", tweet_list))
len(eng_tweets)
with open("misogyny_en_samples_to_label.json", "w") as out_file:
json.dump(eng_tweets, out_file)
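# Note (hedged): TextBlob's detect_language() calls the Google Translate web API and has been
# removed in newer TextBlob releases. An offline alternative, assuming `langdetect` is installed:
try:
    from langdetect import detect
    eng_tweets_alt = [t for t in tweet_list if detect(t) == "en"]
    print(len(eng_tweets_alt), "tweets detected as English by langdetect")
except ImportError:
    pass  # langdetect not installed; keep the TextBlob result above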
###Output
_____no_output_____ |
02_eda_modeling.ipynb | ###Markdown
Connecticut Redistricting Analysis: GerryChain EDA- Project Objective: Analyze final 2021 CT State House and State Senate maps relative to incumbent protection- Notebook Objective: Set-up seed plan selection, constraint functions, acceptance functions Context >(Cod. Conn. Const. Art. III., Sec. 3, as amended.)(Senate, number, qualifications.) Sec. 3. The senate shall consist of not less than thirty and not more than fifty members, each of whom shall have attained the age of eighteen and be an elector residing in the senatorial district from which he is elected. *Each senatorial district shall be contiguous as to territory and shall elect no more than one senator.*">(Cod. Conn. Const. Art. III., Sec. 4, as amended.) (House of representatives, how constituted.)Sec. 4. The house of representatives shall consist of not less than one hundred twenty-five and not more than two hundred twenty-five members, each of whom shall have attained the age of eighteen years and be an elector residing in the assembly district from which he is elected. *Each assembly district shall be contiguous as to territory and shall elect no more than one representative. For the purpose of forming assembly districts no town shall be divided except for the purpose of forming assembly districts wholly within the town.*">(Cod. Conn. Const. Art. III., Sec. 5, as amended.)(Congressional and general assembly districts to be consistent with federal standards.)Sec. 5. The establishment of congressional districts and of districts in the general assembly shall be consistent with federal constitutional standards.[Source](https://www.cga.ct.gov/rr/tfs/20210401_2021%20Redistricting%20Project/laws.asp) Therefore, the following ensemble analysis integrates the following legal rules: - Districts shall be compact and contiguous- Towns will not be split- Population equality Import and Clean DataTwo blocks are identified with overlaps and a single island is identified. The island was removed.
###Code
import os
import sys
import random
from functools import partial
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
#import seaborn as sns
from gerrychain import (
Election,
Graph,
MarkovChain,
Partition,
accept,
constraints
)
from gerrychain.proposals import recom, propose_random_flip
from gerrychain.metrics import efficiency_gap, mean_median, polsby_popper, wasted_votes
from gerrychain.updaters import cut_edges, county_splits, Tally
from gerrychain.tree import recursive_tree_part, bipartition_tree_random
ct_graph = Graph.from_json("./data/CT_dual_graph.json")
ct_df = gpd.read_file("./data/CT_analysis.zip")
#UserWarning: Found overlaps among the given polygons. Indices of overlaps: {(33614, 33615)}
#UserWarning: Found islands (degree-0 nodes). Indices of islands: {36463}
#"Found islands (degree-0 nodes). Indices of islands: {}".format(islands)
ct_graph.remove_node(36463)
###Output
_____no_output_____
###Markdown
Map the Dual Graph and Geopandas
###Code
nx.draw(ct_graph,
pos = {node:(ct_graph.nodes[node]["C_X"],
ct_graph.nodes[node]["C_Y"])
for node in ct_graph.nodes()},
node_color=[ct_graph.nodes[node]["SENATE"]
for node in ct_graph.nodes()],
node_size=10,
cmap='tab20')
nx.draw(ct_graph,
pos = {node:(ct_graph.nodes[node]["C_X"],
ct_graph.nodes[node]["C_Y"])
for node in ct_graph.nodes()},
node_color=[ct_graph.nodes[node]["HOUSE"]
for node in ct_graph.nodes()],
node_size=10,
cmap='tab20')
ct_senate = ct_df.plot(column="SENATE", cmap="PuBuGn", edgecolor="face")
ct_senate.axis('off')
ct_house = ct_df.plot(column="HOUSE", cmap="PuBuGn", edgecolor="face")
ct_house.axis('off')
###Output
_____no_output_____
###Markdown
Generic Updates and Shortcuts
###Code
totpop = "VAP"
dist_num_cthouse = 151
dist_num_ctsen = 36
def num_splits(partition, df=ct_df):
df["current"] = df.index.map(partition.assignment)
return sum(df.groupby("town")["current"].nunique() > 1)
def num_incumbents(partition, df=ct_df):
df["current"] = df.index.map(partition.assignment)
df['current'] = df['current'].astype(str).replace('\.0', '', regex=True)
df["incumbent_chamber"] = 0
df.loc[(df['chamber'] == 'ct_house') & (df['INCUMBENT'] == 1),
'incumbent_chamber'] = 1
incum_numby_dist = df.groupby("current")["incumbent_chamber"].sum().value_counts()
incum_dist_sort = incum_numby_dist.sort_index()
return incum_dist_sort
updater = {
"population": Tally(totpop, alias="population"),
"cut_edges": cut_edges,
"town_splits": num_splits,
"incumbent": num_incumbents
}
total_population = sum([ct_graph.nodes[n][totpop] for n in ct_graph.nodes])
###Output
_____no_output_____
###Markdown
Seed Plan- Examine seed plans without constraints
###Code
seeds_town=[]
seeds_incumbent=[]
#Run multiple seeds to see how candidate starting plans fare on the rules (town splits, incumbents)
for n in range(100):
plan_seed = recursive_tree_part(ct_graph, #graph object
range(dist_num_cthouse), #how many districts
total_population/dist_num_cthouse, #population target
totpop, #population column, variable name
.01, #epsilon value
1)
partition_seed = Partition(ct_graph,
plan_seed,
updater)
seeds_town.append(partition_seed["town_splits"])
seeds_incumbent.append(partition_seed["incumbent"])
#Plot seed plans to see where incumbent values fall
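# A possible sketch (hedged) of the plot promised above: for each of the 100 seed plans,
# count how many districts contain exactly one incumbent.
one_incumbent = [s.get(1, 0) for s in seeds_incumbent]
plt.hist(one_incumbent, bins=20)
plt.xlabel("districts with exactly one incumbent")
plt.ylabel("number of seed plans")
plt.title("Incumbent placement across random seed plans")
plt.show()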
###Output
_____no_output_____
###Markdown
Constraint and Acceptance Functions
###Code
#Compactness bound, contiguity, town acceptance function, population equality
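# A possible sketch (hedged) of the pieces named above, following the usual GerryChain pattern.
# `initial_partition` reuses the last random seed plan, the 2% population deviation and the
# 2x cut-edges bound are illustrative values, and `town_accept` is an assumed function name.
initial_partition = Partition(ct_graph, plan_seed, updater)
pop_constraint = constraints.within_percent_of_ideal_population(initial_partition, 0.02)
compactness_bound = constraints.UpperBound(
    lambda p: len(p["cut_edges"]),
    2 * len(initial_partition["cut_edges"]))
contiguity_constraint = constraints.contiguous
def town_accept(partition):
    # accept a proposal only if it does not increase the number of split towns
    if partition.parent is None:
        return True
    return partition["town_splits"] <= partition.parent["town_splits"]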
###Output
_____no_output_____ |
Spheroid/Spectral Components Analysis [WGM2012 Bouguer].ipynb | ###Markdown
Parameters
###Code
# Gaussian filter sigma, km
sigmaskm = np.linspace(25,1425,29)
sigmaskm
###Output
_____no_output_____
###Markdown
Raster from file
###Code
da = xr.open_dataarray('WGM2012_Bouguer_ponc_2min.grd').squeeze(drop=True)
da
# reduce dataset size
da = da.coarsen({'y':10, 'x':10}, boundary='trim').mean()
###Output
_____no_output_____
###Markdown
Raster to Database
###Code
%%time
da.to_dataframe(name='z').to_csv('data.csv', header=False)
fname = !pwd
fname = fname[0] + '/data.csv'
%%time
%%sql
create extension if not exists postgis;
drop table if exists data;
create table data (lat float, lon float, z float, the_geom geography(Point, 4326));
COPY data (lat, lon, z) FROM :fname WITH DELIMITER ',';
UPDATE data SET the_geom = ST_SetSRID(ST_MakePoint(lon, lat), 4326);
CREATE INDEX data_the_geom_idx ON data USING GIST (the_geom);
ANALYZE data;
###Output
* postgresql://localhost:5432/grav_29_1
Done.
Done.
Done.
583200 rows affected.
583200 rows affected.
Done.
Done.
CPU times: user 15.2 ms, sys: 5.37 ms, total: 20.6 ms
Wall time: 9.97 s
###Markdown
Grid to Database
###Code
grid = da[::150,::150]
print (grid.shape[0]*grid.shape[1])
grid
grid.to_dataframe(name='z0').to_csv('grid.csv', header=False)
fname = !pwd
fname = fname[0] + '/grid.csv'
%%time
%%sql
drop table if exists grid;
create table grid (lat float, lon float, z0 float, the_geom geography(Point, 4326));
COPY grid (lat, lon, z0) FROM :fname WITH DELIMITER ',';
UPDATE grid SET the_geom = ST_SetSRID(ST_MakePoint(lon, lat), 4326);
CREATE INDEX grid_the_geom_idx ON grid USING GIST (the_geom);
ANALYZE grid;
###Output
* postgresql://localhost:5432/grav_29_1
Done.
Done.
32 rows affected.
32 rows affected.
Done.
Done.
CPU times: user 11 ms, sys: 4.51 ms, total: 15.5 ms
Wall time: 81.8 ms
###Markdown
Gaussian Filtering in Database
###Code
%%sql
DROP FUNCTION IF EXISTS gaussian_transform(sigma float, geom geography);
CREATE OR REPLACE FUNCTION gaussian_transform(sigma float, geom geography)
RETURNS TABLE (z float, count bigint)
AS '
with weights as (
select
z,
exp(-(pow(ST_Distance($2,the_geom,false),2))/(2*pow($1,2))) as weight
from data
where ST_Buffer($2,4.*$1) && the_geom and ST_DWithin($2, the_geom, 4.*$1)
)
select
sum(z*weight)/sum(weight) as z,
count(1) as count
from weights
'
LANGUAGE SQL STABLE;
%%time
%sql drop table if exists gaussian_transform;
%sql create table gaussian_transform (sigmakm int, lat float, lon float, z0 float, z float, count bigint);
for sigmakm in sigmaskm:
print ("sigmakm", sigmakm)
%sql insert into gaussian_transform \
select :sigmakm, lat, lon, z0, t.* from grid, gaussian_transform(:sigmakm*1000, the_geom) as t;
###Output
* postgresql://localhost:5432/grav_29_1
Done.
* postgresql://localhost:5432/grav_29_1
Done.
sigmakm 25.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 75.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 125.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 175.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 225.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 275.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 325.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 375.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 425.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 475.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 525.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 575.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 625.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 675.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 725.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 775.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 825.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 875.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 925.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 975.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1025.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1075.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1125.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1175.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1225.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1275.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1325.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1375.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
sigmakm 1425.0
* postgresql://localhost:5432/grav_29_1
32 rows affected.
CPU times: user 110 ms, sys: 20.5 ms, total: 130 ms
Wall time: 2min 41s
###Markdown
Show Gaussian Transform map
###Code
%sql gaussian << select * from gaussian_transform
gaussian = gaussian.DataFrame()
gaussian = gaussian.set_index(['sigmakm','lat','lon']).to_xarray()
gaussian
%%time
# discrete power spectrum
power_spectrum = gaussian.std(['lat','lon'])['z'].rename('spectrum')
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(1, 3, 1)
ax.plot(power_spectrum.sigmakm.values, power_spectrum.values, c='blue')
ax.set_yscale('log')
ax.set_title(f'Power Spectrum (Log)\n', fontsize=22)
ax.set_ylabel('Log (Spectral density)',fontsize=18)
ax.set_xlabel('Wavelength, km', fontsize=18)
ax.set_yticks([])
ax.set_yticks([], minor=True)
ax = fig.add_subplot(1, 3, 2)
ax.plot(power_spectrum.sigmakm.values, power_spectrum.values, c='blue')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title(f'Power Spectrum (LogLog)\n', fontsize=22)
ax.set_ylabel('Log (Spectral density)',fontsize=18)
ax.set_xlabel('Log (Wavelength), km', fontsize=18)
ax.set_yticks([])
ax.set_yticks([], minor=True)
ax = fig.add_subplot(1, 3, 3)
# calculate fractality index
slope = np.diff(np.log10(power_spectrum))/np.diff(np.log10(power_spectrum.sigmakm))
depths = (power_spectrum.sigmakm.values[1:]+power_spectrum.sigmakm.values[:-1])/2/np.sqrt(2)
fractal = (3 - (slope/2))
ax.plot(depths, 1000*fractal, c='blue')
ax.set_title(f'Density\n', fontsize=22)
ax.set_ylabel('ρ, kg/m³',fontsize=18)
ax.set_xlabel('Depth, km', fontsize=18)
plt.suptitle('WGM2012 Bouguer Gravity Power Spectrum and Fractality Density', fontsize=28)
fig.tight_layout(rect=[0.03, 0.0, 1, 0.9])
plt.savefig('Spectral Components Analysis [WGM2012 Bouguer].jpg', dpi=150, quality=95)
plt.show()
###Output
_____no_output_____ |
Jan 28 - File Reading Experiements.ipynb | ###Markdown
Remember our wordcount code. You could import it here, but I repeated it to act as a reminder.
###Code
def wordcount(userwords):
wordlist=userwords.split()
wordcount = dict()
for word in wordlist:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
return wordcount
def a_union_b(a,b):
union = list()
keysa=a.keys()
keysb=b.keys()
for word in keysa:
if word in keysb:
# print("found :" + word)
union.append(word)
return union
def in_a_not_in_b(a, b):
a_not_b = list()
keysa=a.keys()
keysb=b.keys()
for word in keysa:
if word not in keysb:
# print("count not find :" + word)
a_not_b.append(word)
# else:
# print("found :" + word)
return a_not_b
###Output
_____no_output_____
###Markdown
Create a couple of text files and copy and paste in the text on cats and dogs from Wikipedia. For this example create two files in the data directory (remember we've been putting code in src and data in data). To be completely safe that there isn't a bunch of weird characters in them, run the text through the tool at: https://pteo.paranoiaworks.mobi/diacriticsremover/* ABunchOfTextAboutCats.txt with content about cats* ABunchOfTextAboutDogs.txt with content about dogs. Then set a couple of variables to point to these files
###Code
cat_filename="/Users/johnfunk/CloudStation/JupyterNotebooks/niece-python-lessons/data/ABunchOfTextAboutCats.txt" #Mac
#cat_filename="F:/CloudStation\\JupyterNotebooks\\niece-python-lessons\\data\\ABunchOfTextAboutCats.txt" #Windows
dog_filename="/Users/johnfunk/CloudStation/JupyterNotebooks/niece-python-lessons/data/ABunchOfTextAboutDogs.txt" #Mac
#dog_filename="F:/CloudStation/JupyterNotebooks/niece-python-lessons/data/ABunchOfTextAboutDogs.txt" #Windows
f = open(dog_filename,"r")
dogwords=f.read()
f.close()
f = open(cat_filename,"r")
catwords=f.read()
f.close()
dogdict=wordcount(dogwords)
catdict=wordcount(catwords)
u=a_union_b(dogdict,catdict)
u
left=in_a_not_in_b(dogdict,catdict)
left
right=in_a_not_in_b(catdict,dogdict)
right
###Output
_____no_output_____
###Markdown
Now let's write the output out to a file
###Code
common_words_file="/Users/johnfunk/CloudStation/JupyterNotebooks/niece-python-lessons/data/common-words.txt" #Mac
#common_words_file="F:/CloudStation/JupyterNotebooks/niece-python-lessons/data/common-words.txt" #Windows
f = open(common_words_file,"w")
dogwords=f.write(u)
f.close()
###Output
_____no_output_____
###Markdown
Dang - ok we've got to covert our list to a stringprobably want to add a new line at the end of each one so when we write it out we get 1 word per line
###Code
common_words=str()
for word in u:
common_words+=word+"\n"
f = open(common_words_file,"w")
dogwords=f.write(common_words)
f.close()
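# An equivalent, more idiomatic construction (hedged note): build the same string in one call.
common_words_joined = "\n".join(u) + "\n"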
###Output
_____no_output_____ |
Bjorn_LS_DS_232_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 1*--- Wrangle ML datasets- [ ] Continue to clean and explore your data. - [ ] For the evaluation metric you chose, what score would you get just by guessing?- [ ] Can you make a fast, first model that beats guessing?**We recommend that you use your portfolio project dataset for all assignments this sprint.****But if you aren't ready yet, or you want more practice, then use the New York City property sales dataset for today's assignment.** Follow the instructions below, to just keep a subset for the Tribeca neighborhood, and remove outliers or dirty data. [Here's a video walkthrough](https://youtu.be/pPWFw8UtBVg?t=584) you can refer to if you get stuck or want hints!- Data Source: [NYC OpenData: NYC Citywide Rolling Calendar Sales](https://data.cityofnewyork.us/dataset/NYC-Citywide-Rolling-Calendar-Sales/usep-8jbt)- Glossary: [NYC Department of Finance: Rolling Sales Data](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page)
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
!pip install pandas-profiling==2.*
import pandas as pd
from google.colab import files
uploaded = files.upload()
import io
df = pd.read_csv(io.BytesIO(uploaded['listings_3_.csv']))
df = df.drop(['id','listing_url', 'scrape_id', 'last_scraped', 'name', 'summary', 'space', 'description', 'neighborhood_overview', 'notes', 'access', 'interaction', 'house_rules', 'thumbnail_url', 'medium_url', 'picture_url'], axis=1)
df = df.drop(['xl_picture_url', 'host_id', 'host_url', 'host_name', 'host_about', 'host_thumbnail_url', 'host_picture_url', 'host_has_profile_pic', 'street', 'market', 'smart_location', 'country_code', 'country', 'extra_people', 'minimum_minimum_nights', 'maximum_minimum_nights', 'minimum_maximum_nights', 'maximum_maximum_nights', 'has_availability'], axis=1)
df = df.drop(['weekly_price', 'monthly_price', 'security_deposit', 'calendar_last_scraped', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value', 'license', 'jurisdiction_names'], axis=1)
df = df.drop(['transit', 'host_since', 'host_location', 'host_acceptance_rate', 'host_is_superhost', 'host_listings_count', 'host_verifications', 'neighbourhood_group_cleansed', 'city', 'state', 'square_feet', 'calendar_updated', 'availability_30', 'availability_60', 'availability_90', 'availability_365', 'number_of_reviews', 'number_of_reviews_ltm', 'first_review', 'last_review', 'cancellation_policy', 'reviews_per_month' ], axis=1)
df = df.drop(['is_location_exact'], axis=1)
df = df[df.price <= 1750]
df.columns
df.shape
df.sample(5)
df.dtypes
df.sample()
# can pipelines interpret booleans? if not, the f and t need to be changed to 0 and 1 and column types for those features changed to int
#host_identity_verified, requires_license, instant_bookable, is_business_travel_ready, require_guest_profile_picture, require_guest_phone_verification
bool_cols = ["host_identity_verified", "requires_license", "instant_bookable", "is_business_travel_ready",
             "require_guest_profile_picture", "require_guest_phone_verification"]
# .replace(..., inplace=True) on a df[[...]] slice operates on a copy, so assign the result back instead
df[bool_cols] = df[bool_cols].replace({"t": "1", "f": "0"})
df = df.replace(1.0, 1)
df = df.replace(0.0, 0)
# change column type of above to integer
df[bool_cols] = df[bool_cols].astype(float).astype(int)
df.dtypes
df.sample()
df['price'] = df['price'].str.replace('$', '')
df['price'] = df['price'].str.replace(',', '')
df['price'] = df['price'].astype(float)
df['price'].describe()
df['price'].isnull().sum()
y = df['price']
import seaborn as sns
sns.distplot(y);
# trim price and location outliers by percentile
import numpy as np
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
import matplotlib.pyplot as plt
y_log = np.log1p(y)
sns.distplot(y_log)
plt.title('Log-transformed target, in log-dollars');
df['cleaning_fee'] = df['cleaning_fee'].str.replace('$', '')  # strip '$' as was done for price
df['cleaning_fee'] = df['cleaning_fee'].str.replace(',', '')
df['cleaning_fee'] = df['cleaning_fee'].astype(float)
w = df['cleaning_fee']
sns.distplot(w)
target = 'price'
features = df.columns
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, train_size=0.80, test_size=0.20, random_state=42)
train.shape, test.shape
! pip install category_encoders
# let's encode a pipeline
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OrdinalEncoder
import category_encoders as ce
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
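# Baseline check (hedged sketch), per the assignment prompt: what MAE would we get by
# guessing the mean training price for every listing? A useful model should beat this.
from sklearn.metrics import mean_absolute_error
baseline_pred = [y_train.mean()] * len(y_test)
print('Baseline MAE (guess the mean):', mean_absolute_error(y_test, baseline_pred))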
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=42)
)
pipeline.fit(X_train, y_train)
print('Test Accuracy:', pipeline.score(X_test, y_test))
k = 3
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
# find the parameters of my best fold which had MAE of 180
from sklearn.metrics import mean_absolute_error
y_pred = pipeline.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test MAE: ${mae:,.0f}')
###Output
_____no_output_____ |