path | concatenated_notebook
---|---
code/DreamWorks_code/Shrek3_DataFrame.ipynb | ###Markdown
Shrek the Third DataFrame
Recall from the [Analyzing White Space code](https://github.com/Data-Science-for-Linguists-2019/Animated-Movie-Gendered-Dialogue/blob/master/code/DreamWorks_code/Analyzing_White_Space.ipynb) that this movie only has three widths of white space: 10, 11, or 26 spaces. This means it doesn't fit well into our streamlined pipeline. Let's look at this case by itself.
###Code
shrek3 = open(r'C:\Users\cassi\Desktop\Data_Science\Animated-Movie-Gendered-Dialogue\private\imsdb_raw_nov_2015\Animation\shrekthethird.txt')
shrek3_script = shrek3.read()
shrek3.close()
import re
import pandas as pd
shrek3_script[:500]
shrek3_script[246:300]
shrek3_script = shrek3_script[246:]
def white_space_count(script_name):
white_space = re.findall(" {3,}", script_name)
len_w_s = [len(x) for x in white_space]
print(len_w_s[:100])
#print(len_w_s.index(25))
print(set(len_w_s))
for num in set(len_w_s):
print(num, "white spaces appear", len_w_s.count(num), "times")
white_space_count(shrek3_script)
###Output
[10, 10, 10, 10, 26, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 26, 11, 11, 26, 11, 10, 10, 26, 11, 10, 10, 10, 26, 11, 11, 10, 11, 26, 11, 11, 11, 10, 26, 11, 11, 10, 10, 10, 26, 11, 10, 10, 26, 26, 11, 26, 11, 10, 10, 10, 26, 11, 11, 11, 10, 26, 26, 11, 26, 11, 10, 10, 26, 11, 11, 10, 10, 10, 26, 26, 11, 10, 10, 10, 10, 10, 10, 26, 11, 11, 26, 11, 11, 10, 10, 26, 11, 10, 10, 10]
4365
{11, 25, 10, 26}
11 white spaces appear 1783 times
25 white spaces appear 1 times
10 white spaces appear 1554 times
26 white spaces appear 1028 times
###Markdown
Hmmm, one random grouping of 25 white spaces....
###Code
shrek3_script[:2000]
shrek3_script[4000:6000]
#10 after scene header
#10 between scene header descriptions
#26 before prince charming
#11 between all his lines
#10 after his last line and new scene description begins
#11 before those pesky final screening script lines
#removing those final screening script lines
titles = re.findall(r"\n\n {1,}Shrek the Third - Final Screening Script [0-9]+\.", shrek3_script)
len(titles)
shrek3_script = re.sub(r"\n\n {1,}Shrek the Third - Final Screening Script [0-9]+\.", '', shrek3_script)
white_space_count(shrek3_script)
1668+1+1552+1028
###Output
_____no_output_____
###Markdown
Removing parentheticals off the bat
###Code
def no_parentheses(script):
new_script = re.sub(r" *\([^\)]*\)", '', script)
return new_script
par = re.findall(r" *\([^\)]*\)", shrek3_script)
len(par)
shrek3_script_2 = no_parentheses(shrek3_script)
len(shrek3_script)
len(shrek3_script_2)
white_space_count(shrek3_script_2)
1602+1+1541+927
4249 - 4071 # I feel like this doesn't add up, but okay
#some items in par contain multiple whitespace runs of their own, which is why the drop in the whitespace count doesn't match len(par) exactly
#par
###Output
_____no_output_____
###Markdown
Attempting to find the actual lines
###Code
id_lines = re.findall(r"\n\n {25,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2)
len(id_lines)
sorted(set(id_lines))
%pprint
###Output
Pretty printing has been turned OFF
###Markdown
Wow! This captured almost everything on the first try! The scene headers in this script aren't capitalized except for INT. or EXT., which contain punctuation (not included in my regular expression!). There is just one problem: when Artie has an all-capitalized line that extends over line breaks, the middle of it has no punctuation and is caught by my regular expression. This can be fixed by lowercasing that line (which will be done eventually anyway).
###Code
new_scene = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2)
len(new_scene)
sorted(set(new_scene))
new_scene_2 = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_2)
len(new_scene_2)
sorted(set(new_scene_2))
shrek3_script_3 = re.sub('BY A MONSTER TRYING TO RELATE TO', 'by a monster trying to relate to', shrek3_script_2)
new_line = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", shrek3_script_3)
len(sorted(set(new_line)))
shrek3_script_marked = re.sub(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+\n\n", r"_NEWLINE_\1_", shrek3_script_3)
shrek3_script_marked[:1000]
cuts = re.findall(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+:", shrek3_script_marked)
len(cuts)
cuts
shrek_3_script_marked = re.sub(r"\n\n {10,}(\b[A-Z]['A-Z ]{1,})+:", '', shrek3_script_marked)
#Now let's split it!
script_lines = shrek_3_script_marked.split("_NEWLINE_")
len(script_lines)
script_lines = script_lines[1:]
script_lines[:10]
testing = script_lines[:50]
testing_list = []
line_test = []
for line in testing:
testing_list.extend(re.findall(r"\n\n {10}\w", line))
marker = re.sub(r"\n\n {10}\w", '_ENDLINE_', line)
line_test.append(marker)
keep_lines = []
for line in line_test:
real_line = line.split('_ENDLINE_')
keep_lines.append(real_line[0])
testing_list
for line in line_test:
print(line)
keep_lines
testing
## Seems to have worked! Let's generalize it to the whole script!
line_id = []
for line in script_lines:
marker = re.sub(r"\n\n {10}\w", '_ENDLINE_', line)
line_id.append(marker)
real_script_lines = []
for line in line_id:
real_line = line.split('_ENDLINE_')
real_script_lines.append(real_line[0])
len(real_script_lines) #should be 871!
real_script_lines[:10]
real_script_lines[-10:]
len(" ")
real_script_lines
##removing white space
## Remember, all the white space here is 11 spaces long!
white_space = []
for line in real_script_lines:
white_space.extend(re.findall(r"\n\n {11}", line))
len(white_space)
###Output
_____no_output_____
###Markdown
Splitting Speaker/Text and creating a dataframe
###Code
speaker_text = []
for line in real_script_lines:
line_no_space = re.sub(r"\n\n {11}", ' ', line)
line_tup = line_no_space.split('_')
line_tup[0] = line_tup[0].lower().strip()
line_tup[1] = line_tup[1].lower().strip()
speaker_text.append(tuple(line_tup))
len(speaker_text)
speaker_text[:10]
speaker_text[-10:]
#We don't need "The End"
speaker_text = speaker_text[:-1]
speaker_text[-10:]
###Output
_____no_output_____
###Markdown
Data Frame Time!
###Code
shrek_the_third = pd.DataFrame(speaker_text, columns=["Speaker", "Text"])
shrek_the_third.head()
shrek_the_third.to_pickle(r'..\..\..\Animated-Movie-Gendered-Dialogue\private\shrek3_lines.pkl')
###Output
_____no_output_____ |
Big-Data-Clusters/CU6/Public/content/common/sop033-azdata-logout.ipynb | ###Markdown
SOP033 - azdata logout
======================
Use the azdata command line interface to log out of a Big Data Cluster.
Steps
-----
Common functions
Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("sop033-azdata-logout.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)']}
error_hints = {'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb']]}
install_hint = {'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
###Output
_____no_output_____
###Markdown
Use azdata to log out
###Code
run('azdata logout')
print('Notebook execution complete.')
###Output
_____no_output_____ |
Examples/Creator_example.ipynb | ###Markdown
Define model parameters
###Code
mod_m1=e3d_creator.e3d_model('M1')
mod_m1.assign_model_parameters(10,2,0.05,10)
mod_m1.import_velocity('../Data/Antarctica_firn_vel_model.txt')
mod_m1.position_receivers(3,7,dx=0.5)
mod_m1.define_source(5,0.5,src_type=4,Mxx=-0.6710,Myy=0.0669,Mzz=0.6040,Mxy=0.2416,Mxz=0.4762,Myz=-0.5523)
# mod_m1.define_source(5,0.5,src_type=6)
###Output
_____no_output_____
###Markdown
Plot model
###Code
mod_m1.plot_model()
mod_m1.plot_velocity()
###Output
_____no_output_____
###Markdown
Export e3d parameter file
###Code
mod_m1.create_e3d_file()
###Output
File created: ./M1_e3dmodel.txt
|
Tutorials/Lecture006_DeepQLearning_CartPole.ipynb | ###Markdown
[Guide To Reinforcement Learning](https://skymind.ai/wiki/deep-reinforcement-learning)

Reinforcement learning (RL)
Reinforcement learning (RL) is the subfield of machine learning concerned with decision making and motor control. It studies how an agent can learn to achieve goals in a complex, uncertain environment. It's exciting for two reasons:
* RL is very general, encompassing all problems that involve making a sequence of decisions: for example, controlling a robot's motors so that it's able to run and jump, making business decisions like pricing and inventory management, or playing video games and board games. RL can even be applied to supervised learning problems with sequential or structured outputs.
* RL algorithms have started to achieve good results in many difficult environments. RL has a long history, but until recent advances in deep learning, it required lots of problem-specific engineering. DeepMind's Atari results, BRETT from Pieter Abbeel's group, and AlphaGo all used deep RL algorithms which did not make too many assumptions about their environment, and thus can be applied in other settings.

See: [Open AI GYM](https://gym.openai.com/)

RL is a general concept that can be simply described with an agent that takes actions in an environment in order to maximize its cumulative reward. The underlying idea is very lifelike: similarly to humans in real life, agents in RL algorithms are incentivized with punishments for bad actions and rewards for good ones.

Markov Chain
State–action–reward–state–action (SARSA) is an algorithm for learning a Markov decision process policy. We start with an initial environment. It doesn't have any associated reward yet, but it has a state (S_t). Then, for each iteration, the agent takes the current state (S_t), picks the best (based on model prediction) action (A_t) and executes it on the environment. Subsequently, the environment returns a reward (R_t+1) for that action, a new state (S_t+1) and an indication of whether the new state is terminal. The process repeats until termination.

Deep Q-Learning (DQN)
DQN is an RL technique that is aimed at choosing the best action for given circumstances (observation). Each possible action for each possible observation has its Q value, where 'Q' stands for the quality of a given move.
But how do we end up with accurate Q values? That's where deep neural networks and linear algebra come in.
For each state experienced by our agent, we are going to remember it
`dqn_solver.remember(state, action, reward, state_next, terminal)`
and perform an experience replay.
`dqn_solver.experience_replay()`
Experience replay is a biologically inspired process that uniformly (to reduce correlation between subsequent actions) samples experiences from the memory and for each entry updates its Q value.
We calculate the new Q by taking the maximum Q over the next state's actions (the predicted value of the best next state), multiplying it by the discount factor (GAMMA) and ultimately adding it to the current state reward.
In other words, we are updating our Q value with the cumulative discounted future rewards.
Here is the formal notation (source: https://en.wikipedia.org/wiki/Q-learning):
\begin{align}Q^{new}(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \left[ r_t + \gamma \max_a Q(s_{t+1}, a) - Q(s_t, a_t) \right]\end{align}
For those of you who wonder how such a function can possibly converge, as it looks like it is trying to predict its own output (in some sense it is!), don't worry: it's possible and in our simple case it does.
However, convergence is not always that 'easy', and in more complex problems there comes a need for more advanced techniques that stabilize training.
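To make that update concrete, here is a minimal sketch of such an `experience_replay` step. It assumes a Keras-style `model` with `predict`/`fit` and a `memory` list of `(state, action, reward, next_state, terminal)` tuples; these names and constants are illustrative and this is not the PyTorch implementation used later in this notebook.

```python
# Illustrative sketch only: `model` and `memory` are assumed, not defined in this notebook.
import random
import numpy as np

GAMMA = 0.95
BATCH_SIZE = 20

def experience_replay(model, memory):
    if len(memory) < BATCH_SIZE:
        return
    batch = random.sample(memory, BATCH_SIZE)            # uniform sampling decorrelates consecutive steps
    for state, action, reward, next_state, terminal in batch:
        q_update = reward                                 # terminal transitions keep only the immediate reward
        if not terminal:
            # reward + discounted value of the best action in the next state
            q_update = reward + GAMMA * np.amax(model.predict(next_state)[0])
        q_values = model.predict(state)                   # current Q estimates for this state
        q_values[0][action] = q_update                    # move only the taken action towards the new target
        model.fit(state, q_values, verbose=0)             # one small supervised step
```

Each sampled transition nudges only the Q value of the action that was actually taken, which is exactly the cumulative-discounted-reward update written above.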
Such stabilizing techniques are, for example, Double DQNs or Dueling DQNs, but that's a topic for another article (stay tuned).

Reinforcement Learning (DQN) tutorial
=====================================
Adapted from the PyTorch website.
**Author**: `Adam Paszke `_
This tutorial shows how to use PyTorch to train a Deep Q Learning (DQN) agent on the CartPole-v0 task from the `OpenAI Gym `__.

**Task**
The agent has to decide between two actions - moving the cart left or right - so that the pole attached to it stays upright. You can find an official leaderboard with various algorithms and visualizations at the `Gym website `__.
As the agent observes the current state of the environment and chooses an action, the environment *transitions* to a new state, and also returns a reward that indicates the consequences of the action. In this task, the environment terminates if the pole falls over too far.
The CartPole task is designed so that the inputs to the agent are 4 real values representing the environment state (position, velocity, etc.). However, neural networks can solve the task purely by looking at the scene, so we'll use a patch of the screen centered on the cart as an input. Because of this, our results aren't directly comparable to the ones from the official leaderboard - our task is much harder. Unfortunately this does slow down the training, because we have to render all the frames.
Strictly speaking, we will present the state as the difference between the current screen patch and the previous one. This will allow the agent to take the velocity of the pole into account from one image.

**Packages**
First, let's import the needed packages. Firstly, we need `gym `__ for the environment (install using `pip install gym`). We'll also use the following from PyTorch:
- neural networks (``torch.nn``)
- optimization (``torch.optim``)
- automatic differentiation (``torch.autograd``)
- utilities for vision tasks (``torchvision`` - `a separate package `__).

Cartpole Problem
Cartpole - also known as an Inverted Pendulum - is a pendulum with a center of gravity above its pivot point. It's unstable, but can be controlled by moving the pivot point under the center of mass. The goal is to keep the cartpole balanced by applying appropriate forces to the pivot point.
(Figure: the violet square indicates the pivot point; red and green arrows show the possible horizontal forces that can be applied to it.)
*A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center.*
Take a look at the video below with a real-life demonstration of the cartpole learning process.
[Real-life application of Reinforcement Learning video](https://youtu.be/XiigTGKZfks)

Import
Before we start the tutorial, we need to install some dependencies to render and capture images from OpenAI Gym. It may take some time to set up the environment.
###Code
!apt-get install cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev libboost-all-dev libsdl2-dev swig python3-dev python3-future python-opengl
!apt-get -qq -y install libcusparse8.0 libnvrtc8.0 libnvtoolsext1 > /dev/null
!ln -snf /usr/lib/x86_64-linux-gnu/libnvrtc-builtins.so.8.0 /usr/lib/x86_64-linux-gnu/libnvrtc-builtins.so
!apt-get -qq -y install xvfb freeglut3-dev ffmpeg> /dev/null
!apt-get install xserver-xorg libglu1-mesa-dev mesa-common-dev libxmu-dev libxi-dev
!pip3 install torch torchvision gym[all] PyOpenGL piglet pyglet pyvirtualdisplay
# Start virtual display
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1024, 768))
display.start()
import os
os.environ["DISPLAY"] = ":" + str(display.display) + "." + str(display.screen)
%matplotlib inline
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from IPython.display import HTML
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Replay Memory
-------------
We'll be using experience replay memory for training our DQN. It stores the transitions that the agent observes, allowing us to reuse this data later. By sampling from it randomly, the transitions that build up a batch are decorrelated. It has been shown that this greatly stabilizes and improves the DQN training procedure.
For this, we're going to need two classes:
- ``Transition`` - a named tuple representing a single transition in our environment
- ``ReplayMemory`` - a cyclic buffer of bounded size that holds the transitions observed recently. It also implements a ``.sample()`` method for selecting a random batch of transitions for training.
###Code
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
###Output
_____no_output_____
###Markdown
Now, let's define our model. But first, let's quickly recap what a DQN is.

DQN algorithm
-------------
Our environment is deterministic, so all equations presented here are also formulated deterministically for the sake of simplicity. In the reinforcement learning literature, they would also contain expectations over stochastic transitions in the environment.
Our aim will be to train a policy that tries to maximize the discounted, cumulative reward $R_{t_0} = \sum_{t=t_0}^{\infty} \gamma^{t - t_0} r_t$, where $R_{t_0}$ is also known as the *return*. The discount, $\gamma$, should be a constant between $0$ and $1$ that ensures the sum converges. It makes rewards from the uncertain far future less important for our agent than the ones in the near future that it can be fairly confident about.
The main idea behind Q-learning is that if we had a function $Q^*: State \times Action \rightarrow \mathbb{R}$, that could tell us what our return would be, if we were to take an action in a given state, then we could easily construct a policy that maximizes our rewards:
\begin{align}\pi^*(s) = \arg\!\max_a \ Q^*(s, a)\end{align}
However, we don't know everything about the world, so we don't have access to $Q^*$. But, since neural networks are universal function approximators, we can simply create one and train it to resemble $Q^*$.
For our training update rule, we'll use the fact that every $Q$ function for some policy obeys the Bellman equation:
\begin{align}Q^{\pi}(s, a) = r + \gamma Q^{\pi}(s', \pi(s'))\end{align}
The difference between the two sides of the equality is known as the temporal difference error, $\delta$:
\begin{align}\delta = Q(s, a) - (r + \gamma \max_a Q(s', a))\end{align}
To minimise this error, we will use the `Huber loss `__. The Huber loss acts like the mean squared error when the error is small, but like the mean absolute error when the error is large - this makes it more robust to outliers when the estimates of $Q$ are very noisy. We calculate this over a batch of transitions, $B$, sampled from the replay memory:
\begin{align}\mathcal{L} = \frac{1}{|B|}\sum_{(s, a, s', r) \ \in \ B} \mathcal{L}(\delta)\end{align}
\begin{align}\text{where} \quad \mathcal{L}(\delta) = \begin{cases} \frac{1}{2}{\delta^2} & \text{for } |\delta| \le 1, \\ |\delta| - \frac{1}{2} & \text{otherwise.} \end{cases}\end{align}

Q-network
Our model will be a convolutional neural network that takes in the difference between the current and previous screen patches. It has two outputs, representing $Q(s, \mathrm{left})$ and $Q(s, \mathrm{right})$ (where $s$ is the input to the network). In effect, the network is trying to predict the *quality* of taking each action given the current input.
###Code
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
self.head = nn.Linear(448, 2)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
###Output
_____no_output_____
###Markdown
Input extraction
The code below provides utilities for extracting and processing rendered images from the environment. It uses the ``torchvision`` package, which makes it easy to compose image transforms. Once you run the cell it will display an example patch that it extracted.
###Code
resize = T.Compose([T.ToPILImage(),
T.Resize(40, interpolation=Image.CUBIC),
T.ToTensor()])
# This is based on the code from gym.
screen_width = 600
def get_cart_location():
world_width = env.x_threshold * 2
scale = screen_width / world_width
return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART
def get_screen():
screen = env.render(mode='rgb_array').transpose(
(2, 0, 1)) # transpose into torch order (CHW)
# Strip off the top and bottom of the screen
screen = screen[:, 160:320]
view_width = 320
cart_location = get_cart_location()
if cart_location < view_width // 2:
slice_range = slice(view_width)
elif cart_location > (screen_width - view_width // 2):
slice_range = slice(-view_width, None)
else:
slice_range = slice(cart_location - view_width // 2,
cart_location + view_width // 2)
# Strip off the edges, so that we have a square image centered on a cart
screen = screen[:, :, slice_range]
# Convert to float, rescare, convert to torch tensor
# (this doesn't require a copy)
screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
screen = torch.from_numpy(screen)
# Resize, and add a batch dimension (BCHW)
return resize(screen).unsqueeze(0).to(device)
env.reset()
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
interpolation='none')
plt.title('Example extracted screen')
plt.show()
###Output
_____no_output_____
###Markdown
Training
--------
Hyperparameters and utilities
This cell instantiates our model and its optimizer, and defines some utilities:
- ``select_action`` - will select an action according to an epsilon greedy policy. Simply put, we'll sometimes use our model for choosing the action, and sometimes we'll just sample one uniformly. The probability of choosing a random action will start at ``EPS_START`` and will decay exponentially towards ``EPS_END``. ``EPS_DECAY`` controls the rate of the decay; a quick check of the schedule is sketched after this list.
- ``plot_durations`` - a helper for plotting the durations of episodes, along with an average over the last 100 episodes (the measure used in the official evaluations). The plot will be underneath the cell containing the main training loop, and will update after every episode.
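As a purely illustrative check of how fast exploration decays under this schedule (using the same constant values as the cell below):

```python
# Sketch of the epsilon-greedy decay used below; the constants mirror the next cell.
import math

EPS_START, EPS_END, EPS_DECAY = 0.9, 0.05, 200
for steps_done in (0, 200, 1000):
    eps = EPS_END + (EPS_START - EPS_END) * math.exp(-steps_done / EPS_DECAY)
    print(steps_done, round(eps, 3))   # 0 -> 0.9, 200 -> ~0.363, 1000 -> ~0.056
```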
###Code
BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 10
policy_net = DQN().to(device)
target_net = DQN().to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(10000)
steps_done = 0
def select_action(state):
global steps_done
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * \
math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
if sample > eps_threshold:
with torch.no_grad():
return policy_net(state).max(1)[1].view(1, 1)
else:
return torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)
episode_durations = []
def plot_durations():
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
###Output
_____no_output_____
###Markdown
Training loop
Finally, the code for training our model.
Here, you can find an ``optimize_model`` function that performs a single step of the optimization. It first samples a batch, concatenates all the tensors into a single one, computes $Q(s_t, a_t)$ and $V(s_{t+1}) = \max_a Q(s_{t+1}, a)$, and combines them into our loss. By definition we set $V(s) = 0$ if $s$ is a terminal state. We also use a target network to compute $V(s_{t+1})$ for added stability. The target network has its weights kept frozen most of the time, but is updated with the policy network's weights every so often. This is usually a set number of steps but we shall use episodes for simplicity.
###Code
def optimize_model():
if len(memory) < BATCH_SIZE:
return
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
# detailed explanation).
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=device, dtype=torch.uint8)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken
state_action_values = policy_net(state_batch).gather(1, action_batch)
# Compute V(s_{t+1}) for all next states.
next_state_values = torch.zeros(BATCH_SIZE, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
# Compute the expected Q values
expected_state_action_values = (next_state_values * GAMMA) + reward_batch
# Compute Huber loss
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
###Output
_____no_output_____
###Markdown
Below, you can find the main training loop. At the beginning we reset the environment and initialize the ``state`` Tensor. Then, we sample an action, execute it, observe the next screen and the reward (always 1), and optimize our model once. When the episode ends (our model fails), we restart the loop.
Below, `num_episodes` is set small. You should download the notebook and run a lot more episodes.
###Code
num_episodes = 50
for i_episode in range(num_episodes):
# Initialize the environment and state
env.reset()
last_screen = get_screen()
current_screen = get_screen()
state = current_screen - last_screen
for t in count():
# Select and perform an action
action = select_action(state)
_, reward, done, _ = env.step(action.item())
reward = torch.tensor([reward], device=device)
# Observe new state
last_screen = current_screen
current_screen = get_screen()
if not done:
next_state = current_screen - last_screen
else:
next_state = None
# Store the transition in memory
memory.push(state, action, next_state, reward)
# Move to the next state
state = next_state
# Perform one step of the optimization (on the target network)
optimize_model()
if done:
episode_durations.append(t + 1)
plot_durations()
break
# Update the target network
if i_episode % TARGET_UPDATE == 0:
target_net.load_state_dict(policy_net.state_dict())
print('Complete')
env.render()
plt.show()
frames = []
for i in range(3):
env.reset()
last_screen = get_screen()
current_screen = get_screen()
state = current_screen - last_screen
done = False
R = 0
t = 0
while not done and t < 200:
frames.append(env.render(mode = 'rgb_array'))
action = select_action(state)
obs, r, done, _ = env.step(action.item())
R += r
t += 1
if not done:
next_state = current_screen - last_screen
else:
next_state = None
state = next_state
print('test episode:', i, 'R:', R)
env.render()
# make a video to display
plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)
patch = plt.imshow(frames[0])
plt.axis('off')
animate = lambda i: patch.set_data(frames[i])
ani = matplotlib.animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval = 50)
HTML(ani.to_jshtml())
###Output
test episode: 0 R: 9.0
test episode: 1 R: 10.0
test episode: 2 R: 9.0
|
NLP_Customer_Review_ML_Complete.ipynb | ###Markdown
###Code
import pandas as pd
import nltk
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
reviews = pd.read_csv('https://raw.githubusercontent.com/lucasmoratof/customers_review_project/master/reviews_for_nlp.csv', usecols=['review_comment_message', 'is_good_review'])
reviews.head()
###Output
_____no_output_____
###Markdown
I will try some techniques to count the number of characters and words in each review.
###Code
# count the lenght of each review
reviews['char_count'] = reviews['review_comment_message'].apply(len)
reviews['char_count'].head()
# average characters in the reviews
reviews['char_count'].mean()
# create a function to count the number of words in each comment
def count_words(string):
words = string.split()
return len(words)
# applying the funciton to create a new feature
reviews['word_count'] = reviews['review_comment_message'].apply(count_words)
# finding the average number of words in the reviews
print(reviews['word_count'].mean())
###Output
11.901374718589835
###Markdown
Some text preprocessing techniques:
- Converting words to lowercase
- Removing leading and trailing whitespace
- Removing punctuation
- Removing stopwords
- Expanding contractions
- Removing special characters (numbers, emojis, etc.)

**Tokenization** is the process of splitting text into units called tokens (words, punctuation marks, etc.), which can later be mapped to numerical IDs. We can also treat whole sentences as tokens.
**Lemmatization** is the process of converting a word into its base (dictionary) form, usually lowercased.
(A rough sketch of the basic normalization steps is shown below.)
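As a rough illustration of the basic normalization steps listed above, here is a hypothetical helper (not part of the pipeline used below, which relies on spaCy):

```python
# Minimal sketch: lowercase, trim, drop punctuation/digits/emojis, collapse spaces.
import re

def basic_clean(text: str) -> str:
    text = text.lower().strip()                 # lowercase + remove leading/trailing whitespace
    text = re.sub(r"[^a-zà-ú\s]", " ", text)    # keep letters (incl. Portuguese accents) and spaces only
    return re.sub(r"\s+", " ", text).strip()    # collapse repeated whitespace

print(basic_clean("  Ótimo produto!!! Chegou em 2 dias :) "))   # -> "ótimo produto chegou em dias"
```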
###Code
# If you need to download the model (works on google colab)
import spacy.cli
spacy.cli.download("pt_core_news_sm")
# Load the Portuguese model
import spacy
nlp = spacy.load("pt_core_news_sm")
doc= nlp(reviews['review_comment_message'][2])
# IMPORTANT, when you pass the strings through nlp(), it performs Lemmatization by default
tokens = [token.text for token in doc]
lemmas= [token.lemma_ for token in doc]
print(tokens, "\n", lemmas)
# Stopwords
stopwords = spacy.lang.pt.stop_words.STOP_WORDS
no_stops= [lemma for lemma in lemmas if lemma.isalpha() and lemma not in stopwords]
print(' '.join(no_stops))
# Creating a function that combines tokenization and lemmatization
def preprocessing(text):
doc= nlp(text) # creates the document
lemmas= [token.lemma_ for token in doc] # extracts the lemmas (fixed: iterate over tokens, not re-bind `doc`)
# time to remove stopwords (remember that we are using the Portuguese version)
clean_lemmas= [lemma for lemma in lemmas if lemma.isalpha() and lemma not in stopwords]
return ' '.join(clean_lemmas)
###Output
_____no_output_____
###Markdown
Part of Speech - POS
It assigns each word a grammatical category, like proper noun, verb, etc.
###Code
# load the model
nlp= spacy.load('pt_core_news_sm')
# create the doc
doc= nlp(reviews['review_comment_message'][2])
# generate tokens and pos tags
pos= [(token.text, token.pos_) for token in doc]
print(pos)
###Output
[('aparelho', 'NOUN'), ('eficiente', 'ADJ'), ('.', 'PUNCT'), ('no', 'ADP'), ('site', 'VERB'), ('a', 'DET'), ('marca', 'NOUN'), ('do', 'DET'), ('aparelho', 'NOUN'), ('esta', 'DET'), ('impresso', 'VERB'), ('como', 'ADP'), ('3desinfector', 'NUM'), ('e', 'PUNCT'), ('a', 'ADP'), ('o', 'DET'), ('chegar', 'VERB'), ('esta', 'DET'), ('com', 'ADP'), ('outro', 'DET'), ('nome', 'NOUN'), ('...', 'PUNCT'), ('atualizar', 'VERB'), ('com', 'ADP'), ('a', 'DET'), ('marca', 'NOUN'), ('correta', 'ADJ'), ('uma', 'DET'), ('vez', 'NOUN'), ('que', 'SCONJ'), ('é', 'VERB'), ('o', 'DET'), ('mesmo', 'DET'), ('aparelho', 'NOUN')]
###Markdown
Below I will create two functions to count the number of proper nouns and nouns. Then I will apply these functions to the data, separating good reviews and bad reviews. Finally, I will calculate the mean number of PROPN and NOUN tags in both groups and compare them.
###Code
# PROPN
def proper_nouns(text, model=nlp):
# Create doc object
doc= model(text)
# Generate list of POS tags
pos= [token.pos_ for token in doc]
return pos.count('PROPN')
# NOUN
def nouns(text, model=nlp):
doc= nlp(text)
pos= [token.pos_ for token in doc]
return pos.count('NOUN')
# Create two columns, witht the number of nouns and proper nouns
reviews['num_propn'] = reviews['review_comment_message'].apply(proper_nouns)
reviews['num_noun'] = reviews['review_comment_message'].apply(nouns)
# computing the mean of proper nouns
good_propn= reviews[reviews['is_good_review']== 1]['num_propn'].mean()
bad_propn= reviews[reviews['is_good_review']== 0]['num_propn'].mean()
# computing the mean of nouns
good_noun= reviews[reviews['is_good_review']== 1]['num_noun'].mean()
bad_noun= reviews[reviews['is_good_review']== 0]['num_noun'].mean()
# print results to compare
print("Mean number of proper nouns for good and bad reviews are %.2f and %.2f respectively"%(good_propn, bad_propn))
print("Mean number of nouns for good and bad reviews are %.2f and %.2f respectively"%(good_noun, bad_noun))
###Output
Mean number of proper nouns for good and bad reviews are 0.48 and 0.88 respectively
Mean number of nouns for good and bad reviews are 2.10 and 3.63 respectively
###Markdown
Named Entity Recognition
It classifies named entities into predefined categories, like person, organization, country, etc.
Uses:
- Efficient search algorithms
- Question answering
- News article classification
- Customer service
###Code
# Let's practice NER
nlp= spacy.load('pt_core_news_sm')
text= reviews['review_comment_message'][11]
doc= nlp(text)
# print all named entities:
for ent in doc.ents:
print(ent.text, ent.label_)
###Output
Comprei PER
###Markdown
To find person's names, we can use the following function:
###Code
def find_persons(text, model=nlp):
doc= model(text)
persons= [ent.text for ent in doc.ents if ent.label_ in ('PER', 'PERSON')] # the Portuguese model tags people as 'PER' (see the output above)
return persons
###Output
_____no_output_____
###Markdown
Vectorization
The process of converting text into vectors so it can be used in ML. Bag of Words is one model that performs vectorization. It's important to perform text preprocessing, as it leads to smaller vocabularies, and reducing the number of dimensions helps improve performance. CountVectorizer, from scikit-learn, is the tool used to build a bag of words. It takes some arguments to preprocess the text.
###Code
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# Create CountVectorizer object, specifying the arguments to preprocess text
stop_words_port= spacy.lang.pt.stop_words.STOP_WORDS
vectorizer= CountVectorizer(stop_words=stop_words_port)
# Split into training and test sets
X_train, X_test, y_train, y_test= train_test_split(reviews['review_comment_message'], reviews['is_good_review'], test_size=0.25, random_state=24)
# Generate training Bow vectors
X_train_bow= vectorizer.fit_transform(X_train)
# Generate test Bow vector
X_test_bow= vectorizer.transform(X_test)
print(X_train_bow.shape)
print(X_test_bow.shape)
###Output
(31315, 13603)
(10439, 13603)
###Markdown
We will try the Naive Bayes classifier for this problem.
###Code
# Import multinomialNB
from sklearn.naive_bayes import MultinomialNB
# create MultinomialNB object
clf= MultinomialNB()
# Train clf
clf.fit(X_train_bow, y_train)
# Compute accuracy on test set
accuracy= clf.score(X_test_bow, y_test)
print("The accuracy of the classifier is %.3f" % accuracy)
# Predict the sentiment of a negative review
review= "detestei o produto, nao gostei do vendedor, estou insatisfeito"
prediction= clf.predict(vectorizer.transform([review]))[0]
print("The sentiment predicted by the classifier is %i" % prediction)
###Output
The sentiment predicted by the classifier is 0
###Markdown
On the example above, the model correct classified a bad review. Techniques to give context to a reviewn-gramsIt is a contiguous sequence of n-elements, or words, in a given document. A bag of words is n-gram model where n= 1.Example: "I love you". If n=1, we have:- "I"- "Love"- "You"If we change n to 2, we would have:- "I love"- "love you"It helps the model to undestand the relationship between the words.
###Code
# To avoid the curse of dimensionality, don't use more than n=3
# We are going to compare how much it increases when we increase the n-gram
vectorizer_ng1 = CountVectorizer(ngram_range=(1, 1))
ng1 = vectorizer_ng1.fit_transform(X_train)
vectorizer_ng2 = CountVectorizer(ngram_range=(1, 2))
ng2 = vectorizer_ng2.fit_transform(X_train)
vectorizer_ng3 = CountVectorizer(ngram_range=(1, 3))
ng3 = vectorizer_ng3.fit_transform(X_train)
print("number of features by n-grams is:\n ng1= %i \n ng2= %i \n ng3= %i" % (ng1.shape[1], ng2.shape[1], ng3.shape[1]))
###Output
number of features by n-grams is:
ng1= 13963
ng2= 114172
ng3= 295810
###Markdown
We can see that with n=1 we have 13k features, while with n=3 it increases to 295k.
###Code
# We will try the same model again, now with n-grams up to n=3 (ngram_range=(1,3), matching the code below)
vectorizer_ng= CountVectorizer(stop_words=stop_words_port, ngram_range=(1,3))
X_train_bow_ng= vectorizer_ng.fit_transform(X_train)
X_test_bow_ng= vectorizer_ng.transform(X_test)
clf.fit(X_train_bow_ng, y_train)
accuracy_ng= clf.score(X_test_bow_ng, y_test)
print("The accuracy of the classifier is %.3f" % accuracy_ng)
###Output
The accuracy of the classifier is 0.872
###Markdown
Term Frequency - Inverse Document Frequency - **TF-IDF**
The idea is: the more often a word occurs within a document (term frequency), and the fewer documents it appears in across the corpus (inverse document frequency), the more weight it gets. Words that are common in every document are down-weighted.
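A minimal sketch of that weighting with scikit-learn's defaults (smooth idf and L2-normalised rows, which are the library defaults); the toy corpus is made up for illustration:

```python
# idf(t) = ln((1 + n_docs) / (1 + df(t))) + 1 under scikit-learn's default smooth_idf=True.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

toy = ["bom produto", "produto ruim", "entrega rapida e produto bom"]
vec = TfidfVectorizer()
vec.fit(toy)

for term in ("produto", "bom"):                         # 'produto' is in every doc, 'bom' only in two
    df = sum(term in doc.split() for doc in toy)
    manual_idf = np.log((1 + len(toy)) / (1 + df)) + 1
    print(term, manual_idf, vec.idf_[vec.vocabulary_[term]])
# 'produto' gets the minimum idf (1.0), while the rarer 'bom' gets a higher weight (~1.29)
```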
###Code
# instead of using CountVectorizer(), we will use TfidfVectorizer() from scikit-learn
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer= TfidfVectorizer()
tfidf_matrix= vectorizer.fit_transform(X_train)
print(tfidf_matrix.shape)
###Output
(31315, 13963)
###Markdown
Cosine similarity
It measures the cosine of the angle between two vectors: 1 means the vectors point in the same direction, 0 means they are orthogonal.
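A quick check of that definition on two toy vectors (illustrative only):

```python
# cos(a, b) = a·b / (||a|| * ||b||)
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

a = np.array([[1.0, 0.0]])
b = np.array([[1.0, 1.0]])
manual = (a @ b.T).item() / (np.linalg.norm(a) * np.linalg.norm(b))
print(manual, cosine_similarity(a, b)[0, 0])   # both ~0.7071
```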
###Code
from sklearn.metrics.pairwise import cosine_similarity
import time
# record time
start= time.time()
# Compute cosine similarity matrix
cosine_sim= cosine_similarity(tfidf_matrix, tfidf_matrix)
# print the cosine similarity matrix
print(cosine_sim)
# Print time taken
print("Time taken: %s seconds" %(time.time() - start))
# we can use linear_kernel to calculate cosine similarity: it is faster, and because TfidfVectorizer L2-normalises each row by default, the plain dot product gives the same result here.
from sklearn.metrics.pairwise import linear_kernel
import time
# record time
start= time.time()
# Compute cosine similarity matrix
cosine_sim= linear_kernel(tfidf_matrix, tfidf_matrix)
# print the cosine similarity matrix
print(cosine_sim)
# Print time taken
print("Time taken: %s seconds" %(time.time() - start))
###Output
[[1. 0. 0.20017999 ... 0. 0. 0.11678042]
[0. 1. 0. ... 0. 0.04735246 0. ]
[0.20017999 0. 1. ... 0. 0. 0.58337711]
...
[0. 0. 0. ... 1. 0. 0. ]
[0. 0.04735246 0. ... 0. 1. 0. ]
[0.11678042 0. 0.58337711 ... 0. 0. 1. ]]
Time taken: 19.20054602622986 seconds
###Markdown
Word embeddings
Dense vector representations of words, used here to measure similarity between words or sentences. Note that the small `pt_core_news_sm` model does not ship with static word vectors, so these similarity scores are only rough.
###Code
reviews['review_comment_message'].head()
# let's check how similar are the reviews
# first, creat a Doc
review_1_doc= nlp(reviews['review_comment_message'][1])
review_2_doc= nlp(reviews['review_comment_message'][2])
review_3_doc= nlp(reviews['review_comment_message'][3])
# Now, use the function similarity
print(review_1_doc.similarity(review_2_doc))
print(review_2_doc.similarity(review_3_doc))
print(review_3_doc.similarity(review_1_doc))
# trying Multinomial Naive Bayes with Tfidf vectorization
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
import time
# Create CountVectorizer object, specifying the arguments to preprocess text
stop_words_port= spacy.lang.pt.stop_words.STOP_WORDS
vectorizer= TfidfVectorizer(stop_words=stop_words_port)
# Split into training and test sets
X_train, X_test, y_train, y_test= train_test_split(reviews['review_comment_message'], reviews['is_good_review'], test_size=0.25, random_state=24)
start= time.time()
# Generate training Bow vectors
X_train_vec= vectorizer.fit_transform(X_train)
# Generate test Bow vector
X_test_vec= vectorizer.transform(X_test)
# create MultinomialNB object
clf= MultinomialNB()
# Train clf
clf.fit(X_train_vec, y_train)
# Compute accuracy on test set
accuracy= clf.score(X_test_vec, y_test)
print("The accuracy of the classifier is %.3f" % accuracy)
print("Time taken: %s seconds" %(time.time() - start))
import sklearn.metrics as metrics
from sklearn.metrics import classification_report, confusion_matrix
clf_y_pred = clf.predict(X_test_vec)
print(metrics.classification_report(y_test, clf_y_pred))
###Output
_____no_output_____ |
notebooks/99. Journal Analysis project - Francesco.ipynb | ###Markdown
S2orc (exploration, clustering & visualization)
------
For presenting some results we need to analyze (and rapidly compare) some of the methods we have used until now to discriminate between papers' `field_of_study` based on their `title` and `abstract`.
This notebook is an extension of some previous work done by Master's students from the University of Florence (cite here).

Dataset
From each scientific paper we took the `title` and the `abstract`, as well as a property identifying the field to which the article pertains.
The dataset (only 1000 elements) has been selected randomly from a full version of 80M papers from different fields.
The fields of study (called `mag_field_of_study` in the dataset) are the following:

| Field of study | All papers | Full text |
|----------------|------------|-----------|
| Medicine | 12.8M | 1.8M |
| Biology | 9.6M | 1.6M |
| Chemistry | 8.7M | 484k |
| n/a | 7.7M | 583k |
| Engineering | 6.3M | 228k |
| Comp Sci | 6.0M | 580k |
| Physics | 4.9M | 838k |
| Mat Sci | 4.6M | 213k |
| Math | 3.9M | 669k |
| Psychology | 3.4M | 316k |
| Economics | 2.3M | 198k |
| Poli Sci | 1.8M | 69k |
| Business | 1.8M | 94k |
| Geology | 1.8M | 115k |
| Sociology | 1.6M | 93k |
| Geography | 1.4M | 58k |
| Env Sci | 766k | 52k |
| Art | 700k | 16k |
| History | 690k | 22k |
| Philosophy | 384k | 15k |

Note for reproducibility: `data` is a `DatasetDict` object composed of a `Dataset` object for every key (in `train`, `test`, `valid`):
```python
{
  "train": Dataset,
  "test" : Dataset,
  "valid": Dataset
}
```
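A quick way to sanity-check that structure, assuming `data` has been loaded as described and that each record exposes the `title`, `abstract` and `mag_field_of_study` fields mentioned above (this access pattern is an assumption, not code from the original notebook):

```python
example = data["train"][0]          # DatasetDict -> Dataset -> single record (a dict)
print(example["title"])
print(example["mag_field_of_study"])
print(example["abstract"][:200])    # first 200 characters of the abstract
```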
###Code
%load_ext autoreload
%autoreload 2
MAIN_PATH = '/home/vivoli/Thesis'
DATA_PATH = '/home/vivoli/Thesis/data'
OUT_PATH = '/home/vivoli/Thesis/outputs/'
ARGS_PATH = '/home/vivoli/Thesis/'
# Imports
from thesis.utils.general import load_dataset_wrapper
from thesis.utils.parsers.args_parser import parse_args
DICTIONARY_FIELD_NAMES = dict(
train = ['train'],
test = ['test', 'debug', 'dev'],
validation = ['validation', 'valid']
)
###Output
_____no_output_____
###Markdown
Getting the dataset---In order to get the dataset we need to create a dictionary with the DatasetArguments (params) and use our "library" called `thesis`.
###Code
# ------------------
# Creating Arguments
# ------------------
# create arguments dictionary
args = dict(
# DatasetArguments
model_name_or_path = "allenai/scibert_scivocab_uncased",
dataset_name = "s2orc", # "keyphrase",
dataset_config_name = "full", # "inspec",
# TrainingArguments
seed = '1234',
output_dir = "/home/vivoli/Thesis/output",
num_train_epochs = '1',
per_device_train_batch_size = "8", # 16 and 32 end with "RuntimeError: CUDA out of memory."
per_device_eval_batch_size = "8", # 16 and 32 end with "RuntimeError: CUDA out of memory."
max_seq_length = '512',
# S2orcArguments & KeyPhArguments
dataset_path = "/home/vivoli/Thesis/data",
data = "abstract",
target = "title",
classes = "mag_field_of_study", # "keywords",
# S2orcArguments
idxs = '0',
zipped = 'True',
mag_field_of_study = "Computer Science",
keep_none_papers = 'False',
keep_unused_columns = 'False',
# RunArguments
run_name = "scibert-s2orc",
run_number = '0',
run_iteration = '0',
# LoggingArguments
verbose = 'True',
debug_log = 'True',
time = 'False',
callbacks = "WandbCallback,CometCallback,TensorBoardCallback",
)
# save dictionary to file
import json
import os
ARGS_FILE = 'arguments.json'
with open(os.path.join(ARGS_PATH, ARGS_FILE), 'w') as fp:
json.dump(args, fp)
print(args)
# ------------------
# Parsing the Arguments
# ------------------
dataset_args, training_args, model_args, run_args, log_args, embedding_args = parse_args(['params_path', os.path.join(ARGS_PATH, ARGS_FILE)])
# ------------------
# Getting the datasets
# ------------------
# Getting the load_dataset wrapper that manages huggingface dataset and the custom ones
custom_load_dataset = load_dataset_wrapper()
# Loading the raw data based on input (and default) values of arguments
raw_datasets = custom_load_dataset(dataset_args, training_args, model_args, run_args, log_args, embedding_args)
# The Datasets in the raw form can have different form of key names (depending on the configuration).
# We need all datasets to contain 'train', 'test', 'validation' keys, if not we change the dictionary keys' name
# based on the `names_tuple` and conseguently on `names_map`.
def format_key_names(raw_datasets):
# The creation of `names_map` happens to be here
# For every element in the values lists, one dictionary entry is added
# with (k,v): k=Value of the list, v=Key such as 'train', etc.
def names_dict_generator(names_tuple: dict):
names_map = dict()
for key, values in names_tuple.items():
for value in values:
names_map[value] = key
return names_map
names_map = names_dict_generator(DICTIONARY_FIELD_NAMES)
split_names = raw_datasets.keys()
for split_name in split_names:
new_split_name = names_map.get(split_name)
if split_name != new_split_name:
raw_datasets[new_split_name] = raw_datasets.pop(split_name)
return raw_datasets
logger.info(f"Formatting DatasetDict keys")
datasets = format_key_names(raw_datasets)
keywords = []
keywords_info = {}
for item in data:
temp = item['keywords']
for keyword in temp:
        keyword = keyword.replace("-", "").replace(",","").replace("/", "") # I had forgotten to strip the hyphen during keyword preprocessing. As for the comma, it is not a separator: some keywords genuinely contain one, e.g. "segmentation, features and descriptions"
if keyword not in keywords:
keywords.append(keyword)
keywords_info[keyword] = {'count': 0, 'appears_in': []}
keywords_info[keyword]['count'] += 1
keywords_info[keyword]['appears_in'].append(item['filename'])
print(keywords_info)
#plot distribution
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt; plt.rcdefaults()
pos = np.arange(len(keywords))
counts = []
for kw in keywords:
counts.append(keywords_info[kw]['count'])
plt.figure(figsize=(10,25))
y_pos = np.arange(len(keywords))
plt.barh(y_pos, counts, alpha=0.5)
plt.yticks(y_pos, keywords)
plt.xlabel('Count')
plt.title('Count distribution for each keyword')
plt.grid()
plt.show()
#order by count
ordered_kws = [x for _,x in sorted(zip(counts,keywords))]
ordered_cts = sorted(counts)
plt.figure(figsize=(5,22))
y_pos = np.arange(len(keywords))
plt.barh(y_pos, ordered_cts, alpha=0.5)
plt.yticks(y_pos, ordered_kws)
plt.xlabel('Count')
plt.title('Count distribution for each keyword')
#plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Given the chart of keyword frequencies (NOT normalized), a threshold can be set to consider only the most relevant keywords.

Definition of groups for the "ground truth" (?) "baseline" (?)

Note: the following is arguable; in fact, the most frequent keywords are also the blandest and, maybe, the least significant for a categorization.
###Code
len(keywords)
len(keywords)*.2 #the first 29 words make up to 20% of all the keywords
sum(ordered_cts[len(ordered_cts)-43:len(ordered_cts)])/sum(ordered_cts)
sum(ordered_cts[len(ordered_cts)-29:len(ordered_cts)])/sum(ordered_cts)
###Output
_____no_output_____
###Markdown
43 keywords make up roughly 80% of the total keyword count. However, for practical reasons, 29 keywords (20%) will be used, also considering how many documents have multiple keywords associated with them. This has consequences for the choice of the number of clusters: HDBSCAN neither requires nor wants the number of clusters to be specified, but for k-means it is mandatory, by the nature of the algorithm.
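As a sanity check, the cutoff can also be derived programmatically instead of being fixed by hand — a minimal sketch, assuming `ordered_cts` is the ascending list of keyword counts built above:

```python
# Hypothetical helper: smallest number of top keywords whose counts cover
# at least `target_share` of all keyword occurrences.
def smallest_covering_top_k(counts, target_share=0.8):
    counts_desc = sorted(counts, reverse=True)
    total = sum(counts_desc)
    running = 0
    for k, c in enumerate(counts_desc, start=1):
        running += c
        if running / total >= target_share:
            return k
    return len(counts_desc)

# smallest_covering_top_k(ordered_cts, 0.8) should land close to the 43 mentioned above
```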
###Code
MOST_IMPORTANT_KW_THRESHOLD = 29
mi_keywords = ordered_kws[len(ordered_kws)-MOST_IMPORTANT_KW_THRESHOLD:len(ordered_kws)] #most important keywords
mi_keywords_counts = ordered_cts[len(ordered_kws)-MOST_IMPORTANT_KW_THRESHOLD:len(ordered_kws)]
plt.rc('font', size=8)
plt.figure(figsize=(10,3))
y_pos = np.arange(len(mi_keywords))
plt.barh(y_pos, mi_keywords_counts, alpha=0.5)
plt.yticks(y_pos, mi_keywords, )
plt.xlabel('Count')
plt.title('Count distribution for each of the most important keywords')
plt.grid()
plt.show()
mi_associations = {}
# admittedly not very elegant, but it works
for keyword in mi_keywords:
mi_associations[keyword] = keywords_info[keyword]
print(mi_associations['deeplearning'])
###Output
_____no_output_____
###Markdown
Clustering

From here on, clustering is performed on SBERT embeddings. The variable 'clustering_on' selects whether the embeddings are computed on the abstracts or on the titles. In both cases, clustering is done with HDBSCAN and with k-means (see the two subsections).

WARNING: do not execute cells in random order. Some variable names are reused between the HDBSCAN and the k-means parts; the suggestion is to execute the HDBSCAN part first and k-means second. Otherwise, make sure to execute the definitions of the functions used in both cases.

Clustering with HDBSCAN
###Code
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
!pip install hdbscan
!pip install sentence-transformers
!pip install umap-learn
from sentence_transformers import SentenceTransformer
#model_in = SentenceTransformer('distilbert-base-nli-mean-tokens') #this model is kinda bad
model_in = SentenceTransformer('stsb-roberta-large')
#model = SentenceTransformer('paraphrase-distilroberta-base-v1')
#since the same exact thing has to be done for both abstracts and titles, i define a function
def elaborate(subject = None, model = None):
'''
:param str subject: can be either 'abstract' or 'title', specifies what the clustering has to be made on
:param SentenceTransformer model: instanciated model of SentenceTransformer (SBERT)
'''
textual_data = []
for item in data:
        # the condition is rather important: in the case of abstracts, HDBSCAN kept clustering
        # all the error placeholders into the same cluster (which makes sense), so removing them is necessary
if not (item[subject] == 'UNABLE TO DECODE ABSTRACT' or item[subject] == 'Unable to open the pdf' or item[subject] == ""):
textual_data.append(item[subject])
print(textual_data)
return textual_data, model.encode(textual_data, show_progress_bar = True)
# function: given the original data and the cluster labels, groups and prints them together
def constructDictionaryOfClusters(labels, original_data):
print(labels)
associative = []
#prepare dictionary
for i in range(labels.max()+1):
associative.append([])
#print(f"associative: {associative}")
for i in range(len(original_data)):
if labels[i] != -1: #in the case of HDBSCAN the labels -1 are the "outsiders"
associative[labels[i]].append(original_data[i])
#print(i)
for item in associative:
print(len(item))
print(item)
print(len(associative))
return associative
# note: the last element of the list is made up of the outliers
clustering_on = "abstract"
textual_data, embeddings = elaborate(clustering_on, model_in)
#########################
# NOTE: the embeddings were already computed inside elaborate() above; the earlier
# re-encoding step here referenced an undefined `model` and was redundant, so it is dropped
print(embeddings.shape)
#here after, once calculated the embeddings (either for the abstracts or the title) the clustering is considered
#first with HDBSCAN, then k-means
import umap
import hdbscan
# first it's *better* to do dimensionality reduction, sbert returns embeddings of dimension 700+ or even 1000+ (depending on the chosen model)
# clustering algorithms tend not to perform well in high-dimensional spaces
# UMAP is a popular algorithm for dimensionality reduction
umap_embeddings = umap.UMAP(n_neighbors=15,
n_components=5,
metric='cosine').fit_transform(embeddings)
cluster = hdbscan.HDBSCAN(min_cluster_size=5,
metric='euclidean',
cluster_selection_method='eom').fit(umap_embeddings)
import pandas as pd
umap_data = umap.UMAP(n_neighbors=15, n_components=2, min_dist=0.0, metric='cosine').fit_transform(umap_embeddings)
result = pd.DataFrame(umap_data, columns=['x', 'y'])
result['labels'] = cluster.labels_
# Visualize clusters
fig, ax = plt.subplots(figsize=(10, 7))
outliers = result.loc[result.labels == -1, :]
clustered = result.loc[result.labels != -1, :]
plt.scatter(outliers.x, outliers.y, color='#BDBDBD', s=0.05)
plt.scatter(clustered.x, clustered.y, c=clustered.labels, s=0.05, cmap='hsv_r')
plt.colorbar()
plt.title("Visualization of one instance of clustering through HDBSCAN")
'''cluster = hdbscan.HDBSCAN(min_cluster_size=3,
metric='manhattan',
cluster_selection_method='eom').fit(embeddings)
'''
#number of clusters
print(cluster.labels_.max()) # note: this is the highest label => number of clusters = max()+1
clusters = constructDictionaryOfClusters(cluster.labels_, textual_data)
print(len(clusters))
#really not necessary to be honest,
clusters_desc = clusters.copy()[:-1]
clusters_desc.sort(key = len, reverse = True)
print(len(clusters))
print(len(clusters_desc))
###Output
_____no_output_____
###Markdown
Now I build a matching matrix between the clusters and the most important keywords; intersection over union is computed for each element of the matrix.
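The per-cell score is the Jaccard index between the set of document IDs in a cluster and the set of documents a keyword appears in — a small self-contained sketch (the IDs are made up):

```python
# Jaccard index (intersection over union) between two sets of document IDs
def jaccard(cluster_ids, keyword_ids):
    a, b = set(cluster_ids), set(keyword_ids)
    union = a | b
    return len(a & b) / len(union) if union else 0.0

print(jaccard(["doc1", "doc2", "doc3"], ["doc2", "doc3", "doc4"]))  # -> 0.5
```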
###Code
#transform clusters of titles/abstracts into clusters of ids
'''clusters_of_ids = []
for item in clusters_desc:
temp = []
for text in item:
tmp_ids = []
for dt in data:
#print(dt)
tp = None
if dt[clustering_on] == text:
tmp_ids.append(dt['filename'])
tp = dt['filename']
#print(tp)
break
temp.append(tp)
clusters_of_ids.append(temp)
#print(clusters_of_ids)
'''
def transform_clusters_into_id_clusters(temp_clust_desc):
id_clusters = []
for item in clusters_desc:
temp = []
for text in item:
tmp_ids = []
for dt in data:
#print(dt)
tp = None
if dt[clustering_on] == text:
tmp_ids.append(dt['filename'])
tp = dt['filename']
#print(tp)
break
temp.append(tp)
id_clusters.append(temp)
return id_clusters
clusters_of_ids = transform_clusters_into_id_clusters(clusters_desc)
print(len(clusters_of_ids[0]))
print(len(clusters_of_ids[-1]))
#should have stored this way since the beginning, useless transformation..
new_data = {}
for item in data:
new_data[item['filename']] = {'title': item['title'], 'abstract': item['abstract']}
#new_data
mi_keywords_desc = mi_keywords.copy()
mi_keywords_desc.reverse()
matching_matrix = []
#for each cluster
for c_item in clusters_of_ids:
#for each keyword of the previously defined keywords
row = []
for kwd in mi_keywords_desc:
# keyword k_item appears in doc1, doc2, ...
# c_item is the first set, the second set should be k_item['appears_in']
appears_in = mi_associations[kwd]['appears_in']
#the following can be replaced with whatever metric
union = len(set(c_item).union(set(appears_in)))
intersection = len(set(c_item).intersection(set(appears_in)))
row.append(intersection/union)
matching_matrix.append(row)
import numpy as np
np_matching_matrix = np.array(matching_matrix)
#print(np_matching_matrix)
mi_keywords_desc
import seaborn as sn
#TODO add the labels to the chart ?
plt.figure(figsize=(11,8))
sn.heatmap(np_matching_matrix, annot=False, xticklabels=mi_keywords_desc)
###Output
_____no_output_____
###Markdown
Clustering with K-means

The reason for doing two types of clustering is that, while HDBSCAN leaves out the outliers, k-means forces each element into a cluster.
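Because k-means needs the number of clusters up front (29 here, mirroring the keyword threshold), one optional sanity check — not part of the original pipeline — is to compare silhouette scores for a few candidate values of k, assuming `umap_embeddings` from the cells above is available:

```python
# Hypothetical check: silhouette score for a few candidate values of k
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

for k in (10, 20, 29, 40):
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(umap_embeddings)
    print(k, round(silhouette_score(umap_embeddings, labels), 3))
```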
###Code
from sklearn.cluster import KMeans
clustering_model = KMeans(n_clusters=29)
clustering_model.fit(umap_embeddings)
cluster_assignment = clustering_model.labels_
# number of clusters
print(clustering_model.labels_.max())
clusters = constructDictionaryOfClusters(clustering_model.labels_, textual_data)
import pandas as pd
#umap_data = umap.UMAP(n_neighbors=15, n_components=2, min_dist=0.0, metric='cosine').fit_transform(umap_embeddings)
result = pd.DataFrame(umap_data, columns=['x', 'y'])
result['labels'] = clustering_model.labels_
# Visualize clusters
fig, ax = plt.subplots(figsize=(10, 7))
outliers = result.loc[result.labels == -1, :]
clustered = result.loc[result.labels != -1, :]
plt.scatter(outliers.x, outliers.y, color='#BDBDBD', s=0.05)
plt.scatter(clustered.x, clustered.y, c=clustered.labels, s=0.05, cmap='hsv_r')
plt.colorbar()
plt.title("Visualization of one instance of clustering through k-means")
#really not necessary to be honest,
clusters_desc = clusters.copy()[:-1]
clusters_desc.sort(key = len, reverse = True)
###Output
_____no_output_____
###Markdown
NOTE: from this point on, it's the same code as with HDBSCAN
###Code
clusters_of_ids = transform_clusters_into_id_clusters(clusters_desc)
print(len(clusters_of_ids[0]))
print(len(clusters_of_ids[-1]))
#should have stored this way since the beginning, useless transformation..
new_data = {}
for item in data:
new_data[item['filename']] = {'title': item['title'], 'abstract': item['abstract']}
mi_keywords_desc = mi_keywords.copy()
mi_keywords_desc.reverse()
matching_matrix = []
#for each cluster
for c_item in clusters_of_ids:
#for each keyword of the previously defined keywords
row = []
for kwd in mi_keywords_desc:
# keyword k_item appears in doc1, doc2, ...
# c_item is the first set, the second set should be k_item['appears_in']
appears_in = mi_associations[kwd]['appears_in']
#the following can be replaced with whatever metric
union = len(set(c_item).union(set(appears_in)))
intersection = len(set(c_item).intersection(set(appears_in)))
row.append(intersection/union)
matching_matrix.append(row)
import numpy as np
np_matching_matrix = np.array(matching_matrix)
#print(np_matching_matrix)
import seaborn as sn
#TODO add the labels to the chart ?
plt.figure(figsize=(11,8))
sn.heatmap(np_matching_matrix, annot=False, xticklabels=mi_keywords_desc)
###Output
_____no_output_____
###Markdown
A further step: automatic keyword assignment

Given the clusters, it is possible to use c-TF-IDF to infer the topic of each one; this *could* allow automatic labeling of a set of documents.
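For reference, the class-based TF-IDF computed by the `c_tf_idf` helper in the cell below corresponds to the following score for a term $t$ in a cluster $c$ (a sketch of the formula, with $m$ the total number of documents and $n_{t,c}$ the count of $t$ in the concatenation of the documents assigned to $c$):

$$
\text{c-TF-IDF}_{t,c} \;=\; \frac{n_{t,c}}{\sum_{t'} n_{t',c}} \;\cdot\; \log\frac{m}{\sum_{c'} n_{t,c'}}
$$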
###Code
#smarter way of doing things..
import pandas as pd
#for each cluster, create pandas dataframe
docs_df = pd.DataFrame(textual_data, columns=["Doc"])
docs_df['Topic'] = cluster.labels_
docs_df['Doc_ID'] = range(len(docs_df))
docs_per_topic = docs_df.groupby(['Topic'], as_index = False).agg({'Doc': ' '.join})
from sklearn.feature_extraction.text import CountVectorizer
#note: c-tf-idf is simply tf-idf but the measurements are made on one entire cluster
def c_tf_idf(documents, m, ngram_range=(1, 1)):
count = CountVectorizer(ngram_range=ngram_range, stop_words="english").fit(documents)
t = count.transform(documents).toarray()
w = t.sum(axis=1)
tf = np.divide(t.T, w)
sum_t = t.sum(axis=0)
idf = np.log(np.divide(m, sum_t)).reshape(-1, 1)
tf_idf = np.multiply(tf, idf)
return tf_idf, count
def extract_top_n_words_per_topic(tf_idf, count, docs_per_topic, n=20):
words = count.get_feature_names()
labels = list(docs_per_topic.Topic)
tf_idf_transposed = tf_idf.T
indices = tf_idf_transposed.argsort()[:, -n:]
top_n_words = {label: [(words[j], tf_idf_transposed[i][j]) for j in indices[i]][::-1] for i, label in enumerate(labels)}
return top_n_words
def extract_topic_sizes(df):
topic_sizes = (df.groupby(['Topic'])
.Doc
.count()
.reset_index()
.rename({"Topic": "Topic", "Doc": "Size"}, axis='columns')
.sort_values("Size", ascending=False))
return topic_sizes
tf_idf, count = c_tf_idf(docs_per_topic.Doc.values, m=len(data))
top_n_words = extract_top_n_words_per_topic(tf_idf, count, docs_per_topic, n=20)
topic_sizes = extract_topic_sizes(docs_df)
topic_sizes.head(10) # note: topic '-1' collects the documents that HDBSCAN did not assign to any cluster
top_n_words[0][:100]
###Output
_____no_output_____ |
examples/Abatement/A2.ipynb | ###Markdown
A very simple input-displacing model

We consider the simple case with:
* Two technologies, using a combination of two fuels and capital in a Leontief nest.
* Technology $1$ produces two goods $(u1,u2)$. Technology $2$ produces one good $(u3)$. This nest is CET (normalized).
* $u1$ is used to produce a component $C1$; goods $(u2,u3)$ are combined as component $C2$. This is MNL (normalized).
* Components $(C1,C2)$ are combined into one good $E$. This is CES.

1: Trees

*Data file:*
###Code
data_file = 'TreeData.xlsx'
###Output
_____no_output_____
###Markdown
*Main tree:*
###Code
nt = nesting_tree.nesting_tree(name='A1')
###Output
_____no_output_____
###Markdown
*Add Trees:*
###Code
nt.add_tree(data_folder+'\\'+data_file,tree_name='T_inp',**{'sheet':'T'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='T_out',**{'sheet':'U', 'type_io':'output','type_f':'CET_norm'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='C',**{'sheet':'C', 'type_f':'MNL'})
nt.add_tree(data_folder+'\\'+data_file,tree_name='E',**{'sheet':'E', 'type_f': 'CES_norm'})
nt.run_all()
###Output
_____no_output_____
###Markdown
*Read in data on variables as well:*
###Code
[DataBase.GPM_database.merge_dbs(nt.database,excel2py.xl2PM.pm_from_workbook(data_folder+'\\'+data_file,{sheet:'vars'}),'first') for sheet in ('T','U','C','E')];
###Output
_____no_output_____
###Markdown
2: Production module
###Code
gm = Production.pr_static(nt=nt,work_folder=directory['work'],**{'data_folder':gams_folder,'name':'A1'})
gm.get('map_all')
gm.write_and_run(kwargs_init={'check_variables':True})
db = gm.model_instances['baseline'].out_db
gm.model_instances['baseline'].modelstat,gm.model_instances['baseline'].solvestat
db.get('qD').plot.bar(figsize=(4,3));
db.get('PbT').plot.bar(figsize=(4,3));
###Output
_____no_output_____ |
WMAP_power_spectrum_analysis_with_HealPy.ipynb | ###Markdown
###Code
#@title
!pip install healpy
!pip install astroML
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
# warning: due to a bug in healpy, importing it before pylab can cause
# a segmentation fault in some circumstances.
import healpy as hp
from astroML.datasets import fetch_wmap_temperatures
#------------------------------------------------------------
# Fetch the data
wmap_unmasked = fetch_wmap_temperatures(masked=False)
wmap_masked = fetch_wmap_temperatures(masked=True)
white_noise = np.ma.asarray(np.random.normal(0, 0.062, wmap_masked.shape))
#------------------------------------------------------------
# plot the unmasked map
fig = plt.figure(1)
hp.mollview(wmap_unmasked, min=-1, max=1, title='Unmasked map',
fig=1, unit=r'$\Delta$T (mK)')
#------------------------------------------------------------
# plot the masked map
# filled() fills the masked regions with a null value.
fig = plt.figure(2)
hp.mollview(wmap_masked.filled(), title='Masked map',
fig=2, unit=r'$\Delta$T (mK)')
#------------------------------------------------------------
# compute and plot the power spectrum
cl = hp.anafast(wmap_masked.filled(), lmax=1024)
ell = np.arange(len(cl))
cl_white = hp.anafast(white_noise, lmax=1024)
fig = plt.figure(3)
ax = fig.add_subplot(111)
ax.scatter(ell, ell * (ell + 1) * cl,
s=4, c='black', lw=0,
label='data')
ax.scatter(ell, ell * (ell + 1) * cl_white,
s=4, c='gray', lw=0,
label='white noise')
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$\ell(\ell+1)C_\ell$')
ax.set_title('Angular Power (not mask corrected)')
ax.legend(loc='upper right')
ax.grid()
ax.set_xlim(0, 1100)
plt.show()
wmap_unmasked
###Output
_____no_output_____ |
Auto_ViML_Demo.ipynb | ###Markdown
Data sets used in this tutorial courtesy of the UCI Machine Learning Repository.

Citation request: we suggest the following pseudo-APA reference format for referring to this repository: Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.

Dataset found here: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
###Code
import pandas as pd
datapath = '../data_sets/'
sep = ','
### Download the Breast Cancer data set from: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
df = pd.read_csv(datapath+'breast_cancer.csv',sep=sep, index_col=None)
#df = pd.read_csv(datapath+'boston.csv',sep=sep, index_col=None)
df = df.sample(frac=1.0, random_state=0)
target = 'diagnosis'
print(df.shape)
df.head()
num = int(0.9*df.shape[0])
train = df[:num]
test = df[num:]
sample_submission=''
scoring_parameter = ''
from autoviml.Auto_ViML import Auto_ViML
#### If Boosting_Flag = True => XGBoost, Fase=>ExtraTrees, None=>Linear Model
m, feats, trainm, testm = Auto_ViML(train, target, test, sample_submission,
scoring_parameter=scoring_parameter,
hyper_param='GS',feature_reduction=True,
Boosting_Flag=None,Binning_Flag=False,
Add_Poly=0, Stacking_Flag=False,
Imbalanced_Flag=False,
verbose=1)
###Output
Train (Size: 512,32) has Single_Label with target: ['diagnosis']
"
################### Binary-Class ##################### "
Shuffling the data set before training
Class -> Counts -> Percent
B: 322 -> 62.9%
M: 190 -> 37.1%
Selecting 2-Class Classifier...
Using GridSearchCV for Hyper Parameter tuning...
String or Multi Class target: diagnosis transformed as follows: {'B': 0, 'M': 1}
Classifying variables in data set...
Number of Numeric Columns = 30
Number of Integer-Categorical Columns = 0
Number of String-Categorical Columns = 0
Number of Factor-Categorical Columns = 0
Number of String-Boolean Columns = 0
Number of Numeric-Boolean Columns = 0
Number of Discrete String Columns = 0
Number of NLP String Columns = 0
Number of Date Time Columns = 0
Number of ID Columns = 1
Number of Columns to Delete = 0
31 Predictors classified...
This does not include the Target column(s)
1 variables removed since they were some ID or low-information variables
Test data has no missing values...
Number of numeric variables = 30
Number of variables removed due to high correlation = 11
Target Ready for Modeling: diagnosis
Starting Feature Engg, Extraction and Model Training for target diagnosis and 19 predictors
Number of numeric variables = 19
Number of variables removed due to high correlation = 4
Adding 0 categorical variables to reduced numeric variables of 15
Selected No. of variables = 15
Finding Important Features...
in 15 variables
in 12 variables
in 9 variables
in 6 variables
in 3 variables
Found 15 important features
Leaving Top 15 continuous variables as is...
No Entropy Binning specified
Rows in Train data set = 460
Features in Train data set = 15
Rows in held-out data set = 52
Finding Best Model and Hyper Parameters for Target: diagnosis...
Baseline Accuracy Needed for Model = 62.89%
Using Linear Model, Estimated Training time = 0.1 mins
Hyper Tuned Accuracy = 95.9%
Model Best Parameters = {'C': 551.0248979591837, 'class_weight': 'balanced', 'solver': 'saga'}
Finding Best Threshold for Highest F1 Score...
Found Optimal Threshold as 0.94 for Best F1: 1.00
Linear Model Results on Held Out Data Set:
Accuracy Score = 100.0%
precision recall f1-score support
0 1.00 1.00 1.00 28
1 1.00 1.00 1.00 24
micro avg 1.00 1.00 1.00 52
macro avg 1.00 1.00 1.00 52
weighted avg 1.00 1.00 1.00 52
[[28 0]
[ 0 24]]
Time taken for Ensembling: 0.3 seconds
########################################################
Completed Ensemble predictions on held out data
###Markdown
Use this to Test Regression Problems Only import numpy as npdef rmse(results, y_cv): return np.sqrt(np.mean((results - y_cv)**2, axis=0))from autoviml.Auto_ViML import print_regression_model_statsmodelname='Linear'print(rmse(test[target].values,testm[target+'_'+modelname+'_predictions'].values))print_regression_model_stats(test[target].values,testm[target+'_'+modelname+'_predictions'].values)
###Code
######## Use this to Test Classification Problems Only ####
modelname='Linear'
def accu(results, y_cv):
return (results==y_cv).astype(int).sum(axis=0)/(y_cv.shape[0])
from sklearn.metrics import classification_report, confusion_matrix
try:
print('Test results since target variable is present in test data:')
modelname = 'Bagging'
print(confusion_matrix(test[target].values,testm[target+'_'+modelname+'_predictions'].values))
print('\nTest Accuracy = %0.2f%%\n' %(100*accu(test[target].values, testm[target+'_'+modelname+'_predictions'].values)))
print(classification_report(test[target].values,testm[target+'_'+modelname+'_predictions'].values))
except:
print('No target variable present in test data. No results')
###Output
Test results since target variable is present in test data:
[[35 0]
[ 5 17]]
Test Accuracy = 91.23%
precision recall f1-score support
B 0.88 1.00 0.93 35
M 1.00 0.77 0.87 22
micro avg 0.91 0.91 0.91 57
macro avg 0.94 0.89 0.90 57
weighted avg 0.92 0.91 0.91 57
|
code/SageMaker-word2vec-kmeans.ipynb | ###Markdown
Introduction

Word2Vec is a popular algorithm used for generating dense vector representations of words in large corpora using unsupervised learning. The resulting vectors have been shown to capture semantic relationships between the corresponding words and are used extensively for many downstream natural language processing (NLP) tasks like sentiment analysis, named entity recognition and machine translation. SageMaker BlazingText provides efficient implementations of Word2Vec on:
- a single CPU instance
- a single instance with multiple GPUs (P2 or P3 instances)
- multiple CPU instances (distributed training)

In this notebook, we demonstrate how BlazingText can be used for distributed training of word2vec using multiple CPU instances.

Setup

Let's start by specifying:
- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. If you don't specify a bucket, SageMaker SDK will create a default bucket following a pre-defined naming convention in the same region.
- The IAM role ARN used to give SageMaker access to your data. It can be fetched using the **get_execution_role** method from sagemaker python SDK.
###Code
import sagemaker
from sagemaker import get_execution_role
import boto3
import json
sess = sagemaker.Session()
role = get_execution_role()
print(role) # This is the role that SageMaker would use to leverage AWS resources (S3, CloudWatch) on your behalf
bucket = sess.default_bucket() # Replace with your own bucket name if needed
print(bucket)
prefix = 'sagemaker/DEMO-blazingtext-text8' #Replace with the prefix under which you want to store the data if needed
###Output
_____no_output_____
###Markdown
Data Ingestion

Next, we download a dataset from the web on which we want to train the word vectors. BlazingText expects a single preprocessed text file with space-separated tokens, and each line of the file should contain a single sentence. In this example, let us train the vectors on the [text8](http://mattmahoney.net/dc/textdata.html) dataset (100 MB), which is a small (already preprocessed) version of a Wikipedia dump.
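Text8 is already in this format. If you were starting from your own raw corpus instead, a minimal (hypothetical) preprocessing sketch producing one space-separated sentence per line could look like this:

```python
# Hypothetical preprocessing for a custom corpus (not needed for text8):
# lowercase, keep alphanumeric tokens, write one space-separated sentence per line.
import re

sentences = ["Hello, world!", "BlazingText expects space-separated tokens."]
with open("my_corpus.txt", "w") as f:
    for s in sentences:
        tokens = re.findall(r"[a-z0-9]+", s.lower())
        f.write(" ".join(tokens) + "\n")
```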
###Code
!wget http://mattmahoney.net/dc/text8.zip -O text8.gz
# Uncompressing
!gzip -d text8.gz -f
###Output
_____no_output_____
###Markdown
After the data downloading and uncompressing is complete, we need to upload it to S3 so that it can be consumed by SageMaker to execute training jobs. We'll use Python SDK to upload these two files to the bucket and prefix location that we have set above.
###Code
train_channel = prefix + '/train'
sess.upload_data(path='text8', bucket=bucket, key_prefix=train_channel)
s3_train_data = 's3://{}/{}'.format(bucket, train_channel)
s3_train_data
###Output
_____no_output_____
###Markdown
Next we need to setup an output location at S3, where the model artifact will be dumped. These artifacts are also the output of the algorithm's training job.
###Code
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
s3_output_location
###Output
_____no_output_____
###Markdown
Training SetupNow that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sageMaker.estimator.Estimator`` object. This estimator will launch the training job.
###Code
region_name = boto3.Session().region_name
region_name
container = sagemaker.amazon.amazon_estimator.get_image_uri(region_name, "blazingtext", "latest")
print('Using SageMaker BlazingText container: {} ({})'.format(container, region_name))
###Output
_____no_output_____
###Markdown
Training the BlazingText model for generating word vectors Similar to the original implementation of [Word2Vec](https://arxiv.org/pdf/1301.3781.pdf), SageMaker BlazingText provides an efficient implementation of the continuous bag-of-words (CBOW) and skip-gram architectures using Negative Sampling, on CPUs and additionally on GPU[s]. The GPU implementation uses highly optimized CUDA kernels. To learn more, please refer to [*BlazingText: Scaling and Accelerating Word2Vec using Multiple GPUs*](https://dl.acm.org/citation.cfm?doid=3146347.3146354). BlazingText also supports learning of subword embeddings with CBOW and skip-gram modes. This enables BlazingText to generate vectors for out-of-vocabulary (OOV) words, as demonstrated in this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_word2vec_subwords_text8/blazingtext_word2vec_subwords_text8.ipynb). Besides skip-gram and CBOW, SageMaker BlazingText also supports the "Batch Skipgram" mode, which uses efficient mini-batching and matrix-matrix operations ([BLAS Level 3 routines](https://software.intel.com/en-us/mkl-developer-reference-fortran-blas-level-3-routines)). This mode enables distributed word2vec training across multiple CPU nodes, allowing almost linear scale up of word2vec computation to process hundreds of millions of words per second. Please refer to [*Parallelizing Word2Vec in Shared and Distributed Memory*](https://arxiv.org/pdf/1604.04661.pdf) to learn more. BlazingText also supports a *supervised* mode for text classification. It extends the FastText text classifier to leverage GPU acceleration using custom CUDA kernels. The model can be trained on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with the state-of-the-art deep learning text classification algorithms. For more information, please refer to [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html) or [the text classification notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_text_classification_dbpedia/blazingtext_text_classification_dbpedia.ipynb). To summarize, the following modes are supported by BlazingText on different types instances:| Modes | cbow (supports subwords training) | skipgram (supports subwords training) | batch_skipgram | supervised ||:----------------------: |:----: |:--------: |:--------------: | :--------------: || Single CPU instance | ✔ | ✔ | ✔ | ✔ || Single GPU instance | ✔ | ✔ | | ✔ (Instance with 1 GPU only) || Multiple CPU instances | | | ✔ | | |Now, let's define the resource configuration and hyperparameters to train word vectors on *text8* dataset, using "batch_skipgram" mode on two c4.2xlarge instances.
###Code
bt_model = sagemaker.estimator.Estimator(container,
role,
train_instance_count=2,
train_instance_type='ml.c4.2xlarge',
train_volume_size = 5,
train_max_run = 360000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sess)
###Output
_____no_output_____
###Markdown
Please refer to [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext_hyperparameters.html) for the complete list of hyperparameters.
###Code
bt_model.set_hyperparameters(mode="skipgram",
epochs=5,
min_count=5,
sampling_threshold=0.0001,
learning_rate=0.05,
window_size=5,
vector_dim=10,
negative_samples=5,
subwords=True, # Enables learning of subword embeddings for OOV word vector generation
min_char=3, # min length of char ngrams
max_char=6, # max length of char ngrams
batch_size=11, # = (2*window_size + 1) (Preferred. Used only if mode is batch_skipgram)
evaluation=True)# Perform similarity evaluation on WS-353 dataset at the end of training
###Output
_____no_output_____
###Markdown
Now that the hyper-parameters are setup, let us prepare the handshake between our data channels and the algorithm. To do this, we need to create the `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes.
###Code
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
content_type='text/plain', s3_data_type='S3Prefix')
data_channels = {'train': train_data}
###Output
_____no_output_____
###Markdown
We have our `Estimator` object, we have set the hyper-parameters for this object and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm. The following command will train the algorithm. Training the algorithm involves a few steps. Firstly, the instance that we requested while creating the `Estimator` classes is provisioned and is setup with the appropriate libraries. Then, the data from our channels are downloaded into the instance. Once this is done, the training job begins. The provisioning and data downloading will take some time, depending on the size of the data. Therefore it might be a few minutes before we start getting training logs for our training jobs. The data logs will also print out `Spearman's Rho` on some pre-selected validation datasets after the training job has executed. This metric is a proxy for the quality of the algorithm. Once the job has finished a "Job complete" message will be printed. The trained model can be found in the S3 bucket that was setup as `output_path` in the estimator.
###Code
bt_model.fit(inputs=data_channels, logs=True)
###Output
_____no_output_____
###Markdown
Hosting / InferenceOnce the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model. Note that we don't have to host on the same type of instance that we used to train. Because instance endpoints will be up and running for long, it's advisable to choose a cheaper instance for inference.
###Code
bt_endpoint = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge')
###Output
_____no_output_____
###Markdown
Getting vector representations for words

Use JSON format for inference

The payload should contain a list of words with the key as "**instances**". BlazingText supports content-type `application/json`.
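As a concrete illustration (the numbers are made up and the response is abridged — real vectors have `vector_dim` entries, 10 with the hyperparameters above), the request and response bodies look roughly like this; the inference code further down only relies on each response item exposing a `vector` key:

```python
# Hypothetical request/response shapes (vector values are made up, response abridged)
payload = {"instances": ["good", "service"]}      # what gets sent to the endpoint
response = [                                      # one entry per input word
    {"vector": [0.12, -0.03, 0.44]},
    {"vector": [0.07, 0.21, -0.15]},
]
```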
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('<csv file name>')  # placeholder: point this at your own CSV file
df.head()
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
import nltk, re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
# Let's get a list of stop words from the NLTK library
stop = stopwords.words('english')
# These words are important for our problem. We don't want to remove them.
additional_stopwords = ["a", "an", "the", "this", "that", "is", "it", "to", "and"]
stop.extend(additional_stopwords)
# New stop word list
#stop_words = [word for word in stop if word not in excluding]
# Initialize the lemmatizer
wl = WordNetLemmatizer()
# This is a helper function to map NTLK position tags
# Full list is available here: https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
def get_wordnet_pos(tag):
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def process_text_lemmitization(texts):
final_text_list=[]
for sent in texts:
filtered_sentence=[]
if not isinstance(sent,float):
sent = sent.lower() # Lowercase
sent = sent.strip() # Remove leading/trailing whitespace
sent = re.sub('\s+', ' ', sent) # Remove extra space and tabs
sent = re.compile('<.*?>').sub('', sent) # Remove HTML tags/markups:
for w in word_tokenize(sent):
# We are applying some custom filtering here, feel free to try different things
                # Check that it is not numeric, its length is > 3, and it is not a stop word
if(not w.isnumeric()) and (len(w)>3) and (w not in stop):
# Stem and add to filtered list
filtered_sentence.append(w)
lemmatized_sentence = []
# Get position tags
word_pos_tags = nltk.pos_tag(filtered_sentence)
# Map the position tag and lemmatize the word/token
for idx, tag in enumerate(word_pos_tags):
lemmatized_sentence.append(wl.lemmatize(tag[0], get_wordnet_pos(tag[1])))
lemmatized_text = " ".join(lemmatized_sentence)
final_text_list.append(lemmatized_text)
return final_text_list
df_processed = process_text_lemmitization(df['sentence'])
df_processed
def get_max_word_count(sent_list):
word_count_list = []
for sent in sent_list:
sent_words = word_tokenize(sent)
word_count = len(sent_words)
word_count_list.append(word_count)
return max(word_count_list)
max_word_count = get_max_word_count(df_processed)
max_word_count
max_columns = max_word_count*10
max_columns
def sentence_to_vec2(response):
sentence_vec = []
test_array = np.zeros(max_columns)
for vec in response:
sentence_vec.extend(vec['vector'])
sent_array = np.array(sentence_vec)
test_array[0:sent_array.shape[0]] = sent_array
return test_array
def process_sent_to_vec(sent_list):
sent_list_vecs = []
#print(sent_list)
for sent in sent_list:
#print(sent)
sent_words = word_tokenize(sent)
payload = {"instances" : sent_words}
#print(sent_words)
response = bt_endpoint.predict(json.dumps(payload))
vecs = json.loads(response)
sent_vectors = sentence_to_vec2(vecs)
sent_list_vecs.append(sent_vectors)
return sent_list_vecs
test_vec = process_sent_to_vec(df_processed)
test_vec_array = np.array(test_vec)
test_vec_array.shape
test_vec_array
train_data = test_vec_array.astype('float32')
np.savetxt("kmeans_train_data.csv", train_data[0:100], delimiter=",")
from sagemaker import KMeans
num_clusters = 5
kmeans = KMeans(role=role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
output_path='s3://'+ bucket +'/sentence-similarity/',
k=num_clusters)
%%time
kmeans.fit(kmeans.record_set(train_data))
test_channel = prefix + '/batch'
sess.upload_data(path='kmeans_train_data.csv', bucket=bucket, key_prefix=test_channel)
%%time
kmeans_transformer = kmeans.transformer(1, 'ml.m4.xlarge')
# start a transform job
batch_file = 'kmeans_train_data.csv'
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file) # use input data without ID column
kmeans_transformer.transform(input_location, split_type='Line')
kmeans_transformer.wait()
import json
import io
from urllib.parse import urlparse
def get_csv_output_from_s3(s3uri, file_name):
parsed_url = urlparse(s3uri)
bucket_name = parsed_url.netloc
prefix = parsed_url.path[1:]
s3 = boto3.resource('s3')
obj = s3.Object(bucket_name, '{}/{}'.format(prefix, file_name))
return obj.get()["Body"].read().decode('utf-8')
output = get_csv_output_from_s3(kmeans_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
%%time
kmeans_predictor = kmeans.deploy(initial_instance_count=1,
instance_type='ml.t2.medium')
%%time
result_kmeans=kmeans_predictor.predict(train_data[0:990])
result_kmeans
cluster_labels = [r.label['closest_cluster'].float32_tensor.values[0] for r in result_kmeans]
cluster_labels
df_results = pd.DataFrame(columns=['student_response'])
df_results['student_response'] = df_processed[0:990]
df_results['cluster'] = cluster_labels
df_results.head()
df_results.to_csv('results_word2vec_sm.csv',index=False)
pd.DataFrame(cluster_labels)[0].value_counts()
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
matplotlib.style.use('ggplot')
ax=plt.subplots(figsize=(6,3))
ax=sns.distplot(cluster_labels, kde=False)
title="Histogram of Cluster Counts"
ax.set_title(title, fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
Evaluation Let us now download the word vectors learned by our model and visualize them using a [t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) plot.
###Code
s3 = boto3.resource('s3')
key = bt_model.model_data[bt_model.model_data.find("/", 5)+1:]
s3.Bucket(bucket).download_file(key, 'model.tar.gz')
###Output
_____no_output_____
###Markdown
Uncompress `model.tar.gz` to get `vectors.txt`
###Code
!tar -xvzf model.tar.gz
###Output
_____no_output_____
###Markdown
If you set "evaluation" as "true" in the hyperparameters, then "eval.json" will be there in the model artifacts.The quality of trained model is evaluated on word similarity task. We use [WS-353](http://alfonseca.org/eng/research/wordsim353.html), which is one of the most popular test datasets used for this purpose. It contains word pairs together with human-assigned similarity judgments.The word representations are evaluated by ranking the pairs according to their cosine similarities, and measuring the Spearmans rank correlation coefficient with the human judgments.Let's look at the evaluation scores which are there in eval.json. For embeddings trained on the text8 dataset, scores above 0.65 are pretty good.
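The cell below simply cats the file; the score can also be read programmatically — a small sketch, assuming the archive was extracted in the working directory as above:

```python
import json

# eval.json is only produced when the `evaluation` hyperparameter is True
with open("eval.json") as f:
    eval_metrics = json.load(f)
print(eval_metrics)
```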
###Code
!cat eval.json
###Output
_____no_output_____
###Markdown
Now, let us do a 2D visualization of the word vectors
###Code
import numpy as np
from sklearn.preprocessing import normalize
# Read the 400 most frequent word vectors. The vectors in the file are in descending order of frequency.
num_points = 400
first_line = True
index_to_word = []
with open("vectors.txt","r") as f:
for line_num, line in enumerate(f):
if first_line:
dim = int(line.strip().split()[1])
word_vecs = np.zeros((num_points, dim), dtype=float)
first_line = False
continue
line = line.strip()
word = line.split()[0]
vec = word_vecs[line_num-1]
for index, vec_val in enumerate(line.split()[1:]):
vec[index] = float(vec_val)
index_to_word.append(word)
if line_num >= num_points:
break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
from sklearn.manifold import TSNE
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=10000)
two_d_embeddings = tsne.fit_transform(word_vecs[:num_points])
labels = index_to_word[:num_points]
from matplotlib import pylab
%matplotlib inline
def plot(embeddings, labels):
pylab.figure(figsize=(20,20))
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
plot(two_d_embeddings, labels)
###Output
_____no_output_____
###Markdown
Running the code above might generate a plot like the one below. t-SNE and Word2Vec are stochastic, so although when you run the code the plot won’t look exactly like this, you can still see clusters of similar words such as below where 'british', 'american', 'french', 'english' are near the bottom-left, and 'military', 'army' and 'forces' are all together near the bottom.  Stop / Close the Endpoint (Optional)Finally, we should delete the endpoint before we close the notebook.
###Code
sess.delete_endpoint(bt_endpoint.endpoint)
###Output
_____no_output_____ |
Odomero_WT_21_174/Data Analysis-Pandas-2/Project_.ipynb | ###Markdown
Project : Holiday weatherThere is nothing I like better than taking a holiday. In this project I am going to use the historic weather data from the Weather Underground for London to try to predict two good weather weeks to take off as holiday. Of course the weather in the summer of 2016 may be very different to 2014 but it should give some indication of when would be a good time to take a summer break. Getting the dataWeather Underground keeps historical weather data collected in many airports around the world. Right-click on the following URL and choose 'Open Link in New Window' (or similar, depending on your browser):http://www.wunderground.com/historyWhen the new page opens start typing 'LHR' in the 'Location' input box and when the pop up menu comes up with the option 'LHR, United Kingdom' select it and then click on 'Submit'. When the next page opens with London Heathrow data, click on the 'Custom' tab and select the time period From: 1 January 2014 to: 31 December 2014 and then click on 'Get History'. The data for that year should then be displayed further down the page. You can copy each month's data directly from the browser to a text editor like Notepad or TextEdit, to obtain a single file with as many months as you wish.Weather Underground has changed in the past the way it provides data and may do so again in the future. I have therefore collated the whole 2014 data in the provided 'London_2014.csv' file which can be found in the project folder. Now load the CSV file into a dataframe making sure that any extra spaces are skipped:
###Code
import warnings
warnings.simplefilter('ignore', FutureWarning)
import pandas as pd
moscow = pd.read_csv('Moscow_SVO_2014.csv', skipinitialspace=True)
###Output
_____no_output_____
###Markdown
Cleaning the data

First we need to clean up the data. I'm not going to make use of `'WindDirDegrees'` in my analysis, but you might in yours, so we'll rename the column `'WindDirDegrees<br />'` to `'WindDirDegrees'`.
###Code
moscow.head()
moscow = moscow.rename(columns={'WindDirDegrees<br />' : 'WindDirDegrees'})
###Output
_____no_output_____
###Markdown
remove the `` html line breaks from the values in the `'WindDirDegrees'` column.
###Code
moscow['WindDirDegrees'] = moscow['WindDirDegrees'].str.rstrip('<br />')
###Output
_____no_output_____
###Markdown
and change the values in the `'WindDirDegrees'` column to `float64`:
###Code
moscow['WindDirDegrees'] = moscow['WindDirDegrees'].astype('float64')
###Output
_____no_output_____
###Markdown
We definitely need to change the values in the `'Date'` column into values of the `datetime64` date type.
###Code
moscow['Date'] = pd.to_datetime(moscow['Date'])
###Output
_____no_output_____
###Markdown
We also need to change the index from the default to the `datetime64` values in the `'Date'` column so that it is easier to pull out rows between particular dates and display more meaningful graphs:
###Code
moscow.index = moscow['Date']
###Output
_____no_output_____
###Markdown
Finding a summer breakAccording to meteorologists, summer extends for the whole months of June, July, and August in the northern hemisphere and the whole months of December, January, and February in the southern hemisphere. So as I'm in the northern hemisphere I'm going to create a dataframe that holds just those months using the `datetime` index, like this:
###Code
from datetime import datetime
summer = moscow.loc[datetime(2014,6,1) : datetime(2014,8,31)]
###Output
_____no_output_____
###Markdown
I now look for the days with warm temperatures.
###Code
summer[summer['Mean TemperatureC'] >= 23]
###Output
_____no_output_____
###Markdown
Summer 2014 in Moscow: there are about 13 days with temperatures of 23 Celsius or higher. Best to see a graph of the temperature and look for the warmest period.So next we tell Jupyter to display any graph created inside this notebook:
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
Now let's plot the `'Mean TemperatureC'` for the summer:
###Code
summer['Mean TemperatureC'].plot(grid=True, figsize=(10,5))
###Output
_____no_output_____
###Markdown
Looking at the graph, the second half of July into the first half of August looks good for mean temperatures over 22.5 degrees C, so let's also put precipitation on the graph:
###Code
summer[['Mean TemperatureC', 'Precipitationmm']].plot(grid=True, figsize=(10,5))
###Output
_____no_output_____
###Markdown
The second half of July into 1st half of August is still looking good, with no heavy rain. Let's have a closer look by just plotting mean temperature and precipitation for July and August
###Code
july_aug = summer.loc[datetime(2014,7,21) : datetime(2014,8,14)]
july_aug[['Mean TemperatureC', 'Precipitationmm']].plot(grid=True, figsize=(10,5))
###Output
_____no_output_____ |
Clase1_IMDB_Limpieza_en_clase.ipynb | ###Markdown
IMDB MASTER IN DATA SCIENCE: NUCLIO PROFESSOR: JOSEPH GALLART CLASS 1: EDA + Data Cleaning
###Code
import pandas as pd
import numpy as np
import matplotlib as plt
imdb = pd.read_csv("datasets/IMDB.csv", sep=";", index_col=[0])
imdb
imdb.shape
type(imdb.shape)
imdb.info()
# tells us, for each column, whether there are null values -> NaN
imdb.isnull().any()
# to get the columns with NaN values
columna_nulos = imdb.columns[imdb.isnull().any()]
columna_nulos
# saca por la columna "color" las value y cuantas veces aparecen
imdb["color"].value_counts()
# saca por la columna "director_name" las value y cuantas veces aparecen
imdb["director_name"].value_counts()
# show all the rows where director_name is NaN
# we do this so that later we can replace the NaN values with "", because the MODEL does not understand NaN but does understand NULL
imdb[imdb["director_name"].isnull()]
# SLICING
# careful: making copies of the df this way still points to the same underlying data
director_nulo_mayorde140mins = imdb[ (imdb["director_name"].isnull()) & (imdb["duration"] >= 140) ]
# apply something to the result
imdb["gross"].describe().apply("{0:.2f}".format)
imdb["gross"].min()
imdb["gross"].max()
imdb["gross"].mean()
imdb[(imdb["gross"] > 600000000)].head(10)
imdb.hist(column="gross")
imdb[ (imdb["gross"] > 500000000) & (imdb["gross"] < 5600000000) ]
# check for duplicated values (careful: the whole row has to be a duplicate, otherwise nothing comes up)
imdb.duplicated()
# show duplicated ROWS (careful: every value in the row has to be duplicated, otherwise nothing comes up)
# keep=False -> makes both the row and its duplicate be included in the result
imdb[imdb.duplicated(keep=False)]
###Output
_____no_output_____
###Markdown
When keep is left at its default ('first'): only the later duplicate rows are marked (this is the setting to use when you perform the drop()). When keep=False: both rows are marked (not only the later duplicate).
###Code
# example of using keep=False
df_test = pd.DataFrame(["a","b","c","d","a","b"])
print(df_test.T)
print("")
# WITHOUT keep=False -> we ask it to return only the duplicated rows (duplicates only)
df_test_only_duplicate = df_test[df_test.duplicated()]
print(df_test_only_duplicate.T)
print("")
# WITH keep=False -> we ask it to return all the rows involved in the duplication (original and duplicates)
df_test_only_duplicate_ALL = df_test[df_test.duplicated(keep=False)]
print(df_test_only_duplicate_ALL.T)
###Output
0 1 2 3 4 5
0 a b c d a b
4 5
0 a b
0 1 4 5
0 a b a b
###Markdown
Deep copy creates new id's of every object it contains while normal copy only copies the elements from the parent and creates a new id for a variable to which it is copied to.
###Code
# we don't want to work on the original IMDB dataframe, so we create a copy first
# this creates a new DF, with a new id
imdb_sin_valores_duplicado = imdb.copy(deep=True)
# what follows is just to understand the point about THE COPIES that need to be made
# print out the id of the new DF
# N.B. the id of an object changes on every EXECUTION
print("id del DF imdb: " + str(id(imdb)))
print("id del DF imdb_sin_valores_duplicado: " + str(id(imdb_sin_valores_duplicado)))
# like this it is just a preview
# imdb_sin_valores_duplicado.drop_duplicates()
# ---> DROP THE DUPLICATED ROWS <---
# with inplace=True it is 'for real': the DF itself will be modified
imdb_sin_valores_duplicado.drop_duplicates(inplace=True)
###Output
_____no_output_____
###Markdown
When inplace = True : the data is modified in place, which means it will return nothing and the dataframe is now updated. When inplace = False : (which is the default) then the operation is performed and it returns a copy of the object. You then need to save it to something.
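A tiny illustration of the difference, on a throwaway frame:

```python
import pandas as pd

tmp = pd.DataFrame({"a": [1, 1, 2]})   # throwaway example frame
deduped = tmp.drop_duplicates()        # default inplace=False: returns a new frame, tmp keeps 3 rows
tmp.drop_duplicates(inplace=True)      # inplace=True: returns None, tmp itself now has 2 rows
```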
###Code
imdb_sin_valores_duplicado.info()
# reset THE INDEX, which was left with gaps when we dropped rows
# drop=True -> discard the old index, and inplace=True -> makes it permanent
imdb_sin_valores_duplicado.reset_index(drop=True, inplace=True)
imdb_sin_valores_duplicado.info()
# DROP THE color COLUMN (because it is irrelevant)
imdb_sin_valores_duplicado.drop(columns=["color"], inplace=True)
# check that the color column has been removed (yes! now there are only 12 columns)
imdb_sin_valores_duplicado.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 93 entries, 0 to 92
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 director_name 82 non-null object
1 duration 93 non-null int64
2 gross 86 non-null float64
3 genres 92 non-null object
4 movie_title 93 non-null object
5 title_year 93 non-null int64
6 language 93 non-null object
7 country 93 non-null object
8 budget 89 non-null float64
9 imdb_score 93 non-null float64
10 actors 93 non-null object
11 movie_facebook_likes 93 non-null int64
dtypes: float64(3), int64(3), object(6)
memory usage: 8.8+ KB
###Markdown
Fill in the null values
###Code
# FILL the NaN with "" (empty) values, because the MODEL does not understand NaN or NULL but does understand "" (empty)
imdb_sin_valores_duplicado["director_name"].fillna("", inplace=True)
# check that there are no NaN values left in the director_name column
imdb_sin_valores_duplicado[imdb_sin_valores_duplicado["director_name"].isnull()]
# VERIFY that they are gone by counting the isnull() values of the "director_name" column
imdb_sin_valores_duplicado["director_name"].isnull().sum()
# the empty director_name values (there are 11)
imdb_sin_valores_duplicado[imdb_sin_valores_duplicado["director_name"] =="" ]
imdb_sin_valores_duplicado["director_name"].value_counts()
# in the first row, the 11 == the number of empty director_name values
imdb_sin_valores_duplicado["gross"].isnull().sum()
imdb_sin_valores_duplicado[ imdb_sin_valores_duplicado["gross"].isnull() ]
###Output
_____no_output_____
###Markdown
Decide how to handle the NaN values of "gross": 1) set them to ZERO, or 2) impute a central value (mean or median)
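A quick look at the candidate fill values (the next cells go with the mean); this is only a sketch, assuming the dataframe from the cells above:

```python
# Candidate fill values for the missing "gross" entries (the next cells use the mean)
print("mean  :", imdb_sin_valores_duplicado["gross"].mean())
print("median:", imdb_sin_valores_duplicado["gross"].median())
# option 2 with the median would be:
# imdb_sin_valores_duplicado["gross"].fillna(imdb_sin_valores_duplicado["gross"].median(), inplace=True)
```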
###Code
# compute the mean
imdb_sin_valores_duplicado["gross"].mean()
# assign the mean to the NaN values of "gross"
imdb_sin_valores_duplicado["gross"].fillna( imdb_sin_valores_duplicado["gross"].mean(), inplace=True )
# check whether there are NaN values left in "gross"
imdb_sin_valores_duplicado["gross"].isnull().sum()
#
imdb_sin_valores_duplicado.isnull().sum()
###Output
_____no_output_____
###Markdown
Let's look at the GENRES. The genres field holds 4/5/6 genre strings — how do we handle that? We want to create one column for each genre, and then put 1 if the movie is of that genre and 0 if it is not.
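The cells below only split the string into separate columns; a compact way to get the one-column-per-genre 0/1 encoding described above is pandas' `str.get_dummies`, shown here as a sketch rather than what the class ended up running:

```python
# One indicator column per genre: 1 if the movie has that genre, 0 otherwise
genre_dummies = imdb_sin_valores_duplicado["genres"].str.get_dummies(sep="|")
imdb_one_hot = imdb_sin_valores_duplicado.join(genre_dummies)
imdb_one_hot.head()
```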
###Code
# CREATE a DF with all the genres using the SPLIT() METHOD ---> it is used to separate them
# the .str() accessor transforms the column into a string series ---> an object that supports the .split() method
lista_de_generos = imdb_sin_valores_duplicado["genres"].str.split("|", expand=True)
# ATTRIBUTE EXPAND the split strings into separate columns.
# If True, return DataFrame/MultiIndex expanding dimensionality ---> pandas.core.frame.DataFrame
# If False, return Series/Index, containing lists of strings ---> pandas.core.series.Series
lista_de_generos
# FILL the None entries with "" (empty) values, because the MODEL does not understand NaN or NULL but does understand "" (empty)
lista_de_generos.fillna("", inplace=True)
# a column is CREATED and assigned a value (we would do this as many times as there are genre slots)
#imdb_sin_valores_duplicado["genero_1"] = lista_de_generos[0]
#imdb_sin_valores_duplicado["genero_2"] = lista_de_generos[1]
#imdb_sin_valores_duplicado["genero_3"] = lista_de_generos[2]
#imdb_sin_valores_duplicado["genero_4"] = lista_de_generos[3]
#imdb_sin_valores_duplicado["genero_5"] = lista_de_generos[4]
# delete the old "genres" column
del(imdb_sin_valores_duplicado["genres"])
imdb2 = imdb_sin_valores_duplicado
imdb2
imdb2["duration"].hist()
# this can also be written as: imdb2.hist(column="duration")
imdb2[imdb2["duration"]<=50]
# there are two anomalous values in duration -> we fix them with a WHERE
# we use a WHERE to assign the mean to every row of the duration column that is <=50; otherwise the original value is kept
# np.where( condition, value to use if the condition is True, value to use if the condition is False )
imdb2["duration"]=np.where( imdb2["duration"]<=50, imdb2["duration"].mean(), imdb2["duration"] )
# this is a DataFrame with all the genres
lista_de_generos
###Output
_____no_output_____ |
Son/Samir/Arranger.ipynb | ###Markdown
Sound function: Arranger
###Code
# Draft ideas: arranging a sound consists of giving it colour and musical sonorities.
###Output
_____no_output_____ |
Prueba.ipynb | ###Markdown
Data scientist test. Part 1 - Loading data and basic statistics: In the ```Data``` folder you will find a file called ```diamonds.csv```. This file contains information on 53940 diamonds, including price, color, weight, etc. You can consult the full characteristics of the dataset at [this link](https://www.kaggle.com/shivam2503/diamonds). 1. Load the file into a pandas dataframe. 2. Use the methods you know to describe the basic properties of the data.
###Code
# Answer to part 1
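# One possible sketch (not an official answer; assumes the file lives at Data/diamonds.csv):
import pandas as pd
diamonds = pd.read_csv("Data/diamonds.csv")
print(diamonds.shape)
print(diamonds.head())
print(diamonds.describe(include="all"))
diamonds.info()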
###Output
_____no_output_____
###Markdown
Part 2 - Unsupervised learning: You want to find structure in the data you have been given. 1. Starting from the dataframe you loaded in the first part, use an unsupervised learning algorithm to find clusters of diamonds with similar properties. 2. In a markdown cell, describe a metric/method that can be used to evaluate the quality of your clusters. 3. Vary $k$ (the number of clusters) from 1 to 10 and plot your metric as a function of $k$. 4. Which $k$ best describes your data?
###Code
# Answer to part 2
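# One possible sketch (assumptions: file at Data/diamonds.csv, numeric columns only,
# features standardised, inertia / elbow method as the cluster-quality metric):
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
diamonds = pd.read_csv("Data/diamonds.csv")
X_num = StandardScaler().fit_transform(diamonds.select_dtypes("number"))
inertias = []
for k in range(1, 11):
    inertias.append(KMeans(n_clusters=k, random_state=0).fit(X_num).inertia_)
plt.plot(range(1, 11), inertias, marker="o")
plt.xlabel("k (number of clusters)")
plt.ylabel("inertia")
plt.show()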
###Output
_____no_output_____
###Markdown
Part 3 - Dimensionality reduction and regression: You want to predict the price of a diamond from its characteristics (every column in the dataset other than price). However, you suspect that several columns are redundant - that is - some columns add no new information. 1. Perform a dimensionality reduction of the data to avoid redundant information. Make sure that this new space explains at least 90% of the variance of the data. 2. In a markdown cell, describe a metric that can be used to evaluate the quality of your regression and its ability to explain the data. 3. Split the data into a training set and an evaluation set. 4. In this new space, train a regression algorithm to predict diamond prices on the training set. Evaluate your algorithm with your metric on the test set. How well does your algorithm do? Would you take it to production? Why?
###Code
# Answer to part 3
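# One possible sketch (assumptions: file at Data/diamonds.csv, categoricals one-hot encoded,
# features standardised before PCA, at least 90% of the variance kept, R^2 as the metric):
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
diamonds = pd.read_csv("Data/diamonds.csv")
X = pd.get_dummies(diamonds.drop(columns=["price"]), drop_first=True)
y = diamonds["price"]
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
scaler = StandardScaler().fit(X_tr)
pca = PCA(n_components=0.90).fit(scaler.transform(X_tr))  # keep >= 90% of the variance
reg = LinearRegression().fit(pca.transform(scaler.transform(X_tr)), y_tr)
print("R^2 on the test set:", r2_score(y_te, reg.predict(pca.transform(scaler.transform(X_te)))))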
###Output
_____no_output_____
###Markdown
Part 4 - Classification: In the ```Data``` folder there is a file called ```emotions.csv``` containing information about the electromagnetic waves emitted by the brains of 2 patients. There are a total of 2549 columns with 2132 entries. Your job is to predict the person's mood (the label column): NEUTRAL, POSITIVE or NEGATIVE from the other columns. You can see an extensive description of the dataset [here](https://www.kaggle.com/birdy654/eeg-brainwave-dataset-feeling-emotions). Implement whatever pipeline you consider necessary to carry out this task. You are free to choose the tools and classification methods you wish, as long as you meet the following: 1. Implement at least 2 classification algorithms. 2. Plot the confusion matrix and the precision-recall curves for each algorithm. Compare the results of your classifiers. 3. Which algorithm is better? 4. Do you consider the best algorithm good enough to go to production? Why or why not?
###Code
# Answer to part 4
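# One possible sketch (assumptions: file at Data/emotions.csv with a 'label' column;
# logistic regression and random forest as the two classifiers):
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
emotions = pd.read_csv("Data/emotions.csv")
X = emotions.drop(columns=["label"])
y = emotions["label"]
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
for model in [LogisticRegression(max_iter=1000), RandomForestClassifier(random_state=0)]:
    model.fit(X_tr, y_tr)
    y_hat = model.predict(X_te)
    print(type(model).__name__)
    print(confusion_matrix(y_te, y_hat))
    print(classification_report(y_te, y_hat))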
###Output
_____no_output_____
###Markdown
Part 5 - Deployment: Deploy the best classifier from the previous stage behind an endpoint. The endpoint must process the JSON object in the *body* of a POST request. The format of the JSON object is the following: ```{"input":[val1,val2,val3, ... ,val2548]}``` The order of the values corresponds to the order of the columns of the `emotions.csv` file. The list has 2548 values, which are the 2548 inputs your classifier must take. The endpoint must return a JSON of the following form if the request was successful: ```{"output":"clasfOutput"}``` where "clasfOutput" corresponds to the classifier's prediction (NEUTRAL, POSITIVE or NEGATIVE).
###Code
# Answer to part 5 (endpoint URL)
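# One possible sketch (assumptions: Flask for serving, a pre-trained classifier saved as
# model.pkl and loaded with joblib; the endpoint URL itself depends on where this is deployed):
from flask import Flask, request, jsonify
import joblib
app = Flask(__name__)
model = joblib.load("model.pkl")

@app.route("/predict", methods=["POST"])
def predict():
    values = request.get_json()["input"]        # list of 2548 feature values
    prediction = model.predict([values])[0]     # NEUTRAL / POSITIVE / NEGATIVE
    return jsonify({"output": str(prediction)})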
###Output
_____no_output_____
###Markdown
We see that the data sit mostly on the left-hand side of the histogram, so we will lower the threshold to a value that balances them, for example 0.6
###Code
def modelate_with_different_connectivity(window_size, label, connectivity_number_total, G, conn_empty_values):
total_graphs_class_0, total_graphs_class_1 = [], []
for i in range(connectivity_number_total):
conn = search_key(connectivity_measures, i)
bands = search(connectivity_measures, conn)[1]
# The threshold can be omitted to use the default one
graphs, _ = G.modelate(window_size = window_size, connectivity = conn, bands = bands, threshold = 0.6)
conn_empty_values = test_empty(graphs, conn_empty_values, i)
if(int(label)):
total_graphs_class_1 = total_graphs_class_1 + list(graphs.values())
else:
total_graphs_class_0 = total_graphs_class_0 + list(graphs.values())
return total_graphs_class_0, total_graphs_class_1, conn_empty_values
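# Hedged sketch (an assumption, not part of the original pipeline): one quick way to justify the
# 0.6 threshold mentioned above is to sweep a few values on a single recording and count the
# resulting edges, reusing G.modelate exactly as in the function above.
def edges_per_threshold(G, window_size, conn, bands, thresholds=(0.5, 0.6, 0.7, 0.8)):
    counts = {}
    for t in thresholds:
        graphs, _ = G.modelate(window_size=window_size, connectivity=conn, bands=bands, threshold=t)
        counts[t] = sum(len(g.edges) for g in graphs.values())
    return counts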
graphs_class_0, graphs_class_1 = open_data_directories(path, window_size_class_0, window_size_class_1, con_number_total)
print('\n=========================================')
print('Total graphs Generated for class 0: ', len(graphs_class_0))
print('Total graphs Generated for class 1: ', len(graphs_class_1))
graphs = [graphs_class_0, graphs_class_1]
#2) Visualize graphs
#============================================================================================================================================================================
def visualize_graphs(graphs, selected):
G = eegraph.Graph()
for i in range(selected[0], selected[1]+1):
G.visualize(graphs[i])
wanted = [0, 0] # Graph position
visualize_graphs(graphs_class_1, wanted)
#3)Histogram
#============================================================================================================================================================================
def edges_histogram(graphs, label):
total_edges, edges_dict = [], {}
for i in range(len(graphs)):
edges = [e for e in graphs[i].edges]
edges_dict[str(i+1)] = len(edges)
keys = edges_dict.keys()
values = edges_dict.values()
plt.figure(figsize=(30,15))
plt.title('Histogram: Edges per Graph. Class ' + str(label), fontsize=20)
plt.hist(values, bins=max(values)+1-min(values))
plt.xlabel('Number of edges')
plt.ylabel('Count')
plt.bar(keys, values, align='center')
plt.show()
print('\n=====================================================================')
for j in range(2):
edges_histogram(graphs[j], j)
#4)Empty graphs
#============================================================================================================================================================================
def empty_graphs(graphs):
empty_graphs, empty_dict = 0, {}
for i in range(len(graphs)):
if(nx.is_empty(graphs[i])):
empty_dict[i] = True
empty_graphs += 1
else:
empty_dict[i] = False
return empty_graphs, empty_dict
print('\n=====================================================================')
empty_amount, graphs_dict = [None]*2, [None]*2
for j in range(2):
empty_amount[j], graphs_dict[j] = empty_graphs(graphs[j])
print('\nNumber of Empty graphs. Class ' + str(j) + ': ' , empty_amount[j])
print('Empty graphs (True).', graphs_dict[j])
#5)Erase Empty Graphs
#============================================================================================================================================================================
def delete_graphs(graphs, graphs_dict):
for key,value in reversed(graphs_dict.items()):
if(value):
print('Deleting graph in index:', str(key))
del graphs[key]
return graphs
print('\n=====================================================================')
print('Deleting empty graphs.')
for j in range(2):
if (empty_amount[j]):
print('\nGraphs in Class', j, ':')
graphs[j] = delete_graphs(graphs[j], graphs_dict[j])
print('\nTotal graphs for class 0: ', len(graphs[0]))
print('Total graphs for class 1: ', len(graphs[1]))
#6)Mean value and Standard Deviation for graphs
#============================================================================================================================================================================
def mean_std(graphs):
edges_weights, edges_dict = [], {}
for i in range(len(graphs)):
edges = [d.get('weight') for e1,e2,d in graphs[i].edges(data=True)]
edges_weights = edges_weights + edges
print('Mean:', round(np.mean(edges_weights),5))
print('STD:', round(np.std(edges_weights),5))
print('\n=====================================================================')
print('Mean values and Standard Deviation for edges in the graphs.')
for j in range(2):
print('\nClass', j, ':')
mean_std(graphs[j])
###Output
data/0/1_presalva_2.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_2.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_3.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_3.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_1.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_1.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_4.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_4.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_13.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_13.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_12.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_12.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_5.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_5.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_7.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_7.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_10.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_10.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_11.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_11.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_6.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_6.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_8.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_8.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/0/1_presalva_9.edf 0
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/0/1_presalva_9.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 10.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4096.0), (4096.0, 4608.0), (4608.0, 5120.0), (5120.0, 5631.0)]
Threshold: 0.6
Number of graphs created: 11
Empty: [0]
data/1/1_espasmo_13.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_13.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 5.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3071.0)]
Threshold: 0.6
Number of graphs created: 6
Empty: [0]
data/1/1_espasmo_12.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_12.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_10.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_10.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 7.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3072.0), (3072.0, 3584.0), (3584.0, 4095.0)]
Threshold: 0.6
Number of graphs created: 8
Empty: [0]
data/1/1_espasmo_11.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_11.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
data/1/1_espasmo_15.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_15.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 2.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1535.0)]
Threshold: 0.6
Number of graphs created: 3
Empty: [0]
data/1/1_espasmo_14.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_14.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_16.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_16.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_17.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_17.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
data/1/1_espasmo_7.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_7.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_6.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_6.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_4.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_4.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_5.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_5.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 2.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1535.0)]
Threshold: 0.6
Number of graphs created: 3
Empty: [0]
data/1/1_espasmo_1.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_1.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_2.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_2.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
data/1/1_espasmo_3.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_3.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_8.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_8.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_9.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_9.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_19.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_19.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
data/1/1_espasmo_18.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_18.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 5.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2560.0), (2560.0, 3071.0)]
Threshold: 0.6
Number of graphs created: 6
Empty: [0]
data/1/1_espasmo_20.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_20.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
data/1/1_espasmo_21.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_21.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 2.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1535.0)]
Threshold: 0.6
Number of graphs created: 3
Empty: [0]
data/1/1_espasmo_23.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_23.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 3.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2047.0)]
Threshold: 0.6
Number of graphs created: 4
Empty: [0]
data/1/1_espasmo_22.edf 1
Extracting EDF parameters from /Users/juanlatasareinoso/Downloads/Seminario/EEGRAPH/data/1/1_espasmo_22.edf...
EDF file detected
Setting channel info structure...
Creating raw.info structure...
[1mEEG Information.
[0mNumber of Channels: 19
Sample rate: 512.0 Hz.
Duration: 4.998 seconds.
Channel Names: ['EEG Fp1', 'EEG Fp2', 'EEG F4', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG P4', 'EEG P3', 'EEG O2', 'EEG O1', 'EEG F8', 'EEG F7', 'EEG T4', 'EEG T3', 'EEG T6', 'EEG T5', 'EEG Pz', 'EEG Fz', 'EEG Cz']
=========================================
[1mModel Data.[0m
Pearson_correlation_Estimator()
Intervals: [(0, 512.0), (512.0, 1024.0), (1024.0, 1536.0), (1536.0, 2048.0), (2048.0, 2559.0)]
Threshold: 0.6
Number of graphs created: 5
Empty: [0]
=========================================
Total graphs Generated for class 0: 143
Total graphs Generated for class 1: 103
=====================================================================
|
notebooks/obsolete/CovidRatesByStatesMexico.ipynb | ###Markdown
COVID-19 Case Rates for States in Mexico [Work in progress] This notebook uses data from [COVID-19 Mexico, Gobierno de Mexico](https://coronavirus.gob.mx/datos)
###Code
import math
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import DateFormatter
from py2neo import Graph
import ipywidgets as widgets
pd.options.display.max_rows = None # display all rows
pd.options.display.max_columns = None # display all columns
###Output
_____no_output_____
###Markdown
Connect to COVID-19-Net Knowledge Graph
###Code
graph = Graph("bolt://132.249.238.185:7687", user="reader", password="demo")
###Output
_____no_output_____
###Markdown
Select Metric to display
###Code
metric_widget = widgets.Dropdown(options=('confirmedRate', 'deathRate'), description='Metric')
display(metric_widget)
metric = metric_widget.value
print('Metric:', metric)
# start date for time series
start_date = '2020-04-01'
###Output
_____no_output_____
###Markdown
Get confirmed cases and deaths for all states in Mexico
###Code
query = """
// get all states (admin1) in Mexico
MATCH (a:Admin1)-[:IN]->(:Country{name: 'Mexico'})
// get COVID-19 cases for all states
MATCH (a)<-[:REPORTED_IN]-(c:Cases{source: 'GOBMX', aggregationLevel: 'Admin1'})
WHERE c.date >= date($start_date)
RETURN a.name AS name, c.date AS date,
c.cases*100000.0/c.population AS confirmedRate,
c.deaths*100000.0/c.population AS deathRate,
c.cases AS cases,
c.deaths AS deaths,
c.population AS population
ORDER BY c.date ASC, a.name
"""
df = graph.run(query, start_date=start_date).to_data_frame()
df.tail(38)
###Output
_____no_output_____
###Markdown
Reformat data
###Code
# convert neo4j date object to datetime
df['date'] = df['date'].astype(str)
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=False)
# pivot table
df_date = df.pivot(index='date', columns='name', values=metric)
df_date.fillna(0, inplace=True)
df_date.head()
ax = df_date.plot(figsize=(16, 8), legend=False, title=f'{metric} for states in Mexico');
ax.set_xlabel('Date');
ax.set_ylabel(f'{metric} per 100,000');
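# Optional smoothing sketch (an assumption, not part of the original analysis): a 7-day rolling
# mean makes the daily rate curves easier to compare, and can be plotted the same way as df_date.
df_smooth = df_date.rolling(window=7, min_periods=1).mean()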
###Output
_____no_output_____
###Markdown
Case rate (per 100,000) by State
###Code
# dimensions for subplot layout
cols = 5
rows = math.ceil(df_date.shape[1]/cols)
ax = df_date.plot(subplots=True, layout=(rows,cols), sharey=True, figsize=(16, 2*rows));
###Output
_____no_output_____ |
Functional_Thinking/Lab/30B-numerical-and-logical-functions.ipynb | ###Markdown
Numerical and logical functions for working with iterators. These functions are always available; you don't need to import them. `any()`: checks if at least one element evaluates to `True`. Without `any()`:
###Code
none_true = [0, 0, 0]
some_true = [0, 1, 0]
all_true = [1, 1, 1]
def check_any(i):
for e in i:
if e:
return True
return False
check_any(none_true)
###Output
_____no_output_____
###Markdown
With `any()`:
###Code
any(none_true)
###Output
_____no_output_____
###Markdown
An equivalent implementation using a generator expression:
###Code
True in (bool(e) for e in none_true)
###Output
_____no_output_____
###Markdown
`all()`: checks if all elements evaluate to `True`. Without `all()`:
###Code
def check_all(i):
for e in i:
if not e:
return False
return True
check_all(none_true)
###Output
_____no_output_____
###Markdown
With `all()`:
###Code
all(none_true)
###Output
_____no_output_____
###Markdown
An equivalent implementation using a generator expression:
###Code
False not in (bool(e) for e in none_true)
###Output
_____no_output_____
###Markdown
sorted(), min(), max(), and sum() `sorted()` takes an Iterator with numeric elements, sorts it, and returns a `list`:
###Code
numbers = [2, -1, 2, 4]
sorted(numbers)
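# sorted() also accepts reverse= (and key=) arguments, e.g. descending order:
sorted(numbers, reverse=True)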
###Output
_____no_output_____
###Markdown
Without `min()` and `max()`:
###Code
sorted(numbers)[-1]
###Output
_____no_output_____
###Markdown
With `min()` and `max()`:
###Code
max(numbers)
###Output
_____no_output_____
###Markdown
Without `sum()`:
###Code
def get_sum(i):
total = 0
for e in i:
total += e
return total
get_sum(numbers)
###Output
_____no_output_____
###Markdown
With `sum()`:
###Code
sum(numbers)
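# sum() also works directly on a generator expression (same pattern as any()/all() above),
# e.g. summing only the positive numbers:
sum(e for e in numbers if e > 0)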
###Output
_____no_output_____ |
notebooks/extracting age and gender.ipynb | ###Markdown
In this notebook we estimate the age and gender of a person based on their picture. For this we consider only a subset of all images: those containing people that have a name associated with them. We do the following processing: 1. get ids for photos where only one person is in the image; 2. get the list of images associated with each person; 3. use py-agender to get the age and gender. Finally we do some evaluation. 1. get ids of photos where only one person is in the image
###Code
df = pd.read_pickle('data/named_subjects.pkl')
df.head()
person_per_image = df.names.map(len)
person_per_image.value_counts()
individual_portraits = person_per_image == 1
###Output
_____no_output_____
###Markdown
How many pictures do we have of one person?
###Code
individual_portraits_df = df[individual_portraits].copy()
individual_portraits_df['name'] = individual_portraits_df.names.map(lambda x: x[0])
individual_portraits_df.groupby('name').id.count().sort_values()
###Output
_____no_output_____
###Markdown
49 people don't have portraits. That's okay, we focus on the people that do.
###Code
unmatched_people = set([i for x in df.names.to_list() for i in x])\
.difference(set(individual_portraits_df['name'].tolist()))
len(unmatched_people)
###Output
_____no_output_____
###Markdown
2. get the list of images associated with each person
###Code
personal_portrait_image = individual_portraits_df.groupby('name').apply(lambda x: x.id.tolist())
personal_portrait_image = personal_portrait_image.rename('id').reset_index()
personal_portrait_image
agender = PyAgender()
###Output
WARNING:tensorflow:From /Users/lguillain/opt/anaconda3/envs/fdh/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
###Markdown
3. get age-gender labels. The API not only gives us an age and gender estimate, but also the rectangle bounding the face. We keep that, as it can be used for the face map.
###Code
def get_image(doc):
    url = doc+'/f1.highres.jpg'
    response = requests.get(url)
    # decode the response as an RGB image and convert it to a numpy array
    img = Image.open(BytesIO(response.content)).convert('RGB')
    img = np.array(img)
    return img
def get_age_gender_estimates(image_docs):
i = 0
estimates = []
#handle case where for one image of personn we can't get estimates
while i < len(image_docs) and len(estimates) == 0:
img = get_image(image_docs[i])
retries = 0
while retries < 5 and len(estimates) == 0:
estimates = agender.detect_genders_ages(img)
retries += 1
i = i+1
if estimates:
# use first estimate as it is most likely one
result = estimates[0]
result['number'] = len(estimates)
result['id'] = image_docs[i-1]
return result
return {}
if False:
age_gender_lables = personal_portrait_image.id.map(get_age_gender_estimates)
age_gender_lables = pd.DataFrame(age_gender_lables.tolist())
age_gender_lables['name'] = personal_portrait_image.name
age_gender_lables.to_json('data/age_gender_labeles.json')
age_gender_lables = pd.read_json('data/age_gender_labeles.json')
true_gender = pd.read_pickle('data/bnf_table_full.pkl')
len(true_gender) - len(age_gender_lables)
true_gender = true_gender[['name', 'gender']]
age_gender_lables = pd.merge(true_gender, age_gender_lables, on='name', suffixes=('_true', '_estimated'),how='left')
age_gender_lables.gender_true.value_counts()
age_gender_lables['gender_estimates_binary'] =\
age_gender_lables.loc[age_gender_lables.gender_estimated.notna(), 'gender_estimated'].map(lambda x: 'féminin' if x>.5 else 'masculin')
age_gender_lables.gender_estimates_binary.isna().value_counts()
age_gender_lables.gender_estimates_binary.value_counts()
age_gender_lables.to_json('data/age_gender_labeles_augmented.json')
###Output
_____no_output_____
###Markdown
Evaluation of the method. The evaluation of the algorithm itself is presented on the wiki page of py-agender: https://github.com/yu4u/age-gender-estimation
###Code
len(age_gender_lables[age_gender_lables.age.isna()])
###Output
_____no_output_____
###Markdown
We can't get labels for 46 people.
###Code
unfound = age_gender_lables[age_gender_lables.age.isna()].name.tolist()
personal_portrait_image[personal_portrait_image.name.isin(unfound)].id.map(lambda x:x[0])
###Output
_____no_output_____
###Markdown
number of faces that we got:
###Code
age_gender_lables.number.value_counts()
age_gender_lables.age.plot(kind='hist', bins=100)
plt.title('histogram age distribution')
plt.xlabel('age in years')
###Output
_____no_output_____
###Markdown
mostly men
###Code
age_gender_lables[age_gender_lables.gender_estimated.notna()]
plt.title('histogram of gender estimates')
plt.ylabel('count')
age_gender_lables[(age_gender_lables.gender_true =='masculin') &\
age_gender_lables.gender_estimated.notna()].gender_estimated.plot('hist', bins=100)
age_gender_lables[(age_gender_lables.gender_true !='masculin') &\
age_gender_lables.gender_estimated.notna()].gender_estimated.plot('hist', bins=100)
plt.legend(['male', 'female'])
###Output
/Users/lguillain/opt/anaconda3/envs/fdh/lib/python3.7/site-packages/ipykernel_launcher.py:4: FutureWarning: `Series.plot()` should not be called with positional arguments, only keyword arguments. The order of positional arguments will change in the future. Use `Series.plot(kind='hist')` instead of `Series.plot('hist',)`.
after removing the cwd from sys.path.
/Users/lguillain/opt/anaconda3/envs/fdh/lib/python3.7/site-packages/ipykernel_launcher.py:6: FutureWarning: `Series.plot()` should not be called with positional arguments, only keyword arguments. The order of positional arguments will change in the future. Use `Series.plot(kind='hist')` instead of `Series.plot('hist',)`.
###Markdown
Clearly, judging by the names, something went wrong with some of the gender estimates.
###Code
CM = confusion_matrix(age_gender_lables.gender_true == 'masculin',
age_gender_lables.gender_estimates_binary == 'masculin')
CM
TN = CM[0][0]
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
FN
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
ACC
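# Hedged extra check (an assumption, not in the original notebook): sklearn's classification_report
# summarises the same confusion matrix as per-class precision/recall/F1 in a single call.
from sklearn.metrics import classification_report
print(classification_report(age_gender_lables.gender_true == 'masculin',
                            age_gender_lables.gender_estimates_binary == 'masculin'))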
###Output
_____no_output_____
###Markdown
Example of multiple matches or mismatches
###Code
font = {'family': 'serif',
'color': 'yellow',
'weight': 'normal',
'size': 16,
}
img = get_image(age_gender_lables.id[1296])
for detect in [age_gender_lables.iloc[1296]]:
gender = 'Woman' if detect['gender_estimated'] > .5 else 'Man'
plt.figure(figsize=(10, 10))
plt.text(detect['left'], detect['top']-10, str(detect['age'])[:2] + ' ' + gender, fontdict=font)
plt.imshow(cv2.rectangle(img, (int(detect['left']), int(detect['top'])), (int(detect['right']), int(detect['bottom'])), (255, 255, 0), 3))
###Output
_____no_output_____ |
_Moringa_Data_Science_Core_W8_Independent_Project_2020_07_Leah_Mbugua (1).ipynb | ###Markdown
Our dataset appears to have no null values. However, if we check individual columns there is a `?` placeholder which should be converted to a NaN value. **Replace ? with NaN**
###Code
# Count unique elements in each column including NaN
uniqueValues = df.nunique(dropna=False)
uniqueValues
###Output
Count Unique values in each column including NaN
status 2
age 93
sex 3
on_thyroxine 2
query_on_thyroxine 2
on_antithyroid_medication 2
thyroid_surgery 2
query_hypothyroid 2
query_hyperthyroid 2
pregnant 2
sick 2
tumor 2
lithium 2
goitre 2
TSH_measured 2
TSH 240
T3_measured 2
T3 70
TT4_measured 2
TT4 269
T4U_measured 2
T4U 159
FTI_measured 2
FTI 281
TBG_measured 2
TBG 53
dtype: int64
###Markdown
* Column sex has 3 unique values, to check we print the unique values.
###Code
#For example the below column has a unique value ? which needs to be replaced
df['sex'].unique()
#Replace all rows with ? to nan
df.replace('?',np.nan,inplace=True)
df.head()
#Check for missing values
df.isnull().sum()
#Confirm that the ? value has been replaced.
df['sex'].unique()
#Checking data types of our dataset
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3163 entries, 0 to 3162
Data columns (total 26 columns):
status 3163 non-null object
age 2717 non-null object
sex 3090 non-null object
on_thyroxine 3163 non-null object
query_on_thyroxine 3163 non-null object
on_antithyroid_medication 3163 non-null object
thyroid_surgery 3163 non-null object
query_hypothyroid 3163 non-null object
query_hyperthyroid 3163 non-null object
pregnant 3163 non-null object
sick 3163 non-null object
tumor 3163 non-null object
lithium 3163 non-null object
goitre 3163 non-null object
TSH_measured 3163 non-null object
TSH 2695 non-null object
T3_measured 3163 non-null object
T3 2468 non-null object
TT4_measured 3163 non-null object
TT4 2914 non-null object
T4U_measured 3163 non-null object
T4U 2915 non-null object
FTI_measured 3163 non-null object
FTI 2916 non-null object
TBG_measured 3163 non-null object
TBG 260 non-null object
dtypes: object(26)
memory usage: 642.6+ KB
###Markdown
There are numerical variables that need to be converted to numeric type.
###Code
num = ['age','TSH','T3','TT4','T4U','FTI']
num
categorical= ['status','sex', 'on_thyroxine', 'query_on_thyroxine','on_antithyroid_medication', 'thyroid_surgery', 'query_hypothyroid',
'query_hyperthyroid', 'pregnant', 'sick', 'tumor', 'lithium', 'goitre','TSH_measured','T3_measured','TT4_measured',
'T4U_measured','FTI_measured','TBG_measured', 'TBG']
#convert object to numerical
df[num] = df[num].apply(pd.to_numeric)
df.dtypes
# To confirm they have been converted,split numerical variables from categorical variables
numerical_variables = [col for col in df.columns if df[col].dtypes != 'O']
numerical_variables
#Get all categorical variables
categorical_variables = [col for col in df.columns if df[col].dtypes == 'O']
categorical_variables
#Check for missing values
df.isnull().sum()
#Fill missing values of numerical variables
# Use simple imputer to fill missing values with the mean
impute = SimpleImputer(strategy ='mean')
df[numerical_variables] = impute.fit_transform(df[numerical_variables])
# Fill missing values for categorical data
df['sex'].fillna(df['sex'].mode()[0], inplace=True)
df['TBG'].fillna(df['TBG'].mode()[0], inplace=True)
#Confirm there are no missing values
df.isnull().sum()
###Output
_____no_output_____
###Markdown
**Data Preprocessing**
###Code
#We define x and y
y = df['status']
y
#Change our target values(y) to a binary
y =df['status']= np.where(df['status']=='hypothyroid',0,1)
print(y)
df['status'].value_counts()
###Output
[0 0 0 ... 1 1 1]
###Markdown
* 1 means it's negative
* 0 means it's hypothyroid
###Code
X = df.drop(['status'], axis=1)
X
from sklearn.model_selection import train_test_split
#Split our dataset: the train set size is 80%, the test set is 20%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print(X_train.shape, X_test.shape)
!pip install sklearn
!pip install category_encoders
# encode categorical variables with one-hot encoding to numeric
encoder = ce.OneHotEncoder(cols=['sex','on_thyroxine','query_on_thyroxine','on_antithyroid_medication','thyroid_surgery','query_hypothyroid','query_hyperthyroid','pregnant','sick','tumor','lithium','goitre','TSH_measured','T3_measured','TT4_measured','T4U_measured','FTI_measured','TBG_measured','TBG'])
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
print(X_train.head(4))
#Confirm there is no nan in train dataset.
np.any(np.isnan(X_train))
# Confirm there is no nan in test dataset
np.any(np.isnan(X_test))
###Output
_____no_output_____
###Markdown
Feature scaling: **Feature scaling** is a method used to normalize the range of independent variables or features of data. In data processing, it is also known as data normalization. We need to normalize our independent variables; we use RobustScaler to do this.
###Code
cols = X_train.columns
scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
###Output
_____no_output_____
###Markdown
**Random Forest Classifier model with default parameters- 10 decision trees**
###Code
# Initiate the RandomForestClassifier
rf = RandomForestClassifier(random_state=0)
# fit the model
rf.fit(X_train, y_train)
# Predict the Test set results
y_pred = rf.predict(X_test)
# Check accuracy score
from sklearn.metrics import accuracy_score
print('Model accuracy score with 10 decision-trees : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
#Check the error rate of the model.
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
#Compute the confusion matrix to understand the actual versus predicted variables.
from sklearn.metrics import confusion_matrix
confusion = confusion_matrix(y_test,y_pred)
confusion
###Output
_____no_output_____
###Markdown
* **Findings**
* Model accuracy was 98% with an error rate of 0.14. The model predicted 602 negative and 17 hypothyroid patients correctly.
* To improve the model performance, we increase the number of decision trees to 100, increase the max depth and set min_samples_split to 20 (a grid-search sketch for choosing these values is included in the code below). **Random forest classifier using 100 decision trees**
###Code
# Run the classifier with n_estimators = 100
rf1 = RandomForestClassifier(n_estimators=100, random_state=0,max_depth=5, min_samples_split = 20)
# fit the model to the training set
rf1.fit(X_train, y_train)
# Predict on the test set results
y_pred1 = rf1.predict(X_test)
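# Hedged sketch (an assumption, not part of the original analysis): rather than hand-picking
# n_estimators / max_depth / min_samples_split, a small grid search with 5-fold cross-validation
# on the training set could select them automatically.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
param_grid = {'n_estimators': [10, 100], 'max_depth': [5, 10, None], 'min_samples_split': [2, 20]}
grid = GridSearchCV(RandomForestClassifier(random_state=0), param_grid, cv=5, scoring='accuracy')
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)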
# Create a comparison frame between the actual and predicted target variable
comparison_frame = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
comparison_frame.describe()
# Check accuracy score
print('Model accuracy score with 100 decision-trees : {0:0.4f}'.format(accuracy_score(y_test, y_pred1)))
#Check the error rate using root mean squared error
from sklearn import metrics
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
###Output
Root Mean Squared Error: 0.14871752967788066
###Markdown
The error rate is very low (0.14), which shows the model performs reasonably well.
###Code
# Calculate a confusion matrix to identify what patients were predicted to be negative or have hypothyroid
confusion = confusion_matrix(y_test,y_pred1)
confusion
###Output
_____no_output_____
###Markdown
* **Findings*** The model accuracy remained roughly the same (about 97%) even after increasing the number of decision trees. The error rate is very small: 0.14.* The model predicted 602 patients were negative, and they actually were negative.* The model predicted 17 patients had hypothyroid, and they actually did.* This means the accuracy of the model is not affected by the change in the number of decision trees.* However, we can still try to improve on the model using gradient boosting and see how it performs. **Gradient Boosting Classifier**
###Code
#Initiate the gradient boosting classifier
from sklearn.ensemble import GradientBoostingClassifier
gradient = GradientBoostingClassifier(learning_rate =0.1,n_estimators=100,max_depth=3,min_samples_split=2) # defining my classifier as gradient
#fit the train dataset in the classifier
gradient.fit(X_train,y_train)
#Making a prediction
y_pred_g = gradient.predict(X_test)
y_pred_g
#Check the accuracy score of the gradient model
print("gradient_Accuracy score is :",metrics.accuracy_score(y_test, y_pred_g))
# Calculate a confusion matrix to identify what patients were predicted to be negative or have hypothyroid
confusion = confusion_matrix(y_test,y_pred_g)
confusion
###Output
_____no_output_____
###Markdown
* **Findings*** The accuracy score of the model increased to 98%.* The model predicted 600 patients were negative, and they actually were negative.* The model predicted 23 patients had hypothyroid, and they actually did.* In this case, we would prefer the gradient boosting classifier over the random forest, as its performance increased. **SVM (Support Vector Machine)**
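The kernels and parameters in the next cell are picked by hand; as an optional aside (a minimal sketch, assuming the `X_train`/`y_train` defined above), the same comparison could be automated with a small grid search:

```python
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Hypothetical grid; widen the ranges as needed.
param_grid = {'kernel': ['linear', 'rbf', 'sigmoid'], 'C': [0.1, 1.0, 10.0]}
grid = GridSearchCV(SVC(gamma='auto', random_state=0), param_grid, cv=5, scoring='accuracy')
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)
```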
###Code
#For this, we will create an SVM before parameter tuning and one after parameter tuning. This is because we are solving a classification problem.
from sklearn.svm import SVC
# SVM before parameter tuning (linear kernel)
svm = SVC(kernel = 'linear',C=1.0,gamma='auto',random_state=2)
#SVM after parameter tuning, using the sigmoid kernel
clf = SVC(kernel = 'sigmoid',C=1.0,gamma='auto',random_state=0)
# fitting the training data into the models
svm.fit(X_train,y_train)
clf.fit(X_train,y_train)
# Now that we have trained our models, let's test how well they can predict if a patient is negative or positive for hypothyroid
#Making predictions with the linear kernel
y_pred_svc = svm.predict(X_test)
#Making predictions with the tuned (sigmoid) kernel
y_pred1 = clf.predict(X_test)
#Check accuracy of model before setting any parameters
print("Accuracy with linear kernel:",metrics.accuracy_score(y_test, y_pred_svc))
#Accuracy score using sigmoid function
print(accuracy_score(y_test,y_pred1))
###Output
Accuracy with linear kernel: 0.976303317535545
0.9652448657187994
|
.ipynb_checkpoints/Implementation of word2vec-checkpoint.ipynb | ###Markdown
Implementation of word2vec on Stanford Sentiment Treebank (SST) dataset"You shall know a word by the company it keeps" (J. R. Firth) IntroductionThis notebook is a step-by-step guide to implementing word2vec skip-gram on the Stanford Sentiment Treebank (SST) dataset, and is the solution to the coding sections of [Assignment 2](http://web.stanford.edu/class/cs224n/assignments/a2.pdf) of Stanford's ["CS224n: Natural Language Processing with Deep Learning"](http://web.stanford.edu/class/cs224n/) course. Contents of this notebook are taken from the course materials. I recommend reading the original papers [1,2] and all the course materials on word2vec (especially this [one](http://web.stanford.edu/class/cs224n/readings/cs224n-2019-notes01-wordvecs1.pdf)) before proceeding to the implementation. But if you are looking for a shortcut, [this link](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) covers all the major points in both papers. Conda environmentFirst you need to create a conda virtual environment with all the necessary packages to run the code. Run the following command from within the repo directory to create a new env named "word2vecenv":
###Code
conda env create -f env.yml
###Output
_____no_output_____
###Markdown
Activate the "word2vecenv" that you just created:
###Code
source activate word2vecenv (or conda activate depending on your OS and anaconda version)
###Output
_____no_output_____
###Markdown
Installing the IPython kernel in your env:
###Code
conda install ipykernel
ipython kernel install --user
###Output
_____no_output_____
###Markdown
Now switch your notebook's kernel to "word2vec" env. Understanding negative sampling The original word2vec paper [1] proposed the "Naive softmax loss" as the objective function ($J$): $- \sum^{2m}_{j=0,j \neq m} u^T_{c-m+j}v_c + 2m \log \sum_{k=1}^{|V|} \exp(u_k^T v_c) $ in which $v_c$ is the word vector of the center word, $u_j$ is the word vector of outside word $j$, $|V|$ is the vocabulary size and $m$ is the window size. Note that every time we update or evaluate $J$ we need to do a summation over the entire vocabulary (a sum of $|V|$ terms), which is on the order of millions and computationally huge! That's why the authors of the original paper came up with the idea of the "Negative sampling loss" [2] to approximate the softmax normalization term (the sigma in the above equation). The idea is that rather than looping over the entire vocabulary to do the summation, we generate negative samples and use them to estimate the objective function. We will use the latter in this notebook. Consider a pair $(w, c)$ of word and context. Did this pair come from the training data? Let's denote by $P(D = 1|w, c)$ the probability that $(w, c)$ came from the corpus data. Correspondingly, $P(D = 0|w, c)$ will be the probability that $(w, c)$ did not come from the corpus data. First, let's model $P(D = 1|w, c)$ with the sigmoid function: $P(D = 1|w, c,\theta) = \sigma (u_w^T v_c) = \frac{1}{1+\exp(-u_w^T v_c)} $ and naturally if the pair did not come from the corpus, we will have: $P(D = 0|w, c,\theta) = 1 - P(D = 1|w, c,\theta) = 1 - \sigma (u_w^T v_c) = 1- \frac{1}{1+\exp(-u_w^T v_c)} $ For every training step, instead of looping over the entire vocabulary, we can just sample several negative examples! We "sample" from a noise distribution ($P_n(w)$) whose probabilities match the ordering of the frequency of the vocabulary. For a given center word (vector), $v_c$, an outside (context) word, $u_o$, and $K$ negative samples, $\tilde{u}_k$, our objective function for the Skip-gram model will be: $J_{neg-sample} (v_c,u_o,U) = -\log \sigma(u^T_{o}v_c) - \sum_{k=1}^{K} \log \sigma (-\tilde{u}^T_{k}v_c) $ in which $U$ is the matrix of outside word vectors. We will need the partial derivatives of $J_{neg-sample} (v_c,u_o,U)$ with respect to $v_c$, $u_o$ and $u_k$ for backpropagation (try to work out these derivatives from $J_{neg-sample} (v_c,u_o,U)$): $\partial J_{neg-sample} (v_c,u_o,U) / \partial v_c = -(1 - \sigma(u^T_o v_c))u_o + \sum_{k=1}^{K} (1-\sigma(-u_k^Tv_c)) u_k$ $\partial J_{neg-sample} (v_c,u_o,U) / \partial u_o = - (1- \sigma (u_o^T v_c))v_c$ $\partial J_{neg-sample} (v_c,u_o,U) / \partial u_k = (1- \sigma (-u_k^T v_c))v_c$ We will use these derivatives to implement the *negSamplingLossAndGradient* function Implementation Libraries
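As a quick sanity check of the $\partial J_{neg-sample}/\partial v_c$ formula above, here is a self-contained numerical sketch with random toy vectors (independent of the dataset used later) that compares the analytic gradient against a finite-difference estimate:

```python
import numpy as np

rng = np.random.default_rng(0)
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

v_c, u_o = rng.normal(size=5), rng.normal(size=5)
U_neg = rng.normal(size=(3, 5))          # three toy negative samples

def loss(v):
    # J_neg-sample as defined above
    return -np.log(sigmoid(u_o @ v)) - np.log(sigmoid(-U_neg @ v)).sum()

grad_analytic = (-(1 - sigmoid(u_o @ v_c)) * u_o
                 + ((1 - sigmoid(-U_neg @ v_c))[:, None] * U_neg).sum(axis=0))
eps = 1e-6
grad_numeric = np.array([(loss(v_c + eps * e) - loss(v_c - eps * e)) / (2 * eps)
                         for e in np.eye(5)])
print(np.allclose(grad_analytic, grad_numeric, atol=1e-5))   # expect True
```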
###Code
import random
import numpy as np
from utils.treebank import StanfordSentiment
from utils.gradcheck import gradcheck_naive
from utils.utils import normalizeRows, softmax
import pickle
import matplotlib
import matplotlib.pyplot as plt
import time
import glob
import os.path as op
# Check Python Version
import sys
assert sys.version_info[0] == 3
assert sys.version_info[1] >= 5
###Output
_____no_output_____
###Markdown
Run the following command line code to fetch the "Stanford Sentiment Treebank (SST)" dataset:
###Code
sh get_datasets.sh
###Output
_____no_output_____
###Markdown
Take the data for a spin! Let's take a look at the dataset first and see what's inside!
###Code
dataset = StanfordSentiment()
dataset.numSentences()
###Output
_____no_output_____
###Markdown
There are 11855 sentences in the dataset.
###Code
len(dataset.tokens())
###Output
_____no_output_____
###Markdown
and 19539 'tokens'. "dataset.tokens()" is a mapping from tokens (words) to indices
###Code
dataset.tokens()['python']
###Output
_____no_output_____
###Markdown
That is the index of 'python' in our dictionary! 1. Naive softmax implementation Sigmoid functionGood ol' sigmoid function which we will use to calculate the loss:
###Code
def sigmoid(x):
"""
Arguments:
x -- A scalar or numpy array.
Return:
s -- sigmoid(x)
"""
sig_x=1/(1+np.exp(-x))
return sig_x
###Output
_____no_output_____
###Markdown
Negative sampler:We are going to define *getNegativeSamples* to draw random negative samples from the dataset:
###Code
def getNegativeSamples(outsideWordIdx, dataset, K):
""" Samples K indexes which are not the outsideWordIdx """
negSampleWordIndices = [None] * K
for k in range(K):
newidx = dataset.sampleTokenIdx()
while newidx == outsideWordIdx:
newidx = dataset.sampleTokenIdx()
negSampleWordIndices[k] = newidx
return negSampleWordIndices
###Output
_____no_output_____
###Markdown
Negative sampling loss and gradient:We are going to use $\partial J_{neg-sample} (v_c,u_o,U) / \partial v_c$, $\partial J_{neg-sample} (v_c,u_o,U) / \partial u_o$ and $\partial J_{neg-sample} (v_c,u_o,U) / \partial u_k$ that we derived above to calculate the loss and gradient:
###Code
def negSamplingLossAndGradient(
centerWordVec,
outsideWordIdx,
outsideVectors,
dataset,
K=10
):
""" Negative sampling loss function for word2vec models
"""
negSampleWordIndices = getNegativeSamples(outsideWordIdx, dataset, K)
indices = [outsideWordIdx] + negSampleWordIndices
u_ws=outsideVectors[indices,:]
u_ws[1:,:]=-u_ws[1:,:]
sigmoid_uws=sigmoid([email protected](-1,1)).squeeze()
loss= -np.log(sigmoid_uws).sum()
gradCenterVec=(sigmoid_uws[0]-1)*u_ws[0,:]
for row in range(1,u_ws.shape[0]):
gradCenterVec=gradCenterVec-(1-sigmoid_uws[row])*u_ws[row,:]
gradOutsideVecs=np.zeros(outsideVectors.shape)
gradOutsideVecs[indices[0],:]=((sigmoid_uws[0]-1)*centerWordVec).reshape(-1,)
for i,idx in enumerate(indices[1:]):
gradOutsideVecs[idx,:]=gradOutsideVecs[idx,:]+((1-sigmoid_uws[i+1])*centerWordVec).reshape(-1,)
return loss, gradCenterVec, gradOutsideVecs
###Output
_____no_output_____
###Markdown
SkipgramGiven a minibatch including a center word and a list of outside words form the dataset, we will implement the *skipgram* function to calculate the loss and gradients:
###Code
def skipgram(currentCenterWord, windowSize, outsideWords, word2Ind,
centerWordVectors, outsideVectors, dataset,
word2vecLossAndGradient=negSamplingLossAndGradient):
""" Skip-gram model
Arguments:
currentCenterWord -- a string of the current center word
windowSize -- integer, context window size
outsideWords -- list of no more than 2*windowSize strings, the outside words
word2Ind -- a dictionary that maps words to their indices in
the word vector list
centerWordVectors -- center word vectors (as rows) for all words in vocab
(V in pdf handout)
outsideVectors -- outside word vectors (as rows) for all words in vocab
(U in pdf handout)
word2vecLossAndGradient -- the loss and gradient function for
a prediction vector given the outsideWordIdx
word vectors, could be one of the two
loss functions you implemented above.
Return:
loss -- the loss function value for the skip-gram model
(J in the pdf handout)
gradCenterVecs -- the gradient with respect to the center word vectors
(dJ / dV in the pdf handout)
gradOutsideVectors -- the gradient with respect to the outside word vectors
(dJ / dU in the pdf handout)
"""
loss = 0.0
gradCenterVecs = np.zeros(centerWordVectors.shape)
gradOutsideVectors = np.zeros(outsideVectors.shape)
idx_vc=word2Ind[currentCenterWord]
idx_uws=[word2Ind[outsideWord] for outsideWord in outsideWords]
vc=centerWordVectors[idx_vc,:].reshape(-1,1)
for idx_uw in idx_uws:
loss_uw, gradCenterVec_uw, gradOutsideVecs_uw = word2vecLossAndGradient(vc,idx_uw,outsideVectors,dataset)
loss=loss+loss_uw
gradCenterVecs[idx_vc,:]= gradCenterVecs[idx_vc,:] + gradCenterVec_uw.reshape(1,-1)
gradOutsideVectors= gradOutsideVectors + gradOutsideVecs_uw
return loss, gradCenterVecs, gradOutsideVectors
###Output
_____no_output_____
###Markdown
We also define a helper function to sequentially draw samples and perform stochastic gradient decent:
###Code
def word2vec_sgd_wrapper(batchsize,word2vecModel, word2Ind, wordVectors, dataset,
windowSize,
word2vecLossAndGradient=negSamplingLossAndGradient):
loss = 0.0
grad = np.zeros(wordVectors.shape)
N = wordVectors.shape[0]
centerWordVectors = wordVectors[:int(N/2),:]
outsideVectors = wordVectors[int(N/2):,:]
for i in range(batchsize):
windowSize1 = random.randint(1, windowSize)
centerWord, context = dataset.getRandomContext(windowSize1)
c, gin, gout = word2vecModel(
centerWord, windowSize1, context, word2Ind, centerWordVectors,
outsideVectors, dataset, word2vecLossAndGradient
)
loss += c / batchsize
grad[:int(N/2), :] += gin / batchsize
grad[int(N/2):, :] += gout / batchsize
return loss, grad
###Output
_____no_output_____
###Markdown
Stochastic Gradient Descent:Takes a function (f) and an input vector (x0) and performs gradient descent. We also define two other functions: *save_params* to save the matrix of word vectors every $n$ iterations while training and *load_saved_params* to load saved word vectors.
###Code
def save_params(iter, params):
params_file = "saved_params_%d.npy" % iter
np.save(params_file, params)
with open("saved_state_%d.pickle" % iter, "wb") as f:
pickle.dump(random.getstate(), f)
def load_saved_params():
"""
A helper function that loads previously saved parameters and resets
iteration start.
"""
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
params_file = "saved_params_%d.npy" % st
state_file = "saved_state_%d.pickle" % st
params = np.load(params_file)
with open(state_file, "rb") as f:
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def sgd(f, x0, step, iterations, PRINT_EVERY=10,SAVE_PARAMS_EVERY = 5000,ANNEAL_EVERY = 20000,useSaved=False):
""" Stochastic Gradient Descent
Implement the stochastic gradient descent method in this function.
Arguments:
f -- the function to optimize, it should take a single
argument and yield two outputs, a loss and the gradient
with respect to the arguments
x0 -- the initial point to start SGD from
step -- the step size for SGD
iterations -- total iterations to run SGD for
postprocessing -- postprocessing function for the parameters
if necessary. In the case of word2vec we will need to
normalize the word vectors to have unit length.
PRINT_EVERY -- specifies how many iterations to output loss
Return:
x -- the parameter value after SGD finishes
"""
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx
step *= 0.5 ** (start_iter / ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x=x0
exploss=0
for iter in range(start_iter + 1, iterations + 1):
loss = None
grad=0
loss,grad=f(x)
x=x-step*grad
if iter % PRINT_EVERY == 0:
if not exploss:
exploss = loss
else:
exploss = .95 * exploss + .05 * loss
print("iter %d: %f" % (iter, exploss))
if iter % SAVE_PARAMS_EVERY == 0:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
###Output
_____no_output_____
###Markdown
Showtime: Training!
###Code
random.seed(314)
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
# A 10 dimensional vector, Google's word2vec has 300 features.
dimVectors = 10
# Context size: How far away from the center word look for outside words?
C = 5
max_windowSize=C
wordVectors = np.concatenate(
((np.random.rand(nWords, dimVectors) - 0.5) /
dimVectors, np.zeros((nWords, dimVectors))),
axis=0)
random.seed(31415)
np.random.seed(9265)
startTime=time.time()
batch_size=50
wordVectors = sgd(
lambda vec: word2vec_sgd_wrapper(batch_size,skipgram, tokens, vec, dataset, C,
negSamplingLossAndGradient),
wordVectors, 0.3, 42000, PRINT_EVERY=1000,SAVE_PARAMS_EVERY = 5000,ANNEAL_EVERY = 20000,useSaved=True)
endTime=time.time()
print("Training time: %d minutes" %((endTime - startTime)/60))
###Output
_____no_output_____
###Markdown
ResultsI am going to use PCA to project word vectors onto 2D space and plot them:
###Code
wordVectors = np.concatenate(
(wordVectors[:nWords,:], wordVectors[nWords:,:]),
axis=0)
visualizeWords = [
"great", "cool", "brilliant", "wonderful", "well", "amazing",
"worth", "sweet", "enjoyable", "boring", "bad", "dumb",
"annoying", "female", "male", "queen", "king", "man", "woman", "rain", "snow",
"hail", "coffee", "tea"]
visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U,S,V = np.linalg.svd(covariance)
coord = temp.dot(U[:,0:2])
%matplotlib inline
plt.figure()
for i in range(len(visualizeWords)):
plt.text(coord[i,0], coord[i,1], visualizeWords[i],
bbox=dict(facecolor='green', alpha=0.1))
plt.xlim((np.min(coord[:,0]), np.max(coord[:,0])))
plt.ylim((np.min(coord[:,1]), np.max(coord[:,1])))
plt.show()
###Output
_____no_output_____ |
final/Task6_knn.ipynb | ###Markdown
KNN (K-Nearest Neighbors) Algorithm Suppose we have a dataset containing N classes of data. Given an unlabeled input sample, we can compare it against the training data, extract the K training samples most similar to it (closest in distance), and assign the new sample the label that occurs most often among those K samples. The idea behind KNN is very simple and intuitive:1. Compute the distance between the test sample and each training sample; 2. Sort the distances in increasing order; 3. Select the K points with the smallest distances; 4. Count how often each class occurs among those K points; 5. Return the most frequent class among the K points as the predicted class of the test sample. Advantages of KNN:1. Simple and easy to implement; 2. Because it looks at the nearest data points, it partitions accurately even when some classes have few samples, so it is suitable for classifying rare points; 3. It works for multi-class problems. Implementing the algorithm We will use a case study to implement the algorithm step by step, following the KNN idea. KNN case study: improving the matching results of a dating site Project overview Helen uses a dating site to look for dates. After a while, she noticed that she had dated three types of people:- 1: people she disliked- 2: people of average charm- 3: people of great charm She would like to:- rule out the people she dislikes directly- date people of average charm on weekdays- date people of great charm on weekends She has now collected some data that the dating site does not record, which helps classify potential matches. Workflow Helen stores the data about these dates in the text file datingTestSet2.txt, which has 1000 rows in total. Each of Helen's dates is described by the following 3 features:- `Col1`: frequent flyer miles earned per year - `Col2`: percentage of time spent playing video games - `Col3`: liters of ice cream consumed per week The text file looks like this:```python40920 8.326976 0.953952 314488 7.153469 1.673904 226052 1.441871 0.805124 175136 13.147394 0.428964 138344 1.669788 0.134296 1 Reading the data
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('datingTestSet2.txt',sep = '\t',header = None)
X = np.array(data.iloc[:,:-1])
y = np.array(data.iloc[:,-1])
###Output
_____no_output_____
###Markdown
Splitting the data We can directly call sklearn's function to split the dataset into a training set and a test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
Compute the distances between the test samples and the training samples, and classify. We start with the simplest idea: use the sum of the absolute differences between the features of the sample we want to predict and each training sample as the distance, and give the new sample the label of the closest training sample.
###Code
class KNN:
    def __init__(self):
        pass
    def train(self,X_train,y_train):
        # store the training set
        self.X_train = np.array(X_train)
        self.y_train = np.array(y_train)
    def predict(self,X_test):
        (m,d) = np.shape(X_test) # number of test samples and number of features
        y_pred = np.zeros((m)) # initialise the predicted labels to 0
        for i in range(m):
            distance = np.sum(np.abs(self.X_train - X_test[i,:]),axis = 1) # sum of absolute differences (L1 distance)
            min_index = np.argmin(distance) # index of the closest training sample
            y_pred[i] = self.y_train[min_index] # copy the label of the closest sample to the new data point
        return y_pred
###Output
_____no_output_____
###Markdown
We can call this the "nearest neighbor algorithm": it simply finds the single closest training sample and copies its label. We now extend it to the K-nearest neighbors algorithm. Possible extensions:* choose a different distance formula* choose a different value of K Choosing a different distance formula: the previous algorithm used the Manhattan distance, the sum of the absolute differences of the features, i.e. the L1 distance. We can also use the L2 distance. Manhattan distance:$$d_1(I_1,I_2) = \sum_P|I_1^p - I_2^p|$$ Euclidean distance:$$d_2(I_1,I_2) = \sqrt{\sum_P(I_1^p - I_2^p)^2}$$ As an analogy, for two points on a map, the Euclidean distance is the straight-line distance between them, while the Manhattan distance measures how far you would drive from point A to point B: since you cannot cut through buildings and walls, it adds up the horizontal and vertical legs of the route. In KNN the Euclidean distance is used more often, because we usually measure distances between feature vectors in a multidimensional space, where there is no need to "drive around walls". If you are interested, you can look up other distance formulas and add them to the algorithm below. Choosing a different K Instead of labeling the new sample with the single closest training sample, we take the K closest training samples, find the class that most of those neighbors belong to, and assign the prediction to that class. For sorting and counting we can directly call the argsort and Counter functions. Following these ideas, we rewrite the KNN algorithm:
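A tiny illustration of the two distances (made-up feature values, just to show that they generally differ):

```python
import numpy as np

a = np.array([40920.0, 8.3, 0.95])
b = np.array([14488.0, 7.2, 1.67])
manhattan = np.sum(np.abs(a - b))              # L1 distance
euclidean = np.sqrt(np.sum(np.square(a - b)))  # L2 distance
print(manhattan, euclidean)
```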
###Code
from collections import Counter
class KNN:
    def __init__(self,k=1,metric='euclidean'): # default distance: Euclidean; default k=1 (nearest neighbour)
        self.metric = metric
        self.k = k
    def train(self,X_train,y_train):
        self.X_train = np.array(X_train)
        self.y_train = np.array(y_train)
    def predict(self,x_test):
        x_test = np.array(x_test)
        (m,d) = np.shape(x_test) # number of test samples and number of features
        y_pred = np.zeros((m)) # initialise the predicted labels to 0
        for i in range(m):
            if self.metric == 'manhattan':
                distances = np.sum(np.abs(self.X_train - x_test[i,:]),axis = 1) # Manhattan distance
            if self.metric == 'euclidean':
                distances = np.sqrt(np.sum(np.square(self.X_train - x_test[i,:]),axis = 1)) # Euclidean distance
            sort = np.argsort(distances) # sort by distance
            top_K = [self.y_train[j] for j in sort[:self.k]] # the K nearest neighbours
            k_counts = Counter(top_K) # count how often each class appears among the K neighbours
            label = k_counts.most_common(1)[0][0] # the majority class becomes the label of the new sample
            y_pred[i] = label
        return y_pred
###Output
_____no_output_____
###Markdown
*You might ask: what if two classes are tied in the vote? There are several ways to handle this, e.g. picking randomly, or comparing the total distance to each class; here we do nothing extra and keep the class that the Counter function returns by default.* Choosing K So how do we actually choose K? We can pick the K that performs best on the test set. In this task we call sklearn's KFold function to run k-fold validation on the dataset, and take the average score across the folds as the error score for that K. (Note that these two k's mean different things.) How do we score the test results? A natural metric is the fraction of correctly classified samples. We define the accuracy function as:
###Code
def score(ypred,ytest):
return sum(ypred == ytest)/len(ytest)
###Output
_____no_output_____
###Markdown
Adding the scoring function to the classifier we wrote ourselves gives us a relatively complete classifier, which we can compare against sklearn's results
###Code
from collections import Counter
class KNN:
def __init__(self,k,metric ='euclidean'):
pass
self.metric = metric
self.k = k
def train(self,X,y):
self.X_train = np.array(X)
self.y_train = np.array(y)
def predict(self,x_test):
x = np.array(x_test)
(m,d) = np.shape(x)
ypred = np.zeros((m))
for i in range(m):
if self.metric == 'manhattan':
distances = np.sum(np.abs(self.X_train - x[i,:]),axis = 1)
if self.metric == 'euclidean':
distances = np.sqrt(np.sum(np.square(self.X_train - x[i,:]),axis = 1))
nearest = np.argsort(distances)
#print(len(nearest))
top_K = [self.y_train[i] for i in nearest[:self.k]]
votes = Counter(top_K)
label = votes.most_common(1)[0][0]
#min_index = np.argmin(distance)
#ypred[i] = self.ytrain[min_index]
ypred[i] = label
return ypred
def score(self,ypred,ytest):
return sum(ypred == ytest)/len(ytest)
###Output
_____no_output_____
###Markdown
Comparison with sklearn's KNeighborsClassifier algorithm
###Code
# standardise the data
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
X_ = ss.fit(X)
X_std =ss.transform(X)
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
k_range = range(1, 31)
k_error = []
# loop over k = 1 to 31 and inspect the error
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
# the cv parameter controls how the data are split; here 5-fold cross-validation is used (roughly a 5:1 train/test split per fold)
scores = cross_val_score(knn, X_std, y, cv=5, scoring='accuracy')
k_error.append(1 - scores.mean())
# plot: x axis is the value of k, y axis is the error
plt.plot(k_range, k_error)
plt.xlabel('Value of K for KNN')
plt.ylabel('Error')
plt.show()
###Output
_____no_output_____
###Markdown
Test the data with the K-nearest neighbors algorithm we wrote ourselves, and plot the error for each value of K in the same way.
###Code
from sklearn.model_selection import KFold
kf = KFold(n_splits=5,shuffle=False) # split the data into 5 mutually exclusive folds for testing
k_errors = [] # initialise the error list
for k in k_range:
knn = KNN(k=k)
scores = []
for train , test in kf.split(X_std,y):
knn.train(X_std[train],y[train])
ypred = knn.predict(X_std[test])
score = knn.score(ypred,y[test])
scores.append(1-score)
k_errors.append(np.mean(scores))
plt.plot(k_range, k_errors)
plt.xlabel('Value of K for KNN')
plt.ylabel('Error')
plt.show()
###Output
_____no_output_____
###Markdown
We observe that the algorithm performs well at $k=21$, so we take K = 21 and predict a new data point
###Code
knn = KNN(k=21)
knn.train(X_std,y)
# labels corresponding to each class
resultList = ['disliked person', 'person of average charm', 'person of great charm']
# read the input data
ffMiles = float(input("Frequent flyer miles earned per year?"))
percentTats = float(input("Percentage of time spent playing video games?"))
iceCream = float(input("Liters of ice cream consumed per week?"))
inArr = np.array([[ffMiles, percentTats, iceCream]])
# transform the new data with the scaler fitted earlier
x_new = ss.transform(inArr)
# predict
ypred = knn.predict(x_new)
print("This person is a: ", resultList[int(ypred) - 1])
###Output
Frequent flyer miles earned per year?38300
Percentage of time spent playing video games?1.6
Liters of ice cream consumed per week?.13
This person is a:  disliked person
|
docs/notebooks/link_two_dataframes.ipynb | ###Markdown
Link two datasets IntroductionThis example shows how two datasets with data about persons can be linked. We will try to link the data based on attributes like first name, surname, sex, date of birth, place and address. The data used in this example is part of [Febrl](https://sourceforge.net/projects/febrl/) and is fictitious. First, start with importing the ``recordlinkage`` module. The submodule ``recordlinkage.datasets`` contains several datasets that can be used for testing. For this example, we use the Febrl datasets 4A and 4B. These datasets can be loaded with the function ``load_febrl4``.
###Code
%precision 5
from __future__ import print_function
import pandas as pd
pd.set_option('precision',5)
pd.options.display.max_rows = 10
import recordlinkage
from recordlinkage.datasets import load_febrl4
###Output
_____no_output_____
###Markdown
The datasets are loaded with the following code. The returned datasets are of type ``pandas.DataFrame``. This makes it easy to manipulate the data if desired. For details about data manipulation with ``pandas``, see their comprehensive documentation http://pandas.pydata.org/.
###Code
dfA, dfB = load_febrl4()
dfA
###Output
_____no_output_____
###Markdown
Make record pairs It is very intuitive to compare each record in DataFrame ``dfA`` with all records of DataFrame ``dfB``. In fact, we want to make record pairs. Each record pair should contain one record of ``dfA`` and one record of ``dfB``. This process of making record pairs is also called 'indexing'. With the ``recordlinkage`` module, indexing is easy. First, load the ``index.Index`` class and call the `.full` method. This object generates a full index on a ``.index(...)`` call. In case of deduplication of a single dataframe, one dataframe is sufficient as argument.
###Code
indexer = recordlinkage.Index()
indexer.full()
pairs = indexer.index(dfA, dfB)
###Output
WARNING:recordlinkage:indexing - performance warning - A full index can result in large number of record pairs.
###Markdown
With the method ``index``, all possible (and unique) record pairs are made. The method returns a ``pandas.MultiIndex``. The number of pairs is equal to the number of records in ``dfA`` times the number of records in ``dfB``.
###Code
print (len(dfA), len(dfB), len(pairs))
###Output
5000 5000 25000000
###Markdown
Many of these record pairs do not belong to the same person. In case of one-to-one matching, the number of matches should be no more than the number of records in the smallest dataframe. In case of full indexing, ``min(len(dfA), len(dfB))`` is much smaller than ``len(pairs)``. The ``recordlinkage`` module has some more advanced indexing methods to reduce the number of record pairs. Obvious non-matches are left out of the index. Note that if a matching record pair is not included in the index, it cannot be matched anymore. One of the most well-known indexing methods is named *blocking*. This method includes only record pairs that are identical on one or more stored attributes of the person (or entity in general). The blocking method can be used in the ``recordlinkage`` module.
###Code
indexer = recordlinkage.Index()
indexer.block('given_name')
candidate_links = indexer.index(dfA, dfB)
print (len(candidate_links))
###Output
77249
###Markdown
The argument 'given_name' is the blocking variable. This variable has to be the name of a column in ``dfA`` and ``dfB``. It is possible to parse a list of columns names to block on multiple variables. Blocking on multiple variables will reduce the number of record pairs even further. Another implemented indexing method is *Sorted Neighbourhood Indexing* (``recordlinkage.index.SortedNeighbourhood``). This method is very useful when there are many misspellings in the string were used for indexing. In fact, sorted neighbourhood indexing is a generalisation of blocking. See the documentation for details about sorted neighbourd indexing. Compare records Each record pair is a candidate match. To classify the candidate record pairs into matches and non-matches, compare the records on all attributes both records have in common. The ``recordlinkage`` module has a class named ``Compare``. This class is used to compare the records. The following code shows how to compare attributes.
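For completeness, a hedged sketch of sorted neighbourhood indexing (the method name below follows recent `recordlinkage` versions; adjust it if your version differs):

```python
indexer = recordlinkage.Index()
indexer.sortedneighbourhood('given_name', window=9)
candidate_links_snb = indexer.index(dfA, dfB)
print(len(candidate_links_snb))
```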
###Code
# This cell can take some time to compute.
compare_cl = recordlinkage.Compare()
compare_cl.exact('given_name', 'given_name', label='given_name')
compare_cl.string('surname', 'surname', method='jarowinkler', threshold=0.85, label='surname')
compare_cl.exact('date_of_birth', 'date_of_birth', label='date_of_birth')
compare_cl.exact('suburb', 'suburb', label='suburb')
compare_cl.exact('state', 'state', label='state')
compare_cl.string('address_1', 'address_1', threshold=0.85, label='address_1')
features = compare_cl.compute(candidate_links, dfA, dfB)
###Output
_____no_output_____
###Markdown
The comparing of record pairs starts when the ``compute`` method is called. All attribute comparisons are stored in a DataFrame with horizontally the features and vertically the record pairs.
###Code
features
features.describe()
###Output
_____no_output_____
###Markdown
The last step is to decide which records belong to the same person. In this example, we keep it simple:
###Code
# Sum the comparison results.
features.sum(axis=1).value_counts().sort_index(ascending=False)
features[features.sum(axis=1) > 3]
###Output
_____no_output_____
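The cut-off of 3 used above is somewhat arbitrary; a quick pandas-only check (using only the `features` frame already computed) of how the number of accepted matches changes with the threshold:

```python
for threshold in (2, 3, 4, 5):
    print(threshold, (features.sum(axis=1) >= threshold).sum())
```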
###Markdown
Full code
###Code
import recordlinkage
from recordlinkage.datasets import load_febrl4
dfA, dfB = load_febrl4()
# Indexation step
indexer = recordlinkage.Index()
indexer.block('given_name')
candidate_links = indexer.index(dfA, dfB)
# Comparison step
compare_cl = recordlinkage.Compare()
compare_cl.exact('given_name', 'given_name', label='given_name')
compare_cl.string('surname', 'surname', method='jarowinkler', threshold=0.85, label='surname')
compare_cl.exact('date_of_birth', 'date_of_birth', label='date_of_birth')
compare_cl.exact('suburb', 'suburb', label='suburb')
compare_cl.exact('state', 'state', label='state')
compare_cl.string('address_1', 'address_1', threshold=0.85, label='address_1')
features = compare_cl.compute(candidate_links, dfA, dfB)
# Classification step
matches = features[features.sum(axis=1) > 3]
print(len(matches))
###Output
3241
|
qiskit/advanced/terra/programming_with_pulses/gathering_system_information.ipynb | ###Markdown
 Obtaining information about your `backend` _Note: All the attributes of the backend are described in detail in the [Qiskit Backend Specifications](https://arxiv.org/pdf/1809.03452.pdf). This page reviews a subset of the spec._Programming a quantum computer at the microwave pulse level requires more information about the device than is required at the circuit level. A quantum circuit is built for an abstract quantum computer -- it will yield the same quantum state on any quantum computer (except for varying performance levels). A pulse schedule, on the other hand, is so specific to the device, that running one program on two different backends is not expected to have the same result, even on perfectly noiseless systems.As a basic example, imagine a drive pulse `q0_X180` calibrated on qubit 0 to enact an $X180$ pulse, which flips the state of qubit 0. If we use the samples from that pulse on qubit 1 on the same device, or qubit 0 on another device, we do not know what the resulting state will be -- but we can be pretty sure it won't be an $X180$ operation. The qubits are each unique, with various drive coupling strengths. If we have specified a frequency for the drive pulse, it's very probable that pulse would have little effect on another qubit, which has its own resonant frequency.With that, we have motivated why information from the backend may be very useful at times for building Pulse schedules. The information included in a `backend` is broken into three main parts: - **Configuration**: static backend features - **Properties**: measured and reported backend characteristics - **Defaults**: default settings for the OpenPulse-enabled backend which are each covered in the following sections. While all three of these contain interesting data for Pulse users, the defaults are _only_ provided for backends enabled with OpenPulse.The first thing you'll need to do is grab a backend to inspect. Here we use a mocked backend that contains a snapshot of data from the real OpenPulse-enabled backend.
###Code
from qiskit.test.mock import FakeAlmaden
backend = FakeAlmaden()
###Output
_____no_output_____
###Markdown
ConfigurationThe configuration is where you'll find data about the static setup of the device, such as its name, version, the number of qubits, and the types of features it supports.Let's build a description of our backend using information from the `backend`'s config.
###Code
config = backend.configuration()
# Basic Features
print("This backend is called {0}, and is on version {1}. It has {2} qubit{3}. It "
"{4} OpenPulse programs. The basis gates supported on this device are {5}."
"".format(config.backend_name,
config.backend_version,
config.n_qubits,
'' if config.n_qubits == 1 else 's',
'supports' if config.open_pulse else 'does not support',
config.basis_gates))
###Output
This backend is called fake_almaden, and is on version 1.2.4. It has 20 qubits. It supports OpenPulse programs. The basis gates supported on this device are ['u1', 'u2', 'u3', 'cx', 'id'].
###Markdown
Neat! All of the above configuration is available for any backend, whether enabled with OpenPulse or not, although it is not an exhaustive list. There are additional attributes available on Pulse backends. Let's go into a bit more detail with those.The **timescale**, `dt`, is backend dependent. Think of this as the inverse sampling rate of the control rack's arbitrary waveform generators. Each sample point and duration in a Pulse `Schedule` is given in units of this timescale.
###Code
config.dt # units of seconds
###Output
/Users/[email protected]/code/qiskit-terra/qiskit/providers/models/backendconfiguration.py:355: UserWarning: `dt` and `dtm` now have units of seconds(s) rather than nanoseconds(ns).
warnings.warn('`dt` and `dtm` now have units of seconds(s) rather '
###Markdown
The configuration also provides information that is useful for building measurements. Pulse supports three measurement levels: `0: RAW`, `1: KERNELED`, and `2: DISCRIMINATED`. The `meas_levels` attribute tells us which of those are supported by this backend. To learn how to execute programs with these different levels, see this page -- COMING SOON.
###Code
config.meas_levels
###Output
_____no_output_____
###Markdown
For backends which support measurement level 0, the sampling rate of the control rack's analog-to-digital converters (ADCs) also becomes relevant. The configuration also has this info, where `dtm` is the time per sample returned:
###Code
config.dtm
###Output
_____no_output_____
###Markdown
The measurement map, explained in detail on [this page COMING SOON], is also found here.
###Code
config.meas_map
###Output
_____no_output_____
###Markdown
The configuration also supplies convenient methods for getting channels for your schedule programs. For instance:
###Code
config.drive(0)
config.measure(0)
config.acquire(0)
###Output
_____no_output_____
###Markdown
It is a matter of style and personal preference whether you use `config.drive(0)` or `DriveChannel(0)`. PropertiesThe `backend` properties contain data that was measured and optionally reported by the provider. Let's see what kind of information is reported for qubit 0.
###Code
props = backend.properties()
def describe_qubit(qubit, properties):
"""Print a string describing some of reported properties of the given qubit."""
# Conversion factors from standard SI units
us = 1e6
ns = 1e9
GHz = 1e-9
print("Qubit {0} has a \n"
" - T1 time of {1} microseconds\n"
" - T2 time of {2} microseconds\n"
" - U2 gate error of {3}\n"
" - U2 gate duration of {4} nanoseconds\n"
" - resonant frequency of {5} GHz".format(
qubit,
properties.t1(qubit) * us,
properties.t2(qubit) * us,
properties.gate_error('u2', qubit),
properties.gate_length('u2', qubit) * ns,
properties.frequency(qubit) * GHz))
describe_qubit(0, props)
###Output
Qubit 0 has a
- T1 time of 113.3795751321217 microseconds
- T2 time of 150.2847720544259 microseconds
- U2 gate error of 0.0005295247303964942
- U2 gate duration of 35.555555555555564 nanoseconds
- resonant frequency of 4.8572819835984875 GHz
###Markdown
Properties are not guaranteed to be reported, but backends without Pulse access typically also provide this data. DefaultsUnlike the other two sections, `PulseDefaults` are only available for Pulse-enabled backends. It contains the default program settings run on the device.
###Code
defaults = backend.defaults()
###Output
_____no_output_____
###Markdown
Drive frequenciesDefaults contains the default frequency settings for the drive and measurement signal channels:
###Code
q0_freq = defaults.qubit_freq_est[0] # Hz
q0_meas_freq = defaults.meas_freq_est[0] # Hz
GHz = 1e-9
print("DriveChannel(0) defaults to a modulation frequency of {} GHz.".format(q0_freq * GHz))
print("MeasureChannel(0) defaults to a modulation frequency of {} GHz.".format(q0_meas_freq * GHz))
###Output
DriveChannel(0) defaults to a modulation frequency of 4.857219891603379 GHz.
MeasureChannel(0) defaults to a modulation frequency of 7.264856891000001 GHz.
###Markdown
Pulse Schedule definitions for QuantumCircuit instructionsFinally, one of the most important aspects of the `backend` for `Schedule` building is the `InstructionScheduleMap`. This is a basic mapping from a circuit operation's name and qubit to the default pulse-level implementation of that instruction.
###Code
inst_map = defaults.instruction_schedule_map
print(inst_map)
###Output
<InstructionScheduleMap(1Q instructions:
q0: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q1: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q2: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q3: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q4: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q5: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q6: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q7: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q8: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q9: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q10: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q11: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q12: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q13: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q14: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q15: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q16: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q17: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q18: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
q19: {'MEAS', 'x', 'u2', 'u3', 'id', 'u1'}
Multi qubit instructions:
(0, 1): {'cx'}
(1, 0): {'cx'}
(1, 2): {'cx'}
(1, 6): {'cx'}
(2, 1): {'cx'}
(2, 3): {'cx'}
(3, 2): {'cx'}
(3, 4): {'cx'}
(3, 8): {'cx'}
(4, 3): {'cx'}
(5, 6): {'cx'}
(5, 10): {'cx'}
(6, 1): {'cx'}
(6, 5): {'cx'}
(6, 7): {'cx'}
(7, 6): {'cx'}
(7, 8): {'cx'}
(7, 12): {'cx'}
(8, 3): {'cx'}
(8, 7): {'cx'}
(8, 9): {'cx'}
(9, 8): {'cx'}
(9, 14): {'cx'}
(10, 5): {'cx'}
(10, 11): {'cx'}
(11, 10): {'cx'}
(11, 12): {'cx'}
(11, 16): {'cx'}
(12, 7): {'cx'}
(12, 11): {'cx'}
(12, 13): {'cx'}
(13, 12): {'cx'}
(13, 14): {'cx'}
(13, 18): {'cx'}
(14, 9): {'cx'}
(14, 13): {'cx'}
(15, 16): {'cx'}
(16, 11): {'cx'}
(16, 15): {'cx'}
(16, 17): {'cx'}
(17, 16): {'cx'}
(17, 18): {'cx'}
(18, 13): {'cx'}
(18, 17): {'cx'}
(18, 19): {'cx'}
(19, 18): {'cx'}
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19): {'measure'}
)>
###Markdown
Rather than build a measurement schedule from scratch, let's see what was calibrated by the backend to measure the qubits on this device:
###Code
measure_schedule = inst_map.get('measure', [q for q in range(config.n_qubits)])
measure_schedule.draw()
###Output
_____no_output_____
###Markdown
This can easily be appended to your own Pulse `Schedule` (`sched += inst_map.get('measure', ) << sched.duration`)!Likewise, each qubit will have a `Schedule` defined for each basis gate, and they can be appended directly to any `Schedule` you build.
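For example, a small sketch (using only the `inst_map` and `measure_schedule` objects above, and the shift/append pattern quoted in the text) that plays the calibrated `x` gate on qubit 0 and then measures:

```python
x_q0 = inst_map.get('x', 0)                    # calibrated X pulse schedule for qubit 0
sched = x_q0
sched += measure_schedule << sched.duration    # append the measurement after the gate
sched.draw()
```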
###Code
# You can use `has` to see if an operation is defined. Ex: Does qubit 3 have an x gate defined?
inst_map.has('x', 3)
# Some circuit operations take parameters. U1 takes a rotation angle:
inst_map.get('u1', 0, P0=3.1415)
###Output
_____no_output_____
###Markdown
While building your schedule, you can also use `inst_map.add(name, qubits, schedule)` to store useful `Schedule`s that you've made yourself.
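A brief sketch of that workflow (the schedule name below is hypothetical):

```python
my_sched = inst_map.get('x', 0)        # reuse the calibrated X pulse as a stand-in
inst_map.add('my_x180', 0, my_sched)   # store it under a custom name
print(inst_map.has('my_x180', 0))      # True
```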
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____ |
04. Data Analysis/review_analysis/reviews.ipynb | ###Markdown
1. Overview of the Dataframe
###Code
import pandas
from datetime import datetime
from pytz import utc
data = pandas.read_csv("reviews.csv", parse_dates= ["Timestamp"])
data.head()
data.shape
data.columns
data.hist("Rating")
###Output
_____no_output_____
###Markdown
2. Selecting data from the dataframe Selecting a column
###Code
data["Rating"]
###Output
_____no_output_____
###Markdown
Selecting multiple columns
###Code
data[["Course Name", "Rating"]]
###Output
_____no_output_____
###Markdown
Selecting a Row
###Code
data.iloc[3]
###Output
_____no_output_____
###Markdown
Selecting multiple rows
###Code
data.iloc[1:3]
###Output
_____no_output_____
###Markdown
Selecting a Cross Section
###Code
data[["Course Name", "Rating"]].iloc[1:3]
###Output
_____no_output_____
###Markdown
Selecting a particular cell
###Code
data["Timestamp"].iloc[2]
###Output
_____no_output_____
###Markdown
3. Filtering Data Based On Conditions One Condition
###Code
data[data["Rating"] > 4]
len(data[data["Rating"] > 4])
data[data["Rating"] > 4].count()
ratingFiltered = data[data["Rating"] > 4]
ratingFiltered["Rating"]
ratingFiltered["Rating"].mean()
###Output
_____no_output_____
###Markdown
Multiple conditions
###Code
data[( data["Rating"] > 4 ) & (data["Course Name"] == "Python for Beginners with Examples")]
dualCondition = data[( data["Rating"] > 4 ) & (data["Course Name"] == "Python for Beginners with Examples")]
dualCondition["Rating"].mean()
###Output
_____no_output_____
###Markdown
4. Time Based Filtering
###Code
data[ (data["Timestamp"] >= datetime(2020,7,1, tzinfo =utc)) & (data["Timestamp"] <= datetime(2020,12,31, tzinfo = utc)) ]
# You need to parse the dataframe Timestamp columns as dates and interpret the datetime ranges with the same Timezone
# as the Timestamps
###Output
_____no_output_____
###Markdown
5. From data to information Average of Rating of All Courses
###Code
data["Rating"].mean()
###Output
_____no_output_____
###Markdown
Average Rating for a particular course
###Code
data[(data["Course Name"] == "Python for Beginners with Examples")]["Rating"].mean()
###Output
_____no_output_____
###Markdown
Average Rating for a particular period
###Code
data[ (data["Timestamp"] >= datetime(2020,7,1, tzinfo =utc)) & (data["Timestamp"] <= datetime(2020,12,31, tzinfo = utc)) ]["Rating"].mean()
###Output
_____no_output_____
###Markdown
Average Rating for a particular course and period
###Code
df1 = data[ (data["Timestamp"] >= datetime(2020,7,1, tzinfo =utc)) & (data["Timestamp"] <= datetime(2020,12,31, tzinfo = utc))]
df1[df1["Course Name"] == "Python for Beginners with Examples"]["Rating"].mean()
###Output
_____no_output_____
###Markdown
Average of Uncommented Ratings
###Code
data[data["Comment"].isnull()]["Rating"].mean()
###Output
_____no_output_____
###Markdown
Average of Commented Ratings
###Code
data[data["Comment"].notnull()]["Rating"].mean()
###Output
_____no_output_____
###Markdown
Number of Uncommented Ratings
###Code
data[data["Comment"].isnull()]["Rating"].count()
###Output
_____no_output_____
###Markdown
Number of Commented Ratings
###Code
data[data["Comment"].notnull()]["Rating"].count()
###Output
_____no_output_____
###Markdown
Number of Comments Containing a Certain Word
###Code
data[(data["Comment"].str.contains("accent", na = False))]["Comment"].count()
data[(data["Comment"].str.contains("accent", na = False))]["Rating"].mean()
###Output
_____no_output_____ |
Analyzing_Heart_Disease.ipynb | ###Markdown
Analyzing Heart DiseaseHello! I'll be exploring the [heart disease dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) provided by the University of California, Irvine. The database that this set came from contains 76 attributes, but the set itself only contains 14.AcknowledgementsCreators:Hungarian Institute of Cardiology. Budapest: Andras Janosi, M.D.University Hospital, Zurich, Switzerland: William Steinbrunn, M.D.University Hospital, Basel, Switzerland: Matthias Pfisterer, M.D.V.A. Medical Center, Long Beach and Cleveland Clinic Foundation: Robert Detrano, M.D., Ph.D.Donor:David W. Aha (aha '@' ics.uci.edu) (714) 856-8779The Attributes1. Age2. Sex1 = male0 = female3. Chest pain (CP)Value 0: asymptomaticValue 1: atypical anginaValue 2: non-anginal painValue 3: typical angina4. trestbpsResting blood pressure (in mm Hg on admission to the hospital)5. cholSerum cholestorol in mg/dl6. fbs (Fasting blood sugar)(fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)7. restecg - Resting electrocardiographic results8. thalach - Maximum heart rate achieved9. exang - Exercise induced angina (1= Yes, 0 = No)10. oldpeak - ST depression induced by exercise relative to rest11. slope - The slope of the peak exercise ST segmenti: Upslopingii: Flatiii: Downsloping12. ca (coloured arteries) - Number of major vessels (0-3) colored by flourosopy13. thal - 3 = normal; 6 = fixed defect; 7 = reversable defect14. target - 0 = Heart disease present, 1 = Heart disease absentObjective1. Find any correlations between attributes2. Find correlations between each attribute and the diagnosis of heart diseaseLet's Begin!
###Code
#the usual...
import numpy as np
import pandas as pd
import scipy.stats # Needed to compute statistics for categorical data (yep I'm using my AP Stats skills!)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set() # Making sns as default for plots
data = pd.read_csv('./drive/My Drive/heart.csv') #for some reason "from google.colab import files" isn't working for me...
data.head()
data.shape
data.isnull().sum()
###Output
_____no_output_____
###Markdown
Yay! No NaN or null values!Time for Pairplot
###Code
g = sns.pairplot(data)
g.fig.suptitle('Pair plot', fontsize = 20)
g.fig.subplots_adjust(top= 0.9);
###Output
_____no_output_____
###Markdown
Correlation Matrix
###Code
plt.figure(figsize=(15,10))
corrMatrix = data.corr()
sns.heatmap(corrMatrix, annot=True)
plt.show()
###Output
_____no_output_____
###Markdown
Correlation between age and heart disease
###Code
# Look into distribution by plotting a histogram
plt.figure(figsize=(10,4))
plt.legend(loc='upper left')
g = sns.countplot(data = data, x = 'age', hue = 'target')
g.legend(title = 'Heart disease patient?', loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)
###Output
No handles with labels found to put in legend.
###Markdown
Seems like heart disease patients are clustered around the ages of late 50's and 60's
###Code
# Heart disease patients
age_corr = ['age', 'target']
age_corr1 = data[age_corr]
age_corr_y = data[age_corr1['target'] == 0].groupby(['age']).size().reset_index(name = 'count')
age_corr_y.corr()
# Healthy patients
age_corr_n = age_corr1[age_corr1['target'] == 1].groupby(['age']).size().reset_index(name = 'count')
age_corr_n.corr()
###Output
_____no_output_____
###Markdown
There is a high correlation between age and the number of heart disease patients; age appears to be an important risk factor for heart disease. Correlation between heart disease patients and sex
###Code
# Look into distribution by plotting a histogram
plt.figure(figsize=(10,4))
plt.legend(loc='upper left')
g = sns.countplot(data = data, x = 'sex', hue = 'target')
g.legend(title = 'Heart disease patient?', loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)
###Output
No handles with labels found to put in legend.
###Markdown
**Where 1 is male, and 0 is female
###Code
sex_corr = ['sex', 'target']
sex_corr1 = data[sex_corr]
sex_corr_y = data[sex_corr1['target'] == 0].groupby(['sex']).size().reset_index(name = 'count')
sex_corr_y.corr()
sex_corr_n = sex_corr1[sex_corr1['target'] == 1].groupby(['sex']).size().reset_index(name = 'count')
sex_corr_n.corr()
###Output
_____no_output_____
###Markdown
Chi-square testSex is a categorical variable. Target, which tells us whether the patient has heart disease or not, is also a categorical variable. To test for association between two categorical variables, we need to use the Chi-Square test. We will be using a 5% significance level (equivalently, 95% confidence).The null hypothesis is that they are independent.The alternative hypothesis is that they are associated in some way.
###Code
cont = pd.crosstab(data["sex"],data["target"])
scipy.stats.chi2_contingency(cont)
###Output
_____no_output_____
###Markdown
I performed the test and obtained a p-value < 0.05 and I can reject the hypothesis of independence. So is there truly a correlation between sex and heart disease? Well, I can't really accept this result here mainly for one reason. The data for healthy female is too low. I only have 24 female individuals that are healthy. If I were to push the number up to, let's say 94, I will get a much higher p-value. Hence, I feel that there is no point in performing a correlation analysis if the difference between the test samples are too high. Correlation between chest pain and heart disease
###Code
# Chi-square test
cont1 = pd.crosstab(data["cp"],data["target"])
scipy.stats.chi2_contingency(cont1)
###Output
_____no_output_____
###Markdown
Seems like chest pain is correlated to heart disease. Correlation between resting blood pressure and heart disease
###Code
restbp_corr = ['trestbps', 'target']
restbp_corr1 = data[restbp_corr]
restbp_corr_y = restbp_corr1[restbp_corr1['target'] == 0].groupby(['trestbps']).size().reset_index(name = 'count')
restbp_corr_y.corr()
restbp_corr_n = restbp_corr1[restbp_corr1['target'] == 1].groupby(['trestbps']).size().reset_index(name = 'count')
restbp_corr_n.corr()
###Output
_____no_output_____
###Markdown
This shows that heart disease is correlated to resting blood pressure. If we look back into the Pairplot, we will see that heart disease patients have slightly higher resting blood pressure as compared to healthy patients. Correlation between serum cholesterol and heart diseaseHere, I am rounding the cholesterol value to the tenth place. If I dont do that I'll get tons of count = 1. This will affect the correlation test.
###Code
# Showing number of heart disease patients based on serum cholesterol
chol_corr = ['chol', 'target']
chol_corr1 = data[chol_corr]
chol_corr2 = chol_corr1.copy()
chol_corr2.chol = chol_corr2.chol.round(decimals=-1)
chol_corr_y = chol_corr2[chol_corr2['target'] == 0].groupby(['chol']).size().reset_index(name = 'count')
chol_corr_y.corr()
# Showing number of healthy patients based on serum cholesterol
chol_corr_n = chol_corr1[chol_corr1['target'] == 1].groupby(['chol']).size().reset_index(name = 'count')
chol_corr_n.corr()
###Output
_____no_output_____
###Markdown
No strong correlation between serum cholesterol and heart disease. Correlation between ECG results and heart diseaseValue 0: showing probable or definite left ventricular hypertrophy by Estes' criteriaValue 1: normalValue 2: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
###Code
# Showing number of heart disease patients based on resting ECG results
restecg_corr = ['restecg', 'target']
restecg_corr1 = data[restecg_corr]
restecg_corr_y = restecg_corr1[restecg_corr1['target'] == 0].groupby(['restecg']).size().reset_index(name = 'count')
restecg_corr_y
# Showing number of healthy patients based on resting ECG results
restecg_corr_n = restecg_corr1[restecg_corr1['target'] == 1].groupby(['restecg']).size().reset_index(name = 'count')
restecg_corr_n
# Chi-square test
cont4 = pd.crosstab(data["restecg"],data["target"])
scipy.stats.chi2_contingency(cont4)
###Output
_____no_output_____
###Markdown
I obtained a p-value of 0.00666. This shows that there is a correlation between the various types of ECG results and heart disease. I do see a huge difference normal ECG between healthy and heart disease patients. Correlation between maximum heart rate and heart disease
###Code
# Showing number of heart disease patients based on maximum heart rate
heartrate_corr = ['thalach', 'target']
heartrate_corr1 = data[heartrate_corr]
heartrate_corr_y = heartrate_corr1[heartrate_corr1['target'] == 0].groupby(['thalach']).size().reset_index(name = 'count')
heartrate_corr_y.corr()
heartrate_corr_n = heartrate_corr1[heartrate_corr1['target'] == 1].groupby(['thalach']).size().reset_index(name = 'count')
heartrate_corr_n.corr()
###Output
_____no_output_____ |
labs/lab_10_Moreno.ipynb | ###Markdown
MAT281 - Lab No. 10 I.- Problem 01**Breast cancer** is a malignant proliferation of the epithelial cells lining the mammary ducts or lobules. It is a clonal disease: a single cell, as the result of a series of somatic or germline mutations, acquires the ability to divide without control or order, reproducing until it forms a tumor. The resulting tumor, which starts as a mild anomaly, becomes serious, invades neighboring tissue and eventually spreads to other parts of the body.The dataset is called `BC.csv` and contains information on different patients with tumors (benign or malignant) and some characteristics of each tumor.The features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass and describe the characteristics of the cell nuclei present in the image.Details can be found in [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].First, load the dataset:
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.datasets import load_digits
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
from sklearn.cluster import KMeans
%matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize':(11.7,8.27)})
# load the data
df = pd.read_csv(os.path.join("data","BC.csv"), sep=",")
df['diagnosis'] = df['diagnosis'] .replace({'M':1,'B':0}) # target
df.head()
###Output
_____no_output_____
###Markdown
Based on the information presented, answer the following questions:1. Perform an exploratory analysis of the dataset.2. Normalize the numeric variables with the **StandardScaler** method.3. Apply a dimensionality reduction method seen in class.4. Apply at least three different classification models. For each chosen model, optimize the hyperparameters; in addition, compute the corresponding metrics. Conclude. Exploratory analysis of the dataset:
###Code
print('----------------------')
print('Mean of each variable')
print('----------------------')
df.mean(axis=0)
print('-------------------------')
print('Variance of each variable')
print('-------------------------')
df.var(axis=0)
df.describe()
###Output
_____no_output_____
###Markdown
Let's see how many tumors are malignant and how many are benign:
###Code
B = df[df["diagnosis"]==0]
M = df[df["diagnosis"]==1]
B
M
###Output
_____no_output_____
###Markdown
We conclude that 357 tumors are benign (B) versus 212 malignant (M). Normalize the numeric variables with the StandardScaler method:
###Code
scaler = StandardScaler()
df[df.columns.drop(["id","diagnosis"])] = scaler.fit_transform(df[df.columns.drop(["id","diagnosis"])])
df.head()
###Output
_____no_output_____
###Markdown
Apply a dimensionality reduction method seen in class:
###Code
# Train the PCA model with scaled data
# ==============================================================================
pca_pipe = make_pipeline(StandardScaler(), PCA())
pca_pipe.fit(df)
# Extract the fitted model from the pipeline
modelo_pca = pca_pipe.named_steps['pca']
# Convert the array to a dataframe to add names to the axes.
pd.DataFrame(
data = modelo_pca.components_,
columns = df.columns,
index = ['PC1', 'PC2', 'PC3', 'PC4',
'PC5', 'PC6', 'PC7', 'PC8',
'PC9', 'PC10', 'PC11', 'PC12',
'PC13', 'PC14', 'PC15', 'PC16',
'PC17', 'PC18', 'PC19', 'PC20',
'PC21', 'PC22', 'PC23', 'PC24',
'PC25', 'PC26', 'PC27', 'PC28',
'PC29', 'PC30', 'PC31', 'PC32']
)
# Heatmap of the components
# ==============================================================================
plt.figure(figsize=(12,14))
componentes = modelo_pca.components_
plt.imshow(componentes.T, cmap='viridis', aspect='auto')
plt.yticks(range(len(df.columns)), df.columns)
plt.xticks(range(len(df.columns)), np.arange(modelo_pca.n_components_) + 1)
plt.grid(False)
plt.colorbar();
# plot the variance explained by each component
percent_variance = np.round(modelo_pca.explained_variance_ratio_* 100, decimals =2)
columns = ['PC1', 'PC2', 'PC3', 'PC4',
'PC5', 'PC6', 'PC7', 'PC8',
'PC9', 'PC10', 'PC11', 'PC12',
'PC13', 'PC14', 'PC15', 'PC16',
'PC17', 'PC18', 'PC19', 'PC20',
'PC21', 'PC22', 'PC23', 'PC24',
'PC25', 'PC26', 'PC27', 'PC28',
'PC29', 'PC30', 'PC31', 'PC32']
plt.figure(figsize=(20,10))
plt.bar(x= range(1,33), height=percent_variance, tick_label=columns)
plt.xticks(np.arange(modelo_pca.n_components_) + 1)
plt.ylabel('Pct. of variance explained')
plt.xlabel('Principal component')
plt.title('Percentage of variance explained by each component')
plt.show()
# plot the cumulative variance explained by the components
percent_variance_cum = np.cumsum(percent_variance)
#columns = ['PC1', 'PC1+PC2', 'PC1+PC2+PC3', 'PC1+PC2+PC3+PC4',.....]
plt.figure(figsize=(12,4))
plt.bar(x= range(1,33), height=percent_variance_cum, #tick_label=columns
)
plt.ylabel('Percentage of Variance Explained')
plt.xlabel('Principal Component Cumsum')
plt.title('PCA Scree Plot')
plt.show()
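# A small sketch: pick the smallest number of components explaining at least 90%
# of the variance; the 90% threshold is an arbitrary choice, not part of the lab
n_components_90 = int(np.argmax(percent_variance_cum >= 90) + 1)
print(f"{n_components_90} components explain at least 90% of the variance")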
# Projection of the training observations
# ==============================================================================
proyecciones = pca_pipe.transform(X=df)
proyecciones = pd.DataFrame(
proyecciones,
columns = ['PC1', 'PC2', 'PC3', 'PC4',
'PC5', 'PC6', 'PC7', 'PC8',
'PC9', 'PC10', 'PC11', 'PC12',
'PC13', 'PC14', 'PC15', 'PC16',
'PC17', 'PC18', 'PC19', 'PC20',
'PC21', 'PC22', 'PC23', 'PC24',
'PC25', 'PC26', 'PC27', 'PC28',
'PC29', 'PC30', 'PC31', 'PC32'],
index = df.index
)
proyecciones.head()
###Output
_____no_output_____
###Markdown
Apply at least three different classification models. For each chosen model, optimize its hyperparameters and compute the corresponding metrics. Conclude.
###Code
df3 = pd.get_dummies(df)
df3.head()
X = np.array(df3)
kmeans = KMeans(n_clusters=8,n_init=25, random_state=123)
kmeans.fit(X)
centroids = kmeans.cluster_centers_ # centros
clusters = kmeans.labels_ # clusters
# label the data with the clusters found
df["cluster"] = clusters
df["cluster"] = df["cluster"].astype('category')
centroids_df = pd.DataFrame(centroids)
centroids_df["cluster"] = [1,2,3,4,5,6,7,8]
# elbow-rule implementation
Nc = [5,10,20,30,50,75,100,200,300]
kmeans = [KMeans(n_clusters=i) for i in Nc]
score = [kmeans[i].fit(df).inertia_ for i in range(len(kmeans))]
df_Elbow = pd.DataFrame({'Number of Clusters':Nc,
'Score':score})
df_Elbow.head()
# plot the elbow curve
fig, ax = plt.subplots(figsize=(11, 8.5))
plt.title('Elbow Curve')
sns.lineplot(x="Number of Clusters",
y="Score",
data=df_Elbow)
sns.scatterplot(x="Number of Clusters",
y="Score",
data=df_Elbow)
plt.show()
# PCA
#scaler = StandardScaler()
X = df.drop(columns=["id","diagnosis"])
y = df['diagnosis']
embedding = PCA(n_components=2)
X_transform = embedding.fit_transform(X)
df_pca = pd.DataFrame(X_transform,columns = ['Score1','Score2'])
df_pca['diagnosis'] = y
# Plot Digits PCA
# Set style of scatterplot
sns.set_context("notebook", font_scale=1.1)
sns.set_style("ticks")
# Create scatterplot of dataframe
sns.lmplot(x='Score1',
y='Score2',
data=df_pca,
fit_reg=False,
legend=True,
height=9,
hue='diagnosis',
scatter_kws={"s":200, "alpha":0.3})
plt.title('PCA Results: BC', weight='bold').set_fontsize('14')
plt.xlabel('Prin Comp 1', weight='bold').set_fontsize('10')
plt.ylabel('Prin Comp 2', weight='bold').set_fontsize('10')
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from matplotlib.colors import ListedColormap
X = df_pca.drop(columns='diagnosis')
y = df_pca['diagnosis']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
h = .02 # step size in the mesh
plt.figure(figsize=(12,12))
names = ["Logistic",
"RBF SVM",
"Decision Tree",
"Random Forest"
]
classifiers = [
LogisticRegression(),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
from metrics_classification import *
class SklearnClassificationModels:
def __init__(self,model,name_model):
self.model = model
self.name_model = name_model
@staticmethod
def test_train_model(X,y,n_size):
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=n_size , random_state=42)
return X_train, X_test, y_train, y_test
def fit_model(self,X,y,test_size):
X_train, X_test, y_train, y_test = self.test_train_model(X,y,test_size )
return self.model.fit(X_train, y_train)
def df_testig(self,X,y,test_size):
X_train, X_test, y_train, y_test = self.test_train_model(X,y,test_size )
model_fit = self.model.fit(X_train, y_train)
preds = model_fit.predict(X_test)
df_temp = pd.DataFrame(
{
'y':y_test,
'yhat': model_fit.predict(X_test)
}
)
return df_temp
def metrics(self,X,y,test_size):
df_temp = self.df_testig(X,y,test_size)
df_metrics = summary_metrics(df_temp)
df_metrics['model'] = self.name_model
return df_metrics
# metrics
import itertools
# model names
names_models = ["Logistic",
"RBF SVM",
"Decision Tree",
"Random Forest"
]
# models
classifiers = [
LogisticRegression(),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
]
datasets
names_dataset = ['make_moons',
'make_circles',
'linearly_separable'
]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
# combine the information
list_models = list(zip(names_models,classifiers))
list_dataset = list(zip(names_dataset,datasets))
frames = []
for x in itertools.product(list_models, list_dataset):
name_model = x[0][0]
classifier = x[0][1]
name_dataset = x[1][0]
dataset = x[1][1]
X = dataset[0]
Y = dataset[1]
fit_model = SklearnClassificationModels( classifier,name_model)
df = fit_model.metrics(X,Y,0.2)
df['dataset'] = name_dataset
frames.append(df)
# combine the results
pd.concat(frames)
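# A hedged sketch of the hyperparameter optimisation requested in the statement,
# applied to the PCA scores of the BC data; the grids, cv=5 and accuracy scoring
# are illustrative choices, not part of the original lab
from sklearn.model_selection import GridSearchCV
param_grids = {
    "Logistic": {"C": [0.01, 0.1, 1, 10]},
    "RBF SVM": {"C": [0.1, 1, 10], "gamma": [0.1, 1, 2]},
    "Decision Tree": {"max_depth": [3, 5, 10]},
    "Random Forest": {"n_estimators": [10, 50, 100], "max_depth": [3, 5, 10]},
}
X_bc = df_pca[["Score1", "Score2"]]
y_bc = df_pca["diagnosis"]
for name, clf in zip(names_models, classifiers):
    grid = GridSearchCV(clf, param_grids[name], cv=5, scoring="accuracy")
    grid.fit(X_bc, y_bc)
    print(name, grid.best_params_, round(grid.best_score_, 3))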
###Output
_____no_output_____ |
python/d2l-en/mxnet/chapter_attention-mechanisms/attention-scoring-functions.ipynb | ###Markdown
Attention Scoring Functions:label:`sec_attention-scoring-functions`In :numref:`sec_nadaraya-watson`,we used a Gaussian kernel to modelinteractions between queries and keys.Treating the exponent of the Gaussian kernelin :eqref:`eq_nadaraya-watson-gaussian`as an *attention scoring function* (or *scoring function* for short),the results of this function wereessentially fed intoa softmax operation.As a result,we obtaineda probability distribution (attention weights)over values that are paired with keys.In the end,the output of the attention poolingis simply a weighted sum of the valuesbased on these attention weights.At a high level,we can use the above algorithmto instantiate the framework of attention mechanismsin :numref:`fig_qkv`.Denoting an attention scoring function by $a$,:numref:`fig_attention_output`illustrates how the output of attention poolingcan be computed as a weighted sum of values.Since attention weights area probability distribution,the weighted sum is essentiallya weighted average.:label:`fig_attention_output`Mathematically,suppose that we havea query $\mathbf{q} \in \mathbb{R}^q$and $m$ key-value pairs $(\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)$, where any $\mathbf{k}_i \in \mathbb{R}^k$ and any $\mathbf{v}_i \in \mathbb{R}^v$.The attention pooling $f$is instantiated as a weighted sum of the values:$$f(\mathbf{q}, (\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)) = \sum_{i=1}^m \alpha(\mathbf{q}, \mathbf{k}_i) \mathbf{v}_i \in \mathbb{R}^v,$$:eqlabel:`eq_attn-pooling`wherethe attention weight (scalar) for the query $\mathbf{q}$and key $\mathbf{k}_i$is computed bythe softmax operation ofan attention scoring function $a$ that maps two vectors to a scalar:$$\alpha(\mathbf{q}, \mathbf{k}_i) = \mathrm{softmax}(a(\mathbf{q}, \mathbf{k}_i)) = \frac{\exp(a(\mathbf{q}, \mathbf{k}_i))}{\sum_{j=1}^m \exp(a(\mathbf{q}, \mathbf{k}_j))} \in \mathbb{R}.$$:eqlabel:`eq_attn-scoring-alpha`As we can see,different choices of the attention scoring function $a$lead to different behaviors of attention pooling.In this section,we introduce two popular scoring functionsthat we will use to develop moresophisticated attention mechanisms later.
###Code
import math
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l
npx.set_np()
###Output
_____no_output_____
###Markdown
[**Masked Softmax Operation**]As we just mentioned,a softmax operation is used tooutput a probability distribution as attention weights.In some cases,not all the values should be fed into attention pooling.For instance,for efficient minibatch processing in :numref:`sec_machine_translation`,some text sequences are padded withspecial tokens that do not carry meaning.To get an attention poolingoveronly meaningful tokens as values,we can specify a valid sequence length (in number of tokens)to filter out those beyond this specified rangewhen computing softmax.In this way,we can implement such a *masked softmax operation*in the following `masked_softmax` function,where any value beyond the valid lengthis masked as zero.
###Code
#@save
def masked_softmax(X, valid_lens):
"""Perform softmax operation by masking elements on the last axis."""
# `X`: 3D tensor, `valid_lens`: 1D or 2D tensor
if valid_lens is None:
return npx.softmax(X)
else:
shape = X.shape
if valid_lens.ndim == 1:
valid_lens = valid_lens.repeat(shape[1])
else:
valid_lens = valid_lens.reshape(-1)
# On the last axis, replace masked elements with a very large negative
# value, whose exponentiation outputs 0
X = npx.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, True,
value=-1e6, axis=1)
return npx.softmax(X).reshape(shape)
###Output
_____no_output_____
###Markdown
To [**demonstrate how this function works**],consider a minibatch of two $2 \times 4$ matrix examples,where the valid lengths for these two examplesare two and three, respectively.As a result of the masked softmax operation,values beyond the valid lengthsare all masked as zero.
###Code
masked_softmax(np.random.uniform(size=(2, 2, 4)), np.array([2, 3]))
###Output
_____no_output_____
###Markdown
Similarly, we can alsouse a two-dimensional tensorto specify valid lengthsfor every row in each matrix example.
###Code
masked_softmax(np.random.uniform(size=(2, 2, 4)),
np.array([[1, 3], [2, 4]]))
###Output
_____no_output_____
###Markdown
[**Additive Attention**]:label:`subsec_additive-attention`In general,when queries and keys are vectors of different lengths,we can use additive attentionas the scoring function.Given a query $\mathbf{q} \in \mathbb{R}^q$and a key $\mathbf{k} \in \mathbb{R}^k$,the *additive attention* scoring function$$a(\mathbf q, \mathbf k) = \mathbf w_v^\top \text{tanh}(\mathbf W_q\mathbf q + \mathbf W_k \mathbf k) \in \mathbb{R},$$:eqlabel:`eq_additive-attn`wherelearnable parameters$\mathbf W_q\in\mathbb R^{h\times q}$, $\mathbf W_k\in\mathbb R^{h\times k}$, and $\mathbf w_v\in\mathbb R^{h}$.Equivalent to :eqref:`eq_additive-attn`,the query and the key are concatenatedand fed into an MLP with a single hidden layerwhose number of hidden units is $h$, a hyperparameter.By using $\tanh$ as the activation function and disablingbias terms,we implement additive attention in the following.
###Code
#@save
class AdditiveAttention(nn.Block):
"""Additive attention."""
def __init__(self, num_hiddens, dropout, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
# Use `flatten=False` to only transform the last axis so that the
# shapes for the other axes are kept the same
self.W_k = nn.Dense(num_hiddens, use_bias=False, flatten=False)
self.W_q = nn.Dense(num_hiddens, use_bias=False, flatten=False)
self.w_v = nn.Dense(1, use_bias=False, flatten=False)
self.dropout = nn.Dropout(dropout)
def forward(self, queries, keys, values, valid_lens):
queries, keys = self.W_q(queries), self.W_k(keys)
# After dimension expansion, shape of `queries`: (`batch_size`, no. of
# queries, 1, `num_hiddens`) and shape of `keys`: (`batch_size`, 1,
# no. of key-value pairs, `num_hiddens`). Sum them up with
# broadcasting
features = np.expand_dims(queries, axis=2) + np.expand_dims(
keys, axis=1)
features = np.tanh(features)
# There is only one output of `self.w_v`, so we remove the last
# one-dimensional entry from the shape. Shape of `scores`:
# (`batch_size`, no. of queries, no. of key-value pairs)
scores = np.squeeze(self.w_v(features), axis=-1)
self.attention_weights = masked_softmax(scores, valid_lens)
# Shape of `values`: (`batch_size`, no. of key-value pairs, value
# dimension)
return npx.batch_dot(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
###Markdown
Let us [**demonstrate the above `AdditiveAttention` class**]with a toy example,where shapes (batch size, number of steps or sequence length in tokens, feature size)of queries, keys, and valuesare ($2$, $1$, $20$), ($2$, $10$, $2$),and ($2$, $10$, $4$), respectively.The attention pooling outputhas a shape of (batch size, number of steps for queries, feature size for values).
###Code
queries, keys = np.random.normal(0, 1, (2, 1, 20)), np.ones((2, 10, 2))
# The two value matrices in the `values` minibatch are identical
values = np.arange(40).reshape(1, 10, 4).repeat(2, axis=0)
valid_lens = np.array([2, 6])
attention = AdditiveAttention(num_hiddens=8, dropout=0.1)
attention.initialize()
attention(queries, keys, values, valid_lens)
###Output
_____no_output_____
###Markdown
Although additive attention contains learnable parameters,since every key is the same in this example,[**the attention weights**] are uniform,determined by the specified valid lengths.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys', ylabel='Queries')
###Output
_____no_output_____
###Markdown
[**Scaled Dot-Product Attention**]A more computationally efficientdesign for the scoring function can besimply dot product.However,the dot product operationrequires that both the query and the keyhave the same vector length, say $d$.Assume thatall the elements of the query and the keyare independent random variableswith zero mean and unit variance.The dot product ofboth vectors has zero mean and a variance of $d$.To ensure that the variance of the dot productstill remains one regardless of vector length,the *scaled dot-product attention* scoring function$$a(\mathbf q, \mathbf k) = \mathbf{q}^\top \mathbf{k} /\sqrt{d}$$divides the dot product by $\sqrt{d}$.In practice,we often think in minibatchesfor efficiency,such as computing attentionfor$n$ queries and $m$ key-value pairs,where queries and keys are of length $d$and values are of length $v$.The scaled dot-product attentionof queries $\mathbf Q\in\mathbb R^{n\times d}$,keys $\mathbf K\in\mathbb R^{m\times d}$,and values $\mathbf V\in\mathbb R^{m\times v}$is$$ \mathrm{softmax}\left(\frac{\mathbf Q \mathbf K^\top }{\sqrt{d}}\right) \mathbf V \in \mathbb{R}^{n\times v}.$$:eqlabel:`eq_softmax_QK_V`In the following implementation of the scaled dot product attention, we use dropout for model regularization.
###Code
#@save
class DotProductAttention(nn.Block):
"""Scaled dot product attention."""
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
# Shape of `queries`: (`batch_size`, no. of queries, `d`)
# Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`)
# Shape of `values`: (`batch_size`, no. of key-value pairs, value
# dimension)
# Shape of `valid_lens`: (`batch_size`,) or (`batch_size`, no. of queries)
def forward(self, queries, keys, values, valid_lens=None):
d = queries.shape[-1]
# Set `transpose_b=True` to swap the last two dimensions of `keys`
scores = npx.batch_dot(queries, keys, transpose_b=True) / math.sqrt(d)
self.attention_weights = masked_softmax(scores, valid_lens)
return npx.batch_dot(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
###Markdown
To [**demonstrate the above `DotProductAttention` class**],we use the same keys, values, and valid lengths from the earlier toy examplefor additive attention.For the dot product operation,we make the feature size of queriesthe same as that of keys.
###Code
queries = np.random.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.initialize()
attention(queries, keys, values, valid_lens)
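# Quick sanity check (not in the original text): the masked attention weights
# should sum to 1 along the last axis for every query
print(attention.attention_weights.sum(axis=-1))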
###Output
_____no_output_____
###Markdown
Same as in the additive attention demonstration,since `keys` contains the same elementthat cannot be differentiated by any query,[**uniform attention weights**] are obtained.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys', ylabel='Queries')
###Output
_____no_output_____ |
training-data-analyst/courses/machine_learning/deepdive2/text_classification/labs/reusable_embeddings.ipynb | ###Markdown
Reusable Embeddings**Learning Objectives**1. Learn how to use pre-trained TF Hub text modules to generate sentence vectors1. Learn how to incorporate a pre-trained TF-Hub module into a Keras model1. Learn how to deploy and use a text model on CAIP IntroductionIn this notebook, we will implement text models to recognize the probable source (Github, Tech-Crunch, or The New-York Times) of the titles we have in the title dataset.First, we will load and pre-process the texts and labels so that they are suitable to be fed to sequential Keras models whose first layer is a pre-trained TF-Hub module. Thanks to this first layer, we won't need to tokenize and integerize the text before passing it to our models. The pre-trained layer will take care of that for us, and consume raw text directly. However, we will still have to one-hot-encode each of the 3 classes into a 3-dimensional basis vector.Then we will build, train and compare simple DNN models starting with different pre-trained TF-Hub layers.
###Code
import os
from google.cloud import bigquery
import pandas as pd
%load_ext google.cloud.bigquery
###Output
_____no_output_____
###Markdown
Replace the variable values in the cell below:
###Code
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT
REGION = "us-central1"
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
###Output
_____no_output_____
###Markdown
Create a Dataset from BigQuery Hacker news headlines are available as a BigQuery public dataset. The [dataset](https://bigquery.cloud.google.com/table/bigquery-public-data:hacker_news.stories?tab=details) contains all headlines from the sites inception in October 2006 until October 2015. Here is a sample of the dataset:
###Code
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
###Output
_____no_output_____
###Markdown
Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://mobile.nytimes.com/...., I want to be left with nytimes
###Code
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
###Output
_____no_output_____
###Markdown
Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
###Code
regex = '.*://(.[^/]+)/'
sub_query = """
SELECT
title,
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
AND LENGTH(title) > 10
""".format(regex)
query = """
SELECT
LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
source
FROM
({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
""".format(sub_query=sub_query)
print(query)
###Output
_____no_output_____
###Markdown
For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
###Code
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
###Output
_____no_output_____
###Markdown
AutoML for text classification requires that* the dataset be in csv form with * the first column being the texts to classify or a GCS path to the text * the last column being the text labelsThe dataset we pulled from BigQuery satisfies these requirements.
###Code
print("The full dataset contains {n} titles".format(n=len(title_dataset)))
###Output
_____no_output_____
###Markdown
Let's make sure we have roughly the same number of labels for each of our three labels:
###Code
title_dataset.source.value_counts()
###Output
_____no_output_____
###Markdown
Finally we will save our data, which is currently in-memory, to disk.We will create a csv file containing the full dataset and another containing only 1000 articles for development.**Note:** It may take a long time to train AutoML on the full dataset, so we recommend to use the sample dataset for the purpose of learning the tool.
###Code
DATADIR = './data/'
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = 'titles_full.csv'
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding='utf-8')
###Output
_____no_output_____
###Markdown
Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see [here](https://cloud.google.com/natural-language/automl/docs/beginners-guide) for further details on how to prepare data for AutoML).
###Code
sample_title_dataset = title_dataset.sample(n=1000)
sample_title_dataset.source.value_counts()
###Output
_____no_output_____
###Markdown
Let's write the sample dataset to disk.
###Code
SAMPLE_DATASET_NAME = 'titles_sample.csv'
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding='utf-8')
import datetime
import os
import shutil
import pandas as pd
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from tensorflow_hub import KerasLayer
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
print(tf.__version__)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located:
###Code
MODEL_DIR = "./text_models"
DATA_DIR = "./data"
###Output
_____no_output_____
###Markdown
Loading the dataset As in the previous labs, our dataset consists of titles of articles along with the label indicating from which source these articles have been taken from (GitHub, Tech-Crunch, or the New-York Times):
###Code
ls ./data/
DATASET_NAME = "titles_full.csv"
TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME)
COLUMNS = ['title', 'source']
titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS)
titles_df.head()
###Output
_____no_output_____
###Markdown
Let's look again at the number of examples per label to make sure we have a well-balanced dataset:
###Code
titles_df.source.value_counts()
###Output
_____no_output_____
###Markdown
Preparing the labels In this lab, we will use pre-trained [TF-Hub embeddings modules for english](https://tfhub.dev/s?q=tf2%20embeddings%20text%20english) for the first layer of our models. One immediateadvantage of doing so is that the TF-Hub embedding module will take care for us of processing the raw text. This also means that our model will be able to consume text directly instead of sequences of integers representing the words.However, as before, we still need to preprocess the labels into one-hot-encoded vectors:
###Code
CLASSES = {
'github': 0,
'nytimes': 1,
'techcrunch': 2
}
N_CLASSES = len(CLASSES)
def encode_labels(sources):
classes = [CLASSES[source] for source in sources]
one_hots = to_categorical(classes, num_classes=N_CLASSES)
return one_hots
encode_labels(titles_df.source[:4])
###Output
_____no_output_____
###Markdown
Preparing the train/test splits Let's split our data into train and test splits:
###Code
N_TRAIN = int(len(titles_df) * 0.95)
titles_train, sources_train = (
titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN])
titles_valid, sources_valid = (
titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:])
###Output
_____no_output_____
###Markdown
To be on the safe side, we verify that the train and test splitshave roughly the same number of examples per class.Since it is the case, accuracy will be a good metric to use to measurethe performance of our models.
###Code
sources_train.value_counts()
sources_valid.value_counts()
###Output
_____no_output_____
###Markdown
Now let's create the features and labels we will feed our models with:
###Code
X_train, Y_train = titles_train.values, encode_labels(sources_train)
X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid)
X_train[:3]
Y_train[:3]
###Output
_____no_output_____
###Markdown
NNLM Model We will first try a word embedding pre-trained using a [Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf). TF-Hub has a 50-dimensional one called [nnlm-en-dim50-with-normalization](https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1), which alsonormalizes the vectors produced. Lab Task 1a: Import NNLM TF Hub module into `KerasLayer`Once loaded from its url, the TF-hub module can be used as a normal Keras layer in a sequential or functional model. Since we have enough data to fine-tune the parameters of the pre-trained embedding itself, we will set `trainable=True` in the `KerasLayer` that loads the pre-trained embedding:
###Code
NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2"
nnlm_module = KerasLayer(# TODO)
###Output
_____no_output_____
###Markdown
Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence: Lab Task 1b: Use module to encode a sentence string
###Code
nnlm_module(tf.constant([# TODO]))
###Output
_____no_output_____
###Markdown
Swivel Model Then we will try a word embedding obtained using [Swivel](https://arxiv.org/abs/1602.02215), an algorithm that essentially factorizes word co-occurrence matrices to create the words embeddings. TF-Hub hosts the pretrained [gnews-swivel-20dim-with-oov](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1) 20-dimensional Swivel module. Lab Task 1c: Import Swivel TF Hub module into `KerasLayer`
###Code
SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1"
swivel_module = KerasLayer(# TODO)
###Output
_____no_output_____
###Markdown
Similarly as the previous pre-trained embedding, it outputs a single vector when passed a sentence: Lab Task 1d: Use module to encode a sentence string
###Code
swivel_module(tf.constant([# TODO]))
###Output
_____no_output_____
###Markdown
Building the models Let's write a function that * takes as input an instance of a `KerasLayer` (i.e. the `swivel_module` or the `nnlm_module` we constructed above) as well as the name of the model (say `swivel` or `nnlm`)* returns a compiled Keras sequential model starting with this pre-trained TF-hub layer, adding one or more dense relu layers to it, and ending with a softmax layer giving the probability of each of the classes: Lab Task 2: Incorporate a pre-trained TF Hub module as first layer of Keras Sequential Model
###Code
def build_model(hub_module, name):
model = Sequential([
# TODO
Dense(16, activation='relu'),
Dense(N_CLASSES, activation='softmax')
], name=name)
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
###Output
_____no_output_____
###Markdown
Let's also wrap the training code into a `train_and_evaluate` function that * takes as input the training and validation data, as well as the compiled model itself, and the `batch_size`* trains the compiled model for 100 epochs at most, and does early-stopping when the validation loss is no longer decreasing* returns an `history` object, which will help us to plot the learning curves
###Code
def train_and_evaluate(train_data, val_data, model, batch_size=5000):
X_train, Y_train = train_data
tf.random.set_seed(33)
model_dir = os.path.join(MODEL_DIR, model.name)
if tf.io.gfile.exists(model_dir):
tf.io.gfile.rmtree(model_dir)
history = model.fit(
X_train, Y_train,
epochs=100,
batch_size=batch_size,
validation_data=val_data,
callbacks=[EarlyStopping(), TensorBoard(model_dir)],
)
return history
###Output
_____no_output_____
###Markdown
Training NNLM
###Code
data = (X_train, Y_train)
val_data = (X_valid, Y_valid)
nnlm_model = build_model(nnlm_module, 'nnlm')
nnlm_history = train_and_evaluate(data, val_data, nnlm_model)
history = nnlm_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
###Output
_____no_output_____
###Markdown
Training Swivel
###Code
swivel_model = build_model(swivel_module, name='swivel')
swivel_history = train_and_evaluate(data, val_data, swivel_model)
history = swivel_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
###Output
_____no_output_____
###Markdown
Swivel trains faster but achieves a lower validation accuracy and requires more epochs to converge. Deploying the model The first step is to serialize one of our trained Keras models as a SavedModel:
###Code
OUTPUT_DIR = "./savedmodels"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, 'swivel')
os.environ['EXPORT_PATH'] = EXPORT_PATH
shutil.rmtree(EXPORT_PATH, ignore_errors=True)
tf.saved_model.save(swivel_model, EXPORT_PATH)
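# Optional local sanity check before deploying: score one made-up title with the
# in-memory Keras model (the example title below is an assumption, not from the dataset)
print(swivel_model.predict(tf.constant(["youtube introduces new live streaming features"])))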
###Output
_____no_output_____
###Markdown
Then we can deploy the model using the gcloud CLI as before: Lab Task 3a: Complete the following script to deploy the swivel model
###Code
%%bash
# TODO 5
MODEL_NAME=title_model
VERSION_NAME=swivel
if [[ $(gcloud ai-platform models list --format='value(name)' | grep $MODEL_NAME) ]]; then
echo "$MODEL_NAME already exists"
else
echo "Creating $MODEL_NAME"
gcloud ai-platform models create --region=$REGION $MODEL_NAME
fi
if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep $VERSION_NAME) ]]; then
echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... "
echo yes | gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME
echo "Please run this cell again if you don't see a Creating message ... "
sleep 2
fi
echo "Creating $MODEL_NAME:$VERSION_NAME"
gcloud ai-platform versions create $VERSION_NAME\
--model=$MODEL_NAME \
--framework=# TODO \
--python-version=# TODO \
--runtime-version=2.1 \
--origin=# TODO \
--staging-bucket=# TODO \
--machine-type n1-standard-4 \
--region=$REGION
###Output
_____no_output_____
###Markdown
Before we try our deployed model, let's inspect its signature to know what to send to the deployed API:
###Code
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
###Output
_____no_output_____
###Markdown
Let's go ahead and hit our model: Lab Task 3b: Create the JSON object to send a title to the API you just deployed(**Hint:** Look at the 'saved_model_cli show' command output above.)
###Code
%%writefile input.json
{# TODO}
!gcloud ai-platform predict \
--model title_model \
--json-instances input.json \
--version swivel \
--region=$REGION
###Output
_____no_output_____ |
notebooks/advanced/datetime.ipynb | ###Markdown
Table of Contents
###Code
import datetime
# set of objects for basic time
# date, time, and date time objects
# date(year, month, day) #in gregorian calendar
d1 = datetime.date(2015, 1, 23)
d1
d1.strftime("%A %m/%d/%y")
d2 = datetime.date(2015, 1, 19)
d2
d1 - d2
print(d1 - d2)
(d1-d2).days # time delta objects
datetime.date.today()
# time object
t1 = datetime.time(1, 2) # always 24hours
t1
t2 = datetime.time(18)
t2
t1.strftime('%I:%M %p')
# difference is not supported
t2 - t1
# relative times in a day, no date associated with it
#datetime
d1 = datetime.datetime.now()
d1
d2 = datetime.datetime.now()
d2
d2 - d1
datetime.datetime.strptime('1/1/15', '%m/%d/%y') #stringparsetime
# %a (%A) abbrev (full) weekday name
# w, weekday number (0 for sun, -- 6)
# b, B abbrev (full) month name
# %d day of month [01, 31]
# H I, 24 hour, 12 hour clock
# j day of year
# m month
# M minute
# p AM/PM
# S second
# U W week number of year U sunday, M monday as first day of week
# y Y year without/with century
# tz
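# small usage example of the directives listed above (arbitrary date values)
dt = datetime.datetime(2015, 1, 23, 18, 30)
print(dt.strftime('%A %B %d, %Y %I:%M %p'))
print(datetime.datetime.strptime('2015-01-23 18:30', '%Y-%m-%d %H:%M'))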
###Output
_____no_output_____ |
docs/examples/modifying_toolbar_tools.ipynb | ###Markdown
Modifying Toolbar Tools
###Code
import warnings
import numpy as np
import holoviews as hv
from bokeh.models import HoverTool
from holoext.xbokeh import Mod
warnings.filterwarnings('ignore') # bokeh deprecation warnings
hv.extension('bokeh')
x = np.array([8, 4, 2, 1])
y = np.array([2, 4, 5, 9])
bar = hv.Bars((x, y))
###Output
_____no_output_____
###Markdown
Hide toolbar
###Code
Mod(toolbar_location=None).apply(bar)
###Output
_____no_output_____
###Markdown
Change toolbar location
###Code
Mod(toolbar_location='west').apply(bar) # user forgiving parser for location
###Output
_____no_output_____
###Markdown
Add the default HoloView's tools and additional ones
###Code
Mod(tools=['default', 'hover', 'zoom_in']).apply(bar)
###Output
_____no_output_____
###Markdown
Select specific tools delimited by comma
###Code
Mod(tools='save,xwheel_zoom, ywheel_zoom, hover').apply(bar)
###Output
_____no_output_____
###Markdown
Input your customized tools with the default
###Code
hover_tool = HoverTool(tooltips=[('X value', '@x'),
('Y value', '@y')])
Mod(tools=['default', hover_tool]).apply(bar)
###Output
_____no_output_____
###Markdown
Have hover tool but hide it in toolbar
###Code
Mod(show_hover=False).apply(bar)
###Output
_____no_output_____
###Markdown
Hide Bokeh logo in toolbar
###Code
Mod(logo=False).apply(bar)
###Output
_____no_output_____ |
CNN/120C5-MP2-200C3-MP2-200N-10N.ipynb | ###Markdown
Check history loss and validation (from augmented data)
###Code
%matplotlib inline
# %config InlineBackend.figure_format = 'svg'
%load_ext autoreload
%autoreload 2
import os
import sys
import pandas as pd
import tensorflow as tf
import numpy as np
import datetime
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.optimizers import Adam
#import data
#import importlib.util
#spec = importlib.util.spec_from_file_location("data", "../mnist/data.py")
#data = importlib.util.module_from_spec(spec)
#spec.loader.exec_module(data)
#import data
! cp ../mnist/data.py data_mnist.py
! cp ../digit_recognizer/data.py data_digit_recognizer.py
import data_mnist
import data_digit_recognizer
DATA_ROOT = 'contest'
DATA_ROOT = 'dry_run'
SUBMISSION_ROOT = os.path.join(DATA_ROOT, 'submissions')
if not os.path.isdir(SUBMISSION_ROOT):
os.mkdir(SUBMISSION_ROOT)
IMAGE_COLS = 28
IMAGE_ROWS = 28
ORIGINAL_TRAIN_SIZE = 10000
ORIGINAL_TEST_SIZE = 50000
%%time
# Read contest Data
original_train_id, original_train_label = data_digit_recognizer.read_mnist_id_for_contest(os.path.join(DATA_ROOT, 'train.csv'))
original_train_id, original_train_image = data_digit_recognizer.read_mnist_for_contest(
os.path.join(DATA_ROOT, 'train'), original_train_id)
original_test_id, original_test_image = data_digit_recognizer.read_mnist_for_contest(os.path.join(DATA_ROOT, 'test'))
# check data
print('original_train_id:', original_train_id.shape)
print('original_train_label:', original_train_label.shape)
print('original_train_image:', original_train_image.shape)
assert(original_train_id.shape == (ORIGINAL_TRAIN_SIZE,))
assert(original_train_label.shape == (ORIGINAL_TRAIN_SIZE,))
assert(original_train_image.shape == (ORIGINAL_TRAIN_SIZE, IMAGE_COLS*IMAGE_ROWS))
print('original_test_id:', original_test_id.shape)
print('original_test_image', original_test_image.shape)
assert(original_test_id.shape == (ORIGINAL_TEST_SIZE,))
assert(original_test_image.shape == (ORIGINAL_TEST_SIZE, IMAGE_COLS*IMAGE_ROWS))
for i in range(10, 10+3):
data_digit_recognizer.show_digit(original_train_image[i], original_train_label[i])
data_digit_recognizer.analyze_labels(original_train_label)
for i in range(10, 10+3):
data_digit_recognizer.show_digit(original_test_image[i])
%%time
# Read dry_run test data as validation
DRY_RUN_DATA_ROOT = '../mnist/dry_run/'
! ls ../mnist/dry_run
original_valid_id, original_valid_label = data_mnist.read_contest_ids(os.path.join(DRY_RUN_DATA_ROOT, 'test.csv'))
original_valid_id, original_valid_image = data_mnist.read_contest_images(os.path.join(DRY_RUN_DATA_ROOT, 'test'), original_valid_id)
# check data
print('original_valid_id:', original_valid_id.shape)
print('original_valid_image:', original_valid_image.shape)
print('original_valid_label:', original_valid_label.shape)
for i in range(10, 10+3):
data_digit_recognizer.show_digit(original_valid_image[i], original_valid_label[i])
data_digit_recognizer.analyze_labels(original_valid_label)
# preprocessing
x_train = original_train_image.reshape(-1, 1, IMAGE_ROWS, IMAGE_COLS).astype('float32') / 255
y_train = np_utils.to_categorical(original_train_label, 10)
x_valid = original_valid_image.reshape(-1, 1, IMAGE_ROWS, IMAGE_COLS).astype('float32') / 255
y_valid = np_utils.to_categorical(original_valid_label, 10)
print('x_train shape: {}'.format(x_train.shape))
print('y_train shape: {}'.format(y_train.shape))
print('x_valid shape: {}'.format(x_valid.shape))
print('y_valid shape: {}'.format(y_valid.shape))
# model
import random
seed_num = 333
random.seed(seed_num)
np.random.seed(seed_num) # for reproducibility
model = Sequential()
# Layer 1
model.add(Convolution2D(120, 5, 5,
border_mode='valid',
input_shape=(1, 28, 28)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
model.add(Convolution2D(200, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Full connect
model.add(Flatten())
model.add(Dense(200))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Output
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4))
%%time
# Train
BATCH_SIZE = 50
# EPOCH_COUNT = 5
EPOCH_COUNT = 30
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCH_COUNT,
show_accuracy=True, verbose=1, validation_data=(x_valid, y_valid))
history.history['loss']
import matplotlib.pyplot as plt
x = np.arange(len(history.history['loss']))
plt.plot(x, history.history['loss'])
plt.plot(x, history.history['val_loss'])
plt.legend(['y = loss', 'y = val_loss'], loc='upper right')
plt.show()
for i in range(len(history.history['val_loss'])):
if(history.history['val_loss'][i]==min(history.history['val_loss'])):
print('min val_loss:{:.6f}, index:{}'.format(min(history.history['val_loss']).item(), i))
for i in range(len(history.history['loss'])):
if(history.history['loss'][i]==min(history.history['loss'])):
print('min loss: {:.6f}, index:{}'.format(min(history.history['loss']).item(), i))
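# Equivalent, more direct lookup with numpy argmin
best_epoch = int(np.argmin(history.history['val_loss']))
print('best epoch by val_loss:', best_epoch,
      'val_loss: {:.6f}'.format(history.history['val_loss'][best_epoch]))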
#save model
def save_keras_model(model, path):
with open(path + '.json', 'w') as f:
f.write(model.to_json())
model.save_weights(path+'.h5', overwrite=True)
save_keras_model( model, '120C5-MP2-200C3-MP2-200N-10N-alphadog2' )
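# Counterpart loader for the files written above; a minimal sketch assuming the
# same Keras version used in this notebook
from keras.models import model_from_json
def load_keras_model(path):
    with open(path + '.json') as f:
        loaded = model_from_json(f.read())
    loaded.load_weights(path + '.h5')
    return loaded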
###Output
_____no_output_____ |
Unit04/Linear Regression_HW.ipynb | ###Markdown
Basic exercise - compute the slope w and the intercept b for y = wx + b. Remember that before fitting, X must be reshaped from the format $$[x_1, x_2, \ldots, x_{50}]$$==> $$[[x_1], [x_2], \ldots, [x_{50}]]$$
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.array([ 0. , 0.20408163, 0.40816327, 0.6122449 , 0.81632653,
1.02040816, 1.2244898 , 1.42857143, 1.63265306, 1.83673469,
2.04081633, 2.24489796, 2.44897959, 2.65306122, 2.85714286,
3.06122449, 3.26530612, 3.46938776, 3.67346939, 3.87755102,
4.08163265, 4.28571429, 4.48979592, 4.69387755, 4.89795918,
5.10204082, 5.30612245, 5.51020408, 5.71428571, 5.91836735,
6.12244898, 6.32653061, 6.53061224, 6.73469388, 6.93877551,
7.14285714, 7.34693878, 7.55102041, 7.75510204, 7.95918367,
8.16326531, 8.36734694, 8.57142857, 8.7755102 , 8.97959184,
9.18367347, 9.3877551 , 9.59183673, 9.79591837, 10. ])
y = np.array([ 0.85848224, -0.10657947, 1.42771901, 0.53554778, 1.20216826,
1.81330509, 1.88362644, 2.23557653, 2.7384889 , 3.41174583,
4.08573636, 3.82529502, 4.39723111, 4.8852381 , 4.70092778,
4.66993962, 6.05133235, 5.44529881, 7.22571332, 6.79423911,
7.05424438, 7.00413058, 7.98149596, 7.00044008, 7.95903855,
9.96125238, 9.06040794, 9.56018295, 9.30035956, 9.26517614,
9.56401824, 10.07659844, 11.56755942, 11.38956185, 11.83586027,
12.45642786, 11.58403954, 11.60186428, 13.88486667, 13.35550112,
13.93938726, 13.31678277, 13.69551472, 14.76548676, 14.81731598,
14.9659187 , 15.19213921, 15.28195017, 15.97997265, 16.41258817])
#import the LinearRegression model from the sklearn.linear_model package
from sklearn.linear_model import LinearRegression
#assign the model to a variable for later use
LR = LinearRegression()
#note: convert x from 1D to 2D format
X = x.reshape(-1, 1)
# print(X.shape)
#fit the LinearRegression algorithm with the x, y data
LR.fit(X,y)
#print the slope and intercept of the trained function
print('斜率: ', LR.coef_)
print('截距: ', LR.intercept_)
###Output
斜率: [1.61701852]
截距: 0.2731296894942137
###Markdown
Advanced exercise - split the dataset for training and prediction (80% training data, 20% test data)
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.array([ 0. , 0.20408163, 0.40816327, 0.6122449 , 0.81632653,
1.02040816, 1.2244898 , 1.42857143, 1.63265306, 1.83673469,
2.04081633, 2.24489796, 2.44897959, 2.65306122, 2.85714286,
3.06122449, 3.26530612, 3.46938776, 3.67346939, 3.87755102,
4.08163265, 4.28571429, 4.48979592, 4.69387755, 4.89795918,
5.10204082, 5.30612245, 5.51020408, 5.71428571, 5.91836735,
6.12244898, 6.32653061, 6.53061224, 6.73469388, 6.93877551,
7.14285714, 7.34693878, 7.55102041, 7.75510204, 7.95918367,
8.16326531, 8.36734694, 8.57142857, 8.7755102 , 8.97959184,
9.18367347, 9.3877551 , 9.59183673, 9.79591837, 10. ])
y = np.array([ 0.85848224, -0.10657947, 1.42771901, 0.53554778, 1.20216826,
1.81330509, 1.88362644, 2.23557653, 2.7384889 , 3.41174583,
4.08573636, 3.82529502, 4.39723111, 4.8852381 , 4.70092778,
4.66993962, 6.05133235, 5.44529881, 7.22571332, 6.79423911,
7.05424438, 7.00413058, 7.98149596, 7.00044008, 7.95903855,
9.96125238, 9.06040794, 9.56018295, 9.30035956, 9.26517614,
9.56401824, 10.07659844, 11.56755942, 11.38956185, 11.83586027,
12.45642786, 11.58403954, 11.60186428, 13.88486667, 13.35550112,
13.93938726, 13.31678277, 13.69551472, 14.76548676, 14.81731598,
14.9659187 , 15.19213921, 15.28195017, 15.97997265, 16.41258817])
#import the LinearRegression model from the sklearn.linear_model package
from sklearn.linear_model import LinearRegression
#import the train_test_split module from the sklearn.model_selection package
from sklearn.model_selection import train_test_split
#split the dataset (80% training data, 20% test data, random_state=20)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=20)
#plot the training dataset with matplotlib
plt.scatter(x_train, y_train)
###Output
_____no_output_____
###Markdown
Fit the function using the training data1. Run the linear regression algorithm using only the training dataset2. Compute the MSE for the training stage3. Plot the target points (blue) against the predicted points (red)
###Code
regr = LinearRegression()
X_train = x_train.reshape(-1,1)  # reshape before fitting so X_train is defined and 2D
regr.fit(X_train,y_train)
Y_train = regr.predict(X_train)
mse = np.sum((Y_train-y_train)**2) / len(y_train)
print(mse)
plt.scatter(x_train, y_train)
plt.plot(x_train, Y_train, 'r')
###Output
_____no_output_____
###Markdown
Use the trained function to predict on the test set1. Use the model trained above to make predictions on the test dataset. *Note the use of reshape.* With X = 2.44897959, the predicted value should be 4.3025375. For all the test-set data: $$\widehat{y}=xw+b=w_{1}x_{1}+b$$
###Code
w = regr.coef_[0]
b = regr.intercept_
print(w, b)
X = 2.44897959
print(X * w + b)
Y_test = regr.predict(x_test.reshape(-1, 1))
Y_test
###Output
_____no_output_____
###Markdown
2. Compute the MSE for the testing stage
###Code
mse = np.sum((Y_test-y_test)**2) / len(y_test)
print(mse)
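# Cross-check with scikit-learn's own metrics; r2_score is an extra illustration
# not required by the homework
from sklearn.metrics import mean_squared_error, r2_score
print(mean_squared_error(y_test, Y_test), r2_score(y_test, Y_test))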
###Output
0.41344072565862955
###Markdown
3. Plot the target points (blue) against the predicted points (red)
###Code
plt.scatter(x_test, y_test)
print(x_test.shape, Y_test.shape)
plt.scatter(x_test, Y_test, c='r')
###Output
(10,) (10,)
|
analyze_program_lang.ipynb | ###Markdown
Comparison of the number of questions per programming language
###Code
#print(df)
#print(df.loc[df.js>0])
df_lang_count = df[['answer_count', 'java','python','js','php']]
df_sum = df_lang_count.sum()
df_sum = pd.DataFrame({'lang': df_sum.index, 'count': df_sum.values}).loc[1:]
a4_dims = (10.0, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.barplot(x='lang', y='count', data=df_sum, palette=None)
### Comparison of the number of answers per programming language
df_melt = df[['question_id', 'answer_count', 'java','python','js','php']]
df_melt = pd.melt(df_melt, id_vars=['question_id','answer_count'], value_vars=['java','python','js','php'])
df_melt = df_melt.loc[df_melt.value > 0]
df_melt = df_melt[['question_id','answer_count','variable']]
df_lang_ans = df_melt.groupby(['variable'])['answer_count'].sum()
df_lang_ans = pd.DataFrame({'lang': df_lang_ans.index, 'count': df_lang_ans.values})
df_lang_ans = df_lang_ans.sort_values(['count'], ascending=False)
a4_dims = (10.0, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.barplot(x='lang', y='count', data=df_lang_ans, palette=None)
#print(df)
def most_answers_lang(lang):
first = df.loc[df[lang] > 0].iloc[0]
question_title = first['question_title']
question_id = first['question_id']
answer_count = first['answer_count']
question_url = 'https://www.zhihu.com/question/%s' % question_id
print('%s 语言回答数最多的问题: %s\n%s\n回答数:%s' % (lang.upper(), question_title, question_url, answer_count))
most_answers_lang('java')
most_answers_lang('python')
most_answers_lang('js')
most_answers_lang('php')
###Output
PHP 语言回答数最多的问题: 零基础应该选择学习 java、php、前端 还是 python?
https://www.zhihu.com/question/40801731
回答数:334
|
notebooks/ex_005.ipynb | ###Markdown
Business plan - Output: - A table with the book information- Process: the sequence of steps organized by execution logic. - Analyze the page HTML - Research the best way to extract the data - Collect the following data: category | book_name | customer_rating | stock | price - Clean the data- Input: 1. Data source - Books to Scrape website: https://books.toscrape.com 2. Tools - Python 3.8.0 - Web-scraping libraries (BS4, Selenium) - Jupyter Notebooks (analysis and prototyping) 0.0 Imports
###Code
import re
import warnings
import requests
import pandas as pd
import numpy as np
import seaborn as sns
from datetime import datetime
from bs4 import BeautifulSoup
from IPython.core.display import HTML
from IPython.display import Image
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
0.1 Helper Functions
###Code
def jupyter_settings():
%matplotlib inline
%pylab inline
plt.style.use('bmh')
plt.rcParams['figure.figsize']=[20,10]
plt.rcParams['font.size']=10
display( HTML('<style>.container {width:100% !important; }</style>'))
pd.options.display.max_columns = None
pd.options.display.max_rows = None
#pd.set_options('display.expand_frame_repr',False )
sns.set()
jupyter_settings()
warnings.filterwarnings ('ignore')
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
1.0 Data Collect
###Code
book_title = []
book_price = []
book_stock = []
book_rating = []
book_category = []
quantidade = 0
for page in range(1,51):
#Get webpage data
root_url = 'https://books.toscrape.com/catalogue/'
url = 'https://books.toscrape.com/catalogue/page-{}.html'.format(page)
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5),AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
response = requests.get(url,headers=headers)
# Make Soup
soup = BeautifulSoup(response.text,'html.parser')
book = soup.find_all('article', class_='product_pod')
for book in book:
# Get Hyper Link
base_url = book.h3.a['href'].strip()
url = root_url + base_url
page_single = requests.get(url)
soup_single = BeautifulSoup(page_single.text,'html.parser')
book_single = soup_single.find('article', class_='product_page')
info_book = book_single.find('div', class_='product_main')
#Scraping Data
title = info_book.h1.get_text().strip()
price = info_book.find('p', class_='price_color').get_text()
stock = info_book.find('p', class_='availability').get_text().split()
rating = info_book.find('p','star-rating')['class'][1].strip()
category = soup_single.find('ul', class_='breadcrumb').find_all('li')[2].a.get_text()
#clean data
        stock_clean = re.findall(r"\d+", " ".join(stock))[0]  # stock is a token list; join it so the regex gets a string
#rating_clean = clean.str_to_int(rating)
book_title.append(title)
book_price.append(price)
book_stock.append(stock_clean)
book_rating.append(rating)
book_category.append(category)
quantidade = quantidade +1
print(quantidade)
data = pd.DataFrame({'book_category': book_category,
'book_title': book_title,
'book_price': book_price,
'book_stock': book_stock,
'book_rating': book_rating})
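# Quick structural check on the scraped table before filtering; purely illustrative
print(data.shape)
print(data['book_category'].nunique(), 'categories scraped')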
data = data.loc[((data['book_category'] == 'Classics') | (data['book_category'] == 'Science Fiction')
|(data['book_category'] == 'Humor')|(data['book_category'] == 'Business'))]
from datetime import datetime
#scrapy datetime
data.insert(1, 'scrapy_datetime',(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),allow_duplicates=False)
data.head()
data.to_csv('../data/dataset_v1.csv',index=False)
###Output
_____no_output_____
###Markdown
2.0 Clean Data
###Code
data= pd.read_csv('../data/dataset_v1.csv')
data.sample(5)
## book category
data['book_category'] = data['book_category'].apply(lambda x: x.lower())
# product price
data['book_price']= data['book_price'].apply(lambda x: x.replace('£','') if pd.notnull(x) else x).astype(float)
# book rating
data['book_rating'] = data['book_rating'].apply(lambda x: x.lower())
# book stock
regex ='\W((.+?),(.+?)),'
data['book_stock']= data['book_stock'].apply(lambda x: re.match(regex, x).group(1))
data['book_stock']= data['book_stock'].apply(lambda x: x.strip("'"))
data['book_stock']= data['book_stock'].apply(lambda x: x.replace("'" , "").replace("," , ""))
data.sample(5)
data.to_csv('../data/dataset_v2.csv',index=False)
###Output
_____no_output_____ |
modelClassifier.ipynb | ###Markdown
Import Library
###Code
# Import General Packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, classification_report
import pickle
from pathlib import Path
import warnings
#warnings.filterwarnings('ignore')
# import dataset
df_load = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/dqlab_telco_final.csv')
# Show the shape of the dataset
df_load.shape
# Show top 5 records
df_load.head()
# Show number of unique IDs
df_load.customerID.nunique()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis (EDA)In this case, I am asked to look at the distribution of:- the percentage of Churn vs. non-Churn customers over the whole dataset- the distribution of each predictor variable against the label (Churn)
###Code
# see univariate data visualization related to the percentage of churn data from customers
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.axis('equal')
labels = ['Yes','No']
churn = df_load.Churn.value_counts()
ax.pie(churn, labels=labels, autopct='%.0f%%')
plt.show()
# choose a numeric variable predictor and make a bivariate plot, then interpret it
# creating bin in chart
numerical_features = ['MonthlyCharges','TotalCharges','tenure']
fig, ax = plt.subplots(1, 3, figsize=(15, 6))
# use the following code to plot two overlays of histogram per each numerical_features,
# use a color of blue and orange, respectively
df_load[df_load.Churn == 'No'][numerical_features].hist(bins=20, color='blue', alpha=0.5, ax=ax)
df_load[df_load.Churn == 'Yes'][numerical_features].hist(bins=20, color='orange', alpha=0.5, ax=ax)
plt.show()
# choose a categorical predictor variable and make a bivariate plot, then interpret it
fig, ax = plt.subplots(3, 3, figsize=(14, 12))
sns.set(style='darkgrid')
sns.countplot(data=df_load, x='gender', hue='Churn', ax=ax[0][0])
sns.countplot(data=df_load, x='Partner', hue='Churn', ax=ax[0][1])
sns.countplot(data=df_load, x='SeniorCitizen', hue='Churn', ax=ax[0][2])
sns.countplot(data=df_load, x='PhoneService', hue='Churn', ax=ax[1][0])
sns.countplot(data=df_load, x='StreamingTV', hue='Churn', ax=ax[1][1])
sns.countplot(data=df_load, x='InternetService', hue='Churn', ax=ax[1][2])
sns.countplot(data=df_load, x='PaperlessBilling', hue='Churn', ax=ax[2][1])
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
**Conclusion**Based on the results and analysis above, it can be concluded that:- From the first step, most customers overall do not churn: 26% Churn versus 74% No Churn.- From the second step, for MonthlyCharges there is a tendency that the smaller the monthly charge, the lower the tendency to churn. For TotalCharges there does not seem to be any clear tendency with respect to churn. For tenure, the longer a customer has subscribed, the less likely they are to churn.- From the third step, there is no significant difference in churn with respect to gender or phone service (PhoneService). However, there is a tendency that customers who churn are those without a partner (Partner: No), senior citizens (SeniorCitizen: Yes), those with streaming TV service (StreamingTV: Yes), those with Internet service (InternetService: Yes) and those with paperless billing (PaperlessBilling: Yes). Pre-Processing Data
###Code
df_load.head()
#Remove the unnecessary columns customerID & UpdatedAt
cleaned_df = df_load.drop(['customerID','UpdatedAt'], axis=1)
cleaned_df.head()
cleaned_df.describe()
# Encoding Data
#Convert all the non-numeric columns to numerical data types
for column in cleaned_df.columns:
    if np.issubdtype(cleaned_df[column].dtype, np.number): continue  # skip columns that are already numeric
# Perform encoding for each non-numeric column
cleaned_df[column] = LabelEncoder().fit_transform(cleaned_df[column])
cleaned_df.describe()
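# Optional sketch (assumes df_load is unchanged above): keeping each fitted
# LabelEncoder makes it possible to map encoded values back to the original
# labels later. This works on a fresh copy so cleaned_df used below is untouched.
from sklearn.preprocessing import LabelEncoder
encoded_copy = df_load.drop(['customerID', 'UpdatedAt'], axis=1).copy()
encoders = {}
for col in encoded_copy.select_dtypes(include='object').columns:
    encoders[col] = LabelEncoder()
    encoded_copy[col] = encoders[col].fit_transform(encoded_copy[col])
print({col: list(enc.classes_) for col, enc in encoders.items()})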
# Splitting Dataset
# Predictor and Target
X = cleaned_df.drop('Churn', axis = 1)
y = cleaned_df['Churn']
# Splitting train and test
# Splitting train and test
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Print according to the expected result
print('The number of rows and columns of x_train is: ', x_train.shape, ', while the number of rows and columns of y_train is:', y_train.shape)
print('\nChurn percentage in training data is:')
print(y_train.value_counts(normalize=True))
print('\nThe number of rows and columns of x_test is:', x_test.shape,', while the number of rows and columns of y_test is:', y_test.shape)
print('\nChurn percentage in Testing data is:')
print(y_test.value_counts(normalize=True))
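# Optional sketch: the split above relies on shuffling alone to keep the churn
# ratio similar in train and test. Passing stratify=y makes that explicit.
# This is an alternative run, not the split used for the outputs below.
x_train_s, x_test_s, y_train_s, y_test_s = train_test_split(
    X, y, test_size=0.3, random_state=42, stratify=y)
print(y_train_s.value_counts(normalize=True))
print(y_test_s.value_counts(normalize=True))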
###Output
The number of rows and columns of x_train is: (4865, 10) , while the number of rows and columns of y_train is: (4865,)
Churn percentage in training data is:
0 0.734841
1 0.265159
Name: Churn, dtype: float64
The number of rows and columns of x_test is: (2085, 10) , while the number of rows and columns of y_test is: (2085,)
Churn percentage in Testing data is:
0 0.738129
1 0.261871
Name: Churn, dtype: float64
###Markdown
**Conclusion**After further analysis, it turns out that some columns are not needed by the model, namely the customer ID (customerID) and the data collection period (UpdatedAt), so they are dropped. Next, the values that are still strings are converted to numeric form through encoding; once this is done, the data distribution shows that the min and max of each encoded variable have changed to 0 and 1. The last step is to split the data into two parts for modeling. Afterwards, the number of rows and columns of each split is as expected and the churn percentage in each split matches the original data, which indicates that the data was separated properly and correctly. LogisticRegression
###Code
# Create a model using the LogisticRegression Algorithm
warnings.filterwarnings('ignore')
log_model = LogisticRegression().fit(x_train, y_train)
# LogisticRegression Model
log_model
# Predict
y_train_pred = log_model.predict(x_train)
# Print classification report
print(classification_report(y_train, y_train_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_train, y_train_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Training Model\n(Logistic Regression)', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
# Performance Data Testing - Displays Metrics
# Predict
y_test_pred = log_model.predict(x_test)
# Print classification report
print(classification_report(y_test, y_test_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_test, y_test_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Testing Model\n(Logistic Regression)\n', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
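# Optional sketch: accuracy can be flattering on imbalanced churn data, so
# ROC-AUC and recall on the churn class are worth reporting too. Assumes
# log_model, x_test, y_test and y_test_pred from the cells above.
from sklearn.metrics import roc_auc_score, recall_score
print('ROC-AUC:', roc_auc_score(y_test, log_model.predict_proba(x_test)[:, 1]))
print('Recall (churn class):', recall_score(y_test, y_test_pred))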
###Output
_____no_output_____
###Markdown
**Conclusion**From the results and analysis above:* On the training data the model predicts with an accuracy of 79%: 636 churn predictions are actually churn (true positives), 3227 non-churn predictions are actually non-churn (true negatives), 654 non-churn predictions are actually churn (false negatives) and 348 churn predictions are actually non-churn (false positives).* On the testing data the model predicts with an accuracy of 79%: 263 true positives, 1390 true negatives, 283 false negatives and 149 false positives. Random Forest Classifier
###Code
# Create a model using RandomForestClassifier
rdf_model = RandomForestClassifier().fit(x_train, y_train)
rdf_model
# Predict
y_train_pred = rdf_model.predict(x_train)
# Print classification report
print(classification_report(y_train, y_train_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_train, y_train_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Training Model\n(Random Forest)', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
# Performance Data Testing - Displays Metrics
# Predict
y_test_pred = rdf_model.predict(x_test)
# Print classification report
print(classification_report(y_test, y_test_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_test, y_test_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Testing Model\n(Random Forest)\n', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
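# Optional sketch: the default random forest fits the training data almost
# perfectly (see the conclusion below), a sign of overfitting. A depth-limited
# forest checked with cross-validation gives a more honest estimate.
from sklearn.model_selection import cross_val_score
rdf_limited = RandomForestClassifier(max_depth=6, n_estimators=200, random_state=42)
print('CV accuracy (depth-limited):', cross_val_score(rdf_limited, x_train, y_train, cv=5).mean())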
###Output
_____no_output_____
###Markdown
**Conclusion**From the results and analysis above:- If we use the Random Forest algorithm by calling RandomForestClassifier() from sklearn without adding any parameters, the result is a model with sklearn's default settings; see the documentation for details.- On the training data the model predicts with an accuracy of 100%: 1278 churn predictions are actually churn (true positives), 3566 non-churn predictions are actually non-churn (true negatives), 12 non-churn predictions are actually churn (false negatives) and 9 churn predictions are actually non-churn (false positives).- On the testing data the model predicts with an accuracy of 78%: 262 true positives, 1360 true negatives, 284 false negatives and 179 false positives. Gradient Boosting Classifier
###Code
#Train the model
gbt_model = GradientBoostingClassifier().fit(x_train, y_train)
gbt_model
# Predict
y_train_pred = gbt_model.predict(x_train)
# Print classification report
print(classification_report(y_train, y_train_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_train, y_train_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Training Model\n(Gradient Boosting)', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
# Predict
y_test_pred = gbt_model.predict(x_test)
# Print classification report
print(classification_report(y_test, y_test_pred))
# Form confusion matrix as a DataFrame
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_test, y_test_pred)), ('No churn', 'Churn'), ('No churn', 'Churn'))
# Plot confusion matrix
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Testing Model\n(Gradient Boosting)', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
**Conclusion**From the results and analysis above:- If we use the Gradient Boosting algorithm by calling GradientBoostingClassifier() from the sklearn package without adding any parameters, the result is a model with sklearn's default settings; see the documentation for details.- On the training data the model predicts with an accuracy of 82%: 684 true positives, 3286 true negatives, 606 false negatives and 289 false positives.- On the testing data the model predicts with an accuracy of 79%: 261 true positives, 1394 true negatives, 285 false negatives and 145 false positives.
###Code
# Save model
pickle.dump(log_model, open('best_model_churn.pkl', 'wb'))
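# Optional sketch: reloading the saved model later, e.g. in a scoring script.
# Assumes best_model_churn.pkl was written by the line above.
loaded_model = pickle.load(open('best_model_churn.pkl', 'rb'))
print(loaded_model.predict(x_test[:5]))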
###Output
_____no_output_____ |
cooker_whistle/simple_audio_mic.ipynb | ###Markdown
Get Audio Input
###Code
# get pyaudio input device
def getInputDevice(p):
index = None
nDevices = p.get_device_count()
print('Found %d devices:' % nDevices)
for i in range(nDevices):
deviceInfo = p.get_device_info_by_index(i)
#print(deviceInfo)
devName = deviceInfo['name']
print(devName)
# look for the "input" keyword
# choose the first such device as input
# change this loop to modify this behavior
# maybe you want "mic"?
        if index is None:
if 'input' in devName.lower():
index = i
# print out chosen device
if index is not None:
devName = p.get_device_info_by_index(index)["name"]
#print("Input device chosen: %s" % devName)
return index
# initialize pyaudio
p = pyaudio.PyAudio()
getInputDevice(p)
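# Optional sketch: keep the detected index so it can be passed to p.open() as
# input_device_index later, instead of the hard-coded -1 used further down.
chosen_index = getInputDevice(p)
print('Chosen input device index:', chosen_index)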
###Output
Found 8 devices:
HDA NVidia: HDMI 0 (hw:0,3)
HDA NVidia: HDMI 1 (hw:0,7)
HD-Audio Generic: ALC887-VD Analog (hw:1,0)
HD-Audio Generic: ALC887-VD Digital (hw:1,1)
HD-Audio Generic: ALC887-VD Alt Analog (hw:1,2)
hdmi
pulse
default
###Markdown
Now let's try plotting 1 second of Mic Input
###Code
def get_spectrogram(waveform):
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
return spectrogram
def plot_spectrogram(spectrogram, ax):
# Convert to frequencies to log scale and transpose so that the time is
# represented in the x-axis (columns).
log_spec = np.log(spectrogram.T)
height = log_spec.shape[0]
X = np.arange(16000, step=height + 1)
Y = range(height)
ax.pcolormesh(X, Y, log_spec)
# set sample rate
NSEC = 1
sampleRate = 16000 # #48000
sampleLen = NSEC*sampleRate
print('opening stream...')
stream = p.open(format = pyaudio.paInt16,
channels = 1,
rate = sampleRate,
input = True,
frames_per_buffer = 4096,
input_device_index = -1)
# read a chunk of data - discard first
data = stream.read(sampleLen)
print(type(data))
p.close(stream)
waveform = tf.cast(tf.io.decode_raw(data, "int16"), "float32")/32768.0
print(waveform)
spectrogram = get_spectrogram(waveform)
#spectrogram = tf.reshape(spectrogram, (spectrogram.shape[0], spectrogram.shape[1], 1))
print(spectrogram.shape)
fig, axes = plt.subplots(2, figsize=(12, 8))
timescale = np.arange(waveform.shape[0])
axes[0].plot(timescale, waveform.numpy())
axes[0].set_title('Waveform')
axes[0].set_xlim([0, 16000])
axes[0].set_ylim([-1, 1])
plot_spectrogram(spectrogram.numpy(), axes[1])
axes[1].set_title('Spectrogram')
plt.show()
commands = ['go', 'down', 'up', 'stop', 'yes', 'left', 'right', 'no']
print(spectrogram.shape)
spectrogram1= tf.reshape(spectrogram, (-1, spectrogram.shape[0], spectrogram.shape[1], 1))
print(spectrogram1.shape)
prediction = model(spectrogram1)
print(prediction)
sm = tf.nn.softmax(prediction[0])
am = tf.math.argmax(sm)
print(sm)
print(commands[am])
#plt.bar(commands, tf.nn.softmax(prediction[0]))
#plt.title(f'Predictions for "{commands[label[0]]}"')
#plt.show()
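# Optional sketch: the record -> spectrogram -> predict steps above wrapped in
# one helper for repeated use. Assumes p, model, commands and get_spectrogram
# are defined as in the cells above.
def classify_from_mic(seconds=1, rate=16000):
    stream = p.open(format=pyaudio.paInt16, channels=1, rate=rate,
                    input=True, frames_per_buffer=4096)
    data = stream.read(seconds * rate)
    stream.stop_stream()
    stream.close()
    wave = tf.cast(tf.io.decode_raw(data, "int16"), "float32") / 32768.0
    spec = get_spectrogram(wave)
    spec = tf.reshape(spec, (-1, spec.shape[0], spec.shape[1], 1))
    probs = tf.nn.softmax(model(spec)[0])
    return commands[tf.math.argmax(probs)]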
###Output
(124, 129)
(1, 124, 129, 1)
tf.Tensor(
[[ 0.5241283 0.47888047 -1.1988008 -0.5169501 0.3624149 -0.44560105
-0.53696716 0.5674314 ]], shape=(1, 8), dtype=float32)
tf.Tensor(
[0.1957875 0.18712598 0.03495637 0.06912743 0.16655348 0.07423981
0.06775746 0.20445195], shape=(8,), dtype=float32)
no
|
Data Analysis Using Python.ipynb | ###Markdown
Import Files
###Code
import pandas as pd
vanorder = pd.read_csv('vanorder.csv')
vaninterest = pd.read_csv('vaninterest.csv')
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
vanorder.head()
vaninterest.head()
vanorder.describe()
###Output
_____no_output_____
###Markdown
Convert date into Pandas datetime object
###Code
vanorder.txCreate = pd.to_datetime(vanorder.txCreate)
vaninterest.txCreate = pd.to_datetime(vaninterest.txCreate)
vanorder.head()
###Output
_____no_output_____
###Markdown
Q) 5 : What is the order fulfillment rate, i.e. the percentage of orders that were completed?
###Code
len(vanorder[vanorder.order_status == 2])/len(vanorder)
###Output
_____no_output_____
###Markdown
Order Fulfillment rate = 94% Subset Order type- A
###Code
vanorderA = vanorder[vanorder.order_subset == 'A']
vaninterestA = vaninterest[vaninterest.order_subset_assigned == 'A']
###Output
_____no_output_____
###Markdown
Create a new column matchtime, i.e. the difference between the time the order was accepted and the time it was created
###Code
vanorderA['txAccept'] = vaninterestA.txCreate
vanorderA['matchtime'] = vanorderA.txAccept - vanorderA.txCreate
vanorderA.head()
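# Optional sketch: the SettingWithCopyWarning shown below can be avoided by
# taking an explicit copy before adding columns. This mirrors the assignments
# above and produces the same columns.
vanorderA = vanorder[vanorder.order_subset == 'A'].copy()
vanorderA['txAccept'] = vaninterestA.txCreate
vanorderA['matchtime'] = vanorderA.txAccept - vanorderA.txCreate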
###Output
c:\users\pramodksh\appdata\local\programs\python\python36-32\lib\site-packages\ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
c:\users\pramodksh\appdata\local\programs\python\python36-32\lib\site-packages\ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
Subset Advanced/Immediate orders
###Code
van_advanced_orders = vanorderA[(vanorderA.matchtime.between('00:00:00', '01:00:00') )]
van_advanced_orders = van_advanced_orders[van_advanced_orders.matchtime < '01:00:00']
van_advanced_orders.head()
###Output
_____no_output_____
###Markdown
Q)6 (a) What is the average match time, by immediate/advanced orders?
###Code
van_advanced_orders.matchtime.mean()
###Output
_____no_output_____
###Markdown
Average matchtime is 8 Minutes 59 sec Q)6 (b) What is the median match time, by immediate/advanced orders?
###Code
van_advanced_orders.matchtime.median()
###Output
_____no_output_____
###Markdown
Median matchtime is 5 minutes 06 seconds (c) Which of the above do you think provides a better representation of the data, i.e. a better metric for tracking our performance in matching? The median is the better metric because it is not affected by outliers (in this case, midnight orders). However, the mean over binned hours would provide better insights (average match time for morning, afternoon, evening and night; a quick sketch of this is added after the export step below). Export the file as a csv to prepare the dashboard (Tableau)
###Code
van_advanced_orders.to_csv('1.csv')
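# Optional sketch for the binned-hours idea above (assumes the same
# van_advanced_orders frame): average match time per hour of order creation.
hourly_mean = (van_advanced_orders
               .groupby(van_advanced_orders.txCreate.dt.hour)['matchtime']
               .mean())
print(hourly_mean)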
###Output
_____no_output_____ |
datavisualization/data_visualization_python_2.ipynb | ###Markdown
Data visualization with Python 2 - Visualizations with more than 2 dimensions*Cleuton Sampaio*, [**DataLearningHub**](http://datalearninghub.com)In this lesson we will see how to build visualizations with more than two dimensions of data. [](https://www.lcm.com.br/site/livros/busca?term=cleuton) Three-dimensional scatter plotsWhen we have three measurable and, above all, plottable features (within the same scale, or a scale we can adjust), it is useful to look at a scatter plot so we can visually assess the distribution of the samples. That is what we will do with the Matplotlib Toolkits library, in particular mplot3d, which provides the Axes3D object for generating three-dimensional charts.
###Code
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D # objects we will use for our chart
%matplotlib inline
df = pd.read_csv('../datasets/evasao.csv') # school dropout data I collected
df.head()
###Output
_____no_output_____
###Markdown
Some explanations. To start, let's look at the columns of this dataset: - "periodo": the semester the student is in;- "bolsa": percentage of scholarship the student receives;- "repetiu": number of courses the student failed;- "ematraso": whether the student has overdue tuition payments;- "disciplinas": courses the student is currently taking;- "desempenho": academic average so far;- "abandonou": whether or not the student dropped out after the measurement.To plot a chart we need to reduce the number of dimensions, that is, of features. I will do this in the most "naive" way possible, selecting the three features that most influenced the final outcome, i.e. the student dropping out (Churn).
###Code
df2 = df[['periodo','repetiu','desempenho']][df.abandonou == 1]
df2.head()
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig) # for Matplotlib 0.99
ax.scatter(xs=df2['periodo'],ys=df2['repetiu'],zs=df2['desempenho'], c='r',s=8)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
###Output
_____no_output_____
###Markdown
I simply used Axes3D to obtain a three-dimensional plot object. The "scatter" method takes three dimensions (xs, ys and zs), each assigned to one of the columns of the new dataframe. The "c" parameter is the color and "s" is the size of each point. I set the labels for each axis and that's it! We have a 3D chart showing the spatial distribution of course dropouts with respect to the three variables. We can assess data trends much better when we look at 3D visualizations. Let's look at a synthetic example. Let's generate some 3D values:
###Code
import numpy as np
np.random.seed(42)
X = np.linspace(1.5,3.0,num=100)
Y = np.array([x**4 + (np.random.rand()*6.5) for x in X])
Z = np.array([(X[i]*Y[i]) + (np.random.rand()*3.2) for i in range(0,100)])
###Output
_____no_output_____
###Markdown
First, let's see what this looks like in a 2D visualization:
###Code
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X, Y, c='b', s=20)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.show()
###Output
_____no_output_____
###Markdown
Ok... Nothing special... A positive non-linear correlation, right? But now, let's look at it with the Z array included:
###Code
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X, Y, Z, c='r',s=8)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
###Output
_____no_output_____
###Markdown
And this gets more interesting when we overlay a prediction on top of the real data. Let's use a Decision Tree Regressor to create a predictive model for this data:
###Code
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
features = pd.DataFrame({'X':X, 'Z':Z})
labels = pd.DataFrame({'Y':Y})
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
dtr3d = DecisionTreeRegressor(max_depth=4, random_state=42)
dtr3d.fit(X_train,y_train)
print('R2',dtr3d.score(X_train,y_train))
yhat3d = dtr3d.predict(X_test)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z, c='r',s=8)
ax.scatter(X_test['X'], yhat3d, X_test['Z'], c='k', marker='*',s=100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
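# Optional sketch: the R2 printed above is on the training split only; scoring
# the held-out split gives a fairer view of the regressor.
print('R2 (test)', dtr3d.score(X_test, y_test))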
###Output
_____no_output_____
###Markdown
We plotted the predictions using star markers. Quite interesting, isn't it? More than 3 dimensionsSometimes we want to show information with more than 3 dimensions, but how can we do that? Suppose we also want to include the scholarship percentage as a variable in our school dropout example. How would we do it?One possible approach is to manipulate the markers so that they represent the scholarship. We can use colors, for example. First, we need to know which scholarship brackets exist in the dataset:
###Code
print(df.groupby("bolsa").count())
###Output
periodo repetiu ematraso disciplinas faltas desempenho abandonou
bolsa
0.00 53 53 53 53 53 53 53
0.05 50 50 50 50 50 50 50
0.10 50 50 50 50 50 50 50
0.15 50 50 50 50 50 50 50
0.20 45 45 45 45 45 45 45
0.25 52 52 52 52 52 52 52
###Markdown
We can create a color table, indexed by the scholarship percentage:
###Code
from decimal import Decimal
bolsas = {0.00: 'b',0.05: 'r', 0.10: 'g', 0.15: 'm', 0.20: 'y', 0.25: 'k'}
df['cor'] = [bolsas[float(round(Decimal(codigo),2))] for codigo in df['bolsa']]
df.head()
###Output
_____no_output_____
###Markdown
This "hack" deserves an explanation. I created a dictionary indexed by the scholarship value, so we can look up the corresponding color code. But I need to add a column to the dataframe with that value in order to use it in the chart. There is just one problem: the original dataset is "dirty" (something that happens frequently) and the 0.15 percentage is stored as 0.1500000002. I can get rid of this by converting the value from "float" to "Decimal", rounding it, and converting it back to float. When we plot, we look the color up in the dictionary:
###Code
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig) # for Matplotlib 0.99
ax.scatter(xs=df['periodo'],ys=df['repetiu'],zs=df['desempenho'], c=df['cor'],s=50)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
###Output
_____no_output_____
###Markdown
Done! The color of each point now gives us the fourth dimension: the scholarship percentage. We can already see a concentration of students with a 25% scholarship (black) who have few failed courses but low performance, across all semesters. Just as we played with color, we can play with size, creating something like a "heat map". Let's turn this into a 2D view, making the point size proportional to "desempenho" (performance).
###Code
fig, ax = plt.subplots()
ax.scatter(df['periodo'],df['repetiu'], c='r',s=df['desempenho']*30)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
plt.show()
###Output
_____no_output_____
###Markdown
This shows us a curious fact. We have students with good performance (large dots) in every semester, who did not fail any course, and yet dropped out. What could have made them do that? Perhaps financial conditions, or dissatisfaction with the course. A fact to investigate, revealed only thanks to this visualization. GeoreferencingWe often have datasets with geographic information and need to plot the data on a map. I will show here how to do this with an example using the 2018 Dengue cases dataset for Rio de Janeiro. Source: Data Rio: http://www.data.rio/datasets/fb9ede8d588f45b48b985e62c817f062_0 I created a georeferenced dataset, which is in this demo's folder. It is a CSV file, semicolon-separated, with the Portuguese decimal separator (comma):
###Code
df_dengue = pd.read_csv('./dengue2018.csv',decimal=',', sep=';')
df_dengue.head()
###Output
_____no_output_____
###Markdown
A simple scatter plot already gives a good sense of the problem:
###Code
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=15)
plt.show()
###Output
_____no_output_____
###Markdown
We can make the point size proportional to the number of cases, adding another dimension of information:
###Code
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=5+df_dengue['quantidade'])
plt.show()
###Output
_____no_output_____
###Markdown
We can manipulate color and intensity to create a Dengue "heat map":
###Code
def calcular_cor(valor):
cor = 'r'
if valor <= 10:
cor = '#ffff00'
elif valor <= 30:
cor = '#ffbf00'
elif valor <= 50:
cor = '#ff8000'
return cor
df_dengue['cor'] = [calcular_cor(codigo) for codigo in df_dengue['quantidade']]
df_dengue.head()
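# Optional sketch: instead of hand-picked hex colors, a matplotlib colormap
# with a colorbar scales to any number of cases automatically.
fig, ax = plt.subplots()
sc = ax.scatter(df_dengue['longitude'], df_dengue['latitude'],
                c=df_dengue['quantidade'], cmap='YlOrRd', s=15)
fig.colorbar(sc, ax=ax, label='quantidade')
plt.show()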
###Output
_____no_output_____
###Markdown
And let's sort so that the largest counts are drawn last:
###Code
dfs = df_dengue.sort_values(['quantidade'])
dfs.head()
fig, ax = plt.subplots()
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.show()
###Output
_____no_output_____
###Markdown
Done! A heat map of Dengue in 2018. But something is missing, right? Where is the map of Rio de Janeiro?Many people use **geopandas** and download map files. I prefer to use Google Maps. It has an API called Static Maps that lets you download maps. First, I will install **requests**:
###Code
!pip install requests
###Output
Requirement already satisfied: requests in /home/cleuton/anaconda3/lib/python3.7/site-packages (2.21.0)
Requirement already satisfied: idna<2.9,>=2.5 in /home/cleuton/anaconda3/lib/python3.7/site-packages (from requests) (2.8)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /home/cleuton/anaconda3/lib/python3.7/site-packages (from requests) (1.24.1)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /home/cleuton/anaconda3/lib/python3.7/site-packages (from requests) (3.0.4)
Requirement already satisfied: certifi>=2017.4.17 in /home/cleuton/anaconda3/lib/python3.7/site-packages (from requests) (2018.11.29)
###Markdown
Now comes a slightly "smarter" part. I have the coordinates of the center of Rio de Janeiro (the geographic center, not the city center). I will build a request to the Static Maps API to download a map. Note that you have to register an API key to use this API; I deliberately omitted mine. The instructions for this are here: https://developers.google.com/maps/documentation/maps-static/get-api-key
###Code
import requests
latitude = -22.9137528
longitude = -43.526409
zoom = 10
size = 800
scale = 1
apikey = "**ENTER YOUR API KEY**"
gmapas = "https://maps.googleapis.com/maps/api/staticmap?center=" + str(latitude) + "," + str(longitude) + \
"&zoom=" + str(zoom) + \
"&scale=" + str(scale) + \
"&size=" + str(size) + "x" + str(size) + "&key=" + apikey
with open('mapa.jpg', 'wb') as handle:
response = requests.get(gmapas, stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
###Output
_____no_output_____
###Markdown
 Bom, o mapa foi salvo, agora eu preciso saber as coordenadas dos limites. A API do Google só permite que você informe o centro (latitude e longitude) e as dimensões da imagem em pixels. Mas, para ajustar o mapa às coordenadas em latitudes e longitudes, é preciso saber as coordenadas do retângulo da imagem. Há vários exemplos de como calcular isso e eu uso um exemplo Javascript que converti para Python há algum tempo. Este cálculo é baseado no script de: https://jsfiddle.net/1wy1mm7L/6/
###Code
import math
_C = { 'x': 128, 'y': 128 };
_J = 256 / 360;
_L = 256 / (2 * math.pi);
def tb(a):
return 180 * a / math.pi
def sb(a):
return a * math.pi / 180
def bounds(a, b, c):
if b != None:
a = max(a,b)
if c != None:
a = min(a,c)
return a
def latlonToPt(ll):
a = bounds(math.sin(sb(ll[0])), -(1 - 1E-15), 1 - 1E-15);
return {'x': _C['x'] + ll[1] * _J,'y': _C['y'] + 0.5 * math.log((1 + a) / (1 - a)) * - _L}
def ptToLatlon(pt):
return [tb(2 * math.atan(math.exp((pt['y'] - _C['y']) / -_L)) - math.pi / 2),(pt['x'] - _C['x']) / _J]
def calculateBbox(ll, zoom, sizeX, sizeY, scale):
cp = latlonToPt(ll)
pixelSize = math.pow(2, -(zoom + 1));
pwX = sizeX*pixelSize;
pwY = sizeY*pixelSize;
return {'ne': ptToLatlon({'x': cp['x'] + pwX, 'y': cp['y'] - pwY}),'sw': ptToLatlon({'x': cp['x'] - pwX, 'y': cp['y'] + pwY})}
limites = calculateBbox([latitude,longitude],zoom, size, size, scale)
print(limites)
###Output
{'ne': [-22.406842952305475, -42.97709259375], 'sw': [-23.418774019100944, -44.07572540625]}
###Markdown
The "calculateBbox" function returns a dictionary containing the Northeast and Southwest points, with the latitude and longitude of each. To use this in matplotlib I need the **imshow** method, but I have to give it the scale, that is, the range of latitudes (vertical) and longitudes (horizontal) the map represents, so that the points are plotted correctly. I will use the **mpimg** library to read the image file I just downloaded. Note that **imshow** takes the coordinates in the **extent** attribute in the order: LEFT, RIGHT, BOTTOM, TOP. We have to arrange the parameters accordingly.
###Code
import matplotlib.image as mpimg
fig, ax = plt.subplots(figsize=(10, 10))
rio_mapa=mpimg.imread('./mapa.jpg')
plt.imshow(rio_mapa, extent=[limites['sw'][1],limites['ne'][1],limites['sw'][0],limites['ne'][0]], alpha=1.0)
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)
plt.show()
###Output
_____no_output_____ |
Applied AI Study Group #4 - January 2021/Week 3/Lecture Projects/3 - Spam Text Classification Sequential.ipynb | ###Markdown
Spam Text ClassificationIn the second week of the inzva Applied AI program, we are going to create a spam text classifier using RNNs. Our data has 2 columns: the first column is the label and the second column is the text message itself. We are going to create our model using the following techniques- Embeddings- SimpleRNN- GRU- LSTM- Ensemble Model SimpleRNNA simple RNN layer, nothing special; the reason it is called 'Simple' is that it is neither a GRU nor an LSTM layer. You can read the documentation at https://keras.io/api/layers/recurrent_layers/simple_rnn/ LSTMhttps://keras.io/api/layers/recurrent_layers/lstm/We will use tokenization and padding to preprocess our data. We are going to create 3 different models and compare them. Libraries
###Code
from keras.layers import SimpleRNN, Embedding, Dense, LSTM
from keras.models import Sequential
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns; sns.set()
###Output
_____no_output_____
###Markdown
Dataset
###Code
!wget https://raw.githubusercontent.com/inzva/Applied-AI-Study-Group/master/Applied%20AI%20Study%20Group%20%233%20-%20June%202020/week2/SpamTextClassification/datasets_2050_3494_SPAM%20text%20message%2020170820%20-%20Data.csv
data = pd.read_csv("datasets_2050_3494_SPAM text message 20170820 - Data.csv")
###Output
_____no_output_____
###Markdown
Let's see the first 20 rows of our data and read the messages. What do you think, do they really look like spam messages?
###Code
data.head(20)
###Output
_____no_output_____
###Markdown
Let's calculate spam and non-spam message counts.
###Code
texts = []
labels = []
for i, label in enumerate(data['Category']):
texts.append(data['Message'][i])
if label == 'ham':
labels.append(0)
else:
labels.append(1)
texts = np.asarray(texts)
labels = np.asarray(labels)
print("number of texts :" , len(texts))
print("number of labels: ", len(labels))
labels
hamc= sum(labels==0)
spamc=sum(labels==1)
spamc /(hamc+spamc)
###Output
_____no_output_____
###Markdown
Data is imbalanced. Making it even more imbalanced by removing some of the spam messages and observing the model performance would be a good exercise to explore imbalanced dataset problem in Sequential Model context.
###Code
texts
###Output
_____no_output_____
###Markdown
Data PreprocessingEach sentence has different lengths. We need to have sentences of the same length. Besides, we need to represent them as integers.As a concerete example, we have following sentences- 'Go until jurong point crazy'- 'any other suggestions'First we will convert the words to integers, which is a way of doing Tokenization.- [5, 10, 26, 67, 98]- [7, 74, 107]Now we have two integer vectors with different length. We need to make them have the same length. Post Padding- [5, 10, 26, 67, 98]- [7, 74, 107, 0, 0] Pre Padding- [5, 10, 26, 67, 98]- [0, 0, 7, 74, 107]But you don't have to use padding in each task. For details please refer to this link https://github.com/keras-team/keras/issues/2375 Bucketing in NLP
###Code
from keras.layers import SimpleRNN, Embedding, Dense, LSTM
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# number of words in our vocabulary
max_features = 10000
# how many words from each document (max)?
maxlen = 500
###Output
_____no_output_____
###Markdown
Train - Test SplitWe will take a simple approach and create only train and test sets. Of course, having train, test and validation sets is the best practice.
###Code
training_samples = int(len(labels)*0.8)
training_samples
validation_samples = len(labels) - training_samples
assert len(labels) == (training_samples + validation_samples), "Not equal!"
print("The number of training {0}, validation {1} ".format(training_samples, validation_samples))
###Output
The number of training 4457, validation 1115
###Markdown
Tokenization
###Code
tokenizer = Tokenizer(num_words=max_features)  # cap the vocabulary to match the Embedding input size
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print("Found {0} unique words: ".format(len(word_index)))
#data = pad_sequences(sequences, maxlen=maxlen, padding='post')
data = pad_sequences(sequences, maxlen=maxlen)
print(data.shape)
data
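# Optional sketch: how one raw message maps to a padded integer sequence,
# mirroring the small example in the markdown above.
example = tokenizer.texts_to_sequences(['any other suggestions'])
print(example)
print(pad_sequences(example, maxlen=maxlen)[0][-6:])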
np.random.seed(42)
# shuffle data
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
texts_train = data[:training_samples]
y_train = labels[:training_samples]
texts_test = data[training_samples:]
y_test = labels[training_samples:]
###Output
_____no_output_____
###Markdown
Model CreationWe will create 3 different models and compare their performances. One model will use SimpleRNN layer, the other will use GRU layer and the last one will use LSTM layer. Architecture of each model is the same. We can create deeper models but we already get good results.
###Code
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
metrics=['acc'])
history_rnn = model.fit(texts_train, y_train, epochs=10,
batch_size=60, validation_split=0.2)
acc = history_rnn.history['acc']
val_acc = history_rnn.history['val_acc']
loss = history_rnn.history['loss']
val_loss = history_rnn.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, '-', color='orange', label='training acc')
plt.plot(epochs, val_acc, '-', color='blue', label='validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()
plt.plot(epochs, loss, '-', color='orange', label='training acc')
plt.plot(epochs, val_loss, '-', color='blue', label='validation acc')
plt.title('Training and validation loss')
plt.legend()
plt.show()
pred = model.predict_classes(texts_test)
acc = model.evaluate(texts_test, y_test)
proba_rnn = model.predict_proba(texts_test)
from sklearn.metrics import confusion_matrix
print("Test loss is {0:.2f} accuracy is {1:.2f} ".format(acc[0],acc[1]))
print(confusion_matrix(pred, y_test))
sum(y_test==1)
###Output
_____no_output_____
###Markdown
GRU
###Code
from keras.layers import GRU
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(GRU(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
metrics=['acc'])
history_rnn = model.fit(texts_train, y_train, epochs=10,
batch_size=60, validation_split=0.2)
pred = model.predict_classes(texts_test)
acc = model.evaluate(texts_test, y_test)
proba_gru = model.predict_proba(texts_test)
from sklearn.metrics import confusion_matrix
print("Test loss is {0:.2f} accuracy is {1:.2f} ".format(acc[0],acc[1]))
print(confusion_matrix(pred, y_test))
###Output
_____no_output_____
###Markdown
LSTM
###Code
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history_lstm = model.fit(texts_train, y_train, epochs=10,
batch_size=60, validation_split=0.2)
acc = history_lstm.history['acc']
val_acc = history_lstm.history['val_acc']
loss = history_lstm.history['loss']
val_loss = history_lstm.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, '-', color='orange', label='training acc')
plt.plot(epochs, val_acc, '-', color='blue', label='validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()
plt.plot(epochs, loss, '-', color='orange', label='training acc')
plt.plot(epochs, val_loss, '-', color='blue', label='validation acc')
plt.title('Training and validation loss')
plt.legend()
plt.show()
pred = model.predict_classes(texts_test)
acc = model.evaluate(texts_test, y_test)
proba_lstm = model.predict_proba(texts_test)
from sklearn.metrics import confusion_matrix
print("Test loss is {0:.2f} accuracy is {1:.2f} ".format(acc[0],acc[1]))
print(confusion_matrix(pred, y_test))
###Output
_____no_output_____
###Markdown
Ensemble Model
###Code
ensemble_proba = 0.25 * proba_rnn + 0.35 * proba_gru + 0.4 * proba_lstm
ensemble_proba[:5]
ensemble_class = np.array([1 if i >= 0.3 else 0 for i in ensemble_proba])
print(confusion_matrix(ensemble_class, y_test))
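# Optional sketch: overall accuracy of the weighted ensemble, for direct
# comparison with the individual RNN, GRU and LSTM models above.
from sklearn.metrics import accuracy_score
print('Ensemble accuracy:', accuracy_score(y_test, ensemble_class))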
###Output
_____no_output_____ |
Python/_Movidius/Trainer.ipynb | ###Markdown
Convolutional Neural Networks & Transfer Learning For Acute Myeloid Leukemia Classification  AbstractAcute Myeloid Leukemia (AML) [1] is a rare and very agressive form of Leukemia. With this type of Leukemia early dectection is crucial but as of yet there are no warning signs, there are currently no ways to screen for AML but there are symptoms that give warning [2]. This project shows how we can use transfer learning and existing image classification models to create Deep Learning Models, specifically Inception V3, that can classify positive and negative Acute Myeloid Leukemia positive and negative lymphocytes in images. Acute Myeloid Leukemia (AML)Despite being one of the most common forms of Leukemia, Acute Myeloid Leukemia (AML) is a still a relatively rare form of Leukemia that is more common in adults, but does affect children also. AML is an agressive Leukemia where white blood cells mutate, attack and replace healthy red blood cells, effectively killing them. "About 19,520 new cases of acute myeloid leukemia (AML). Most will be in adults (United States)." [6]In comparrison, there are 180,000 women a year in the United States being diagnosed with Invasive Ductal Carcinoma (IDC), a type of breast cancer which forms in the breast duct and invades the areas surrounding it [7]. Acute Lymphoblastic Leukemia Image Database for Image Processing (ALL-IDB)Figure 3. Samples of augmented data generated from the Acute Lymphoblastic Leukemia Image Database for Image Processing dataset.The Acute Lymphoblastic Leukemia Image Database for Image Processing dataset is used for this project. The dataset was created by Fabio Scotti, Associate Professor Dipartimento di Informatica, Università degli Studi di Milano. Big thanks to Fabio for his research and time put in to creating the dataset and documentation, it is one of his personal projects. The Acute Myeloid Leukemia (AML) Movidius ClassifierThe AML Movidius Classifier shows how to train a Convolutional Neural Network using TensorFlow [8] and transfer learning trained on a dataset of Acute Myeloid Leukemia negative and positive images, Acute Lymphoblastic Leukemia Image Database for Image Processing [9]. The Tensorflow model is trained on the AI DevCloud [10] converted to a format compatible with the Movidius NCS by freezing the Tensorflow model and then running it through the NCSDK [11]. The model is then downloaded to an UP Squared, and then used for inference with NCSDK. Convolutional Neural NetworksFigure 1. Inception v3 architecture ([Source](https://github.com/tensorflow/models/tree/master/research/inception)).Convolutional neural networks are a type of deep learning neural network. These types of neural nets are widely used in computer vision and have pushed the capabilities of computer vision over the last few years, performing exceptionally better than older, more traditional neural networks; however, studies show that there are trade-offs related to training times and accuracy. Transfer LearningFigure 2. Inception V3 Transfer Learning ([Source](https://github.com/Hvass-Labs/TensorFlow-Tutorials)).Transfer learning allows you to retrain the final layer of an existing model, resulting in a significant decrease in not only training time, but also the size of the dataset required. One of the most famous models that can be used for transfer learning is the Inception V3 model created by Google This model was trained on thousands of images from 1,001 classes on some very powerful devices. 
Being able to retrain the final layer means that you can maintain the knowledge that the model had learned during its original training and apply it to your smaller dataset, resulting in highly accurate classifications without the need for extensive training and computational power. Hardware & SoftwareThrough my role as an Intel® Software Innovator, I get access to the latest Intel® technologies that help enhance my projects. In this particular part of the project I Intel® technologies such as Intel® AI DevCloud for data sorting and training and UP Squared with Intel Movidius (NCS) for inference. Interactive TutorialThis Notebook serves as an interactive tutorial that helps you set up your project, sort your data and train the Convolutional Neural Network. PrerequisitesThere are a few steps you need to tae to set up your AI DevCloud project, these steps are outlined below: - Clone The Github RepoYou need to clone the Acute Myeloid Leukemia Classifiers Github repo to your development machine. To do this open up a terminal and use __git clone__ to clone to the AML Classifiers repo (__https://github.com/AMLResearchProject/AML-Classifiers.git__). Once you have cloned the repo you should nagivate to __AML-Classifiers/Python/_Movidius/__ to find the related code, notebooks and tutorials. - Gain Access To ALL-IDBYou you need to be granted access to use the Acute Lymphoblastic Leukemia Image Database for Image Processing dataset. You can find the application form and information about getting access to the dataset on [this page](https://homes.di.unimi.it/scotti/all/download) as well as information on how to contribute back to the project [here](https://homes.di.unimi.it/scotti/all/results.php). If you are not able to obtain a copy of the dataset please feel free to try this tutorial on your own dataset. - Data AugmentationAssuming you have received permission to use the Acute Lymphoblastic Leukemia Image Database for Image Processing, you should follow the related Notebook first to generate a larger training and testing dataset. Follow the AML Classifier [Data Augmentation Notebook](https://github.com/AMLResearchProject/AML-Classifiers/blob/master/Python/Augmentation.ipynb) to apply various filters to the dataset. If you have not been able to obtain a copy of the dataset please feel free to try this tutorial on your own dataset.Data augmentations included are as follows...Done:- Grayscaling- Histogram Equalization- Reflection- Gaussian Blur- RotationToDo:- Shearing- TranslationYou can follow the progress of the data augmentation system on this [Github issue](https://github.com/AMLResearchProject/AML-Classifiers/issues/1). - Upload Project To AI DevCloudNow you need to upload the related project from the repo to the AI DevCloud. The directory you need to upload is __AML-Classifiers/Python/_Movidius/__. Once you have uploaded the project structure you need to upload your augmented dataset created in the previous step. Upload your data to the __0__ and __1__ directories in the __Model/Data/__ directory, you should also remove the init files from these directories.Once you have completed the above, navigate to this Notebook and continue the tutorial there. Prepare The DatasetAssuming you have uploaded your data, you now need to sort the data ready for the training process. Data Sorting JobYou need to create a shell script (provided below) that is used to create a job for sorting your uploaded data on the AI DevCloud. 
Before you run the following block make sure you have followed all of the steps in __Upload Project To AI DevCloud__ above.
###Code
%%writefile AML-DevCloud-Data
cd $PBS_O_WORKDIR
echo "* Compute server `hostname` on the AI DevCloud"
echo "* Current directory ${PWD}."
echo "* Compute server's CPU model and number of logical CPUs:"
lscpu | grep 'Model name\\|^CPU(s)'
echo "* Python version:"
export PATH=/glob/intel-python/python3/bin:$PATH;
which python
python --version
echo "* This job sorts the data for the AML Classifier on AI DevCloud"
python Data.py
sleep 10
echo "*Adios"
# Remember to have an empty line at the end of the file; otherwise the last command will not run
###Output
Writing AML-DevCloud-Data
###Markdown
Check the data sorter job script was created
###Code
%ls
###Output
AML-DevCloud-Data Classifier.py [0m[01;34mLogs[0m/ [01;34mModel[0m/ Trainer.ipynb
[01;34mClasses[0m/ Data.py [01;34mMedia[0m/ [01;34mRequired[0m/ Trainer.py
###Markdown
Submit the data sorter job
###Code
!qsub AML-DevCloud-Data
###Output
8390.c009
###Markdown
Check the status of the job
###Code
!qstat
###Output
Job ID Name User Time Use S Queue
------------------------- ---------------- --------------- -------- - -----
8389.c009 ...ub-singleuser u13339 00:00:07 R jupyterhub
8390.c009 ...DevCloud-Data u13339 0 R batch
###Markdown
Get more details about the job
###Code
!qstat -f 8390
###Output
qstat: Unknown Job Id Error 8390.c009
###Markdown
Check for the output files
###Code
%ls
###Output
AML-DevCloud-Data [0m[01;34mClasses[0m/ [01;34mMedia[0m/ Trainer.py
AML-DevCloud-Data.e8390 Classifier.py [01;34mModel[0m/
AML-DevCloud-Data.o8390 Data.py [01;34mRequired[0m/
AML-DevCloud-Trainer [01;34mLogs[0m/ Trainer.ipynb
###Markdown
You should see similar to the below output in your .0XXXX file, you can ignore the error (.eXXXXX) file in this case unless you are having difficulties in which case this file may have important information.```>> Converting image 347/348 shard 12018-12-23 08:36:57|convertToTFRecord|INFO: class_name: 02018-12-23 08:36:57|convertToTFRecord|INFO: class_id: 0>> Converting image 348/348 shard 12018-12-23 08:36:57|convertToTFRecord|INFO: class_name: 12018-12-23 08:36:57|convertToTFRecord|INFO: class_id: 12018-12-23 08:36:57|sortData|COMPLETE: Completed sorting data!*Adios End of output for job 8390.c009 Date: Sun Dec 23 08:37:07 PST 2018``` Training job Now it is time to create your training job, the script required for this is almost identical to the above created script, all we need to do is change filename and the commandline argument.
###Code
%%writefile AML-DevCloud-Trainer
cd $PBS_O_WORKDIR
echo "* Hello world from compute server `hostname` on the A.I. DevCloud!"
echo "* The current directory is ${PWD}."
echo "* Compute server's CPU model and number of logical CPUs:"
lscpu | grep 'Model name\\|^CPU(s)'
echo "* Python available to us:"
export PATH=/glob/intel-python/python3/bin:$PATH;
which python
python --version
echo "* This job trains the AML Classifier on the Colfax Cluster"
python Trainer.py
sleep 10
echo "*Adios"
# Remember to have an empty line at the end of the file; otherwise the last command will not run
###Output
Writing AML-DevCloud-Trainer
###Markdown
Check the training job script was created Now check that the trainer job script was created successfully by executing the following block which will print out the files located in the current directory. If all was successful, you should see the file "AML-DevCloud-Trainer". You can also open this file to confirm that the contents are correct.
###Code
%ls
###Output
AML-DevCloud-Data [0m[01;34mClasses[0m/ [01;34mMedia[0m/ Trainer.py
AML-DevCloud-Data.e8390 Classifier.py [01;34mModel[0m/
AML-DevCloud-Data.o8390 Data.py [01;34mRequired[0m/
AML-DevCloud-Trainer [01;34mLogs[0m/ Trainer.ipynb
###Markdown
Submit the training job script Now it is time to submit your training job script, this will queue the training script ready for execution and return your job ID. In this command we set the walltime to 24 hours, which should give our script enough time to fully complete without getting killed.
###Code
!qsub -l walltime=24:00:00 AML-DevCloud-Trainer
###Output
8392.c009
###Markdown
Check the status of the job
###Code
!qstat
###Output
Job ID Name User Time Use S Queue
------------------------- ---------------- --------------- -------- - -----
8389.c009 ...ub-singleuser u13339 00:00:09 R jupyterhub
8392.c009 ...Cloud-Trainer u13339 0 R batch
###Markdown
Get more details about the job
###Code
!qstat -f 8392
###Output
Job Id: 8392.c009
Job_Name = AML-DevCloud-Trainer
Job_Owner = u13339@c009-n003
resources_used.cput = 59:36:07
resources_used.energy_used = 0
resources_used.mem = 3457704kb
resources_used.vmem = 20151904kb
resources_used.walltime = 02:29:48
job_state = R
queue = batch
server = c009
Checkpoint = u
ctime = Sun Dec 23 08:39:03 2018
Error_Path = c009-n003:/home/u13339/AML-Classifier/AML-DevCloud-Trainer.e8
392
exec_host = c009-n016/0-1
Hold_Types = n
Join_Path = n
Keep_Files = n
Mail_Points = n
mtime = Sun Dec 23 08:39:04 2018
Output_Path = c009-n003:/home/u13339/AML-Classifier/AML-DevCloud-Trainer.o
8392
Priority = 0
qtime = Sun Dec 23 08:39:03 2018
Rerunable = True
Resource_List.nodect = 1
Resource_List.nodes = 1:ppn=2
Resource_List.walltime = 24:00:00
session_id = 196175
Variable_List = PBS_O_QUEUE=batch,PBS_O_HOME=/home/u13339,
PBS_O_LOGNAME=u13339,
PBS_O_PATH=/glob/intel-python/python3/bin/:/glob/intel-python/python3
/bin/:/glob/intel-python/python2/bin/:/glob/development-tools/versions
/intel-parallel-studio-2018-update3/compilers_and_libraries_2018.3.222
/linux/bin/intel64:/glob/development-tools/versions/intel-parallel-stu
dio-2018-update3/compilers_and_libraries_2018.3.222/linux/mpi/intel64/
bin:/glob/intel-python/python3/bin/:/glob/intel-python/python2/bin/:/g
lob/development-tools/versions/intel-parallel-studio-2018-update3/comp
ilers_and_libraries_2018.3.222/linux/bin/intel64:/glob/development-too
ls/versions/intel-parallel-studio-2018-update3/compilers_and_libraries
_2018.3.222/linux/mpi/intel64/bin:/usr/local/sbin:/usr/local/bin:/usr/
sbin:/usr/bin:/home/u13339/.local/bin:/home/u13339/bin:/home/u13339/.l
ocal/bin:/home/u13339/bin:/usr/local/bin:/bin,
PBS_O_MAIL=/var/spool/mail/u13339,PBS_O_SHELL=/bin/bash,
PBS_O_LANG=en_US.UTF-8,
PBS_O_SUBMIT_FILTER=/usr/local/sbin/torque_submitfilter,
PBS_O_WORKDIR=/home/u13339/AML-Classifier,PBS_O_HOST=c009-n003,
PBS_O_SERVER=c009
euser = u13339
egroup = u13339
queue_type = E
etime = Sun Dec 23 08:39:03 2018
submit_args = -l walltime=24:00:00 AML-DevCloud-Trainer
start_time = Sun Dec 23 08:39:04 2018
Walltime.Remaining = 77370
start_count = 1
fault_tolerant = False
job_radix = 0
submit_host = c009-n003
|
itng/examples/FiringRateEstimation.ipynb | ###Markdown
Firing Rate Estimation Estimating the firing rate in two different ways:- Finding the optimum number of histogram bins - Finding the optimum bandwidth for Gaussian kernel density estimation References: - Kernel bandwidth optimization in spike rate estimation - Hideaki Shimazaki & Shigeru Shinomoto - [Kernel Density Estimation](https://jakevdp.github.io/PythonDataScienceHandbook/05.13-kernel-density-estimation.html) - [Kernel density estimation, bandwidth selection](https://en.wikipedia.org/wiki/Kernel_density_estimation#Bandwidth_selection)
###Code
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import GridSearchCV
import numpy as np
import pylab as plt
from os.path import join
from itng.statistics import (sshist, optimal_bandwidth, optimal_num_bins)
###Output
_____no_output_____
###Markdown
Reading spike times:
###Code
with open(join("data.txt"), "r") as f:
lines = f.readlines()
spike_times = []
for line in lines:
line = [float(i) for i in line.split()]
spike_times.extend(line)
spike_times = np.asarray(spike_times)
bins = optimal_num_bins(spike_times)
print("The optimum number of bins : ", len(bins))
fig, ax = plt.subplots(1, figsize=(6, 4))
ax.set_xlabel('spike times (s)')
ax.set_ylabel("density")
ax.hist(spike_times, bins=bins, alpha=0.5, density=True);
# Kernel Density Estimation
# Selecting the bandwidth via cross-validation
bandwidth = optimal_bandwidth(spike_times)
print(bandwidth)
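# Optional sketch: a cross-validation based bandwidth search, as in the KDE
# reference linked above, using the GridSearchCV / LeaveOneOut / KernelDensity
# imports at the top. The bandwidth grid is an assumption and leave-one-out
# can be slow for many spikes; it need not match optimal_bandwidth exactly.
bandwidths = np.linspace(0.05, 2.0, 40)
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': bandwidths},
                    cv=LeaveOneOut())
grid.fit(spike_times[:, None])
print(grid.best_params_)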
# the spikes need to be sorted
spike_times_sorted = np.sort(spike_times)
# instantiate and fit the KDE model
kde = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
kde.fit(spike_times_sorted[:, None])
# score_samples returns the log of the probability density
logprob = kde.score_samples(spike_times_sorted[:, None])
# PLOT the results together
fig, ax = plt.subplots(1, figsize=(6, 4))
ax.set_xlabel('spike times (s)')
ax.set_ylabel("density")
ax.hist(spike_times, bins=bins, alpha=0.3, density=True);
ax.fill_between(spike_times_sorted, np.exp(logprob),
alpha=0.3,
color='gray')
ax.plot(spike_times_sorted, np.exp(logprob), alpha=1, lw=2, color="k")
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/diabetes-checkpoint.ipynb | ###Markdown
Diabetes Diagnosis
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
import expectation_reflection as ER
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)
# load data
s = pd.read_csv('../diabetes_data.csv',sep= ',', header= None)
s.head()
###Output
_____no_output_____
###Markdown
The data contains 8 features:1) Pregnancies: Number of times pregnant2) Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test (GTT)3) BloodPressure: Diastolic blood pressure (mmHg)4) SkinThickness: Triceps skin fold thickness (mm)5) Insulin: 2-Hour serum insulin (mu U/ml)6) BMI: Body Mass Index (weight(kg)/(height(m))^2)7) DiabetesPedigreeFunction: Diabetes Pedigree Function8) Age: Age (years)and 1 target: 1 (positive), 0 (negative) Impute missing data
###Code
# impute missing data
Xy = np.loadtxt('../diabetes_data_imputed_knn3.txt').astype(float)
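# A minimal sketch of how a file like 'diabetes_data_imputed_knn3.txt' might be
# produced (an assumption, not the authors' actual preprocessing): in the raw Pima
# data, zeros in the glucose, blood-pressure, skin-thickness, insulin and BMI
# columns usually stand in for missing values, so mark them as NaN and impute with
# k=3 nearest neighbours. Xy_knn_example should roughly correspond to Xy above.
from sklearn.impute import KNNImputer
raw = s.values.astype(float)
raw[:, 1:6][raw[:, 1:6] == 0] = np.nan     # flag the zero placeholders as missing
Xy_knn_example = KNNImputer(n_neighbors=3).fit_transform(raw)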
# select features and target:
X = Xy[:,:8]
y = Xy[:,8]
# convert 1,0 to 1,-1:
y = 2*y - 1
from sklearn.utils import shuffle
X, y = shuffle(X, y)
from sklearn.preprocessing import MinMaxScaler
X = MinMaxScaler().fit_transform(X)
###Output
_____no_output_____
###Markdown
Prediction
###Code
def inference(X_train,y_train,X_test,y_test,method='expectation_reflection'):
if method == 'expectation_reflection':
h0,w = ER.fit(X_train,y_train,niter_max=100,regu=0.01)
y_pred = ER.predict(X_test,h0,w)
else:
if method == 'logistic_regression':
model = LogisticRegression(solver='liblinear')
if method == 'naive_bayes':
model = GaussianNB()
if method == 'random_forest':
model = RandomForestClassifier(criterion = "gini", random_state = 1,
max_depth=3, min_samples_leaf=5,n_estimators=100)
if method == 'decision_tree':
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test,y_pred)
return accuracy
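# A quick standalone example of the `inference` helper above (a sketch; the split
# and random_state are arbitrary, and only imports already in this notebook are used).
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
acc_example = inference(X_tr, y_tr, X_te, y_te, method='logistic_regression')
print('logistic regression accuracy on one split:', acc_example)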
def compare_inference(X,y,train_size):
npred = 100
accuracy = np.zeros((len(list_methods),npred))
for ipred in range(npred):
X, y = shuffle(X, y)
X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)
idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False)
X_train,y_train = X_train0[idx_train],y_train0[idx_train]
for i,method in enumerate(list_methods):
accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method)
return accuracy.mean(axis=1),accuracy.std(axis=1)
list_train_size = [0.8,0.6,0.4,0.2]
list_methods=['logistic_regression','naive_bayes','random_forest','decision_tree','expectation_reflection']
acc = np.zeros((len(list_train_size),len(list_methods)))
acc_std = np.zeros((len(list_train_size),len(list_methods)))
for i,train_size in enumerate(list_train_size):
acc[i,:],acc_std[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
acc_std
df = pd.DataFrame(acc,columns = list_methods)
df.insert(0, "train_size",list_train_size, True)
df
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')
plt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')
plt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest')
plt.plot(list_train_size,acc[:,4],'k-',marker='o',label='Expectation Reflection')
plt.xlabel('train size')
plt.ylabel('accuracy mean')
plt.legend()
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc_std[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')
plt.plot(list_train_size,acc_std[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')
plt.plot(list_train_size,acc_std[:,2],'r--',marker='^',mfc='none',label='Random Forest')
plt.plot(list_train_size,acc_std[:,4],'k-',marker='o',label='Expectation Reflection')
plt.xlabel('train size')
plt.ylabel('accuracy standard deviation')
plt.legend()
###Output
_____no_output_____ |
mod_binary_MERA.ipynb | ###Markdown
###Code
# imports assumed by this notebook (they are not shown in the extracted cells);
# the helper routines tprod, orthogonalize, ncon_solver, xcon and draw_network are
# taken to be defined or imported elsewhere in the original project.
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt


def define_ham(blocksize):
"""
Define Hamiltonian (quantum critical Ising), perform preliminary blocking
of several sites into an effective site.
"""
# define Pauli matrices
sX = np.array([[0, 1], [1, 0]], dtype=float)
sZ = np.array([[1, 0], [0, -1]], dtype=float)
# define Ising local Hamiltonian
ham_orig = (tprod(sX, sX) - 0.5*tprod(sZ, np.eye(2)) -
0.5*tprod(np.eye(2), sZ))
# shift Hamiltonian to ensure negative defined
en_shift = max(LA.eigh(ham_orig)[0])
ham_loc = ham_orig - en_shift*np.eye(4)
# define block Hamiltonians
d0 = 2 # initial local dim
d1 = d0**blocksize # local dim after blocking
if blocksize==2:
ham_block = (0.5*tprod(ham_loc, np.eye(d0**2)) +
1.0*tprod(np.eye(d0**1), ham_loc, np.eye(d0**1)) +
0.5*tprod(np.eye(d0**2), ham_loc)
).reshape(d0*np.ones(8, dtype=int))
        hamAB_init = ham_block.transpose(0,1,3,2,4,5,7,6
                                         ).reshape(d1, d1, d1, d1)
        hamBA_init = ham_block.transpose(1,0,2,3,5,4,6,7
                                         ).reshape(d1, d1, d1, d1)
elif blocksize==3:
ham_block = (1.0*tprod(np.eye(d0**1), ham_loc, np.eye(d0**3)) +
1.0*tprod(np.eye(d0**2), ham_loc, np.eye(d0**2)) +
1.0*tprod(np.eye(d0**3), ham_loc, np.eye(d0**1))
).reshape(d0*np.ones(12, dtype=int))
hamAB_init = ham_block.transpose(0,1,2,5,4,3,6,7,8,11,10,9
).reshape(d1, d1, d1, d1)
hamBA_init = ham_block.transpose(2,1,0,3,4,5,8,7,6,9,10,11
).reshape(d1, d1, d1, d1)
elif blocksize==4:
ham_block = (0.5*tprod(np.eye(d0**1), ham_loc, np.eye(d0**5)) +
1.0*tprod(np.eye(d0**2), ham_loc, np.eye(d0**4)) +
1.0*tprod(np.eye(d0**3), ham_loc, np.eye(d0**3)) +
1.0*tprod(np.eye(d0**4), ham_loc, np.eye(d0**2)) +
0.5*tprod(np.eye(d0**5), ham_loc, np.eye(d0**1))
).reshape(d0*np.ones(16, dtype=int))
hamAB_init = ham_block.transpose(0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12
).reshape(d1, d1, d1, d1)
hamBA_init = ham_block.transpose(3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15
).reshape(d1, d1, d1, d1)
return hamAB_init, hamBA_init, en_shift
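# A small sanity check (a sketch; it assumes the helper `tprod` used above is
# available in this session): after subtracting en_shift the blocked Hamiltonians
# should be real-symmetric and negative semi-definite.
hAB_chk, hBA_chk, _ = define_ham(3)
d_chk = hAB_chk.shape[0]
mat_chk = hAB_chk.reshape(d_chk ** 2, d_chk ** 2)
assert np.allclose(mat_chk, mat_chk.T)          # real-symmetric
assert LA.eigh(mat_chk)[0].max() < 1e-10        # negative semi-definite (up to roundoff)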
def initialize(chi, chimid, hamAB_init, hamBA_init, layers):
""" Initialize the MERA tensors """
# Initialize the MERA tensors
d1 = hamAB_init.shape[0]
iso_temp = orthogonalize(np.random.rand(d1, min(chimid, d1)))
uC = [tprod(iso_temp, iso_temp, do_matricize=False)]
wC = [orthogonalize(np.random.rand(d1, uC[0].shape[2], chi), partition=2)]
vC = [orthogonalize(np.random.rand(d1, uC[0].shape[2], chi), partition=2)]
for k in range(layers-1):
iso_temp = orthogonalize(np.random.rand(chi, chimid))
uC.append(tprod(iso_temp, iso_temp, do_matricize=False))
wC.append(orthogonalize(np.random.rand(chi, chimid, chi), partition=2))
vC.append(orthogonalize(np.random.rand(chi, chimid, chi), partition=2))
# initialize density matrices and effective Hamiltonians
rhoAB = [0]
rhoBA = [0]
hamAB = [hamAB_init]
hamBA = [hamBA_init]
for k in range(layers):
rhoAB.append(np.eye(chi**2).reshape(chi, chi, chi, chi))
rhoBA.append(np.eye(chi**2).reshape(chi, chi, chi, chi))
hamAB.append(np.zeros((chi, chi, chi, chi)))
hamBA.append(np.zeros((chi, chi, chi, chi)))
return hamAB, hamBA, wC, vC, uC, rhoAB, rhoBA
def define_networks(hamAB, hamBA, wC, vC, uC, rhoAB, rhoBA):
""" Define and plot all principle networks """
# Define the `M` principle network
connects_M = [[3,5,9], [1,5,7], [1,2,3,4], [4,6,10], [2,6,8], [7,8,9,10]]
tensors_M = [vC, vC, hamBA, wC, wC, rhoAB]
order_M = ncon_solver(tensors_M, connects_M)[0]
dims_M = [tensor.shape for tensor in tensors_M]
names_M = ['v', 'v', 'hBA', 'w', 'w', 'rhoAB']
coords_M = [(-0.5,1),(-0.5,-1), (-0.3,-0.2,0.3,0.2),(0.5,1),(0.5,-1),(0.2)]
colors_M = [0,0,1,2,2,3]
# Define the `L` principle network
connects_L = [[3,6,13], [1,8,11], [4,5,6,7], [2,5,8,9], [1,2,3,4],
[10,7,14], [10,9,12], [11,12,13,14]]
tensors_L = [wC, wC, uC, uC, hamAB, vC, vC, rhoBA]
order_L = ncon_solver(tensors_L, connects_L)[0]
dims_L = [tensor.shape for tensor in tensors_L]
names_L = ['w', 'w', 'u', 'u', 'hAB', 'v', 'v', 'rhoBA']
coords_L = [(-0.5, 1.5), (-0.5, -1.5), (-0.3,0.5,0.3,0.9), (-0.3,-0.5,0.3,-0.9),
(-0.6,-0.2,-0.1,0.2), (0.5, 1.5), (0.5, -1.5), (0.2)]
colors_L = [2,2,4,4,1,0,0,3]
# Define the `C` principle network
connects_C = [[5,6,13], [5,9,11], [3,4,6,8], [1,2,9,10], [1,2,3,4], [7,8,14],
[7,10,12], [11,12,13,14]]
tensors_C = [wC, wC, uC, uC, hamBA, vC, vC, rhoBA]
order_C = ncon_solver(tensors_C, connects_C)[0]
dims_C = [tensor.shape for tensor in tensors_C]
names_C = ['w', 'w', 'u', 'u', 'hBA', 'v', 'v', 'rhoBA']
coords_C = [(-0.5, 1.5), (-0.5, -1.5), (-0.3,0.5,0.3,0.9), (-0.3,-0.5,0.3,-0.9),
(-0.3,-0.2,0.3,0.2), (0.5, 1.5), (0.5, -1.5), (0.2)]
colors_C = [2,2,4,4,1,0,0,3]
# Define the `R` principle network
connects_R = [[10,6,13], [10,8,11], [5,3,6,7], [5,1,8,9], [1,2,3,4], [4,7,14],
[2,9,12], [11,12,13,14]]
tensors_R = [wC, wC, uC, uC, hamAB, vC, vC, rhoBA]
order_R = ncon_solver(tensors_R, connects_R)[0]
dims_R = [tensor.shape for tensor in tensors_R]
names_R = ['w', 'w', 'u', 'u', 'hAB', 'v', 'v', 'rhoBA']
coords_R = [(-0.5, 1.5), (-0.5, -1.5), (-0.3,0.5,0.3,0.9), (-0.3,-0.5,0.3,-0.9),
(0.6,-0.2,0.1,0.2), (0.5, 1.5), (0.5, -1.5), (0.2)]
colors_R = [2,2,4,4,1,0,0,3]
# Plot all principle networks
fig = plt.figure(figsize=(24,24))
figM = draw_network(connects_M, order=order_M, dims=dims_M, coords=coords_M,
names=names_M, colors=colors_M, title='M-diagrams',
draw_labels=False, show_costs=True, legend_extend=2.5,
fig=fig, subplot=141, env_pad=(-0.4,-0.4))
figL = draw_network(connects_L, order=order_L, dims=dims_L, coords=coords_L,
names=names_L, colors=colors_L, title='L-diagrams',
draw_labels=False, show_costs=True, legend_extend=2.5,
fig=fig, subplot=142, env_pad=(-0.4,-0.4))
figC = draw_network(connects_C, order=order_C, dims=dims_C, coords=coords_C,
names=names_C, colors=colors_C, title='C-diagrams',
draw_labels=False, show_costs=True, legend_extend=2.5,
fig=fig, subplot=143, env_pad=(-0.4,-0.4))
figR = draw_network(connects_R, order=order_R, dims=dims_R, coords=coords_R,
names=names_R, colors=colors_R, title='R-diagrams',
draw_labels=False, show_costs=True, legend_extend=2.5,
fig=fig, subplot=144, env_pad=(-0.4,-0.4))
# Store `connects` and `order` in a dict for later use
network_dict = {'connects_M': connects_M, 'order_M': order_M,
'connects_L': connects_L, 'order_L': order_L,
'connects_C': connects_C, 'order_C': order_C,
'connects_R': connects_R, 'order_R': order_R,}
return network_dict
def lift_hamiltonian(hamAB, hamBA, w, v, u, rhoAB, rhoBA, network_dict,
ref_sym=False):
""" Lift the Hamiltonian through one MERA layer """
hamAB_lift = xcon([v, v, hamBA, w, w, rhoAB],
network_dict['connects_M'],
order=network_dict['order_M'], which_envs=5)
hamBA_temp0 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'],
order=network_dict['order_L'], which_envs=7)
hamBA_temp1 = xcon([w, w, u, u, hamBA, v, v, rhoBA],
network_dict['connects_C'],
order=network_dict['order_C'], which_envs=7)
if ref_sym is True:
hamBA_temp2 = hamBA_temp0.transpose(1,0,3,2)
else:
hamBA_temp2 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_R'],
order=network_dict['order_R'], which_envs=7)
hamBA_lift = hamBA_temp0 + hamBA_temp1 + hamBA_temp2
return hamAB_lift, hamBA_lift
def lower_density(hamAB, hamBA, w, v, u, rhoAB, rhoBA, network_dict,
ref_sym=False):
""" Lower the density matrix through one MERA layer """
rhoBA_temp0 = xcon([v, v, hamBA, w, w, rhoAB],
network_dict['connects_M'],
order=network_dict['order_M'], which_envs=2)
rhoAB_temp0 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'],
order=network_dict['order_L'], which_envs=4)
rhoBA_temp1 = xcon([w, w, u, u, hamBA, v, v, rhoBA],
network_dict['connects_C'],
order=network_dict['order_C'], which_envs=4)
if ref_sym is True:
rhoAB_temp1 = rhoAB_temp0.transpose(1,0,3,2)
else:
rhoAB_temp1 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_R'],
order=network_dict['order_R'], which_envs=4)
rhoAB_lower = 0.5*(rhoAB_temp0 + rhoAB_temp1)
rhoBA_lower = 0.5*(rhoBA_temp0 + rhoBA_temp1)
return rhoAB_lower, rhoBA_lower
def optimize_w(hamAB, hamBA, w, v, u, rhoAB, rhoBA, network_dict,
ref_sym=False):
""" Optimise the `w` isometry """
w_env0 = xcon([v, v, hamBA, w, w, rhoAB], network_dict['connects_M'],
order=network_dict['order_M'], which_envs=3)
if ref_sym is True:
w_env1, w_env3 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'],
order=network_dict['order_L'],
which_envs=[0,5])
else:
w_env1 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'], order=network_dict['order_L'],
which_envs=0)
w_env3 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_R'], order=network_dict['order_R'],
which_envs=0)
w_env2 = xcon([w, w, u, u, hamBA, v, v, rhoBA],
network_dict['connects_C'], order=network_dict['order_C'],
which_envs=0)
w_out = orthogonalize(w_env0 + w_env1 + w_env2 + w_env3, partition=2)
return w_out
def optimize_v(hamAB, hamBA, w, v, u, rhoAB, rhoBA, network_dict,
ref_sym=False):
""" Optimise the `v` isometry """
v_env0 = xcon([v, v, hamBA, w, w, rhoAB], network_dict['connects_M'],
order=network_dict['order_M'], which_envs=0)
if ref_sym is True:
v_env1, v_env3 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'],
order=network_dict['order_L'],
which_envs=[0,5])
else:
v_env1 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'], order=network_dict['order_L'],
which_envs=5)
v_env3 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_R'], order=network_dict['order_R'],
which_envs=5)
v_env2 = xcon([w, w, u, u, hamBA, v, v, rhoBA],
network_dict['connects_C'], order=network_dict['order_C'],
which_envs=5)
v_out = orthogonalize(v_env0 + v_env1 + v_env2 + v_env3, partition=2)
return v_out
def optimize_u(hamAB, hamBA, w, v, u, rhoAB, rhoBA, network_dict,
ref_sym=False):
""" Optimise the `u` disentangler """
u_env0 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_L'], order=network_dict['order_L'],
which_envs=2)
u_env1 = xcon([w, w, u, u, hamBA, v, v, rhoBA],
network_dict['connects_C'], order=network_dict['order_C'],
which_envs=2)
if ref_sym is True:
u_env2 = u_env0.transpose(1,0,3,2)
else:
u_env2 = xcon([w, w, u, u, hamAB, v, v, rhoBA],
network_dict['connects_R'], order=network_dict['order_R'],
which_envs=2)
utot = u_env0 + u_env1 + u_env2
if ref_sym is True:
utot = utot + utot.transpose(1,0,3,2)
u_out = orthogonalize(utot, partition=2)
return u_out
###Output
_____no_output_____ |
notebooks/EMNIST.ipynb | ###Markdown
Importing packages
###Code
import fedjax
import jax
import jax.numpy as jnp
import PLM_computation
import FedMix_computation
from grid_search import FedMixGrid, grid_search
from EMNIST_custom import emnist_load_gd_data
import itertools
from matplotlib import pyplot as plt
import pickle
###Output
_____no_output_____
###Markdown
Model setup
###Code
model = fedjax.models.emnist.create_conv_model(only_digits=False)
def loss(params, batch, rng):
# `rng` used with `apply_for_train` to apply dropout during training.
preds = model.apply_for_train(params, batch, rng)
# Per example loss of shape [batch_size].
example_loss = model.train_loss(batch, preds)
return jnp.mean(example_loss)
def loss_for_eval(params, batch):
preds = model.apply_for_eval(params, batch)
example_loss = model.train_loss(batch, preds)
return jnp.mean(example_loss)
grad_fn = jax.jit(jax.grad(loss))
grad_fn_eval = jax.jit(jax.grad(loss_for_eval))
###Output
_____no_output_____
###Markdown
Grid search setup Constants
###Code
CACHE_DIR = '../data/'
NUM_CLIENTS_GRID_SEARCH = 200
TRAIN_VALIDATION_SPLIT = 0.8
NUM_CLIENTS_PER_PLM_ROUND = 5
NUM_CLIENTS_PER_FEDMIX_ROUND = 10
FEDMIX_ALGORITHM = 'adam'
FEDMIX_NUM_ROUNDS = 500
PLM_NUM_EPOCHS = 100
###Output
_____no_output_____
###Markdown
Datasets and parameters
###Code
train_fd, validation_fd = emnist_load_gd_data(
train_val_split=TRAIN_VALIDATION_SPLIT,
only_digits=False,
cache_dir=CACHE_DIR
)
client_ids = set([cid for cid in itertools.islice(
train_fd.client_ids(), NUM_CLIENTS_GRID_SEARCH)])
train_fd = fedjax.SubsetFederatedData(train_fd, client_ids)
validation_fd = fedjax.SubsetFederatedData(validation_fd, client_ids)
plm_init_params = model.init(jax.random.PRNGKey(0))
plm_comp_params = PLM_computation.PLMComputationProcessParams(
plm_init_params, NUM_CLIENTS_PER_PLM_ROUND)
fedmix_init_params = model.init(jax.random.PRNGKey(20))
fedmix_comp_params = FedMix_computation.FedMixComputationParams(
FEDMIX_ALGORITHM, fedmix_init_params, FEDMIX_NUM_ROUNDS)
alpha = 0.7
###Output
_____no_output_____
###Markdown
Grid
###Code
fedmix_lrs = 10**jnp.arange(-5., 0.5, 1)
fedmix_batch_sizes = [20, 50, 100, 200]
plm_lrs = 10**jnp.arange(-5., 0.5, 1)
plm_batch_sizes = [10, 20, 50, 100]
grid = FedMixGrid(fedmix_lrs, plm_lrs, fedmix_batch_sizes, plm_batch_sizes)
###Output
_____no_output_____
###Markdown
Grid search
###Code
SAVE_FILE = '../results/EMNIST_{}_gd.npy'.format(int(10 * alpha))
SAVE_FILE
table = grid_search(
train_fd, validation_fd, grad_fn, grad_fn_eval, model, alpha,
plm_comp_params, fedmix_comp_params, grid, PLM_NUM_EPOCHS,
NUM_CLIENTS_PER_FEDMIX_ROUND, SAVE_FILE
)
best_ind = jnp.unravel_index(jnp.argmax(table), table.shape)
best_ind
plm_batch_size = plm_batch_sizes[best_ind[0]]
plm_lr = plm_lrs[best_ind[1]]
fedmix_batch_size = fedmix_batch_sizes[best_ind[2]]
fedmix_lr = fedmix_lrs[best_ind[3]]
###Output
_____no_output_____
###Markdown
FedMix
###Code
num_rounds = 3000
###Output
_____no_output_____
###Markdown
Now we download full train and test datasets.
###Code
train_fd, test_fd = fedjax.datasets.emnist.load_data(only_digits=False,
cache_dir='../data/')
plm_comp_hparams = PLM_computation.PLMComputationHParams(PLM_NUM_EPOCHS,
plm_lr,
plm_batch_size)
PLM_dict = PLM_computation.plm_computation(train_fd,
grad_fn,
plm_comp_hparams,
plm_comp_params)
alpha
alpha_dict = {}
for cid in train_fd.client_ids():
alpha_dict[cid] = alpha
len(alpha_dict)
fedmix_hparams = FedMix_computation.FedMixHParams(
fedmix_lr, NUM_CLIENTS_PER_FEDMIX_ROUND, fedmix_batch_size)
fedmix_batch_size
fedmix_comp_params = FedMix_computation.FedMixComputationParams(
FEDMIX_ALGORITHM, fedmix_init_params, num_rounds)
_, stats = FedMix_computation.fedmix_computation_with_statistics(
train_fd, test_fd, grad_fn, grad_fn_eval, model, PLM_dict, alpha_dict,
fedmix_hparams, fedmix_comp_params, 100)
###Output
Round 3000 / 3000
###Markdown
FedAvg
###Code
client_optimizer = fedjax.optimizers.sgd(learning_rate=10**(-1.5))
server_optimizer = fedjax.optimizers.adam(
learning_rate=10**(-2.5), b1=0.9, b2=0.999, eps=10**(-4))
# Hyperparameters for client local training dataset preparation.
client_batch_hparams = fedjax.ShuffleRepeatBatchHParams(batch_size=20)
algorithm = fedjax.algorithms.fed_avg.federated_averaging(grad_fn, client_optimizer,
server_optimizer,
client_batch_hparams)
# Initialize model parameters and algorithm server state.
init_params = model.init(jax.random.PRNGKey(17))
server_state = algorithm.init(init_params)
train_client_sampler = fedjax.client_samplers.UniformGetClientSampler(fd=train_fd, num_clients=10, seed=0)
fedavg_test_acc_progress = []
max_rounds = num_rounds  # assumed: `max_rounds` was never defined above, so tie it to the FedMix round count
for round_num in range(1, max_rounds + 1):
# Sample 10 clients per round without replacement for training.
clients = train_client_sampler.sample()
# Run one round of training on sampled clients.
server_state, client_diagnostics = algorithm.apply(server_state, clients)
print(f'[round {round_num}]', end='\r')
# Optionally print client diagnostics if curious about each client's model
# update's l2 norm.
# print(f'[round {round_num}] client_diagnostics={client_diagnostics}')
if round_num % 100 == 0:
test_eval_datasets = [cds for _, cds in test_fd.clients()]
test_eval_batches = fedjax.padded_batch_client_datasets(test_eval_datasets, batch_size=256)
test_metrics = fedjax.evaluate_model(model, server_state.params, test_eval_batches)
fedavg_test_acc_progress.append(test_metrics['accuracy'])
print('Test accuracy = {}'.format(test_metrics['accuracy']))
save_file = '../results/test_acc_fedavg.pickle'
with open(save_file, 'wb') as handle:
pickle.dump(fedavg_test_acc_progress, handle)
with open(save_file, 'rb') as handle:
fedavg_test_acc_progress = pickle.load(handle)
fedavg_test_acc_progress = fedavg_test_acc_progress[:30]
###Output
_____no_output_____
###Markdown
Plots
###Code
accs = [stat['accuracy'] for stat in stats]
round_nums = jnp.linspace(100, 3000, num=30, endpoint=True)
plt.plot(round_nums, accs, label='FLIX')
plt.plot(round_nums, fedavg_test_acc_progress, label='FedAvg')
plt.xlim(left=0)
plt.ylabel('accuracy')
plt.xlabel('rounds')
plt.grid()
plt.title('EMNIST')
plt.legend()
plt.tight_layout()
plt.savefig('../results/plots/EMNIST_preliminary_7.pdf')
###Output
_____no_output_____ |
Cryptography Workshops/TUDev's_Cryptography_Workshop!_Workshop_I_Substitution_Cipher_(Caesar_Cipher)_(FULL).ipynb | ###Markdown
**Substitution Cipher**
**A substitution cipher is a rearrangement of the plaintext alphabet using ciphertext. The plaintext alphabet can be mapped to numbers, letters, or some other unit using a fixed system.**
Source: Website - [Simple Substitution Cipher](https://www.cs.uri.edu/cryptography/classicalsubstitution.htm) from the University of Rhode Island's cryptography webpage **Caesar Cipher** **Definition**
**The Caesar Cipher is a Substitution Cipher and one of the earliest known forms of Cryptography.**
**Julius Caesar is said to have used this namesake cipher to communicate with his army. The letters in the Latin alphabet were shifted to create encrypted messages. Using the English alphabet as an example, if we shift the letters 4 places, then in the Caesar Cipher the plaintext letter "a" is enciphered as "e" (so a ciphertext "e" translates back to "a"). The number of shifts is also known as the cipher's key. A table of the shift can be seen below.**
| Alphabet | a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p | q | r | s | t | u | v | w | x | y | z |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| **Caesar Cipher (4 Shifts)** | **e** | **f** | **g** | **h** | **i** | **j** | **k** | **l** | **m** | **n** | **o** | **p** | **q** | **r** | **s** | **t** | **u** | **v** | **w** | **x** | **y** | **z** | **a** | **b** | **c** | **d** |
Source: Article - [Cracking the Code](https://www.cia.gov/news-information/featured-story-archive/2007-featured-story-archive/cracking-the-code.html) from the CIA's webpage **Coding a Caesar Cipher**
**Let's get started!** **Caesar Cipher using Slicing**
###Code
import string

def caesar_cipher(key, message):
ascii_lower = [i for i in string.ascii_lowercase]
caesars_list = [i for i in string.ascii_lowercase]
#shift the caesars list based on the given key
caesars_list = caesars_list[key:] + caesars_list[:key]
#add in spaces and punctuation so the cipher can deal with sentences
caesars_list.insert((len(caesars_list)+1)," ")
ascii_lower.insert((len(caesars_list)+1)," ")
ascii_lower.extend([i for i in string.punctuation])
caesars_list.extend([i for i in string.punctuation])
#encode and return the encrypted message
cipher = [caesars_list[ascii_lower.index(i)] for i in message]
return ''.join(cipher)
#testing our caesars cipher
key = int(input('How many shifts do you want in your caesars cipher?\n'))
message = input('What is your message?\n')
caesar_message = caesar_cipher(key, message.lower())
print(caesar_message)
###Output
How many shifts do you want in your caesars cipher?
4
What is your message?
hello world!
lipps asvph!
###Markdown
**Decoding Caesar Cipher (Slicing)**
###Code
def caesar_cipher_decoder(key, encrypted_message):
ascii_lower = [i for i in string.ascii_lowercase]
caesars_list = [i for i in string.ascii_lowercase]
#shift the caesars list based on the given key
caesars_list = caesars_list[key:] + caesars_list[:key]
#add in spaces and punctuation so the cipher can deal with sentences
caesars_list.insert((len(caesars_list)+1)," ")
ascii_lower.insert((len(caesars_list)+1)," ")
ascii_lower.extend([i for i in string.punctuation])
caesars_list.extend([i for i in string.punctuation])
#encode and return the encrypted message
decrypted_message = [ascii_lower[caesars_list.index(i)] for i in encrypted_message]
return ''.join(decrypted_message)
decoder_key = int(input('How many shifts are in the caesars cipher?\n'))
encrypted_message = input('What is the encrypted message?\n')
decoded_message = caesar_cipher_decoder(decoder_key, encrypted_message.lower())
print(decoded_message)
###Output
How many shifts are in the caesars cipher?
4
What is the encrypted message?
lipps asvph!
hello world!
###Markdown
**Breaking a Caesar Cipher**
**What if we intercepted an encrypted message that we know was encrypted using a Caesar Cipher? How could we break it? Would it be easy to break?** **Slicing**
###Code
intercepted_message = 'uwdm bw bpm miab ib uqlvqopb. lw vwb ow qvbw bwev, abig qv bpm apilwea.'
for i in range(len(string.ascii_lowercase)):
print(caesar_cipher_decoder(i, intercepted_message),"\n")
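# A small extension sketch: instead of eyeballing all 26 candidates, score each one
# against rough English letter frequencies and pick the best automatically. The
# frequency values below are approximate, and this heuristic usually (though not
# always) recovers the correct shift.
letter_freq = {'e': 12.7, 't': 9.1, 'a': 8.2, 'o': 7.5, 'i': 7.0, 'n': 6.7,
               's': 6.3, 'h': 6.1, 'r': 6.0, 'd': 4.3, 'l': 4.0, 'u': 2.8}

def english_score(text):
    return sum(letter_freq.get(ch, 0) for ch in text)

best_key = max(range(26), key=lambda k: english_score(caesar_cipher_decoder(k, intercepted_message)))
print("most likely key:", best_key)
print(caesar_cipher_decoder(best_key, intercepted_message))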
###Output
uwdm bw bpm miab ib uqlvqopb. lw vwb ow qvbw bwev, abig qv bpm apilwea.
tvcl av aol lhza ha tpkupnoa. kv uva nv puav avdu, zahf pu aol zohkvdz.
subk zu znk kgyz gz sojtomnz. ju tuz mu otzu zuct, yzge ot znk yngjucy.
rtaj yt ymj jfxy fy rnisnlmy. it sty lt nsyt ytbs, xyfd ns ymj xmfitbx.
qszi xs xli iewx ex qmhrmklx. hs rsx ks mrxs xsar, wxec mr xli wlehsaw.
pryh wr wkh hdvw dw plgqljkw. gr qrw jr lqwr wrzq, vwdb lq wkh vkdgrzv.
oqxg vq vjg gcuv cv okfpkijv. fq pqv iq kpvq vqyp, uvca kp vjg ujcfqyu.
npwf up uif fbtu bu njeojhiu. ep opu hp joup upxo, tubz jo uif tibepxt.
move to the east at midnight. do not go into town, stay in the shadows.
lnud sn sgd dzrs zs lhcmhfgs. cn mns fn hmsn snvm, rszx hm sgd rgzcnvr.
kmtc rm rfc cyqr yr kgblgefr. bm lmr em glrm rmul, qryw gl rfc qfybmuq.
jlsb ql qeb bxpq xq jfakfdeq. al klq dl fkql qltk, pqxv fk qeb pexaltp.
ikra pk pda awop wp iezjecdp. zk jkp ck ejpk pksj, opwu ej pda odwzkso.
hjqz oj ocz zvno vo hdyidbco. yj ijo bj dioj ojri, novt di ocz ncvyjrn.
gipy ni nby yumn un gcxhcabn. xi hin ai chni niqh, mnus ch nby mbuxiqm.
fhox mh max xtlm tm fbwgbzam. wh ghm zh bgmh mhpg, lmtr bg max latwhpl.
egnw lg lzw wskl sl eavfayzl. vg fgl yg aflg lgof, klsq af lzw kzsvgok.
dfmv kf kyv vrjk rk dzuezxyk. uf efk xf zekf kfne, jkrp ze kyv jyrufnj.
celu je jxu uqij qj cytdywxj. te dej we ydje jemd, ijqo yd jxu ixqtemi.
bdkt id iwt tphi pi bxscxvwi. sd cdi vd xcid idlc, hipn xc iwt hwpsdlh.
acjs hc hvs sogh oh awrbwuvh. rc bch uc wbhc hckb, ghom wb hvs gvorckg.
zbir gb gur rnfg ng zvqavtug. qb abg tb vagb gbja, fgnl va gur funqbjf.
yahq fa ftq qmef mf yupzustf. pa zaf sa uzfa faiz, efmk uz ftq etmpaie.
xzgp ez esp plde le xtoytrse. oz yze rz tyez ezhy, delj ty esp dslozhd.
wyfo dy dro okcd kd wsnxsqrd. ny xyd qy sxdy dygx, cdki sx dro crknygc.
vxen cx cqn njbc jc vrmwrpqc. mx wxc px rwcx cxfw, bcjh rw cqn bqjmxfb.
###Markdown
**Challenge: Caesar Cipher**
**How would you code a Caesar Cipher? Can you code it using an imported data structure? What about with modular arithmetic? How fast does your Caesar Cipher run when compared to the given example?** **Challenge Answer 1**
**The following Caesar Cipher uses a deque to encrypt and decrypt messages.** **Caesar Cipher using Deque**
###Code
from collections import deque

#creating our caesars cipher function
def caesar_cipher_deque(key, message):
ascii_lower = [i for i in string.ascii_lowercase]
caesars_list = deque(ascii_lower)
caesars_list.rotate(-key)
caesars_list.insert((len(caesars_list)+1)," ")
ascii_lower.insert((len(caesars_list)+1)," ")
ascii_lower.extend([i for i in string.punctuation])
caesars_list.extend([i for i in string.punctuation])
cipher = [caesars_list[ascii_lower.index(i)] for i in message]
return ''.join(cipher)
###Output
_____no_output_____
###Markdown
**Testing Caesar Cipher**
###Code
#testing our caesars cipher
key = int(input('How many shifts do you want in your caesars cipher?\n'))
message = input('What is your message?\n')
caesar_message = caesar_cipher_deque(key, message.lower())
print(caesar_message)
###Output
How many shifts do you want in your caesars cipher?
4
What is your message?
hello world!
lipps asvph!
###Markdown
**Decoding Caesar Cipher (Deque)**
###Code
#decoding the message
def caesar_deque_decoder(key, encrypted_message):
ascii_lower = [i for i in string.ascii_lowercase]
caesars_list = deque(ascii_lower)
caesars_list.rotate(-key)
caesars_list.insert((len(caesars_list)+1)," ")
ascii_lower.insert((len(caesars_list)+1)," ")
ascii_lower.extend([i for i in string.punctuation])
caesars_list.extend([i for i in string.punctuation])
decrypted_message = [ascii_lower[caesars_list.index(i)] for i in encrypted_message]
return ''.join(decrypted_message)
decoder_key = int(input('How many shifts are in the caesars cipher?\n'))
encrypted_message = input('What is the encrypted message?\n')
decoded_message = caesar_deque_decoder(decoder_key, encrypted_message.lower())
print(decoded_message)
###Output
How many shifts are in the caesars cipher?
4
What is the encrypted message?
lipps asvph!
hello world!
###Markdown
**Breaking a Caesar Cipher (Deque)**
###Code
intercepted_message = 'uwdm bw bpm miab ib uqlvqopb. lw vwb ow qvbw bwev, abig qv bpm apilwea.'
for i in range(len(string.ascii_lowercase)):
print(caesar_deque_decoder(i, intercepted_message),"\n")
###Output
uwdm bw bpm miab ib uqlvqopb. lw vwb ow qvbw bwev, abig qv bpm apilwea.
tvcl av aol lhza ha tpkupnoa. kv uva nv puav avdu, zahf pu aol zohkvdz.
subk zu znk kgyz gz sojtomnz. ju tuz mu otzu zuct, yzge ot znk yngjucy.
rtaj yt ymj jfxy fy rnisnlmy. it sty lt nsyt ytbs, xyfd ns ymj xmfitbx.
qszi xs xli iewx ex qmhrmklx. hs rsx ks mrxs xsar, wxec mr xli wlehsaw.
pryh wr wkh hdvw dw plgqljkw. gr qrw jr lqwr wrzq, vwdb lq wkh vkdgrzv.
oqxg vq vjg gcuv cv okfpkijv. fq pqv iq kpvq vqyp, uvca kp vjg ujcfqyu.
npwf up uif fbtu bu njeojhiu. ep opu hp joup upxo, tubz jo uif tibepxt.
move to the east at midnight. do not go into town, stay in the shadows.
lnud sn sgd dzrs zs lhcmhfgs. cn mns fn hmsn snvm, rszx hm sgd rgzcnvr.
kmtc rm rfc cyqr yr kgblgefr. bm lmr em glrm rmul, qryw gl rfc qfybmuq.
jlsb ql qeb bxpq xq jfakfdeq. al klq dl fkql qltk, pqxv fk qeb pexaltp.
ikra pk pda awop wp iezjecdp. zk jkp ck ejpk pksj, opwu ej pda odwzkso.
hjqz oj ocz zvno vo hdyidbco. yj ijo bj dioj ojri, novt di ocz ncvyjrn.
gipy ni nby yumn un gcxhcabn. xi hin ai chni niqh, mnus ch nby mbuxiqm.
fhox mh max xtlm tm fbwgbzam. wh ghm zh bgmh mhpg, lmtr bg max latwhpl.
egnw lg lzw wskl sl eavfayzl. vg fgl yg aflg lgof, klsq af lzw kzsvgok.
dfmv kf kyv vrjk rk dzuezxyk. uf efk xf zekf kfne, jkrp ze kyv jyrufnj.
celu je jxu uqij qj cytdywxj. te dej we ydje jemd, ijqo yd jxu ixqtemi.
bdkt id iwt tphi pi bxscxvwi. sd cdi vd xcid idlc, hipn xc iwt hwpsdlh.
acjs hc hvs sogh oh awrbwuvh. rc bch uc wbhc hckb, ghom wb hvs gvorckg.
zbir gb gur rnfg ng zvqavtug. qb abg tb vagb gbja, fgnl va gur funqbjf.
yahq fa ftq qmef mf yupzustf. pa zaf sa uzfa faiz, efmk uz ftq etmpaie.
xzgp ez esp plde le xtoytrse. oz yze rz tyez ezhy, delj ty esp dslozhd.
wyfo dy dro okcd kd wsnxsqrd. ny xyd qy sxdy dygx, cdki sx dro crknygc.
vxen cx cqn njbc jc vrmwrpqc. mx wxc px rwcx cxfw, bcjh rw cqn bqjmxfb.
###Markdown
**Challenge Answer 2**
**The following Caesar Cipher uses modular arithmetic to encrypt and decrypt messages.**
###Code
#see the khan academy link to learn how to use modular arithmetic when implementing caesar cipher
def caesar_cipher_modulo(key, message):
alphabet = dict(zip(string.ascii_lowercase, [i for i in range(len(string.ascii_lowercase))]))
cipher = []
for i in message:
        if i.isalpha():  # only letters are shifted; other characters pass through unchanged
cipher.append(list(alphabet.keys())[list(alphabet.values()).index((alphabet[i] + key) % len(alphabet))])
else:
cipher.append(i)
return ''.join(cipher)
###Output
_____no_output_____
###Markdown
**Caesar Cipher using Modular Arithmetic**
###Code
#testing our caesars cipher
key = int(input('How many shifts do you want in your caesars cipher?\n'))
message = input('What is your message?\n')
caesar_message = caesar_cipher_modulo(key, message.lower())
print(caesar_message)
###Output
How many shifts do you want in your caesars cipher?
4
What is your message?
hello world!
lipps asvph!
###Markdown
**Decoding Caesar Cipher (Modular Arithmetic)**
###Code
#decoding the message
def caesar_modulo_decoder(key, message):
alphabet = dict(zip(string.ascii_lowercase, [i for i in range(len(string.ascii_lowercase))]))
cipher = []
for i in message:
        if i.isalpha():  # only letters are shifted; other characters pass through unchanged
cipher.append(list(alphabet.keys())[list(alphabet.values()).index((alphabet[i] - key) % len(alphabet))])
else:
cipher.append(i)
return ''.join(cipher)
decoder_key = int(input('How many shifts are in the caesars cipher?\n'))
encrypted_message = input('What is the encrypted message?\n')
decoded_message = caesar_modulo_decoder(decoder_key, encrypted_message.lower())
print(decoded_message)
###Output
How many shifts are in the caesars cipher?
4
What is the encrypted message?
lipps asvph!
hello world!
|
Air_Quality_Index/Linear_Regression.ipynb | ###Markdown
Linear Regression
###Code
# imports assumed by this fragment; the `data` DataFrame is taken to have been
# loaded in an earlier cell that is not shown here.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

X=data.drop(columns='PM2.5')
y=data['PM2.5']
sns.heatmap(X.corr(),annot=True,cmap='RdYlGn')
plt.plot()
###Output
_____no_output_____
###Markdown
High multicollinearity - drop the TM, Tm, VM columns
###Code
# X.drop(columns=['TM','Tm','VM'],inplace=True)
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
X_train,X_test,y_train,y_test=train_test_split(X,y,train_size=0.7,random_state=0)
lr=LinearRegression()
lr.fit(X_train,y_train)
lr.score(X_train,y_train)
lr.score(X_test,y_test)
pd.DataFrame(lr.coef_,X.columns,columns=['Coeficient'])
y_pred=lr.predict(X_test)
sns.distplot(y_pred-y_test)
plt.plot()
plt.scatter(y_test,y_pred)
plt.plot()
###Output
_____no_output_____
###Markdown
Metrics
###Code
from sklearn.metrics import mean_squared_error,mean_absolute_error
# MAE
mean_absolute_error(y_test,y_pred)
# MSE
mean_squared_error(y_test,y_pred)
# RMSE
np.sqrt(mean_squared_error(y_test,y_pred))
###Output
_____no_output_____
###Markdown
Save Model
###Code
import pickle
with open('Models/Linear_Regression.pkl','wb') as f:
pickle.dump(lr,f)
###Output
_____no_output_____ |
analyses/metrics/determine_qc_metric_schema.ipynb | ###Markdown
Goal: Outline the process of producing a shared QC metric schema that delegates to Picard names when they are adequately descriptive of what they measure. The following two workflows were used to extract metrics, and files were downloaded to `picard_metric_dir` and `optimus_metric_dir`:
```
https://job-manager.mint-dev.broadinstitute.org/jobs/a39b92db-bed0-40d4-83de-3ca0505dc5a8  10x v2
https://job-manager.mint-dev.broadinstitute.org/jobs/b9ff68b4-2434-4909-8275-850cb84ebb13  ss2
```
###Code
import os
from crimson import picard
###Output
_____no_output_____
###Markdown
Examine SS2 pipeline metrics outputs. Listed below are the file names of metrics files emitted by a smart-seq2 workflow.
###Code
picard_metric_dir = os.path.expanduser('~/Desktop/picard')
!ls $picard_metric_dir
###Output
SRR1294925_qc.alignment_summary_metrics.txt
SRR1294925_qc.bait_bias_detail_metrics.txt
SRR1294925_qc.bait_bias_summary_metrics.txt
SRR1294925_qc.base_distribution_by_cycle_metrics.txt
SRR1294925_qc.error_summary_metrics.txt
SRR1294925_qc.gc_bias.detail_metrics.txt
SRR1294925_qc.gc_bias.summary_metrics.txt
SRR1294925_qc.pre_adapter_detail_metrics.txt
SRR1294925_qc.quality_by_cycle_metrics.txt
SRR1294925_qc.quality_distribution_metrics.txt
###Markdown
This method parses a few of the files that are in a consistent format
###Code
metric_files = [os.path.join(picard_metric_dir, f) for f in os.listdir(picard_metric_dir)]
def parse_picard(metric_file):
with open(metric_file, 'r') as f:
json_data = picard.parse(f)
metric_class_name = json_data['metrics']['class']
metrics = {}
for d in json_data['metrics']['contents']:
for k, v in d.items():
metrics[k] = type(v)
del metrics['SAMPLE_ALIAS'], metrics['LIBRARY']
return metric_class_name, metrics
###Output
_____no_output_____
###Markdown
This is a map between the metric class and the names of metrics calculated by each class, mapped to the output type. Caveat: 5 of the files don't decode. Those are printed in full below.
###Code
all_metrics_and_names = {}
for m in metric_files[:-2]:
try:
all_metrics_and_names.__setitem__(*parse_picard(m))
except:
print(m)
all_metrics_and_names
###Output
_____no_output_____
###Markdown
Below, files that didn't convert are just printed to console to get a sense of their metric names
###Code
!cat $picard_metric_dir/SRR1294925_qc.base_distribution_by_cycle_metrics.txt
!cat $picard_metric_dir/SRR1294925_qc.gc_bias.summary_metrics.txt
!cat $picard_metric_dir/SRR1294925_qc.gc_bias.detail_metrics.txt
!cat $picard_metric_dir/SRR1294925_qc.error_summary_metrics.txt
!cat $picard_metric_dir/SRR1294925_qc.quality_by_cycle_metrics.txt
!cat $picard_metric_dir/SRR1294925_qc.alignment_summary_metrics.txt
###Output
## htsjdk.samtools.metrics.StringHeader
# CollectMultipleMetrics INPUT=/cromwell_root/broad-dsde-mint-dev-cromwell-execution/cromwell-executions/TestSmartSeq2SingleCellPR/dbec853f-f908-44d5-abf8-c2b3e9a1c1dd/call-target_workflow/SmartSeq2SingleCell/efca6617-3b23-4620-8227-dd9484b9547f/call-HISAT2PairedEnd/SRR1294925_qc.bam ASSUME_SORTED=true OUTPUT=SRR1294925_qc METRIC_ACCUMULATION_LEVEL=[ALL_READS] FILE_EXTENSION=.txt PROGRAM=[CollectAlignmentSummaryMetrics, CollectInsertSizeMetrics, CollectGcBiasMetrics, CollectBaseDistributionByCycle, QualityScoreDistribution, MeanQualityByCycle, CollectSequencingArtifactMetrics, CollectQualityYieldMetrics] VALIDATION_STRINGENCY=SILENT REFERENCE_SEQUENCE=/cromwell_root/hca-dcp-mint-test-data/reference/GRCh38_Gencode/GRCh38.primary_assembly.genome.fa STOP_AFTER=0 INCLUDE_UNPAIRED=false VERBOSITY=INFO QUIET=false COMPRESSION_LEVEL=5 MAX_RECORDS_IN_RAM=500000 CREATE_INDEX=false CREATE_MD5_FILE=false GA4GH_CLIENT_SECRETS=client_secrets.json USE_JDK_DEFLATER=false USE_JDK_INFLATER=false
## htsjdk.samtools.metrics.StringHeader
# Started on: Mon Jun 11 18:18:02 UTC 2018
## METRICS CLASS picard.analysis.AlignmentSummaryMetrics
CATEGORY TOTAL_READS PF_READS PCT_PF_READS PF_NOISE_READS PF_READS_ALIGNED PCT_PF_READS_ALIGNED PF_ALIGNED_BASES PF_HQ_ALIGNED_READS PF_HQ_ALIGNED_BASES PF_HQ_ALIGNED_Q20_BASES PF_HQ_MEDIAN_MISMATCHES PF_MISMATCH_RATE PF_HQ_ERROR_RATE PF_INDEL_RATE MEAN_READ_LENGTH READS_ALIGNED_IN_PAIRS PCT_READS_ALIGNED_IN_PAIRS PF_READS_IMPROPER_PAIRS PCT_PF_READS_IMPROPER_PAIRS BAD_CYCLES STRAND_BALANCE PCT_CHIMERAS PCT_ADAPTER SAMPLE LIBRARY READ_GROUP
FIRST_OF_PAIR 708464 708464 1 0 620557 0.875919 15474854 545623 13614147 13481269 0 0.000979 0.000904 0.000052 25 569427 0.917606 57860 0.093239 0 0.50191 0.012 0.000001
SECOND_OF_PAIR 708464 708464 1 0 613894 0.866514 15296633 539174 13442337 13231754 0 0.001114 0.001002 0.000123 25 569427 0.927566 51197 0.083397 0 0.500181 0.012132 0.000001
PAIR 1416928 1416928 1 0 1234451 0.871216 30771487 1084797 27056484 26713023 0 0.001046 0.000953 0.000088 25 1138854 0.922559 109057 0.088345 0 0.50105 0.012066 0.000001
###Markdown
Optimus Metrics. Now, do the same for Optimus metrics. Optimus has all of the metrics in one file, although it may not have the depth of analysis that the Picard ones do. We could use Picard + user research to identify missing metrics and expand our complement as recommended.
###Code
import pandas as pd
optimus_metric_dir = os.path.expanduser('~/Desktop/optimus')
print('cell metrics\n')
for c in pd.read_csv(os.path.join(optimus_metric_dir, 'merged-cell-metrics.csv.gz')).columns[1:]:
print(c)
print('\ngene metrics\n')
for c in pd.read_csv(os.path.join(optimus_metric_dir, 'merged-gene-metrics.csv.gz')).columns[1:]:
print(c)
###Output
cell metrics
n_reads
noise_reads
perfect_molecule_barcodes
reads_mapped_exonic
reads_mapped_intronic
reads_mapped_utr
reads_mapped_uniquely
reads_mapped_multiple
duplicate_reads
spliced_reads
antisense_reads
molecule_barcode_fraction_bases_above_30_mean
molecule_barcode_fraction_bases_above_30_variance
genomic_reads_fraction_bases_quality_above_30_mean
genomic_reads_fraction_bases_quality_above_30_variance
genomic_read_quality_mean
genomic_read_quality_variance
n_molecules
n_fragments
reads_per_molecule
reads_per_fragment
fragments_per_molecule
fragments_with_single_read_evidence
molecules_with_single_read_evidence
perfect_cell_barcodes
reads_mapped_intergenic
reads_unmapped
reads_mapped_too_many_loci
cell_barcode_fraction_bases_above_30_variance
cell_barcode_fraction_bases_above_30_mean
n_genes
genes_detected_multiple_observations
gene metrics
n_reads
noise_reads
perfect_molecule_barcodes
reads_mapped_exonic
reads_mapped_intronic
reads_mapped_utr
reads_mapped_uniquely
reads_mapped_multiple
duplicate_reads
spliced_reads
antisense_reads
molecule_barcode_fraction_bases_above_30_mean
molecule_barcode_fraction_bases_above_30_variance
genomic_reads_fraction_bases_quality_above_30_mean
genomic_reads_fraction_bases_quality_above_30_variance
genomic_read_quality_mean
genomic_read_quality_variance
n_molecules
n_fragments
reads_per_molecule
reads_per_fragment
fragments_per_molecule
fragments_with_single_read_evidence
molecules_with_single_read_evidence
number_cells_detected_multiple
number_cells_expressing
|
tests_jupyter/genetic_algorithm_parameters.ipynb | ###Markdown
Compare the effect of crossover_thres
###Code
ag.run(ngen=20,seed=2)
ag.run(ngen=20,seed=2,crossover_thres=100)
###Output
gen nevals pareto correlation distance
0 100 3 4.33 - 14.92 0.74 - 63.45
1 50 3 4.33 - 9.32 5.76 - 63.45
2 50 3 4.33 - 8.37 5.87 - 63.45
3 50 3 4.33 - 7.5 5.87 - 63.45
4 50 2 4.27 - 7.23 5.87 - 63.45
5 50 3 4.16 - 6.15 5.87 - 63.45
6 50 5 4.16 - 5.68 36.76 - 63.46
7 50 3 3.88 - 5.59 44.89 - 63.57
8 50 5 3.85 - 5.51 53.27 - 63.57
9 50 3 3.74 - 5.38 53.3 - 63.57
10 50 5 3.74 - 5.17 53.3 - 63.57
11 50 8 3.74 - 4.92 53.31 - 63.57
12 50 7 3.74 - 4.89 57.68 - 63.58
13 50 4 3.74 - 4.89 57.69 - 63.58
14 50 4 3.74 - 4.75 57.69 - 63.58
15 50 5 3.68 - 4.89 57.69 - 63.58
16 50 10 3.63 - 4.99 57.69 - 63.58
17 50 11 3.63 - 4.99 57.69 - 63.59
18 50 13 3.63 - 4.99 57.69 - 63.59
19 50 15 3.63 - 4.99 57.81 - 63.59
20 50 16 3.63 - 4.99 57.82 - 63.59
|
nlp/exercise/text-classification.ipynb | ###Markdown
**[Natural Language Processing Home Page](https://www.kaggle.com/learn/natural-language-processing)** --- Natural Language Classification You did such a great job for DeFalco's restaurant in the previous exercise that the chef has hired you for a new project. The restaurant's menu includes an email address where visitors can give feedback about their food. The manager wants you to create a tool that automatically sends him all the negative reviews so he can fix them, while automatically sending all the positive reviews to the owner, so the manager can ask for a raise. You will first build a model to distinguish positive reviews from negative reviews using Yelp reviews because these reviews include a rating with each review. Your data consists of the text body of each review along with the star rating. Ratings with 1-2 stars count as "negative", and ratings with 4-5 stars are "positive". Ratings with 3 stars are "neutral" and have been dropped from the data. Let's get started. First, run the next code cell.
###Code
# setup code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex2 import *
print("Setup is completed.")
###Output
Setup is completed.
###Markdown
Step 1: Evaluate the Approach. Is there anything about this approach that concerns you? After you've thought about it, run the function below to see one point of view.
###Code
# check your answer (run this code cell to receive credit!)
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review Data and Create the Model. Moving forward with your plan, you'll need to load the data. Here's some basic code to load data and split it into a training and validation set. Run this code.
###Code
import pandas as pd
def load_data(csv_file, split=0.9):
data = pd.read_csv(csv_file)
# shuffle data, sampling with frac < 1, upsampling with frac > 1
train_data = data.sample(frac=1, random_state=7)
texts = train_data["text"].values
labels = [
{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in train_data["sentiment"].values
]
split = int(len(train_data) * split)
train_labels = [{"cats": labels} for labels in labels[:split]]
val_labels = [{"cats": labels} for labels in labels[split:]]
return texts[:split], train_labels, texts[split:], val_labels
train_texts, train_labels, val_texts, val_labels = load_data('../input/nlp-course/yelp_ratings.csv')
###Output
_____no_output_____
###Markdown
You will use this training data to build a model. The code to build the model is the same as what you saw in the tutorial. So that is copied below for you. But because your data is different, there are **two lines in the modeling code cell that you'll need to change.** Can you figure out what they are? First, run the cell below to look at a couple elements from your training data.
###Code
print('Texts from training data\n', '-'*10)
print(train_texts[:2])
print('\n')
print('Labels from training data\n', '-'*10)
print(train_labels[:2])
###Output
Texts from training data
----------
["Some of the best sushi I've ever had....and I come from the East Coast. Unreal toro, have some of it's available."
"One of the best burgers I've ever had and very well priced. I got the tortilla burger and is was delicious especially with there tortilla soup!"]
Labels from training data
----------
[{'cats': {'POSITIVE': True, 'NEGATIVE': False}}, {'cats': {'POSITIVE': True, 'NEGATIVE': False}}]
###Markdown
Now, having seen this data, find the two lines that need to be changed.
###Code
# create an empty model
import spacy
nlp = spacy.blank("en")
# create the TextCategorizer with exclusive classes and Bag of Words (bow) architecture
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": "bow"
}
)
# add the TextCategorizer to the empty model
nlp.add_pipe(textcat)
# add labels to text classifier
textcat.add_label("NEGATIVE")
textcat.add_label("POSITIVE")
# check your answer
step_2.check()
# lines below will give you a hint or solution code
# step_2.hint()
# step_2.solution()
###Output
_____no_output_____
###Markdown
Step 3: Train Function. Implement a function `train` that updates a model with training data. Most of this is general data munging, which we've filled in for you. Just add the one line of code necessary to update your model.
###Code
import random
from spacy.util import minibatch
nlp.begin_training()
def train(model, train_data, optimizer, batch_size=8):
losses = {}
random.seed(1)
random.shuffle(train_data)
# create the batch generator
batches = minibatch(train_data, size=batch_size)
for batch in batches:
# split batch into texts and labels
texts, labels = zip(*batch)
# update model with texts and labels
nlp.update(texts, labels, sgd=optimizer, losses=losses)
return losses
# check your answer
step_3.check()
# lines below will give you a hint or solution code
# step_3.hint()
# step_3.solution()
# fix seed for reproducibility
spacy.util.fix_random_seed(1)
random.seed(1)
optimizer = nlp.begin_training()
train_data = list(zip(train_texts, train_labels))
losses = train(nlp, train_data, optimizer)
print(losses['textcat'])
###Output
8.185380340941789
###Markdown
We can try this slightly trained model on some example text and look at the probabilities assigned to each label.
###Code
text = "This tea cup was full of holes. Do not recommend."
doc = nlp(text)
print(doc.cats)
###Output
{'NEGATIVE': 0.7562618851661682, 'POSITIVE': 0.24373817443847656}
###Markdown
These probabilities look reasonable. Now you should turn them into an actual prediction. Step 4: Making Predictions. Implement a function `predict` that uses a model to predict the sentiment of text examples. The function takes a spaCy model (with a `TextCategorizer`) and a list of texts. First, tokenize the texts using `model.tokenizer`. Then, pass those docs to the `TextCategorizer` which you can get from `model.get_pipe`. Use the `textcat.predict` method to get scores for each document, then choose the class with the highest score (probability) as the predicted class.
###Code
def predict(model, texts):
# Use the model's tokenizer to tokenize each input text
docs = [model.tokenizer(text) for text in texts]
# use textcat to get the scores for each doc
textcat = model.get_pipe('textcat')
scores, _ = textcat.predict(docs)
# from the scores, find the class with the highest score/probability
predicted_class = scores.argmax(axis=1)
return predicted_class
# check your answer
step_4.check()
# lines below will give you a hint or solution code
# step_4.hint()
# step_4.solution()
texts = val_texts[34:38]
predictions = predict(nlp, texts)
for p, t in zip(predictions, texts):
print(f"{textcat.labels[p]}: {t} \n")
predict(nlp, texts)
###Output
_____no_output_____
###Markdown
It looks like your model is working well after going through the data just once. However, you need to calculate some metric for the model's performance on the hold-out validation data. Step 5: Evaluate The Model. Implement a function that evaluates a `TextCategorizer` model. This function `evaluate` takes a model along with texts and labels. It returns the accuracy of the model, which is the number of correct predictions divided by all predictions. First, use the `predict` method you wrote earlier to get the predicted class for each text in `texts`. Then, find where the predicted labels match the true "gold-standard" labels and calculate the accuracy.
###Code
def evaluate(model, texts, labels):
"""
Returns the accuracy of a TextCategorizer model.
Arguments
---------
model: ScaPy model with a TextCategorizer
texts: Text samples, from load_data function
labels: True labels, from load_data function
"""
# get predictions from textcat model (using your predict method)
predicted_class = predict(model, texts)
# from labels, get the true class as a list of integers (POSITIVE -> 1, NEGATIVE -> 0)
true_class = [int(label['cats']['POSITIVE']) for label in labels]
# a boolean or int array indicating correct predictions
correct_predictions = (predicted_class == true_class)
# the accuracy, number of correct predictions divided by all predictions
accuracy = correct_predictions.mean()
return accuracy
# check your answer
step_5.check()
# lines below will give you a hint or solution code
# step_5.hint()
# step_5.solution()
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Accuracy: {accuracy:.4f}")
###Output
Accuracy: 0.9486
###Markdown
With the functions implemented, you can train and evaluate in a loop.
###Code
n_iters = 5
for i in range(n_iters):
losses = train(nlp, train_data, optimizer)
accuracy = evaluate(nlp, val_texts, val_labels)
print(f"Loss: {losses['textcat']:.3f} \t Accuracy: {accuracy:.3f}")
###Output
Loss: 4.454 Accuracy: 0.945
Loss: 3.079 Accuracy: 0.946
Loss: 2.343 Accuracy: 0.945
Loss: 1.913 Accuracy: 0.943
Loss: 1.584 Accuracy: 0.945
###Markdown
Step 6: Keep Improving. You've built the necessary components to train a text classifier with spaCy. What could you do further to optimize the model? Run the next line to check your answer.
###Code
# check your answer (run this code cell to receive credit!)
step_6.solution()
###Output
_____no_output_____ |
modules/module10 - inferential spatial models/module10.ipynb | ###Markdown
Advanced Spatial Analysis Module 10: Inferential Spatial Modeling. Statistical inference is the process of using a sample to *infer* the characteristics of an underlying population (from which this sample was drawn) through estimation and hypothesis testing. Contrast this with descriptive statistics, which focus simply on describing the characteristics of the sample itself. Common goals of inferential statistics include: - parameter estimation and confidence intervals - hypothesis rejection - prediction - model selection. To conduct statistical inference, we rely on *statistical models*: sets of assumptions plus mathematical relationships between variables, producing a formal representation of some theory. We are essentially trying to explain the process underlying the generation of our data. What is the probability distribution (the probabilities of occurrence of different possible outcome values of our response variable)? **Spatial inference** introduces explicit spatial relationships into the statistical modeling framework, as both theory-driven (e.g., spatial spillovers) and data-driven (e.g., MAUP) issues could otherwise violate modeling assumptions. Schools of statistical inference: - frequentist - frequentists think of probability as proportion of times some outcome occurs (relative frequency) - given lots of repeated trials, how likely is the observed outcome? - concepts: statistical hypothesis testing, *p*-values, confidence intervals - bayesian - bayesians think of probability as amount of certainty observer has about an outcome occurring (subjective probability) - probability as a measure of how much info the observer has about the real world, updated as info changes - concepts: prior probability, likelihood, bayes' rule, posterior probability
###Code
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pysal as ps
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
from statsmodels.tools.tools import add_constant
np.random.seed(0)
%matplotlib inline
# load the data
tracts = gpd.read_file('data/census_tracts_data.geojson')
tracts.shape
# map the data
tracts.plot()
tracts.columns
###Output
_____no_output_____
###Markdown
1. Statistical inference: introduction 1a. Estimating population parameters
###Code
# descriptive stats
tracts['med_household_income'].describe()
# descriptive stat: average tract-level median income
tracts['med_household_income'].mean()
# descriptive stat of a simple random sample
n = 500
sample = tracts['med_household_income'].sample(n)
sample.mean()
###Output
_____no_output_____
###Markdown
How similar is our sample mean to our population mean? Is it a good estimate?
###Code
# calculate confidence interval using t-distribution (bc population std dev is unknown)
sample = sample.dropna() #drop nulls
conf = 0.95 #confidence level
df = len(sample) - 1 #degrees of freedom
loc = sample.mean() #the mean
scale = stats.sem(sample) #the standard error
conf_lower, conf_upper = stats.t.interval(conf, df, loc=loc, scale=scale)
# calculate the margin of error
moe = conf_upper - sample.mean()
# display confidence interval
print(f'{conf_lower:0.0f} – {conf_upper:0.0f} ({conf*100:0.0f}% confidence interval)')
print(f'{loc:0.0f} ± {moe:0.0f} (at {conf*100:0.0f}% confidence level)')
###Output
_____no_output_____
###Markdown
We are 95% confident that this interval contains the true population parameter value. That is, if we were to repeat this process many times (sampling then computing CI), on average 95% of the CIs would contain the true population parameter value (and 5% wouldn't).
###Code
# now it's your turn
# try different sample sizes and alpha levels: how do these change the confidence interval's size?
# now it's your turn
# randomly sample 100 tract-level median home values then calculate the mean and 99% confidence interval
###Output
_____no_output_____
###Markdown
1b. *t*-tests: difference in means. Is the difference between two groups statistically significant?
###Code
# choose a variable
var = 'med_home_value'
# create two data subsets
black_tracts = tracts[tracts['pct_black'] > 50]
group1 = black_tracts[var]
hispanic_tracts = tracts[tracts['pct_hispanic'] > 50]
group2 = hispanic_tracts[var]
# what are the probability distributions of these two data sets?
fig, ax = plt.subplots()
ax = group1.plot.kde(ls='--', c='k', alpha=0.5, lw=2, bw_method=0.7)
ax = group2.plot.kde(ls='-', c='k', alpha=0.5, lw=2, bw_method=0.7, ax=ax)
ax.set_xlim(left=0)
ax.set_ylim(bottom=0)
plt.show()
print(int(group1.mean()))
print(int(group2.mean()))
# calculate difference in means
diff = group1.mean() - group2.mean()
diff
# compute the t-stat and its p-value
t_statistic, p_value = stats.ttest_ind(group1, group2, equal_var=False, nan_policy='omit')
p_value
# is the difference in means statistically significant?
alpha = 0.05 #significance level
p_value < alpha
# now it's your turn
# what is the difference in mean tract-level median home values in majority white vs majority black tracts?
# is it statistically significant?
# what if you randomly sample just 25 tracts from each group: is their difference significant?
###Output
_____no_output_____
###Markdown
2. Statistical models. Introduction to OLS linear regression. Lots to cover in a course on regression that we must skip for today's quick overview. But in general you'd want to: - specify a model (or alternative models) based on theory - inspect candidate predictors' relationships with the response - inspect the predictors' relationships with each other (and reduce multicollinearity) - transform predictors for better linearity - identify and handle outlier observations - regression diagnostics 2a. Simple (bivariate) linear regression. OLS regression with a single predictor
###Code
# choose a response variable and drop any rows in which it is null
response = 'med_home_value'
tracts = tracts.dropna(subset=[response])
# create design matrix containing predictors (drop nulls), and a response variable vector
predictors = 'med_household_income'
X = tracts[predictors].dropna()
y = tracts.loc[X.index][response]
# estimate a simple linear regression model with scipy
m, b, r, p, se = stats.linregress(x=X, y=y)
print('m={:.4f}, b={:.4f}, r^2={:.4f}, p={:.4f}'.format(m, b, r ** 2, p))
# estimate a simple linear regression model with statsmodels
Xc = add_constant(X)
model = sm.OLS(y, Xc)
result = model.fit()
print(result.summary())
###Output
_____no_output_____
###Markdown
This single predictor explains about half the variation of the response. To explain more, we need more predictors. 2b. Multiple regression. OLS regression with multiple predictors
###Code
# create design matrix containing predictors (drop nulls), and a response variable vector
predictors = ['med_household_income', 'pct_white']
X = tracts[predictors].dropna()
y = tracts.loc[X.index][response]
# estimate a linear regression model
Xc = add_constant(X)
model = sm.OLS(y, Xc)
result = model.fit()
print(result.summary())
###Output
_____no_output_____
###Markdown
statsmodels diagnostic outputWe discuss diagnostics and standardized regression in more detail below, but here's a quick summary of the output above. If we get warnings about multicollinearity but have good VIF scores and significant variables, check a standardized regression (below) to see whether it's just scaling or the intercept/constant causing it (the intercept shouldn't inflate the condition number if we center/standardize our predictors); a high condition number indicates multicollinearity. Durbin-Watson tests for autocorrelation: a value around 1.5 to 2.5 is considered fine. Omnibus tests for normality of residuals: if prob < 0.05, we reject the null hypothesis that they are normally distributed (skew and kurtosis describe their distribution). Jarque-Bera also tests for normality of residuals: if prob < 0.05, we reject the null hypothesis that they are normally distributed.
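As a quick aside (a sketch, not part of the original notebook), the same diagnostics that statsmodels prints in its summary can be pulled programmatically from the fitted `result` above:

```python
# compute Durbin-Watson, Omnibus, and Jarque-Bera directly from the residuals
from statsmodels.stats.stattools import durbin_watson, jarque_bera, omni_normtest

resids = result.resid
print('Durbin-Watson:', durbin_watson(resids))       # ~1.5 to 2.5 suggests no autocorrelation
print('Omnibus (stat, p):', omni_normtest(resids))   # normality of residuals
jb, jb_p, skew, kurtosis = jarque_bera(resids)
print('Jarque-Bera p:', jb_p, 'skew:', skew, 'kurtosis:', kurtosis)
```

Now add in more variables...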
###Code
tracts.columns
# create design matrix containing predictors (drop nulls), and a response variable vector
predictors = ['med_household_income', 'pct_white', 'pct_single_family_home', 'pct_built_before_1940',
'med_rooms_per_home', 'pct_bachelors_degree']
X = tracts[predictors].dropna()
y = tracts.loc[X.index][response]
# estimate a linear regression model
Xc = add_constant(X)
model = sm.OLS(y, Xc)
result = model.fit()
print(result.summary())
# now it's your turn
# try different sets of predictors to increase R-squared while keeping the total number of predictors relatively low and theoretically sound
###Output
_____no_output_____
###Markdown
2c. Standardized regression*Beta coefficients* are the estimated regression coefficients when the response and predictors are standardized so that their variances equal 1. Thus, we can interpret these coefficients as how many standard deviations the response changes for each standard deviation increase in the predictor. This tells us about "effect size": which predictors have greater effects on the response by ignoring the variables' different units/scales of measurement. However, it relies on the variables' distributions having similar shapes (otherwise the meaning of a std dev in one will differ from a std dev in another).
###Code
# estimate a standardized regression model
y_stdrd = pd.Series(stats.mstats.zscore(y), index=y.index, name=y.name)
X_stdrd = pd.DataFrame(stats.mstats.zscore(X), index=X.index, columns=X.columns)
Xc_stdrd = add_constant(X_stdrd)
model_stdrd = sm.OLS(y_stdrd, Xc_stdrd)
result_stdrd = model_stdrd.fit()
print(result_stdrd.summary())
###Output
_____no_output_____
###Markdown
2d. DiagnosticsLet's take a step back and think about some of the steps we might take prior to specifying the model, and then to diagnose its fit.
###Code
# correlation matrix
# how well are predictors correlated with response... and with each other?
correlations = tracts[[response] + sorted(predictors)].corr()
correlations.round(2)
# visual correlation matrix via seaborn heatmap
# use vmin, vmax, center to set colorbar scale properly
sns.set(style='white')
ax = sns.heatmap(correlations, vmin=-1, vmax=1, center=0,
cmap=plt.cm.coolwarm, square=True, linewidths=1)
# plot pairwise relationships with seaborn
grid = sns.pairplot(tracts[[response] + sorted(predictors)], markers='.')
###Output
_____no_output_____
###Markdown
**Actual vs Predicted**: how well do our model's predicted y values match up to the actual y values? Is the variance the same throughout (homoskedastic)? A point's distance from the line is its residual (the difference between the actual and predicted values).
###Code
# plot observed (y-axis) vs fitted (x-axis)
observed = model.endog #actual response
fitted = result.fittedvalues #predicted response
fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(x=fitted, y=observed, s=0.2)
# draw a 45° y=x line
ax.set_xlim((min(np.append(observed, fitted)), max(np.append(observed, fitted))))
ax.set_ylim((min(np.append(observed, fitted)), max(np.append(observed, fitted))))
ax.plot(ax.get_xlim(), ax.get_ylim(), ls='--', c='k', alpha=0.5)
ax.set_xlabel('predicted values')
ax.set_ylabel('actual values')
plt.show()
###Output
_____no_output_____
###Markdown
**Residual Plot**: plot our residuals to look for heteroskedasticity. We want this plot to resemble a random point pattern with no discernable trend. If the spread grows as you move from left to right, you are seeing heteroskedasticity.
###Code
# standardized (internally studentized) residuals
resids_stud = result.get_influence().resid_studentized_internal
fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(x=result.fittedvalues, y=resids_stud, s=0.2)
ax.axhline(y=0, ls='--', c='k', alpha=0.5)
ax.set_title('residuals vs fitted plot')
ax.set_xlabel('fitted values')
ax.set_ylabel('standardized residuals')
plt.show()
###Output
_____no_output_____
###Markdown
**QQ-Plot**: are the residuals approximately normally distributed? That is, how well do they match a theoretical normal distribution? We want the points to follow the line.
###Code
fig, ax = plt.subplots(figsize=(6, 6))
fig = sm.qqplot(resids_stud, line='45', ax=ax)
ax.set_title('normal probability plot of the standardized residuals')
plt.show()
###Output
_____no_output_____
###Markdown
^^ looks like we've got a problem with our model! Can we improve it any with a transformation?
###Code
# estimate a linear regression model
Xc = add_constant(X)
model = sm.OLS(np.log(y), Xc)
result = model.fit()
#print(result.summary())
resids_stud = result.get_influence().resid_studentized_internal
fig, ax = plt.subplots(figsize=(6, 6))
fig = sm.qqplot(resids_stud, line='45', ax=ax)
ax.set_title('normal probability plot of the standardized residuals')
plt.show()
###Output
_____no_output_____
###Markdown
**Multicollinearity**: inspecting correlation among the predictors with condition number and VIF
###Code
# calculate condition numbers
print(np.linalg.cond(Xc))
print(np.linalg.cond(X))
print(np.linalg.cond(stats.mstats.zscore(X)))
###Output
_____no_output_____
###Markdown
A high condition number indicates multicollinearity. As a rule of thumb, you want this to be below ~20 (in real-world applied analyses it will often be a bit higher though). The condition number is the ratio of the largest eigenvalue in the design matrix to the smallest. If we have just one variable with units in the thousands (ie, a large eigenvalue) and add a constant with units of 1 (ie, a small eigenvalue), we'll get a large condition number as the ratio, and statsmodels warns of multicollinearity. In other words, the large condition number in this case results from scaling rather than from multicollinearity. If you standardize the design matrix, you see the condition number without the scaling effects.VIF is a measure of the collinearity of one variable with all the others. As a rule of thumb, a VIF > 10 indicates strong multicollinearity. If multicollinearity is present in our regression model, the correlated predictors can have large standard errors and thus become insignificant, even though they are theoretically important. By removing redundant predictors, we'll have more sensible regression results for the ones we leave in. In statsmodels, the VIF function expects the presence of a constant in the matrix of explanatory variables.
###Code
# calculate VIFs for all predictors then view head
vif_values = [vif(X.values, i) for i in range(len(X.columns))]
vifs = pd.Series(data=vif_values, index=X.columns).sort_values(ascending=False).head()
vifs
# remove the worst offender from the design matrix
# ...but is this theoretically sound?
highest_vif = vifs.index[0]
X = X.drop(highest_vif, axis='columns')
# re-calculate VIFs
vif_values = [vif(X.values, i) for i in range(len(X.columns))]
vifs = pd.Series(data=vif_values, index=X.columns).sort_values(ascending=False).head()
vifs
# estimate a linear regression model
Xc = add_constant(X)
model = sm.OLS(y, Xc)
result = model.fit()
print(result.summary())
# now it's your turn
# try removing variables from the set of predictors, or transforming them, then re-calculate VIFs
# can you find a set of predictors that makes good theoretical sense and has less multicollinearity?
###Output
_____no_output_____
###Markdown
3. Spatial modelsBasic types: - **Spatial heterogeneity**: account for systematic differences across space without explicitly modeling interdependency (non-spatial estimation) - spatial fixed effects (intercept varies for each spatial group) - spatial regimes (intercept and coefficients vary for each spatial group) - **Spatial dependence**: model interdependencies between observations through space - spatial lag model (spatially-lagged endogenous variable added as predictor; because of endogeneity, cannot use OLS to estimate) - spatial error model (spatial effects in error term) - spatial lag+error combo model 3a. Spatial fixed effectsUsing dummy variables representing the counties into which our observations (tracts) are nested
###Code
# create a new dummy variable for each county, with 1 if tract is in this county and 0 if not
for county in tracts['COUNTYFP'].unique():
new_col = f'dummy_county_{county}'
tracts[new_col] = (tracts['COUNTYFP'] == county).astype(int)
# remove one dummy from dummies to prevent perfect collinearity
# ie, a subset of predictors sums to 1 (which full set of dummies will do)
county_dummies = [f'dummy_county_{county}' for county in tracts['COUNTYFP'].unique()]
county_dummies = county_dummies[1:]
# create design matrix containing predictors (drop nulls), and a response variable vector
predictors = ['med_household_income', 'pct_white', 'pct_single_family_home', 'pct_built_before_1940',
'med_rooms_per_home', 'pct_bachelors_degree']
X = tracts[predictors + county_dummies].dropna()
y = tracts.loc[X.index][response]
# estimate a linear regression model
Xc = add_constant(X)
model = sm.OLS(y, Xc)
result = model.fit()
print(result.summary())
###Output
_____no_output_____
###Markdown
3b. Spatial regimesEach spatial regime can have different model coefficients. Here, the regimes are counties. We'll take a subset of our data (all the tracts appearing in 3 counties). This subsection just uses OLS for estimation, but you can also combine spatial regimes with spatial autoregression models (the latter are introduced later).
###Code
# pick 3 counties as the regimes, and only estimate a regimes model for this subset
counties = tracts['COUNTYFP'].value_counts().index[:3]
mask = tracts['COUNTYFP'].isin(counties)
# create design matrix containing predictors (drop nulls), a response variable matrix, and a regimes vector
X = tracts.loc[mask, predictors].dropna() #only take rows in the 3 counties
Y = tracts.loc[X.index][[response]] #notice this is a matrix this time for pysal
regimes = tracts.loc[X.index]['COUNTYFP'] #define the regimes
# estimate spatial regimes model with OLS
olsr = ps.model.spreg.OLS_Regimes(y=Y.values, x=X.values, regimes=regimes.values, name_regimes='county',
name_x=X.columns.tolist(), name_y=response, name_ds='tracts')
print(olsr.summary)
###Output
_____no_output_____
###Markdown
3c. Spatial diagnosticsSo far we've seen two spatial heterogeneity models. Now we'll explore spatial dependence, starting by using queen-contiguity spatial weights to model spatial relationships between observations and OLS to check diagnostics.
###Code
# create design matrix containing predictors (drop nulls), and a response variable matrix
predictors = ['med_household_income', 'pct_white', 'pct_single_family_home', 'pct_built_before_1940',
'med_rooms_per_home', 'pct_bachelors_degree']
X = tracts[predictors].dropna()
Y = tracts.loc[X.index][[response]] #notice this is a matrix this time for pysal
# compute spatial weights from tract geometries (but only those tracts that appear in design matrix!)
W = ps.lib.weights.Queen.from_dataframe(tracts.loc[X.index])
W.transform = 'r'
# compute OLS spatial diagnostics to check the nature of spatial dependence
ols = ps.model.spreg.OLS(y=Y.values, x=X.values, w=W, spat_diag=True, moran=True)
# calculate moran's I (for the response) and its significance
mi = ps.explore.esda.Moran(y=Y, w=W, two_tailed=True)
print(mi.I)
print(mi.p_sim)
# moran's I (for the residuals): moran's i, standardized i, p-value
ols.moran_res
###Output
_____no_output_____
###Markdown
Interpreting the resultsA significant Moran's *I* suggests spatial autocorrelation, but doesn't tell us which alternative specification should be used. Lagrange Multiplier (LM) diagnostics can help with that. If one LM test is significant and the other isn't, then that tells us which model specification (spatial lag vs spatial error) to use:
###Code
# lagrange multiplier test for spatial lag model: stat, p
ols.lm_lag
# lagrange multiplier test for spatial error model: stat, p
ols.lm_error
###Output
_____no_output_____
###Markdown
Interpreting the resultsIf (and only if) both the LM tests produce significant statistics, try the robust versions (the nonrobust LM tests are sensitive to each other):
###Code
# robust lagrange multiplier test for spatial lag model: stat, p
ols.rlm_lag
# robust lagrange multiplier test for spatial error model: stat, p
ols.rlm_error
###Output
_____no_output_____
###Markdown
So... which model specification to choose?If neither LM test is significant: use regular OLS.If only one LM test is significant: use that model spec.If both LM tests are significant: run robust versions.If only one robust LM test is significant: use that model spec.If both robust LM tests are significant (this can often happen with large sample sizes): - first consider if the initial model specification is actually a good fit - if so, use the spatial specification corresponding to the larger robust-LM statistic - or consider a combo model 3d. Spatial lag modelWhen the diagnostics indicate the presence of a spatial diffusion process.Model specification:$y = \rho W y + X \beta + u$where $y$ is a $n \times 1$ vector of observations (response), $W$ is a $n \times n$ spatial weights matrix (thus $Wy$ is the spatially-lagged response), $\rho$ is the spatial autoregressive parameter to be estimated, $X$ is a $n \times k$ matrix of observations (exogenous predictors), $\beta$ is a $k \times 1$ vector of parameters (coefficients) to be estimated, and $u$ is a $n \times 1$ vector of errors.
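Before estimating the lag model below, the decision rules above can be wrapped in a small helper (a sketch, assuming the pysal `ols` object with spatial diagnostics from the previous section; the 0.05 cutoff is an assumption):

```python
def choose_spec(ols, alpha=0.05):
    # each pysal diagnostic is a (statistic, p-value) pair
    lag_sig, err_sig = ols.lm_lag[1] < alpha, ols.lm_error[1] < alpha
    if not lag_sig and not err_sig:
        return 'OLS'
    if lag_sig != err_sig:
        return 'spatial lag' if lag_sig else 'spatial error'
    # both significant: consult the robust versions
    rlag_sig, rerr_sig = ols.rlm_lag[1] < alpha, ols.rlm_error[1] < alpha
    if rlag_sig != rerr_sig:
        return 'spatial lag' if rlag_sig else 'spatial error'
    # both robust tests significant: go with the larger robust statistic (or consider a combo model)
    return 'spatial lag' if ols.rlm_lag[0] > ols.rlm_error[0] else 'spatial error'

print(choose_spec(ols))
```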
###Code
# maximum-likelihood estimation with full matrix expression
mll = ps.model.spreg.ML_Lag(y=Y.values, x=X.values, w=W, method='full', name_w='queen',
name_x=X.columns.tolist(), name_y=response, name_ds='tracts')
print(mll.summary)
# the spatial autoregressive parameter estimate, rho
mll.rho
###Output
_____no_output_____
###Markdown
3e. Spatial error modelWhen the diagnostics indicate the presence of spatial error dependence.Model specification:$y = X \beta + u$where $X$ is a $n \times k$ matrix of observations (exogenous predictors), $\beta$ is a $k \times 1$ vector of parameters (coefficients) to be estimated, and $u$ is a $n \times 1$ vector of errors. The errors $u$ follow a spatial autoregressive specification:$u = \lambda Wu + \epsilon$where $\lambda$ is a spatial autoregressive parameter to be estimated and $\epsilon$ is the vector of errors.
###Code
# maximum-likelihood estimation with full matrix expression
mle = ps.model.spreg.ML_Error(y=Y.values, x=X.values, w=W, method='full', name_w='queen',
name_x=X.columns.tolist(), name_y=response, name_ds='tracts')
print(mle.summary)
# the spatial autoregressive parameter estimate, lambda
mle.lam
###Output
_____no_output_____
###Markdown
3f. Spatial lag+error combo modelEstimated with GMM (generalized method of moments). Essentially a spatial error model with endogenous explanatory variables.Model specification:$y = \rho W y + X \beta + u$where $y$ is a $n \times 1$ vector of observations (response), $W$ is a $n \times n$ spatial weights matrix (thus $Wy$ is the spatially-lagged response), $\rho$ is the spatial autoregressive parameter to be estimated, $X$ is a $n \times k$ matrix of observations (exogenous predictors), $\beta$ is a $k \times 1$ vector of parameters (coefficients) to be estimated, and $u$ is a $n \times 1$ vector of errors.The errors $u$ follow a spatial autoregressive specification:$u = \lambda Wu + \epsilon$where $\lambda$ is a spatial autoregressive parameter to be estimated and $\epsilon$ is the vector of errors.
###Code
gmc = ps.model.spreg.GM_Combo_Het(y=Y.values, x=X.values, w=W, name_w='queen', name_ds='tracts',
name_x=X.columns.tolist(), name_y=response)
print(gmc.summary)
# now it's your turn
# with a new set of predictors, compute spatial diagnostics and estimate a new spatial model accordingly
###Output
_____no_output_____ |
project_mid1/project_mid1.ipynb | ###Markdown
ASTRO 533 - Mid Project 1**Created:** Sep. 2020 **Last Edit:** Sep. 2020 **Author:** Bill Chen **Email:** [email protected] Load packages and read data
###Code
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from astropy.table import Table as tb
from astropy.coordinates import SkyCoord # High-level coordinates
from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
from astropy.coordinates import Angle, Latitude, Longitude # Angles
import astropy.units as u
import copy
plt.style.use('bill')
my_YlGnBu = copy.copy(mpl.cm.get_cmap('YlGnBu')) # copy the default cmap
my_YlGnBu.set_bad('w')
my_magma = copy.copy(mpl.cm.get_cmap('magma'))
my_magma.set_bad('k')
gaiarv_cat = tb.read('../glt13.fits', format='fits')
gaiarv_cat_m45 = tb.read('./glt21_m45.fits', format='fits')
# gaiarv_cat_m22 = tb.read('./glt19_m22.fits', format='fits')
###Output
_____no_output_____
###Markdown
Pre-parameters
###Code
size_min = 0 * u.pc # in pc
size_max = 100 * u.pc # in pc
###Output
_____no_output_____
###Markdown
Data processing*There will be several useless warnings.* ***Ignore them!***
###Code
m45ra = 15 * (3 + (47/60) + (24/3600)) # RA = 3h 47m 24s
m45dec = 24 + (7/60) + (0/3600) # Dec = 24deg 7min 0sec
gaiarv_cat['d'] = 1000*u.pc*u.mas / (gaiarv_cat['parallax']) # distance in pc
gaiarv_cat['absmag'] = gaiarv_cat['phot_g_mean_mag'] - 5*np.log10(gaiarv_cat['d']/10) # absolute magnitude
gaiarv_cat_m45['d'] = 1000*u.pc*u.mas / (gaiarv_cat_m45['parallax']) # distance in pc
gaiarv_cat_m45['absmag'] = gaiarv_cat_m45['phot_g_mean_mag'] - 5*np.log10(gaiarv_cat_m45['d']/10) # absolute magnitude
# indices of neighborhood stars
ind_nb_pre, = np.where((gaiarv_cat['d'] < size_max) & (gaiarv_cat['d'] > size_min)) # only for plotting
ind_nb, = np.where((gaiarv_cat['d'] < size_max) & (gaiarv_cat['d'] > size_min) &
(gaiarv_cat['absmag'] < 4*gaiarv_cat['bp_rp']+2) &
(((gaiarv_cat['absmag'] > 15*gaiarv_cat['bp_rp']-10.5) & (gaiarv_cat['bp_rp'] < 1)) |
((gaiarv_cat['absmag'] > 2.25*gaiarv_cat['bp_rp']+2.25) & (gaiarv_cat['bp_rp'] > 1))))
print('# of pre-filter neighborhood stars:', len(ind_nb_pre))
print('# of neighborhood stars:', len(ind_nb))
# indices of m45 stars
ind_m45, = np.where((abs(gaiarv_cat_m45['ra']-m45ra) < 3) & (abs(gaiarv_cat_m45['dec']-m45dec) < 3) &
(abs(gaiarv_cat_m45['pmra']-20) < 5) & (abs(gaiarv_cat_m45['pmdec']+45) < 5) &
(abs(gaiarv_cat_m45['parallax']-7.3) < 0.7))
print('# of m45 stars:', len(ind_m45))
###Output
c:\users\bill\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in log10
"""
c:\users\bill\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in log10
###Markdown
CMD
###Code
# plot parameters
x_min, x_max = -0.2, 3
y_min, y_max = 0, 12
bins = 100
bins_m45 = 50
# plot
# fig, ax = plt.subplots(figsize=(6,6))
fig, [ax1,ax2] = plt.subplots(1, 2, figsize=(12,6), sharey=True, sharex=True)
fig.subplots_adjust(wspace=0)
ax1.hist2d(gaiarv_cat['bp_rp'][ind_nb_pre], gaiarv_cat['absmag'][ind_nb_pre], range=[[x_min, x_max], [y_min, y_max]],
bins = bins, norm=mcolors.LogNorm(), cmap=my_YlGnBu)
ax1.plot([-0.2,2.5], [1.2,12], c='gray', ls='--') # y < 4x + 2
ax1.plot([0.7,1,3], [0,4.5,9], c='gray', ls='--') # y > 15x - 10.5 (x<1) 2.25x + 2.25 (x>1)
ax1.fill_between([-0.2,2.5], [1.2,12], [12,12], facecolor='gray', alpha=0.1)
ax1.fill_between([0.7,1,3], [0,4.5,9], [0,0,0], facecolor='gray', alpha=0.1)
ax1.set_xlabel(r'$\mathrm{BP-RP}$')
ax1.set_ylabel(r'$\mathrm{G}$')
ax1.set_xlim(x_min, x_max)
ax1.set_ylim(y_max, y_min)
ax1.set_xticks([0, 1, 2, 3])
ax1.set_xticklabels([r'$0$', r'$1$', r'$2$', r'$3$'])
ax1.set_yticks([0, 2, 4, 6, 8, 10, 12])
ax1.set_yticklabels([r'$0$', r'$2$', r'$4$', r'$6$', r'$8$', r'$10$', r'$12$'])
ax1.text(0.96, 0.96, r'$r<%d\ \mathrm{pc}$' % size_max.value, ha='right', va='top', transform=ax1.transAxes, fontsize=18)
ax2.hist2d(gaiarv_cat_m45['bp_rp'][ind_m45], gaiarv_cat_m45['absmag'][ind_m45], range=[[x_min, x_max], [y_min, y_max]],
bins = bins_m45, norm=mcolors.LogNorm(), cmap=my_YlGnBu)
ax2.set_xlabel(r'$\mathrm{BP-RP}$')
ax2.set_ylim(y_max, y_min)
ax2.text(0.96, 0.96, r'$\mathrm{M45}$', ha='right', va='top', transform=ax2.transAxes, fontsize=18)
plt.savefig('./figures/cmd.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
PDMF
###Code
# plot parameters
x_min, x_max = -2, 12
y_min, y_max = 0, 0.4
bins = 40
bin_edges = np.linspace(x_min, x_max, bins+1)
# detection limit
xs = (bin_edges[1:] + bin_edges[:-1])/2
d_lim = np.clip(10**(0.2*(13 - xs) + 1), 0, 100)
correct = (100 / d_lim)**3 # correction factor
# main plot
fig, ax = plt.subplots(figsize=(6,6))
hist_nb, bin_edges = np.histogram(gaiarv_cat['absmag'][ind_nb], bins=bin_edges)
hist_m45, bin_edges = np.histogram(gaiarv_cat_m45['absmag'][ind_m45], bins=bin_edges)
err_nb = np.sqrt(hist_nb) * correct
err_nb = err_nb * bins / (x_max-x_min) / np.sum(hist_nb)
hist_nb = hist_nb * correct
hist_nb = hist_nb * bins / (x_max-x_min) / np.sum(hist_nb)
err_m45 = np.sqrt(hist_m45)
err_m45 = err_m45 * bins / (x_max-x_min) / np.sum(hist_m45)
hist_m45 = hist_m45 * bins / (x_max-x_min) / np.sum(hist_m45)
ax.errorbar(xs, hist_nb, err_nb, fmt='none', alpha=0.5, c='k', elinewidth=1, label=None)
ax.errorbar(xs+0.05, hist_m45, err_m45, fmt='none', alpha=0.5, c='r', elinewidth=1, label=None)
ax.scatter(xs, hist_nb, marker='^', edgecolors='k', facecolor='k', alpha=0.5, s=20, label=r'$r<100\ \mathrm{pc}$')
ax.scatter(xs+0.05, hist_m45, marker='d', edgecolors='r', facecolor='r', alpha=0.5, s=20, label=r'$\mathrm{M45}$')
ax.plot([-1,4,4,-1,-1], [0,0,0.04,0.04,0], c='gray', ls='--')
ax.fill_between([-1,4], [0,0], [0.04,0.04], facecolor='gray', alpha=0.1)
ax.set_xlabel(r'$\mathrm{G}$')
ax.set_ylabel(r'$f\,(\mathrm{G})$')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks([-2, 0, 2, 4, 6, 8, 10, 12])
ax.set_xticklabels([r'$-2$', r'$0$', r'$2$', r'$4$', r'$6$', r'$8$', r'$10$', r'$12$'])
ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4])
ax.set_yticklabels([r'$0$', r'$0.1$', r'$0.2$', r'$0.3$', r'$0.4$'])
ax.legend(loc=1)
# top ticks
secax = ax.twiny()
secax.set_xlabel(r'$M\,/\,M_\odot$')
secax.set_xlim(x_min, x_max)
secax.set_xticks(-np.array([np.log10(6), np.log10(5), np.log10(4), np.log10(3), np.log10(2), np.log10(1),
np.log10(0.9), np.log10(0.8), np.log10(0.7), np.log10(0.6), np.log10(0.5), np.log10(0.4),
np.log10(0.3), np.log10(0.2)])*8.75+5.2) # G_sun = 5.2
secax.set_xticklabels(['', r'$5$', '', '', r'$2$', r'$1$', '', '', '', '', r'$0.5$', '', '', r'$0.2$'])
# small plot
ax2 = fig.add_axes([0.22,0.40,0.4,0.4])
ax2.errorbar(xs, hist_nb, err_nb, fmt='none', alpha=0.8, c='k', label=None)
ax2.errorbar(xs+0.05, hist_m45, err_m45, fmt='none', alpha=0.8, c='r', label=None)
ax2.scatter(xs, hist_nb, marker='^', edgecolors='k', facecolor='k', alpha=0.8, s=40, label=r'$r<100\ \mathrm{pc}$')
ax2.scatter(xs+0.05, hist_m45, marker='d', edgecolors='r', facecolor='r', alpha=0.8, s=40, label=r'$\mathrm{M45}$')
ax2.set_xlim(-1, 4)
ax2.set_ylim(0, 0.04)
ax2.set_xticks([-1, 0, 1, 2, 3, 4])
ax2.set_xticklabels([r'$-1$', r'$0$', r'$1$', r'$2$', r'$3$', r'$4$'])
ax2.set_yticks([0, 0.01, 0.02, 0.03, 0.04])
ax2.set_yticklabels([r'$0$', r'$0.01$', r'$0.02$', r'$0.03$', r'$0.04$'])
# top ticks
secax2 = ax2.twiny()
secax2.set_xlim(-2, 4)
secax2.set_xticks(-np.array([np.log10(6), np.log10(5), np.log10(4), np.log10(3), np.log10(2)])*8.75+5.2) # G_sun = 5.2
secax2.set_xticklabels([r'$6$', r'$5$', r'$4$', r'$3$', r'$2$'])
plt.savefig('./figures/pdmf.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
Get MF from luminosity functions
###Code
# plot parameters
x_min, x_max = np.log10(0.15), np.log10(5)
y_min, y_max = 0, 2
# main plot
fig, ax = plt.subplots(figsize=(6,6))
# get MF from luminosity functions
m_nb = 10**(-(xs[4:]-5.2)/8.75) # corresponding mass
m_edges_nb = 10**(-(10**(-(bin_edges[4:]-5.2)/8.75)-5.2)/8.75) # corresponding mass lags
fm_nb = hist_nb[4:] * 8.75 * 10**((xs[4:]-5.2)/8.75)/np.log(10) # pdmf
fm_err_nb = err_nb[4:] * 8.75 * 10**((xs[4:]-5.2)/8.75)/np.log(10)
fm_m45 = hist_m45[4:] * 8.75 * 10**((xs[4:]-5.2)/8.75)/np.log(10) # imf
fm_err_m45 = err_m45[4:] * 8.75 * 10**((xs[4:]-5.2)/8.75)/np.log(10)
eta = fm_nb / fm_m45
eta_err = eta * np.sqrt((fm_err_nb/fm_nb)**2 + (fm_err_m45/fm_m45)**2)
ax.errorbar(np.log10(m_nb), eta, eta_err, fmt='none', alpha=0.8, c='m', elinewidth=1, label=None)
ax.scatter(np.log10(m_nb), eta, marker='o', edgecolors='m', facecolor='m', alpha=0.8, s=20, label=r'$r<100\ \mathrm{pc}$')
ax.axhline(1, ls='-.', c='gray')
ax.set_xlabel(r'$M\,/\,M_\odot$')
ax.set_ylabel(r'$\eta\,(M)$')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(np.array([np.log10(5), np.log10(4), np.log10(3), np.log10(2), np.log10(1),
np.log10(0.9), np.log10(0.8), np.log10(0.7), np.log10(0.6), np.log10(0.5), np.log10(0.4),
np.log10(0.3), np.log10(0.2)])) # G_sun = 5.2
ax.set_xticklabels([r'$5$', '', '', r'$2$', r'$1$', '', '', '', '', r'$0.5$', '', '', r'$0.2$'])
ax.set_yticks([0, 0.5, 1, 1.5, 2])
ax.set_yticklabels([r'$0$', r'$0.5$', r'$1$', r'$1.5$', r'$2$'])
# top ticks
secax = ax.twiny()
secax.set_xlabel(r'$\left.T\,(M)\,\right/\,T_\odot$')
secax.set_xlim(x_min, x_max)
secax.set_xticks(-(np.array([-1, 0,1,2]))/2.5) # G_sun = 5.2
secax.set_xticklabels([r'$0.1$', r'$1$', r'$10$', r'$100$'])
plt.show()
###Output
_____no_output_____
###Markdown
Get SFH from LFs
###Code
newx = (m_nb[1:] + m_nb[:-1])/2
psi_list = np.zeros([10000,len(newx)])
for i in range(10000):
test_eta = np.random.normal(eta, eta_err)
d_eta = (test_eta[1:] - test_eta[:-1]) / (m_nb[1:] - m_nb[:-1])
psi_list[i] = -d_eta * newx**3.5
psi = np.mean(psi_list, axis=0)
psi_err = np.std(psi_list, axis=0)
# plot parameters
x_min, x_max = np.log10(0.15), np.log10(5)
y_min, y_max = -70, 70
# main plot
fig, ax = plt.subplots(figsize=(6,6))
ax.errorbar(np.log10(newx), psi, psi_err, fmt='none', alpha=0.8, c='m', elinewidth=1, label=None)
ax.scatter(np.log10(newx), psi, marker='o', edgecolors='m', facecolor='m', alpha=0.8, s=20, label=r'$r<100\ \mathrm{pc}$')
ax.axhline(0, ls='-.', c='gray')
ax.scatter(np.log10(newx[8]), psi[8], marker='*', c='r', s=160)
print('red star mass: %f M_sun' % newx[8], '; time: %f Gyr' % (10*(newx[8])**(-2.5)))
ax.set_xlabel(r'$M\,/\,M_\odot$')
ax.set_ylabel(r'$\psi\,(M)$')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(np.array([np.log10(5), np.log10(4), np.log10(3), np.log10(2), np.log10(1),
np.log10(0.9), np.log10(0.8), np.log10(0.7), np.log10(0.6), np.log10(0.5), np.log10(0.4),
np.log10(0.3), np.log10(0.2)])) # G_sun = 5.2
ax.set_xticklabels([r'$5$', '', '', r'$2$', r'$1$', '', '', '', '', r'$0.5$', '', '', r'$0.2$'])
ax.set_yticks([0])
ax.set_yticklabels([r'$0$'])
# top ticks
secax = ax.twiny()
secax.set_xlabel(r'$\left.T\,(M)\,\right/\,T_\odot$')
secax.set_xlim(x_min, x_max)
secax.set_xticks(-np.array([-1, 0,1,2])/2.5) # G_sun = 5.2
secax.set_xticklabels([r'$0.1$', r'$1$', r'$10$', r'$100$'])
plt.savefig('./figures/sfh.pdf')
plt.show()
###Output
red star mass: 2.010562 M_sun ; time: 1.744642 Gyr
|
Codigo/20211102Clase8.ipynb | ###Markdown
Introduction to Algotrading
Ing. Carlos Crespo Elizondo, MSFMF-013 Análisis de Inversión (Investment Analysis)
Class of October 26, 2021
Master of Finance, Facultad de Economía, UANL

Working with financial data
> _"Clearly, data beats algorithms. Without comprehensive data you tend to get non-comprehensive predictions."_ Rob Thomas (General Manager of IBM's Analytics Business division).

Types of financial information (examples):

| | Structured data | Unstructured data |
|---|---|---|
| Historical data | Closing prices | Financial news |
| Real-time data | Crypto bid/ask prices | A tweet from Elon Musk |

File types: there are many data formats that come from external sources. For the rest of the course we will work with CSV and JSON files.

CSV files are simple text files whose values are separated by commas; CSV is the abbreviation of "Comma Separated Values". In most CSV files the first row holds the column headers and every later row is a data entry. In other cases, the first few rows hold specifications about the file in question. These files are usually downloaded manually by the user.

JSON files store data structures in JSON (JavaScript Object Notation) format, a format used to transmit data between an application and a server.

Import WALMEX prices. Steps:
1. Import the data from the internet, store it in a pandas DataFrame, and save it as "__*walmex*__"
1. DataFrame format:
 * Index: date column
 * Oldest date: Index 0
 * Most recent date: Index -1
 * Column names and order: "Apertura", "Maximo", "Minimo", "Cierre"$^+$
1. Create a DataFrame column with the log returns of the daily closing prices
1. Produce the following plots:
 * Closing prices
 * Daily returns
 * Histogram of the returns

__**NOTES:**__ $^+$ I don't recommend using accented characters when naming variables, columns, DataFrames, etc.

List of tickers: ac alfaa alpeka alsea amxl asurb bimboa bolsaa cemexcpo elektra femsaubd gapb gcarsoa1 gcc gmexicob grumab ienova kimbera kofubl labb livepolc1 megacpo omab orbia penoles pinfra tlevisacpo walmex

Getting data for more stocks: in the link, replace "walmex" with the ticker of interest, for example "ac": http://bit.ly/oncedos-walmex ---> http://bit.ly/oncedos-ac

The pandas `read_csv( )` function: `read_csv( )` lets us control several parameters and returns a DataFrame. The `DataFrame` class has many methods that are widely used in finance. https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
1. Import the Walmex data
###Code
url = 'http://bit.ly/oncedos-walmex'
walmex = pd.read_csv(url)
walmex.head(10)
###Output
_____no_output_____
###Markdown
Import the data without the first 6 lines
###Code
walmex = pd.read_csv(url, skiprows=6)
walmex.head()
walmex.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2455 entries, 0 to 2454
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 2455 non-null object
1 PX_LAST 2455 non-null float64
2 Change 699 non-null float64
3 % Change 699 non-null float64
4 PX_OPEN 2455 non-null float64
5 Change.1 699 non-null float64
6 % Change.1 699 non-null float64
7 PX_HIGH 2455 non-null float64
8 Change.2 699 non-null float64
9 % Change.2 699 non-null float64
10 PX_LOW 2455 non-null float64
11 Change.3 699 non-null float64
12 % Change.3 699 non-null float64
dtypes: float64(12), object(1)
memory usage: 249.5+ KB
###Markdown
2. DataFrame format 2.1 Index: date column Two options for making the "*__Date__*" column the index:1. Set the "*__Date__*" column as the index manually.2. Import the data specifying that the "_**Date**_" column is the index. Once the `DataFrame` has already been imported
###Code
walmex.set_index('Date', inplace=True)
walmex.head()
###Output
_____no_output_____
###Markdown
Import the data specifying that the "Date" column will be the index
###Code
walmex = pd.read_csv(url, skiprows=6, index_col=0)
walmex.head()
walmex.loc['04/10/2019']
walmex.iloc[0]
###Output
_____no_output_____
###Markdown
2.2 / 2.3 Date order
###Code
walmex.sort_index(axis=0, inplace=True)
walmex.head()
walmex.head(20)
walmex = pd.read_csv(url, skiprows=6, index_col=0, parse_dates=True, dayfirst=True)
walmex.info()
###Output
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 2455 entries, 2019-10-04 to 2010-01-04
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PX_LAST 2455 non-null float64
1 Change 699 non-null float64
2 % Change 699 non-null float64
3 PX_OPEN 2455 non-null float64
4 Change.1 699 non-null float64
5 % Change.1 699 non-null float64
6 PX_HIGH 2455 non-null float64
7 Change.2 699 non-null float64
8 % Change.2 699 non-null float64
9 PX_LOW 2455 non-null float64
10 Change.3 699 non-null float64
11 % Change.3 699 non-null float64
dtypes: float64(12)
memory usage: 249.3 KB
###Markdown
The `sort_index( )` method and the `parse_dates` argument
###Code
walmex.sort_index(axis=0, inplace=True)
walmex.head()
walmex.tail()
###Output
_____no_output_____
###Markdown
2.4 Column names and order: "Apertura", "Maximo", "Minimo", "Cierre" Final changes to the df:1. Drop the columns we won't use1. Rename the columns to Spanish1. Reorder the columns into the O-H-L-C format 2.4.1 Drop columns
###Code
borrar_columnas = ['Change', '% Change','Change.1', '% Change.1','Change.2', '% Change.2','Change.3', '% Change.3']
walmex.drop(borrar_columnas, axis=1, inplace=True)
walmex.head()
###Output
_____no_output_____
###Markdown
2.4.2 Rename columns
###Code
dicc_renombrar = {'PX_LAST':'Cierre', 'PX_OPEN':'Apertura', 'PX_HIGH':'Maximo', 'PX_LOW':'Minimo'}
walmex.rename(dicc_renombrar, axis=1, inplace=True)
walmex.head()
###Output
_____no_output_____
###Markdown
2.4.3 Reorder columns
###Code
orden_columnas = ['Apertura', 'Maximo', 'Minimo', 'Cierre']
walmex.reindex(columns=orden_columnas, inplace=True)
walmex.reindex(columns=orden_columnas)
walmex.head()
walmex = walmex.reindex(columns=orden_columnas)
walmex.head()
###Output
_____no_output_____
###Markdown
Doing it all again in a single cell
###Code
del walmex
columnas_a_importar = ['Date', 'PX_OPEN', 'PX_HIGH', 'PX_LOW', 'PX_LAST']
walmex = pd.read_csv(url, skiprows = 6, index_col = 0, parse_dates = True, dayfirst = True,
usecols = columnas_a_importar)
walmex.rename({'PX_LAST':'Cierre', 'PX_OPEN':'Apertura', 'PX_HIGH':'Maximo', 'PX_LOW':'Minimo'}, axis = 1,
inplace = True)
orden_columnas = ['Apertura', 'Maximo', 'Minimo', 'Cierre']
walmex = walmex.reindex(columns=orden_columnas)
walmex.sort_index(inplace = True)
walmex.head()
walmex.tail()
###Output
_____no_output_____
###Markdown
Creating functions A function is a block of code designed to do a specific job. Functions can:* Receive values* Have *default* values for the case where no values are passed (see the sketch after the examples below)* RECEIVE NO VALUES AT ALL!* Return a result* Return nothing
###Code
a = 2
b = 3
suma1 = a+b
print(suma1)
i = 8
j = 10
suma2 = i+j
print(suma2)
y = 10
z = 20
suma_n = y + z
print(suma_n)
def funcion_sumar(valor_1, valor_2):
suma = valor_1 + valor_2
print(suma)
return suma
suma_n = funcion_sumar(10, 20)
suma_n
suma1 = funcion_sumar(100,200)
suma2 = funcion_sumar(10,40)
suma3 = funcion_sumar(3,3)
suma2
suma3
###Output
_____no_output_____
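The examples above always pass both arguments explicitly. As mentioned in the list above, parameters can also have default values; here is a minimal sketch (not from the original class code):

```python
def funcion_sumar_con_default(valor_1, valor_2=10):
    # valor_2 falls back to 10 when the caller does not pass it
    return valor_1 + valor_2

print(funcion_sumar_con_default(5))      # 15 -> uses the default
print(funcion_sumar_con_default(5, 20))  # 25 -> overrides the default
```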
###Markdown
Function to import data from Bloomberg files
###Code
def importar_bloomberg(accion):
url = f'http://bit.ly/oncedos-{accion}'
columnas_a_importar = ['Date', 'PX_OPEN', 'PX_HIGH', 'PX_LOW', 'PX_LAST']
df = pd.read_csv(url, skiprows = 6, index_col = 0, parse_dates = True, dayfirst = True,
usecols = columnas_a_importar)
df.rename({'PX_LAST':'Cierre', 'PX_OPEN':'Apertura', 'PX_HIGH':'Maximo', 'PX_LOW':'Minimo'}, axis = 1,
inplace = True)
orden_columnas = ['Apertura', 'Maximo', 'Minimo', 'Cierre']
df = df.reindex(columns=orden_columnas)
df.sort_index(inplace = True)
return df
ac = importar_bloomberg('ac')
ac.tail()
alfaa = importar_bloomberg('alfaa')
alfaa.head()
###Output
_____no_output_____
###Markdown
Building the HERRAMIENTAS FINANCIERAS (financial tools) libraryWe did this section in the python file `herramientas_financieras.py` Testing the HERRAMIENTAS FINANCIERAS library in another notebookWe did that section in another jupyter notebook, `20211102Clase8PruebasLibreria.ipynb` 2.3 Create a column of daily returns$$RetLogaritmico = \ln(Precio_n) - \ln(Precio_{n-1})$$$$ =\ln\frac{Precio_n}{Precio_{n-1}}$$
###Code
walmex.head()
import numpy as np
walmex['Ret'] = np.log(walmex['Cierre'] / walmex['Cierre'].shift(1))
walmex.head()
walmex.tail()
walmex.dropna(inplace=True)
walmex.head()
###Output
_____no_output_____
###Markdown
Returns on the logarithmic scale
###Code
walmex['Ret'].sum()
###Output
_____no_output_____
###Markdown
Returns on the simple (non-log) scale
###Code
( walmex['Cierre'].iloc[-1] - walmex['Cierre'].iloc[0] ) / walmex['Cierre'].iloc[0]
np.exp(walmex['Ret'].sum()) - 1
###Output
_____no_output_____
###Markdown
2.4 Plotting 2.4.1 Closing price
###Code
walmex['Cierre'].plot(figsize=(12,8), title='Precios de cierre WALMEX');
###Output
_____no_output_____
###Markdown
2.4.2 Returns
###Code
walmex['Ret'].plot(figsize=(12,8), title='Retornos diarios WALMEX');
###Output
_____no_output_____
###Markdown
2.4.3 Histogram
###Code
walmex['Ret'].hist(figsize=(12,8), bins = 10);
walmex['Ret'].plot(kind='hist', figsize=(12,8), bins=18, title='Histograma retornos Walmex');
###Output
_____no_output_____ |
TradingAI/AI Algorithms in Trading/Lesson 26 - Optimization with Transaction Costs /optimization_with_tcosts.ipynb | ###Markdown
Optimization with Transaction costsIn this lesson, we’ll show you how to incorporate transaction costs into portfolio optimization. This will give your backtest a more realistic measure of your alpha’s performance. In addition, we’ll show you some additional ways to design your optimization with efficiency in mind. This is really helpful when backtesting, because having reasonably shorter runtimes allows you to test and iterate on your alphas more quickly.
###Code
import sys
!{sys.executable} -m pip install -r requirements.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import gzip
import bz2
from statsmodels.formula.api import ols
from scipy.stats import gaussian_kde
import scipy
import scipy.sparse
import patsy
from statistics import median
import datetime
###Output
_____no_output_____
###Markdown
Barra dataWe’ll be using factor data that is generated by Barra. This will be good practice because Barra data is used throughout the industry. Note that we've pre-processed the raw barra data files and stored the data into pickle files. The alternative would be to load the original data, and perform the parsing each time. Since parsing and pre-processing takes time, we recommend doing the pre-processing once and saving the pre-processed data for later use in your backtest.Choose the number of years to use for the backtest. The data is available for years 2003 to 2008 inclusive.
###Code
barra_dir = '../../data/project_8_barra/'
!ls {barra_dir}
data = {}
for year in [2003]:
fil = barra_dir + "pandas-frames." + str(year) + ".pickle"
data.update(pickle.load( open( fil, "rb" ) ))
covariance = {}
for year in [2003]:
fil = barra_dir + "covariance." + str(year) + ".pickle"
covariance.update(pickle.load( open(fil, "rb" ) ))
daily_return = {}
for year in [2003, 2004]:
fil = barra_dir + "price." + str(year) + ".pickle"
daily_return.update(pickle.load( open(fil, "rb" ) ))
###Output
_____no_output_____
###Markdown
Notice that the data is stored in a dictionary, where the keys are strings representing each business day. View the Barra dataWe'll take a look at the value stored for a single day (it's a data frame). As a general reminder of best practices, remember to check what unit of measure your data is in. In some cases, the unit of measure isn't available in the documentation, so you'll want to inspect the data to see what makes sense.For instance, there are volatility fields that are large enough that we can assume they are in percentage units, as opposed to decimal values. In other cases, when we look at daily volume, we may not have documentation about whether the units are in number of shares or in dollars. One way to find this out is to spot check a single stock on a single day, and cross-reference with another source, such as Bloomberg or Yahoo Finance.Remember to inspect the data before you use it, as it will help you derive more meaningful results in your portfolio optimization, and in your backtest. In the exercise, we'll re-scale the data before using it, and there will be comments to point out when we re-scale the data. So don't worry about adjusting anything here, just take a look to get familiar with the data.
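For example, a minimal spot-check sketch (the asset picked here is arbitrary; substitute any Barrid you can cross-reference against an outside source such as Bloomberg or Yahoo Finance):

```python
# pull one asset's record for one day and eyeball fields such as IssuerMarketCap
one_day = data['20030102']
spot_id = one_day['Barrid'].iloc[0]   # arbitrary asset, for illustration only
print(one_day.loc[one_day['Barrid'] == spot_id].T)
```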
###Code
data.keys()
data['20030102'].head()
data['20030102'].shape
###Output
_____no_output_____
###Markdown
FactorsNote that the data fields that start with the prefix U-S-F-A-S-T are factor exposures, one column for each factor. We will use some of these as alpha factors, and the rest as risk factors. The reason this makes sense is that, for the time periods in which we’re back-testing, some of these factors were able to produce better than average returns. Barra works with its clients (funds) and gathers information about alphas that worked in the past. These were calculated on historical data to produce the factor exposure data found in the Barra data.  FactorsHere's a partial list of the barra factors in our dataset and their definitions. These are collected from documentation by Barra. There are style factors and industry factors. The industry factors will be used as risk factors. You can consider using the style factors as alpha factors. Any factors not used as alpha factors can be included in the risk factors category. Style factors* beta: Describes market risk that cannot be explained by the Country factor. The Beta factor is typically the most important style factor. We calculate Beta by time-series regression of stock excess returns against the market return.* 1 day reversal* dividend yield: Describes differences in stock returns attributable to stock's historical and predicted dividend-to-price ratios.* downside risk (maximum drawdown)* earnings quality: Describes stock return differences due to the accrual components of earnings.* earnings yield: Describes return differences based on a company’s earnings relative to its price. Earnings Yield is considered by many investors to be a strong value signal. The most important descriptor in this factor is the analyst-predicted 12-month earnings-to-price ratio.* growth: Differentiates stocks based on their prospects for sales or earnings growth. The most important descriptor in this factor is the analyst predicted long-term earnings growth. Other descriptors include sales and earnings growth over the previous five years.* leverage: Describes return differences between high and low-leverage stocks. The descriptors within this style factor include market leverage, book leverage, and debt-to-assets ratio.* liquidity: Describes return differences due to relative trading activity. The descriptors for this factor are based on the fraction of total shares outstanding that trade over a recent window.* long-term reversal: Describes common variation in returns related to a long-term (five years ex. recent thirteen months) stock price behavior.* management quality* Mid capitalization: Describes non-linearity in the payoff to the Size factor across the market-cap spectrum. This factor is based on a single raw descriptor: the cube of the Size exposure. However, because this raw descriptor is highly collinear with the Size factor, it is orthogonalized with respect to Size. This procedure does not affect the fit of the model, but does mitigate the confounding effects of collinearity, while preserving an intuitive meaning for the Size factor. As described by Menchero (2010), the Mid Capitalization factor roughly captures the risk of a “barbell portfolio” that is long mid-cap stocks and short small-cap and large-cap stocks.* Momentum – Differentiates stocks based on their performance over the trailing 12 months. When computing Momentum exposures, we exclude the most recent returns in order to avoid the effects of short-term reversal. 
The Momentum factor is often the second strongest factor in the model, although sometimes it may surpass Beta in importance.* Profitability – Combines profitability measures that characterize efficiency of a firm's operations and total activities.* Residual Volatility – Measures the idiosyncratic volatility anomaly. It has three descriptors: (a) the volatility of daily excess returns, (b) the volatility of daily residual returns, and (c) the cumulative range of the stock over the last 12 months. Since these descriptors tend to be highly collinear with the Beta factor, the Residual Volatility factor is orthogonalized with respect to the Beta and Size factors.* seasonality* sentiment* Size – Represents a strong source of equity return covariance, and captures return differences between large-cap and small-cap stocks. We measure Size by the log of market capitalization.* Short term reversal* Value* Prospect -- is a function of skewness and maximum drawdown.* Management Quality -- is a function of the following: * Asset Growth: Annual reported company assets are regressed against time over the past five fiscal years. The slope coefficient is then divided by the average annual assets to obtain the asset growth. * Issuance Growth Annual reported company number of shares outstanding regressed against time over the past five fiscal years. The slope coefficient is then divided by the average annual number of shares outstanding. * Capital Expenditure Growth: Annual reported company capital expenditures are regressed against time over the past five fiscal years. The slope coefficient is then divided by the average annual capital expenditures to obtain the capital expenditures growth. * Capital Expenditure: The most recent capital expenditures are scaled by the average of capital expenditures over the last five fiscal years. Industry Factors* aerospace and defense* airlines* aluminum and steel* apparel* Automotive* banks* beta (market)* beverage and tobacco* biotech & life science* building products* chemicals* construction & engineering* construction & machinery* construction materials* commercial equipment* computer & electronics* commercial services* industrial conglomerates* containers (forest, paper, & packaging)* distributors* diversified financials* electrical equipment* electrical utility* food & household products & personal* food & staples retailing* gas & multi-utilities* healthcare equipment and services* health services* home building* household durables* industry machinery* non-life insurance* leisure products* leisure services* life insurance* managed healthcare* multi-utilities* oil & gas conversion* oil & gas drilling* oil & gas equipment* oil and gas export* paper* pharmaceuticals* precious metals* personal products* real estate* restaurants* road & rail* semiconductors* semiconductors equipment* software* telecommunications* transportation* wireless* SPTY\* and SPLTY\* are various industries
###Code
data['20030102'].columns
###Output
_____no_output_____
###Markdown
covariance of factorsLet's look at the covariance of the factors.
###Code
covariance.keys()
###Output
_____no_output_____
###Markdown
View the data for a single day. Notice that the factors are listed in two columns, followed by the covariance between them. We'll use this data later to create a factor covariance matrix.
###Code
covariance['20030102'].head()
###Output
_____no_output_____
###Markdown
Daily returns
###Code
daily_return.keys()
daily_return['20030102'].head()
###Output
_____no_output_____
###Markdown
Add date for returnsWe'll be dealing with two different dates; to help us keep track, let's add an additional column in the daily_return dataframes that stores the date of the returns.
###Code
tmp_date = '20030102'
tmp = daily_return[tmp_date]
tmp.head()
tmp_n_rows = tmp.shape[0]
pd.Series([tmp_date]*tmp_n_rows)
tmp['DlyReturnDate'] = pd.Series([tmp_date]*tmp_n_rows)
tmp.head()
###Output
_____no_output_____
###Markdown
Quiz: add daily return date to each dataframe in daily_return dictionaryName the column `DlyReturnDate`.**Hint**: create a list containing copies of the date, then create a pandas series.
###Code
for DlyReturnDate, df in daily_return.items():
# TODO
n_rows = df.shape[0]
df['DlyReturnDate'] = pd.Series([DlyReturnDate]*n_rows)
# check results
daily_return['20030102'].head()
###Output
_____no_output_____
###Markdown
Adjust dates to account for trade executionThe data stored in `data` and `covariance` are used to choose the optimal portfolio, whereas the data in `daily_return` represents the returns that the optimized portfolio would realize, but only after we've received the data, then chosen the optimal holdings, and allowed a day to trade into the optimal holdings. In other words, if we use the data from `data` and `covariance` that is collected at the end of Monday, we'll use portfolio optimization to choose the optimal holdings based on this data, perhaps after hours on Monday. Then on Tuesday, we'll have a day to execute trades to adjust the portfolio into the optimized positions. Then on Wednesday, we'll realize the returns using those optimal holdings.
###Code
# Example of what we want
data_date_l = sorted(data.keys())
return_date_l = sorted(daily_return.keys())
len(data_date_l)
len(return_date_l)
return_date_l_shifted = return_date_l[2:len(data) + 2]
len(return_date_l_shifted)
# data date
data_date_l[0]
# returns date
return_date_l_shifted[0]
tmp = data['20030102'].merge(daily_return['20030102'], on="Barrid")
tmp.head()
###Output
_____no_output_____
###Markdown
Merge data and daily returns into a single dataframeUse a loop to merge the `data` and `daily_return` tables on the `Barrid` column.
###Code
frames ={}
# TODO
dlyreturn_n_days_delay = 2
# TODO
date_shifts = zip(
sorted(data.keys()),
sorted(daily_return.keys())[dlyreturn_n_days_delay:len(data) + dlyreturn_n_days_delay])
# TODO
for data_date, price_date in date_shifts:
frames[price_date] = data[data_date].merge(daily_return[price_date], on='Barrid')
###Output
_____no_output_____
###Markdown
Let's work with a single day's data. Later, we'll put this into a loopNotice how the keys are now dates of the returns. So the earliest date in "frames" dictionary is two business days after the earliest date in "data" dictionary.
###Code
frames.keys()
df = frames['20030106']
df.head()
###Output
_____no_output_____
###Markdown
QuizFilter the stocks so that the estimation universe has stocks with at least 1 billion in market cap. As an aside, it doesn't make much of a difference whether we choose a ">" or ">=", since the threshold we choose is just meant to get a set of relatively liquid assets.**Hint**: use `.copy(deep=True)` to make an independent copy of the data.
###Code
# TODO
estu = df.loc[df.IssuerMarketCap >= 1e9].copy(deep=True)
estu.head()
###Output
_____no_output_____
###Markdown
For all the columns in the dataframe, the ones with the prefix "USFAST" are factors. We'll use a helper function to get the list of factors.
###Code
def factors_from_names(n):
return(list(filter(lambda x: "USFASTD_" in x, n)))
all_factors = factors_from_names(list(df))
all_factors
###Output
_____no_output_____
###Markdown
factors exposures and factor returnsRecall that a factor's factor return times its factor exposure gives the part of a stock's return that is explained by that factor.The Barra data contains the factor exposure of each factor. We'll use regression to estimate the factor returns of each factor, on each day. The observations will be the cross section of stock factor exposures, as well as the stock returns that are realized two trading days later. Recall from an earlier lesson that this is a cross-sectional regression, because it's a cross section of stocks, for a single time period.$r_{i,t} = \sum_{j=1}^{k} (\beta_{i,j,t-2} \times f_{j,t})$ where $i=1...N$ (N assets), and $j=1...k$ (k factors).In the regression, the factor exposure, $\beta_{i,j,t-2}$ is the independent variable, $r_{i,t}$ is the dependent variable, and the factor return $f_{j,t}$ is the coefficient that we'll estimate. Calculating factor returnsWe'll estimate the factor returns $f_{j,t}$ of our chosen alpha factors, using the daily returns of the stocks $r_{i,t}$, where $i=1...N$ and the factor exposure $\beta_{i,j,t-2}$ of each stock to each factor. Note that we'll use a universe of stocks where the companies have a market capitalization of at least 1 billion. The factor returns estimated would be slightly different depending on which stock universe is chosen, but choosing a market cap of 1 billion or more provides a reasonable estimate of what you'd expect to be tradable. The estimated factor returns would be fairly close to what you'd find if you used the Russell 3000 index as the stock universe. formulaWe'll use a helper function that creates a string that defines which are the independent and dependent variables for a model to use. This string is called a "formula." We'll use this in the regression, and later again when we work with matrices.
###Code
def get_formula(factors, Y):
L = ["0"]
L.extend(factors)
return Y + " ~ " + " + ".join(L)
form = get_formula(all_factors, "DlyReturn")
###Output
_____no_output_____
###Markdown
So, the formula is saying `DlyReturn` is the dependent variable, whereas the `USFAST...` columns are the independent variables.
###Code
form
###Output
_____no_output_____
###Markdown
QuizRun an ordinary least squares regression[ols documentation](https://www.statsmodels.org/dev/example_formulas.html)Here's an example of the syntax.```ols(formula='y ~ x1 + x2 + x3', data=dataframe)```Note that you're free to choose other regression models, such as ridge, lasso, or elastic net. These may give you slightly different estimations of factor returns, but shouldn't be too different from each other.
###Code
# TODO
model = ols(formula=form, data=estu)
# TODO
results = model.fit()
###Output
_____no_output_____
###Markdown
Since the factor data that we're using as the independent variables are the factor exposures, the coefficients estimated by the regression are the estimated factor returns.
###Code
results.params
###Output
_____no_output_____
###Markdown
Quiz: winsorize daily returns before calculating factor returnsWe're going to use regression to estimate the factor returns of all the factors. To avoid using extreme values in the regression, we'll winsorize, or "clip" the returns. We can check the data distribution using a density plot.Note that [numpy.where](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.where.html) has the form ```numpy.where(condition, value_if_true, value_if_false)```
###Code
def wins(x,wins_lower,wins_upper):
#TODO
clipped_upper = np.where(x >= wins_upper, wins_upper, x)
clipped_both = np.where(clipped_upper <= wins_lower,wins_lower, clipped_upper)
return clipped_both
###Output
_____no_output_____
###Markdown
A density plot will help us visually check the effect of winsorizing returns.
###Code
def density_plot(data):
density = gaussian_kde(data)
xs = np.linspace(np.min(data),np.max(data),200)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs))
plt.show()
# distribution without winsorizing
test = frames['20040102']
density_plot(test['DlyReturn'])
# distribution after winsorizing
test['DlyReturn_wins'] = wins(test['DlyReturn'], wins_lower=-0.1, wins_upper=0.1)
density_plot(test['DlyReturn_wins'])
###Output
_____no_output_____
###Markdown
QuizPut the factor returns estimation into a function, so that this can be re-used for each day's data.
###Code
def estimate_factor_returns(df, wins_lower=-.25, wins_upper=0.25):
## TODO: build estimation universe based on filters
estu = df.loc[df.IssuerMarketCap > 1e9].copy(deep=True)
## TODO: winsorize returns for fitting
estu['DlyReturn'] = wins(estu['DlyReturn'], wins_lower, wins_upper)
## get a list of all the factors
all_factors = factors_from_names(list(df))
## define a 'formula' for the regression
form = get_formula(all_factors, "DlyReturn")
## create the OLS model, passing in the formula and the estimation universe dataframe
model = ols(formula=form, data=estu)
## return the estimated coefficients
results = model.fit()
return(results.params)
###Output
_____no_output_____
###Markdown
Choose alpha factorsWe'll choose the 1 day reversal, earnings yield, value, and sentiment factors as alpha factors. We'll calculate the factor returns of these alpha factors to see how they performed.
###Code
alpha_factors = ["USFASTD_1DREVRSL", "USFASTD_EARNYILD", "USFASTD_VALUE", "USFASTD_SENTMT"]
print(alpha_factors)
###Output
['USFASTD_1DREVRSL', 'USFASTD_EARNYILD', 'USFASTD_VALUE', 'USFASTD_SENTMT']
###Markdown
Quiz: estimate factor returns of alpha factorsLoop through each day in the `frames` dictionary, and estimate the factor returns of each factor on that date. This may take a minute or more to run per year of data used.
###Code
facret = {}
for date in frames:
# TODO: store factor returns as key-value pairs in a dictionary
facret[date] = estimate_factor_returns(frames[date])
type(facret['20040102'])
facret['20040102'].head()
###Output
_____no_output_____
###Markdown
put the factor returns into a dataframeThe pandas series are stored inside a dictionary. We'll put the factor returns into a dataframe where the rows are the dates and the columns are the factor returns (one column for each factor).First, let's get a list of dates, as Timestamp objects. We'll use [pandas.to_datetime](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html)
###Code
# example of how to convert the keys of the dataframe into Timestamp objects
pd.to_datetime('20040102', format='%Y%m%d')
###Output
_____no_output_____
###Markdown
QuizStore the timestamp objects in a list (can use a list comprehension, or for loop).
###Code
# TODO
dates_unsorted = [pd.to_datetime(date, format='%Y%m%d') for date in frames.keys()]
# sort the dates in ascending order
my_dates = sorted(dates_unsorted)
# We'll make an empty dataframe with the dates set as the row index.
facret_df = pd.DataFrame(index = my_dates)
facret_df.head()
###Output
_____no_output_____
###Markdown
The rows are the dates. The columns will be the factor returns. To convert from Timestamp objects back into a string, we can use [Timestamp.strftime('%Y%m%d')](https://www.programiz.com/python-programming/datetime/strftime).
###Code
## example usage of Timestamp.strftime('%Y%m%d')
my_dates[0].strftime('%Y%m%d')
###Output
_____no_output_____
###Markdown
QuizFor each date, and for each factor return, get the value from the dictionary and put it into the dataframe.We can use [pandas.DataFrame.at](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.at.html), ```DataFrame.at[<row>, <column>] = <value>```
###Code
# TODO: for each date (rows), and for each factor (columns),
# store factor return in the dataframe
for dt in my_dates:
for alp in alpha_factors:
facret_df.at[dt, alp] = facret[dt.strftime('%Y%m%d')][alp]
###Output
_____no_output_____
###Markdown
Portfolio optimization for a single periodWhen we get to the project, we'll want to define the portfolio optimization within a function. For now, let's walk through the steps we'll take in separate cells, so that we can see what's going on. The optimization will want to know about the prior trading day's portfolio holdings, also called positions. The previous day's holdings will be used to estimate the size of the trades due to position changes, which in turn helps us estimate transaction costs. We'll start with an initial holding of zero for a single stock. The reason we'll use a single stock is that the estimation universe chosen on each day will include all stocks that have holdings on the previous day. So we want to keep this list small when we first start out; otherwise we'll keep many stocks that may fall below the 1 billion market cap threshold, just because they were chosen in the initialization of the backtest.We'll want to choose a stock that is likely to satisfy the 1 billion market cap threshold on any day. So let's choose the stock with the largest market cap.
###Code
# we're going to set a single barra id to have a zero portfolio holding,
# so let's pick any barra id of the stock with the largest issuer market cap
estu.sort_values('IssuerMarketCap',ascending=False)[['Barrid','IssuerMarketCap']].head()
###Output
_____no_output_____
###Markdown
Quiz: Initialize previous holdings dataframeCreate a new dataframe and initialize it with a dictionary, where the key is "Barrid" followed by a value that is a pandas.Series containing the barra id of the stock with the largest market cap in the stock universe. Also set another key value pair to "x.opt.previous", with the value set to a pandas.Series with the value 0.
###Code
# TODO
# create a dataframe of previous holdings,
# initializing a single stock (barra id) to zero portfolio holding
previous_holdings = pd.DataFrame(data = {"Barrid" : pd.Series( ["USA0001"]),
"x.opt.previous" : pd.Series(0)})
previous_holdings
###Output
_____no_output_____
###Markdown
Get a single day's data to be used for the portfolio optimization.
###Code
dt = my_dates[0]
date = dt.strftime('%Y%m%d')
print(date)
df = frames[date]
df.head()
###Output
20030106
###Markdown
Let's add the previous holdings column to the dataframe
###Code
## merge previous portfolio holdings
df = df.merge(previous_holdings, how = 'left', on = 'Barrid')
df.head()
###Output
_____no_output_____
###Markdown
Clean missing and zero values.First replace missing values with zero.
###Code
def na2z(x):
return(np.nan_to_num(x))
def names_numeric_columns(df):
return(df.select_dtypes(include=[np.number]).columns.tolist())
def clean_nas(df):
for x in names_numeric_columns(df):
df[x] = na2z(df[x])
return(df)
df = clean_nas(df)
###Output
_____no_output_____
###Markdown
Quiz: Clean specific riskBarra calculates specific risk for each asset. If the value in the data is zero, this may be due to missing data rather than the specific risk actually being zero. So we'll set zero values to the median, to make sure our model is more realistic.
###Code
# TODO: if SpecRisk is zero, set it to median
df.loc[df['SpecRisk'] == 0, 'SpecRisk'] = df['SpecRisk'].median()
###Output
_____no_output_____
###Markdown
universeWe'll look at stocks that are 1 billion in market cap or greater. An important point here is that we'll need to account for stocks that are already in our portfolio, even if the market cap of the stock is no longer 1 billion on the current day. Quiz: think about what would happen if we had an existing position in a stock, then the market cap fell below the threshold and the stock was excluded from the stock universe. What would happen to the position on that stock? AnswerThe stock would not be included in the optimization, which means it would be given a zero position. So this effectively says to sell all holdings in the asset once it falls below the market cap threshold. That's not what we want to do. Modify the code to account for the prior day's positions.
###Code
## TODO: modify the given code to include the prior day's assets
universe = df.loc[(df['IssuerMarketCap'] >= 1e9) | (abs(df['x.opt.previous']) > 0)].copy()
universe.head()
###Output
_____no_output_____
###Markdown
Quiz: Nothing here should be allowed to look at returns when forming the portfolio.Make this impossible by removing the Daily returns data from the dataframe. Drop the DlyReturn field from the dataframe.
###Code
# TODO: drop DlyReturn column
universe = universe.drop('DlyReturn', axis=1)
## this will extract all of the factors, including the alphas
# list(universe) gets a list of the column names of the dataframe
all_factors = factors_from_names(list(universe))
all_factors
###Output
_____no_output_____
###Markdown
Alpha factorsJust a reminder that we chose four of these factors that represent previously effective alpha factors. Since these factors became well known over time, they were added to the Barra data set. For the time frame that we're running the back-test, these were effective alpha factors.
###Code
alpha_factors #4 alpha factors
###Output
_____no_output_____
###Markdown
Quiz: risk factorsThe risk factors we'll use are all the factors that are not alpha factors. Complete the setdiff function so that it takes a superset, a subset, and returns the difference as a set.diff= SuperSet \ Subset
###Code
def setdiff(superset, subset):
# TODO
s = set(subset)
diffset = [x for x in superset if x not in s]
return(diffset)
risk_factors = setdiff(all_factors, alpha_factors)
# 77 risk factors
len(risk_factors)
###Output
_____no_output_____
###Markdown
Save initial holdings in a variable for easier access. We'll later use it in matrix multiplications, so let's convert this to a numpy array. We'll also use another variable to represent the current holdings, which are to be run through the optimizer. We'll set this to be a copy of the previous holdings. Later the optimizer will continually update this to optimize the objective function.
###Code
## initial holdings (before optimization)
# optimal holding from prior day
h0 = np.asarray( universe['x.opt.previous'] )
h = h0.copy()
###Output
_____no_output_____
###Markdown
Matrix of Risk Factor Exposures $\textbf{B}$The dataframe contains several columns that we'll use as risk factor exposures. Extract these and put them into a matrix.The data, such as industry category, are already one-hot encoded, but if this were not the case, then using `patsy.dmatrices` would help, as this function extracts categories and performs the one-hot encoding. We'll practice using this package, as you may find it useful with future data sets. You could also store the factors in a dataframe if you prefer to avoid using patsy.dmatrices. How to use patsy.dmatricespatsy.dmatrices takes in a formula and the dataframe. The formula tells the function which columns to take. The formula will look something like this: `SpecRisk ~ 0 + USFASTD_AERODEF + USFASTD_AIRLINES + ...` where the variable to the left of the ~ is the "dependent variable" and the others to the right are the independent variables (as if we were preparing data to be fit to a model).This just means that the patsy.dmatrices function will return two matrix variables, one that contains the single column for the dependent variable `outcome`, and the independent variable columns are stored in a matrix `predictors`.The `predictors` matrix will contain the matrix of risk factors, which is what we want. We don't actually need the `outcome` matrix; it's just created because that's the way patsy.dmatrices works.
###Code
# Note that we chose "SpecRisk" simply because it's not one of the USFAST factors.
# it will be discarded in the next step.
formula = get_formula(risk_factors, "SpecRisk")
formula
# the factors will be in the second returned variable (predictors)
# the outcome variable contains the SpecRisk data, which we don't actually need here
outcome, predictors = patsy.dmatrices(formula,universe)
###Output
_____no_output_____
###Markdown
`predictors` contains the factor exposures of each asset to each factor.
###Code
predictors.shape
###Output
_____no_output_____
###Markdown
Factor exposure matrix $\textbf{B}$Remember, the factor exposure matrix has the exposure of each asset to each factor. The number of rows is the number of assets, and the number of columns is the number of factors.
###Code
def NROW(x):
return(np.shape(x)[0])
def NCOL(x):
return(np.shape(x)[1])
###Output
_____no_output_____
###Markdown
QuizSet the factor exposure matrix and its transpose, using one of the outputs from calling patsy.dmatrices
###Code
## TODO: risk exposure matrix:
B = predictors
BT = B.transpose()
k = NCOL(B) #number of factors (77)
n = NROW(B) #number of assets (2000+)
###Output
_____no_output_____
###Markdown
Factor covariance matrix $\textbf{F}$We can improve on the factor covariance matrix by reducing noise and also increasing computational efficiency.If we have 70 risk factors in our risk model, then the covariance matrix of factors is a 70 by 70 square matrix. The diagonal contains the variances of each factor, while the off-diagonals contain the pairwise covariances of two different risk factors. In general, it’s good to have a healthy suspicion of correlations and covariances, and to ask if correlation data adds information or just more noise. One way to be conservative about the information in a covariance matrix is to shrink the covariances, or even reduce them to zero. In other words, we could keep just the variances along the diagonal, and set the covariances in the off-diagonals to zero. In the case where we’re using the covariance matrix in a risk factor model, there’s also some additional intuition for why we can try using just the variances, and discard the covariances. The goal of the optimizer is to reduce the portfolio’s exposure to these risk factors. So if the optimizer reduces the portfolio’s exposure to risk factor “one”, and also reduces its exposure to risk factor “two”, then it’s less important to know exactly how factor one varies with factor two.You may wonder what the benefits are of throwing away the information about the covariances. In addition to making your model more conservative, and limiting possible noise in your data, a diagonal matrix also makes matrix operations more efficient. This theme of computational efficiency is one that you’ll come across in many use cases, including backtesting. Backtesting is a computationally and time-intensive process, so the more efficient you can make it, the more quickly you can test your alphas, and iterate to make improvements. Create Factor covariance matrix $\textbf{F}$You can try getting all covariances into the matrix. Notice that we'll run into some issues where the covariance data doesn't exist.One important point to remember is that we need to order the factors in the covariance matrix F so that they match up with the order of the factors in the factor exposures matrix B.Note that the covariance data is in percentage units squared, so we'll rescale it to convert it to decimals.
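To make the "keep only the variances" idea concrete, here's a tiny standalone illustration with a hypothetical 3x3 covariance matrix (the numbers are invented, not Barra data):
```python
import numpy as np

# Hypothetical 3x3 factor covariance matrix (illustrative values only)
cov = np.array([[4.0, 1.2, 0.3],
                [1.2, 9.0, 0.7],
                [0.3, 0.7, 1.0]])

# np.diag(cov) extracts the variances; wrapping it in np.diag again builds
# a diagonal matrix with the off-diagonal covariances set to zero.
diag_only = np.diag(np.diag(cov))
print(diag_only)
```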
###Code
## With all covariances
def colnames(X):
if(type(X) == patsy.design_info.DesignMatrix):
return(X.design_info.column_names)
if(type(X) == pandas.core.frame.DataFrame):
return(X.columns.tolist())
return(None)
## extract a diagonal element from the factor covariance matrix
def get_cov_version1(cv, factor1, factor2):
try:
return(cv.loc[(cv.Factor1==factor1) & (cv.Factor2==factor2),"VarCovar"].iloc[0])
except:
print(f"didn't find covariance for: factor 1: {factor1} factor2: {factor2}")
return 0
def diagonal_factor_cov_version1(date, B):
"""
Notice that we'll use the order of column names of the factor exposure matrix
to set the order of factors in the factor covariance matrix
"""
cv = covariance[date]
k = NCOL(B)
Fm = np.zeros([k,k])
for i in range(0,k):
for j in range(0,k):
fac1 = colnames(B)[i]
fac2 = colnames(B)[j]
# Convert from percentage units squared to decimal
Fm[i,j] = (0.01**2) * get_cov_version1(cv, fac1, fac2)
return(Fm)
###Output
_____no_output_____
###Markdown
Here's an example where the two factors don't have covariance data for the date selected
###Code
cv = covariance['20031211']
cv.loc[(cv.Factor1=='USFASTD_AERODEF') & (cv.Factor2=='USFASTD_ALUMSTEL')]
###Output
_____no_output_____
###Markdown
We can see where all the factor covariances aren't found in the data. Which date?Recall that there's a DataDate column and DlyReturnDate column in the dataframe. We're going to use a date to access the covariance data. Which date should we use?
###Code
df.head()
###Output
_____no_output_____
###Markdown
AnswerWe should use the `DataDate` column: the factor exposures and covariance data are keyed by the date the factor data was generated, while `DlyReturnDate` is two trading days later. QuizChoose the correct date, then use `diagonal_factor_cov_version1` to get the factor covariance matrix of that date.
###Code
# TODO
date = str(int(universe['DataDate'][1]))
print(date, end =" ")
F_version1 = diagonal_factor_cov_version1(date, B)
###Output
20030102 didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_ALUMSTEL
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_BETA
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_CHEM
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_CNSTMATL
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_CONTAINR
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_DIVYILD
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_DWNRISK
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_EARNQLTY
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_GROWTH
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_INDMOM
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_LEVERAGE
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_LIQUIDTY
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_LTREVRSL
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_MGMTQLTY
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_MIDCAP
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_MOMENTUM
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_OILGSCON
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_OILGSDRL
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_OILGSEQP
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_OILGSEXP
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_PAPER
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_PRECMTLS
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_PROFIT
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_PROSPECT
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_RESVOL
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_SEASON
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_SIZE
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_SPTYCHEM
didn't find covariance for: factor 1: USFASTD_AERODEF factor2: USFASTD_STREVRSL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_AERODEF
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_ALUMSTEL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_BETA
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_BLDGPROD
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CHEM
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CNSTENG
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CNSTMACH
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CNSTMATL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_COMSVCS
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CONGLOM
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_CONTAINR
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_DIVYILD
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_DWNRISK
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_EARNQLTY
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_ELECEQP
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_GROWTH
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_INDMACH
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_INDMOM
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_LEVERAGE
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_LIQUIDTY
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_LTREVRSL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_MGMTQLTY
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_MIDCAP
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_MOMENTUM
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_OILGSCON
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_OILGSDRL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_OILGSEQP
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_OILGSEXP
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_PAPER
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_PRECMTLS
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_PROFIT
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_PROSPECT
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_RESVOL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_SEASON
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_SIZE
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_SPTYCHEM
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_STREVRSL
didn't find covariance for: factor 1: USFASTD_AIRLINES factor2: USFASTD_TRADECO
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_BETA
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_CHEM
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_CNSTMATL
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_CONTAINR
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_DIVYILD
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_DWNRISK
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_EARNQLTY
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_GROWTH
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_INDMOM
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_LEVERAGE
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_LIQUIDTY
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_LTREVRSL
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_MGMTQLTY
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_MIDCAP
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_MOMENTUM
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_OILGSCON
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_OILGSDRL
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_OILGSEQP
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_OILGSEXP
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_PAPER
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_PROFIT
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_PROSPECT
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_RESVOL
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_SEASON
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_SIZE
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_SPTYCHEM
didn't find covariance for: factor 1: USFASTD_ALUMSTEL factor2: USFASTD_STREVRSL
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_AERODEF
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_AIRLINES
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_ALUMSTEL
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_AUTO
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_BETA
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_BLDGPROD
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CHEM
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CNSTENG
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CNSTMACH
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CNSTMATL
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_COMSVCS
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CONGLOM
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_CONTAINR
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_DISTRIB
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_DIVYILD
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_DWNRISK
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_EARNQLTY
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_ELECEQP
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_GROWTH
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_HOMEBLDG
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_HOUSEDUR
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_INDMACH
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_INDMOM
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_LEISPROD
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_LEISSVCS
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_LEVERAGE
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_LIQUIDTY
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_LTREVRSL
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_MEDIA
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_MGMTQLTY
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_MIDCAP
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_MOMENTUM
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_OILGSCON
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_OILGSDRL
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_OILGSEQP
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_OILGSEXP
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_PAPER
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_PRECMTLS
didn't find covariance for: factor 1: USFASTD_APPAREL factor2: USFASTD_PROFIT
###Markdown
Quiz: Create matrix of factor variancesJust use the factor variances and set the off diagonal covariances to zero.
###Code
def colnames(X):
if(type(X) == patsy.design_info.DesignMatrix):
return(X.design_info.column_names)
if(type(X) == pandas.core.frame.DataFrame):
return(X.columns.tolist())
return(None)
## extract a diagonal element from the factor covariance matrix
def get_var(cv, factor):
# TODO
return(cv.loc[(cv.Factor1==factor) & (cv.Factor2==factor),"VarCovar"].iloc[0])
def diagonal_factor_cov(date, B):
"""
Notice that we'll use the order of column names of the factor exposure matrix
to set the order of factors in the factor covariance matrix
"""
# TODO: set the variances only
cv = covariance[date]
k = NCOL(B)
Fm = np.zeros([k,k])
for j in range(0,k):
fac = colnames(B)[j]
Fm[j,j] = (0.01**2) * get_var(cv, fac)
return(Fm)
## factor variances
# gets factor vars into diagonal matrix
# takes B to know column names of B; F will be multipled by B later
# F is square; so row and col names must match column names of B.
F = diagonal_factor_cov(date, B)
F.shape
###Output
_____no_output_____
###Markdown
Note how the off diagonals are all set to zero. alpha combinationAs a simple alpha combination, combine the alphas with equal weight. In the project, you're welcome to try other ways to combine the alphas. For example, you could calculate some metric for each factor, which indicates which factor should be given more or less weight. Scale factor exposures Note that the terms that we're calculating for the objective function will be in dollar units. So the expected return $-\alpha^T h$ will be in dollar units. The $h$ vector of portfolio holdings will be in dollar units. The vector of alpha factor exposures $\alpha$ will represent the percent change expected for each stock. Based on the ranges of values in the factor exposure data, which are mostly between -5 and +5 and centered at zero, **we'll make an assumption that a factor exposure of 1 maps to 1 basis point of daily return on that stock.**So we'll convert the factor values into decimals: 1 factor exposure value $\rightarrow \frac{1}{10,000}$ in daily returns. In other words, we'll rescale the alpha factors by dividing by 10,000.This is to make the term representing the portfolio's expected return $\alpha^T h$ be scaled so that it represents dollar units.
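As a quick sanity check of that scaling assumption, with made-up exposure values (not actual Barra exposures):
```python
# Hypothetical exposures of one asset to the 4 alpha factors (illustrative values)
example_exposures = np.array([0.5, 1.0, 0.25, 0.25])

# Equal-weight combination, then rescale: 1 unit of exposure -> 1 bp of daily return
combined = example_exposures.sum()        # 2.0
expected_daily_return = 1e-4 * combined   # 2e-4, i.e. about 2 basis points
print(expected_daily_return)
```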
###Code
alpha_factors
def model_matrix(formula, data):
outcome, predictors = patsy.dmatrices(formula, data)
return(predictors)
## matrix of alpha factors
B_alpha = model_matrix(get_formula(alpha_factors, "SpecRisk"), data = universe)
B_alpha
###Output
_____no_output_____
###Markdown
QuizSum across the rows, then re-scale so that the expression $\mathbf{\alpha}^T \mathbf{h}$ is in dollar units.
###Code
def rowSums(m):
# TODO
return(np.sum(m, axis=1))
# TODO
scale = 1e-4
alpha_vec = scale * rowSums(B_alpha) #sum across rows (collapse 4 columns into one)
alpha_vec.shape
###Output
_____no_output_____
###Markdown
Original method of calculating common risk termRecall that the common risk term looks like this:$\textbf{h}^T\textbf{BFB}^T\textbf{h}$Where h is the vector of portfolio holdings, B is the factor exposure matrix, and F is the factor covariance matrix.We'll walk through this calculation to show how it forms an N by N matrix, which is computationally expensive, and may lead to memory overflow for large values of N.
###Code
np.dot( np.dot( h.T, np.matmul( np.matmul(B,F),BT) ), h)
tmp = np.matmul(B,F)
tmp.shape
# this makes an N by N matrix (large)
tmp = np.matmul(tmp,BT)
tmp.shape
tmp = np.matmul(h.T,tmp)
tmp.shape
tmp = np.dot(tmp,h)
tmp.shape
tmp
###Output
_____no_output_____
###Markdown
Efficiently calculate common risk term (avoid N by N matrix)Calculate the portfolio risk that is attributable to the risk factors:$\mathbf{h}^T\mathbf{BFB}^T\mathbf{h}$Note that this can become computationally infeasible and/or slow. Use matrix factorization and carefully choose the order of matrix multiplications to avoid creating an N by N matrix. square root of a matrix.We can find a matrix $\mathbf{B}$ that's the matrix square root of another matrix $\mathbf{A}$, which means that if we matrix multiply $\mathbf{BB}$, we'd get back to the original matrix $\mathbf{A}$.Find $\mathbf{Q}$ such that $\mathbf{Q}^T\mathbf{Q}$ is the same as $\mathbf{BFB}^T$. Let's let $\mathbf{G}$ denote the square root of matrix $\mathbf{F}$, so that $\mathbf{GG} = \mathbf{F}$.Then the expression for the covariance matrix of assets, $\mathbf{BFB}^T$, can be written as $\mathbf{BGGB}^T$. Let's let $\mathbf{Q}=\mathbf{GB}^T$ and let $\mathbf{Q}^T=\mathbf{BG}$, which means we can rewrite $\mathbf{BGGB}^T = \mathbf{Q}^T\mathbf{Q}$, and the common risk term is $\mathbf{h}^T\mathbf{Q}^T\mathbf{Qh}$Also, note that we don't have to calculate $\mathbf{BFB}^T$ explicitly, because the actual value we wish to calculate in the objective function will apply the holdings $\mathbf{h}$ to the covariance matrix of assets. Quiz: matrix square root of FWe'll call this square root matrix $\mathbf{G}$Use [scipy.linalg.sqrtm](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.linalg.sqrtm.html)
###Code
# TODO
G = scipy.linalg.sqrtm(F)
G.shape
###Output
_____no_output_____
###Markdown
Double check that multiplying the square root matrix to itself returns us back to the original matrix of factor variances.
###Code
np.matmul(G,G) - F
###Output
_____no_output_____
###Markdown
Quiz: calculate $\textbf{Q}$ and $\textbf{Q}^T$
###Code
# TODO
# Q = GB'
# Q should be a short and wide matrix
Q = np.matmul(G, BT)
Q.shape
# TODO
# Q' = BG
# Q' should be a tall and narrow matrix
QT = np.matmul(B,G)
QT.shape
# notice we could also use the transpose of Q to get Q'
QT - Q.transpose()
###Output
_____no_output_____
###Markdown
Quiz: Include portfolio holdingsSo the original formula of $h^TBFB^Th$ became $h^TBGGB^Th$, where $GG = F$. And then, if we let $Q^T=BG$ and $Q = GB^T$: $h^TQ^TQh$Let $R = Q h$ and $R^T = h^T Q^T$: The risk term becomes: $R^TR$, where $R^T=h^TQ^T$ and $R=Qh$So an important point here is that we don't want to multiply $Q^TQ$ itself, because this creates the large N by N matrix. We want to multiply $h^TQ^T$ and $Qh$ separately, creating vectors of length k (k is number of risk factors).
###Code
# TODO
# R = Qh
R = np.matmul(Q, h)
R.shape
# TODO
# R' = h'Q'
RT = np.matmul(h.T,QT)
RT.shape
###Output
_____no_output_____
###Markdown
Notice how we avoided creating a full N by N matrixAlso, notice that if we have Q, we can take its transpose to get $Q^T$ instead of doing the matrix multiplication. Similarly, if we have R, which is a vector, we notice that $R^TR$ is the same as taking the dot product. In other words, it's squaring each element in the vector R, and adding up all the squared values.$R^TR = \sum_{i}^{k}(r_i^2)$ Quiz: Put it all together: calculate common risk term efficiently
###Code
## TODO: common risk term in term
# TODO: calculate square root of F
G = scipy.linalg.sqrtm(F)
# TODO: calculate Q
Q = np.matmul(G, BT)
# TODO: calculate R
R = np.matmul(Q, h)
# TODO: calculate common risk term
common_risk = np.sum( R ** 2)
###Output
_____no_output_____
###Markdown
Specific Risk termThe portfolio's variance that is specific to each asset is found by combining the holdings with the specific variance matrix: $h^TSh$, where $h^T$ is a 1 by N vector, S is an N by N matrix, and h is an N by 1 vector.Recall that S is a diagonal matrix, so all the off-diagonals are zero. So instead of doing the matrix multiplication, we could save computation by working with the vector containing the diagonal values.$h^TSh = \sum_i^{N}(h_i^2 \times S_i)$ because $S$ is a diagonal matrix.
###Code
## check the unit of measure of SpecRisk
# Notice that these are in percent; multiply by .01 to get them back to decimals.
universe['SpecRisk'][0:2]
###Output
_____no_output_____
###Markdown
Quiz: Specific Risk termGiven specific risk (volatility), calculate specific variance. First re-scale the specific risk data so that it's in decimal instead of percent.
###Code
## TODO: specific variance : rescale it and then square to get specific variance
specVar = (0.01 * universe['SpecRisk']) ** 2
# TODO: specific risk term (include holdings)
spec_risk_term = np.dot(h ** 2, specVar)
###Output
_____no_output_____
###Markdown
Maximize portfolio returnsSince the alpha vector $\mathbf{\alpha}$ is supposed to be indicative of future asset returns, when we look at a portfolio of assets, the weighted sum of these alphas $\mathbf{\alpha}^T \mathbf{h}$ is predictive of the portfolio's future returns. We want to maximize the portfolio's expected future returns, so we want to minimize the negative of portfolio's expected returns $-\mathbf{\alpha}^T \mathbf{h}$
###Code
## TODO
expected_return = np.dot(h, alpha_vec)
###Output
_____no_output_____
###Markdown
Linear price impact of tradingAssume transaction cost is linearly related to the trade size as a fraction of the average daily volume. Since we won't know the actual daily volume until the day that we're executing, we want to use past data as an estimate for future daily volume. This would be kind of noisy if we simply use the prior day's daily volume, so we'd prefer a more stable estimate like a 30 day rolling average.A commonly used **estimate for linear market impact is that if a trade size is 1% of the ADV, this moves the price by 10 basis points (1/10,000).**$TradeSize_{i,t}$ is the fraction of your trade relative to the average dollar volume estimated for that stock, for that day. $Trade_{i,t}$ = dollar amount to trade = $h_{t} - h_{t-1}$, which is the new holding of the asset minus the previous holding.$ADV_{i,t}$: (average dollar volume) is the total dollar amount expected to be traded, based on a moving average of historical daily volume.$TradeSize_{i,t} = \frac{Trade_{i,t}}{ADV_{i,t}}$: The size of the trade relative to the estimated daily volume.$\% \Delta price_{i,t}$ = price change due to trading, as a fraction of the original price (it's a percent change). We'll write out the ratio: change in price divided by the trade size.$ \frac{\% \Delta price_{i,t}}{TradeSize_{i,t}} = \frac{10 bps}{1\%}$ $ \frac{\% \Delta price_{i,t}}{TradeSize_{i,t}} = \frac{10/10^4}{1/100}$$ \frac{\% \Delta price_{i,t}}{TradeSize_{i,t}} = \frac{10^{-3}}{10^{-2}}$$ \frac{\% \Delta price_{i,t}}{TradeSize_{i,t}} = 10^{-1}$Now we'll move things around to solve for the change in price.$\% \Delta price_{i,t} = 10^{-1} \times TradeSize_{i,t}$We defined TradeSize to be the Trade divided by ADV.$\% \Delta price_{i,t} = 10^{-1} \times \frac{Trade_{i,t}}{ADV_{i,t}}$Note that Trade is the current position minus the prior day's position.$\% \Delta price_{i,t} = 10^{-1} \times \frac{h_{i,t} - h_{i,t-1}}{ADV_{i,t}}$For convenience, we'll combine the constant $10^{-1}$ and $\frac{1}{ADV_{i,t}}$ and call it lambda $\lambda_{i,t}$:$\% \Delta price_{i,t} = \lambda_{i,t} \times (h_{i,t} - h_{i,t-1})$ where $\lambda_{i,t} = 10^{-1}\times \frac{1}{ADV_{i,t}} = \frac{1}{10 \times ADV_{i,t}}$ So we represent the market impact as $\% \Delta price_{i,t} = \lambda_{i,t} (h_{i,t} - h_{i,t-1})$, where $\lambda_{i,t}$ incorporates the $ADV_{i,t}$. Review the lessons to see how to do this.Note that since we're dividing by $ADV_{i,t}$, we'll want to handle cases when $ADV_{i,t}$ is missing or zero. In those instances, we can set $ADV_{i,t}$ to a small positive number, such as 10,000, which, in practice, assumes that the stock is illiquid. QuizIf the ADV field is missing or zero, set it to 10,000.
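Here's a quick numeric check of that 1%-of-ADV assumption, using made-up numbers (a hypothetical 10 million dollar ADV, not from the Barra data):
```python
# Illustrative numbers only: verify that a trade equal to 1% of a $10M ADV
# moves the price by about 10 basis points under this linear impact model.
adv_i = 1.0e7                    # assumed average dollar volume for one asset
lambda_i = 0.1 / adv_i           # lambda_i = 1 / (10 * ADV)
trade_i = 0.01 * adv_i           # dollar trade equal to 1% of ADV
price_move = lambda_i * trade_i  # fractional price change
print(price_move)                # 0.001, i.e. 10 basis points
```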
###Code
# TODO: if missing, set to 10000
universe.loc[np.isnan(universe['ADTCA_30']), 'ADTCA_30'] = 1.0e4 ## assume illiquid if no volume information
# TODO: if zero, set to 10000
universe.loc[universe['ADTCA_30'] == 0, 'ADTCA_30'] = 1.0e4 ## assume illiquid if no volume information
###Output
_____no_output_____
###Markdown
Quiz: calculate Lambda
###Code
# TODO
adv = universe['ADTCA_30']
Lambda = 0.1 / adv
###Output
_____no_output_____
###Markdown
Quiz: transaction cost termTransaction cost is change in price times dollar amount traded. For a single asset "i":$tcost_{i,t} = (\% \Delta price_{i,t}) \times (DollarsTraded_{i,t})$$tcost_{i,t} = (\lambda_{i,t} \times (h_{i,t} - h_{i,t-1}) ) \times (h_{i,t} - h_{i,t-1})$Notice that we can simplify the notation so it looks like this:$tcost_{i,t} = \lambda_{i,t} \times (h_{i,t} - h_{i,t-1})^2$The transaction cost term to be minimized (for all assets) is:$tcost_{t} = \sum_i^{N} \lambda_{i,t} (h_{i,t} - h_{i,t-1})^2$ where $\lambda_{i,t} = \frac{1}{10\times ADV_{i,t}}$For matrix notation, we'll use a capital Lambda, $\Lambda_{t}$, instead of the lowercase lambda $\lambda_{i,t}$.$tcost_{t} = (\mathbf{h}_{t} - \mathbf{h}_{t-1})^T \mathbf{\Lambda}_t (\mathbf{h}_{t} - \mathbf{h}_{t-1})$Note that we'll pass in a vector of holdings as a numpy array. For practice, we'll use the h variable that is initialized to zero.
###Code
# TODO
tcost = np.dot( (h - h0) ** 2, Lambda)
###Output
_____no_output_____
###Markdown
objective functionCombine the common risk, idiosyncratic risk, transaction costs and expected portfolio return into the objective function. Put this inside a function.Objective function is: factor risk + idiosyncratic risk - expected portfolio return + transaction costs $f(\mathbf{h}) = \frac{1}{2}\kappa \mathbf{h}_t^T\mathbf{Q}^T\mathbf{Q}\mathbf{h}_t + \frac{1}{2} \kappa \mathbf{h}_t^T \mathbf{S} \mathbf{h}_t - \mathbf{\alpha}^T \mathbf{h}_t + (\mathbf{h}_{t} - \mathbf{h}_{t-1})^T \mathbf{\Lambda} (\mathbf{h}_{t} - \mathbf{h}_{t-1})$ QuizAn important point is to think about what matrices can be multiplied independently of the vector of asset holdings, because those can be done once outside of the objective function. The rest of the objective function that depends on the holdings vector will be evaluated inside the objective function multiple times by the optimizer, as it searches for the optimal holdings. For instance, $\mathbf{h}^T\mathbf{BFB}^T\mathbf{h}$ became $\mathbf{h}^T\mathbf{BGGB}^T\mathbf{h}$, where $\mathbf{GG} = \mathbf{F}$. And then, if we let $\mathbf{Q}^T=\mathbf{BG}$ and $\mathbf{Q} = \mathbf{GB}^T$: $\mathbf{h}^T\mathbf{Q}^T\mathbf{Qh}$Let $\mathbf{R} = \mathbf{Q h}$ and $\mathbf{R}^T = \mathbf{h}^T \mathbf{Q}^T$: The risk term becomes: $\mathbf{R}^T\mathbf{R}$, where $\mathbf{R}^T=\mathbf{h}^T\mathbf{Q}^T$ and $\mathbf{R}=\mathbf{Q}\mathbf{h}$* Can we pre-compute Q outside of the objective function? * Can we pre-compute R outside of the objective function? AnswerQ doesn't depend on h, the holdings vector, so it can be pre-computed once outside of the objective function.R is created using h, the holdings vector. This should be computed each time the objective function is called, not pre-computed beforehand. Risk Aversion parameterThe risk aversion term is set to target a particular gross market value (GMV), or to target a desired volatility. The gross market value is the dollar value of the absolute value of the long and short positions.$ GMV = \sum_i^N(|h_{i,t}|)$When we think about what it means to take more risk when investing, taking bigger bets with more money is a way to take on more risk. 
So the risk aversion term controls how much risk we take by controlling the dollar amount of our positions, which is the gross market value.In our case, we tried a few values of the risk aversion term, ran the backtest, and calculated the GMV. Ideally, a quant who is just starting out may have a targeted book size of 50 million. In other words, they try to keep their GMV around 50 million. A risk aversion term of $10^{-6}$ gets the GMV to be in the tens of millions. A higher risk aversion term would decrease the GMV, and a lower risk aversion term would increase the GMV, and also the risk. Note that this isn't necessarily a linear mapping, so in practice, you'll try different values and check the results.Also, in practice, you'd normally keep the risk aversion term constant, unless your fund is accepting more investor cash, or handling redemptions. In those instances, the fund size itself changes, so the targeted GMV also changes. Therefore, we'd adjust the risk aversion term to adjust for the desired GMV. Also, note that we would keep this risk aversion term constant, and not adjust it on a daily basis. Adjusting the risk aversion term too often would result in unnecessary trading that isn't informed by the alphas.
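For reference, a minimal sketch of the GMV calculation described above; `gross_market_value` is just an illustrative helper name (it can be applied to the optimized holdings `h1` produced further below):
```python
def gross_market_value(holdings):
    """GMV: sum of the absolute dollar values of all long and short positions."""
    return np.sum(np.abs(holdings))

# e.g. gross_market_value(h1) once the optimizer below has produced h1
```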
###Code
## Risk aversion
risk_aversion=1.0e-6
###Output
_____no_output_____
###Markdown
Quiz: define objective functionCombine the common risk, idiosyncratic risk, transaction costs and expected portfolio return into the objective function. Put this inside a function.Objective function is: factor risk + idiosyncratic risk - expected portfolio return + transaction costs $f(\mathbf{h}) = \frac{1}{2}\kappa \mathbf{h}_t^T\mathbf{Q}^T\mathbf{Q}\mathbf{h}_t + \frac{1}{2} \kappa \mathbf{h}_t^T \mathbf{S} \mathbf{h}_t - \mathbf{\alpha}^T \mathbf{h}_t + (\mathbf{h}_{t} - \mathbf{h}_{t-1})^T \mathbf{\Lambda} (\mathbf{h}_{t} - \mathbf{h}_{t-1})$
###Code
def func(h):
# TODO: define the objective function, where h is the vector of asset holdings
f = 0.0
f += 0.5 * risk_aversion * np.sum( np.matmul(Q, h) ** 2 )
f += 0.5 * risk_aversion * np.dot(h ** 2, specVar) #since Specific Variance is diagonal, don't have to do matmul
f -= np.dot(h, alpha_vec)
f += np.dot( (h - h0) ** 2, Lambda)
return(f)
###Output
_____no_output_____
###Markdown
GradientBefore, when we used cvxpy, we didn't have to calculate the gradient, because the library did that for us.Objective function is: factor risk + idiosyncratic risk - expected portfolio return + transaction costs $f(\mathbf{h}) = \frac{1}{2}\kappa \mathbf{h}^T\mathbf{Q}^T\mathbf{Qh} + \frac{1}{2} \kappa \mathbf{h}^T \mathbf{S h} - \mathbf{\alpha^T h} + (\mathbf{h}_{t} - \mathbf{h}_{t-1})^T \Lambda (\mathbf{h}_{t} - \mathbf{h}_{t-1})$Let's think about the shape of the resulting gradient. The reason we're interested in calculating the derivative is so that we can tell the optimizer in which direction, and how much, it should shift the portfolio holdings in order to improve the objective function (minimize variance, minimize transaction cost, and maximize expected portfolio return). So we want to calculate a derivative for each of the N assets (about 2000+ in our defined universe). So the resulting gradient will be a row vector of length N.The gradient, or derivative of the objective function, with respect to the portfolio holdings h, is: $f'(\mathbf{h}) = \frac{1}{2}\kappa (2\mathbf{Q}^T\mathbf{Qh}) + \frac{1}{2}\kappa (2\mathbf{Sh}) - \mathbf{\alpha} + 2(\mathbf{h}_{t} - \mathbf{h}_{t-1}) \mathbf{\Lambda}$We can check that each of these terms is a row vector with one value for each asset (1 by N row vector) QuizCalculate the gradient of the common risk term:$\kappa (\mathbf{Q}^T\mathbf{Qh})$
###Code
# TODO: gradient of common risk term
tmp = risk_aversion * np.matmul(QT, np.matmul(Q,h))
###Output
_____no_output_____
###Markdown
Verify that the calculation returns one value for each asset in the stock universe (about 2000+ )
###Code
tmp.shape
###Output
_____no_output_____
###Markdown
QuizCalculate gradient of idiosyncratic risk term$\kappa (\mathbf{Sh})$
###Code
# TODO: idiosyncratic risk gradient
tmp = risk_aversion * specVar * h
tmp.shape
###Output
_____no_output_____
###Markdown
QuizCalculate the gradient of the expected return$- \mathbf{\alpha} $
###Code
# TODO: expected return gradient
tmp = -alpha_vec
tmp.shape
###Output
_____no_output_____
###Markdown
QuizCalculate the gradient of the transaction cost.$ 2(\mathbf{h}_{t} - \mathbf{h}_{t-1}) \mathbf{\Lambda}$
###Code
# transaction cost
tmp = 2 * (h - h0 ) * Lambda
tmp.shape
###Output
_____no_output_____
###Markdown
Quiz: Define gradient functionPut this all together to define the gradient function. The optimizer will use this to make small adjustments to the portfolio holdings. gradient (slightly cleaned up)We'll simplify the expression a bit by pulling the common $\kappa$ out of the common risk and specific risk. Also, the 1/2 and 2 cancel for both risk terms.$f'(\mathbf{h}) = \frac{1}{2}\kappa (2\mathbf{Q}^T\mathbf{Qh}) + \frac{1}{2}\kappa (2\mathbf{h}^T\mathbf{S}) - \mathbf{\alpha} + 2(\mathbf{h}_{t} - \mathbf{h}_{t-1})\cdot \Lambda$becomes$f'(\mathbf{h}) = \kappa (\mathbf{Q}^T\mathbf{Qh} + \mathbf{Sh}) - \mathbf{\alpha} + 2(\mathbf{h}_{t} - \mathbf{h}_{t-1}) \mathbf{\Lambda}$
###Code
# Solution
def grad(x):
# TODO
    g = risk_aversion * (np.matmul(QT, np.matmul(Q,x)) + \
        (specVar * x) ) - alpha_vec + \
        2 * (x-h0) * Lambda
return(np.asarray(g))
###Output
_____no_output_____
###Markdown
OptimizerChoose an optimizer. You can read about these optimizers:* L-BFGS * Powell* Nelder-Mead* Conjugate GradientIn this [page about math optimization](http://scipy-lectures.org/advanced/mathematical_optimization/)Also read the [scipy.optimize documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html)Pass in the objective function, prior day's portfolio holdings, and the gradient.
###Code
# TODO
optimizer_result = scipy.optimize.fmin_l_bfgs_b(func, h0, fprime=grad)
h1 = optimizer_result[0]
opt_portfolio = pd.DataFrame(data = {"Barrid" : universe['Barrid'], "h.opt" : h1})
opt_portfolio.head()
###Output
_____no_output_____
###Markdown
risk exposuresfactor exposures times the portfolio holdings for each asset, gives the portfolio's exposure to the factors (portfolio's risk exposure).$\mathbf{B}^T\mathbf{h}$
###Code
# TODO: risk exposures
risk_exposures = np.matmul(BT, h1)
# put this into a pandas series
pd.Series(risk_exposures, index = colnames(B))
###Output
_____no_output_____
###Markdown
Quiz: alpha exposuresThe portfolio's exposures to the alpha factors is equal to the matrix of alpha exposures times the portfolio holdings. We'll use the holdings returned by the optimizer.$\textbf{B}_{\alpha}^T\mathbf{h}$
###Code
# Solution: portfolio's alpha exposure
alpha_exposures = np.matmul(B_alpha.transpose(), h1)
# put into a pandas series
pd.Series(alpha_exposures, index = colnames(B_alpha))
###Output
_____no_output_____ |
Notebooks/01-Open-loop.ipynb | ###Markdown
Open-loop simulations: Situation without controlIn this notebook the open-loop simulations for *A Hierarchical Approach For Splitting Truck Platoons Near Network Discontinuities* are presented:- [Network topology](network_topology) - [Symuvia connection](symuvia_connection) - [Data examination](data_examination) Network topology Length of the main road - Before merge *1000m*, merge zone *100m*, after merge *400m*. Length of the onramp road - Before merge *900m*, merge zone *100m*. Parameters
###Code
DT = 0.1 # Sample time
KC = 0.16 # CAV max density
KH = 0.0896 # HDV max density
VF = 25.0 # Speed free flow
W = 6.25 # Congestion speed
E = 25.0*0.3 # Speed drop for relaxation
GCAV = 1/(KC*W) # Time headway CAV
GHDV = 1/(KH*W) # Time headway HDV
SCAV = VF/(KC*W)+1/KC # Desired space headway CAV
SHDV = VF/(KH*W)+1/KH # Desired space headway HDV
dveh_twy = {'CAV': GCAV, 'HDV': GHDV}
dveh_dwy = {'CAV': 1/KC, 'HDV': 1/KH}
U_MAX = 1.5 # Max. Acceleration
U_MIN = -1.5 # Min. Acceleration
###Output
_____no_output_____
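As a quick sanity check (not part of the original workflow), the headways implied by the parameters above:
```python
# Derived headways from the triangular fundamental diagram parameters above
print('CAV time headway  GCAV = {:.2f} s'.format(GCAV))   # 1/(KC*W)        = 1.00 s
print('HDV time headway  GHDV = {:.2f} s'.format(GHDV))   # 1/(KH*W)       ~= 1.79 s
print('CAV space headway SCAV = {:.2f} m'.format(SCAV))   # VF/(KC*W)+1/KC  = 31.25 m
print('HDV space headway SHDV = {:.2f} m'.format(SHDV))   # VF/(KH*W)+1/KH ~= 55.80 m
```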
###Markdown
Symuvia connectionLibraries should be loaded via the `ctypes` module in Python: Connection with SymuviaIn this case we connect to the simulator. First define the `libSymuVia.dylib` file
###Code
import os
from ctypes import cdll, create_string_buffer, c_int, byref, c_bool
from sqlalchemy import create_engine, MetaData
from sqlalchemy import Table, Column, String, Integer, Float
from sqlalchemy import insert, delete, select, case, and_
from xmltodict import parse
from collections import OrderedDict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Bokeh
from bokeh.plotting import figure, show
from bokeh.sampledata.iris import flowers
from bokeh.io import output_notebook
from bokeh.palettes import Viridis, Spectral11
from bokeh.plotting import figure, show, output_file
from bokeh.models import Span
output_notebook()
# Plotly
import plotly as py
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True)
import matplotlib
from matplotlib import cm
import ipywidgets as widgets
from IPython.display import display
###Output
_____no_output_____
###Markdown
Load traffic library
###Code
dir_path = os.getcwd()
lib_path_name = ('..','Symuvia','Contents','Frameworks','libSymuVia.dylib')
full_name = os.path.join(dir_path,*lib_path_name)
symuvialib = cdll.LoadLibrary(full_name)
###Output
_____no_output_____
###Markdown
Load Traffic network
###Code
file_path = ('..', 'Network', 'Merge_Demand_CAV.xml')
file_name = os.path.join(dir_path, *file_path)
m = symuvialib.SymLoadNetworkEx(file_name.encode('UTF8'))
###Output
_____no_output_____
###Markdown
Define Output: DatabaseAll results are stored in `Output/SymOut.sqlite`. Table for storing results:1. `traj` stores trajectories in open loop.
###Code
engine_path = ('..','Output','SymOut.sqlite')
engine_name = os.path.join(os.path.sep,*engine_path)
engine_full_name = os.path.join(dir_path,*engine_path)
engine_call = 'sqlite://'+engine_name
engine = create_engine(engine_call)
metadata = MetaData()
try:
ltbstr = 'Loaded table in: '
connection = engine.connect()
traj = Table('traj', metadata, autoload=True, autoload_with=engine)
stmt = delete(traj)
results = connection.execute(stmt)
except:
ltbstr = 'Loaded table in: '
traj = Table('traj', metadata,
Column('ti', Float()),
Column('id', Integer()),
Column('type', String(3)),
Column('tron', String(10)),
Column('voie', Integer()),
Column('dst', Float()),
Column('abs', Float()),
Column('vit', Float()),
Column('ldr', Integer()),
Column('spc', Float()),
Column('vld', Float()))
metadata.create_all(engine)
connection = engine.connect()
finally:
print(ltbstr, engine)
###Output
Loaded table in: Engine(sqlite:///../Output/SymOut.sqlite)
###Markdown
Symuvia parsers These functions are intended to extract particular information from `Symuvia` or to parse information from the simulator, for use within this study. 1. Pointers: Variables to request data at each time step of the simulation 2. Parsers: Data format converters 3. V2V information: Information required to deploy the control strategy
###Code
# Pointers
sRequest = create_string_buffer(100000)
bEnd = c_int()
bSecond = c_bool(True)
def typedict(veh_dict):
"""
Converts dictionary file from xmltodict
into numeric formats to be stored in a database
"""
data = {'id': int(veh_dict['@id']),
'type': veh_dict['@type'],
'tron': veh_dict['@tron'],
'voie': int(veh_dict['@voie']),
'dst': float(veh_dict['@dst']),
'abs': float(veh_dict['@abs']),
'vit': float(veh_dict['@vit']),
}
return data
###Output
_____no_output_____
###Markdown
V2V informationInformation regarding V2V communication is computed: in particular, the connectivity and the states derived from it (*spacing*, *leader speed*). In this case only a single leader is identified
###Code
# Identify Leader
def queueveh(dLeader, veh):
"""
This function creates a queue of vehicles
for a particular road segment
"""
if veh['tron'] in dLeader.keys():
if veh['id'] not in dLeader[veh['tron']]:
dLeader[veh['tron']].append(veh['id'])
else:
dLeader[veh['tron']] = [veh['id']]
return dLeader
def getlead(dLeader, veh):
"""
This function identifies the leader of a specific
vehicle i
"""
idx = dLeader[veh['tron']].index(veh['id'])
if idx != 0:
return dLeader[veh['tron']][idx-1]
else:
return dLeader[veh['tron']][idx]
###Output
_____no_output_____
###Markdown
Note that, in order to finish writing the output `XML` file, the kernel of the current session should be shut down.
###Code
# Spacing
def getspace(lTrajVeh):
"""
This function obtains spacing between two vehicles
"""
# Equilibrium
det_eq_s = lambda x: SCAV if x['type']=='CAV' else SHDV
try:
# Case single vehicle
if lTrajVeh['id'] == lTrajVeh['ldr']:
return [{'spc':0.0+det_eq_s(lTrajVeh)}]
else:
# Last vehicle
# Leader out of Network @ ti
return [{'spc':None}]
except (TypeError, IndexError):
# Multiple veh @ ti
space = []
for veh in lTrajVeh:
if veh['id'] == veh['ldr']:
space.append(0.0+det_eq_s(veh))
else:
veh_pos = veh['abs']
ldr_id = veh['ldr']
ldr_pos = [ldr['abs'] for ldr in lTrajVeh if ldr['id']==ldr_id]
if ldr_pos:
space.append(ldr_pos[0]-veh_pos)
else:
# Leader out of Network @ ti
space.append(0.0)
space_dct = [{'spc': val} for val in space]
return space_dct
# Spacing
def getleaderspeed(lTrajVeh):
"""
This function obtains speed from the leader.
"""
try:
# Case single vehicle
if lTrajVeh['id'] == lTrajVeh['ldr']:
return [{'vld': lTrajVeh['vit']}]
else:
# Leader out of Network @ ti
return [{'vld':None}]
except (TypeError, IndexError):
# Multiple veh @ ti
speedldr = []
for veh in lTrajVeh:
if veh['id'] == veh['ldr']:
speedldr.append(veh['vit'])
else:
ldr_id = veh['ldr']
ldr_vit = [ldr['vit'] for ldr in lTrajVeh if ldr['id']==ldr_id]
if ldr_vit:
speedldr.append(ldr_vit[0])
else:
speedldr.append(veh['vit'])
speedldr_dct = [{'vld': val} for val in speedldr]
return speedldr_dct
def updatelist(lTrajVeh,lDict):
"""
Considering a list of dictionaries as an input
    the function updates the parameter given by lDict
"""
try:
lTrajVeh.update(lDict[0])
except AttributeError:
for d,s in zip(lTrajVeh,lDict):
d.update(s)
return lTrajVeh
###Output
_____no_output_____
###Markdown
Launch simulation
###Code
max_time = 120
progressSim = widgets.FloatProgress(
value=5,
min=0,
max=max_time,
step=0.1,
description='Simulating:',
bar_style='info',
orientation='horizontal'
)
tiVal = widgets.BoundedFloatText(
value=7.5,
min=0,
max=max_time,
step=0.1,
description='Time step:',
disabled=False
)
%%time
N = 1200 # Simulation steps
# Start simulation from beginning
m = symuvialib.SymLoadNetworkEx(file_name.encode('UTF8'))
# Clean table
stmt = delete(traj)
results = connection.execute(stmt)
step = iter(range(N))
stmt = insert(traj)
t = []
display(progressSim)
display(tiVal)
#for step in steps:
bSuccess = 2
while bSuccess>0:
bSuccess = symuvialib.SymRunNextStepEx(sRequest, True, byref(bEnd))
try:
next(step)
dParsed = parse(sRequest.value.decode('UTF8'))
ti = dParsed['INST']['@val']
if dParsed['INST']['TRAJS'] is None:
#dummy = 1 # Guarantees correct export of XML
pass #print('')
#print('No vehicles in the network at time: {}'.format(ti))
else:
lVehOD = dParsed['INST']['TRAJS']['TRAJ']
lTrajVeh = []
try:
lTrajVeh = typedict(lVehOD)
lTrajVeh['ti'] = ti
dLeader = {lTrajVeh['tron']: [lTrajVeh['id']]}
lTrajVeh['ldr'] = getlead(dLeader, lTrajVeh)
except TypeError:
# Multiple veh @ ti
for i, veh in enumerate(lVehOD):
TrajVeh = typedict(veh)
TrajVeh['ti'] = ti
dLeader = queueveh(dLeader, TrajVeh)
TrajVeh['ldr'] = getlead(dLeader, TrajVeh)
lTrajVeh.append(TrajVeh)
lSpc = getspace(lTrajVeh)
lLdrV = getleaderspeed(lTrajVeh)
lTrajVeh = updatelist(lTrajVeh,lSpc)
lTrajVeh = updatelist(lTrajVeh,lLdrV)
results = connection.execute(stmt,lTrajVeh)
# print('{} vehicles in the network at time: {}'.format(results.rowcount, ti))
t.append(ti)
progressSim.value = ti
tiVal.value = ti
except StopIteration:
print('Stop by iteration')
        print('Last simulation step at time: {}'.format(ti))
bSuccess = 0
except:
print(i)
bSuccess = symuvialib.SymRunNextStepEx(sRequest, True, byref(bEnd))
print('Return from Symuvia Empty: {}'.format(sRequest.value.decode('UTF8')))
        print('Last simulation step at time: {}'.format(ti))
bSuccess = 0
###Output
_____no_output_____
###Markdown
Data examinationThis section reads the results back from the database and plots the open-loop trajectories.
###Code
stmt = select([traj])
results = connection.execute(stmt).fetchall()
column_names = traj.columns.keys()
trajDf = pd.DataFrame(results, columns = column_names)
trajDf.head()
trajDf.info()
vehicle_iden = trajDf['id'].unique().tolist()
vehicle_type = trajDf['type'].unique().tolist()
###Output
_____no_output_____
###Markdown
Visualization BokehNon-interactive visualization
###Code
# Colormap
colormap = {'In_main': 'lightblue', 'In_onramp': 'crimson', 'Merge_zone': 'green', 'Out_main': 'gold'}
colors = [colormap[x] for x in trajDf.tron]
# Figure
p = figure(title = "Trajectories",
width=900,
height=900
)
p.xaxis.axis_label = 'Time [s]'
p.yaxis.axis_label = 'Position [m]'
# Horizontal line
hline = Span(location=0, dimension='width', line_color='darkslategrey', line_width=3)
# Data
p.circle(trajDf['ti'], trajDf['abs'], color = colors, size = 2)
p.renderers.extend([hline])
show(p)
###Output
_____no_output_____
###Markdown
Visualization PlotlyInteractive visualization (Only notebook mode)
###Code
layout = go.Layout(
title = 'Trajectories without Control',
yaxis = dict(
title = 'Position X [m]'
),
xaxis = dict(
title = 'Time [s]'
),
width = 900,
height = 900,
)
def trace_position_vehicle(traj_type, v_id, vtype):
"""
Plot trace single vehicle
"""
dashtrj = {'CAV': 'solid', 'HDV': 'dot'}
trace = go.Scatter(
x = traj_type['ti']-20,
y = traj_type['abs']-500,
mode = 'lines',
name = f'Vehicle {vtype} - {v_id}',
line = dict(
shape = 'spline',
width = 1,
dash = dashtrj[vtype]
)
)
return trace
def update_position_plot(vtype):
traj_type = trajDf[trajDf.type.isin(vtype)]
traj_id = traj_type.id.unique()
data = []
for v in traj_id:
traj_veh = traj_type[traj_type.id == v]
veh_type = traj_veh.type.unique()[0]
trace_i = trace_position_vehicle(traj_veh, v, veh_type)
data.append(trace_i)
fig = go.Figure(data = data, layout = layout)
iplot(fig)
veh_type_wgt = widgets.SelectMultiple(
options=vehicle_type,
value=vehicle_type,
rows=2,
description='Vehicle type',
disabled=False
)
widgets.interactive(update_position_plot, vtype=veh_type_wgt)
#update_position_plot(veh_type_wgt.value) #non-interactive
trajDf.head()
trajDf['ctr']=None
trajDf.to_sql(name='closed', con = engine, if_exists='replace', index=False)
layout = go.Layout(
title = 'Spacing without Control',
yaxis = dict(
        title = 'Spacing [m]'
),
xaxis = dict(
title = 'Time [s]'
),
width = 900,
height = 900,
)
def trace_space_vehicle(traj_type, v_id, vtype):
"""
Plot trace single vehicle
"""
trace = go.Scatter(
x = traj_type['ti'],
y = traj_type['spc'],
mode = 'lines',
name = f'Vehicle {vtype} - {v_id}',
line = dict(
shape = 'spline',
width = 1,
)
)
return trace
def update_space_plot(veh_id):
traj_type = trajDf[trajDf.id.isin(veh_id)]
traj_id = traj_type.id.unique()
data = []
for v in traj_id:
traj_veh = traj_type[traj_type.id == v]
veh_type = traj_veh.type.unique()[0]
trace_i = trace_space_vehicle(traj_veh, v, veh_type)
data.append(trace_i)
fig = go.Figure(data = data, layout = layout)
iplot(fig)
veh_id_wgt = widgets.SelectMultiple(
options=vehicle_iden,
value=vehicle_iden,
rows=12,
    description='Vehicle id',
disabled=False
)
widgets.interactive(update_space_plot, veh_id=veh_id_wgt)
#update_space_plot(veh_id_wgt.value)
###Output
_____no_output_____ |
Notebooks/Casava Plant Disease Prediction/Cassava_Plant_Disease.ipynb | ###Markdown
Building the model
###Code
# Loading the ResNet152 architecture with imagenet weights as base
base = tf.keras.applications.ResNet152(include_top=False, weights='imagenet',input_shape=[IMG_SIZE,IMG_SIZE,3])
base.summary()
model = tf.keras.Sequential()
model.add(base)
model.add(BatchNormalization(axis=-1))
model.add(GlobalAveragePooling2D())
model.add(Dense(5, activation='softmax'))
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adamax(learning_rate=0.01), metrics=['acc'])
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet152 (Functional) (None, 7, 7, 2048) 58370944
_________________________________________________________________
batch_normalization (BatchNo (None, 7, 7, 2048) 8192
_________________________________________________________________
global_average_pooling2d (Gl (None, 2048) 0
_________________________________________________________________
dense (Dense) (None, 5) 10245
=================================================================
Total params: 58,389,381
Trainable params: 58,233,861
Non-trainable params: 155,520
_________________________________________________________________
###Markdown
Training and saving the models
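Once one of the `.h5` files below has been written with `model.save(...)`, the trained network can be reloaded later without retraining; a minimal sketch (it assumes the saved file exists in the working directory):
###Code
import tensorflow as tf

# Reload a previously saved model and reuse it for inference (sketch)
reloaded = tf.keras.models.load_model('ResNet152.h5')
reloaded.summary()
###Output
_____no_output_____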
###Code
history = model.fit(
train_generator,
steps_per_epoch=BATCH_SIZE,
epochs=20,
validation_data=valid_generator,
batch_size=BATCH_SIZE
)
model.save('ResNet152.h5')
# Loading the ResNet101 architecture with imagenet weights as base
base = tf.keras.applications.ResNet101(include_top=False, weights='imagenet',input_shape=[IMG_SIZE,IMG_SIZE,3])
model = tf.keras.Sequential()
model.add(base)
model.add(BatchNormalization(axis=-1))
model.add(GlobalAveragePooling2D())
model.add(Dense(5, activation='softmax'))
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adamax(learning_rate=0.01), metrics=['acc'])
history = model.fit(
train_generator,
steps_per_epoch=BATCH_SIZE,
epochs=20,
validation_data=valid_generator,
batch_size=BATCH_SIZE
)
model.save('ResNet101.h5')
# Loading the ResNet50 architecture with imagenet weights as base
base = tf.keras.applications.ResNet50(include_top=False, weights='imagenet',input_shape=[IMG_SIZE,IMG_SIZE,3])
model = tf.keras.Sequential()
model.add(base)
model.add(BatchNormalization(axis=-1))
model.add(GlobalAveragePooling2D())
model.add(Dense(5, activation='softmax'))
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adamax(learning_rate=0.01), metrics=['acc'])
history = model.fit(
train_generator,
steps_per_epoch=BATCH_SIZE,
epochs=20,
validation_data=valid_generator,
batch_size=BATCH_SIZE
)
model.save('ResNet50.h5')
test_img_path = data_path+"test_images/2216849948.jpg"
img = cv2.imread(test_img_path)
resized_img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)).reshape(-1, IMG_SIZE, IMG_SIZE, 3)/255
plt.figure(figsize=(8,4))
plt.title("TEST IMAGE")
plt.imshow(resized_img[0])
preds = []
ss = pd.read_csv(data_path+'sample_submission.csv')
for image in ss.image_id:
img = tf.keras.preprocessing.image.load_img(data_path+'test_images/' + image)
img = tf.keras.preprocessing.image.img_to_array(img)
img = tf.keras.preprocessing.image.smart_resize(img, (IMG_SIZE, IMG_SIZE))
img = tf.reshape(img, (-1, IMG_SIZE, IMG_SIZE, 3))
prediction = model.predict(img/255)
preds.append(np.argmax(prediction))
my_submission = pd.DataFrame({'image_id': ss.image_id, 'label': preds})
my_submission.to_csv('submission.csv', index=False)
# Submission file ouput
print("Submission File: \n---------------\n")
print(my_submission.head()) # Predicted Output
###Output
_____no_output_____ |
models/Korean_multisentiment_classifier_KoBERT.ipynb | ###Markdown
###Code
# Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
# Install the required modules
!pip install mxnet-cu101
!pip install gluonnlp pandas tqdm
!pip install sentencepiece==0.1.85
!pip install transformers==2.1.1
!pip install torch # originally ==1.3.1
# Install the KoBERT model released by SKT
!pip install git+https://[email protected]/SKTBrain/KoBERT.git@master
###Output
_____no_output_____
###Markdown
1. Loading and preprocessing the data
###Code
import pandas as pd
sad = pd.read_excel('/content/drive/My Drive/data/tweet_list_슬픔 1~5000.xlsx')
happy = pd.read_excel('/content/drive/My Drive/data/tweet_list_기쁨 labeling_완료.xlsx')
annoy = pd.read_excel('/content/drive/My Drive/data/tweet_list_짜증_완료.xlsx')
fear = pd.read_excel('/content/drive/My Drive/data/tweet_list_공포_완료.xlsx')
sad2 = pd.read_csv('/content/drive/My Drive/data/추가_슬픔.csv')
happy2 = pd.read_csv('/content/drive/My Drive/data/추가_기쁨.csv')
annoy2 = pd.read_csv('/content/drive/My Drive/data/추가_분노.csv')
fear2 = pd.read_csv('/content/drive/My Drive/data/추가_공포1.txt', encoding='utf8')
sad
# Preprocessing function
def preprocessing(data, label):
import re
    dt = data['raw_text'].copy() # select only the sentence column
    dt = dt.dropna() # drop missing values
    dt = dt.drop_duplicates() # drop duplicates
sentences = dt.tolist()
new_sent=[]
for i in range(len(sentences)):
sent = sentences[i]
        if type(sent) != str: # handle sentences that are not str
sent = str(sent)
        if len(sent) < 2: continue # keep only sentences of length 2 or more
sent = re.sub('ㅋㅋ+','ㅋㅋ',sent)
sent = re.sub('ㅠㅠ+','ㅠㅠ',sent)
sent = re.sub('ㅇㅇ+','ㅇㅇ',sent)
sent = re.sub('ㄷㄷ+','ㄷㄷ',sent)
sent = re.sub('ㅎㅎ+','ㅎㅎ',sent)
sent = re.sub('ㅂㅂ+','ㅂㅂ',sent)
sent = re.sub(';;;+',';;',sent)
sent = re.sub('!!!+','!!',sent)
sent = re.sub('~+','~',sent)
sent = re.sub('[?][?][?]+','??',sent)
sent = re.sub('[.][.][.]+','...',sent)
sent = re.sub('[-=+,#/:$@*\"※&%ㆍ』\\‘|\(\)\[\]\<\>`\'…》]','',sent)
new_sent.append(sent)
dt = pd.DataFrame(pd.Series(new_sent), columns=['raw_text'])
dt['emotion'] = label
return dt
sad = preprocessing(sad, '슬픔')
sad2 = preprocessing(sad2, '슬픔')
happy = preprocessing(happy, '기쁨')
happy2 = preprocessing(happy2, '기쁨')
annoy = preprocessing(annoy, '분노')
annoy2 = preprocessing(annoy2, '분노')
fear = preprocessing(fear, '공포')
fear2 = preprocessing(fear2, '공포')
for i in [sad, happy, annoy, fear]:
    print('First-pass labeling results', i['emotion'][0], len(i))
for i in [sad2, happy2, annoy2, fear2]:
    print('Second-pass labeling results', i['emotion'][0], len(i))
print('Smallest class (fear):', len(fear)+len(fear2))
## After checking the data counts, balance the number of samples per emotion for training.
sad_3 = sad[:1400]
happy_3 = happy[:800]
annoy_3 = annoy[:2400]
# Since each emotion's keyword data comes in chunks of about 1000 tweets, the 1000 tweets of the last keyword are held out as evaluation data
sentence_train = pd.concat([sad_3, happy_3, annoy_3, fear, sad2[:-1000], annoy2[:-1000], happy2[:-1000], fear2[:-1000]], axis=0, ignore_index=True)
sentence_eval = pd.concat([sad2[-1000:], annoy2[-1000:], happy2[-1000:], fear2[-1000:]], axis=0, ignore_index=True)
for i in ['슬픔','기쁨','분노','공포']:
print('sentence_train',i,len(sentence_train[sentence_train['emotion'] == i]))
print('-------------------------')
for i in ['슬픔','기쁨','분노','공포']:
print('sentence_eval',i,len(sentence_eval[sentence_eval['emotion'] == i]))
# Convert to the format expected by the model
def label(x):
if x=='슬픔': return 0.0
elif x=='기쁨': return 1.0
elif x=='분노': return 2.0
elif x=='공포': return 3.0
else: return x
sentence_train["emotion"] = sentence_train["emotion"].apply(label)
dtls = [list(sentence_train.iloc[i,:]) for i in range(len(sentence_train))]
dtls[:10] # the format is now unified
###Output
_____no_output_____
###Markdown
2. Preparing the model inputs
###Code
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
import numpy as np
from tqdm import tqdm, tqdm_notebook
from tqdm.notebook import tqdm
from kobert.utils import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model
from transformers import AdamW
from transformers.optimization import WarmupLinearSchedule
## When using a GPU
device = torch.device("cuda:0")
bertmodel, vocab = get_pytorch_kobert_model()
# Use the KoBERT tokenizer
tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)
class BERTDataset(Dataset):
def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, max_len,
pad, pair):
transform = nlp.data.BERTSentenceTransform(
bert_tokenizer, max_seq_length=max_len, pad=pad, pair=pair)
self.sentences = [transform([i[sent_idx]]) for i in dataset]
self.labels = [np.int32(i[label_idx]) for i in dataset]
def __getitem__(self, i):
return (self.sentences[i] + (self.labels[i], ))
def __len__(self):
return (len(self.labels))
class BERTClassifier(nn.Module):
def __init__(self,
bert,
hidden_size = 768,
num_classes=4,
dr_rate=None,
params=None):
super(BERTClassifier, self).__init__()
self.bert = bert
self.dr_rate = dr_rate
self.classifier = nn.Linear(hidden_size , num_classes)
if dr_rate:
self.dropout = nn.Dropout(p=dr_rate)
def gen_attention_mask(self, token_ids, valid_length):
attention_mask = torch.zeros_like(token_ids)
for i, v in enumerate(valid_length):
attention_mask[i][:v] = 1
return attention_mask.float()
def forward(self, token_ids, valid_length, segment_ids):
attention_mask = self.gen_attention_mask(token_ids, valid_length)
_, pooler = self.bert(input_ids = token_ids, token_type_ids = segment_ids.long(), attention_mask = attention_mask.float().to(token_ids.device))
if self.dr_rate:
out = self.dropout(pooler)
return self.classifier(out)
###Output
_____no_output_____
###Markdown
3. Training
###Code
## Setting parameters
max_len = 64
batch_size = 64
warmup_ratio = 0.1
num_epochs = 1
max_grad_norm = 1
log_interval = 200
learning_rate = 5e-5
# Split into train, validation, and test sets
from sklearn.model_selection import train_test_split
dataset_train, dataset_test = train_test_split(dtls, test_size=0.2, random_state=123)
data_train = BERTDataset(dataset_train, 0, 1, tok, max_len, True, False)
data_test = BERTDataset(dataset_test, 0, 1, tok, max_len, True, False)
train_dataloader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, num_workers=5)
test_dataloader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, num_workers=5)
# Build the model and move it to the GPU
model = BERTClassifier(bertmodel, dr_rate=0.5).to(device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
# Set up the optimizer and loss function
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()
t_total = len(train_dataloader) * num_epochs
warmup_step = int(t_total * warmup_ratio)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_step, t_total=t_total)
# Helper function to compute accuracy
def calc_accuracy(X,Y):
max_vals, max_indices = torch.max(X, 1)
train_acc = (max_indices == Y).sum().data.cpu().numpy()/max_indices.size()[0]
return train_acc
# Training loop
for e in range(num_epochs):
train_acc = 0.0
test_acc = 0.0
model.train()
for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm(train_dataloader)):
optimizer.zero_grad()
token_ids = token_ids.long().to(device)
segment_ids = segment_ids.long().to(device)
valid_length= valid_length
label = label.long().to(device)
out = model(token_ids, valid_length, segment_ids)
loss = loss_fn(out, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
train_acc += calc_accuracy(out, label)
if batch_id % log_interval == 0:
print("epoch {} batch id {} loss {} train acc {}".format(e+1, batch_id+1, loss.data.cpu().numpy(), train_acc / (batch_id+1)))
print("epoch {} train acc {}".format(e+1, train_acc / (batch_id+1)))
    model.eval() # evaluation on the held-out split
for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm(test_dataloader)):
token_ids = token_ids.long().to(device)
segment_ids = segment_ids.long().to(device)
valid_length= valid_length
label = label.long().to(device)
out = model(token_ids, valid_length, segment_ids)
test_acc += calc_accuracy(out, label)
print("epoch {} test acc {}".format(e+1, test_acc / (batch_id+1)))
'''
# Save the trained model for later use
torch.save(model.state_dict(), 'drive/My Drive//kobert_ending_finale.pt')
'''
###Output
_____no_output_____
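###Markdown
Before the batched evaluation below, here is a minimal sketch of how the fine-tuned model can classify a single new sentence. It reuses `tok`, `max_len`, `model`, `device` and `BERTDataset` defined above; the example sentence and the dummy label 0 are arbitrary.
###Code
# Classify one sentence with the fine-tuned model (illustrative sketch)
def predict_emotion(sentence):
    idx2emotion = {0: '슬픔', 1: '기쁨', 2: '분노', 3: '공포'}
    data = BERTDataset([[sentence, 0]], 0, 1, tok, max_len, True, False)  # dummy label 0
    loader = torch.utils.data.DataLoader(data, batch_size=1)
    model.eval()
    with torch.no_grad():
        for token_ids, valid_length, segment_ids, label in loader:
            out = model(token_ids.long().to(device), valid_length, segment_ids.long().to(device))
            return idx2emotion[int(torch.argmax(out, dim=1))]

predict_emotion('오늘 정말 행복한 하루였어')  # "Today was a really happy day"; should return '기쁨' if training went well
###Output
_____no_output_____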
###Markdown
4. Evaluation
###Code
##################################################
# Convert the evaluation test set to the model input format
sentence_eval["emotion"] = sentence_eval["emotion"].apply(label)
dtls_eval = [list(sentence_eval.iloc[i,:]) for i in range(len(sentence_eval))]
data_test = BERTDataset(dtls_eval, 0, 1, tok, max_len, True, False)
test_dataloader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, num_workers=5)
# Run classification on the evaluation data
model.eval()
answer=[]
train_acc = 0.0
test_acc = 0.0
for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(test_dataloader)):
token_ids = token_ids.long().to(device)
segment_ids = segment_ids.long().to(device)
valid_length= valid_length
label = label.long().to(device)
out = model(token_ids, valid_length, segment_ids)
max_vals, max_indices = torch.max(out, 1)
answer.append(max_indices.cpu().clone().numpy())
test_acc += calc_accuracy(out, label)
print('Accuracy: ', test_acc / (batch_id+1))
# Save the file in the submission format
ls = []
for i in answer:
ls.extend(i)
pred = pd.DataFrame(ls, columns=['Predicted'])
df = pd.concat([sentence_eval['raw_text'], pred['Predicted'], sentence_eval['emotion']], axis=1)
def test(x):
if x==0.0: return '슬픔'
elif x==1.0: return '기쁨'
elif x==2.0: return '분노'
elif x==3.0: return '공포'
else: return x
df["Predicted"] = df["Predicted"].apply(test)
df["emotion"] = df["emotion"].apply(test)
for i in ['슬픔','기쁨','분노','공포']:
    print(i, 'count', len(df[df['emotion'] == i]))
    print('correctly predicted', len(df[df['emotion'] == i][df['Predicted'] == i]))
    print('accuracy', len(df[df['emotion'] == i][df['Predicted'] == i])/len(df[df['emotion'] == i]))
###Output
_____no_output_____ |
edx-stochastic-data-analysis/downloaded_files/04/.ipynb_checkpoints/Stochastic_Processes_week04_3-checkpoint.ipynb | ###Markdown
Stochastic Processes: Data Analysis and Computer Simulation Brownian motion 2: computer simulation -Making Animations- Note 1- In the previous plot, we wrote and used a very simple Python code to simulate the motion of Brownian particles.- Although the code is enough to produce trajectory data that can be used for later analysis, the strong graphics capability of the Jupyter notebook allows us to perform simulations with on-the-fly animations quite easily.- Today, I will show you how to take advantage of this graphics capability by modifying our previous simulation code to display the results in real time. Simulation code with on-the-fly animation Import libraries
###Code
% matplotlib nbagg
import numpy as np # import numpy library as np
import matplotlib.pyplot as plt # import pyplot library as plt
import matplotlib.mlab as mlab # import mlab module to use MATLAB commands with the same names
import matplotlib.animation as animation # import animation modules from matplotlib
from mpl_toolkits.mplot3d import Axes3D # import Axes3D from mpl_toolkits.mplot3d
plt.style.use('ggplot') # use "ggplot" style for graphs
###Output
_____no_output_____
###Markdown
Note 2- As always, we begin by importing the necessary numerical and plotting libraries.- Compared to the previous code example, we import two additional libraries, the `mlab` and `animation` modules from the `matplotlib` library. Define `init` function for `FuncAnimation`
###Code
def init():
global R,V,W,Rs,Vs,Ws,time
R[:,:] = 0.0 # initialize all the variables to zero
V[:,:] = 0.0 # initialize all the variables to zero
W[:,:] = 0.0 # initialize all the variables to zero
Rs[:,:,:] = 0.0 # initialize all the variables to zero
Vs[:,:,:] = 0.0 # initialize all the variables to zero
Ws[:,:,:] = 0.0 # initialize all the variables to zero
time[:] = 0.0 # initialize all the variables to zero
title.set_text(r'') # empty title
line.set_data([],[]) # set line data to show the trajectory of particle n in 2d (x,y)
line.set_3d_properties([]) # add z-data separately for 3d plot
particles.set_data([],[]) # set position current (x,y) position data for all particles
particles.set_3d_properties([]) # add current z data of particles to get 3d plot
return particles,title,line # return listed objects that will be drawn by FuncAnimation
###Output
_____no_output_____
###Markdown
Note 3- For this lesson, we will perform a simulation of Brownian particles and we wish to see how their positions evolve in time. In addition, we want to visualize the trajectory of one chosen particle, to see how it moves in space.- The easiest way to animate your data in python is to use the "FuncAnimation" function provided by matplotlib.- To use this, we must define two basic functions that tell the library how to update and animate our data.- The first of these functions is "init". As its name implies, it is used to initialize the figure. - "init" will only be called once, at the beginning of the animation procedure.- It should define the different objects or "artists" that will be drawn.- Notice how we declare global variables explicitly in the function definition.- This allows us to modify variables which are declared outside of the function.- R,V,W will contain the current position,velocity and Wiener increment for each of the particles- Rs,Vs,Ws the corresponding values for all time steps- time will contain the time values.- We initialize all the variables to zero- We will define three different objects to draw, "particles", "line", and "title".- "particles" is used to display the particles as points in 3d space- "line" is used to display the trajectory of a given particle- "title" is used to display the current time- Here, the particles and line data are just empty arrays and time is set as an empty string.- These three objects will be modified later, when we call the "animate" function Define `animate` function for `FuncAnimation`
###Code
def animate(i):
global R,V,W,Rs,Vs,Ws,time # define global variables
time[i]=i*dt # store time in each step in an array time
    W = std*np.random.randn(nump,dim) # generate an array of random forces according to Eqs.(F10) and (F11)
V = V*(1-zeta/m*dt)+W/m # update velocity via Eq.(F9)
R = R+V*dt # update position via Eq.(F5)
Rs[i,:,:]=R # accumulate particle positions at each step in an array Rs
    Vs[i,:,:]=V # accumulate particle velocities at each step in an array Vs
Ws[i,:,:]=W # accumulate random forces at each step in an array Ws
title.set_text(r"t = "+str(time[i])) # set the title to display the current time
line.set_data(Rs[:i+1,n,0],Rs[:i+1,n,1]) # set the line in 2D (x,y)
line.set_3d_properties(Rs[:i+1,n,2]) # add z axis to set the line in 3D
particles.set_data(R[:,0],R[:,1]) # set the current position of all the particles in 2d (x,y)
particles.set_3d_properties(R[:,2]) # add z axis to set the particle in 3D
return particles,title,line # return listed objects that will be drawn by FuncAnimation
###Output
_____no_output_____
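###Markdown
For reference, the velocity and position updates coded in `animate` above can be written compactly as $$V_{n+1} = V_n\left(1-\frac{\zeta}{m}\Delta t\right) + \frac{\Delta W_n}{m},\qquad R_{n+1} = R_n + V_{n+1}\Delta t,\qquad \Delta W_n \sim \mathcal{N}\left(0,\; 2k_BT\zeta\Delta t\right)\ \text{per component},$$ which is simply a restatement of Eqs. (F5), (F9) and (F11) referenced in the code comments.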
###Markdown
Note 4- The "animate" function is the main function used by "FuncAnimation". It is called at every step in order to update the figure and create the animation.- Thus, the animate procedure should be responsible for performing the integration in time. It updates the positions and velocities by propagating the solution to the Langevin equation over $\Delta t$. - After the updated configuration is found, we update the trajectory variables Rs, Vs, and Ws.- Next, we update the objects in our animation.- We set the title to display the current time- We set the line, which displays the trajectory of particle n, to contain all the x, y, and z points until step i- Finally, we set the current position of all the particles to be R- It is important that animate, as well as init, return the objects that are redrawn (in this case particles, title, line)- Notice how we used "n" even though it was not declared as global; this is because we never tried to modify its value, we only read it and never wrote to it. Set parameters and initialize variables
###Code
dim = 3 # system dimension (x,y,z)
nump = 1000 # number of independent Brownian particles to simulate
nums = 1024 # number of simulation steps
dt = 0.05 # set time increment, \Delta t
zeta = 1.0 # set friction constant, \zeta
m = 1.0 # set particle mass, m
kBT  = 1.0 # set temperature, k_B T
std = np.sqrt(2*kBT*zeta*dt) # calculate std for \Delta W via Eq.(F11)
np.random.seed(0) # initialize random number generator with a seed=0
R = np.zeros([nump,dim]) # array to store current positions and set initial condition Eq.(F12)
V = np.zeros([nump,dim]) # array to store current velocities and set initial condition Eq.(F12)
W = np.zeros([nump,dim]) # array to store current random forcces
Rs = np.zeros([nums,nump,dim]) # array to store positions at all steps
Vs = np.zeros([nums,nump,dim]) # array to store velocities at all steps
Ws = np.zeros([nums,nump,dim]) # array to store random forces at all steps
time = np.zeros([nums]) # an array to store time at all steps
###Output
_____no_output_____
###Markdown
Note 5- Here, we define the parameters of our simulations.- We will work in 3d, with 1000 particles.- We use a time step of 0.05 and will simulate over a total of 1024 steps.- We set the friction constant, mass, and thermal energy equal to one.- We define the standard deviation of the Wiener process in order to satisfy the fluctuation dissipation theorem.- Finally, we create the necessary arrays. R,V,W will store the current position, velocity, and Wiener updates for each of the 1000 particles.- Rs,Vs,Ws will store the corresponding values for all 1024 time steps.- and the time array will contain the time value for each step Perform and animate the simulation using `FuncAnimation`
###Code
fig = plt.figure(figsize=(10,10)) # set fig with its size 10 x 10 inch
ax = fig.add_subplot(111,projection='3d') # creates an additional axis to the standard 2D axes
box = 40 # set draw area as box^3
ax.set_xlim(-box/2,box/2) # set x-range
ax.set_ylim(-box/2,box/2) # set y-range
ax.set_zlim(-box/2,box/2) # set z-range
ax.set_xlabel(r"x",fontsize=20) # set x-lavel
ax.set_ylabel(r"y",fontsize=20) # set y-lavel
ax.set_zlabel(r"z",fontsize=20) # set z-lavel
ax.view_init(elev=12,azim=120) # set view point
particles, = ax.plot([],[],[],'ro',ms=8,alpha=0.5) # define object particles
title = ax.text(-180.,0.,250.,r'',transform=ax.transAxes,va='center') # define object title
line, = ax.plot([],[],[],'b',lw=1,alpha=0.8) # define object line
n = 0 # trajectory line is plotted for the n-th particle
anim = animation.FuncAnimation(fig,func=animate,init_func=init,
frames=nums,interval=5,blit=True,repeat=False)
## If you have ffmpeg installed on your machine
## you can save the animation by uncomment the last line
## You may install ffmpeg by typing the following command in command prompt
## conda install -c menpo ffmpeg
##
# anim.save('movie.mp4',fps=50,dpi=100)
###Output
_____no_output_____ |
week_07/Evaluating_Forecasts.ipynb | ###Markdown
Evaluating Forecasts
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
# Set figure size to (14,6)
plt.rcParams['figure.figsize'] = (14,6)
###Output
_____no_output_____
###Markdown
Step 1 - Load the Data
###Code
flights = pd.read_csv('flights_train.csv', index_col=0, parse_dates=True)
flights.head()
# Inspect the size of the data
flights.shape
flights.describe()
flights.info()
###Output
_____no_output_____
###Markdown
Plot the data
###Code
def plot_flights(df, title='Monthly Passenger Numbers in 1000 over Time', ylim=True):
'''
Custom plotting function for plotting the flights dataset
Parameters
----------
df : pd.DataFrame
The data to plot.
title : str
The title of the plot
ylim : bool
Whether to fix the minimum value of y; default is True
Returns
-------
Plots the data
'''
df.plot()
plt.title(title)
plt.ylabel('# of Passengers in 1000')
if ylim:
plt.ylim(ymin=0)
plt.show()
plot_flights(flights)
###Output
_____no_output_____
###Markdown
Step 2 - Clean the Data - this may be 80% of your job as a DS!!Fortunately we do not have to do that in the case of the flights data. Step 3 - Extract the Timestep and the Seasonal Dummies for the whole Dataset
###Code
# Create a timestep variable - if you had missing values or dirty data, then the below assumption wouldn't hold
#flights['timestep'] = list(range(len(flights)))
flights['timestep'] = range(len(flights))
flights.head()
###Output
_____no_output_____
###Markdown
Q: Why can we use Matthias' suggestion of a range object instead of a list built from a range object? A: A range object is evaluated lazily (much like a generator)* A range object can produce a list of numbers, but in its nascent state it isn't a list* to extract a list from a range object, you need to pull the values out of it* how do you pull values out of a range object? - you need to iterate over them
###Code
iterator = iter(range(len(flights)))
#we can run this cell 131 (len(flights)) times before we hit an error
next(iterator)
flights.head()
# Q: why does pandas accept list(range(len(flights))) or range(len(flights)) ?
# A: I don't exactly know, but there'll be something like the below in the pandas codebase somewhere
# (illustrative sketch only, not the real pandas implementation)
def make_a_column(input_):
    if type(input_) == list:
        return pd.Series(input_)        # make a column of that list
    elif type(input_) == range:
        return pd.Series(list(input_))  # iterate over the range object, collect the values into a list and proceed
# Create the seasonal dummies
seasonal_dummies = pd.get_dummies(flights.index.month,
prefix='month',
drop_first=True).set_index(flights.index)
flights = flights.join(seasonal_dummies)
flights.head()
###Output
_____no_output_____
###Markdown
Q: what does drop_first=True do? A: let's think about 3 breakfast drinks* coffee* tea* water
###Code
df1 = pd.get_dummies(['coffee', 'tea', 'water'])
df1
df2 = pd.get_dummies(['coffee', 'tea', 'water'], drop_first=True)
df2.columns= ['tea', 'water_or_coffee']
df2
###Output
_____no_output_____
###Markdown
4) Train-Test-SplitFortunately not necessary for the flights data.* How would you train-test split a time series? Would you use train_test_split from sklearn, or some other method?* You can't use a random splitter; you can use a chronological time-series split, or you can do it manually. 5) Model the Trend_Seasonal model
###Code
# Define X and y
X = flights.drop(columns=['passengers'])
y = flights['passengers']
# Create and fit the model
m = LinearRegression()
m.fit(X, y)
# Create a new column with the predictions of the trend_seasonal model
flights['trend_seasonal'] = m.predict(X)
flights.head()
###Output
_____no_output_____
###Markdown
Plot the original data and preliminary model
###Code
plot_flights(flights[['passengers', 'trend_seasonal']])
###Output
_____no_output_____
###Markdown
6) - Extract the remainder
###Code
# (Aside: a fast Fourier transform is another way to decompose a time series into subcomponents)
# We want to extract the part of the model that the trend_seasonal is not able to explain
flights['remainder'] = flights['passengers'] - flights['trend_seasonal']
plot_flights(flights['remainder'], title='Remainder after modelling trend and seasonality', ylim=False)
###Output
_____no_output_____
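###Markdown
One way to inspect the remainder just plotted is to look at its autocorrelation and partial autocorrelation; a quick sketch using statsmodels (assumed to be installed alongside the other libraries):
###Code
# Inspect the autocorrelation structure of the remainder (sketch)
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf

plot_acf(flights['remainder'], lags=24);
plot_pacf(flights['remainder'], lags=24);
plt.show()
###Output
_____no_output_____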
###Markdown
7) - Inspect the remainder to decide how many lags to includeFor now, I will include one lag only - you might want to look at autocorrelations (for example, the ACF/PACF sketch above) to help you 8) - Add the lags of the remainder to the training data
###Code
flights['lag1'] = flights['remainder'].shift(1)
flights.dropna(inplace=True)
flights.head()
###Output
_____no_output_____
###Markdown
9) Run the full model
###Code
# Assign X
X_full = flights.drop(columns=['passengers', 'trend_seasonal', 'remainder'])
y_full = flights['passengers']
X_full.head()
m_full = LinearRegression()
m_full.fit(X_full, y_full)
# Create a new predictions column
flights['predictions_full_model'] = m_full.predict(X_full)
###Output
_____no_output_____
###Markdown
10) - Plot the prediction vs passengers for the training data
###Code
plot_flights(flights[['passengers', 'trend_seasonal', 'predictions_full_model']])
###Output
_____no_output_____
###Markdown
Is this model good? 11) - Evaluate our modelWe want to understand how well our model would work on data it has not been trained on. We can get an estimate of that by using cross-validation.Cross-validation so far:- Divide the training data into subsets (folds)- in each iteration, single out one fold as the validation set- train on the remaining training data and evaluate the fit on the validation set.Cross-validation for time series:- Divide the training data into subsets (folds)- in the first iteration, train on the first fold and evaluate on the second fold- in the second iteration, train on the first and second folds and evaluate on the third fold- ...
###Code
# Create a TimeSeriesSplit object
ts_split = TimeSeriesSplit(n_splits=5)
ts_split.split(X_full, y_full)
# Split the training data into folds
for i, (train_index, validation_index) in enumerate(ts_split.split(X_full, y_full)):
print(f'The training data for the {i+1}th iteration are the observations {train_index}')
print(f'The validation data for the {i+1}th iteration are the observations {validation_index}')
print()
# Create the time series split
time_series_split = ts_split.split(X_full, y_full)
# Do the cross validation
result = cross_val_score(estimator=m_full, X=X_full, y=y_full, cv=time_series_split)
result
result.mean()
result_ordinary_cv = cross_val_score(estimator=m_full, X=X_full, y=y_full, cv=5)
result_ordinary_cv
result_ordinary_cv.mean()
###Output
_____no_output_____
###Markdown
--- I'm talking about 2 different things when I talk about metrics* Cost function - is the fuel for gradient descent* Score on the data - how you evaluate a fitted model* Cost - MSE* Score - R^2 Evaluation Metrics Cost 1. Mean-Squared-Error (MSE)$\frac{1}{n} \sum (y_t - \hat{y_t}) ^2$ Advantages:- Is widely implemented Disadvantages:- Strong penalty on outliers - preprocess to remove outliers (what is an outlier?)- Unit hardly interpretable- Not comparable across models with different units 2. Mean Absolute Error (MAE)$\frac{1}{n} \sum |y_t - \hat{y}_t|$ Advantages:- Error is in the unit of interest- Does not overly value outliers Disadvantages:- Ranges from 0 to infinity- Not comparable across models with different units 3. Root-Mean-Squared-Error (RMSE)$\sqrt{\frac{1}{n} \sum (y_t - \hat{y_t}) ^2}$ Advantages:- Errors in the unit of interest- Does not overly value outliers Disadvantages:- Can only be compared between models whose errors are measured in the same unit 4. Mean Absolute Percent Error (MAPE)$\frac{1}{n} \sum |\frac{y_t - \hat{y}_t}{y_t}| * 100$ Advantages:- Comparable over different models Disadvantages:- Is not defined for 0 values 5. Root Mean Squared Log Error (RMSLE)$\sqrt{\frac{1}{n} \sum (\log(y_t + 1) - \log(\hat{y_t} + 1)) ^2}$ Advantages:- Captures relative error- Penalizes underestimation more strongly than overestimation Score 6. $R^2$$1 - \frac{\sum{(y_i - \hat{y_i})^2}}{\sum{(y_i - \bar{y})^2}}$ 7. $R_{adj}^2$$1 - (1-R^2)\frac{n-1}{n-p-1} $* n = no. of data points* p = no. of features ---
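To make some of these differences concrete, here is a tiny comparison on made-up numbers (not the flights data). Both forecasts miss every point by 20 in absolute terms, yet RMSLE comes out larger for the under-forecast, which illustrates the asymmetry claimed above; MAPE's zero problem is not shown because dividing by a true value of 0 is simply undefined.
###Code
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error

y_true = np.array([100.0, 200.0, 300.0])
under  = y_true - 20   # under-forecast by 20 everywhere
over   = y_true + 20   # over-forecast by 20 everywhere

for name, y_pred in [('under', under), ('over', over)]:
    mae   = mean_absolute_error(y_true, y_pred)
    rmse  = np.sqrt(mean_squared_error(y_true, y_pred))
    mape  = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
    rmsle = np.sqrt(mean_squared_log_error(y_true, y_pred))
    print(f'{name}: MAE={mae:.1f}  RMSE={rmse:.1f}  MAPE={mape:.1f}%  RMSLE={rmsle:.4f}')
# MAE, RMSE and MAPE are identical for both forecasts; only RMSLE distinguishes them (larger when under-forecasting)
###Output
_____no_output_____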
###Code
from sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error, r2_score
# paraphrased from Stack Overflow - link to follow
def adj_r2(df, r2_score, y_test, y_pred):
adj_r2 = (1 - (1 - r2_score(y_test,y_pred)) * ((df.shape[0] - 1) /
(df.shape[0] - df.shape[1] - 1)))
return adj_r2
mses = []
maes = []
rmse = []
mape = []
rmsle = []
r2 = []
ar2 = []
for i, (train_index, validation_index) in enumerate(ts_split.split(X_full, y_full)):
model = LinearRegression()
model.fit(X_full.iloc[train_index], y_full.iloc[train_index])
ypred = model.predict(X_full.iloc[validation_index])
mses.append(mean_squared_error(y_full.iloc[validation_index], ypred))
maes.append(mean_absolute_error(y_full.iloc[validation_index], ypred))
rmse.append(np.sqrt(mean_squared_error(y_full.iloc[validation_index], ypred)))
mape.append(sum(abs((y_full.iloc[validation_index] - ypred) / y_full.iloc[validation_index])) * 100 / len(y_full.iloc[validation_index]))
rmsle.append(np.sqrt(mean_squared_log_error(y_full.iloc[validation_index], ypred)))
r2.append(r2_score(y_full.iloc[validation_index], ypred))
ar2.append(adj_r2(X_full,r2_score,y_full.iloc[validation_index], ypred))
#create a descriptive index labelling each time-series split %
index = [f'{x}%' for x in range(20,120,20)]
evaluations = pd.DataFrame(dict(mse=mses, mae=maes, rmse=rmse, mape=mape, rmsle=rmsle, r2=r2, adj_r2=ar2), index=index)
evaluations
###Output
_____no_output_____ |
Examples/Text_Classification_with_ArabicTransformer_with_PyTorchXLA_on_TPU_or_with_PyTorch_on_GPU.ipynb | ###Markdown
**Text Classification with ArabicTransformer and TPU*** First, you need to activate the TPU by going to Runtime -> Change Runtime Type -> TPU.* This example was tested with HuggingFace Transformers library version v4.11.2. If you experience any issues, roll back to this version.* This example uses PyTorch XLA, a library that allows you to run PyTorch code on TPU by having PyTorch XLA in the middle. You may find that the pre-processing of the dataset is slow the first time you run the code, but this is only for the first time. If you change the batch size, the pre-processing will be slow again, so try to fix the batch size every time you do a grid search for the best hyperparameters.* In our paper, we use the original implementation of Funnel Transformer (PyTorch) (https://github.com/laiguokun/Funnel-Transformer) and a V100 GPU, which is no longer provided for Google Colab Pro users. We will update you later on our modified code of the Funnel Transformer library. In the meantime, you need to find the best hyperparameters here and not rely on our settings in this notebook, since the implementation is different from our paper. However, our current set of hyperparameters in this example is still close to what we reported in our paper. You may also get better results with our model than what we reported if you extend the grid search (:* You can easily run this code on GPU with O2 mixed precision by just changing the runtime to GPU and replacing this line in the fine-tuning code ```!python /content/transformers/examples/pytorch/xla_spawn.py --num_cores=8 transformers/examples/pytorch/text-classification/run_glue.py ```with ```!python transformers/examples/pytorch/text-classification/run_glue.py```* The new PyTorch library (>1.6) allows you to use Automatic Mixed Precision (AMP) without APEX since it is part of the native PyTorch library.* This example is based on the GLUE fine-tuning example from the HuggingFace team, but it can work with any text classification task and can be used to fine-tune any Arabic language model uploaded to the HuggingFace Hub here https://huggingface.co/models . A text classification task is one where we have a sentence and a label, as in sentiment analysis tasks. You just need to name the header of the first sentence that you want to classify sentence1 and the label column "label". If you want to classify two sentences, then name the first sentence sentence1 and the other one sentence2 (see the sketch below).* When you use PyTorch XLA, you should be aware that the effective batch size will be batch_size*8 since we have 8 cores on the TPU. In this example, we choose a batch size of 4 to get a final batch size of 32.* We did not include language models that use pre-segmentation (FARASA), such as AraBERTv2, in the list of models below. You can do the pre-segmentation yourself using the code that AUB Mind published here https://github.com/aub-mind/arabert. Then use our code to fine-tune AraBERTv2 or similar models.* If the model scale is changed (small, base, large) or the architecture is different (Funnel, BERT, ELECTRA, ALBERT), you need to change your hyperparameters. Evaluating all models with the same hyperparameters across different scales and architectures is bad practice when reporting results.
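For example, for a two-sentence task the training CSV just needs the headers sentence1, sentence2 and label; a minimal sketch (the raw column names and the toy row below are hypothetical):
###Code
import pandas as pd

# Hypothetical raw data with two input sentences per example; only the final column names matter
raw = pd.DataFrame({'premise': ['جملة أولى'], 'hypothesis': ['جملة ثانية'], 'stance': ['POS']})
pair_df = raw.rename(columns={'premise': 'sentence1', 'hypothesis': 'sentence2', 'stance': 'label'})
pair_df.to_csv('train_pair.csv', index=False)  # then point --train_file / --validation_file at files like this
###Output
_____no_output_____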
###Code
!git clone https://github.com/huggingface/transformers
!pip3 install -e transformers
!pip3 install -r transformers/examples/pytorch/text-classification/requirements.txt
!pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl
import pandas as pd
!rm -r /content/data
!mkdir -p data/raw/scarcasmv2
!mkdir -p data/scarcasmv2
!wget -O data/raw/scarcasmv2/dev.csv https://raw.githubusercontent.com/iabufarha/ArSarcasm-v2/main/ArSarcasm-v2/testing_data.csv
!wget -O data/raw/scarcasmv2/train.csv https://raw.githubusercontent.com/iabufarha/ArSarcasm-v2/main/ArSarcasm-v2/training_data.csv
df = pd.read_csv(r'data/raw/scarcasmv2/train.csv', header=0,escapechar='\n',usecols = [0,2],names=["sentence1", "label"])
df.to_csv('data/scarcasmv2/train.csv',index=False)
df.to_csv('data/scarcasmv2/train.tsv',sep='\t',index=False)
df = pd.read_csv(r'data/raw/scarcasmv2/dev.csv', header=0, escapechar='\n',usecols = [0,2],names=["sentence1", "label"])
df.to_csv('data/scarcasmv2/dev.csv',index=False)
df.to_csv('data/scarcasmv2/dev.tsv',sep='\t',index=False)
import pandas as pd
from sklearn.metrics import f1_score,classification_report,accuracy_score
def calc_scarcasm(y_pred,y_true):
y_pred=pd.read_csv(y_pred, sep='\t',header=None,usecols=[1] )
y_true=pd.read_csv(y_true,usecols=[1],header=None)
print("Accur Score:",accuracy_score(y_true, y_pred)*100)
print("F1 PN Score:",f1_score(y_true, y_pred,labels=['NEG','POS'],average="macro")*100)
print("########################### Full Report ###########################")
print(classification_report(y_true, y_pred,digits=4,labels=['NEG','POS'] ))
###Output
_____no_output_____
###Markdown
**ArabicTransformer Small (B4-4-4)**
###Code
import os
model= "sultan/ArabicTransformer-small" #@param ["sultan/ArabicTransformer-small","sultan/ArabicTransformer-intermediate","sultan/ArabicTransformer-large","aubmindlab/araelectra-base-discriminator","asafaya/bert-base-arabic","aubmindlab/bert-base-arabertv02","aubmindlab/bert-base-arabert", "aubmindlab/bert-base-arabertv01","kuisailab/albert-base-arabic","aubmindlab/bert-large-arabertv02"]
task= "scarcasmv2" #@param ["scarcasmv2"]
seed= "42" #@param ["42", "123", "1234","12345","666"]
batch_size = 4 #@param {type:"slider", min:4, max:128, step:4}
learning_rate = "3e-5"#@param ["1e-4", "3e-4", "1e-5","3e-5","5e-5","7e-5"]
epochs_num = 2 #@param {type:"slider", min:1, max:50, step:1}
max_seq_length= "256" #@param ["128", "256", "384","512"]
os.environ['batch_size'] = str(batch_size)
os.environ['learning_rate'] = str(learning_rate)
os.environ['epochs_num'] = str(epochs_num)
os.environ['task'] = str(task)
os.environ['model'] = str(model)
os.environ['max_seq_length'] = str(max_seq_length)
os.environ['seed'] = str(seed)
!python /content/transformers/examples/pytorch/xla_spawn.py --num_cores=8 transformers/examples/pytorch/text-classification/run_glue.py --model_name_or_path $model \
--train_file data/$task/train.csv \
--validation_file data/$task/dev.csv \
--test_file data/$task/dev.csv \
--output_dir output_dir/$task \
--overwrite_cache \
--seed $seed \
--overwrite_output_dir \
--logging_steps 1000000 \
--max_seq_length $max_seq_length \
--per_device_train_batch_size $batch_size \
--learning_rate $learning_rate \
--warmup_ratio 0.1 \
--num_train_epochs $epochs_num \
--save_steps 50000 \
--do_train \
--do_predict
calc_scarcasm('/content/output_dir/scarcasmv2/predict_results_None.txt','/content/data/scarcasmv2/dev.csv')
###Output
Accur Score: 69.97667444185271
F1 PN Score: 72.46443739729156
########################### Full Report ###########################
precision recall f1-score support
NEG 0.7741 0.8050 0.7892 1677
POS 0.5886 0.7513 0.6600 575
micro avg 0.7191 0.7913 0.7535 2252
macro avg 0.6813 0.7782 0.7246 2252
weighted avg 0.7267 0.7913 0.7563 2252
|
Lab7/Lab7.ipynb | ###Markdown
They are not the same, which means that the approximation doesn't exactly match the model that I've used. But it is very close, since the model is a Poisson with a high mean, which is close to a Gaussian, and the approximation assumes a Gaussian.
###Code
mass_cut = [180, 150, 140, 135, 130]
for i in mass_cut:
print(f'mass cut: {i}')
cut_qcd = qcd[qcd['mass'] < i]
cut_higgs = higgs[higgs['mass'] < i]
n_qcd = 2000/len(qcd)*len(cut_qcd)
n_higgs = 50/len(higgs)*len(cut_higgs)
print(f'N_qcd: {n_qcd:0.3f} N_higgs: {n_higgs:0.3f}')
theory_sigma = theory(n_qcd, n_higgs)
approx_sigma = approximation(n_qcd, n_higgs)
print(f'theory sigma: {theory_sigma:.3f} approximate sigma: {approx_sigma:.3f}\n')
keys = ['pt', 'eta', 'phi', 'mass', 'ee2', 'ee3', 'd2', 'angularity', 't1',
't2', 't3', 't21', 't32', 'KtDeltaR']
title = ['No Cut', 'Mass Cut']
normalization_higgs = 50/len(higgs)
normalization_qcd = 2000/len(qcd)
cut_qcd = qcd[qcd['mass']<140]
cut_higgs = higgs[higgs['mass']<140]
def get_ylims(y1, y2, y3, y4):
all_y = np.hstack((y1, y2, y3, y4))
ymax = all_y.max()+10
ymin = all_y.min()
#print(all_y)
return ymax, ymin
fig, ax = plt.subplots(14, 2, figsize = (20,140))
for i in range(len(keys)):
#for i in range(1):
hist1 = ax[i,0].hist(qcd[keys[i]], weights = np.ones(len(qcd))*normalization_qcd, bins = 50, histtype = 'step' ,label = 'QCD');
hist2 = ax[i,0].hist(higgs[keys[i]], weights = np.ones(len(higgs))*normalization_higgs, bins = hist1[1], histtype = 'step' ,label = 'Higgs');
hist3 = ax[i,1].hist(cut_qcd[keys[i]], weights = np.ones(len(cut_qcd))*normalization_qcd, bins = hist1[1], histtype = 'step' , label = 'QCD');
hist4 = ax[i,1].hist(cut_higgs[keys[i]], weights = np.ones(len(cut_higgs))*normalization_higgs, bins = hist1[1], histtype = 'step', label = 'Higgs');
#print(hist1[0], hist2[0], hist3[0], hist4[0])
ymax, ymin = get_ylims(hist1[0], hist2[0], hist3[0], hist4[0])
#print(ymin, ymax)
for k in range(len(title)):
ax[i,k].set_ylim(ymin, ymax)
ax[i,k].set_title(title[k])
ax[i,k].set_ylabel('Normalized Counts')
ax[i,k].set_xlabel(keys[i])
ax[i,k].legend()
plt.show()
t21_cut = [0.6, 0.5, 0.4, 0.3]
for i in t21_cut:
print(f't12 cut: {i}')
cut2_qcd = cut_qcd[cut_qcd['t21'] < i]
cut2_higgs = cut_higgs[cut_higgs['t21'] < i]
n_qcd = 2000/len(qcd)*len(cut2_qcd)
n_higgs = 50/len(higgs)*len(cut2_higgs)
print(f'N_qcd: {n_qcd:0.3f} N_higgs: {n_higgs:0.3f}')
theory_sigma = theory(n_qcd, n_higgs)
approx_sigma = approximation(n_qcd, n_higgs)
print(f'theory sigma: {theory_sigma:.3f} approximate sigma: {approx_sigma:.3f}\n')
keys = ['pt', 'eta', 'phi', 'mass', 'ee2', 'ee3', 'd2', 'angularity', 't1',
't2', 't3', 't21', 't32', 'KtDeltaR']
#title = ['No Cut', 'Mass Cut', 't21 Cut']
title = ['Mass Cut', 't21 Cut']
normalization_higgs = 50/len(higgs)
normalization_qcd = 2000/len(qcd)
cut_qcd = qcd[qcd['mass']<140]
cut_higgs = higgs[higgs['mass']<140]
cut2_qcd = cut_qcd[cut_qcd['t21'] < 0.6]
cut2_higgs = cut_higgs[cut_higgs['t21'] < 0.6]
def get_ylims(y3, y4, y5, y6):
all_y = np.hstack((y3, y4, y5, y6))
ymax = all_y.max()+5
ymin = all_y.min()
#print(all_y)
return ymax, ymin
fig, ax = plt.subplots(14, 2, figsize = (20,140))
for i in range(len(keys)):
#hist1 = ax[i,0].hist(qcd[keys[i]], weights = np.ones(len(qcd))*normalization_qcd, bins = 50, histtype = 'step', label = 'QCD');
#hist2 = ax[i,0].hist(higgs[keys[i]], weights = np.ones(len(higgs))*normalization_higgs, bins = hist1[1], histtype = 'step', label = 'Higgs');
hist3 = ax[i,0].hist(cut_qcd[keys[i]], weights = np.ones(len(cut_qcd))*normalization_qcd, bins = 50, histtype = 'step', label = 'QCD');
hist4 = ax[i,0].hist(cut_higgs[keys[i]], weights = np.ones(len(cut_higgs))*normalization_higgs, bins = hist3[1], histtype = 'step', label = 'Higgs');
hist5 = ax[i,1].hist(cut2_qcd[keys[i]], weights = np.ones(len(cut2_qcd))*normalization_qcd, bins = hist3[1], histtype = 'step', label = 'QCD');
hist6 = ax[i,1].hist(cut2_higgs[keys[i]], weights = np.ones(len(cut2_higgs))*normalization_higgs, bins = hist3[1], histtype = 'step', label = 'Higgs');
#ymax, ymin = get_ylims(hist1[0], hist2[0], hist3[0], hist4[0], hist5[0], hist6[0])
ymax, ymin = get_ylims(hist3[0], hist4[0], hist5[0], hist6[0])
for k in range(len(title)):
ax[i,k].set_ylim(ymin, ymax)
ax[i,k].set_title(title[k])
ax[i,k].set_ylabel('Normalized Counts')
ax[i,k].set_xlabel(keys[i])
ax[i,k].legend()
plt.show()
ktdeltar_cut = [0.1, 0.2]
for i in ktdeltar_cut:
print(f'ktdeltar cut: {i}')
cut3_qcd = cut2_qcd[cut2_qcd['KtDeltaR'] > i]
cut3_higgs = cut2_higgs[cut2_higgs['KtDeltaR'] > i]
n_qcd = 2000/len(qcd)*len(cut3_qcd)
n_higgs = 50/len(higgs)*len(cut3_higgs)
print(f'N_qcd: {n_qcd:0.3f} N_higgs: {n_higgs:0.3f}')
theory_sigma = theory(n_qcd, n_higgs)
approx_sigma = approximation(n_qcd, n_higgs)
print(f'theory sigma: {theory_sigma:.3f} approximate sigma: {approx_sigma:.3f}\n')
keys = ['pt', 'eta', 'phi', 'mass', 'ee2', 'ee3', 'd2', 'angularity', 't1',
't2', 't3', 't21', 't32', 'KtDeltaR']
title = ['Mass and t21 Cut', '+ KtDeltaR Cut']
normalization_higgs = 50/len(higgs)
normalization_qcd = 2000/len(qcd)
cut_qcd = qcd[qcd['mass']<140]
cut_higgs = higgs[higgs['mass']<140]
cut2_qcd = cut_qcd[cut_qcd['t21'] < 0.6]
cut2_higgs = cut_higgs[cut_higgs['t21'] < 0.6]
cut3_qcd = cut2_qcd[cut2_qcd['KtDeltaR'] > 0.2]
cut3_higgs = cut2_higgs[cut2_higgs['KtDeltaR'] > 0.2]
def get_ylims(y1, y2, y3, y4):
all_y = np.hstack((y1, y2, y3, y4))
ymax = all_y.max()+1
ymin = all_y.min()
#print(all_y)
return ymax, ymin
fig, ax = plt.subplots(14, 2, figsize = (20,140))
for i in range(len(keys)):
hist1 = ax[i,0].hist(cut2_qcd[keys[i]], weights = np.ones(len(cut2_qcd))*normalization_qcd, bins = 50, histtype = 'step', label = 'QCD');
hist2 = ax[i,0].hist(cut2_higgs[keys[i]], weights = np.ones(len(cut2_higgs))*normalization_higgs, bins = hist1[1], histtype = 'step', label = 'Higgs');
hist3 = ax[i,1].hist(cut3_qcd[keys[i]], weights = np.ones(len(cut3_qcd))*normalization_qcd, bins = hist1[1], histtype = 'step', label = 'QCD');
hist4 = ax[i,1].hist(cut3_higgs[keys[i]], weights = np.ones(len(cut3_higgs))*normalization_higgs, bins = hist1[1], histtype = 'step', label = 'Higgs');
ymax, ymin = get_ylims(hist1[0], hist2[0], hist3[0], hist4[0])
for k in range(len(title)):
ax[i,k].set_ylim(ymin, ymax)
ax[i,k].set_title(title[k])
ax[i,k].set_ylabel('Normalized Counts')
ax[i,k].set_xlabel(keys[i])
ax[i,k].legend()
plt.show()
###Output
_____no_output_____
###Markdown
Overall, I chose the cuts: mass < 140, t21 < 0.6, and KtDeltaR > 0.2. These cuts give a sigma of around 5. Testing out some supervised learning:
###Code
keys = ['pt', 'eta', 'phi', 'mass', 'ee2', 'ee3', 'd2', 'angularity', 't1',
't2', 't3', 't21', 't32', 'KtDeltaR']
X = pd.concat([higgs, qcd], ignore_index = True)
Y = np.hstack((np.ones(len(higgs)), np.zeros(len(qcd))))
print(X.shape, Y.shape)
clf1 = RandomForestClassifier(n_estimators = 10)
clf1 = clf1.fit(X,Y)
# Sort features by importance; argsort keeps the key/importance pairing intact
order = np.argsort(clf1.feature_importances_)
for i in order:
    print(f'{keys[i]}: {clf1.feature_importances_[i]:.3f}')
X = pd.concat([higgs, qcd], ignore_index = True)
Y = np.hstack((np.ones(len(higgs)), np.zeros(len(qcd))))
fig, ax = plt.subplots(figsize = (10,10))
ax.hist2d(X['t3'], X['t21'], bins = 50)
ax.set_xlabel('t3')
ax.set_ylabel('t21')
plt.show()
from matplotlib.colors import ListedColormap
X = pd.concat([higgs.loc[:, ['t3', 't21']], qcd.loc[:,['t3', 't21']]]).to_numpy()
Y = np.hstack((np.ones(len(higgs)), np.zeros(len(qcd))))
cmap = plt.cm.RdBu
clf2 = RandomForestClassifier(n_estimators = 10)
clf2 = clf2.fit(X,Y)
#take bounds
xmin, xmax = X[:, 0].min()-1, X[:, 0].max()+1
ymin, ymax = X[:, 1].min()-1, X[:, 1].max()+1
xgrid = np.arange(xmin, xmax, 0.1)
ygrid = np.arange(ymin, ymax, 0.1)
xx, yy = np.meshgrid(xgrid, ygrid)
# make predictions for the grid
Z = clf2.predict(np.c_[xx.ravel(), yy.ravel()])
# reshape the predictions back into a grid
zz = Z.reshape(xx.shape)
# plot the grid of x, y and z values as a surface
fig, ax = plt.subplots(figsize = (10,10))
ax.contourf(xx, yy, zz, cmap = cmap)
ax.scatter(
X[:, 0],
X[:, 1],
c=Y,
cmap=ListedColormap(["r", "b"]),
edgecolor="k",
s=20,
)
ax.set_xlabel('t3')
ax.set_ylabel('t21')
plt.show()
###Output
_____no_output_____
###Markdown
Computational Mathematics Home Lab No. 7 Andrey Derzhavin, B01-909 Problem X.9.3 
###Code
import numpy as np
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Description of the method We have the Rayleigh equation:$$\frac{d^2x}{dt^2} - \mu \left( 1 - \left(\frac{dx}{dt}\right)^2\right)\frac{dx}{dt} + x = 0$$with initial conditions:$$x(0) = 0, \:\:\:\:\:\:\:\:\:\:\:\: \dot{x}(0) = 0.001$$Introducing the substitution $y = \frac{dx}{dt}$, we pass to the system:$$\left\lbrace\begin{matrix} \frac{dx}{dt} &=& y\\ \frac{dy}{dt} &=& \mu \left( 1 - y^2\right)y - x\end{matrix}\right.$$with initial conditions:$$x(0) = 0, \:\:\:\:\:\:\:\:\:\:\:\: y(0) = 0.001$$For convenience, denote$$\overrightarrow{u} = \left[\begin{matrix} x\\y\end{matrix} \right], \:\:\:\:\:\:\:\:\:\:\:\:\overrightarrow{u_0} = \left[\begin{matrix} x(0)\\y(0)\end{matrix} \right] = \left[\begin{matrix} 0\\0.001\end{matrix} \right]$$$$f\left(\overrightarrow{u}\right) = \left[\begin{matrix}y\\\mu \left( 1 - y^2\right)y - x\end{matrix}\right]$$Then our system takes its final form:$$\dot{\overrightarrow{u}} = f\left(\overrightarrow{u}\right)$$ We will use the Rosenbrock method with the following formulas:$$\overrightarrow{u_{n+1}} = \overrightarrow{u_{n}} + \tau \cdot\Re\left(\overrightarrow{k}\right)\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:\left( E - \frac{1+j}{2}\tau f_u(\overrightarrow{y_n}, t)\right) \overrightarrow{k} = f\left(\overrightarrow{y_n}, t + \frac{\tau}{2}\right)$$Since the system is autonomous:$$\left( E - \frac{1+j}{2}\tau f_u(\overrightarrow{y_n})\right) \overrightarrow{k} = f(\overrightarrow{y_n})$$where $E$ is the identity matrix, $j$ is the imaginary unit, and $f_u$ is the Jacobian matrix of the system Implementation
###Code
# service class
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return f'({self.x}, {self.y})'
def __repr__(self):
return f'({self.x}, {self.y})'
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point(self.x - other.x, self.y - other.y)
def __mul__(self, num):
return Point(self.x * num, self.y * num)
    def __truediv__(self, num):
return Point(self.x / num, self.y / num)
x0 = 0
xdot0 = 0.001
p0 = Point(x0, xdot0)
c_p = Point(0, 0)
mu = 1000
T_k = 10000 # final time
def func(time, u:Point) -> Point:
return Point(u.y, mu * (1 - u.y * u.y) * u.y - u.x)
def jac(time, u:Point):
return np.matrix([
[0, 1],
[-1, mu * (1 - 3 * u.y * u.y)]
])
def Rosenbrock(t_end, p0:Point, tau):
u_prev = p0
u = [u_prev]
times = np.arange(0, t_end + tau, tau)
for t_n in times[1:]:
# matrix
mat = np.identity(2) - tau * (1 + 1j) / 2 * jac(t_n, u_prev)
col = func(t_n, u_prev)
k = np.real(np.linalg.solve(mat, [col.x, col.y]))
k = Point(k[0], k[1])
u_next = u_prev + k * tau
u_prev = u_next
u.append(u_next)
return u, times
def SolveNPrint(time, start_p:Point, center_p:Point, method, step = 1e-3):
u, ts = method(time, start_p, step)
u = np.array(u) - center_p
plt.figure(figsize=[20, 10])
    plt.title(f'Time dependence $x(t)$')
plt.plot(ts, [i.x for i in u], 'b.')
plt.xlabel('$t$', fontsize=20)
plt.ylabel('$x(t)$', fontsize=20)
plt.grid()
plt.show()
plt.figure(figsize=[20, 10])
    plt.title(f'Phase trajectory $y(x)$')
plt.plot([i.x for i in u], [i.y for i in u], "b.")
plt.xlabel('$x$', fontsize=20)
plt.ylabel('$y$', fontsize=20)
plt.grid()
SolveNPrint(T_k, p0, c_p, Rosenbrock)
SolveNPrint(5000, p0, c_p, Rosenbrock)
###Output
_____no_output_____
###Markdown
**Table of contents*** [PCFG lib](lib) * [Span](span)* [CKY+](cky+) * [Item](item) * [Agenda](agenda) * [Inference rules](inference-rules) * [Deduction](deduction)* [PCFG recap](pcfg)* [Inside](inside) * [Semirings](semirings) **Table of Exercises*** Theory (9 points) * [Exercise 7-1](ex7-1) * [Exercise 7-2](ex7-2) * [Exercise 7-3](ex7-3) * [Exercise 7-4](ex7-4) * [Exercise 7-5](ex7-5) * Practicals (26 points) * [Exercise 7-6](ex7-6) * [Exercise 7-7](ex7-7) * [Exercise 7-8](ex7-8)* Bonus (see below for information about points) * Theory: [Exercise 7-9](ex7-9) * Practical: [Exercise 7-10](ex7-10) **General notes*** In this notebook you are expected to use $\LaTeX$* Use python3.* Use NLTK to read annotated data.* **Document your code**: TAs are more likely to understand the steps if you document them. If you don't, it's also difficult to give you partial points for exercises that are not completely correct.* This document contains 2 optional exercises worth bonus points. PCFG libWe are going to use the basic objects defined in the last lab* Symbol, Terminal, and Nonterminal* Rule, and CFGCheck the file `pcfglib.py` where you will find these definitions.
###Code
from pcfglib import Symbol, Terminal, Nonterminal, Rule, CFG
###Output
_____no_output_____
###Markdown
SpanFor convenience, we will define one more type of Symbol, this will be a Span. A Span is just a Nonterminal decorated with two integers which represent a half-open interval $(i, j]$, that is:* start (exclusive) of phrase* end (inclusive) of phraseIt is very easy to define such Span class by inheriting from Nonterminal.
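For instance, once the class below is defined, a quick usage sketch (using the `Nonterminal` imported above):

```python
NP = Nonterminal('NP')
np_span = Span(NP, 0, 2)      # an NP phrase covering words x_1 and x_2
np_span.base_nonterminal      # the undecorated [NP] nonterminal
np_span.span                  # (0, 2)
```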
###Code
class Span(Nonterminal):
def __init__(self, nonterminal: Nonterminal, start: int, end: int):
"""
:param nonterminal: a Nonterminal category
:param start: start position of the phrase (exclusive)
:param end: end position of the phrase (inclusive)
"""
if not isinstance(nonterminal, Nonterminal):
raise ValueError('Only a Nonterminal can make a span')
super(Span, self).__init__('%s:%d-%d' % (nonterminal.category, start, end))
self._base_nonterminal = nonterminal
self._span = (start, end)
@property
def base_nonterminal(self) -> Nonterminal:
"""Returns the base nonterminal: the Nonterminal without span information"""
return self._base_nonterminal
@property
def start(self):
"""Begin of the span (open)"""
return self._span[0]
@property
def end(self):
"""End of the span (closed)"""
return self._span[1]
@property
def span(self):
"""Returns _span"""
return self._span
###Output
_____no_output_____
###Markdown
The function definition below constructs our running example PCFG. Note that it returns both the CFG object and the cpds.As in the previous lab a collection of cpds is stored in a dictionary such that ```cpds[lhs]``` is a dictionary mapping from rules that rewrite that LHS symbol to their probability values.
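For instance, once the grammar below has been constructed, an individual rule probability can be looked up like this (a small illustration; the value 0.4 comes from the definition below):

```python
G, cpds = get_toy_pcfg()
NP, DT, NN = Nonterminal('NP'), Nonterminal('DT'), Nonterminal('NN')
cpds[NP][Rule(NP, [DT, NN])]   # -> 0.4
```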
###Code
from collections import defaultdict
def get_toy_pcfg():
# Some symbols
S = Nonterminal('S')
NP = Nonterminal('NP')
VP = Nonterminal('VP')
PP = Nonterminal('PP')
NN = Nonterminal('NN')
Vt = Nonterminal('Vt')
Vi = Nonterminal('Vi')
DT = Nonterminal('DT')
IN = Nonterminal('IN')
CC = Nonterminal('CC')
# Grammar
G = CFG(S)
cpds = defaultdict(lambda: defaultdict(float))
# Phrasal rules
G.add(Rule(S, [NP, VP]))
cpds[S][Rule(S, [NP, VP])] = 1.0
G.add(Rule(NP, [DT, NN]))
G.add(Rule(NP, [NN]))
G.add(Rule(NP, [NP, PP]))
G.add(Rule(NP, [NP, CC, NP]))
cpds[NP][Rule(NP, [DT, NN])] = 0.4
cpds[NP][Rule(NP, [NN])] = 0.1
cpds[NP][Rule(NP, [NP, PP])] = 0.3
cpds[NP][Rule(NP, [NP, CC, NP])] = 0.2
G.add(Rule(VP, [Vt, NP]))
G.add(Rule(VP, [VP, PP]))
G.add(Rule(VP, [Vi]))
G.add(Rule(VP, [VP, CC, VP]))
cpds[VP][Rule(VP, [Vt, NP])] = 0.3
cpds[VP][Rule(VP, [VP, PP])] = 0.4
cpds[VP][Rule(VP, [Vi])] = 0.2
cpds[VP][Rule(VP, [VP, CC, VP])] = 0.1
G.add(Rule(PP, [IN, NP]))
cpds[PP][Rule(PP, [IN, NP])] = 1.
# Preterminal rules
G.add(Rule(NN, [Terminal('dog')]))
G.add(Rule(NN, [Terminal('cat')]))
G.add(Rule(NN, [Terminal('man')]))
G.add(Rule(NN, [Terminal('telescope')]))
cpds[NN][Rule(NN, [Terminal('dog')])] = 0.3
cpds[NN][Rule(NN, [Terminal('cat')])] = 0.2
cpds[NN][Rule(NN, [Terminal('man')])] = 0.4
cpds[NN][Rule(NN, [Terminal('telescope')])] = 0.1
G.add(Rule(DT, [Terminal('the')]))
G.add(Rule(DT, [Terminal('a')]))
cpds[DT][Rule(DT, [Terminal('the')])] = 0.6
cpds[DT][Rule(DT, [Terminal('a')])] = 0.4
G.add(Rule(CC, [Terminal('and')]))
G.add(Rule(CC, [Terminal(',')]))
cpds[CC][Rule(CC, [Terminal('and')])] = 0.8
cpds[CC][Rule(CC, [Terminal(',')])] = 0.2
G.add(Rule(IN, [Terminal('with')]))
G.add(Rule(IN, [Terminal('through')]))
G.add(Rule(IN, [Terminal('within')]))
cpds[IN][Rule(IN, [Terminal('with')])] = 0.5
cpds[IN][Rule(IN, [Terminal('through')])] = 0.3
cpds[IN][Rule(IN, [Terminal('within')])] = 0.2
G.add(Rule(Vt, [Terminal('saw')]))
G.add(Rule(Vt, [Terminal('barked')]))
G.add(Rule(Vt, [Terminal('meowed')]))
G.add(Rule(Vt, [Terminal('moved')]))
cpds[Vt][Rule(Vt, [Terminal('saw')])] = 0.4
cpds[Vt][Rule(Vt, [Terminal('barked')])] = 0.3
cpds[Vt][Rule(Vt, [Terminal('meowed')])] = 0.2
cpds[Vt][Rule(Vt, [Terminal('moved')])] = 0.1
G.add(Rule(Vi, [Terminal('barked')]))
G.add(Rule(Vi, [Terminal('ran')]))
G.add(Rule(Vi, [Terminal('meowed')]))
cpds[Vi][Rule(Vi, [Terminal('barked')])] = 0.2
cpds[Vi][Rule(Vi, [Terminal('ran')])] = 0.7
cpds[Vi][Rule(Vi, [Terminal('meowed')])] = 0.1
return G, cpds
###Output
_____no_output_____
###Markdown
Let's inspect our grammar
###Code
G, cpds = get_toy_pcfg()
print(G)
###Output
[S] -> [NP] [VP]
[NP] -> [DT] [NN]
[NP] -> [NN]
[NP] -> [NP] [PP]
[NP] -> [NP] [CC] [NP]
[PP] -> [IN] [NP]
[VP] -> [Vt] [NP]
[VP] -> [VP] [PP]
[VP] -> [Vi]
[VP] -> [VP] [CC] [VP]
[CC] -> 'and'
[CC] -> ','
[DT] -> 'the'
[DT] -> 'a'
[IN] -> 'with'
[IN] -> 'through'
[IN] -> 'within'
[NN] -> 'dog'
[NN] -> 'cat'
[NN] -> 'man'
[NN] -> 'telescope'
[Vi] -> 'barked'
[Vi] -> 'ran'
[Vi] -> 'meowed'
[Vt] -> 'saw'
[Vt] -> 'barked'
[Vt] -> 'meowed'
[Vt] -> 'moved'
###Markdown
as well as our cpds
###Code
for lhs, cpd in cpds.items():
for rule, prob in cpd.items():
print(prob, rule)
###Output
0.4 [VP] -> [VP] [PP]
0.2 [VP] -> [Vi]
0.1 [VP] -> [VP] [CC] [VP]
0.3 [VP] -> [Vt] [NP]
0.3 [NN] -> 'dog'
0.1 [NN] -> 'telescope'
0.2 [NN] -> 'cat'
0.4 [NN] -> 'man'
0.7 [Vi] -> 'ran'
0.1 [Vi] -> 'meowed'
0.2 [Vi] -> 'barked'
1.0 [PP] -> [IN] [NP]
1.0 [S] -> [NP] [VP]
0.2 [Vt] -> 'meowed'
0.4 [Vt] -> 'saw'
0.1 [Vt] -> 'moved'
0.3 [Vt] -> 'barked'
0.4 [NP] -> [DT] [NN]
0.3 [NP] -> [NP] [PP]
0.2 [NP] -> [NP] [CC] [NP]
0.1 [NP] -> [NN]
0.2 [IN] -> 'within'
0.5 [IN] -> 'with'
0.3 [IN] -> 'through'
0.6 [DT] -> 'the'
0.4 [DT] -> 'a'
0.8 [CC] -> 'and'
0.2 [CC] -> ','
###Markdown
CKY+ In this section we will implement a generalised CKY algorithm which can deal with an arbitrary epsilon-free CFG.We will implement the parsing strategy **for you** to guarantee that it is correct. The focus of this lab is on the **inside recursion**. An extra will involve implementing a different parsing strategy; for that, some of the data structures we will develop here are indeed very useful, thus take this as a learning opportunity and try to reuse some code if you decide to implement the extra.There will be nonetheless questions throughout this lab, so stay tuned.Again we will use a deductive system to describe the parsing strategy:\begin{align}\text{Item} &\qquad [i, X \rightarrow \alpha_\blacksquare \, \bullet \, \beta_\square, j] \\\text{Goal} &\qquad [1, S \rightarrow \beta_\blacksquare \, \bullet, n] \\\text{Axioms} &\qquad [i, X \rightarrow \bullet \alpha_\square, i] &~\text{ for all } X \rightarrow \alpha \in \mathcal R \\\text{Scan} &\qquad \frac{[i, X \rightarrow \alpha_\blacksquare \, \bullet \, x_{j+1} \, \beta_\square, j]}{[i, X \rightarrow \alpha_\blacksquare \, x_{j+1} \bullet \, \beta_\square, j + 1]} \\\text{Complete} &\qquad \frac{[i, X \rightarrow \alpha_\blacksquare \, \bullet \, Y \, \beta_\square ,k] [k, Y \rightarrow \gamma_\blacksquare \, \bullet , j]}{[i, X \rightarrow \alpha_\blacksquare \, Y_{k,j} \, \bullet \, \beta_\square , j]}\end{align} **Exercise 7-1** **[1 point]** Explain the meaning of an item (make sure to discuss all elements in it). - An item is a representation of a segment of sentence $x_1^n = \{x_1,...,x_n\}$ which spans from $i$ to $j$- $X \rightarrow \alpha \, \beta \in \mathcal R$ corresponds to a rule - But because the grammar is a general CFG (not necessarily in CNF), $\alpha$ and $\beta$ don't correspond to one single (Non)Terminal but to a subset of the RHS - $\alpha$ corresponds to the part of the RHS that has been scanned - $\beta$ corresponds to the part of the RHS that hasn't been scanned- $\blacksquare$ represents the spans of all the elements of $\alpha$ and is moved when the complete rule is used- $\bullet$ represents the position of the "word scanner" which checks whether a preterminal rule can be used that matches the next word in the sentence **Exercise 7-2** **[1 point]** Explain the goal of the program `Typo in Goal item: i should be 0, not 1`- The goal of the program is to have scanned all words in $x_1^n$ ($\bullet$ to the right) and know for each symbol in $\beta$ what its span is ($\blacksquare$ to the right).- Goal item spans (0,n] **Exercise 7-3** **[1 point]** Explain the axioms of the program `Typo in Axiom item: should be S not X, and i and j should be 0`- The axioms are the starting points of the deduction: one item per rule and start position, with the dot at the beginning of the RHS (nothing scanned yet). **Exercise 7-4** **[1 point]** Explain SCAN (make sure to discuss all elements of the rule) **Exercise 7-5** **[1 point]** Explain the COMPLETE rule including all of its elements including the side condition. 
The actual **deduction** is nothing but an exhaustive enumeration of valid items* we start from axioms* and proceed by either scanning or completing previously derived items* each such operation creates additional items* if these items were not yet discovered, they make it to what we call an **agenda*** the agenda is much like a queue of items yet to be processed* processing an item means simply giving it the chance to participate in scan and complete* we should be careful to never process an item twice under the same premises * items that are yet to be processed are called **active items*** items already processed are called **passive items*** at the end there should be no active item and many passive items* parsing is possible if we derive/prove/reach the goal item* the complete items in the passive set can be used to derive a **parse forest*** a parse forest is much like a CFG but its rules have symbols which are decorated with spans indicating how they parse the input sentence* we can use parse forests to answer questions such as: what trees can parse the sentence? And when we introduce PCFGs, we will be able to answer questions such as: what's the best tree that parses the sentence? what's the total probability value of the sentence (once we marginalise all possible parse trees). Now we turn to implementation, which will require a few classes and data structures, but we will discuss them one by one. ItemWe have to start by turning items into code!We are using dotted rules to represent items in CKY+. A dotted rule is basically a container for * a context-free production* a list of positions already covered in the input sentence * together this represents the start and end position as well as the black squares in the item This is an item formally\begin{align}\qquad [i, X \rightarrow \alpha_\blacksquare \, \bullet \, \beta_\square, j]\end{align} and this is how we realise it in our implementation [LHS -> RHS, [i...j]]the first element of the pair is the rule `LHS -> RHS` and the second is a list of positions where the dot has been.
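As a concrete example (using the toy grammar above and the sentence parsed later in this notebook, *the man saw the dog with a telescope*): the formal item $[2, VP \rightarrow Vt_\blacksquare \, \bullet \, NP, 3]$ — a VP whose $Vt$ part already covers *saw* — is realised by the `Item` class defined below as `Item(Rule(VP, [Vt, NP]), [2, 3])`, i.e. the rule together with the dot positions 2 and 3.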
###Code
class Item:
"""
A dotted rule used in CKY
We store a rule and a list of positions (which we call `dots` because they indicate
positions where the dots have been)
We make an Item a hashable object so that we can use it in dictionaries.
"""
def __init__(self, rule: Rule, dots: list):
if len(dots) == 0:
raise ValueError('I do not accept an empty list of dots')
self._rule = rule
self._dots = tuple(dots)
def __eq__(self, other: 'Item'):
"""Two items are identical if they contain the same rule and cover the same positions"""
return self._rule == other._rule and self._dots == other._dots
def __hash__(self):
"""We let python hash the two objects that represent an Item"""
return hash((self._rule, self._dots))
def __str__(self):
return '[{0}, {1}]'.format(self._rule, self._dots)
def __repr__(self):
return str(self)
@property
def lhs(self):
return self._rule.lhs
@property
def rule(self):
return self._rule
@property
def dot(self):
return self._dots[-1]
@property
def start(self):
return self._dots[0]
@property
def next(self):
"""return the symbol to the right of the dot (or None, if the item is complete)"""
if self.is_complete():
return None
return self._rule.rhs[len(self._dots) - 1]
def state(self, i):
return self._dots[i]
def advance(self, dot):
"""return a new item with an extended sequence of dots"""
return Item(self._rule, self._dots + (dot,))
def is_complete(self):
"""complete items are those whose dot reached the end of the RHS sequence"""
return len(self._rule.rhs) + 1 == len(self._dots)
###Output
_____no_output_____
###Markdown
Let's play a bit with item objects to see how they work
###Code
r = Rule(Nonterminal('S'), [Nonterminal('X')])
i1 = Item(r, [0])
i2 = i1.advance(1)
print(i1)
print(i2)
i1 != i2
i1.is_complete()
i2.is_complete()
i1.next
i2.next
###Output
_____no_output_____
###Markdown
AgendaNext we need an agenda of items. In CKY+ we have to track quite a bit of information, so we will design a more complex agenda. Because there will be a lot of functionality, we will use a class. In an agenda, some items are active, others are passive.Functionally, the active agenda is nothing but a stack or queue, whereas the passive agenda is simply a set (all items that have already been processed). However, to make our inferences run faster, we can further organise the passive items for easy/quick access within inference rules.
###Code
from collections import deque, defaultdict
class Agenda:
"""
An Agenda for CKY+.
The agenda will organise a queue of active items as well as a set of passive items.
This agenda is such that it does not push an item twice into the queue
that is equivalent to saying that the agenda is capable of maintaining a set of already discovered items.
This agenda will also organise the passive items for quick access in the COMPLETE rule.
This means we will store complete and incomplete items separately and hash them by some useful criterion.
A complete item essentially contributes to further advancing incomplete items.
Incomplete items need to be further completed.
"""
def __init__(self):
# we are organising active items in a stack (last in first out)
self._active = deque([])
# an item should never queue twice, thus we will manage a set of items which we have already seen
self._discovered = set()
# Passive items may be complete
# in which case they help us complete other items
# and they may be incomplete
# in which case we will be trying to complete them
# In order to make COMPLETE inferences easier, we will separate passive items into these two groups
# and we will also organise each group conveniently.
# We organise incomplete items by the symbols they wait for at a certain position
# that is, if the key is a pair (Y, i)
# the value is a set of items of the form
# [X -> alpha * Y beta, [...i]]
# in other words "items waiting for a Y to project a span from i"
self._incomplete = defaultdict(set)
# We organise complete items by their LHS symbol spanning from a certain position
# if the key is a pair (X, i)
# then the value is a set of items of the form
# [X -> gamma *, [i ... j]]
self._complete = defaultdict(set)
def __len__(self):
"""return the number of active items"""
return len(self._active)
def push(self, item: Item):
"""push an item into the queue of active items"""
if item not in self._discovered: # if an item has been seen before, we simply ignore it
self._active.append(item)
self._discovered.add(item)
return True
return False
def pop(self):
"""pop an active item"""
if len(self._active) == 0:
raise ValueError('I have no items left')
return self._active.pop()
def make_passive(self, item: Item):
if item.is_complete(): # complete items offer a way to rewrite a certain LHS from a certain position
self._complete[(item.lhs, item.start)].add(item)
else: # incomplete items are waiting for the completion of the symbol to the right of the dot
self._incomplete[(item.next, item.dot)].add(item)
def waiting(self, symbol: Symbol, dot: int):
return self._incomplete.get((symbol, dot), set())
def complete(self, lhs: Nonterminal, start: int):
return self._complete.get((lhs, start), set())
def itercomplete(self):
"""an iterator over complete items in arbitrary order"""
for items in self._complete.values():
for item in items:
yield item
###Output
_____no_output_____
###Markdown
Let's see how this works
###Code
A = Agenda()
r1 = Rule(Nonterminal('S'), [Nonterminal('S'), Nonterminal('X')])
r1
###Output
_____no_output_____
###Markdown
we can push items into the agenda
###Code
A.push(Item(r1, [0])) # S -> S X, [0] (earley axiom)
###Output
_____no_output_____
###Markdown
and the agenda will make sure there are no duplicates
###Code
A.push(Item(r1, [0]))
len(A)
i1 = Item(r1, [0])
i1
A.make_passive(i1)
A.push(Item(Rule(Nonterminal('S'), [Nonterminal('X')]), [0]))
A.make_passive(Item(Rule(Nonterminal('S'), [Nonterminal('X')]), [0]))
A.push(Item(Rule(Nonterminal('S'), [Nonterminal('X')]), [0, 1]))
A.make_passive(Item(Rule(Nonterminal('S'), [Nonterminal('X')]), [0, 1]))
list(A.itercomplete())
###Output
_____no_output_____
###Markdown
Inference rules Basic axiomsFor every rule X -> alpha, and every input position (i) between 0 and n-1, we have an item of the kind:\begin{equation}[i, X \rightarrow \bullet \alpha_\square, i]\end{equation}In our implementation an axiom looks like this [X -> alpha, [i]]
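For instance, with the toy grammar above: the rule $NP \rightarrow DT\ NN$ and start position $i = 0$ give the axiom $[0, NP \rightarrow \bullet\, DT\ NN, 0]$, which the implementation below represents as `Item(Rule(NP, [DT, NN]), [0])`.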
###Code
def axioms(cfg: CFG, sentence: list):
"""
:params cfg: a context-free grammar (an instance of WCFG)
:params sentence: the input sentence (as a list or tuple)
:returns: a list of items
"""
items = []
for rule in cfg:
for i, x in enumerate(sentence):
# We will implement a tiny optimisation here
# For rules that start with terminals we can use "look ahead"
if isinstance(rule.rhs[0], Terminal):
# this is a mechanism by which we avoid constructing items which we know cannot be scanned
                # that is, the terminal that starts the rule does not occur in the sentence we are parsing
if rule.rhs[0] == x:
items.append(Item(rule, [i]))
else:
items.append(Item(rule, [i]))
return items
###Output
_____no_output_____
###Markdown
Let's have a look at what type of axioms we get; note that CKY+ is very greedy. Earley parsing is an alternative strategy that's far more conservative than CKY+; for example, Earley avoids instantiating items that are not yet required and instead uses a simpler axiom (you will see it later).
###Code
sentence = [Terminal(w) for w in 'the man saw the dog with a telescope'.split()]
axioms(G, sentence)
###Output
_____no_output_____
###Markdown
ScanIf the dot is placed at a position just before a *terminal*, we can **scan** it provided that the terminal matches the corresponding input position.\begin{equation} \frac{[i, A \rightarrow \alpha_\blacksquare \, \bullet \, x_{j+1} \, \beta_\square, j]}{[i, A \rightarrow \alpha_\blacksquare \, x_{j+1} \bullet \, \beta_\square, j + 1]}\end{equation}In our implementation with dot lists it looks like this [X -> alpha * x beta, [i ... j]] -------------------------------------------- [X -> alpha x * beta, [i ... j] + [j + 1]] note that the `*` is simply indicating where the last dot would be.
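For instance, with the example sentence used below (*the man saw the dog with a telescope*): scanning advances $[2, Vt \rightarrow \bullet\, \textit{saw}, 2]$ to $[2, Vt \rightarrow \textit{saw}\, \bullet, 3]$, because $x_3$ is *saw*; in the dot-list representation, `Item(Rule(Vt, [Terminal('saw')]), [2])` becomes `Item(Rule(Vt, [Terminal('saw')]), [2, 3])`.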
###Code
def scan(item: Item, sentence):
if isinstance(item.next, Terminal):
if item.dot < len(sentence) and sentence[item.dot] == item.next:
return item.advance(item.dot + 1)
else:
return None
scanned = []
for item in axioms(G, sentence):
new = scan(item, sentence)
if new is not None:
scanned.append(new)
scanned
###Output
_____no_output_____
###Markdown
CompleteHere we let an active item interact with passive items:* either an active item is complete, then we try to advance incomplete passive items* or an active item is incomplete, in which case we try to advance the item itself by looking back to complete passive itemsBoth cases are covered by the inference rule\begin{align}\qquad \frac{[i, X \rightarrow \alpha_\blacksquare \, \bullet \, Y \, \beta_\square ,k] [k, Y \rightarrow \gamma_\blacksquare \, \bullet , j]}{[i, X \rightarrow \alpha_\blacksquare \, Y_{k,j} \, \bullet \, \beta_\square , j]}\end{align}In our implementation with dot lists it looks like this [X -> alpha * Y beta, [i ... k]] [Y -> gamma *, [k ... j]] ---------------------------------------------------------- [X -> alpha Y * beta, [i ... k] + [j]]
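For instance: combining the incomplete item $[0, NP \rightarrow DT_\blacksquare \, \bullet \, NN, 1]$ (the $DT$ already covers *the*) with the complete item $[1, NN \rightarrow \textit{man}_\blacksquare \, \bullet, 2]$ yields $[0, NP \rightarrow DT\ NN_\blacksquare \, \bullet, 2]$; in dot-list form, `[NP -> DT NN, (0, 1)]` plus `[NN -> 'man', (1, 2)]` gives `[NP -> DT NN, (0, 1, 2)]`.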
###Code
def complete(item: Item, agenda: Agenda):
items = []
# This has two cases
# either the input item corresponds to the second antecedent in the COMPLETE inference rule
# in which case the item is complete (the dot stands at the end)
# or the input item corresponds to the first antecedent in the COMPLETE inference rule
# in which case the item is itself incomplete
# When it is *complete* we use it to advance incomplete ones.
# When it is *incomplete* we check if we know a complete item that can advance it.
# First we deal with the complete case
if item.is_complete(): # If the item is complete, it can be used to advance incomplete items
# We then look for incomplete items that are waiting for
# the LHS nonterminal of our complete item
# in particular, if it matches the start position of our complete item
for incomplete in agenda.waiting(item.lhs, item.start):
items.append(incomplete.advance(item.dot))
else: # Then we deal with the incomplete case
# look for completions of item.next spanning from item.dot
ends = set()
for complete in agenda.complete(item.next, item.dot):
ends.add(complete.dot)
# advance the dot of the input item for each position that completes a span
for end in ends:
items.append(item.advance(end))
return items
###Output
_____no_output_____
###Markdown
Forest from complete itemsEach **complete** item in the (passive) agenda can be mapped to a new CFG rule (with nonterminal symbols annotated with spans).For example, an item such as [X -> A x B *, [0,1,2,3]] results in the rule X:0-3 -> A:0-1 x B:2-3 observe how only nonterminal nodes get annotated: this helps us keep terminals and nonterminals clearly separate.
###Code
def make_span(sym: Symbol, start: int, end: int):
"""
Helper function that returns a Span for a certain symbol.
    This function will only make spans out of nonterminals; terminals are returned as is.
:param sym: Terminal or Nonterminal symbol
:param start: open begin
:param end: closed end
:returns: Span(sym, start, end) or sym (if Terminal)
"""
if isinstance(sym, Nonterminal):
return Span(sym, start, end)
else:
return sym
###Output
_____no_output_____
###Markdown
Making a forest is indeed really simple, we just need to return a new CFG with rules derived from complete items in the passive set. The rules will have their nonterminals annotated into spans.
###Code
def make_forest(complete_items: list, forest_start: Nonterminal):
"""
Converts complete items from CKY+ into a forest, that is, a CFG whose rules have spans for nonterminals.
:param complete_items: a collection of dotted items which are complete (dot at the end of the RHS)
:param forest_start: the start nonterminal (a Span) of the forest
"""
if not isinstance(forest_start, Span):
raise ValueError('The start symbol of a forest should be a span')
forest = CFG(forest_start)
for item in complete_items:
lhs = make_span(item.lhs, item.start, item.dot)
rhs = []
for i, sym in enumerate(item.rule.rhs):
if isinstance(sym, Terminal):
rhs.append(sym)
else:
rhs.append(make_span(sym, item.state(i), item.state(i + 1)))
forest.add(Rule(lhs, rhs))
return forest
###Output
_____no_output_____
###Markdown
DeductionStart with axioms and exhaustively apply inference rules
###Code
def cky(cfg: CFG, sentence):
A = Agenda()
for item in axioms(cfg, sentence):
A.push(item)
while A:
item = A.pop()
# a complete item can be used to complete other items
# alternatively, we may be able to advance an incomplete item
# whose next symbol is a nonterminal by combining it with some passive complete item
if item.is_complete() or isinstance(item.next, Nonterminal):
for new in complete(item, A):
A.push(new)
else: # here we have a terminal ahead of the dot, thus only scan is possible
new = scan(item, sentence)
if new is not None: # if we managed to scan
A.push(new)
A.make_passive(item)
forest_start = make_span(cfg.start, 0, len(sentence))
forest = make_forest(A.itercomplete(), forest_start)
if forest.can_rewrite(forest_start):
return forest
else:
return CFG(forest_start)
forest = cky(G, sentence)
forest.start
forest.can_rewrite(forest.start)
print(forest)
###Output
[S:0-8] -> [NP:0-2] [VP:2-8]
[NP:0-2] -> [DT:0-1] [NN:1-2]
[NP:1-2] -> [NN:1-2]
[NP:3-5] -> [DT:3-4] [NN:4-5]
[NP:3-8] -> [NP:3-5] [PP:5-8]
[NP:4-5] -> [NN:4-5]
[NP:4-8] -> [NP:4-5] [PP:5-8]
[NP:6-8] -> [DT:6-7] [NN:7-8]
[NP:7-8] -> [NN:7-8]
[PP:5-8] -> [IN:5-6] [NP:6-8]
[S:0-5] -> [NP:0-2] [VP:2-5]
[S:1-5] -> [NP:1-2] [VP:2-5]
[S:1-8] -> [NP:1-2] [VP:2-8]
[VP:2-5] -> [Vt:2-3] [NP:3-5]
[VP:2-8] -> [VP:2-5] [PP:5-8]
[VP:2-8] -> [Vt:2-3] [NP:3-8]
[DT:0-1] -> 'the'
[DT:3-4] -> 'the'
[DT:6-7] -> 'a'
[IN:5-6] -> 'with'
[NN:1-2] -> 'man'
[NN:4-5] -> 'dog'
[NN:7-8] -> 'telescope'
[Vt:2-3] -> 'saw'
###Markdown
Note that if we modify the sentence in a way that it can't be parsed by G we will get an empty forest
###Code
empty_forest = cky(G, sentence + [Terminal('!')])
empty_forest.start
empty_forest.can_rewrite(empty_forest.start)
###Output
_____no_output_____
###Markdown
PCFG recapA probabilistic CFG is a simple extension to CFGs where we assign a joint probability distribution over the space of context-free *derivations*. A random **derivation** $D = \langle R_1, \ldots, R_m \rangle$ is a sequence of $m$ *random rule applications*.A random rule is a pair of a random LHS nonterminal $V$ and a random RHS sequence $\beta$, where $V \rightarrow \beta$ corresponds to a valid rule in the grammar.We assume that a derivation is generated one rule at a time and each rule is generated independently. Moreover, the probability value of a rule is given by a conditional probability distribution over RHS sequences given LHS nonterminal. \begin{align}P_{D|M}(r_1^m|m) &= \prod_{i=1}^m P_R(r_i) \\ &= \prod_{i=1}^m P_{\text{RHS}|\text{LHS}}(\beta_i | v_i)\\ &= \prod_{i=1}^m \text{Cat}(\beta_i | \boldsymbol \theta^{v_i})\\ &= \prod_{i=1}^m \theta_{v_i \rightarrow \beta_i}\\\end{align}We can implement PCFGs rather easily by pairing a CFG grammar with a dictionary mapping from rules to their probabilities. But we must remember that for each given LHS symbol, the probability values of all of its rewriting rules must sum to 1.\begin{equation}\sum_{\beta} \theta_{v \rightarrow \beta} = 1\end{equation} Inside algorithmThis is the core of this lab, the inside recursion. The inside recursion (also known as **value recursion**) is incredibly general, it can be used to compute a range of interesting quantities.The formula below corresponds to the recursion:\begin{align}(1)\qquad I(v) &= \begin{cases} \bar{1} & \text{if }v \text{ is terminal and } \text{BS}(v) = \emptyset\\ \bar{0} & \text{if }v \text{ is nonterminal and } \text{BS}(v) = \emptyset \\ \displaystyle\bigoplus_{\frac{a_1 \ldots a_n}{v: \theta} \in \text{BS}(v)} \theta \otimes \bigotimes_{i=1}^n I(a_i) & \text{otherwise} \end{cases}\end{align}In this formula $\text{BS}(v)$ is the *backward-star* of the node, or the set of edges **incoming** to the node. That is, all edges (rules with spans) that have that node as an LHS symbol. There is one detail important to remember. In principle only *terminal* nodes would have an empty backward-star. But because our parsing strategy can produce some dead-end nodes (nodes that cannot be expanded) we will have some nonterminal nodes with empty backward-star. Those are special cases, which we treat specially. Essentially, we give them an inside value of $\bar 0$. SemiringsIn this formula we use generalised sum $\oplus$ and generalised product $\otimes$ which we explain below.A **semiring** is algebraic structure $\langle \mathbb K, \oplus, \otimes, \bar 0, \bar 1\rangle$ which corresponds to a set $\mathbb K$ equipped with addition $\oplus$ and multiplication $\otimes$. Real semiringFor example, the algebra you learnt at school is a semiring! The set of interest is the real line $\mathbb K = \mathbb R$.Then if we have two real numbers, $a \in \mathbb R$ and $b \in \mathbb R$, we define **sum** as\begin{equation}a \oplus b = a + b\end{equation}which is simply the standard addition.The additive identity is the value in the set that does not affect summation, we indicate it by $\bar 0$. In this case, we are talking about the real number 0:\begin{equation}a \oplus \bar 0 = a + 0 = a\end{equation}We can also define **multiplication**\begin{equation}a \otimes b = a \times b\end{equation}which is simply the standard multiplication.The multiplicative identity is the value in the set that does not affect multiplication, we indicate it by $\bar 1$. 
In this case, we are talking about the real number 1:\begin{equation}a \otimes \bar 1 = a \times 1 = a\end{equation} Log-Probability semiringWhen we compute a log-marginal, we are essentially using a logarithmic semiring. Then the set of interest is the set of log-probability values. Probabilities range between $0$ and $1$ and therefore log-probabilities range from $-\infty$ (which is $\log 0$) to $0$ (which is $\log 1$). We denote this set $\mathbb K = \mathbb R_{\le 0} \cup \{-\infty\}$.Then if we have two log-probability values $a \in \mathbb K$ and $b \in \mathbb K$, our sum becomes\begin{equation}a \oplus b = \log(\exp a + \exp b)\end{equation}Here we first exponentiate the values bringing them back to the real semiring (where we know how to sum), then we use the standard sum (from high school), and convert the result back to the log-probability semiring by applying $\log$ to the result.Our product becomes\begin{equation}a \otimes b = a + b\end{equation}which exploits a basic property of logarithms.Our additive identity is\begin{equation}a \oplus \bar 0 = \log (\exp a + \underbrace{\exp(-\infty)}_{0}) = \log \exp a = a\end{equation}this is the case because exponentiating an infinitely negative number converges to $0$.Finally, our multiplicative identity is $\bar 1 = \log 1 = 0$, so\begin{equation}a \otimes \bar 1 = a + 0 = a\end{equation}The interesting thing about semirings is that they manipulate different *types of numbers* but they are coherent with the basic axioms of math that we are used to. They help us realise that several algorithms are actually all the same, but they happen to operate under different algebraic structures (read: different definitions of what sum and multiplication are). We will define a general class for semirings and you will implement various specialisations. This class will only contain **class methods**; this makes the class more or less like a package that can be used to organise coherent functions without really storing any content.
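As a quick numeric illustration of these definitions (with arbitrarily chosen probabilities $0.2$ and $0.3$):$$\log 0.2 \oplus \log 0.3 = \log\left(e^{\log 0.2} + e^{\log 0.3}\right) = \log 0.5, \qquad \log 0.2 \otimes \log 0.3 = \log 0.2 + \log 0.3 = \log 0.06,$$which, mapped back with $\exp$, recover exactly $0.2 + 0.3$ and $0.2 \times 0.3$ from the real semiring.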
###Code
class Semiring:
"""
This is the interface for semirings.
"""
@classmethod
def from_real(cls, a: float):
"""This method takes a number in the Real semiring and converts it to the semiring of interest"""
raise NotImplementedError('You need to implement this in the child class')
@classmethod
def to_real(cls, a: float):
"""This method takes a number in this semiring and converts it to the Real semiring"""
raise NotImplementedError('You need to implement this in the child class')
@classmethod
def one(cls):
"""This method returns the multiplicative identity of the semiring"""
raise NotImplementedError('You need to implement this in the child class')
@classmethod
def zero(cls):
"""This method returns the additive identity of the semiring"""
raise NotImplementedError('You need to implement this in the child class')
@classmethod
def plus(cls, a, b):
"""
This method sums a and b (in the semiring sense)
where a and b are elements already converted to the type of numbers manipulated by the semiring
"""
raise NotImplementedError('You need to implement this in the child class')
@classmethod
def times(cls, a, b):
"""
This method multiplies a and b (in the semiring sense)
where a and b are elements already converted to the type of numbers manipulated by the semiring
"""
raise NotImplementedError('You need to implement this in the child class')
###Output
_____no_output_____
###Markdown
We will implement for you the *Marginal semiring*, that is, the basic algebra from school.
###Code
class MarginalSemiring(Semiring):
@classmethod
def from_real(cls, a: float):
return a
@classmethod
def to_real(cls, a: float):
return a
@classmethod
def one(cls):
return 1.
@classmethod
def zero(cls):
return 0.
@classmethod
def plus(cls, a, b):
return a + b
@classmethod
def times(cls, a, b):
return a * b
MarginalSemiring.from_real(0.2)
MarginalSemiring.to_real(0.5)
MarginalSemiring.plus(0.1, 0.2)
MarginalSemiring.times(0.2, 0.3)
MarginalSemiring.one()
MarginalSemiring.zero()
###Output
_____no_output_____
###Markdown
and we also implement for you the *ViterbiSemiring* used to compute maximum probabilities.
###Code
import numpy as np
class ViterbiSemiring(Semiring):
@classmethod
def from_real(cls, a: float):
return a
@classmethod
def to_real(cls, a: float):
return a
@classmethod
def one(cls):
return 1.
@classmethod
def zero(cls):
return 0.
@classmethod
def plus(cls, a, b):
return np.maximum(a, b)
@classmethod
def times(cls, a, b):
return a * b
ViterbiSemiring.times(0.2, 0.3)
###Output
_____no_output_____
###Markdown
note how the following will pick the maximum rather than accumulate the numbers
###Code
ViterbiSemiring.plus(0.1, 0.4)
###Output
_____no_output_____
###Markdown
Now you implement the $\log$ variants of both semirings:**Exercise 7-6** **[6 points]** Implement LogMarginalSemiring below as a log-variant of the MarginalSemiring as well as LogViterbiSemiring as a log-variant of the ViterbiSemiring. Run examples of all methods and confirm that the quantities they compute correspond to the correct quantities when converted back to the Real semiring using `to_real`.* **[3 points]** LogMarginalSemiring* **[3 points]** LogViterbiSemiring
###Code
class LogMarginalSemiring(Semiring):
@classmethod
def from_real(cls, a: float):
return np.log(a)
@classmethod
def to_real(cls, a: float):
return np.exp(a)
@classmethod
def one(cls):
return 0.
@classmethod
def zero(cls):
return -float('Inf')
@classmethod
def plus(cls, a, b):
return np.log(np.exp(a) + np.exp(b))
@classmethod
def times(cls, a, b):
return a + b
class LogViterbiSemiring(Semiring):
@classmethod
def from_real(cls, a: float):
return np.log(a)
@classmethod
def to_real(cls, a: float):
return np.exp(a)
@classmethod
def one(cls):
return 0.
@classmethod
def zero(cls):
return -float('Inf')
@classmethod
def plus(cls, a, b):
return max(a, b)
@classmethod
def times(cls, a, b):
return a + b
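
# Quick checks for the exercise (arbitrary example values): after to_real, the results
# should match the ordinary Real / Viterbi semiring quantities.
assert np.isclose(LogMarginalSemiring.to_real(LogMarginalSemiring.plus(np.log(0.1), np.log(0.2))), 0.3)
assert np.isclose(LogMarginalSemiring.to_real(LogMarginalSemiring.times(np.log(0.2), np.log(0.3))), 0.06)
assert LogMarginalSemiring.to_real(LogMarginalSemiring.zero()) == 0. and LogMarginalSemiring.one() == 0.
assert np.isclose(LogViterbiSemiring.to_real(LogViterbiSemiring.plus(np.log(0.1), np.log(0.4))), 0.4)
assert np.isclose(LogViterbiSemiring.to_real(LogViterbiSemiring.times(np.log(0.2), np.log(0.3))), 0.06)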
###Output
_____no_output_____
###Markdown
Implementing the inside recursion For the inside recursion you need the weight (parameter converted to the appropriate semiring) of the rule that justifies each edge. For that we provide you with a helper function. It receives an edge (Rule with spans) and the cpds of the original grammar and returns the correct parameter.
###Code
from typing import Dict
def get_parameter(edge: Rule, cpds: Dict[Nonterminal, Dict[Rule, float]]):
base_rhs = [node.base_nonterminal if isinstance(node, Span) else node for node in edge.rhs]
base_rule = Rule(edge.lhs.base_nonterminal, base_rhs)
return cpds[base_rule.lhs][base_rule]
# Now if you ever need to get the parameter for a rule in the grammar you can use the function above
# For example,
for edge in forest:
print(get_parameter(edge, cpds), edge)
###Output
1.0 [S:1-8] -> [NP:1-2] [VP:2-8]
0.5 [IN:5-6] -> 'with'
0.3 [NN:4-5] -> 'dog'
1.0 [S:0-8] -> [NP:0-2] [VP:2-8]
0.3 [VP:2-8] -> [Vt:2-3] [NP:3-8]
0.3 [NP:4-8] -> [NP:4-5] [PP:5-8]
0.6 [DT:0-1] -> 'the'
0.4 [Vt:2-3] -> 'saw'
0.4 [NP:6-8] -> [DT:6-7] [NN:7-8]
0.1 [NN:7-8] -> 'telescope'
0.4 [NP:0-2] -> [DT:0-1] [NN:1-2]
1.0 [S:1-5] -> [NP:1-2] [VP:2-5]
0.3 [NP:3-8] -> [NP:3-5] [PP:5-8]
0.1 [NP:4-5] -> [NN:4-5]
0.4 [NN:1-2] -> 'man'
0.1 [NP:7-8] -> [NN:7-8]
0.1 [NP:1-2] -> [NN:1-2]
0.4 [NP:3-5] -> [DT:3-4] [NN:4-5]
1.0 [S:0-5] -> [NP:0-2] [VP:2-5]
0.3 [VP:2-5] -> [Vt:2-3] [NP:3-5]
0.4 [VP:2-8] -> [VP:2-5] [PP:5-8]
0.6 [DT:3-4] -> 'the'
1.0 [PP:5-8] -> [IN:5-6] [NP:6-8]
0.4 [DT:6-7] -> 'a'
###Markdown
**Exercise 7-7** **[15 points]** Now you should implement the inside recursion below* see below for example of inside values for a correct implementation
###Code
def compute_inside_table(forest: CFG, cpds: Dict[Nonterminal, Dict[Rule, float]], semiring: Semiring):
"""
Computes the inside table, that is, the table that assigns an inside value to each
node in the forest, where a node is a Span.
For convenience, this table may also contain inside values for nodes that are not spans, such as the leaves
or terminals of the forest, but then that inside should be semiring.one()
Our parsing strategies sometimes create useless nodes, these are nonterminal nodes that have no way
of being expanded (there are no edges incoming to those nodes, they have an empty backward-star).
We consider those nodes have an inside value of semiring.zero().
This is necessary to circumvent the fact that the parsing strategy can create such useless items.
:param forest: a forest as produced by CKY+
:param cpds: the cpds of the original grammar
:param semiring: a choice of Semiring
:return: inside table as a dictionary from a Span to an inside value (as a number in the semiring)
"""
inside_table = dict()
# Start at S -> Find all Span(S)
start_set = set()
for rule in forest:
if rule.lhs.base_nonterminal == Nonterminal('S'):
start_set.add(rule.lhs)
# print(cpds[rule.lhs.base_nonterminal])
for s in start_set:
iS = inside_value(s, forest, cpds, semiring, inside_table)
inside_table[s] = iS
return inside_table
# print(semiring.to_real(iS))
def get_bs(item: Span, forest: CFG):
bs = [r for r in forest if r.lhs == item]
return bs
def inside_value(item: Span, forest: CFG, cpds, semiring:Semiring, inside_table):
if isinstance(item, Terminal):
return semiring.one()
iS = semiring.zero()
bs = get_bs(item, forest)
if len(bs) == 0:
return semiring.zero()
for edge in get_bs(item, forest):
theta = semiring.from_real(get_parameter(edge, cpds))
prod = semiring.one()
# print(edge)
for sym in edge.rhs:
if sym not in inside_table:
inside_table[sym] = inside_value(sym, forest, cpds, semiring, inside_table)
prod = semiring.times(prod, inside_table[sym])
iS = semiring.plus(iS, semiring.times(theta, prod))
return iS
s = Span(Nonterminal('DT'), 3, 4)
# print(isinstance(get_bs(s, forest)[0].rhs[0], Terminal))
semiring = LogMarginalSemiring()
inside_table = compute_inside_table(forest, cpds, semiring)
###Output
_____no_output_____
###Markdown
Marginal probability is the inside of the GOAL item in the LogMarginalSemiring (converted back to a real number) .Here is what your result should look like```pythoninside_table = compute_inside_table(forest, cpds, LogMarginalSemiring)LogMarginalSemiring.to_real(inside_table[forest.start])4.6448640000000001e-06```
###Code
LogMarginalSemiring.to_real(inside_table[forest.start])
###Output
_____no_output_____
###Markdown
Maximum probability is the inside of the GOAL item in the LogViterbiSemiring (converted back to a real number) .Here is what your result should look like```pythonviterbi_table = compute_inside_table(forest, cpds, LogViterbiSemiring)LogViterbiSemiring.to_real(viterbi_table[forest.start])2.6542080000000048e-06```
###Code
viterbi_table = compute_inside_table(forest, cpds, LogViterbiSemiring)
LogViterbiSemiring.to_real(viterbi_table[forest.start])
###Output
_____no_output_____
###Markdown
We can even define a semiring to count! Imagine that a semiring maps from the real numbers by saying that if something has non-zero probability it counts as $1$ and if it has zero probability it counts as $0$.
###Code
class CountSemiring(Semiring):
@classmethod
def from_real(cls, a: float):
"""Map to 1 if a bigger than 0"""
return 1. if a > 0. else 0.
@classmethod
def to_real(cls, a: float):
return a
@classmethod
def one(cls):
return 1.
@classmethod
def zero(cls):
return 0.
@classmethod
def plus(cls, a, b):
return a + b
@classmethod
def times(cls, a, b):
return a * b
###Output
_____no_output_____
###Markdown
Then we can use the inside algorithm to find the number of **derivations** in the parse forest! If your inside implementation is corret, this is what your result should look like:```pythoncount_table = compute_inside_table(forest, cpds, CountSemiring)CountSemiring.to_real(count_table[forest.start])2.0```
###Code
count_table = compute_inside_table(forest, cpds, CountSemiring)
CountSemiring.to_real(count_table[forest.start])
###Output
_____no_output_____
###Markdown
Isn't this great? :D Now you are ready to compute the actual Viterbi derivation! Viterbi derivationThe Viterbi path is a top-down traversal of the forest, where each time we have to choose which rule/edge to use to expand a certain nonterminal symbol (span node), we choose the one whose inside value is maximum. But recall that the inside value associated with an *edge* must take into account the weight of the edge and the inside value of its children. Of course, all of this must happen within a maximising semiring (e.g. LogViterbiSemiring or ViterbiSemiring). \begin{align} (2) \qquad e^\star &= \arg\!\max_{e \in \text{BS(v)}} \theta \otimes \bigotimes_{i=1}^n I(a_i) \\ &~\text{where }e:=\frac{a_1, \ldots, a_n}{v}:\theta\end{align} **Exercise 7-8** **[5 points]** Implement a function that returns the Viterbi derivation (a sequence of rule applications that attains maximum probability).
###Code
def viterbi_derivation(forest: CFG, cpds: Dict[Nonterminal, Dict[Rule, float]], inside_table: Dict[Symbol, float], semiring: Semiring):
"""
Return the derivation (and its yield) that attains maximum probability.
This is a top-down traversal from the root, where for each node v that we need to expand, we
solve equation (2) above.
:param forest: a forest
:param cpds: cpds of the original grammar
:param inside_table: inside values produced with a certain maximising semiring
:param semiring: a maximising semiring e.g. ViterbiSemiring or LogViterbiSemiring
:returns: a tuple
- first element is an ordered list of rule applications
- second element is the yield of the derivation
"""
    # Sketch: traverse the forest top-down (pre-order) from the root; at each node
    # pick the incoming edge maximising theta (x) prod_i I(a_i), i.e. equation (2).
    def edge_value(e):
        v = semiring.from_real(get_parameter(e, cpds))
        for a in e.rhs:
            v = semiring.times(v, inside_table.get(a, semiring.one()))
        return v
    derivation, words, stack = [], [], [forest.start]
    while stack:
        node = stack.pop()
        if isinstance(node, Terminal):  # terminals form the yield
            words.append(node)
            continue
        best = max((e for e in forest if e.lhs == node), key=edge_value)
        derivation.append(best)
        stack.extend(reversed(best.rhs))  # push reversed so children pop left-to-right
    return tuple(derivation), tuple(words)
###Output
_____no_output_____
###Markdown
If your implementation is correct you should get```pythonviterbi_derivation(forest, cpds, viterbi_table, LogViterbiSemiring)(([S:0-8] -> [NP:0-2] [VP:2-8], [NP:0-2] -> [DT:0-1] [NN:1-2], [DT:0-1] -> 'the', [NN:1-2] -> 'man', [VP:2-8] -> [VP:2-5] [PP:5-8], [VP:2-5] -> [Vt:2-3] [NP:3-5], [Vt:2-3] -> 'saw', [NP:3-5] -> [DT:3-4] [NN:4-5], [DT:3-4] -> 'the', [NN:4-5] -> 'dog', [PP:5-8] -> [IN:5-6] [NP:6-8], [IN:5-6] -> 'with', [NP:6-8] -> [DT:6-7] [NN:7-8], [DT:6-7] -> 'a', [NN:7-8] -> 'telescope'), ('the', 'man', 'saw', 'the', 'dog', 'with', 'a', 'telescope'))``` You can draw trees using NLTK, here is an example, you can adjust this to visualise trees predicted by your own Viterbi derivation algorithm.
###Code
from nltk.tree import Tree
parse_sent = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
t = Tree.fromstring(parse_sent)
t
###Output
_____no_output_____ |
.ipynb_checkpoints/Run Sims-checkpoint.ipynb | ###Markdown
Run a bunch of sims with the following settings:* demand is always 7 mil, no reason to change it* renewable varies from ~10% - ~70%* flexible varies from ~10% - ~50%* backup power to buy varies from 2 standard deviations to 25 standard deviations (in case model variance is also off)
###Code
# still need to make a decision on the pricing points for everything, then I'm ready to run a bunch of sims and be done with
# this
import sys

import numpy as np

# NOTE: miniSCOTnotebook (used below) is assumed to be provided by the surrounding scse/miniSCOT project environment.
num_runs = 10
renewable_scales = np.linspace(.1, .7, 13)
flexible_scales = np.linspace(.1, .5, 9)
backup_power = np.linspace(0,25, 26)
# just add the desired key-value to these run params to run a sim
run_params = {
'time_horizon': 730 # run for 2 years
}
m = miniSCOTnotebook()
market_demand = 7000000
path = "./sims/renewable_scales/"
for s in renewable_scales:
for i in range(num_runs):
print(path + "renewable_{}_{}".format(s,i))
        sys.stdout = open(path + "renewable_{}_{}".format(s, i), 'w')  # redirect this run's printed output to its log file
run_params['renewable_scale'] = s * market_demand
m.start(**run_params)
m.run()
        sys.stdout.close()
        sys.stdout = sys.__stdout__  # restore printing to the console for the next run
###Output
DEBUG:scse.profiles.profile:Open profile file = /home/cperreault/scse1/lib/python3.7/site-packages/scse/profiles/power_supply.json.
DEBUG:scse.profiles.profile:module_name is scse.metrics.power_contract_profit
DEBUG:scse.profiles.profile:module_name is scse.modules.production.renewables_firm
DEBUG:scse.profiles.profile:module_name is scse.modules.production.cheap_ramp_firm
DEBUG:scse.profiles.profile:module_name is scse.modules.production.expensive_ramp_firm
DEBUG:scse.profiles.profile:module_name is scse.modules.demand.power_demand
INFO:GP:initializing Y
INFO:GP:initializing inference method
INFO:GP:adding kernel and likelihood as parameters
|
Analysis/script_Frequency.ipynb | ###Markdown
Frequency To use the data transformation script `Frequency.pl`, we provide it with a single input file followed by what we want it to name the output file it creates and a channel number:`$ perl ./perl/Frequency.pl [inputFile1 inputFile2 ...] [outputFile1 outputFile2 ...] [column] [binType switch] [binValue]`The last two values have a peculiar usage compared to the other transformation scripts. Here, `binType` is a switch that can be either `0` or `1` to tell the script how you want to divide the data into bins; this choice then determines what the `binValue` parameter means. The choices are 0: Divide the data into a number of bins equal to `binValue` 1: Divide the data into bins of width `binValue` (in nanoseconds)It isn't immediately obvious what this means, though, or what the `column` parameter does. We'll try it out on the test data in the `test_data` directory. Use the UNIX shell command `$ ls test_data` to see what's there:
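To make the distinction concrete (with hypothetical file names `input` and `output`): `$ perl ./perl/Frequency.pl input output 1 0 100` would divide the data into 100 bins, while `$ perl ./perl/Frequency.pl input output 1 1 2` — the form we use below — makes the bins 2 nanoseconds wide.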
###Code
!ls test_data
###Output
6119.2016.0104.1.test.thresh combineOut sortOut15
6148.2016.0109.0.test.thresh sortOut sortOut51
6203.2016.0104.1.test.thresh sortOut11
###Markdown
Let's start simple, using a single input file and a single output file. We'll run`$ perl ./perl/Frequency.pl test_data/6148.2016.0109.0.test.thresh test_data/freqOut01 1 1 2`to see what happens. The `binType` switch is set to the e-Lab default of `1`, "bin by fixed width," and the value of that fixed width is set to the e-Lab-default of `2`ns. Notice that we've named the output file `freqOut01`; we may have to do lots of experimentation to figure out what exactly the transformation `Frequency.pl` does, so we'll increment that number each time to keep a record of our progress. The `column` parameter is `1`.Before we begin, we'll make sure we know what the input file looks like. The UNIX `wc` (word count) utility tells us that `6148.2016.0109.0.test.thresh` has over a thousand lines:
###Code
!wc -l test_data/6148.2016.0109.0.test.thresh
###Output
1003 test_data/6148.2016.0109.0.test.thresh
###Markdown
(`wc` stands for "word count", and the `-l` flag means "but count lines instead of words." The first number in the output, before the filename, is the number of lines, in this case 1003) The UNIX `head` utility will show us the beginning of the file:
###Code
!head -25 test_data/6148.2016.0109.0.test.thresh
###Output
#$md5
#md5_hex(0)
#ID.CHANNEL, Julian Day, RISING EDGE(sec), FALLING EDGE(sec), TIME OVER THRESHOLD (nanosec), RISING EDGE(INT), FALLING EDGE(INT)
6148.4 2457396 0.5006992493422453 0.5006992493424479 17.51 4326041514317000 4326041514318750
6148.3 2457396 0.5006992493422887 0.5006992493424768 16.25 4326041514317375 4326041514319000
6148.2 2457396 0.5007005963399161 0.5007005963400029 7.49 4326053152376876 4326053152377625
6148.3 2457396 0.5007005963401910 0.5007005963404514 22.49 4326053152379250 4326053152381500
6148.4 2457396 0.5007005963401765 0.5007005963404658 25.00 4326053152379125 4326053152381624
6148.1 2457396 0.5014987243978154 0.5014987243980903 23.75 4332948978797125 4332948978799500
6148.2 2457396 0.5014987243980759 0.5014987243982495 15.00 4332948978799376 4332948978800875
6148.1 2457396 0.5020062862072049 0.5020062862076967 42.49 4337334312830250 4337334312834500
6148.2 2457396 0.5020062862074218 0.5020062862076389 18.75 4337334312832125 4337334312834000
6148.2 2457396 0.5020062862076823 0.5020062862078704 16.25 4337334312834374 4337334312836000
6148.2 2457396 0.5020062862086806 0.5020062862088253 12.50 4337334312843000 4337334312844250
6148.1 2457396 0.5021121718857783 0.5021121718861401 31.26 4338249165093124 4338249165096250
6148.2 2457396 0.5021121718860532 0.5021121718865741 45.01 4338249165095500 4338249165100000
6148.3 2457396 0.5021121718866174 0.5021121718867042 7.50 4338249165100374 4338249165101124
6148.4 2457396 0.5021121718865018 0.5021121718868924 33.75 4338249165099376 4338249165102750
6148.3 2457396 0.5021781527571470 0.5021781527575087 31.25 4338819239821750 4338819239824875
6148.4 2457396 0.5021781527571325 0.5021781527574218 25.00 4338819239821625 4338819239824125
6148.1 2457396 0.5023430585295574 0.5023430585298612 26.24 4340244025695376 4340244025698000
6148.2 2457396 0.5023430585298176 0.5023430585300203 17.51 4340244025697624 4340244025699375
6148.4 2457396 0.5023430585301071 0.5023430585304110 26.25 4340244025700126 4340244025702750
6148.3 2457396 0.5023430585300781 0.5023430585305989 45.00 4340244025699875 4340244025704374
6148.2 2457396 0.5024351469382090 0.5024351469384260 18.74 4341039669546126 4341039669548000
###Markdown
Now, we'll execute`$ perl ./perl/Frequency.pl test_data/6148.2016.0109.0.test.thresh test_data/freqOut01 1 1 2`from the command line and see what changes. After doing so, we can see that `freqOut01` was created in the `test_data/` folder, so we must be on the right track:
###Code
!ls test_data
!wc -l test_data/freqOut01
###Output
1 test_data/freqOut01
###Markdown
It only has one line, though! Better investigate further:
###Code
!cat test_data/freqOut01
###Output
6149.000000 1000 4
###Markdown
It turns out that `SingleChannel` has a little bit more power, though. It can actually handle multiple single channels at a time, as odd as that might sound. We'll try specifying additional channels while adding additional respective output names for them:`$ perl ./perl/SingleChannel.pl test_data/6148.2016.0109.0.test.thresh "test_data/singleChannelOut1 test_data/singleChannelOut2 test_data/singleChannelOut3 test_data/singleChannelOut4" "1 2 3 4"`(for multiple channels/outputs, we have to add quotes `"` to make sure `SingleChannel` knows which arguments are the output filenames and which are the channel numbers)If we run this from the command line, we do in fact get four separate output files:
###Code
!ls -1 test_data/
###Output
6119.2016.0104.1.test.thresh
6148.2016.0109.0.test.thresh
6203.2016.0104.1.test.thresh
combineOut
singleChannelOut1
singleChannelOut2
singleChannelOut3
singleChannelOut4
sortOut
sortOut11
sortOut15
sortOut51
###Markdown
Out of curiosity, let's line-count them using the UNIX `wc` utility:
###Code
!wc -l test_data/singleChannelOut1
!wc -l test_data/singleChannelOut2
!wc -l test_data/singleChannelOut3
!wc -l test_data/singleChannelOut4
###Output
238 test_data/singleChannelOut4
|
Data-X Mindful Part2.ipynb | ###Markdown
Data-X Mindful Project Part 2 Data analysis and modeling
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegressionCV
from IPython.display import Image, display
import random
###Output
_____no_output_____
###Markdown
Read data files
###Code
X = pd.read_csv('X_df.csv').drop('Unnamed: 0',axis=1)
Y = pd.read_csv('Y_df.csv').drop('Unnamed: 0',axis=1)
###Output
_____no_output_____
###Markdown
Normalization
###Code
scaler = MinMaxScaler()
norm_X = scaler.fit_transform(X)
new_Y = Y.Depressed
X_train, X_test, y_train, y_test = train_test_split(norm_X, new_Y, test_size=0.1)
###Output
/Users/daveliu/.local/share/virtualenvs/daveliu-o5npLomY/lib/python3.7/site-packages/sklearn/preprocessing/data.py:323: DataConversionWarning: Data with input dtype int64 were all converted to float64 by MinMaxScaler.
return self.partial_fit(X, y)
###Markdown
Initial trial with different classifiers
###Code
# LogisticRegressionCV
clf = LogisticRegressionCV(penalty = 'l2', solver='liblinear', multi_class='ovr').fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
#y_pred_train = clf.predict(X_train)
#y_pred_test = clf.predict(X_test)
#clf.predict_proba(X_test)
#recall_score(y_test, y_pred_test, average='micro')
#precision_score(y_test, y_pred_test, average='micro')
#confusion_matrix(y_train, y_pred_train)
#confusion_matrix(y_test, y_pred_test)
# Random Forest
clf = RandomForestClassifier(n_estimators=100, max_depth=3,
random_state=80)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
# Decision Tree
clf = DecisionTreeClassifier(max_depth=3,max_leaf_nodes=3,min_samples_leaf=1)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
#cross_val_score(clf, X_test, y_test, cv=10)
###Output
0.7951807228915663
0.8
###Markdown
Gradient Boost
###Code
ENTITY_TYPE = "Gradient Boost"
clf = GradientBoostingClassifier(n_estimators=15, max_depth=5)
clf.fit(X_train, y_train)
print("Train: ", clf.score(X_train, y_train))
print("Test: ", clf.score(X_test, y_test))
print("Features used:", len(clf.feature_importances_))
print("-----")
importance_pairs = zip(X.columns, clf.feature_importances_)
sorted_importance_pairs = sorted(importance_pairs, key=lambda k: k[1], reverse=True)
for k, v in sorted_importance_pairs[:20]:
print(k, "\t", v, "\n")
# Feature Importance
feat_imp = pd.Series(clf.feature_importances_, X.columns).sort_values(ascending=False).head(20)
feat_imp.plot(kind='bar', title='Feature Importances for ' + ENTITY_TYPE)
plt.ylabel('Feature Importance Scores' + " (" + ENTITY_TYPE + ")")
plt.tight_layout()
plt.show()
# Recall and Precision
recall = recall_score(y_test, clf.predict(X_test))
precision = precision_score(y_test, clf.predict(X_test))
print('recall: ' + str(recall))
print('precision: ' +str(precision))
print("F1-Score: ", 2 * recall * precision / (recall + precision))
###Output
recall: 0.8
precision: 1.0
F1-Score: 0.888888888888889
###Markdown
xgboost
###Code
ENTITY_TYPE = "xgboost"
from xgboost import XGBClassifier
import xgboost
test_score = 0
precision, recall = 0,0
n = 1
for _ in range(n):
X_train, X_test, y_train, y_test = train_test_split(X, new_Y, test_size=0.1)
    clf = XGBClassifier(n_estimators=20, max_depth=5, eval_metric='aucpr')
clf.fit(X_train, y_train)
test_score += clf.score(X_test, y_test)
recall += recall_score(y_test, clf.predict(X_test))
precision += precision_score(y_test, clf.predict(X_test))
print("Train: ", clf.score(X_train, y_train))
print("Test: ", clf.score(X_test, y_test))
print("Features used:", len(clf.feature_importances_))
print("precision: ", precision/n)
print("recall: ", recall/n)
print("-----")
print(test_score/n)
fig, ax = plt.subplots(figsize=(16,16))
xgboost.plot_importance(clf,ax=ax)
plt.show()
# Feature Importance
importance_pairs = zip(X_train.columns, clf.feature_importances_)
sorted_importance_pairs = sorted(importance_pairs, key=lambda k: k[1], reverse=True)
for k, v in sorted_importance_pairs:
print(k, "\t", v, "\n")
feat_imp = pd.Series(clf.feature_importances_, X_train.columns).sort_values(ascending=False)
plt.figure(figsize=(15,10))
feat_imp.plot(kind='bar', title='Feature Importances for ' + ENTITY_TYPE)
plt.ylabel('Feature Importance Scores' + " (" + ENTITY_TYPE + ")")
plt.tight_layout()
plt.show()
# Recall and Precision
recall = recall_score(y_test, clf.predict(X_test))
precision = precision_score(y_test, clf.predict(X_test))
print('recall: ' + str(recall))
print('precision: ' +str(precision))
print("F1-Score: ", 2 * recall * precision / (recall + precision))
X_results = X.copy()
X_results["results"] = clf.predict(X)
# Average features
X_results_avg = X_results.groupby("results").mean()
X_results_avg.columns
X_results_avg.loc[:,["Fruit", "Water", "F_Average", "F_None", "F_Decline", "Healthy", "Unhealthy","Dry_mouth", "Dry_skin"]]
# Look at correlation between some features
features = ["Fruit", "Water", "F_Average", "F_None", "F_Decline", "Healthy", "Unhealthy","Dry_mouth", "Dry_skin"]
correlations = X.loc[:,features].corr()
sns.heatmap(correlations)
plt.show()
###Output
_____no_output_____
###Markdown
Example Person
###Code
example_person = X.iloc[[15],:]
example_person
# Get average "non-Depressed", and this example person
X_results_avg.iloc[[0]].reset_index().drop("results", axis=1).append(example_person)
# Get difference between average "non-Depressed" person, and this example person
example_difference = X_results_avg.iloc[[0]].reset_index().drop("results", axis=1).append(example_person).diff().iloc[[1]]
example_difference
# Weights and difference
weights_and_diff = pd.DataFrame(data=[feat_imp.values], columns=feat_imp.index).append(example_difference, sort=True)
weights_and_diff
weights_and_diff.iloc[0].multiply(weights_and_diff.iloc[1]).abs().sort_values(ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Sample output and response
###Code
# Sample responses
responses = {
"Relaxed": "Mindfulness and meditation can really help overcome stressful times.",
"Hobby": "Find time for the things that make you happy! Reading, sports, music… Having a hobby really increases your quality of life. ",
"Sweat": "Do some intense exercise! Releasing some stress is always a good idea. ",
"Volunteering": "Have you considered engaging in some volunteering? Even the smallest effort can have huge impact!",
"SP_Late": "Watch out for your sleep habits! Having consistent sleep schedules is vital for getting a good night sleep. ",
"Snack": "Stop snacking all day! Comfort food is not the answer, eat a proper meal instead – I’m sure your cooking abilities are not that bad… 😉",
"Fruit": "Are you getting your daily vitamins? Fruit is a very important part of our diet, and it’s delicious! ",
"Water": "Drink some more water! We are 60% made of water, don’t let that percentage drop 😉",
"Lonely": "It’s normal to feel lonely sometimes, but it’s important to remember that there ARE people who care about us, and to keep in touch with them!",
"F_Average": "Maybe your food choices are not completely unhealthy, but don’t you think you could do better? Food impacts our mood more than you may think!",
"W_Late": "Get out of bed and take on the world! Waking up early and feeling productive is very comforting 🙂",
"Anxious": "Sometimes we are overwhelmed with projects, work, tasks… However, our mindset is very important in overcoming those situations. Tell yourself it’s going to be OK, you can do it!",
"Occupation": "Having an occupation makes us feel useful and is a self-esteem boost! Whether it’s your job, a class project, or housekeeping 😉",
"Energized": "It is very important to feel motivated and with energy! Every morning, think about the things that make you feel happy, excited and give you energy to make it successfully through the day!",
"W_Time": "Waking up on time and being prepared for all the tasks and commitments for the day is very comforting 🙂",
"Talk_2F": "How many friends do you have? And how many of them have you talk to recently? Make sure to keep in touch with the people that are important to us, it really makes us happier.",
"Average": "Watch out for your sleep habits! Having consistent sleep schedules, and relaxing before going to bed, is vital to get a good night sleep.",
"Oil": "Stop eating oily food! Comfort food is not the answer, if you give healthy food a try I’m sure it will make you feel better 😉",
"Sore": "Do some exercise! Is there a bigger feeling of accomplishment that being tired after an intense workout?",
"Fried": "Stop eating fried food! Comfort food is not the answer, if you give healthy food a try I’m sure it will make you feel better 😉",
"S_Late": "If only the day had more than 24 hours! However, staying up until late is not going to change that. Why don’t you try to go to sleep a little bit earlier? You’ll feel well rested the next day 😉",
"Veggies": "Veggies might not be your favourite food, I get that. But how good does it make us feel when we eat healthy and clean?",
"Thankful": "It is important to remember every day how lucky we are. Why don’t you try each morning to think about three things that you are grateful for?",
"Excited": "It is very important to feel motivated and excited! Every morning, think about the things that make you feel happy, excited and give you energy to make it successfully through the day!",
"Exercise": "Do some exercise! Releasing some stress is always a good idea.",
"Family": "Becoming a teenager, moving to a different city (or country!), always makes us become less attached to our family. Call your mom more often, she’ll always be there to help you!",
"Sugar": "Stop eating sugary food! Comfort food is not the answer, if you give healthy food a try I’m sure it will make you feel better 😉",
"Peaceful": "Mindfulness and meditation can really help overcome stressful times.",
"Vitamin": "Get some vitamins! It could really boost your defenses and make you feel better 🙂",
"SP_Tired": "Watch out for your sleep habits! Having consistent sleep schedules is vital for getting a good night sleep.",
"Meal": "Why don’t you eat a proper meal instead of snacking? I’m sure your cooking abilities are not that bad… 😉"
}
##############################
# Example Person (2nd Time)
# RUN THIS CELL TO HAVE A GOOD TIME
##############################
example_person = X.iloc[[random.randint(0,len(X)-1)]]
if clf.predict(example_person.loc[:,:]) == 1:
display(Image("bad.png"))
example_diff = X_results_avg.iloc[[0]].reset_index().drop("results", axis=1).append(example_person).diff().iloc[1]
weights_and_diff = pd.DataFrame(data=[feat_imp.values], columns=feat_imp.index).append(example_diff, sort=True)
top_10_features = weights_and_diff.iloc[0].multiply(weights_and_diff.iloc[1]).abs().sort_values(ascending=False).head(10)
i = 1
for feat in top_10_features.index:
if feat in responses:
print(F"{i}) {responses[feat]}")
i += 1
else:
display(Image("good.png"))
###Output
_____no_output_____
###Markdown
Cosine Similarity Tests
###Code
AVG_POS = X_results_avg.loc[1, :]
AVG_NEG = X_results_avg.loc[0, :]
def dot(A,B):
return (sum(a*b for a,b in zip(A,B)))
def cosine_similarity(a,b):
    # the extra +1 in the denominator avoids division by zero for all-zero vectors (and slightly damps the score)
    return dot(a,b) / (1+( (dot(a,a) **.5) * (dot(b,b) ** .5) ))
def cosine_compare_pos(row):
return cosine_similarity(row, AVG_POS)
def cosine_compare_neg(row):
return cosine_similarity(row, AVG_NEG)
def cosine_ratio_pos(row):
return cosine_similarity(row, AVG_POS) / (cosine_similarity(row, AVG_NEG) + cosine_similarity(row, AVG_POS))
X_results[X_results["results"] == 0].drop("results", axis=1).apply(cosine_compare_neg, axis=1).mean()
X_results[X_results["results"] == 1].drop("results", axis=1).apply(cosine_compare_pos, axis=1).mean()
cosine_similarity(X.loc[10, :], AVG_NEG) / (cosine_similarity(X.loc[10, :], AVG_NEG) + cosine_similarity(X.loc[10, :], AVG_POS))
cos_sims = []
for i in range(len(X)):
example_person = X.loc[i, :]
pos_score = cosine_similarity(example_person, AVG_POS) / (cosine_similarity(example_person, AVG_NEG) + cosine_similarity(example_person, AVG_POS))
# print(pos_score)
cos_sims.append(pos_score)
import numpy as np
print(F" max and min: {max(cos_sims), min(cos_sims)}")
print(F" One standard deviation is: {np.sqrt(np.var(cos_sims))}")
X_results[X_results["results"] == 0].drop("results", axis=1).apply(cosine_ratio_pos, axis=1).mean()
X_results[X_results["results"] == 1].drop("results", axis=1).apply(cosine_ratio_pos, axis=1).mean()
###Output
_____no_output_____ |
Notebook_Archive/FeatureConsistencyScore_2.2-PT18-GetriebeflanschBatch1-2-4.ipynb | ###Markdown
Insert the folder path as **input_dir** where the GAN-transformed images with their corresponding JSON labels are located.
###Code
input_dir = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Getriebeflansch/Batch4_joint_GF'
output_dir = input_dir+'_mask'
print(output_dir)
!python3 labelme2voc.py $input_dir $output_dir --labels labels.txt
masks_gan = output_dir+'/SegmentationObjectPNG'
###Output
_____no_output_____
###Markdown
3. GAN Image Data 3.1 Prepare Data: Create Folder with binary images
###Code
def binarize(im_path, threshold=10):
"""Read, binarize and save images as png.
Args:
path: A string, path of images.
"""
size=1024
img = Image.open(im_path).convert('L')
img = np.array(img)
#print(img[210,:-50])
    # noise/disturbance grey values observed in the image:
#16 128 148 35 31 143 153 16 128 153 153 153 153 127 15 0 10 116 35
thresh = threshold
Flansch = 89
Abdeckung = 76
Mutter =174
Wandler = 157
im_bool = img > thresh
#im_bool = np.logical_or(img == Wandler, img ==4)
#im_bool = img == Wandler
maxval = 255
im_bin = (img > thresh) * maxval
#save array to images
im_save_bi = Image.fromarray(np.uint8(im_bin))
im_save_bool = Image.fromarray((im_bool))
return im_save_bool
#test GAN Data
masks_gan = masks_gan
masks_gan_save = output_dir+'/binarized'
if not os.path.exists(masks_gan_save):
try:
os.mkdir(masks_gan_save)
except FileExistsError:
print('Folder already exists')
pass
path = os.path.join(masks_gan, '*.png')
files = list(glob.glob(path))
files.sort(reverse=True)
for file in files:
image= binarize(file, threshold=20)
plt.imshow(image)
bbox = image.getbbox()
plt.title(f'Bbox: {bbox} Name: {file[-10:]}')
image.save(os.path.join(masks_gan_save,file[-10:]))
###Output
_____no_output_____
###Markdown
4. Synthetic Image Masks 4.1 Prepare Data: Create Folder with binary images Operation for reading png segmentation masks from a folder path, resizing them, converting them to greyscale and saving the images in a new folder
###Code
masks_syn = masks_syn_1024
masks_syn_save = masks_syn+'_binarized'
#test Syn Data
if not os.path.exists(masks_syn_save):
try:
os.mkdir(masks_syn_save)
except FileExistsError:
print('Folder already exists')
pass
path = os.path.join(masks_syn, '*.png')
files = list(glob.glob(path))
files.sort(reverse=True)
for file in files:
image = binarize(file, threshold=10)
plt.imshow(image)
bbox = image.getbbox()
plt.title(f'Bbox: {bbox} Name: {file[-18:]}')
image.save(os.path.join(masks_syn_save,file[-18:]))
def loadpolygon():
return
###Output
_____no_output_____
###Markdown
Since True is treated as 1 and False as 0, multiplying the boolean mask by 255 (the maximum value of uint8) turns True into 255 (white) and False into 0 (black).
###Code
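# Minimal illustration of the boolean-to-uint8 conversion described above
# (illustrative only, not part of the original notebook; assumes numpy as np, as used elsewhere here):
demo_bool = np.array([[True, False], [False, True]])
demo_uint8 = np.uint8(demo_bool) * 255   # True -> 255 (white), False -> 0 (black)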
masks_syn_save_filled = masks_syn_save+'_convex'
if not os.path.exists(masks_syn_save_filled):
try:
os.mkdir(masks_syn_save_filled)
except FileExistsError:
print('Folder already exists')
path = os.path.join(masks_syn_save, '*.png')
files = list(glob.glob(path))
files.sort(reverse=True)
for file in files:
image = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
#print(image.shape, image.dtype)
contour,hierarchy = cv2.findContours(image,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
cv2.drawContours(image,[cnt],0,255,-1)
#image = cv2.bitwise_not(image)
image.dtype
plt.imshow(image)
#bbox = image.getbbox()
plt.title(f'Bbox: {bbox} Name: {file[-18:]}')
cv2.imwrite(os.path.join(masks_syn_save_filled,file[-18:]),image)
def calculatescore(ground_truth, prediction_gan):
"""
Compute feature consitency score of two segmentation masks.
IoU(A,B) = |A & B| / (| A U B|)
Dice(A,B) = 2*|A & B| / (|A| + |B|)
Args:
y_true: true masks, one-hot encoded.
y_pred: predicted masks, either softmax outputs, or one-hot encoded.
metric_name: metric to be computed, either 'iou' or 'dice'.
metric_type: one of 'standard' (default), 'soft', 'naive'.
In the standard version, y_pred is one-hot encoded and the mean
is taken only over classes that are present (in y_true or y_pred).
The 'soft' version of the metrics are computed without one-hot
encoding y_pred.
Returns:
IoU of ground truth and GAN transformed syntetic Image, as a float.
Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
"""
# check image shape to be the same
assert ground_truth.shape == prediction_gan.shape, 'Input masks should be same shape, instead are {}, {}'.format(ground_truth.shape, prediction_gan.shape)
#print('Ground truth shape: '+str(ground_truth.shape))
#print('Predicted GAN image shape: '+str(prediction_gan.shape))
intersection = np.logical_and(ground_truth, prediction_gan)
union = np.logical_or(ground_truth, prediction_gan)
mask_sum = np.sum(np.abs(union)) + np.sum(np.abs(intersection))
iou_score = np.sum(intersection) / np.sum(union)
dice_score = 2*np.sum(intersection) / np.sum(mask_sum)
print('IoU is: '+str(iou_score))
print('Dice/F1 Score is: '+str(dice_score))
return iou_score, dice_score
###Output
_____no_output_____
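###Markdown
A quick sanity check of the metric definitions above (illustrative only, not part of the original evaluation): for the two 2x2 masks below, the intersection contains 1 pixel and the union 2 pixels, so IoU = 1/2 and Dice = 2*1/(2+1) = 2/3.
###Code
demo_a = np.array([[1, 1], [0, 0]], dtype=bool)
demo_b = np.array([[1, 0], [0, 0]], dtype=bool)
demo_iou, demo_dice = calculatescore(demo_a, demo_b)   # expect IoU = 0.5, Dice ~ 0.667
###Output
_____no_output_____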
###Markdown
6. Calculate mean IoU. Translate image mask to white RGB(255,255,255), fill convex hull, and compare masks to calculate the 'Feature Consistency Score'.
###Code
path_syn = masks_syn_save_filled
path_gan = masks_gan_save
print(path_gan)
print(path_syn)
path_syn = os.path.join(path_syn, '*.png')
path_gan = os.path.join(path_gan, '*.png')
files_syn = list(glob.glob(path_syn))
files_gan = list(glob.glob(path_gan))
files_syn.sort(reverse=True)
files_gan.sort(reverse=True)
combined_list = zip(files_syn, files_gan)
z = list(combined_list)
iou_list = []
dice_list = []
for syn, gan in zip(files_syn, files_gan):
img_syn = np.array(Image.open(syn))
img_gan = np.array(Image.open(gan))
print(f'Image name: {syn[-9:]}')
iou, dice = calculatescore(img_syn, img_gan)
print('\n')
iou_list.append(iou)
dice_list.append(dice)
mean_iou = np.mean(iou_list)
mean_dice = np.mean(dice_list)
print(f'Mean IoU is: {mean_iou}')
print(f'{iou_list}\n')
print(f'Mean Dice score is: {mean_dice}')
print(dice_list)
import sys
base_dir = input_dir
prefix = 'batch1'
score_name = prefix+'_score.txt'
path = os.path.join(base_dir,score_name)
if not os.path.exists(path):
try:
os.mknod(path)
except FileExistsError:
print('Folder already exists')
pass
original_stdout = sys.stdout # Save a reference to the original standard output
with open(path, 'w') as f:
sys.stdout = f # Change the standard output to the file we created.
iou_list = []
dice_list = []
print(f'Consistency Metrics for {prefix}:\n')
for syn, gan in zip(files_syn, files_gan):
img_syn = np.array(Image.open(syn))
img_gan = np.array(Image.open(gan))
print(f'Image name: {syn[-9:]}')
iou, dice = calculatescore(img_syn, img_gan)
print('\n')
iou_list.append(iou)
dice_list.append(dice)
mean_iou = np.mean(iou_list)
mean_dice = np.mean(dice_list)
print(f'Mean IoU is: {mean_iou}')
print(f'{iou_list}\n')
print(f'Mean Dice score is: {mean_dice}')
print(dice_list)
sys.stdout = original_stdout # Reset the standard output to its original value
f.close()
#overlapping of 2 masks
#Image.blend()
###Output
_____no_output_____
###Markdown
Notebook for calculating Mask Consistency Score for GAN-transformed images
###Code
from PIL import Image
import cv2
from matplotlib import pyplot as plt
import tensorflow as tf
import glob, os
import numpy as np
import sys
import matplotlib.image as mpimg
#from keras.preprocessing.image import img_to_array, array_to_img
###Output
_____no_output_____
###Markdown
1. Resize GAN-transformed Dataset to 1024*1024 1.1 Specify Args: Directory, folder name and the new image size
###Code
dir = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain18_all/Results/Batch2_100ep_1600trainA_256/samples_testing_Getriebehalter'
###Output
_____no_output_____
###Markdown
1.2 Create new Folder "/A2B_FID_1024" in Directory
###Code
folder = 'A2B_FID'
image_size = 1024
old_folder = (os.path.join(dir, folder))
new_folder = (os.path.join(dir, folder+'_'+str(image_size)))
if not os.path.exists(new_folder):
try:
os.mkdir(new_folder)
except FileExistsError:
print('Folder already exists')
pass
print(os.path.join(old_folder))
print(os.path.join(dir, folder+'_'+str(image_size)))
###Output
/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain18_all/Results/Batch2_100ep_1600trainA_256/samples_testing_Getriebehalter/A2B_FID
/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain18_all/Results/Batch2_100ep_1600trainA_256/samples_testing_Getriebehalter/A2B_FID_1024
###Markdown
1.3 Function for upsampling images of 256x256 or 512x512 to images of size 1024x1024
###Code
def resize_upsampling(old_folder, new_folder, size):
dim = (size, size)
for image in os.listdir(old_folder):
img = cv2.imread(os.path.join(old_folder, image))
# INTER_CUBIC or INTER_LANCZOS4
img_resized = cv2.resize(img, dim, interpolation = cv2.INTER_LANCZOS4)
print('Shape: '+str(img.shape)+' is now resized to: '+str(img_resized.shape))
cv2.imwrite(os.path.join(new_folder , image),img_resized)
def resize_downsampling(old_folder, new_folder, size):
dim = (size, size)
for image in os.listdir(old_folder):
img = cv2.imread(os.path.join(old_folder, image))
img_resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
print('Shape: '+str(img.shape)+' is now resized to: '+str(img_resized.shape))
cv2.imwrite(os.path.join(new_folder , image),img_resized)
###Output
_____no_output_____
###Markdown
1.4 Run the aforementioned function
###Code
resize_upsampling(old_folder, new_folder, 1024)
###Output
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)
###Markdown
Resize the synthetic image masks to 1024x1024
###Code
dir2 = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Getriebeflansch'
folder = 'SegmentationMasks'
size = 1024
old_folder = (os.path.join(dir2, folder))
masks_syn_1024 = (os.path.join(dir2, folder+'_'+str(size)))
if not os.path.exists(masks_syn_1024):
try:
os.mkdir(masks_syn_1024)
except FileExistsError:
print('Folder already exists')
pass
resize_downsampling(old_folder, masks_syn_1024, size)
###Output
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
Shape: (1080, 1920, 3) is now resized to: (1024, 1024, 3)
###Markdown
2. Use the annotation tool Labelme to create polygons for GAN images in JSON format. We then use the JSON files with polygon data to create semantic segmentation masks - no instance segmentation is needed, because we do not need to differentiate between distinct features. We use the bash and python script in this directory to do the mask translation.
###Code
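# The mask translation described above is done with labelme2voc.py, as called earlier in this
# notebook; an illustrative invocation with placeholder paths would look like:
#   python3 labelme2voc.py <labelme_json_dir> <labelme_json_dir>_mask --labels labels.txt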
!ls
!pwd
###Output
augmentation.py
data.py
datasets
download_dataset.sh
FeatureConsistencyScore_2.0-BlattfederBatch1.ipynb
FeatureConsistencyScore_2.0-BlattfederBatch2.ipynb
FeatureConsistencyScore_2.0-BlattfederBatch4.ipynb
FeatureConsistencyScore_2.0-EntluefterBatch1.ipynb
FeatureConsistencyScore_2.0-EntluefterBatch2.ipynb
FeatureConsistencyScore_2.0.ipynb
FeatureConsistencyScore_2.1-EntluefterBatch4.ipynb
FeatureConsistencyScore_2.1-GetriebeflanschBatch1.ipynb
FeatureConsistencyScore_2.2-GetriebeflanschBatch1.ipynb
FeatureConsistencyScore_2.2-GetriebeflanschBatch2.ipynb
FeatureConsistencyScore_2.2-GetriebeflanschBatch4.ipynb
FeatureConsistencyScore_2.2-PT18-BlattfederBatch1-2-4.ipynb
FeatureConsistencyScore_2.2-PT18-EntluefterBatch1-2-4.ipynb
FeatureConsistencyScore_2.2-PT18-GetriebeflanschBatch1-2-4.ipynb
FeatureConsistencyScore_2.2-PT18-WandlerhalterBatch1-2-4.ipynb
FeatureConsistencyScore_2.2-WandlerhalterBatch1.ipynb
FeatureConsistencyScore_2.2-WandlerhalterBatch2.ipynb
FeatureConsistencyScore_2.2-WandlerhalterBatch4.ipynb
fid.py
filename.txt
imlib
interpolation.py
labelme2coco.py
labelme2voc.py
labels.txt
LICENSE
mask-score.ipynb
module.py
Notebook_Archive
output
path
__pycache__
pylib
README.md
resize_images_pascalvoc
test.py
tf2gan
tf2lib
train.py
/home/molu1019/workspace/CycleGAN-Tensorflow-2
|
notebooks/welter_issue002-01_Spot_Check_the_Pipeline_Spectra.ipynb | ###Markdown
Welter issue 2 Spot Check the Pipeline Spectra Notebook 01. Michael Gully-Santiago, Wednesday, November 25, 2015. We will make plots of the pipeline spectra.
###Code
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
% matplotlib inline
% config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_context('notebook')
###Output
_____no_output_____
###Markdown
Raw standard star spectrum: `20151117/SDCH_20151117_0199.spec.fits`. Read in the `.fits` files. The `.spec.` files are the 1D spectra.
###Code
hdu_raw = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec.fits')
hdu_raw.info()
###Output
Filename: ../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 182 (2048, 28) float32
1 ImageHDU 87 (2048, 28) float64
###Markdown
Header/Data Unit 0 is the $N_{pix} \times N_{orders}$ **spectrum**. Header/Data Unit 1 is the $N_{pix} \times N_{orders}$ **wavelength solution**. The **metadata** about the observations are saved in the header of the spectrum.
###Code
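# Quick structural check of the HDUs described above (illustrative, not in the original notebook):
# HDU 0 holds the flux array and HDU 1 the wavelength solution, both shaped (N_orders, N_pix).
#print(hdu_raw[0].data.shape, hdu_raw[1].data.shape)   # expected: (28, 2048) (28, 2048)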
#np.array(list(hdu[0].header.keys()))[0:40]
hdr = hdu_raw[0].header
string = 'This spectrum is of the source {OBJECT}.\n The object type is listed as: "{OBJTYPE}".\n\
The spectra were acquired at {ACQTIME1} UTC. \n The units of the raw spectrum are {UNITS}. \n\
The exposure time was {EXPTIME} seconds. \n The airmass was {AMSTART}.'
formatted_string = string.format(ACQTIME1=hdr['ACQTIME1'], UNITS=hdr['UNITS'], EXPTIME=hdr['EXPTIME'],
OBJECT=hdr['OBJECT'], AMSTART=hdr['AMSTART'], OBJTYPE=hdr['OBJTYPE'])
print(formatted_string)
###Output
This spectrum is of the source HR 1237.
The object type is listed as: "STD".
The spectra were acquired at 2015-11-18-08:39:48.860 UTC.
The units of the raw spectrum are ADUs.
The exposure time was 120.00 seconds.
The airmass was 1.0990.
###Markdown
Single order plot. We'll pick a single order and make a plot.
###Code
o=10
plt.plot(hdu_raw[1].data[o, :], hdu_raw[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Raw signal (ADU)");
###Output
_____no_output_____
###Markdown
...what we really want is the `.spec_flattened.` file. Flattened A0V Star: 20151117/SDCH_20151117_0199.spec_flattened.fits
###Code
hdu_f = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec_flattened.fits')
hdu_f.info()
###Output
Filename: ../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0199.spec_flattened.fits
No. Name Type Cards Dimensions Format
0 SPEC_FLATTENED PrimaryHDU 182 (2048, 28) float64
###Markdown
The header info for the flattened file is the same as the header for the raw file.
###Code
#hdu_f['SPEC_FLATTENED'].header[0:10]
o=10
plt.plot(hdu_raw[1].data[o, :], hdu_f[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Normalized signal");
plt.title('{OBJECT} flattened spectrum'.format(OBJECT=hdr['OBJECT']));
###Output
_____no_output_____
###Markdown
Science data file: `SDCH_20151117_0205.spec.fits`
###Code
hdu_tar = fits.open('../data/raw/LkCa4_gully/outdata/20151117/SDCH_20151117_0205.spec.fits')
hdu_tar.info()
hdr = hdu_tar[0].header
string = 'This spectrum is of the source {OBJECT}.\n The object type is listed as: "{OBJTYPE}".\n\
The spectra were acquired at {ACQTIME1} UTC. \n The units of the raw spectrum are {UNITS}. \n\
The exposure time was {EXPTIME} seconds. \n The airmass was {AMSTART}.'
formatted_string = string.format(ACQTIME1=hdr['ACQTIME1'], UNITS=hdr['UNITS'], EXPTIME=hdr['EXPTIME'],
OBJECT=hdr['OBJECT'], AMSTART=hdr['AMSTART'], OBJTYPE=hdr['OBJTYPE'])
print(formatted_string)
o=10
plt.plot(hdu_tar[1].data[o, :], hdu_tar[0].data[o, :])
plt.ylim(ymin=0)
plt.xlabel("$\lambda$ ($\mu$m)")
plt.ylabel("Raw signal (ADU)");
plt.title('{OBJECT} raw spectrum'.format(OBJECT=hdr['OBJECT']));
###Output
_____no_output_____ |
notebooks/00 - Build reference.ipynb | ###Markdown
Download and extract `hg19` assembly
###Code
ls -lah ../ref
%%bash
wget ftp://igenome:[email protected]/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz \
--directory-prefix=../ref
tar -xzvf ../ref/Homo_sapiens_UCSC_hg19.tar.gz -C ../ref
rm ../ref/Homo_sapiens_UCSC_hg19.tar.gz
###Output
Homo_sapiens/UCSC/hg19/
Homo_sapiens/UCSC/hg19/Annotation/
Homo_sapiens/UCSC/hg19/Annotation/Genes
Homo_sapiens/UCSC/hg19/Annotation/README.txt
Homo_sapiens/UCSC/hg19/Annotation/SmallRNA
Homo_sapiens/UCSC/hg19/Annotation/Variation
Homo_sapiens/UCSC/hg19/Annotation/Archives/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/ChromInfo.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/SmallRNA/precursor.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Variation/snp131.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-08-30-21-45-18/Variation/snp132.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/refMrna.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/ChromInfo.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/SmallRNA/hairpin.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Variation/snp130.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Variation/snp131.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2011-01-27-18-25-49/Variation/snp132.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/ChromInfo.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/SmallRNA/precursor.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/snp131.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/snp135.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/snp137.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/snp138.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2013-03-06-11-23-03/Variation/snp132.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/ChromInfo.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/SmallRNA/precursor.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Variation/snp131.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Variation/snp135.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2012-03-09-03-24-41/Variation/snp132.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/SmallRNA/hairpin.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp142.txt.idx
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp142.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp142.vcf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp135.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp137.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Variation/snp138.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Genes/kgXref.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/README.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/SmallRNA/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/SmallRNA/mature.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/SmallRNA/hairpin.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp142.txt.idx
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp142.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp142.vcf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp135.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp137.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2015-07-17-14-32-32/Variation/snp138.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-current
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/splice_sites-49.fa.2bpb
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/exon_coords.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/splice_sites-49.fa.vld
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/splice_sites-49.fa.idx
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_49/splice_sites-49.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/genes.gtf
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/refMrna.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/DATE.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/ChromInfo.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/refSeqSummary.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/cytoBand.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/refFlat.txt.gz
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/splice_sites-34.fa
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/splice_sites-34.fa.vld
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/exon_coords.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/splice_sites-34.fa.idx
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/splice_sites_34/splice_sites-34.fa.2bpb
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/refFlat.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/knownGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/knownToRefSeq.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/refGene.txt
Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2010-09-27-22-25-17/kgXref.txt
Homo_sapiens/UCSC/hg19/Sequence/
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.3.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.rev.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.rev.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.4.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.fa.fai
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chrM.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr20.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr19.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr1.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr7.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr16.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chrM.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr11.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr22.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr14.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr2.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr20.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr15.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM_profile.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr16.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr8.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr1.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr4.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr21.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr7.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr13.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chrX.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chrY.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr22.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr4.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr21.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chrY.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr21.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr9.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr6.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr3.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/sno.txt
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr12.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr13.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr17.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr14.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr15.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr9.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr3.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr6.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/tRNA.txt
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr19.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr11.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr6.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr18.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr9.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr17.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr5.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr18.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr4.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr8.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr2.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr2.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr18.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr17.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr5.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr8.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr12.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr20.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr10.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr19.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr12.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr5.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr13.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr1.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr15.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chrM.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr16.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chrX.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/miRBase/
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/miRBase/knownMiR.gff3
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/miRBase/mature.fa
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/miRBase/hairpin.fa
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr10.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chrY.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chr14.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr22.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr7.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/FM.chr11.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/bwt.chrX.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr3.idx
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/repeatMasker.txt
Homo_sapiens/UCSC/hg19/Sequence/MDSBowtieIndex/SA.chr10.idx
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa.amb
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa.sa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.amb
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.sa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.bwt
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.ann
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.rbwt
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.pac
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.rpac
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.5.x/genome.fa.rsa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa.amb
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa.sa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa.bwt
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa.ann
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa.pac
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa.bwt
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa.ann
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa.pac
Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chrY.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr21.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr5.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr3.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr2.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr6.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr16.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr20.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr15.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr12.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chrM.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr1.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr4.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr9.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr18.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr10.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr22.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr14.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chrX.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr11.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr13.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr19.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr8.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr17.fa
Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr7.fa
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nhr
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nsq
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nsd
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nsi
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nin
Homo_sapiens/UCSC/hg19/Sequence/BlastDB/genome.fa.nog
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.2.ebwt
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.rev.2.ebwt
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.1.ebwt
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.3.ebwt
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.rev.1.ebwt
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome.4.ebwt
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/genome_mfa.GA_conversion.fa
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.3.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.rev.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.rev.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/GA_conversion/BS_GA.4.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/genome_mfa.CT_conversion.fa
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.rev.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.4.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.2.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.rev.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.3.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/Bisulfite_Genome/CT_conversion/BS_CT.1.bt2
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/GenomeSize.xml
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.dict
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa.fai
Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/polyA.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/hum5SrDNA.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/chrM.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/polyC.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/humRibosomal.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/adapter_contam1.fa
Homo_sapiens/UCSC/hg19/Sequence/AbundantSequences/phix.fa
README.txt
###Markdown
Build `GTF` dataframe from the `lncRNA` annotation file
###Code
def gtf_df(filename):
res = []
with open(filename, 'rt') as fi:
for line in fi:
fields = line.strip().split('\t')
if fields[2] == 'exon':
rec = {}
idfields = fields[8].strip().split(';')
for idfield in idfields:
if idfield:
key, val = idfield.split()
if key == 'transcript_id' or key == 'exon_number':
rec.update({key: val.strip('"')})
rec.update({'chr': fields[0],
'start': int(fields[3]),
'end': int(fields[4])})
res.append(rec)
return pd.DataFrame.from_records(res)
gtf = gtf_df('../ref/lncRNA.gtf')
gtf
###Output
_____no_output_____
###Markdown
Extract the sequence of the locus annotated in `lncRNA.gtf` plus 500 bp on each side
###Code
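# parse_fasta is assumed to be defined earlier in the notebook; for reference, a
# minimal sketch of such a generator (yielding (header, sequence) pairs) is below.
# The guard avoids shadowing the notebook's own definition if it already exists.
if 'parse_fasta' not in globals():
    def parse_fasta(path):
        header, seq = None, []
        with open(path) as fh:
            for line in fh:
                line = line.rstrip()
                if line.startswith('>'):
                    if header is not None:
                        yield header, ''.join(seq)
                    header, seq = line[1:], []
                else:
                    seq.append(line)
        if header is not None:
            yield header, ''.join(seq)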
parser = parse_fasta('../ref/Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/chr2.fa')
_, chr2 = next(parser)
def get_seqs(rec):
return chr2[rec.start:rec.end]
gtf['sequence'] = gtf.apply(get_seqs, axis=1)
gtf['seq_length'] = gtf['end'] - gtf['start']
gtf
fa_tpl = '>{}'
with open('../ref/ref_locus.fa', 'wt') as fo:
header = fa_tpl.format('lnrCXCR4')
fo.write('{}\n{}\n'.format(header, chr2[gtf.start.min()-500:gtf.end.max()+500]))
!head ../ref/ref_locus.fa
###Output
>lnrCXCR4
AGGAGTTTCCAGGTGACCCCTGGAAGTCCCAGTGCATTGCAGTCTTAGCACATTGCTCgagaaggtgagggagaagaagagagaaatgaaagaaaatttccagatgaagaaaagacaggaaagacagaggaagaaaggagggagggagattgaataaaagaaagagggagaaggtgaagaaggaaagagagagagagaATATATATAACGCTTTTAGGTGTTACCTTTGATCAGGGCGATTGACCAAGGTCAGCTTTCTTCAACGTGTATTCAGAGGAGGGCTCATGTCCTATAAGGTATTCATTGGTGTTTTACGGGGGAAATTTTTAAAAAGTGGGGCAGGGAAATCCACTGGTCCCACCCATTTGGGAAGTGTTTGgttcagcaggtttctctggtgtagctcctctcagagcctttcgtaaactggagtgcattatggagctccaagatggggccatagtatacaatttctccttacattatttTATTGAGATATTGTTTATTCAAGGACAAGCAGTCTGAGAAATGGAGTTTTTGAAATAATGATCCAGGCCTTTCCTGCAACACTGAGCTGTTTCTTTCCTTTTCTTTTTTAACCATGCAACAAAACCTTTATTAGCATTTTGAACAGGTTCAGCTATTACTGAAACTTGTAATTTCTAAACTTAAGTTGGGGCAAATGGCTATACGGCAGAGTAATGCCATCACTGGGCACTGCGAATGCAAGACTGGAGAATTAACAGCCACCCCTCAGGTGCAGGACCAGGTGCAGGGTTGACTCTTTCTGGATGTTGTAGTCAGAAAGAGTGCGGCCATCTTCCAGCTGCTTGCCTGCAAAGATGAGCCTCTGCTGGTCGGGGCTGGGGGTGGGGGGGTGCCTTCTTTATCCTGGATCTTGGCCTTCACATTTTCCATGGTGTCACTGGGCTCCACTTCCAGGGTGATGGTCTTGCCAGTCAGGGTCTTCACGAAGATCTGCATACCACCTCTCAGACACAGGACCAGGTGCAGGGTCTACTCTTTCTGGATGTTATAGTCAGAATGAGTGCAGTCATCTTCCACCTGCTTGACTGCAAAGATGAGCCTCTGCTGGTCCGGGGTAATGCCTTCCTTATCCTGGATCTTGGCCTTCACATTTTCGATGGTGTCCCTGGGCTCCACTTCAAGGGCAATGGTCTTGCTGGTAAGGGTCTTCACGAAGATCTGCATTTTGACCTGTTAGCGGATATGACGAGGCTCCGAAACACCAGTCATGTCCAGCCACAGGGACACCACCACATACTCACCCAACAAAGCCAGTCATCCCTACCACTGAGCTATTTCTATGCGAGTTCTTCCCTTGGCCCTTAAGCTGGGATAAATCCCTGTCTTCATGCAAAGTTAGAGACATGATTAGATACAAGATCTACAATATTTGTGGATAAAAACCAAACAGTTCCTTAAGAAAACTACAACTATTTTTTTTGGCTGACACCAGAGTGAAATTTCCCCCATTTATCCCCCATCAGCCTTTGGTAGGAGCACAAAAGCTACGTGGCAGGGCACATTCCAGCACCATGCCCATGACACCAACTCTCGTTCATTCATTCCTTGACGTATTTACATTCAAACTCCGTCCTCGTTTGCTGCTGTGCTGCTGGTTCTGGCTCCAAGCACttctttccttcttttttttttgagacaaagtctcgctgtcacccaggctggagtgcagtggcgtgatctcagttcactgcaacctccgcctcctgggttcaagcgattctcctgtctcagcttcccgaatagctgggagtgggccaccacacctggctaatttttgtatttttagtagagagggagccatgttagccaggctggtcttgaactcctaacctcaggtgatccacccgccttggcctcccaaagtgctgggattacaggcttgagtcatcacacctggccTCCAAGCACTTCTTACTCTGTCCTCAGACTTACGTGCTCATGCCTGACTCCCATATCTTCAAAGTTGAAAATGTTCTGATTTGTTTTCTCG
###Markdown
Build `bowtie2` index for the locus reference
###Code
%%bash
bowtie2-build ../ref/ref_locus.fa ../ref/lncRNA_locus
###Output
Settings:
Output files: "../ref/lncRNA_locus.*.bt2"
Line rate: 6 (line is 64 bytes)
Lines per side: 1 (side is 64 bytes)
Offset rate: 4 (one in 16)
FTable chars: 10
Strings: unpacked
Max bucket size: default
Max bucket size, sqrt multiplier: default
Max bucket size, len divisor: 4
Difference-cover sample period: 1024
Endianness: little
Actual local endianness: little
Sanity checking: disabled
Assertions: disabled
Random seed: 0
Sizeofs: void*:8, int:4, long:8, size_t:8
Input files DNA, FASTA:
../ref/ref_locus.fa
Reading reference sizes
Time reading reference sizes: 00:00:00
Calculating joined length
Writing header
Reserving space for joined string
Joining reference sequences
Time to join reference sequences: 00:00:00
bmax according to bmaxDivN setting: 509
Using parameters --bmax 382 --dcv 1024
Doing ahead-of-time memory usage test
Passed! Constructing with these parameters: --bmax 382 --dcv 1024
Constructing suffix-array element generator
Building DifferenceCoverSample
Building sPrime
Building sPrimeOrder
V-Sorting samples
V-Sorting samples time: 00:00:00
Allocating rank array
Ranking v-sort output
Ranking v-sort output time: 00:00:00
Invoking Larsson-Sadakane on ranks
Invoking Larsson-Sadakane on ranks time: 00:00:00
Sanity-checking and returning
Building samples
Reserving space for 12 sample suffixes
Generating random suffixes
QSorting 12 sample offsets, eliminating duplicates
QSorting sample offsets, eliminating duplicates time: 00:00:00
Multikey QSorting 12 samples
(Using difference cover)
Multikey QSorting samples time: 00:00:00
Calculating bucket sizes
Splitting and merging
Splitting and merging time: 00:00:00
Avg bucket size: 2038 (target: 381)
Converting suffix-array elements to index image
Allocating ftab, absorbFtab
Entering Ebwt loop
Getting block 1 of 1
No samples; assembling all-inclusive block
Sorting block of length 2038 for bucket 1
(Using difference cover)
Sorting block time: 00:00:00
Returning block of 2039 for bucket 1
Exited Ebwt loop
fchr[A]: 0
fchr[C]: 507
fchr[G]: 982
fchr[T]: 1466
fchr[$]: 2038
Exiting Ebwt::buildToDisk()
Returning from initFromVector
Wrote 4195178 bytes to primary EBWT file: ../ref/lncRNA_locus.1.bt2
Wrote 516 bytes to secondary EBWT file: ../ref/lncRNA_locus.2.bt2
Re-opening _in1 and _in2 as input streams
Returning from Ebwt constructor
Headers:
len: 2038
bwtLen: 2039
sz: 510
bwtSz: 510
lineRate: 6
offRate: 4
offMask: 0xfffffff0
ftabChars: 10
eftabLen: 20
eftabSz: 80
ftabLen: 1048577
ftabSz: 4194308
offsLen: 128
offsSz: 512
lineSz: 64
sideSz: 64
sideBwtSz: 48
sideBwtLen: 192
numSides: 11
numLines: 11
ebwtTotLen: 704
ebwtTotSz: 704
color: 0
reverse: 0
Total time for call to driver() for forward index: 00:00:00
Reading reference sizes
Time reading reference sizes: 00:00:00
Calculating joined length
Writing header
Reserving space for joined string
Joining reference sequences
Time to join reference sequences: 00:00:00
Time to reverse reference sequence: 00:00:00
bmax according to bmaxDivN setting: 509
Using parameters --bmax 382 --dcv 1024
Doing ahead-of-time memory usage test
Passed! Constructing with these parameters: --bmax 382 --dcv 1024
Constructing suffix-array element generator
Building DifferenceCoverSample
Building sPrime
Building sPrimeOrder
V-Sorting samples
V-Sorting samples time: 00:00:00
Allocating rank array
Ranking v-sort output
Ranking v-sort output time: 00:00:00
Invoking Larsson-Sadakane on ranks
Invoking Larsson-Sadakane on ranks time: 00:00:00
Sanity-checking and returning
Building samples
Reserving space for 12 sample suffixes
Generating random suffixes
QSorting 12 sample offsets, eliminating duplicates
QSorting sample offsets, eliminating duplicates time: 00:00:00
Multikey QSorting 12 samples
(Using difference cover)
Multikey QSorting samples time: 00:00:00
Calculating bucket sizes
Splitting and merging
Splitting and merging time: 00:00:00
Avg bucket size: 2038 (target: 381)
Converting suffix-array elements to index image
Allocating ftab, absorbFtab
Entering Ebwt loop
Getting block 1 of 1
No samples; assembling all-inclusive block
Sorting block of length 2038 for bucket 1
(Using difference cover)
Sorting block time: 00:00:00
Returning block of 2039 for bucket 1
Exited Ebwt loop
fchr[A]: 0
fchr[C]: 507
fchr[G]: 982
fchr[T]: 1466
fchr[$]: 2038
Exiting Ebwt::buildToDisk()
Returning from initFromVector
Wrote 4195178 bytes to primary EBWT file: ../ref/lncRNA_locus.rev.1.bt2
Wrote 516 bytes to secondary EBWT file: ../ref/lncRNA_locus.rev.2.bt2
Re-opening _in1 and _in2 as input streams
Returning from Ebwt constructor
Headers:
len: 2038
bwtLen: 2039
sz: 510
bwtSz: 510
lineRate: 6
offRate: 4
offMask: 0xfffffff0
ftabChars: 10
eftabLen: 20
eftabSz: 80
ftabLen: 1048577
ftabSz: 4194308
offsLen: 128
offsSz: 512
lineSz: 64
sideSz: 64
sideBwtSz: 48
sideBwtLen: 192
numSides: 11
numLines: 11
ebwtTotLen: 704
ebwtTotSz: 704
color: 0
reverse: 1
Total time for backward call to driver() for mirror index: 00:00:00
|
PHYS2211.Measurement.ipynb | ###Markdown
PHYS 2211 - Introductory Physics Laboratory I: Measurement and Error Propagation. Name: Tatiana Krivosheev. Partners: Oleg Krivosheev. Annex A
###Code
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sympy
%matplotlib inline
###Output
_____no_output_____
###Markdown
Annex A - Data and Calculations 1. Rectangular Block
###Code
class ListTable(list):
""" Overridden list class which takes a 2-dimensional list of
the form [[1,2,3],[4,5,6]], and renders an HTML Table in
IPython Notebook. """
def _repr_html_(self):
html = ["<table>"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
# plain text
plt.title('alpha > beta')
# math text
plt.title(r'$\alpha > \beta$')
from sympy import symbols, init_printing
init_printing(use_latex=True)
delta = symbols('delta')
delta**2/3
from sympy import symbols, init_printing
init_printing(use_latex=True)
delta = symbols('delta')
table = ListTable()
table.append(['measuring device', 'l', 'delta l', 'w', 'delta w', 'h', 'delta h'])
table.append([' ', '(cm)', '(cm)', '(cm)','(cm)', '(cm)', '(cm)'])
lr=4.9
wr=2.5
hr=1.2
lc=4.90
wc=2.54
hc=1.27
deltar=0.1
deltac=0.01
table.append(['ruler',lr, deltar, wr, deltar, hr, deltar])
table.append(['vernier caliper', lc, deltac, wc, deltac, hc, deltac])
table
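# Error propagation for the block volume, using the caliper values above
# (a sketch: for small independent uncertainties, dV/V is roughly dl/l + dw/w + dh/h)
V = lc * wc * hc
dV = V * (deltac/lc + deltac/wc + deltac/hc)
print('V = {:.2f} +/- {:.2f} cm^3'.format(V, dV))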
table = ListTable()
table.append(['l', 'delta l', 'w', 'delta w', 'h', 'delta h'])
table.append(['(cm)', '(cm)', '(cm)','(cm)', '(cm)', '(cm)'])
lr=4.9
wr=2.5
hr=1.2
lc=4.90
wc=2.54
hc=1.27
deltar=0.1
deltac=0.01
table.append([lr, deltar, wr, deltar, hr, deltar])
table.append([lc, deltac, wc, deltac, hc, deltac])
table
# code below demonstrates a linear fit of the measured voltage vs. length data
import numpy as np
x = [7,10,15,20,25,30,35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95]
y= [0.228,0.298,0.441,0.568,0.697,0.826,0.956, 1.084, 1.211, 1.339,1.468, 1.599, 1.728, 1.851, 1.982, 2.115, 2.244, 2.375, 2.502]
plt.scatter(x, y)
plt.title('Linearity test')
plt.xlabel('Length (cm)')
plt.ylabel('Voltage (V)')
fit = np.polyfit(x,y,1)
fit_fn = np.poly1d(fit)
plt.plot(x,y, 'yo', x, fit_fn(x), '--k')
m,b = np.polyfit(x, y, 1)
print ('m={0}'.format(m))
print ('b={0}'.format(b))
plt.show()
###Output
m=0.0258164673413
b=0.0491959521619
###Markdown
2. Wheatstone bridge measurements
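For reference, the slide-wire bridge relation used in the cell below, with the total wire length taken as 100 cm (so $L_2 = 100 - L_1$): $$R_u = R_k \frac{L_2}{L_1} = R_k \frac{100 - L_1}{L_1}$$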
###Code
Rk = 3.5 # kOhms
table = ListTable()
table.append(['Ru', 'Ru, acc', 'L1', 'L2', 'Ru, wheatstone', 'Disc'])
table.append(['(kOhms)', '(kOhms)', '(cm)', '(cm)', '(kOhms)', ' % '])
x = [0.470,0.680,1.000, 1.500]
y= [0.512,0.712,1.131,1.590]
z= [88.65, 84.50, 76.90, 69.80]
for i in range(0,len(x)):
xx = x[i]
yy = y[i]
zz = z[i]
Rw = (100.0 - zz)/zz*Rk
Disc = (Rw-yy)/yy*100.0
table.append([xx, yy, zz, 100.0-zz,Rw, Disc])
table
x = [0.470,0.680,1.000, 1.500]
y= [0.512,0.712,1.131,1.590]
z= [88.65, 84.50, 76.90, 69.80]
for i in range(0,len(x)):
xx = x[i]
yy = y[i]
zz = z[i]
Rw = (100.0 - zz)/zz*Rk
Disc = (Rw-yy)/yy*100.0
plt.scatter(yy, Disc)
plt.title('Discrepancy vs Resistance')
plt.xlabel('Resistance (kOhms)')
plt.ylabel('Discrepancy (%)')
plt.show()
###Output
_____no_output_____ |
experiment_CLEO.ipynb | ###Markdown
**How to save this notebook to your personal Drive** To copy this notebook to your Google Drive, go to File and select "Save a copy in Drive", where it will automatically open the copy in a new tab for you to work in. This notebook will be saved into a folder on your personal Drive called "Colab Notebooks". Still stumped? Check out this video for help. What is CLEO?
###Code
from IPython.display import Image
Image(url='https://raw.githubusercontent.com/particle-physics-playground/playground/master/activities/images/cleo_det_proc.jpg',width=400)
###Output
_____no_output_____
###Markdown
$$e^+e^- \rightarrow \chi \chi$$ The CLEO-II detector was designed to measure the properties of particles produced in the collisions of electrons and positrons supplied by CESR. The CLEO-II detector was made of many sub-detectors. When the particles are created in each electron-positron collision, they fly through these detectors and we are able to measure the direction in which all these particles went.
###Code
from IPython.display import Image
Image(url='https://raw.githubusercontent.com/particle-physics-playground/playground/master/activities/images/kpipi_color_enhanced-resized.png',width=400)
###Output
_____no_output_____
###Markdown
Displays like the one above can be difficult to understand, but they are not what we physicists actually analyze. Instead, we use the displays to get information about the electric charge, energy, and momentum of the particles, and that is the data we use. Let's go take a look at some of that data! The first step is to import some helper functions. One is to get the collisions data out of the files, and the other is to display the particles that are produced in these collisions.
###Code
###### This cell need only be run once per session ##############
###### Make sure your runtime type is Python 3 #########
# Import h5hep from Github. This is to allow us to read these
# particular files.
!pip install git+https://github.com/mattbellis/h5hep.git
# Import custom tools package from Github. These are some simple accessor functions
# to make it easier to work with these data files.
!pip install git+https://github.com/mattbellis/particle_physics_simplified.git
import pps_tools as pps
import h5hep
###Output
_____no_output_____
###Markdown
Next, we will open the file and pull out the collision data. This will return a Python list of all the collisions in that file.You can use these data to visualize individual collisions or to perform a data analysis on all the collisions.
###Code
pps.download_from_drive('small_CLEO_test_file.hdf5')
infile = 'data/small_CLEO_test_file.hdf5'
collisions = pps.get_collisions(infile,experiment="CLEO",verbose=False)
number_of_collisions = len(collisions)
print("# of electron-positron collisions: %d" % (number_of_collisions))
import matplotlib.pylab as plt
###Output
_____no_output_____
###Markdown
Let's take a look at some of these collisions!
###Code
pps.display_collision3D(collisions[6],experiment='CLEO')
pps.display_collision3D(collisions[3],experiment='CLEO')
pps.display_collision3D(collisions[6],experiment='CLEO')
###Output
_____no_output_____
###Markdown
What are we looking at here? * The green lines represent the electrons colliding. * The other lines represent particles created in the collisions. The length of these lines tells us how much momentum (or energy) they have. The colors are different particles/objects: * Red - pions * Orange - kaons * Blue - muons * Green - electrons * Gray - photons. You can also make plots of the properties of the particles.
###Code
energies = []
for collision in collisions:
pions = collision['pions']
for pion in pions:
energy = pion['e']
energies.append(energy)
plt.figure(figsize=(4,4))
h = plt.hist(energies)
plt.xlabel('Energy'),plt.ylabel('Frequency'),plt.title('Histogram of Pion Energies');
###Output
_____no_output_____ |
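###Markdown
A similar loop works for the other particle lists; for example, kaon energies (a sketch, assuming each collision dictionary exposes a 'kaons' list with the same fields as 'pions'):
###Code
# Histogram of kaon energies (assumes collision['kaons'] mirrors collision['pions'])
kaon_energies = []
for collision in collisions:
    for kaon in collision['kaons']:
        kaon_energies.append(kaon['e'])
plt.figure(figsize=(4,4))
h = plt.hist(kaon_energies)
plt.xlabel('Energy'),plt.ylabel('Frequency'),plt.title('Histogram of Kaon Energies');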
AirBNB_Tensorflow_keras.ipynb | ###Markdown
Build a Neural Network with Tensorflow Keras on AirBNB prices in Berlin, Germany. This is a cleaned dataset that I worked with in Unit 3 of Lambda School. The unit project was to use a neural network that would be pickled into an API for a web team to utilize in a web app. Here's my version of the project:
###Code
# Import tensorflow
import tensorflow as tf
# Imports
import pandas as pd
import seaborn as sns
# Read in data with shape and head
df = pd.read_csv('data/airbnb data cleaned.csv')
print(df.shape)
df.head()
df = df.drop(['Unnamed: 0'], axis=1)
###Output
_____no_output_____
###Markdown
Get a feel for the data:
###Code
# Mean price of the rentals is $57 per night
df.describe().T
# Have a look at the corr table
df.corr()
# Visualize correlation of features to price
df.corr()['price'].sort_values()[:-2].plot(kind='bar')
# Distributions between 'price' and 'accommodates'
sns.boxplot(x='accommodates',y='price',data=df)
# Distributions between 'price' and 'bedrooms'
sns.boxplot(x='bedrooms',y='price',data=df)
###Output
_____no_output_____
###Markdown
Construct the model:
###Code
# Train test split:
from sklearn.model_selection import train_test_split
X = df.drop('price', axis=1).values
y = df['price'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# Scale data:
from sklearn.preprocessing import MinMaxScaler
# Create scaler object
sc = MinMaxScaler()
# Fit scaler on X_train to apply transformation on X sets
sc.fit(X_train)
# Transform both X
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
# Viz of X_train[0] as scaled
print(X_train[0])
X_train.shape
X_test.shape
# Create the Neural Network
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Flatten
from tensorflow.keras import metrics
# Create model object
model = Sequential()
# Input, hidden, output layers
model.add(Dense(128, activation='relu', input_shape=(28,)))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1))
# Compile model
model.compile(loss='mean_absolute_error', optimizer='adam', metrics=[metrics.mae])
# Fit model with validation data to test for overfitting
model.fit(X_train,y_train, validation_data=(X_test,y_test), batch_size=128, epochs=400)
###Output
Train on 17884 samples, validate on 4471 samples
Epoch 1/400
17884/17884 [==============================] - 1s 65us/sample - loss: 35.0920 - mean_absolute_error: 35.0920 - val_loss: 22.4138 - val_mean_absolute_error: 22.4138
Epoch 2/400
17884/17884 [==============================] - 1s 31us/sample - loss: 22.5816 - mean_absolute_error: 22.5816 - val_loss: 19.3218 - val_mean_absolute_error: 19.3218
Epoch 3/400
17884/17884 [==============================] - 1s 30us/sample - loss: 20.8706 - mean_absolute_error: 20.8706 - val_loss: 18.7210 - val_mean_absolute_error: 18.7210
Epoch 4/400
17884/17884 [==============================] - 1s 30us/sample - loss: 20.4810 - mean_absolute_error: 20.4810 - val_loss: 18.6344 - val_mean_absolute_error: 18.6344
Epoch 5/400
17884/17884 [==============================] - 1s 30us/sample - loss: 20.2841 - mean_absolute_error: 20.2841 - val_loss: 18.9863 - val_mean_absolute_error: 18.9863
Epoch 6/400
17884/17884 [==============================] - 1s 30us/sample - loss: 20.1098 - mean_absolute_error: 20.1098 - val_loss: 18.3586 - val_mean_absolute_error: 18.3586
Epoch 7/400
17884/17884 [==============================] - 1s 30us/sample - loss: 20.0400 - mean_absolute_error: 20.0400 - val_loss: 18.5255 - val_mean_absolute_error: 18.5255
Epoch 8/400
17884/17884 [==============================] - 1s 31us/sample - loss: 19.7998 - mean_absolute_error: 19.7998 - val_loss: 18.3066 - val_mean_absolute_error: 18.3066
Epoch 9/400
17884/17884 [==============================] - 1s 35us/sample - loss: 19.8719 - mean_absolute_error: 19.8719 - val_loss: 18.5013 - val_mean_absolute_error: 18.5013
Epoch 10/400
17884/17884 [==============================] - 1s 32us/sample - loss: 19.8865 - mean_absolute_error: 19.8865 - val_loss: 18.1483 - val_mean_absolute_error: 18.1483
Epoch 11/400
17884/17884 [==============================] - 1s 31us/sample - loss: 19.8793 - mean_absolute_error: 19.8793 - val_loss: 18.1612 - val_mean_absolute_error: 18.1612
Epoch 12/400
17884/17884 [==============================] - 1s 32us/sample - loss: 19.6183 - mean_absolute_error: 19.6183 - val_loss: 18.2320 - val_mean_absolute_error: 18.2320
Epoch 13/400
17884/17884 [==============================] - 1s 31us/sample - loss: 19.7510 - mean_absolute_error: 19.7510 - val_loss: 18.0235 - val_mean_absolute_error: 18.0235
Epoch 14/400
17884/17884 [==============================] - 1s 34us/sample - loss: 19.6622 - mean_absolute_error: 19.6622 - val_loss: 18.0414 - val_mean_absolute_error: 18.0414
Epoch 15/400
17884/17884 [==============================] - 1s 33us/sample - loss: 19.6716 - mean_absolute_error: 19.6716 - val_loss: 17.8773 - val_mean_absolute_error: 17.8773
Epoch 16/400
17884/17884 [==============================] - 1s 30us/sample - loss: 19.3872 - mean_absolute_error: 19.3872 - val_loss: 17.8772 - val_mean_absolute_error: 17.8772
Epoch 17/400
17884/17884 [==============================] - 1s 30us/sample - loss: 19.4264 - mean_absolute_error: 19.4264 - val_loss: 17.8594 - val_mean_absolute_error: 17.8594
Epoch 18/400
17884/17884 [==============================] - 1s 29us/sample - loss: 19.4258 - mean_absolute_error: 19.4258 - val_loss: 17.8050 - val_mean_absolute_error: 17.8050
Epoch 19/400
17884/17884 [==============================] - 1s 29us/sample - loss: 19.1933 - mean_absolute_error: 19.1932 - val_loss: 17.7356 - val_mean_absolute_error: 17.7356
Epoch 20/400
17884/17884 [==============================] - 1s 33us/sample - loss: 19.1847 - mean_absolute_error: 19.1847 - val_loss: 17.8885 - val_mean_absolute_error: 17.8885
Epoch 21/400
17884/17884 [==============================] - 1s 31us/sample - loss: 19.0782 - mean_absolute_error: 19.0782 - val_loss: 17.8778 - val_mean_absolute_error: 17.8778
Epoch 22/400
17884/17884 [==============================] - 1s 30us/sample - loss: 19.0334 - mean_absolute_error: 19.0334 - val_loss: 17.6169 - val_mean_absolute_error: 17.6169
Epoch 23/400
17884/17884 [==============================] - 1s 34us/sample - loss: 19.0238 - mean_absolute_error: 19.0238 - val_loss: 17.8816 - val_mean_absolute_error: 17.8816
Epoch 24/400
17884/17884 [==============================] - 1s 33us/sample - loss: 18.9815 - mean_absolute_error: 18.9815 - val_loss: 17.5812 - val_mean_absolute_error: 17.5812
Epoch 25/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.8964 - mean_absolute_error: 18.8964 - val_loss: 17.5293 - val_mean_absolute_error: 17.5293
Epoch 26/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.9565 - mean_absolute_error: 18.9565 - val_loss: 17.4800 - val_mean_absolute_error: 17.4800
Epoch 27/400
17884/17884 [==============================] - 1s 32us/sample - loss: 18.9331 - mean_absolute_error: 18.9331 - val_loss: 17.4876 - val_mean_absolute_error: 17.4876
Epoch 28/400
17884/17884 [==============================] - 1s 34us/sample - loss: 18.7055 - mean_absolute_error: 18.7054 - val_loss: 17.3937 - val_mean_absolute_error: 17.3937
Epoch 29/400
17884/17884 [==============================] - 1s 37us/sample - loss: 18.7485 - mean_absolute_error: 18.7485 - val_loss: 17.3934 - val_mean_absolute_error: 17.3934
Epoch 30/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.7017 - mean_absolute_error: 18.7017 - val_loss: 17.4754 - val_mean_absolute_error: 17.4754
Epoch 31/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.6107 - mean_absolute_error: 18.6107 - val_loss: 17.4382 - val_mean_absolute_error: 17.4382
Epoch 32/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.5969 - mean_absolute_error: 18.5969 - val_loss: 17.4233 - val_mean_absolute_error: 17.4233
Epoch 33/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.6776 - mean_absolute_error: 18.6776 - val_loss: 17.2994 - val_mean_absolute_error: 17.2994
Epoch 34/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.5067 - mean_absolute_error: 18.5067 - val_loss: 17.3161 - val_mean_absolute_error: 17.3161
Epoch 35/400
17884/17884 [==============================] - 1s 32us/sample - loss: 18.4643 - mean_absolute_error: 18.4643 - val_loss: 17.3579 - val_mean_absolute_error: 17.3579
Epoch 36/400
17884/17884 [==============================] - 1s 32us/sample - loss: 18.4630 - mean_absolute_error: 18.4630 - val_loss: 17.2330 - val_mean_absolute_error: 17.2330
Epoch 37/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.3340 - mean_absolute_error: 18.3340 - val_loss: 17.3424 - val_mean_absolute_error: 17.3424
Epoch 38/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.2960 - mean_absolute_error: 18.2960 - val_loss: 17.2255 - val_mean_absolute_error: 17.2255
Epoch 39/400
17884/17884 [==============================] - 1s 31us/sample - loss: 18.2572 - mean_absolute_error: 18.2572 - val_loss: 17.2390 - val_mean_absolute_error: 17.2390
Epoch 40/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.3318 - mean_absolute_error: 18.3318 - val_loss: 17.2855 - val_mean_absolute_error: 17.2855
Epoch 41/400
17884/17884 [==============================] - 1s 33us/sample - loss: 18.2098 - mean_absolute_error: 18.2098 - val_loss: 17.5945 - val_mean_absolute_error: 17.5945
Epoch 42/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.3576 - mean_absolute_error: 18.3576 - val_loss: 17.2633 - val_mean_absolute_error: 17.2633
Epoch 43/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.2974 - mean_absolute_error: 18.2974 - val_loss: 17.3174 - val_mean_absolute_error: 17.3174
Epoch 44/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.3942 - mean_absolute_error: 18.3942 - val_loss: 17.1705 - val_mean_absolute_error: 17.1705
Epoch 45/400
17884/17884 [==============================] - 1s 30us/sample - loss: 18.1661 - mean_absolute_error: 18.1661 - val_loss: 17.2829 - val_mean_absolute_error: 17.2829
Epoch 46/400
17884/17884 [==============================] - 1s 29us/sample - loss: 18.1160 - mean_absolute_error: 18.1160 - val_loss: 17.1563 - val_mean_absolute_error: 17.1563
Epoch 47/400
###Markdown
Model Evaluations:
###Code
# Evaluate the model on the training set (the metric here is mean absolute error, not accuracy)
scores = model.evaluate(X_train, y_train)
print(f"{model.metrics_names[1]}: {scores[1]}")
# Plot the model's loss to see if model is overfitting
model_loss = pd.DataFrame(model.history.history)
model_loss.plot()
# See predictions
from sklearn.metrics import mean_absolute_error
predictions = model.predict(X_test)
mean_absolute_error(y_test,predictions)
# Random test:
index = 90
X_pred = sc.transform([df.drop('price', axis=1).iloc[index]])
y_real = df.iloc[index]['price']
y_pred = model.predict([X_pred])
print(f'Prediction: ${y_pred[0][0]} | Real: ${y_real}')
###Output
Prediction: $57.080543518066406 | Real: $55.0
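###Markdown
Since the stated goal was to pickle the model into an API for the web team, here is a minimal sketch of persisting the fitted scaler and the Keras model for later serving (file names are placeholders):
###Code
# Persist the fitted MinMaxScaler and the Keras model (a sketch; paths are placeholders)
import pickle
model.save('data/airbnb_model.h5')   # Keras HDF5 format
with open('data/scaler.pkl', 'wb') as f:
    pickle.dump(sc, f)
# In the API process, reload with:
# from tensorflow.keras.models import load_model
# model = load_model('data/airbnb_model.h5')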
|
tests/python/mnist/MnistSegDistillation.ipynb | ###Markdown
Taking on segmentation with MNIST
###Code
import os
import shutil
import random
import pickle
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
#from tqdm import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
import binarybrain as bb
print(bb.get_version_string())
#print(bb.get_device_name(0))
bb.get_device_allocated_memory_size()
###Output
_____no_output_____
###Markdown
Initial settings
###Code
# configuration
bb.set_device(0)
net_name = 'MnistSegClassDistillation'
data_path = os.path.join('./data/', net_name + '')
rtl_sim_path = '../../verilog/mnist'
rtl_module_name = 'MnistSegmentationAndClassification'
output_velilog_file = os.path.join(data_path, net_name + '.v')
sim_velilog_file = os.path.join(rtl_sim_path, rtl_module_name + '.v')
bin_mode = True
frame_modulation_size = 3
depth_integration_size = 1
epochs = 0
mini_batch_size = 16
###Output
_____no_output_____
###Markdown
Dataset preparation. We build our own dataset: since the network also needs to learn pixels where the digit is not centered, 28x28 MNIST images are tiled to create the training data.
###Code
# number of tiles to arrange
rows=3
cols=3
# precompute weights based on area ratios
if False:
areas = np.zeros((11))
for img, label in dataset_train:
img = img.numpy()
areas[label] += np.mean(img)
areas[10] += np.mean(1.0-img)
areas /= len(dataset_train)
wight = 1 / areas
wight /= np.max(wight)
def make_teacher_image(gen, rows, cols, margin=0):
source_img = np.zeros((1, rows*28, cols*28), dtype=np.float32)
teaching_img = np.zeros((11, rows*28, cols*28), dtype=np.float32)
for row in range(rows):
for col in range(cols):
x = col*28
y = row*28
img, label = gen.__next__()
source_img[0,y:y+28,x:x+28] = img
teaching_img[label,y:y+28,x:x+28] = img
teaching_img[10,y:y+28,x:x+28] = (1.0-img)
teaching_img = (teaching_img > 0.5).astype(np.float32)
    # randomly invert the image
if random.random() > 0.5:
source_img = 1.0 - source_img
return source_img, teaching_img[:,margin:-margin,margin:-margin]
def transform_data(dataset, n, rows, cols, margin):
def data_gen():
l = len(dataset)
i = 0
while True:
yield dataset[i%l]
i += 1
gen = data_gen()
source_imgs = []
teaching_imgs = []
for _ in range(n):
x, t = make_teacher_image(gen, rows, cols, margin)
source_imgs.append(x)
teaching_imgs.append(t)
return source_imgs, teaching_imgs
class MyDatasets(torch.utils.data.Dataset):
def __init__(self, source_imgs, teaching_imgs, transforms=None):
self.transforms = transforms
self.source_imgs = source_imgs
self.teaching_imgs = teaching_imgs
def __len__(self):
return len(self.source_imgs)
def __getitem__(self, index):
source_img = self.source_imgs[index]
teaching_img = self.teaching_imgs[index]
if self.transforms:
source_img, teaching_img = self.transforms(source_img, teaching_img)
return source_img, teaching_img
# dataset
dataset_path = './data/'
dataset_train = torchvision.datasets.MNIST(root=dataset_path, train=True, transform=transforms.ToTensor(), download=True)
dataset_test = torchvision.datasets.MNIST(root=dataset_path, train=False, transform=transforms.ToTensor(), download=True)
dataset_fname = os.path.join(data_path, 'dataset.pickle')
if os.path.exists(dataset_fname):
with open(dataset_fname, 'rb') as f:
source_imgs_train = pickle.load(f)
teaching_imgs_train = pickle.load(f)
source_imgs_test = pickle.load(f)
teaching_imgs_test = pickle.load(f)
else:
os.makedirs(data_path, exist_ok=True)
source_imgs_train, teaching_imgs_train = transform_data(dataset_train, 4096, rows, cols, 29)
source_imgs_test, teaching_imgs_test = transform_data(dataset_test, 128, rows, cols, 29)
with open(dataset_fname, 'wb') as f:
pickle.dump(source_imgs_train, f)
pickle.dump(teaching_imgs_train, f)
pickle.dump(source_imgs_test, f)
pickle.dump(teaching_imgs_test, f)
my_dataset_train = MyDatasets(source_imgs_train, teaching_imgs_train)
my_dataset_test = MyDatasets(source_imgs_test, teaching_imgs_test)
loader_train = torch.utils.data.DataLoader(dataset=my_dataset_train, batch_size=mini_batch_size, shuffle=True)
loader_test = torch.utils.data.DataLoader(dataset=my_dataset_test, batch_size=mini_batch_size, shuffle=False)
def plt_data(x, y):
plt.figure(figsize=(16,8))
plt.subplot(1,12,1)
plt.imshow(x[0], 'gray')
for i in range(11):
plt.subplot(1,12,2+i)
plt.imshow(y[i], 'gray')
plt.show()
plt.figure(figsize=(16,8))
for source_imgs, teaching_imgs in loader_test:
print(source_imgs[0].shape)
print(teaching_imgs[0].shape)
for i in range(min(mini_batch_size, 10)):
plt_data(source_imgs[i], teaching_imgs[i])
break
def view(net, loader):
num = 0;
for x_imgs, t_imgs in loader:
plt.figure(figsize=(16,8))
x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32))
# t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32))
# t1_buf = bb.FrameBuffer.from_numpy(np.array(1.0 - t_imgs[:,10:11,:,:]).astype(np.float32))
y0_buf, y1_buf = net.forward(x_buf, train=False)
result_imgs0 = y0_buf.numpy()
result_imgs1 = y1_buf.numpy()
result_imgs = np.hstack((result_imgs0, result_imgs1))
plt_data(x_imgs[0], result_imgs[0])
num += 1
if num >= 2: break
###Output
_____no_output_____
###Markdown
ネットワーク構築
###Code
# in binary mode, memory can be reduced by using the BIT type
bin_dtype = bb.DType.BIT if bin_mode else bb.DType.FP32
def create_lut_depthwise_conv(name, output_ch, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype):
    """Create a LUT-based depthwise convolution layer"""
return bb.Convolution2d(
bb.Sequential([
bb.DifferentiableLut([output_ch, 1, 1], connection='depthwise', batch_norm=batch_norm, name='lut_dl_depthwise_' + name, bin_dtype=fw_dtype),
]),
filter_size=filter_size,
padding=padding,
name='lut_conv_depthwise_' + name,
fw_dtype=fw_dtype)
def create_lut_conv1(name, output_ch, filter_size=(1, 1), padding='valid', connection='serial', batch_norm=True, fw_dtype=bin_dtype):
    """Create a LUT-based convolution layer (single DifferentiableLut stage)"""
return bb.Convolution2d(
bb.DifferentiableLut([output_ch, 1, 1], connection=connection, batch_norm=batch_norm, name=(name + '_lut_dl'), bin_dtype=fw_dtype),
filter_size=filter_size,
padding=padding,
name=(name + '_lut_conv'),
fw_dtype=fw_dtype)
def create_lut_conv2(name, output_ch, filter_size=(1, 1), padding='valid', connection='serial', batch_norm=True, fw_dtype=bin_dtype):
    """Create a LUT-based convolution layer (two DifferentiableLut stages)"""
return bb.Convolution2d(
bb.Sequential([
bb.DifferentiableLut([output_ch*6, 1, 1], connection=connection, batch_norm=batch_norm, name=(name + '_lut_dl0'), bin_dtype=fw_dtype),
bb.DifferentiableLut([output_ch, 1, 1], connection='serial', batch_norm=batch_norm, name=(name + '_lut_dl1'), bin_dtype=fw_dtype),
]),
filter_size=filter_size,
padding=padding,
name=(name + '_lut_conv'),
fw_dtype=fw_dtype)
def create_lut_conv_mn(name, input_ch, output_ch, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype):
return bb.Sequential([
create_lut_depthwise_conv(name, input_ch, filter_size=filter_size, padding=padding, fw_dtype=fw_dtype),
create_lut_conv2(name, output_ch, filter_size=(1, 1), fw_dtype=fw_dtype),
])
def create_dense_affine(name, output_ch, fw_dtype=bin_dtype):
    """Create a binarized DenseAffine layer"""
return bb.Sequential([
bb.DenseAffine([output_ch, 1, 1], name=(name + '_dense_affine')),
bb.BatchNormalization(name=(name + '_dense_bn')),
bb.Binarize(name=(name + '_dense_act'), bin_dtype=fw_dtype),
])
def create_dense_conv(name, output_ch, filter_size=(1, 1), padding='valid', fw_dtype=bin_dtype):
    """Create a binarized DenseConv layer"""
return bb.Convolution2d(
create_dense_affine(name, output_ch, fw_dtype),
filter_size=filter_size,
padding=padding,
name=(name + '_dense_conv'),
fw_dtype=fw_dtype)
class SegmentationNetwork(bb.Sequential):
    """Network for distillation"""
def __init__(self):
self.input_r2b = bb.RealToBinary(frame_modulation_size=frame_modulation_size, bin_dtype=bin_dtype)
self.cls_b2r = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
self.seg_b2r = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
        # create the input layer
layer_name = 'input'
self.input_lut = create_lut_conv1(layer_name, 36, filter_size=(3, 3), connection='random', batch_norm=True, fw_dtype=bin_dtype)
self.input_dense = create_dense_conv(layer_name, 36, filter_size=(3, 3), fw_dtype=bin_dtype)
self.net_input = bb.Switcher({'lut': self.input_lut, 'dense': self.input_dense}, init_model_name='dense')
        # create the conv layers
self.net_cnv = bb.Sequential()
for i in range(28):
layer_name = 'cnv%d'%(i)
cnv_lut = create_lut_conv_mn(layer_name, 36, 36, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype)
cnv_dense = create_dense_conv(layer_name, 36, filter_size=(3, 3), padding='valid', fw_dtype=bin_dtype)
self.net_cnv.append(
bb.Switcher({
'lut': cnv_lut,
'dense': cnv_dense
}, init_model_name='dense'))
# classifier
self.net_cls = bb.Sequential([
bb.Switcher({
'lut': create_lut_conv2('cls0', 2*36, filter_size=(1, 1)),
'dense': create_dense_conv('cls0', 2*36, filter_size=(1, 1)),
}, init_model_name='dense'),
bb.Switcher({
'lut': create_lut_conv2('cls1', 10, filter_size=(1, 1)),
'dense': create_dense_conv('cls1', 10, filter_size=(1, 1)),
}, init_model_name='dense')
])
# segmentation
self.net_seg = bb.Sequential([
bb.Switcher({
'lut': create_lut_conv2('seg0', 2*36, filter_size=(1, 1)),
'dense': create_dense_conv('seg0', 2*36, filter_size=(1, 1)),
}, init_model_name='dense'),
bb.Switcher({
'lut': create_lut_conv2('seg1', 1, filter_size=(1, 1)),
'dense': create_dense_conv('seg1', 1, filter_size=(1, 1)),
}, init_model_name='dense')
])
super(SegmentationNetwork, self).__init__([self.net_input, self.net_cnv, self.net_cls, self.net_seg])
def set_input_shape(self, shape):
shape = self.input_r2b.set_input_shape(shape)
shape = self.net_input.set_input_shape(shape)
shape = self.net_cnv.set_input_shape(shape)
shape_cls = self.net_cls.set_input_shape(shape)
self.cls_b2r.set_input_shape(shape_cls)
shape_seg = self.net_seg.set_input_shape(shape)
self.seg_b2r.set_input_shape(shape_seg)
def forward(self, x, train):
x = self.input_r2b.forward(x, train)
x = self.net_input.forward(x, train)
x = self.net_cnv.forward(x, train)
y0 = self.net_cls.forward(x, train)
y0 = self.cls_b2r.forward(y0)
y1 = self.net_seg.forward(x, train)
y1 = self.seg_b2r.forward(y1)
return y0, y1
def backward(self, dy0, dy1):
dy0 = self.cls_b2r.backward(dy0)
dy0 = self.net_cls.backward(dy0)
dy1 = self.seg_b2r.backward(dy1)
dy1 = self.net_seg.backward(dy1)
dy = self.net_cnv.backward(dy0*0.3 + dy1*0.7)
dx = self.net_input.backward(dy)
return dx
net = SegmentationNetwork()
net.send_command("switch_model dense")
net.set_input_shape([1, rows*28, cols*28])
net.set_name(net_name)
net.send_command("binary true")
#bb.load_networks(data_path, net)
bb.load_networks(data_path, net, name='dense_base')
###Output
_____no_output_____
###Markdown
Run training. The training is performed below.
###Code
def learning(data_path, net, epochs=2):
# learning
loss0 = bb.LossSoftmaxCrossEntropy()
loss1 = bb.LossSigmoidCrossEntropy()
metrics0 = bb.MetricsCategoricalAccuracy()
metrics1 = bb.MetricsBinaryCategoricalAccuracy()
optimizer = bb.OptimizerAdam()
optimizer.set_variables(net.get_parameters(), net.get_gradients())
for epoch in range(epochs):
# learning
loss0.clear()
metrics0.clear()
loss1.clear()
metrics1.clear()
with tqdm(loader_train) as tqdm_loadr:
for x_imgs, t_imgs in tqdm_loadr:
x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32))
t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32))
t1_buf = bb.FrameBuffer.from_numpy(1.0 - np.array(t_imgs[:,10:11,:,:]).astype(np.float32))
y0_buf, y1_buf = net.forward(x_buf, train=True)
dy0_buf = loss0.calculate(y0_buf, t0_buf)
dy1_buf = loss1.calculate(y1_buf, t1_buf)
metrics0.calculate(y0_buf, t0_buf)
metrics1.calculate(y1_buf, t1_buf)
net.backward(dy0_buf, dy1_buf)
optimizer.update()
tqdm_loadr.set_postfix(loss0=loss0.get(), acc0=metrics0.get(), loss1=loss1.get(), acc1=metrics1.get())
# test
loss0.clear()
metrics0.clear()
loss1.clear()
metrics1.clear()
for x_imgs, t_imgs in loader_test:
x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32))
t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32))
t1_buf = bb.FrameBuffer.from_numpy(1.0 - np.array(t_imgs[:,10:11,:,:]).astype(np.float32))
y0_buf, y1_buf = net.forward(x_buf, train=False)
loss0.calculate(y0_buf, t0_buf)
loss1.calculate(y1_buf, t1_buf)
metrics0.calculate(y0_buf, t0_buf)
metrics1.calculate(y1_buf, t1_buf)
bb.save_networks(data_path, net)
print('epoch[%d] : loss0=%f acc0=%f loss1=%f acc1=%f' % (epoch, loss0.get(), metrics0.get(), loss1.get(), metrics1.get()))
view(net, loader_test)
def distillation_input(data_path, net, epochs=4):
# learning
loss = bb.LossMeanSquaredError()
optimizer = bb.OptimizerAdam()
net_input = net.net_input
bin2real0 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
bin2real1 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
    # connect the LUT layer to the optimizer
net_input.send_command("switch_model lut")
net_input.send_command('parameter_lock false')
optimizer.set_variables(net_input.get_parameters(), net_input.get_gradients())
for epoch in range(epochs):
# learning
loss.clear()
with tqdm(loader_train) as tqdm_loadr:
for x_imgs, t_imgs in tqdm_loadr:
x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32))
x_buf = net.input_r2b.forward(x_buf, train=False)
                # switch to dense mode and generate teacher data
net_input.send_command("switch_model dense")
t_buf = net_input.forward(x_buf, train=False)
t_buf = bin2real0.forward(t_buf, train=False)
                # switch back to LUT and train
net_input.send_command("switch_model lut")
y_buf = net_input.forward(x_buf, train=True)
y_buf = bin2real1.forward(y_buf, train=True)
dy_buf = loss.calculate(y_buf, t_buf)
dy_buf = bin2real1.backward(dy_buf)
net_input.backward(dy_buf)
optimizer.update()
tqdm_loadr.set_postfix(loss=loss.get())
bb.save_networks(data_path, net)
print('distillation epoch[%d] : loss=%f' % (epoch, loss.get()))
def distillation_cnv(data_path, net, index, epochs=4):
# learning
loss = bb.LossMeanSquaredError()
optimizer = bb.OptimizerAdam()
cnv_layer = net.net_cnv[index]
bin2real0 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
bin2real1 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
    # connect the LUT layer to the optimizer
cnv_layer.send_command("switch_model lut")
cnv_layer.send_command('parameter_lock false')
optimizer.set_variables(cnv_layer.get_parameters(), cnv_layer.get_gradients())
for epoch in range(epochs):
# learning
loss.clear()
with tqdm(loader_train) as tqdm_loadr:
for x_imgs, t_imgs in tqdm_loadr:
                # switch to LUT and run the preceding stages
net.send_command("switch_model lut")
x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32))
x_buf = net.input_r2b.forward(x_buf, train=False)
x_buf = net.net_input.forward(x_buf, train=False)
for i in range(index):
x_buf = net.net_cnv[i].forward(x_buf, train=False)
                # switch to dense mode and generate teacher data
cnv_layer.send_command("switch_model dense")
t_buf = cnv_layer.forward(x_buf, train=False)
t_buf = bin2real0.forward(t_buf, train=False)
                # switch back to LUT and train
cnv_layer.send_command("switch_model lut")
y_buf = cnv_layer.forward(x_buf, train=True)
y_buf = bin2real1.forward(y_buf, train=True)
dy_buf = loss.calculate(y_buf, t_buf)
dy_buf = bin2real1.backward(dy_buf)
cnv_layer.backward(dy_buf)
optimizer.update()
tqdm_loadr.set_postfix(loss=loss.get())
bb.save_networks(data_path, net)
print('distillation epoch[%d] : loss=%f' % (epoch, loss.get()))
# train the reference (baseline) network with DenseAffine
if not bb.load_networks(data_path, net, name='dense_base'):
learning(os.path.join(data_path, 'dense'), net, epochs=32)
bb.save_networks(data_path, net, name='dense_split', write_layers=True)
bb.save_networks(data_path, net, name='dense_base')
bb.save_networks(data_path, net)
# LUT training for the input layer
layer_name = 'input'
if not bb.load_networks(data_path, net, name=layer_name):
    # distillation
distillation_input(os.path.join(data_path, layer_name), net, epochs=4)
    # reset the whole network
net.send_command("switch_model dense")
net.send_command('parameter_lock true')
view(net, loader_test)
    # switch to LUT
net.net_input.send_command("switch_model lut")
view(net, loader_test)
    # individual LUT training
net.net_input.send_command('parameter_lock false')
    # learning(os.path.join(data_path, layer_name), net, epochs=2) # replaced by distillation
    # training including the later stages
net.send_command('parameter_lock false')
learning(os.path.join(data_path, layer_name), net, epochs=2)
    # save
bb.save_networks(data_path, net, name=(layer_name + '_split'), write_layers=True)
bb.save_networks(data_path, net, name=layer_name)
bb.save_networks(data_path, net)
# LUT training for the convolution layers
for i in range(0, 29):
layer_name = 'cnv%d'%i
print('----- %s -----'%layer_name)
if not bb.load_networks(data_path, net, name=layer_name):
        # distillation
distillation_cnv(os.path.join(data_path, layer_name), net, i, epochs=2)
        # reset the whole network
net.send_command("switch_model dense")
net.send_command('parameter_lock true')
        # switch to LUT
net.net_input.send_command("switch_model lut")
for j in range(i+1):
net.net_cnv[j].send_command("switch_model lut")
view(net, loader_test)
        # individual training
net.net_cnv[i].send_command('parameter_lock false')
        # learning(os.path.join(data_path, layer_name), net, epochs=2) # replaced by distillation
        # training including the later stages
net.send_command('parameter_lock false')
net.net_input.send_command("parameter_lock true")
for j in range(i):
net.net_cnv[j].send_command("parameter_lock true")
learning(os.path.join(data_path, layer_name), net, epochs=2)
        # save
bb.save_networks(data_path, net, name=(layer_name + '_split'), write_layers=True)
bb.save_networks(data_path, net, name=layer_name)
bb.save_networks(data_path, net)
bb.load_networks(data_path, net, name='cnv0')
print(bb.get_device_allocated_memory_size())
import gc
gc.collect()
bb.garbage_collect_device_memory()
bb.get_device_allocated_memory_size()
###Output
_____no_output_____ |
models/word_count_pipelines.ipynb | ###Markdown
There are different ways to clean the text. Perhaps we should consider the method we want to use: naive, tokenizer, lemmatization, or stemming? Below I have used a single case to demonstrate the naive, tokenizer, and lemmatizer pipelines (I couldn't figure out the stemmer yet, but will do this upcoming week; a rough sketch is included after the commented-out attempt).
###Code
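# NOTE: the pipelines below rely on shared objects (exclude, stop_words,
# stop_word_list, wordnet_lemmatizer) that are not defined in this excerpt.
# A minimal setup sketch, assuming the NLTK corpora have already been downloaded:
import re
import codecs
import string
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
exclude = set(string.punctuation)
stop_words = set(stopwords.words('english'))
stop_word_list = stop_words
wordnet_lemmatizer = WordNetLemmatizer()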
#naive pipeline
def clean1(x):
x=x.replace('\n\n','') # remove the line breaks
x=x.lower()# lower text
x = ''.join(ch for ch in x if ch not in exclude) #remove punctuation
x=re.sub('[0-9]+', '', x) # remove numbers
x=x.split() #split words
x=[word for word in x if word not in stopwords.words('english')]#remove stopwords
#x=" ".join(str(x) for x in x) # you can do this if you want to remove list structure
return x
#tokenizer
def nlp_pipeline1(text):
text=text.lower()
#tokenize words for each sentence
text = nltk.word_tokenize(text)
text = ''.join(ch for ch in text if ch not in exclude) #remove punctuation
text=re.sub('[0-9]+', '', text)
text=text.split("'") #split words
# remove punctuation and numbers
#text = [token for token in text if token.isalpha()] #for some reason, this step was removing almost all of the words so replaced it with the above two lines
# remove stopwords - be careful with this step
text = [token for token in text if token not in stop_words]
return text
#lemmatization
def nlp_lem(text):
#tokenize words for each sentence
text = nltk.word_tokenize(text)
# pos tagger
text = nltk.pos_tag(text)
# lemmatizer
    text = [wordnet_lemmatizer.lemmatize(token.lower(), "v") if "V" in pos else wordnet_lemmatizer.lemmatize(token.lower()) for token, pos in text]
# remove punctuation and numbers
text = ''.join(ch for ch in text if ch not in exclude) #remove punctuation
text=re.sub('[0-9]+', '', text)
text=text.split("'") #split words
# remove stopwords - be careful with this step
text = [token for token in text if token not in stop_word_list]
return text
#stemming
#stem_list1 = [snowball_stemmer.stem(word) for word in list1]
#def nlp_stem(text):
#tokenize words for each sentence
#text = nltk.word_tokenize(text)
# pos tagger
#text = nltk.pos_tag(text)
# stemmer
#text = [snowball_stemmer.stem(word) for word in text]
# remove punctuation and numbers
#text = ''.join(ch for ch in text if ch not in exclude) #remove punctuation
#text=re.sub('[0-9]+', '', text)
#text=text.split("'") #split words
# remove stopwords - be careful with this step
#text = [token for token in text if token not in stop_word_list]
#return text
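# A working stemming pipeline, mirroring the pipelines above
# (a sketch; SnowballStemmer comes from nltk.stem)
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer('english')
def nlp_stem(text):
    # tokenize, then stem each token (no POS tagging needed for stemming)
    tokens = nltk.word_tokenize(text.lower())
    tokens = [snowball_stemmer.stem(re.sub('[0-9]+', '', t)) for t in tokens]
    # drop empties, punctuation-only tokens, and stopwords
    tokens = [t for t in tokens if t and t not in exclude and t not in stop_word_list]
    return tokens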
#random case, D1.Mar26.2002.MAJ
d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
d4feb232001maj = str(d4feb232001maj)
#cleaning using naive pipeline
maj = clean1(d4feb232001maj)
print (Counter(maj).most_common())
token_d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
token_d4feb232001maj = str(token_d4feb232001maj)
#cleaning using tokenizer pipeline
token_maj = nlp_pipeline1(token_d4feb232001maj)
print (Counter(token_maj).most_common())
lem_d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
lem_d4feb232001maj = str(lem_d4feb232001maj)
#cleaning using lemmaztizer pipeline
lem_maj = nlp_lem(lem_d4feb232001maj)
print (Counter(lem_maj).most_common())
###Output
[('', 177), ('§', 58), ('art', 51), ('article', 46), ('para', 40), ('radio', 39), ('created', 39), ('law', 31), ('television', 26), ('operators', 26), ('paragraph', 24), ('rta', 21), ('item', 20), ('programs', 20), ('-', 19), ('according', 19), ('activity', 19), ('constitution', 18), ('registration', 17), ('program', 17), ('cem', 16), ('constitutional', 15), ('national', 15), ('regime', 15), ('frequency', 15), ('telecommunications', 15), ('new', 14), ('terrestrial', 14), ('license', 14), ('rights', 13), ('distribution', 13), ('isa', 13), ('provisions', 12), ('protection', 12), ('petitioners', 12), ('state', 12), ('economic', 12), ('case', 11), ('amended', 11), ('court', 11), ('content', 11), ('legal', 11), ('provision', 11), ('request', 10), ('crc', 10), ('means', 10), ('must', 10), ('ofa', 10), ('act', 10), ('licenses', 10), ('regulation', 9), ('broadcasting', 9), ('technical', 9), ('use', 9), ('unconstitutional', 9), ('added', 8), ('claim', 8), ('licensing', 8), ('resource', 8), ('part', 7), ('certain', 7), ('sense', 7), ('spectrum', 7), ('also', 7), ('activities', 7), ('therefore', 7), ('scope', 7), ('requirements', 7), ('provided', 7), ('cable', 7), ('grounds', 7), ('licensed', 7), ('may', 7), ('/', 6), ('assembly', 6), ('council', 6), ('media', 6), ('within', 6), ('set', 6), ('whicha', 6), ('limited', 6), ('witha', 6), ('capital', 6), ('procedure', 6), ('related', 6), ('inequality', 6), ('terms', 6), ('sg', 5), ('mps', 5), ('texts', 5), ('persons', 5), ('term', 5), ('view', 5), ('two', 5), ('exist', 5), ('conditions', 5), ('termination', 5), ('right', 5), ('free', 5), ('satellite', 5), ('freedom', 5), ('violation', 5), ('ina', 5), ('way', 5), ('body', 5), ('powers', 5), ('decision', 5), ('digital', 5), ('instructions', 5), ('№', 4), ('promulgated', 4), ('unconstitutionality', 4), ('ministers', 4), ('bmc', 4), ('rule', 4), ('respect', 4), ('disputed', 4), ('hypothesis', 4), ('entities', 4), ('equal', 4), ('origin', 4), ('information', 4), ('country', 4), ('transitional', 4), ('consequence', 4), ('mandatory', 4), ('paragraphs', 4), ('provide', 4), ('established', 4), ('initiative', 4), ('operator', 4), ('norms', 4), ('distributed', 4), ('telecommunication', 4), ('registered', 4), ('norm', 4), ('sanctions', 4), ('restrictions', 4), ('security', 4), ('b', 4), ('c', 4), ('thus', 4), ('consumer', 4), ('bulgarian', 3), ('electronic', 3), ('wordsor', 3), ('registrations”', 3), ('or”', 3), ('fees”', 3), ('creation', 3), ('contradicts', 3), ('regulates', 3), ('different', 3), ('regimes', 3), ('differences', 3), ('one', 3), ('hand', 3), ('exercises', 3), ('sovereign', 3), ('guarantees', 3), ('nature', 3), ('public', 3), ('final', 3), ('availability', 3), ('intervention', 3), ('exercise', 3), ('asa', 3), ('difference', 3), ('nota', 3), ('however', 3), ('individual', 3), ('listed', 3), ('carrying', 3), ('introduced', 3), ('accordance', 3), ('held', 3), ('determined', 3), ('principle', 3), ('regard', 3), ('revocation', 3), ('revoked', 3), ('anda', 3), ('without', 3), ('property', 3), ('considers', 3), ('finds', 3), ('networks', 3), ('justified', 3), ('declaration', 3), ('last', 3), ('documents', 3), ('protected', 3), ('whether', 3), ('current', 3), ('consumers', 3), ('market', 3), ('rules', 3), ('regarding', 3), ('items', 3), ('opportunity', 3), ('ground', 3), ('method', 3), ('implementation', 3), ('rumen', 2), ('yankov', 2), ('members', 2), ('xxxix', 2), ('order', 2), ('amendments', 2), ('disabilities', 2), ('opinions', 2), ('wordsby', 2), ('wordsregistration', 2), ('provides', 2), ('fora', 2), 
('point', 2), ('fundamental', 2), ('regardless', 2), ('interpretation', 2), ('distribute', 2), ('fact', 2), ('first', 2), ('states', 2), ('citizens', 2), ('aspect', 2), ('specifics', 2), ('functions', 2), ('carried', 2), ('regulated', 2), ('compliance', 2), ('referring', 2), ('specified', 2), ('text', 2), ('international', 2), ('direction', 2), ('conclusion', 2), ('available', 2), ('objective', 2), ('step', 2), ('towards', 2), ('deregulation', 2), ('wishing', 2), ('ona', 2), ('compared', 2), ('seek', 2), ('receive', 2), ('line', 2), ('situation', 2), ('existence', 2), ('result', 2), ('similar', 2), ('freely', 2), ('choice', 2), ('subject', 2), ('allegation', 2), ('contradiction', 2), ('analysis', 2), ('required', 2), ('standards', 2), ('newly', 2), ('regulating', 2), ('years', 2), ('copyright', 2), ('terminated', 2), ('shall', 2), ('unfounded', 2), ('imperative', 2), ('necessary', 2), ('due', 2), ('clear', 2), ('network', 2), ('via', 2), ('amount', 2), ('project', 2), ('profile', 2), ('procurement', 2), ('placed', 2), ('administrative', 2), ('broadcasters', 2), ('requirement', 2), ('construction', 2), ('framework', 2), ('zidzrt', 2), ('createda', 2), ('unified', 2), ('argued', 2), ('issuance', 2), ('specific', 2), ('value', 2), ('restrict', 2), ('judged', 2), ('legislation', 2), ('lack', 2), ('meaning', 2), ('understood', 2), ('regulatory', 2), ('declare', 2), ('possibility', 2), ('issue', 2), ('existing', 2), ('respectively', 2), ('competition', 2), ('presence', 2), ('circumstance', 2), ('present', 2), ('issues', 2), ('articles', 2), ('regulate', 2), ('violated', 2), ('deprived', 2), ('wording', 2), ('absence', 2), ('various', 2), ('omission', 2), ('consider', 2), ('quality', 2), ('give', 2), ('speech', 2), ('censorship', 2), ('comply', 2), ('include', 2), ('judgment', 1), ('sofia', 1), ('march', 1), ('judge-rapporteur', 1), ('april', 1), ('hristo', 1), ('danov', 1), ('chairman', 1), ('georgi', 1), ('markov', 1), ('dimitar', 1), ('gochev', 1), ('todor', 1), ('todorov', 1), ('nedelcho', 1), ('beronov', 1), ('stefanka', 1), ('stoyanova', 1), ('margarita', 1), ('zlatareva', 1), ('vasil', 1), ('gotsev', 1), ('lyudmil', 1), ('neykov', 1), ('rapporteur', 1), ('zhivan', 1), ('belchev', 1), ('penka', 1), ('tomcheva', 1), ('instituted', 1), ('december', 1), ('establish', 1), ('televisionzidzrt', 1), ('inconsistency', 1), ('european', 1), ('convention', 1), ('transfrontier', 1), ('bya', 1), ('ruling', 1), ('january', 1), ('allowed', 1), ('consideration', 1), ('merits', 1), ('assemblyna', 1), ('ministerscom', 1), ('mediacem', 1), ('communications', 1), ('commissioncrc', 1), ('coalitionbmc', 1), ('constituted', 1), ('stakeholders', 1), ('received', 1), ('took', 1), ('account', 1), ('followinga', 1), ('section', 1), ('iv', 1), ('human', 1), ('freedoms', 1), ('weight', 1), ('believes', 1), ('audience', 1), ('equally', 1), ('guaranteed', 1), ('transmission', 1), ('environment', 1), ('maintains', 1), ('need', 1), ('support', 1), ('legislature', 1), ('based', 1), ('level', 1), ('sufficient', 1), ('repealed', 1), ('replaced', 1), ('excluding', 1), ('nuances', 1), ('summarized', 1), ('conditioned', 1), ('field', 1), ('creates', 1), ('build', 1), ('sucha', 1), ('system', 1), ('overall', 1), ('addition', 1), ('noted', 1), ('mainly', 1), ('expressed', 1), ('social', 1), ('cultural', 1), ('political', 1), ('carry', 1), ('focus', 1), ('direct', 1), ('investments', 1), ('separate', 1), ('advertising', 1), ('interests', 1), ('producers', 1), ('represented', 1), ('kind', 1), ('directly', 1), ('indirectly', 1), 
('contribute', 1), ('circulation', 1), ('commercial', 1), ('subjects', 1), ('strictly', 1), ('defined', 1), ('limits', 1), ('actta', 1), ('agreement', 1), ('party', 1), ('borne', 1), ('mind', 1), ('currently', 1), ('explicit', 1), ('secondly', 1), ('requires', 1), ('another', 1), ('violate', 1), ('agreements', 1), ('candidates', 1), ('unoccupied', 1), ('imposes', 1), ('projection', 1), ('obligation', 1), ('frequenciesdecision', 1), ('..', 1), ('code', 1), ('civil', 1), ('limitation', 1), ('et', 1), ('seq', 1), ('engage', 1), ('facilitated', 1), ('registering', 1), ('easier', 1), ('band', 1), ('frequencies', 1), ('used', 1), ('disseminate', 1), ('expanded', 1), ('constitutionally', 1), ('enshrined', 1), ('logical', 1), ('assume', 1), ('preconditions', 1), ('introduction', 1), ('creating', 1), ('correlated', 1), ('choose', 1), ('prerequisite', 1), ('participation', 1), ('buta', 1), ('made', 1), ('mode', 1), ('substantiate', 1), ('comparing', 1), ('contained', 1), ('regimesa', 1), ('brief', 1), ('relating', 1), ('paraa', 1), ('contentperforms', 1), ('deletes', 1), ('registrations', 1), ('cases', 1), ('law”', 1), ('words', 1), ('thematically', 1), ('united', 1), ('concern', 1), ('concluded', 1), ('contradict', 1), ('deadlines', 1), ('ways', 1), ('indefinite', 1), ('objectively', 1), ('determines', 1), ('manner', 1), ('second', 1), ('agreed', 1), ('foreign', 1), ('contract', 1), ('excludes', 1), ('contracts', 1), ('area', 1), ('groundswithout', 1), ('seeking', 1), ('deletion', 1), ('systematic', 1), ('lawart', 1), ('respective', 1), ('revoked”', 1), ('upon', 1), ('functional', 1), ('dependence', 1), ('types', 1), ('regards', 1), ('usinga', 1), ('deleted', 1), ('mentioned', 1), ('connection', 1), ('betweena', 1), ('ban', 1), ('or``another', 1), ('differ', 1), ('former', 1), ('sanctioned', 1), ('latter', 1), ('requested', 1), ('scheme', 1), ('ata', 1), ('disadvantage', 1), ('ones', 1), ('obviously', 1), ('legislator', 1), ('introducea', 1), ('type', 1), ('chapter', 1), ('penal', 1), ('explicitly', 1), ('regional', 1), ('local', 1), ('receivea', 1), ('dependent', 1), ('limitations', 1), ('practical', 1), ('limit', 1), ('number', 1), ('transponders', 1), ('throughout', 1), ('possible', 1), ('arrangements', 1), ('owners', 1), ('non-air', 1), ('expression', 1), ('registration”', 1), ('proving', 1), ('candidate', 1), ('operatorsa', 1), ('list', 1), ('companies', 1), ('shareholders', 1), ('partners', 1), ('justification', 1), ('applying', 1), ('applicants', 1), ('complaint', 1), ('factually', 1), ('applicant', 1), ('obtaina', 1), ('circumstances', 1), ('presentthat', 1), ('shares', 1), ('stocks', 1), ('antitrust', 1), ('submita', 1), ('lawpart', 1), ('prove', 1), ('ownership', 1), ('measures', 1), ('money', 1), ('laundering', 1), ('well', 1), ('three', 1), ('register', 1), ('submit', 1), ('categories', 1), ('establishment', 1), ('itcannot', 1), ('parties', 1), ('unforeseen', 1), ('restriction', 1), ('proportionality', 1), ('impart', 1), ('far', 1), ('things', 1), ('put', 1), ('plane', 1), ('threat', 1), ('light', 1), ('factual', 1), ('mechanisms', 1), ('users', 1), ('viewers', 1), ('listeners', 1), ('definitionscope', 1), ('program”', 1), ('regulator', 1), ('require', 1), ('region', 1), ('settlement', 1), ('already', 1), ('rural', 1), ('automatically', 1), ('acquire', 1), ('status', 1), ('programs``', 1), ('structured', 1), ('collapsing', 1), ('misinterpretation', 1), ('spatialgeographical', 1), ('rather', 1), ('substantiveprogrammatic', 1), ('refuse', 1), ('non-compliance', 1), ('concept', 1), 
('irregularities', 1), ('additional', 1), ('statement', 1), ('clarified', 1), ('concerns', 1), ('official', 1), ('certificate', 1), ('hold', 1), ('six', 1), ('months', 1), ('presentation', 1), ('payment', 1), ('initial', 1), ('fee', 1), ('envisages', 1), ('nothing', 1), ('else', 1), ('usea', 1), ('re-registered', 1), ('liberal', 1), ('ultimately', 1), ('relations', 1), ('rearranged', 1), ('unconstitutionalb', 1), ('capacity', 1), ('non-state', 1), ('followed', 1), ('indeed', 1), ('holdinga', 1), ('maintenance', 1), ('interested', 1), ('person', 1), ('holda', 1), ('issuinga', 1), ('starts', 1), ('spectruma', 1), ('positive', 1), ('commission', 1), ('appliesa', 1), ('draft', 1), ('tender', 1), ('continues', 1), ('foregoing', 1), ('shows', 1), ('the``commission', 1), ('independent', 1), ('basis', 1), ('obtaininga', 1), ('furthermore', 1), ('regulatea', 1), ('issued', 1), ('refuses', 1), ('evidence', 1), ('issuea', 1), ('matter', 1), ('model', 1), ('bound', 1), ('unbound', 1), ('special', 1), ('mood', 1), ('contains', 1), ('amends', 1), ('supplements', 1), ('suspends', 1), ('terminates', 1), ('revokes', 1), ('positions', 1), ('geostationary', 1), ('orbit', 1), ('entry', 1), ('force', 1), ('andc', 1), ('dispute', 1), ('constitutionality', 1), ('so-calledtie', 1), ('licensing”', 1), ('distributors', 1), ('disadvantaged', 1), ('sectorsanalogue', 1), ('development', 1), ('blocked``crc', 1), ('opinion', 1), ('argues', 1), ('incompleteness', 1), ('opposite', 1), ('regulations', 1), ('specifically', 1), ('objection', 1), ('spheres', 1), ('could', 1), ('connected', 1), ('assessment', 1), ('expediency', 1), ('hence', 1), ('management', 1), ('analyze', 1), ('lulucf', 1), ('allegations', 1), ('suitable', 1), ('since', 1), ('protects', 1), ('offered', 1), ('goods', 1), ('services', 1), ('risks', 1), ('claimed', 1), ('considered', 1), ('preferred', 1), ('service', 1), ('gives', 1), ('discuss', 1), ('protective', 1), ('incomplete', 1), ('sphere', 1), ('life', 1), ('shortcoming', 1), ('thatcannot', 1), ('associated', 1), ('cannot', 1), ('declared', 1), ('common', 1), ('aspects', 1), ('analogue', 1), ('substantiated', 1), ('given', 1), ('areacannot', 1), ('bea', 1), ('declaring', 1), ('power', 1), ('granted', 1), ('restricts', 1), ('editorial', 1), ('independence', 1), ('violates', 1), ('equality', 1), ('claims', 1), ('opportunities', 1), ('preliminary', 1), ('schemes', 1), ('selection', 1), ('employees', 1), ('media``', 1), ('giving', 1), ('reasons', 1), ('intended', 1), ('eliminate', 1), ('operational', 1), ('supervisory', 1), ('exercising', 1), ('formulated', 1), ('determine', 1), ('contain', 1), ('create', 1), ('general', 1), ('proclaiming', 1), ('exclusion', 1), ('form', 1), ('refer', 1), ('referred', 1), ('obligations', 1), ('schedule', 1), ('achieving', 1), ('ratios', 1), ('latest', 1), ('interference', 1), ('personnel', 1), ('policy', 1), ('stated', 1), ('considerations', 1), ('resolved', 1), ('rejects', 1), ('following', 1), ('wordsthrough', 1)]
###Markdown
Word Count Using all 2002 documents This is just the naive pipeline: each combined file is read, cleaned with `clean1`, and counted with `Counter` (a tidier single-file sketch follows the output below).
###Code
# Using all decisions. Note: .split() yields a token list, which is turned back
# into a single string before cleaning -- this is the "naive" part of the pipeline.
all2002 = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/ALL/all2002text.txt", "r", "utf-8").read().strip().split()
all2002 = str(all2002)
a2002 = clean1(all2002)
print(Counter(a2002).most_common())
# Using only the AC decisions.
all2002ac = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/AC/all2002AC.txt", "r", "utf-8").read().strip().split()
all2002ac = str(all2002ac)
a2002ac = clean1(all2002ac)
print(Counter(a2002ac).most_common())
# Using only the dissents.
all2002diss = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/Dissent/all2002dissent.txt", "r", "utf-8").read().strip().split()
all2002diss = str(all2002diss)
a2002d = clean1(all2002diss)
print(Counter(a2002d).most_common())
# Using only the majority opinions.
all2002maj = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/Majority/all2002majority.txt", "r", "utf-8").read().strip().split()
all2002maj = str(all2002maj)
a2002m = clean1(all2002maj)
print(Counter(a2002m).most_common())
###Output
[('article', 391), ('art', 378), ('§', 350), ('para', 350), ('law', 349), ('court', 255), ('constitution', 247), ('constitutional', 233), ('paragraph', 214), ('item', 175), ('national', 159), ('information', 151), ('jsa', 150), ('state', 140), ('right', 137), ('provision', 129), ('№', 128), ('request', 128), ('part', 121), ('assembly', 108), ('according', 108), ('rights', 106), ('legal', 106), ('security', 95), ('sg', 91), ('minister', 90), ('protection', 88), ('decision', 88), ('also', 86), ('provisions', 85), ('act', 83), ('supreme', 81), ('cipa', 80), ('case', 78), ('issue', 77), ('activity', 75), ('general', 74), ('new', 70), ('access', 70), ('promulgated', 69), ('persons', 68), ('public', 68), ('privatization', 68), ('regarding', 66), ('created', 64), ('justice', 64), ('former', 59), ('members', 55), ('bulgaria', 55), ('words', 54), ('disputed', 54), ('unconstitutional', 53), ('amends', 52), ('judicial', 52), ('judiciary', 51), ('council', 50), ('b', 50), ('order', 48), ('obligation', 46), ('procedure', 46), ('may', 46), ('republic', 46), ('prosecutors', 46), ('basic', 45), ('unconstitutionality', 44), ('cassation', 44), ('citizens', 43), ('conditions', 43), ('grounds', 43), ('rule', 42), ('therefore', 42), ('provided', 42), ('respective', 42), ('parties', 42), ('service', 42), ('radio', 41), ('documents', 41), ('cannot', 41), ('bodies', 41), ('certain', 40), ('one', 40), ('tax', 40), ('classified', 39), ('administrative', 38), ('opinion', 38), ('bulgarian', 37), ('appeal', 37), ('repealed', 36), ('creates', 36), ('related', 36), ('judges', 36), ('laws', 34), ('ministers', 33), ('cd', 33), ('disabilities', 32), ('entities', 32), ('economic', 32), ('property', 32), ('interested', 32), ('amended', 31), ('within', 31), ('regime', 31), ('final', 31), ('established', 31), ('legislator', 31), ('decisions', 31), ('work', 31), ('view', 30), ('termination', 30), ('well', 30), ('sjc', 30), ('television', 29), ('must', 29), ('activities', 29), ('provide', 29), ('principle', 29), ('opportunity', 29), ('adopted', 29), ('pcpa', 29), ('term', 28), ('content', 28), ('transitional', 28), ('accordance', 28), ('contradict', 28), ('violation', 28), ('body', 28), ('powers', 28), ('legislative', 28), ('committee', 28), ('operators', 27), ('basis', 27), ('services', 27), ('repeal', 27), ('following', 26), ('subject', 26), ('cases', 26), ('determined', 26), ('shall', 26), ('due', 26), ('cooperatives', 26), ('petitioners', 25), ('sense', 25), ('freedom', 25), ('without', 25), ('norm', 25), ('amount', 25), ('parliamentary', 25), ('mps', 24), ('claim', 24), ('interests', 24), ('international', 24), ('requirements', 24), ('rules', 24), ('committees', 24), ('control', 24), ('income', 24), ('two', 23), ('additional', 23), ('cooperative', 23), ('procedural', 23), ('acts', 23), ('investigators', 23), ('chairman', 22), ('regulation', 22), ('registration', 22), ('rta', 22), ('civil', 22), ('unfounded', 22), ('register', 22), ('specific', 22), ('sofia', 21), ('set', 21), ('programs', 21), ('means', 21), ('receive', 21), ('program', 21), ('possibility', 21), ('arbitration', 21), ('fundamental', 20), ('states', 20), ('constitutionally', 20), ('exercise', 20), ('participation', 20), ('revocation', 20), ('already', 20), ('stated', 20), ('application', 20), ('parliament', 20), ('data', 20), ('b”', 20), ('based', 19), ('made', 19), ('norms', 19), ('use', 19), ('explicitly', 19), ('whether', 19), ('considerations', 19), ('principles', 19), ('courts', 19), ('magistrates', 19), ('municipal', 19), ('arbitral', 19), ('texts', 
18), ('opinions', 18), ('provides', 18), ('human', 18), ('different', 18), ('text', 18), ('however', 18), ('necessary', 18), ('tfp', 18), ('prosecutor', 18), ('amendment', 18), ('justice”', 18), ('position', 18), ('indicated', 18), ('defense', 18), ('archives', 18), ('period', 18), ('establish', 17), ('cem', 17), ('contradicts', 17), ('carried', 17), ('commercial', 17), ('held', 17), ('requirement', 17), ('force', 17), ('member', 17), ('intelligence', 17), ('interior', 17), ('lppdop', 17), ('lta', 17), ('consideration', 16), ('country', 16), ('code', 16), ('free', 16), ('years', 16), ('connection', 16), ('restrictions', 16), ('applicants', 16), ('existing', 16), ('power', 16), ('reasons', 16), ('october', 16), ('written', 16), ('office', 16), ('personal', 16), ('constituted', 15), ('hand', 15), ('first', 15), ('compliance', 15), ('frequency', 15), ('specified', 15), ('telecommunications', 15), ('contradiction', 15), ('terms', 15), ('area', 15), ('possible', 15), ('restriction', 15), ('current', 15), ('person', 15), ('articles', 15), ('violated', 15), ('violates', 15), ('adoption', 15), ('chamber', 15), ('sports', 15), ('stefanka', 14), ('commission', 14), ('account', 14), ('support', 14), ('terrestrial', 14), ('nature', 14), ('political', 14), ('regulated', 14), ('used', 14), ('seek', 14), ('existence', 14), ('license', 14), ('given', 14), ('association', 14), ('submitted', 14), ('district', 14), ('imposed', 14), ('make', 14), ('military', 14), ('proposals', 14), ('ministry', 14), ('relevant', 14), ('acquired', 14), ('june', 14), ('annulment', 14), ('privatized', 14), ('creditors', 14), ('taxes', 14), ('rumen', 13), ('yankov', 13), ('zlatareva', 13), ('vasil', 13), ('gotsev', 13), ('lyudmil', 13), ('neykov', 13), ('zhivan', 13), ('penka', 13), ('tomcheva', 13), ('media', 13), ('distribution', 13), ('point', 13), ('guaranteed', 13), ('need', 13), ('fact', 13), ('party', 13), ('another', 13), ('number', 13), ('companies', 13), ('way', 13), ('declare', 13), ('independent', 13), ('contains', 13), ('positions', 13), ('entry', 13), ('deprived', 13), ('considered', 13), ('declared', 13), ('claims', 13), ('personnel', 13), ('resolved', 13), ('practice', 13), ('change', 13), ('organization', 13), ('annual', 13), ('report', 13), ('meetings', 13), ('proceedings', 13), ('secret', 13), ('refusal', 13), ('time', 13), ('judgment', 12), ('march', 12), ('hristo', 12), ('danov', 12), ('dimitar', 12), ('gochev', 12), ('todor', 12), ('todorov', 12), ('nedelcho', 12), ('beronov', 12), ('margarita', 12), ('belchev', 12), ('european', 12), ('merits', 12), ('respect', 12), ('limited', 12), ('guarantees', 12), ('addition', 12), ('paragraphs', 12), ('scope', 12), ('contained', 12), ('mentioned', 12), ('chapter', 12), ('three', 12), ('meaning', 12), ('hold', 12), ('payment', 12), ('issues', 12), ('regulations', 12), ('give', 12), ('determine', 12), ('referred', 12), ('maintained', 12), ('interest', 12), ('follows', 12), ('would', 12), ('process', 12), ('arguments', 12), ('proposal', 12), ('refers', 12), ('rejected', 12), ('v', 12), ('attacked', 12), ('directorate', 12), ('commerce', 12), ('deductions', 12), ('agency', 12), ('award', 12), ('judgerapporteur', 11), ('stoyanova', 11), ('xxxix', 11), ('crc', 11), ('listed', 11), ('regard', 11), ('type', 11), ('list', 11), ('protected', 11), ('items', 11), ('matter', 11), ('could', 11), ('comply', 11), ('obligations', 11), ('said', 11), ('society', 11), ('temporary', 11), ('standing', 11), ('abovementioned', 11), ('investigation', 11), ('assigned', 11), ('zidzsv', 11), 
('impugned', 11), ('materials', 11), ('consent', 11), ('letter', 11), ('enterprise', 11), ('revenues', 11), ('patent', 11), ('georgi', 10), ('markov', 10), ('rapporteur', 10), ('convention', 10), ('functions', 10), ('directly', 10), ('explicit', 10), ('line', 10), ('newly', 10), ('licenses', 10), ('upon', 10), ('regional', 10), ('finds', 10), ('limit', 10), ('framework', 10), ('declaration', 10), ('submit', 10), ('measures', 10), ('relations', 10), ('respectively', 10), ('supplements', 10), ('various', 10), ('implementation', 10), ('composition', 10), ('cooperation', 10), ('tfr', 10), ('affect', 10), ('accepted', 10), ('guarantee', 10), ('podns', 10), ('take', 10), ('appellate', 10), ('appointment', 10), ('administration', 10), ('staff', 10), ('absolute', 10), ('legality', 10), ('affected', 10), ('pointed', 10), ('permit', 10), ('announced', 10), ('budget', 10), ('municipalities', 10), ('gross', 10), ('instance', 10), ('allowed', 9), ('maintains', 9), ('capital', 9), ('mind', 9), ('mandatory', 9), ('conclusion', 9), ('broadcasting', 9), ('technical', 9), ('individual', 9), ('carrying', 9), ('introduced', 9), ('considers', 9), ('categories', 9), ('commissions', 9), ('special', 9), ('thus', 9), ('development', 9), ('life', 9), ('granted', 9), ('independence', 9), ('policy', 9), ('alleged', 9), ('challenged', 9), ('membership', 9), ('group', 9), ('action', 9), ('examination', 9), ('executive', 9), ('transfer', 9), ('check', 9), ('professional', 9), ('a”', 9), ('question', 9), ('category', 9), ('example', 9), ('apia', 9), ('citizen', 9), ('file', 9), ('instruction', 9), ('board', 9), ('financial', 9), ('amendments', 8), ('ruling', 8), ('added', 8), ('licensing', 8), ('freedoms', 8), ('regardless', 8), ('interpretation', 8), ('spectrum', 8), ('equal', 8), ('system', 8), ('expressed', 8), ('defined', 8), ('resource', 8), ('objective', 8), ('similar', 8), ('difference', 8), ('fees', 8), ('”', 8), ('foreign', 8), ('revoked', 8), ('expression', 8), ('present', 8), ('value', 8), ('restrict', 8), ('legislation', 8), ('official', 8), ('holding', 8), ('dispute', 8), ('ground', 8), ('consider', 8), ('preliminary', 8), ('employees', 8), ('contain', 8), ('division', 8), ('unions', 8), ('internal', 8), ('conduct', 8), ('formed', 8), ('participate', 8), ('facts', 8), ('irrelevant', 8), ('mandate', 8), ('entire', 8), ('taken', 8), ('d”', 8), ('president', 8), ('determining', 8), ('authorities', 8), ('rank', 8), ('whose', 8), ('organizations', 8), ('insofar', 8), ('apply', 8), ('disclosure', 8), ('names', 8), ('enterprises', 8), ('youth', 8), ('precautionary', 8), ('instituted', 7), ('received', 7), ('creation', 7), ('carry', 7), ('borne', 7), ('violate', 7), ('candidates', 7), ('determines', 7), ('cable', 7), ('excludes', 7), ('terminated', 7), ('licensed', 7), ('circumstances', 7), ('issuing', 7), ('applies', 7), ('regulate', 7), ('issued', 7), ('expediency', 7), ('create', 7), ('include', 7), ('rejects', 7), ('panel', 7), ('meeting', 7), ('bar', 7), ('union', 7), ('contrary', 7), ('insurance', 7), ('purpose', 7), ('initially', 7), ('precisely', 7), ('lead', 7), ('resolution', 7), ('favor', 7), ('permanent', 7), ('five', 7), ('nonparticipation', 7), ('dismissal', 7), ('mean', 7), ('actual', 7), ('although', 7), ('vote', 7), ('supplement', 7), ('files', 7), ('f', 7), ('allow', 7), ('important', 7), ('criminal', 7), ('jsaa', 7), ('court”', 7), ('contested', 7), ('scis', 7), ('included', 7), ('assess', 7), ('competence', 7), ('k', 7), ('everyone', 7), ('lists', 7), ('element', 7), ('review', 7), ('stored', 7), 
('supported', 7), ('fulltime', 7), ('parttime', 7), ('methods', 7), ('receivables', 7), ('transactions', 7), ('pay', 7), ('amounts', 7), ('ofthe', 7), ('city', 7), ('regulates', 6), ('level', 6), ('sovereign', 6), ('direct', 6), ('separate', 6), ('enshrined', 6), ('initiative', 6), ('situation', 6), ('result', 6), ('introduction', 6), ('required', 6), ('concluded', 6), ('inequality', 6), ('second', 6), ('requested', 6), ('issuance', 6), ('far', 6), ('put', 6), ('lack', 6), ('definition', 6), ('substantive', 6), ('capacity', 6), ('presence', 6), ('absence', 6), ('assessment', 6), ('management', 6), ('refer', 6), ('statutes', 6), ('pursuant', 6), ('inconsistent', 6), ('significant', 6), ('natural', 6), ('including', 6), ('name', 6), ('actions', 6), ('amending', 6), ('less', 6), ('establishing', 6), ('education', 6), ('elka', 6), ('panchova', 6), ('anastasova', 6), ('siika', 6), ('nedyalkova', 6), ('dimovska', 6), ('forestry', 6), ('nadka', 6), ('radeva', 6), ('pangarova', 6), ('stella', 6), ('dimitrova', 6), ('elect', 6), ('statements', 6), ('election', 6), ('relationship', 6), ('virtue', 6), ('every', 6), ('views', 6), ('bailiffs', 6), ('sent', 6), ('offices', 6), ('presented', 6), ('inadmissible', 6), ('authority', 6), ('adopt', 6), ('perform', 6), ('government', 6), ('containing', 6), ('reference', 6), ('permission', 6), ('competent', 6), ('democratic', 6), ('performing', 6), ('armed', 6), ('forces', 6), ('collected', 6), ('covenant', 6), ('disputes', 6), ('funds', 6), ('officers', 6), ('scc', 6), ('private', 6), ('finance', 6), ('along', 6), ('page', 6), ('nonmonetary', 6), ('bmc', 5), ('regimes', 5), ('differences', 5), ('exist', 5), ('noted', 5), ('specifics', 5), ('requires', 5), ('allegation', 5), ('analysis', 5), ('relating', 5), ('concern', 5), ('satellite', 5), ('contracts', 5), ('registered', 5), ('obviously', 5), ('local', 5), ('justified', 5), ('establishment', 5), ('argued', 5), ('impart', 5), ('status', 5), ('understood', 5), ('concept', 5), ('statement', 5), ('nothing', 5), ('competition', 5), ('c', 5), ('shows', 5), ('constitutionality', 5), ('socalled', 5), ('digital', 5), ('hence', 5), ('allegations', 5), ('since', 5), ('instructions', 5), ('equality', 5), ('intended', 5), ('supervisory', 5), ('exercising', 5), ('form', 5), ('november', 5), ('fifth', 5), ('decide', 5), ('terminate', 5), ('leads', 5), ('ca', 5), ('central', 5), ('maintain', 5), ('obliged', 5), ('associations', 5), ('good', 5), ('away', 5), ('obtained', 5), ('providing', 5), ('raised', 5), ('previous', 5), ('makes', 5), ('year', 5), ('subjective', 5), ('argument', 5), ('angelova', 5), ('bankova', 5), ('september', 5), ('obstacle', 5), ('discussed', 5), ('peoples', 5), ('elected', 5), ('concerned', 5), ('groups', 5), ('even', 5), ('namely', 5), ('reports', 5), ('investigative', 5), ('appointed', 5), ('become', 5), ('irremovable', 5), ('separation', 5), ('deprives', 5), ('plovdiv', 5), ('obliges', 5), ('stipulates', 5), ('institute', 5), ('qualification', 5), ('function', 5), ('recognized', 5), ('done', 5), ('extent', 5), ('place', 5), ('etc', 5), ('crime', 5), ('elements', 5), ('incorrect', 5), ('criteria', 5), ('responsible', 5), ('prohibition', 5), ('exclude', 5), ('cited', 5), ('treaties', 5), ('league', 5), ('lppd', 5), ('repeals', 5), ('archival', 5), ('communist', 5), ('research', 5), ('revoke', 5), ('undoubtedly', 5), ('disclose', 5), ('includes', 5), ('affiliation', 5), ('past', 5), ('associates', 5), ('industry', 5), ('validity', 5), ('approval', 5), ('ububthe', 5), ('dated', 5), ('april', 4), 
('december', 4), ('inconsistency', 4), ('electronic', 4), ('took', 4), ('section', 4), ('iv', 4), ('environment', 4), ('legislature', 4), ('summarized', 4), ('exercises', 4), ('field', 4), ('hypothesis', 4), ('origin', 4), ('social', 4), ('referring', 4), ('consequence', 4), ('direction', 4), ('et', 4), ('seq', 4), ('operator', 4), ('united', 4), ('deadlines', 4), ('distributed', 4), ('telecommunication', 4), ('copyright', 4), ('clear', 4), ('sanctions', 4), ('latter', 4), ('procurement', 4), ('ones', 4), ('practical', 4), ('ownership', 4), ('last', 4), ('acquire', 4), ('noncompliance', 4), ('envisages', 4), ('opposite', 4), ('spheres', 4), ('consumer', 4), ('declaring', 4), ('opportunities', 4), ('exclusion', 4), ('complaints', 4), ('declares', 4), ('interfere', 4), ('іі', 4), ('ensures', 4), ('iii', 4), ('changes', 4), ('reading', 4), ('th', 4), ('criterion', 4), ('protect', 4), ('formation', 4), ('achieve', 4), ('assigns', 4), ('permits', 4), ('takes', 4), ('rest', 4), ('selfgovernment', 4), ('vladimir', 4), ('ivanov', 4), ('dimitrov', 4), ('health', 4), ('admitted', 4), ('pursuance', 4), ('observations', 4), ('affairs', 4), ('among', 4), ('normative', 4), ('character', 4), ('ie', 4), ('legally', 4), ('full', 4), ('sessions', 4), ('plenary', 4), ('registry', 4), ('exception', 4), ('considering', 4), ('branch', 4), ('prosecution', 4), ('prepare', 4), ('summary', 4), ('forensic', 4), ('specialized', 4), ('send', 4), ('governed', 4), ('relation', 4), ('appropriate', 4), ('with”', 4), ('ordinance', 4), ('director', 4), ('chairmen', 4), ('effect', 4), ('legitimate', 4), ('enforcement', 4), ('irremovability', 4), ('committed', 4), ('judge', 4), ('authorized', 4), ('disciplinary', 4), ('ranks', 4), ('get', 4), ('transferred', 4), ('taking', 4), ('interpretative', 4), ('department', 4), ('phcip', 4), ('employment', 4), ('envisaged', 4), ('deprivation', 4), ('series', 4), ('job', 4), ('ensure', 4), ('objects', 4), ('accepts', 4), ('task', 4), ('ruled', 4), ('others', 4), ('proclaimed', 4), ('secrecy', 4), ('sources', 4), ('indicates', 4), ('goals', 4), ('endanger', 4), ('majority', 4), ('jurisdiction', 4), ('thesis', 4), ('covers', 4), ('determination', 4), ('degree', 4), ('opening', 4), ('performed', 4), ('classification', 4), ('countries', 4), ('age', 4), ('admiral', 4), ('territorial', 4), ('apart', 4), ('creditor', 4), ('diversity', 4), ('transaction', 4), ('burdens', 4), ('sports”', 4), ('deduction', 4), ('realized', 4), ('taxable', 4), ('investment', 4), ('base', 4), ('site', 4), ('awards', 4), ('tribunal', 4), ('zidzmta', 4), ('zidzrt', 3), ('na', 3), ('stakeholders', 3), ('registrations”', 3), ('or”', 3), ('believes', 3), ('sufficient', 3), ('replaced', 3), ('aspect', 3), ('subjects', 3), ('availability', 3), ('intervention', 3), ('available', 3), ('towards', 3), ('choose', 3), ('choice', 3), ('substantiate', 3), ('law”', 3), ('manner', 3), ('functional', 3), ('dependence', 3), ('placed', 3), ('introduce', 3), ('limitations', 3), ('networks', 3), ('owners', 3), ('applying', 3), ('obtain', 3), ('unified', 3), ('light', 3), ('mechanisms', 3), ('require', 3), ('consumers', 3), ('market', 3), ('regulatory', 3), ('clarified', 3), ('initial', 3), ('maintenance', 3), ('draft', 3), ('foregoing', 3), ('evidence', 3), ('bound', 3), ('specifically', 3), ('method', 3), ('protects', 3), ('quality', 3), ('claimed', 3), ('preferred', 3), ('associated', 3), ('common', 3), ('restricts', 3), ('speech', 3), ('formulated', 3), ('changed', 3), ('lc', 3), ('condition', 3), ('days', 3), ('motives', 3), 
('stenographic', 3), ('session', 3), ('sitting', 3), ('forms', 3), ('serve', 3), ('nonprofit', 3), ('sanction', 3), ('fine', 3), ('statute', 3), ('decided', 3), ('occurrence', 3), ('harm', 3), ('remains', 3), ('unification', 3), ('subsequent', 3), ('find', 3), ('dismissed', 3), ('science', 3), ('released', 3), ('essentially', 3), ('agree', 3), ('auxiliary', 3), ('leaving', 3), ('despite', 3), ('sittings', 3), ('autonomy', 3), ('varna', 3), ('veliko', 3), ('tarnovo', 3), ('promotion', 3), ('heads', 3), ('orders', 3), ('appoint', 3), ('annually', 3), ('inspectorate', 3), ('longer', 3), ('excluded', 3), ('storage', 3), ('e', 3), ('experts', 3), ('third', 3), ('supervision', 3), ('accurate', 3), ('representative', 3), ('unacceptable', 3), ('training', 3), ('supplemented', 3), ('establishes', 3), ('investigator', 3), ('allows', 3), ('concerning', 3), ('depending', 3), ('reduction', 3), ('ethics', 3), ('unconstitutionally', 3), ('employees”', 3), ('bailiff', 3), ('obstacles', 3), ('turn', 3), ('still', 3), ('structure', 3), ('whole', 3), ('approved', 3), ('kept', 3), ('representing', 3), ('informed', 3), ('conformity', 3), ('archive', 3), ('helsinki', 3), ('correct', 3), ('remaining', 3), ('examined', 3), ('described', 3), ('fund', 3), ('sites', 3), ('constitute', 3), ('exercised', 3), ('main', 3), ('units', 3), ('entered', 3), ('reliability', 3), ('senior', 3), ('equated', 3), ('collaborators', 3), ('dignity', 3), ('course', 3), ('clarify', 3), ('appealed', 3), ('completion', 3), ('affects', 3), ('able', 3), ('cipaa', 3), ('separately', 3), ('known', 3), ('see', 3), ('preservation', 3), ('sentence', 3), ('p', 3), ('ideas', 3), ('і', 3), ('entirety', 3), ('circle', 3), ('checked', 3), ('disclosing', 3), ('handed', 3), ('socially', 3), ('ds', 3), ('elections', 3), ('acquainted', 3), ('exceptions', 3), ('useful', 3), ('hidden', 3), ('vat', 3), ('date', 3), ('applicable', 3), ('collateral', 3), ('operation', 3), ('annex', 3), ('rugb', 3), ('repressive', 3), ('constituting', 3), ('totalitarian', 3), ('bills', 3), ('transformation', 3), ('owned', 3), ('inviolability', 3), ('postprivatization', 3), ('outside', 3), ('approach', 3), ('single', 3), ('gazette', 3), ('economy', 3), ('business', 3), ('necessarily', 3), ('speed', 3), ('cpc', 3), ('characteristics', 3), ('collegium', 3), ('resp', 3), ('physical', 3), ('football', 3), ('athletes', 3), ('promote', 3), ('lfvs', 3), ('imc', 3), ('liabilities', 3), ('understanding', 3), ('”and', 3), ('andthe', 3), ('constitutionthe', 3), ('unfoundedthe', 3), ('promulgation', 3), ('forprivatization', 3), ('councils', 3), ('”the', 3), ('indication', 3), ('publicly', 3), ('auction', 3), ('vouchers', 3), ('ordered', 3), ('threemonth', 3), ('effective', 3), ('instances', 3), ('stage', 3), ('fees”', 2), ('distribute', 2), ('overall', 2), ('cultural', 2), ('kind', 2), ('indirectly', 2), ('contribute', 2), ('limits', 2), ('imposes', 2), ('frequencies', 2), ('step', 2), ('deregulation', 2), ('wishing', 2), ('compared', 2), ('logical', 2), ('creating', 2), ('freely', 2), ('standards', 2), ('regulating', 2), ('ways', 2), ('agreed', 2), ('seeking', 2), ('imperative', 2), ('types', 2), ('network', 2), ('ban', 2), ('via', 2), ('project', 2), ('profile', 2), ('broadcasters', 2), ('construction', 2), ('throughout', 2), ('justification', 2), ('applicant', 2), ('shares', 2), ('unforeseen', 2), ('threat', 2), ('judged', 2), ('factual', 2), ('settlement', 2), ('rather', 2), ('refuse', 2), ('concerns', 2), ('six', 2), ('months', 2), ('presentation', 2), ('fee', 2), ('else', 2), 
('ultimately', 2), ('nonstate', 2), ('followed', 2), ('indeed', 2), ('circumstance', 2), ('positive', 2), ('tied', 2), ('analogue', 2), ('blocked', 2), ('argues', 2), ('wording', 2), ('connected', 2), ('omission', 2), ('suitable', 2), ('gives', 2), ('discuss', 2), ('incomplete', 2), ('sphere', 2), ('giving', 2), ('operational', 2), ('censorship', 2), ('interference', 2), ('seised', 2), ('lawful', 2), ('implements', 2), ('bring', 2), ('discussing', 2), ('diaries', 2), ('xxxviii', 2), ('voluntary', 2), ('manifestation', 2), ('satisfy', 2), ('progress', 2), ('failure', 2), ('replacement', 2), ('chosen', 2), ('remain', 2), ('cpa', 2), ('prescribed', 2), ('long', 2), ('bringing', 2), ('complied', 2), ('parts', 2), ('applied', 2), ('operate', 2), ('clearly', 2), ('balance', 2), ('expiration', 2), ('people', 2), ('beyond', 2), ('fair', 2), ('conflict', 2), ('incompatibility', 2), ('agriculture', 2), ('works', 2), ('integration', 2), ('petitions', 2), ('culture', 2), ('elects', 2), ('untenable', 2), ('stating', 2), ('deputy', 2), ('outlines', 2), ('representation', 2), ('solely', 2), ('replace', 2), ('addresses', 2), ('quota', 2), ('oblige', 2), ('conscience', 2), ('competencies', 2), ('forward', 2), ('unlikely', 2), ('deputies', 2), ('together', 2), ('plenum', 2), ('statistical', 2), ('services”', 2), ('reaching', 2), ('council”', 2), ('completely', 2), ('burgas', 2), ('attached', 2), ('increasing', 2), ('enables', 2), ('demotion', 2), ('assemblies', 2), ('authorizes', 2), ('absolutely', 2), ('movement', 2), ('examinations', 2), ('entitled', 2), ('autonomous', 2), ('uniform', 2), ('cooperate', 2), ('grand', 2), ('highest', 2), ('mediation', 2), ('keep', 2), ('appointing', 2), ('implemented', 2), ('happen', 2), ('theoretical', 2), ('lawyers', 2), ('inextricably', 2), ('linked', 2), ('sends', 2), ('coordinated', 2), ('coordination', 2), ('ii', 2), ('entrusted', 2), ('generally', 2), ('definite', 2), ('administer', 2), ('offices”', 2), ('and”', 2), ('agents', 2), ('judges”', 2), ('immunity', 2), ('contradicted', 2), ('initiate', 2), ('verify', 2), ('completing', 2), ('stability', 2), ('initially”', 2), ('demoted', 2), ('qualities', 2), ('expressly', 2), ('offenses', 2), ('liability', 2), ('length', 2), ('approve', 2), ('today', 2), ('domestic', 2), ('obvious', 2), ('dismisses', 2), ('remainder', 2), ('beginning', 2), ('discrimination', 2), ('occupation', 2), ('ratified', 2), ('infringes', 2), ('relate', 2), ('requests', 2), ('points', 2), ('contradictions', 2), ('gave', 2), ('crpd', 2), ('requiring', 2), ('challenge', 2), ('leaves', 2), ('correspond', 2), ('withdrew', 2), ('oneyear', 2), ('prevent', 2), ('confidential', 2), ('pzrzzki', 2), ('phcipc', 2), ('huge', 2), ('importance', 2), ('pzrza', 2), ('something', 2), ('withdrawal', 2), ('reluctance', 2), ('inspection', 2), ('reveal', 2), ('working', 2), ('qualifications', 2), ('arbitrary', 2), ('suspicion', 2), ('characteristic', 2), ('talking', 2), ('defend', 2), ('grant', 2), ('exhaustively', 2), ('reliability”', 2), ('terminating', 2), ('revoking', 2), ('refusing', 2), ('motivated', 2), ('open', 2), ('k”', 2), ('come', 2), ('address', 2), ('accessory', 2), ('administratively', 2), ('structures', 2), ('structural', 2), ('led', 2), ('denial', 2), ('votes', 2), ('either', 2), ('prevents', 2), ('systems', 2), ('disposition', 2), ('recognize', 2), ('procedures', 2), ('passed', 2), ('pointless', 2), ('problem', 2), ('proved', 2), ('essence', 2), ('operative', 2), ('clarification', 2), ('necessity', 2), ('hypotheses', 2), ('identify', 2), ('always', 2), 
('july', 2), ('denied', 2), ('receipt', 2), ('serie', 2), ('n', 2), ('negative', 2), ('note', 2), ('deceased', 2), ('spouse', 2), ('relatives', 2), ('journalistic', 2), ('bylaw', 2), ('defining', 2), ('nevertheless', 2), ('involved', 2), ('assessing', 2), ('honor', 2), ('reputation', 2), ('e”', 2), ('initiated', 2), ('disclosed', 2), ('deviate', 2), ('event', 2), ('deprive', 2), ('collection', 2), ('benefited', 2), ('publication', 2), ('rugsh', 2), ('scheduling', 2), ('coalitions', 2), ('ask', 2), ('high', 2), ('many', 2), ('destroyed', 2), ('express', 2), ('gap', 2), ('moreover', 2), ('prepared', 2), ('marked', 2), ('processing', 2), ('individuals', 2), ('processed', 2), ('destroy', 2), ('lmi', 2), ('processes', 2), ('directorates', 2), ('proposes', 2), ('destruction', 2), ('army', 2), ('complete', 2), ('missing', 2), ('bfsa', 2), ('managing', 2), ('fixed', 2), ('meantime', 2), ('future', 2), ('thousands', 2), ('needs', 2), ('nations', 2), ('efforts', 2), ('morals', 2), ('directed', 2), ('adopting', 2), ('functioning', 2), ('early', 2), ('performance', 2), ('achieved', 2), ('zidzovsrb', 2), ('iss', 2), ('brigadier', 2), ('higher', 2), ('especially', 2), ('effectiveness', 2), ('values', 2), ('”in', 2), ('fourth', 2), ('forcible', 2), ('expropriation', 2), ('supports', 2), ('upheld', 2), ('consequences', 2), ('urgent', 2), ('obligatory', 2), ('debtors', 2), ('additionally', 2), ('writing', 2), ('conclude', 2), ('deals', 2), ('arose', 2), ('formal', 2), ('continue', 2), ('owner', 2), ('real', 2), ('deal', 2), ('presupposes', 2), ('ppc', 2), ('addressee', 2), ('company', 2), ('disposing', 2), ('minutes', 2), ('indicators', 2), ('imposing', 2), ('transfers', 2), ('paid', 2), ('tourism', 2), ('zfvs', 2), ('admission', 2), ('mmc', 2), ('children', 2), ('adolescents', 2), ('sport', 2), ('assign', 2), ('permitted', 2), ('expanding', 2), ('taxation', 2), ('prescribes', 2), ('onprivatization', 2), ('courtadministrative', 2), ('community', 2), ('toproperty', 2), ('inbulgaria', 2), ('inthe', 2), ('adopts', 2), ('specify', 2), ('tothe', 2), ('next', 2), ('withstate', 2), ('instead', 2), ('inbulgariaaccording', 2), ('submissionsopinions', 2), ('fulfillment', 2), ('privatizationtransactions', 2), ('pronounce', 2), ('justiceunder', 2), ('acquisition', 2), ('atthe', 2), ('vouchersin', 2), ('compensation', 2), ('disposal', 2), ('setting', 2), ('deadline', 2), ('stipulated', 2), ('site”', 2), ('crafts', 2), ('square', 2), ('meter', 2), ('indicator', 2), ('expected', 2), ('forth', 2), ('c”', 2), ('volume', 2), ('net', 2), ('area”', 2), ('measure', 2), ('assignment', 2), ('remarks', 2), ('threeinstance', 2), ('book', 2), ('vicious', 2), ('preserved', 2), ('undisputed', 2), ('stages', 2), ('rendered', 2), ('typical', 2), ('pr', 2), ('transfrontier', 1), ('january', 1), ('com', 1), ('communications', 1), ('coalition', 1), ('weight', 1), ('audience', 1), ('equally', 1), ('transmission', 1), ('excluding', 1), ('nuances', 1), ('conditioned', 1), ('build', 1), ('mainly', 1), ('focus', 1), ('investments', 1), ('advertising', 1), ('producers', 1), ('represented', 1), ('circulation', 1), ('strictly', 1), ('ta', 1), ('agreement', 1), ('currently', 1), ('secondly', 1), ('agreements', 1), ('unoccupied', 1), ('projection', 1), ('limitation', 1), ('engage', 1), ('facilitated', 1), ('registering', 1), ('easier', 1), ('band', 1), ('disseminate', 1), ('expanded', 1), ('assume', 1), ('preconditions', 1), ('correlated', 1), ('prerequisite', 1), ('mode', 1), ('comparing', 1), ('brief', 1), ('performs', 1), ('deletes', 1), 
('registrations', 1), ('thematically', 1), ('indefinite', 1), ('objectively', 1), ('contract', 1), ('deletion', 1), ('systematic', 1), ('regards', 1), ('using', 1), ('deleted', 1), ('anothers', 1), ('differ', 1), ('sanctioned', 1), ('scheme', 1), ('disadvantage', 1), ('penal', 1), ('dependent', 1), ('transponders', 1), ('arrangements', 1), ('nonair', 1), ('registration”', 1), ('proving', 1), ('candidate', 1), ('shareholders', 1), ('partners', 1), ('complaint', 1), ('factually', 1), ('stocks', 1), ('antitrust', 1), ('prove', 1), ('money', 1), ('laundering', 1), ('proportionality', 1), ('things', 1), ('plane', 1), ('users', 1), ('viewers', 1), ('listeners', 1), ('program”', 1), ('regulator', 1), ('region', 1), ('rural', 1), ('automatically', 1), ('structured', 1), ('collapsing', 1), ('misinterpretation', 1), ('spatial', 1), ('geographical', 1), ('programmatic', 1), ('irregularities', 1), ('certificate', 1), ('reregistered', 1), ('liberal', 1), ('rearranged', 1), ('starts', 1), ('tender', 1), ('continues', 1), ('obtaining', 1), ('furthermore', 1), ('refuses', 1), ('model', 1), ('unbound', 1), ('mood', 1), ('suspends', 1), ('terminates', 1), ('revokes', 1), ('geostationary', 1), ('orbit', 1), ('licensing”', 1), ('distributors', 1), ('disadvantaged', 1), ('sectors', 1), ('crcs', 1), ('incompleteness', 1), ('objection', 1), ('analyze', 1), ('lulucf', 1), ('offered', 1), ('goods', 1), ('risks', 1), ('protective', 1), ('shortcoming', 1), ('aspects', 1), ('substantiated', 1), ('editorial', 1), ('schemes', 1), ('selection', 1), ('eliminate', 1), ('proclaiming', 1), ('schedule', 1), ('achieving', 1), ('ratios', 1), ('latest', 1), ('submitters', 1), ('tf', 1), ('reasonable', 1), ('consistent', 1), ('arise', 1), ('sanctioning', 1), ('behavior', 1), ('stimulate', 1), ('comment', 1), ('evident', 1), ('equalize', 1), ('mutual', 1), ('assistance', 1), ('bases', 1), ('outlined', 1), ('conditions”', 1), ('extends', 1), ('fit', 1), ('defective', 1), ('governing', 1), ('strengthened', 1), ('maintaining', 1), ('eg', 1), ('cooperators', 1), ('reasonably', 1), ('hinders', 1), ('merging', 1), ('stimulation', 1), ('differs', 1), ('significantly', 1), ('counterparties', 1), ('sure', 1), ('participants', 1), ('definability', 1), ('stay', 1), ('noncommencement', 1), ('culpable', 1), ('approaching', 1), ('rise', 1), ('unfair', 1), ('interpret', 1), ('combination', 1), ('filed', 1), ('sign', 1), ('framed', 1), ('binding', 1), ('releasing', 1), ('mandate”', 1), ('angelovabankova', 1), ('requesting', 1), ('reduced', 1), ('nonadmission', 1), ('left', 1), ('repression', 1), ('enjoys', 1), ('assisted', 1), ('literal', 1), ('mp', 1), ('identified', 1), ('preparing', 1), ('declarations', 1), ('reach', 1), ('linking', 1), ('group”', 1), ('leadership', 1), ('expelling', 1), ('insolvent', 1), ('convictions', 1), ('unlimited', 1), ('partycolored', 1), ('participating', 1), ('let', 1), ('alone', 1), ('logically', 1), ('competences', 1), ('exempted', 1), ('submitting', 1), ('closed', 1), ('forests', 1), ('care', 1), ('sdaa', 1), ('position”', 1), ('substantially', 1), ('eventual', 1), ('logic', 1), ('consistency', 1), ('asks', 1), ('rejecting', 1), ('comments', 1), ('developed', 1), ('extend', 1), ('summarize', 1), ('magistracy', 1), ('reduce', 1), ('dismiss', 1), ('extended', 1), ('cover', 1), ('closing', 1), ('inspections', 1), ('improve', 1), ('references', 1), ('statistics', 1), ('inclusion', 1), ('leave', 1), ('managers', 1), ('communicated', 1), ('recognizes', 1), ('legitimizes', 1), ('balancing', 1), ('coincidence', 1), 
('almost', 1), ('moment', 1), ('attitude', 1), ('greater', 1), ('excessively', 1), ('increased', 1), ('particularly', 1), ('presidents', 1), ('obligatorily', 1), ('figure', 1), ('extremely', 1), ('dismissing', 1), ('center', 1), ('proclamation', 1), ('noninterference', 1), ('accountable', 1), ('absurd', 1), ('hear', 1), ('representatives', 1), ('inspect', 1), ('chief', 1), ('supervises', 1), ('methodological', 1), ('guidance', 1), ('supervise', 1), ('junior', 1), ('auspices', 1), ('helps', 1), ('conducting', 1), ('expertises', 1), ('expert', 1), ('opinion”', 1), ('authorize', 1), ('staffing', 1), ('plenums', 1), ('directors', 1), ('reflected', 1), ('raising', 1), ('resources', 1), ('destabilizing', 1), ('monitor', 1), ('observance', 1), ('waive', 1), ('accusatory', 1), ('minimum', 1), ('collect', 1), ('evaluate', 1), ('introducing', 1), ('distinction', 1), ('hierarchy', 1), ('ordinary', 1), ('managerial', 1), ('precludes', 1), ('practically', 1), ('word', 1), ('possesses', 1), ('duties”', 1), ('punishment', 1), ('magistrate', 1), ('like', 1), ('crimes', 1), ('otherwise', 1), ('entirely', 1), ('proven', 1), ('results', 1), ('spot', 1), ('experience', 1), ('с', 1), ('т', 1), ('б', 1), ('quorum', 1), ('vi', 1), ('promoted', 1), ('practices', 1), ('attestation', 1), ('chaired', 1), ('violations', 1), ('dynamics', 1), ('causes', 1), ('fight', 1), ('combating', 1), ('buildings', 1), ('complies', 1), ('assistant', 1), ('qualified', 1), ('assist', 1), ('vii', 1), ('supplementing', 1), ('clraip', 1), ('sdbdsbrugst', 1), ('enacts', 1), ('settle', 1), ('inquiries', 1), ('decree', 1), ('presidium', 1), ('izvestia', 1), ('conventions', 1), ('motivates', 1), ('repressed', 1), ('truth', 1), ('foundation', 1), ('thoroughly', 1), ('retained', 1), ('say', 1), ('anything', 1), ('detailed', 1), ('argumentation', 1), ('infringe', 1), ('rfbh', 1), ('pdpa', 1), ('nonindication', 1), ('designed', 1), ('publicity', 1), ('dictated', 1), ('mention', 1), ('stand', 1), ('occasion', 1), ('disappeared”', 1), ('analyzes', 1), ('threaten', 1), ('reapplying', 1), ('abuses', 1), ('indicating', 1), ('strategic', 1), ('secrets', 1), ('pcpcpc', 1), ('repealing', 1), ('assumed', 1), ('examine', 1), ('accordingly', 1), ('restricted', 1), ('dwell', 1), ('sdbsa', 1), ('commented', 1), ('perversely”', 1), ('fully', 1), ('duties', 1), ('particular', 1), ('inform', 1), ('document', 1), ('systematize', 1), ('ex', 1), ('officio', 1), ('array', 1), ('fulfill', 1), ('relates', 1), ('delegate', 1), ('withdrawn', 1), ('reapply', 1), ('tfpa', 1), ('judgments', 1), ('lustration', 1), ('apparatus', 1), ('zddbds', 1), ('relationships', 1), ('study', 1), ('handling', 1), ('move', 1), ('personality', 1), ('withdrawer', 1), ('impaired', 1), ('repeat', 1), ('survey', 1), ('barrier', 1), ('abuse', 1), ('nature”', 1), ('drawn', 1), ('engaged', 1), ('discriminatory', 1), ('room', 1), ('withdraw', 1), ('belongs', 1), ('по', 1), ('refused', 1), ('researched', 1), ('meet', 1), ('knowingly', 1), ('surveys', 1), ('cryptographic', 1), ('industrial', 1), ('govern', 1), ('appeal”', 1), ('appealing', 1), ('unambiguously', 1), ('nato', 1), ('constitutive', 1), ('great', 1), ('abused', 1), ('priority”', 1), ('affirm', 1), ('speak', 1), ('authoritative', 1), ('emergence', 1), ('conditional', 1), ('suspend', 1), ('authorization', 1), ('encroached', 1), ('often', 1), ('employer', 1), ('exclusively', 1), ('trial', 1), ('residence', 1), ('departure', 1), ('religion', 1), ('cast', 1), ('drawing', 1), ('universally', 1), ('doubtful', 1), ('simple', 1), ('expresses', 
1), ('politically', 1), ('prescribe', 1), ('mix', 1), ('radically', 1), ('justify', 1), ('exclusive', 1), ('compliant', 1), ('thought', 1), ('draw', 1), ('verifies', 1), ('improvement', 1), ('overcome', 1), ('abolition', 1), ('decriminalization', 1), ('ignorance', 1), ('comes', 1), ('exchange', 1), ('democratically', 1), ('organized', 1), ('communication', 1), ('impossibility', 1), ('reasoning', 1), ('variety', 1), ('formulation', 1), ('leander', 1), ('gaskin', 1), ('minor', 1), ('echr', 1), ('obstructing', 1), ('wish', 1), ('la', 1), ('liberte', 1), ('de', 1), ('recevoire', 1), ('des', 1), ('informations', 1), ('…', 1), ('interdit', 1), ('emperor', 1), ('reception', 1), ('authors', 1), ('fournir', 1), ('traced', 1), ('denies', 1), ('improved', 1), ('version', 1), ('original', 1), ('kinds…', 1), ('involves', 1), ('notwithstanding', 1), ('firmly', 1), ('legislators', 1), ('preserve', 1), ('needed', 1), ('reg', 1), ('specially', 1), ('copy', 1), ('lateral', 1), ('corresponds', 1), ('sdba', 1), ('pseudonyms', 1), ('appear', 1), ('diary', 1), ('zidzddbds', 1), ('wanted', 1), ('forming', 1), ('selecting', 1), ('headed', 1), ('rfb', 1), ('discredited', 1), ('damaged', 1), ('verification', 1), ('worked', 1), ('proceedings”', 1), ('aml', 1), ('cft', 1), ('objections', 1), ('reason', 1), ('amend', 1), ('naturally', 1), ('unsettled', 1), ('irrevocable', 1), ('proclaims', 1), ('war', 1), ('martial', 1), ('emergency', 1), ('right”', 1), ('it”', 1), ('emphasized', 1), ('arises', 1), ('subsequently', 1), ('nonsocially', 1), ('traditionally', 1), ('treated', 1), ('space', 1), ('reflects', 1), ('consensus', 1), ('politicians', 1), ('administrators', 1), ('neutralize', 1), ('ensured', 1), ('informing', 1), ('produced', 1), ('search', 1), ('evaluation', 1), ('ensuring', 1), ('preparation', 1), ('internet', 1), ('improvements', 1), ('understand', 1), ('suffered', 1), ('aware', 1), ('connections', 1), ('figures', 1), ('forthcoming', 1), ('ability', 1), ('appeared', 1), ('filled', 1), ('bylaws', 1), ('seized', 1), ('receiving', 1), ('disseminating', 1), ('degrees', 1), ('calculated', 1), ('unambiguous”', 1), ('administrator', 1), ('store', 1), ('receives', 1), ('registers', 1), ('declassification', 1), ('manages', 1), ('cs', 1), ('bulk', 1), ('expired', 1), ('eternal', 1), ('manual', 1), ('ministerial', 1), ('fill', 1), ('certainty', 1), ('lci', 1), ('boards', 1), ('tangible', 1), ('assets', 1), ('subsumed', 1), ('removal', 1), ('becomes', 1), ('meaningless', 1), ('died', 1), ('played', 1), ('role', 1), ('historical', 1), ('generations', 1), ('never', 1), ('forbade', 1), ('civilized', 1), ('encroachment', 1), ('tens', 1), ('destinies', 1), ('governance', 1), ('keeping', 1), ('mass', 1), ('trace', 1), ('announce', 1), ('convening', 1), ('conference', 1), ('un', 1), ('cornerstone', 1), ('broadcast', 1), ('publish', 1), ('news', 1), ('anywhere', 1), ('key', 1), ('serious', 1), ('peace', 1), ('world', 1), ('universal', 1), ('hindrance', 1), ('frontiers', 1), ('assuming', 1), ('interpreting', 1), ('says', 1), ('guided', 1), ('brought', 1), ('text”', 1), ('pacts', 1), ('restrictive', 1), ('acsaa', 1), ('associate', 1), ('active', 1), ('invest', 1), ('beneficiaries', 1), ('preamble', 1), ('на', 1), ('seen', 1), ('none', 1), ('enumeration', 1), ('warsaw', 1), ('pact', 1), ('help', 1), ('cooperated', 1), ('tasks', 1), ('”promulgated', 1), ('canceled', 1), ('legacy', 1), ('europe', 1), ('welcomes', 1), ('scrutiny', 1), ('advises', 1), ('enable', 1), ('recommends', 1), ('johannesburg', 1), ('recommendations', 1), 
('czechoslovakia', 1), ('закон', 1), ('czech', 1), ('hungary', 1), ('xxiii', 1), ('screening', 1), ('poland', 1), ('romania', 1), ('securitate', 1), ('police', 1), ('germany', 1), ('gdr', 1), ('stasi', 1), ('comparative', 1), ('advance', 1), ('sdbdsrugsh', 1), ('know', 1), ('lives', 1), ('miserable', 1), ('happy', 1), ('peaceful', 1), ('history', 1), ('properly', 1), ('characterize', 1), ('wwg', 1), ('operated', 1), ('institutions', 1), ('rhipci', 1), ('pleaded', 1), ('rejection', 1), ('colonel”', 1), ('captain', 1), ('rank”', 1), ('major', 1), ('rear', 1), ('lieutenant', 1), ('vice', 1), ('colonel', 1), ('habilitated', 1), ('occupy', 1), ('habilitation', 1), ('scientific', 1), ('doctor', 1), ('assoc', 1), ('identity', 1), ('debates', 1), ('bill', 1), ('restricting', 1), ('permissible', 1), ('stem', 1), ('ububand', 1), ('principles…', 1), ('optimally', 1), ('sovereignty', 1), ('integrity', 1), ('trend', 1), ('retirement', 1), ('grd', 1), ('inventory', 1), ('found', 1), ('favorable', 1), ('suspended', 1), ('simultaneously', 1), ('petitioner', 1), ('notified', 1), ('achieves', 1), ('investors', 1), ('satisfaction', 1), ('adequate', 1), ('missed', 1), ('privilege', 1), ('unfavorable', 1), ('material', 1), ('notification', 1), ('constitutes', 1), ('comparison', 1), ('resurrect', 1), ('incorporated', 1), ('challenging', 1), ('decisive', 1), ('lose', 1), ('sixmonth', 1), ('month', 1), ('enters', 1), ('signs', 1), ('reform', 1), ('called', 1), ('decrees', 1), ('dedicated', 1), ('detail', 1), ('prospective', 1), ('buyers', 1), ('addressed', 1), ('secure', 1), ('discredit', 1), ('anyone', 1), ('derived', 1), ('objectives', 1), ('indefinitely', 1), ('prescription', 1), ('several', 1), ('effects', 1), ('notifies', 1), ('purchase', 1), ('candidatebuyers', 1), ('price', 1), ('optimal', 1), ('stable', 1), ('guaranteeing', 1), ('inaction', 1), ('loses', 1), ('incentive', 1), ('loss', 1), ('passive', 1), ('preserving', 1), ('specificity', 1), ('efficient', 1), ('qualitatively', 1), ('characterized', 1), ('flexible', 1), ('efficiency', 1), ('nuanced', 1), ('priority', 1), ('contradicting', 1), ('areas', 1), ('caused', 1), ('notify', 1), ('prejudice', 1), ('conversely', 1), ('external', 1), ('bond', 1), ('theory', 1), ('remuneration', 1), ('refutes', 1), ('inviolable', 1), ('disagreement', 1), ('disregard', 1), ('preclusive', 1), ('duration', 1), ('precise', 1), ('ongoing', 1), ('lifethreatening', 1), ('lcs', 1), ('lfsa', 1), ('andsuppl', 1), ('departments', 1), ('possibilities', 1), ('supplementation', 1), ('generals', 1), ('clubs', 1), ('ububpublic', 1), ('obliging', 1), ('percent', 1), ('executed', 1), ('derive', 1), ('financing', 1), ('indisputable', 1), ('burden', 1), ('statutory', 1), ('concepts', 1), ('represents', 1), ('allocation', 1), ('unilaterally', 1), ('range', 1), ('monetary', 1), ('commands', 1), ('tourism”', 1), ('objectified', 1), ('compulsory', 1), ('fvs', 1), ('suppl', 1), ('solution', 1), ('stoyanovapromulgated', 1), ('composed', 1), ('georgimarkov', 1), ('stoyanovamargarita', 1), ('belchevpenka', 1), ('tomchevawith', 1), ('secretaryprotocol', 1), ('galina', 1), ('dobreva', 1), ('indoorshearing', 1), ('reported', 1), ('byjudge', 1), ('stoyanovathe', 1), ('peoplerepresentatives', 1), ('assemblythe', 1), ('shes', 1), ('himchallenged', 1), ('regardingtheir', 1), ('provisionsby', 1), ('underessence', 1), ('constitutedthe', 1), ('justiceprivatization', 1), ('bulgariawithin', 1), ('caseinterested', 1), ('courtthe', 1), ('assessed', 1), ('considerationscontained', 1), ('therein', 1), 
('ruleconsider', 1), ('followingunder', 1), ('pcpaaccording', 1), ('contesting', 1), ('propertyrestricts', 1), ('ininterest', 1), ('ofprivate', 1), ('propertythe', 1), ('pcpaannouncement', 1), ('bya', 1), ('onlyof', 1), ('localselfgovernment', 1), ('enterprisepromulgated', 1), ('becausethe', 1), ('themselvesto', 1), ('actually', 1), ('restrictthe', 1), ('ownershipthe', 1), ('declaringmunicipal', 1), ('councilsthe', 1), ('coverthe', 1), ('asannounced', 1), ('municipalthe', 1), ('yesthe', 1), ('constitutionaccording', 1), ('saleof', 1), ('haveadministrative', 1), ('availableintroduced', 1), ('zpsk', 1), ('true', 1), ('liststhe', 1), ('ofan', 1), ('initiating', 1), ('thereinproperty', 1), ('typethe', 1), ('incapital', 1), ('pcpaof', 1), ('providespossibility', 1), ('selfdetermination', 1), ('bymunicipal', 1), ('theythey', 1), ('willbe', 1), ('indicate', 1), ('liststo', 1), ('andthey', 1), ('announcement', 1), ('artthe', 1), ('making', 1), ('directionallows', 1), ('isnecessary', 1), ('notit', 1), ('yesassess', 1), ('propertyyou', 1), ('infringing', 1), ('dispose', 1), ('owntheir', 1), ('infringement', 1), ('existof', 1), ('provisionsthe', 1), ('fromwhich', 1), ('thatthe', 1), ('toits', 1), ('contentsaccording', 1), ('formedcompanies', 1), ('municipalparticipation', 1), ('thatnamely', 1), ('legislativeresolution', 1), ('notthe', 1), ('privatizationof', 1), ('participationin', 1), ('incontradiction', 1), ('protecting', 1), ('harmsthe', 1), ('themopportunity', 1), ('manage', 1), ('communityunder', 1), ('pcpathe', 1), ('ofchange', 1), ('conditionsof', 1), ('forconsequence', 1), ('favoring', 1), ('buyers”', 1), ('putting', 1), ('inprivileged', 1), ('ministerof', 1), ('”to', 1), ('worded', 1), ('preciselyand', 1), ('wellthe', 1), ('bodiesannounce', 1), ('changing', 1), ('contentsthat', 1), ('presupposesopportunity', 1), ('occur', 1), ('contradictthe', 1), ('whichour', 1), ('allcitizens', 1), ('activitythe', 1), ('casethe', 1), ('artprivatization', 1), ('inevitably', 1), ('accompanied', 1), ('certainpractice', 1), ('tovarious', 1), ('certainprovisions', 1), ('whatever', 1), ('reasonshowever', 1), ('driven', 1), ('itfull', 1), ('correspondence', 1), ('respectivelegal', 1), ('othertherefore', 1), ('inherently', 1), ('lawand', 1), ('depend', 1), ('legislationpermission', 1), ('ofprivatization', 1), ('throughit', 1), ('ofbusiness', 1), ('entitiesassuming', 1), ('toannounce', 1), ('groundsundoubtedly', 1), ('hisimplementation', 1), ('acceptance', 1), ('arbitraryunmotivated', 1), ('ensuresconditions', 1), ('acquaintance', 1), ('partiesthat', 1), ('andart', 1), ('conductingprivatization', 1), ('lawunder', 1), ('pcpawith', 1), ('authorityof', 1), ('underlegality', 1), ('powersunder', 1), ('lawaccording', 1), ('isunreasonable', 1), ('thatwhat', 1), ('forlegality', 1), ('obstruct', 1), ('path', 1), ('partarticle', 1), ('whichaccording', 1), ('agencyone', 1), ('areconcluded', 1), ('valueexceeds', 1), ('million', 1), ('levs', 1), ('regulationsthe', 1), ('compliancewith', 1), ('strategies', 1), ('isnotes', 1), ('reviewof', 1), ('poweris', 1), ('forcarrying', 1), ('normatively', 1), ('totransfer', 1), ('onescompanies', 1), ('except', 1), ('theseproperty', 1), ('findapplication', 1), ('sharesthe', 1), ('asways', 1), ('requirementarising', 1), ('approvalon', 1), ('forthere', 1), ('requirementconversely', 1), ('introductionit', 1), ('lawthe', 1), ('viewthe', 1), ('exceptthe', 1), ('exerciseof', 1), ('noresolves', 1), ('excludejudicial', 1), ('herewhy', 1), ('courtsthe', 1), ('pcpathese', 1), ('containbinding', 1), 
('pwhich', 1), ('eligible', 1), ('commercialcompanies', 1), ('ministerboard', 1), ('ofequality', 1), ('encroaches', 1), ('onprivate', 1), ('patrimony', 1), ('tookwhereasthe', 1), ('issuedunder', 1), ('andmunicipal', 1), ('norefers', 1), ('ofa', 1), ('paymentsuch', 1), ('revokednonmonetary', 1), ('examplelisted', 1), ('voucherscompensation', 1), ('housing', 1), ('longtermbonds', 1), ('debt', 1), ('normativethe', 1), ('paymentsfunds', 1), ('theirsregime', 1), ('conditionsfor', 1), ('validitywhen', 1), ('lapsed', 1), ('changesthe', 1), ('paymentthe', 1), ('unequal', 1), ('individualtypes', 1), ('inequalitybetween', 1), ('nottransferred', 1), ('bythem', 1), ('providestheir', 1), ('wishnecessary', 1), ('introducingthe', 1), ('compulsorily', 1), ('prior', 1), ('faircompensation', 1), ('expropriatedtherefore', 1), ('welltaking', 1), ('constitutionhas', 1), ('followsrejects', 1), ('narodnoassembly', 1), ('oflaw', 1), ('post', 1), ('concerningtheir', 1), ('provisionsjudgment', 1), ('pita', 1), ('alleges', 1), ('expressing', 1), ('inadmissibly', 1), ('audit', 1), ('debtor', 1), ('estimated', 1), ('scale', 1), ('fairness', 1), ('relative', 1), ('arguing', 1), ('defines', 1), ('letters', 1), ('l”', 1), ('m”', 1), ('x”', 1), ('щ”', 1), ('ю”', 1), ('size', 1), ('adds', 1), ('bgn', 1), ('per', 1), ('categorization', 1), ('object', 1), ('settlements', 1), ('zones', 1), ('jobs', 1), ('intensity', 1), ('presumed', 1), ('precondition', 1), ('turnover', 1), ('lower', 1), ('really', 1), ('sita', 1), ('cc', 1), ('interpreted', 1), ('object”', 1), ('amlta', 1), ('conditionality', 1), ('ibaa', 1), ('define', 1), ('unity', 1), ('predictability', 1), ('determinations', 1), ('directions', 1), ('claim”', 1), ('entitles', 1), ('null', 1), ('void', 1), ('filing', 1), ('sixth', 1), ('enter', 1), ('suspension', 1), ('execution', 1), ('overturns', 1), ('return', 1), ('hearing', 1), ('allegedly', 1), ('singleinstance', 1), ('nongovernmental', 1), ('institution', 1), ('reversal', 1), ('brings', 1), ('closer', 1), ('contrast', 1), ('principled', 1), ('predominant', 1), ('ububsuch', 1), ('discipline', 1), ('emphasizes', 1), ('assigning', 1), ('expands', 1), ('ubub', 1), ('show', 1), ('unanimity', 1), ('accepting', 1), ('rationalization', 1), ('stabilization', 1), ('discussion', 1), ('met', 1), ('listing', 1), ('settled', 1), ('hardly', 1), ('doubt', 1), ('prevailing', 1), ('triinstance', 1), ('imperatives', 1), ('disputable', 1), ('connect', 1), ('cassation”', 1), ('controlrevocation', 1), ('convincing', 1), ('entering', 1), ('details', 1), ('judiciary”', 1), ('process”', 1), ('insignificant', 1), ('retrial', 1), ('confirms', 1), ('realization', 1), ('annuls', 1), ('essential', 1), ('courts”', 1), ('delegated', 1), ('follow', 1), ('prohibited', 1), ('resolves', 1), ('jurisdictional', 1), ('resolve', 1), ('ububcourt', 1), ('annul', 1), ('unfinished', 1), ('weeks', 1), ('predestines', 1), ('fate', 1), ('inherent', 1), ('annulling', 1), ('option', 1), ('enactment', 1), ('duly', 1), ('pending', 1), ('cancels', 1), ('measures”', 1), ('xa', 1)]
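###Markdown
For comparison, the cell below is a minimal sketch of a tidier single-file word count that skips the naive pipeline's round trip through str(). It is only a sketch: the regex is an assumption standing in for whatever `clean1` (defined earlier) actually does, and the example call reuses the combined-file path from above.
###Code
import codecs
import re
from collections import Counter
def count_words(path):
    """Returns word frequencies for one combined decision file."""
    # Read the file as UTF-8 and lowercase it before tokenizing.
    text = codecs.open(path, "r", "utf-8").read().lower()
    # Keep alphabetic tokens plus the section sign used in legal citations.
    tokens = re.findall(r"[a-z§]+", text)
    return Counter(tokens).most_common()
# Example call (same file as above):
# count_words("/Users/schap/Desktop/TA Data/All Text Files Combined/ALL/all2002text.txt")[:20]
###Output
_____no_output_____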
|
machine_learning/gan/wgan/tf_wgan/tf_wgan_module.ipynb | ###Markdown
README.md
###Code
%%writefile README.md
Implementation of [Wasserstein GAN](https://arxiv.org/abs/1701.07875).
###Output
_____no_output_____
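###Markdown
As a usage sketch (my own addition, not part of the original module): the trainer assembled by the `%%writefile` cells below could be launched roughly as shown here. The flags are taken from `task.py`, but the data paths and output directory are placeholders, and running it as a package assumes `wgan_module/` and `wgan_module/trainer/` contain `__init__.py` files, which are not created in this notebook.
###Code
# Hypothetical invocation sketch -- paths and values are placeholders, not from the original notebook.
!python -m wgan_module.trainer.task \
    --train_file_pattern="data/train-*.tfrecord" \
    --eval_file_pattern="data/eval-*.tfrecord" \
    --output_dir="trained_wgan" \
    --train_steps=1000 \
    --train_batch_size=32 \
    --generator_final_activation="tanh"
###Output
_____no_output_____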
###Markdown
print_object.py
###Code
%%writefile wgan_module/trainer/print_object.py
def print_obj(function_name, object_name, object_value):
"""Prints enclosing function, object name, and object value.
Args:
function_name: str, name of function.
object_name: str, name of object.
object_value: object, value of passed object.
"""
# pass
print("{}: {} = {}".format(function_name, object_name, object_value))
###Output
_____no_output_____
###Markdown
image_utils.py
###Code
%%writefile wgan_module/trainer/image_utils.py
import tensorflow as tf
from .print_object import print_obj
def preprocess_image(image, params):
"""Preprocess image tensor.
Args:
image: tensor, input image with shape
[cur_batch_size, height, width, depth].
params: dict, user passed parameters.
Returns:
Preprocessed image tensor with shape
[cur_batch_size, height, width, depth].
"""
func_name = "preprocess_image"
# Convert from [0, 255] -> [-1.0, 1.0] floats.
image = tf.cast(x=image, dtype=tf.float32) * (2. / 255) - 1.0
print_obj(func_name, "image", image)
return image
def resize_fake_images(fake_images, params):
"""Resizes fake images to match real image sizes.
Args:
fake_images: tensor, fake images from generator.
params: dict, user passed parameters.
Returns:
Resized image tensor.
"""
func_name = "resize_real_image"
print_obj("\n" + func_name, "fake_images", fake_images)
# Resize fake images to match real image sizes.
resized_fake_images = tf.image.resize(
images=fake_images,
size=[params["height"], params["width"]],
method="nearest",
name="resized_fake_images"
)
print_obj(func_name, "resized_fake_images", resized_fake_images)
return resized_fake_images
###Output
_____no_output_____
###Markdown
input.py
###Code
%%writefile wgan_module/trainer/input.py
import tensorflow as tf
from . import image_utils
from .print_object import print_obj
def decode_example(protos, params):
"""Decodes TFRecord file into tensors.
Given protobufs, decode into image and label tensors.
Args:
protos: protobufs from TFRecord file.
params: dict, user passed parameters.
Returns:
Image and label tensors.
"""
func_name = "decode_example"
# Create feature schema map for protos.
features = {
"image_raw": tf.FixedLenFeature(shape=[], dtype=tf.string),
"label": tf.FixedLenFeature(shape=[], dtype=tf.int64)
}
# Parse features from tf.Example.
parsed_features = tf.parse_single_example(
serialized=protos, features=features
)
print_obj("\n" + func_name, "features", features)
# Convert from a scalar string tensor (whose single string has
# length height * width * depth) to a uint8 tensor with shape
# [height * width * depth].
image = tf.decode_raw(
input_bytes=parsed_features["image_raw"], out_type=tf.uint8
)
print_obj(func_name, "image", image)
# Reshape flattened image back into normal dimensions.
image = tf.reshape(
tensor=image,
shape=[params["height"], params["width"], params["depth"]]
)
print_obj(func_name, "image", image)
# Preprocess image.
image = image_utils.preprocess_image(image=image, params=params)
print_obj(func_name, "image", image)
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(x=parsed_features["label"], dtype=tf.int32)
print_obj(func_name, "label", label)
return {"image": image}, label
def read_dataset(filename, mode, batch_size, params):
"""Reads TF Record data using tf.data, doing necessary preprocessing.
Given filename, mode, batch size, and other parameters, read TF Record
dataset using Dataset API, apply necessary preprocessing, and return an
input function to the Estimator API.
Args:
filename: str, file pattern that to read into our tf.data dataset.
mode: The estimator ModeKeys. Can be TRAIN or EVAL.
batch_size: int, number of examples per batch.
params: dict, dictionary of user passed parameters.
Returns:
An input function.
"""
def _input_fn():
"""Wrapper input function used by Estimator API to get data tensors.
Returns:
Batched dataset object of dictionary of feature tensors and label
tensor.
"""
# Create list of files that match pattern.
file_list = tf.gfile.Glob(filename=filename)
# Create dataset from file list.
if params["input_fn_autotune"]:
dataset = tf.data.TFRecordDataset(
filenames=file_list,
num_parallel_reads=tf.contrib.data.AUTOTUNE
)
else:
dataset = tf.data.TFRecordDataset(filenames=file_list)
# Shuffle and repeat if training with fused op.
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(
buffer_size=50 * batch_size,
count=None # indefinitely
)
)
        # Decode TF Example protos into a features dictionary of tensors, then batch.
if params["input_fn_autotune"]:
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
map_func=lambda x: decode_example(
protos=x,
params=params
),
batch_size=batch_size,
num_parallel_calls=tf.contrib.data.AUTOTUNE
)
)
else:
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
map_func=lambda x: decode_example(
protos=x,
params=params
),
batch_size=batch_size
)
)
# Prefetch data to improve latency.
if params["input_fn_autotune"]:
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
else:
dataset = dataset.prefetch(buffer_size=1)
        # Create an iterator, then get a batch of features from the example queue.
batched_dataset = dataset.make_one_shot_iterator().get_next()
return batched_dataset
return _input_fn
###Output
_____no_output_____
###Markdown
generator.py
###Code
%%writefile wgan_module/trainer/generator.py
import tensorflow as tf
from .print_object import print_obj
class Generator(object):
"""Generator that takes latent vector input and outputs image.
Fields:
name: str, name of `Generator`.
        kernel_regularizer: `l1_l2_regularizer` object, regularizer for kernel
            variables.
        bias_regularizer: `l1_l2_regularizer` object, regularizer for bias
            variables.
"""
def __init__(self, kernel_regularizer, bias_regularizer, name):
"""Instantiates and builds generator network.
Args:
            kernel_regularizer: `l1_l2_regularizer` object, regularizer for
                kernel variables.
            bias_regularizer: `l1_l2_regularizer` object, regularizer for bias
                variables.
name: str, name of generator.
"""
# Set name of generator.
self.name = name
# Regularizer for kernel weights.
self.kernel_regularizer = kernel_regularizer
# Regularizer for bias weights.
self.bias_regularizer = bias_regularizer
def get_fake_images(self, Z, mode, params):
"""Creates generator network and returns generated images.
Args:
Z: tensor, latent vectors of shape [cur_batch_size, latent_size].
mode: tf.estimator.ModeKeys with values of either TRAIN, EVAL, or
PREDICT.
params: dict, user passed parameters.
Returns:
Generated image tensor of shape
[cur_batch_size, height, width, depth].
"""
func_name = "get_fake_images"
print_obj("\n" + func_name, "Z", Z)
# Dictionary containing possible final activations.
final_activation_dict = {
"sigmoid": tf.nn.sigmoid, "relu": tf.nn.relu, "tanh": tf.nn.tanh
}
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
# Project latent vectors.
projection_height = params["generator_projection_dims"][0]
projection_width = params["generator_projection_dims"][1]
projection_depth = params["generator_projection_dims"][2]
# shape = (
# cur_batch_size,
# projection_height * projection_width * projection_depth
# )
projection = tf.layers.dense(
inputs=Z,
units=projection_height * projection_width * projection_depth,
activation=None,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="projection_dense_layer"
)
print_obj(func_name, "projection", projection)
projection_leaky_relu = tf.nn.leaky_relu(
features=projection,
alpha=params["generator_leaky_relu_alpha"],
name="projection_leaky_relu"
)
print_obj(
func_name, "projection_leaky_relu", projection_leaky_relu
)
# Add batch normalization to keep the inputs from blowing up.
# shape = (
# cur_batch_size,
# projection_height * projection_width * projection_depth
# )
projection_batch_norm = tf.layers.batch_normalization(
inputs=projection_leaky_relu,
training=(mode == tf.estimator.ModeKeys.TRAIN),
name="projection_batch_norm"
)
print_obj(
func_name, "projection_batch_norm", projection_batch_norm
)
# Reshape projection into "image".
# shape = (
# cur_batch_size,
# projection_height,
# projection_width,
# projection_depth
# )
network = tf.reshape(
tensor=projection_batch_norm,
shape=[
-1, projection_height, projection_width, projection_depth
],
name="projection_reshaped"
)
print_obj(func_name, "network", network)
# Iteratively build upsampling layers.
for i in range(len(params["generator_num_filters"])):
# Add conv transpose layers with given params per layer.
# shape = (
# cur_batch_size,
# generator_kernel_sizes[i - 1] * generator_strides[i],
# generator_kernel_sizes[i - 1] * generator_strides[i],
# generator_num_filters[i]
# )
network = tf.layers.conv2d_transpose(
inputs=network,
filters=params["generator_num_filters"][i],
kernel_size=params["generator_kernel_sizes"][i],
strides=params["generator_strides"][i],
padding="same",
activation=None,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="layers_conv2d_tranpose_{}".format(i)
)
print_obj(func_name, "network", network)
network = tf.nn.leaky_relu(
features=network,
alpha=params["generator_leaky_relu_alpha"],
name="leaky_relu_{}".format(i)
)
print_obj(func_name, "network", network)
# Add batch normalization to keep the inputs from blowing up.
network = tf.layers.batch_normalization(
inputs=network,
training=(mode == tf.estimator.ModeKeys.TRAIN),
name="layers_batch_norm_{}".format(i)
)
print_obj(func_name, "network", network)
# Final conv2d transpose layer for image output.
# shape = (cur_batch_size, height, width, depth)
fake_images = tf.layers.conv2d_transpose(
inputs=network,
filters=params["generator_final_num_filters"],
kernel_size=params["generator_final_kernel_size"],
strides=params["generator_final_stride"],
padding="same",
activation=final_activation_dict.get(
params["generator_final_activation"].lower(), None
),
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="layers_conv2d_tranpose_fake_images"
)
print_obj(func_name, "fake_images", fake_images)
return fake_images
def get_generator_loss(self, fake_logits):
"""Gets generator loss.
Args:
fake_logits: tensor, shape of
[cur_batch_size, 1].
Returns:
Tensor of generator's total loss of shape [].
"""
func_name = "get_generator_loss"
# Calculate base generator loss.
generator_loss = -tf.reduce_mean(
input_tensor=fake_logits,
name="generator_loss"
)
print_obj("\n" + func_name, "generator_loss", generator_loss)
# Get regularization losses.
generator_reg_loss = tf.losses.get_regularization_loss(
scope="generator",
name="generator_reg_loss"
)
print_obj(func_name, "generator_reg_loss", generator_reg_loss)
# Combine losses for total losses.
generator_total_loss = tf.math.add(
x=generator_loss,
y=generator_reg_loss,
name="generator_total_loss"
)
print_obj(func_name, "generator_total_loss", generator_total_loss)
# Add summaries for TensorBoard.
tf.summary.scalar(
name="generator_loss", tensor=generator_loss, family="losses"
)
tf.summary.scalar(
name="generator_reg_loss",
tensor=generator_reg_loss,
family="losses"
)
tf.summary.scalar(
name="generator_total_loss",
tensor=generator_total_loss,
family="total_losses"
)
return generator_total_loss
###Output
_____no_output_____
###Markdown
critic.py
###Code
%%writefile wgan_module/trainer/critic.py
import tensorflow as tf
from .print_object import print_obj
class Critic(object):
"""Critic that takes image input and outputs logits.
Fields:
name: str, name of `Critic`.
        kernel_regularizer: `l1_l2_regularizer` object, regularizer for kernel
            variables.
        bias_regularizer: `l1_l2_regularizer` object, regularizer for bias
            variables.
"""
def __init__(self, kernel_regularizer, bias_regularizer, name):
"""Instantiates and builds critic network.
Args:
            kernel_regularizer: `l1_l2_regularizer` object, regularizer for
                kernel variables.
            bias_regularizer: `l1_l2_regularizer` object, regularizer for bias
                variables.
name: str, name of critic.
"""
# Set name of critic.
self.name = name
# Regularizer for kernel weights.
self.kernel_regularizer = kernel_regularizer
# Regularizer for bias weights.
self.bias_regularizer = bias_regularizer
def get_critic_logits(self, X, params):
"""Creates critic network and returns logits.
Args:
X: tensor, image tensors of shape
[cur_batch_size, height, width, depth].
params: dict, user passed parameters.
Returns:
Logits tensor of shape [cur_batch_size, 1].
"""
func_name = "get_critic_logits"
# Create the input layer to our CNN.
# shape = (cur_batch_size, height * width * depth)
network = X
print_obj("\n" + func_name, "network", network)
with tf.variable_scope("critic", reuse=tf.AUTO_REUSE):
# Iteratively build downsampling layers.
for i in range(len(params["critic_num_filters"])):
# Add convolutional layers with given params per layer.
# shape = (
# cur_batch_size,
# critic_kernel_sizes[i - 1] / critic_strides[i],
# critic_kernel_sizes[i - 1] / critic_strides[i],
# critic_num_filters[i]
# )
network = tf.layers.conv2d(
inputs=network,
filters=params["critic_num_filters"][i],
kernel_size=params["critic_kernel_sizes"][i],
strides=params["critic_strides"][i],
padding="same",
activation=None,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="layers_conv2d_{}".format(i)
)
print_obj(func_name, "network", network)
network = tf.nn.leaky_relu(
features=network,
alpha=params["critic_leaky_relu_alpha"],
name="leaky_relu_{}".format(i)
)
print_obj(func_name, "network", network)
# Add some dropout for better regularization and stability.
network = tf.layers.dropout(
inputs=network,
rate=params["critic_dropout_rates"][i],
name="layers_dropout_{}".format(i)
)
print_obj(func_name, "network", network)
# Flatten network output.
# shape = (
# cur_batch_size,
# (critic_kernel_sizes[-2] / critic_strides[-1]) ** 2 * critic_num_filters[-1]
# )
network_flat = tf.layers.Flatten()(inputs=network)
print_obj(func_name, "network_flat", network_flat)
# Final linear layer for logits.
# shape = (cur_batch_size, 1)
logits = tf.layers.dense(
inputs=network_flat,
units=1,
activation=None,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="layers_dense_logits"
)
print_obj(func_name, "logits", logits)
return logits
def get_critic_loss(self, fake_logits, real_logits):
"""Gets critic loss.
Args:
fake_logits: tensor, shape of [cur_batch_size, 1].
real_logits: tensor, shape of [cur_batch_size, 1].
Returns:
Tensor of critic's total loss of shape [].
"""
func_name = "get_critic_loss"
# Calculate base critic loss.
critic_real_loss = tf.reduce_mean(
input_tensor=real_logits, name="critic_real_loss"
)
print_obj("\n" + func_name, "critic_real_loss", critic_real_loss)
critic_fake_loss = tf.reduce_mean(
input_tensor=fake_logits, name="critic_fake_loss"
)
print_obj(
func_name, "critic_fake_loss", critic_fake_loss
)
critic_loss = tf.subtract(
x=critic_fake_loss, y=critic_real_loss, name="critic_loss"
)
print_obj(func_name, "critic_loss", critic_loss)
# Get regularization losses.
critic_reg_loss = tf.losses.get_regularization_loss(
scope="critic", name="critic_reg_loss"
)
print_obj(func_name, "critic_reg_loss", critic_reg_loss)
# Combine losses for total losses.
critic_total_loss = tf.math.add(
x=critic_loss, y=critic_reg_loss, name="critic_total_loss"
)
print_obj(func_name, "critic_total_loss", critic_total_loss)
# Add summaries for TensorBoard.
tf.summary.scalar(
name="critic_real_loss", tensor=critic_real_loss, family="losses"
)
tf.summary.scalar(
name="critic_fake_loss", tensor=critic_fake_loss, family="losses"
)
tf.summary.scalar(
name="critic_loss", tensor=critic_loss, family="losses"
)
tf.summary.scalar(
name="critic_reg_loss", tensor=critic_reg_loss, family="losses"
)
tf.summary.scalar(
name="critic_total_loss",
tensor=critic_total_loss,
family="total_losses"
)
return critic_total_loss
###Output
_____no_output_____
###Markdown
train_and_eval.py
###Code
%%writefile wgan_module/trainer/train_and_eval.py
import tensorflow as tf
from . import image_utils
from .print_object import print_obj
def get_logits_and_losses(features, generator, critic, mode, params):
"""Gets logits and losses for both train and eval modes.
Args:
features: dict, feature tensors from input function.
generator: instance of generator.`Generator`.
critic: instance of critic.`Critic`.
mode: tf.estimator.ModeKeys with values of either TRAIN or EVAL.
params: dict, user passed parameters.
Returns:
Real and fake logits and generator and critic losses.
"""
func_name = "get_logits_and_losses"
# Extract real images from features dictionary.
real_images = features["image"]
print_obj("\n" + func_name, "real_images", real_images)
# Get dynamic batch size in case of partial batch.
cur_batch_size = tf.shape(
input=real_images,
out_type=tf.int32,
name="{}_cur_batch_size".format(func_name)
)[0]
# Create random noise latent vector for each batch example.
Z = tf.random.normal(
shape=[cur_batch_size, params["latent_size"]],
mean=0.0,
stddev=1.0,
dtype=tf.float32
)
print_obj(func_name, "Z", Z)
# Get generated image from generator network from gaussian noise.
print("\nCall generator with Z = {}.".format(Z))
fake_images = generator.get_fake_images(Z=Z, mode=mode, params=params)
# Resize fake images to match real image sizes.
fake_images = image_utils.resize_fake_images(fake_images, params)
print_obj(func_name, "fake_images", fake_images)
# Add summaries for TensorBoard.
tf.summary.image(
name="fake_images",
tensor=tf.reshape(
tensor=fake_images,
shape=[-1, params["height"], params["width"], params["depth"]]
),
max_outputs=5,
)
# Get fake logits from critic using generator's output image.
print("\nCall critic with fake_images = {}.".format(fake_images))
fake_logits = critic.get_critic_logits(
X=fake_images, params=params
)
# Get real logits from critic using real image.
print(
"\nCall critic with real_images = {}.".format(real_images)
)
real_logits = critic.get_critic_logits(
X=real_images, params=params
)
# Get generator total loss.
generator_total_loss = generator.get_generator_loss(
fake_logits=fake_logits
)
# Get critic total loss.
critic_total_loss = critic.get_critic_loss(
fake_logits=fake_logits, real_logits=real_logits
)
return (real_logits,
fake_logits,
generator_total_loss,
critic_total_loss)
###Output
_____no_output_____
###Markdown
train.py
###Code
%%writefile wgan_module/trainer/train.py
import tensorflow as tf
from .print_object import print_obj
def get_variables_and_gradients(loss, scope):
"""Gets variables and their gradients wrt. loss.
Args:
loss: tensor, shape of [].
scope: str, the network's name to find its variables to train.
Returns:
Lists of variables and their gradients.
"""
func_name = "get_variables_and_gradients"
# Get trainable variables.
variables = tf.trainable_variables(scope=scope)
print_obj("\n{}_{}".format(func_name, scope), "variables", variables)
# Get gradients.
gradients = tf.gradients(
ys=loss,
xs=variables,
name="{}_gradients".format(scope)
)
print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
# Add variable names back in for identification.
gradients = [
tf.identity(
input=g,
name="{}_{}_gradients".format(func_name, v.name[:-2])
)
if tf.is_tensor(x=g) else g
for g, v in zip(gradients, variables)
]
print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
return variables, gradients
def create_variable_and_gradient_histogram_summaries(loss_dict, params):
"""Creates variable and gradient histogram summaries.
Args:
loss_dict: dict, keys are scopes and values are scalar loss tensors
for each network kind.
params: dict, user passed parameters.
"""
for scope, loss in loss_dict.items():
# Get variables and their gradients wrt. loss.
variables, gradients = get_variables_and_gradients(loss, scope)
# Add summaries for TensorBoard.
for g, v in zip(gradients, variables):
tf.summary.histogram(
name="{}".format(v.name[:-2]),
values=v,
family="{}_variables".format(scope)
)
if tf.is_tensor(x=g):
tf.summary.histogram(
name="{}".format(v.name[:-2]),
values=g,
family="{}_gradients".format(scope)
)
def train_network(loss, global_step, params, scope):
"""Trains network and returns loss and train op.
Args:
loss: tensor, shape of [].
global_step: tensor, the current training step or batch in the
training loop.
params: dict, user passed parameters.
        scope: str, scope of the variables to train.
Returns:
Loss tensor and training op.
"""
func_name = "train_network"
print_obj("\n" + func_name, "scope", scope)
# Create optimizer map.
optimizers = {
"Adam": tf.train.AdamOptimizer,
"Adadelta": tf.train.AdadeltaOptimizer,
"AdagradDA": tf.train.AdagradDAOptimizer,
"Adagrad": tf.train.AdagradOptimizer,
"Ftrl": tf.train.FtrlOptimizer,
"GradientDescent": tf.train.GradientDescentOptimizer,
"Momentum": tf.train.MomentumOptimizer,
"ProximalAdagrad": tf.train.ProximalAdagradOptimizer,
"ProximalGradientDescent": tf.train.ProximalGradientDescentOptimizer,
"RMSProp": tf.train.RMSPropOptimizer
}
# Get optimizer and instantiate it.
if params["{}_optimizer".format(scope)] == "Adam":
optimizer = optimizers[params["{}_optimizer".format(scope)]](
learning_rate=params["{}_learning_rate".format(scope)],
beta1=params["{}_adam_beta1".format(scope)],
beta2=params["{}_adam_beta2".format(scope)],
epsilon=params["{}_adam_epsilon".format(scope)],
name="{}_{}_optimizer".format(
scope, params["{}_optimizer".format(scope)].lower()
)
)
elif params["{}_optimizer".format(scope)] == "RMSProp":
optimizer = optimizers[params["{}_optimizer".format(scope)]](
learning_rate=params["{}_learning_rate".format(scope)],
decay=params["{}_rmsprop_decay".format(scope)],
momentum=params["{}_rmsprop_momentum".format(scope)],
epsilon=params["{}_rmsprop_epsilon".format(scope)],
name="{}_{}_optimizer".format(
scope, params["{}_optimizer".format(scope)].lower()
)
)
else:
optimizer = optimizers[params["{}_optimizer".format(scope)]](
learning_rate=params["{}_learning_rate".format(scope)],
name="{}_{}_optimizer".format(
scope, params["{}_optimizer".format(scope)].lower()
)
)
print_obj("{}_{}".format(func_name, scope), "optimizer", optimizer)
# Get gradients.
gradients = tf.gradients(
ys=loss,
xs=tf.trainable_variables(scope=scope),
name="{}_gradients".format(scope)
)
print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
# Clip gradients.
if params["{}_clip_gradients".format(scope)]:
gradients, _ = tf.clip_by_global_norm(
t_list=gradients,
clip_norm=params["{}_clip_gradients".format(scope)],
name="{}_clip_by_global_norm_gradients".format(scope)
)
print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
# Zip back together gradients and variables.
grads_and_vars = zip(gradients, tf.trainable_variables(scope=scope))
print_obj(
"{}_{}".format(func_name, scope), "grads_and_vars", grads_and_vars
)
# Create train op by applying gradients to variables and incrementing
# global step.
train_op = optimizer.apply_gradients(
grads_and_vars=grads_and_vars,
global_step=global_step,
name="{}_apply_gradients".format(scope)
)
# Clip weights.
if params["{}_clip_weights".format(scope)]:
with tf.control_dependencies(control_inputs=[train_op]):
clip_value_min = params["{}_clip_weights".format(scope)][0]
clip_value_max = params["{}_clip_weights".format(scope)][1]
train_op = tf.group(
[
tf.assign(
ref=v,
value=tf.clip_by_value(
t=v,
clip_value_min=clip_value_min,
clip_value_max=clip_value_max
)
)
for v in tf.trainable_variables(scope=scope)
],
name="{}_clip_by_value_weights".format(scope)
)
return loss, train_op
def get_loss_and_train_op(
generator_total_loss, critic_total_loss, params):
"""Gets loss and train op for train mode.
Args:
generator_total_loss: tensor, scalar total loss of generator.
critic_total_loss: tensor, scalar total loss of critic.
params: dict, user passed parameters.
Returns:
Loss scalar tensor and train_op to be used by the EstimatorSpec.
"""
func_name = "get_loss_and_train_op"
# Get global step.
global_step = tf.train.get_or_create_global_step()
# Determine if it is time to train generator or critic.
cycle_step = tf.mod(
x=global_step,
y=tf.cast(
x=tf.add(
x=params["critic_train_steps"],
y=params["generator_train_steps"]
),
dtype=tf.int64
),
name="{}_cycle_step".format(func_name)
)
# Create choose critic condition.
condition = tf.less(
x=cycle_step, y=params["critic_train_steps"]
)
# Needed for batch normalization, but has no effect otherwise.
update_ops = tf.get_collection(key=tf.GraphKeys.UPDATE_OPS)
# Ensure update ops get updated.
with tf.control_dependencies(control_inputs=update_ops):
# Conditionally choose to train generator or critic subgraph.
loss, train_op = tf.cond(
pred=condition,
true_fn=lambda: train_network(
loss=critic_total_loss,
global_step=global_step,
params=params,
scope="critic"
),
false_fn=lambda: train_network(
loss=generator_total_loss,
global_step=global_step,
params=params,
scope="generator"
)
)
return loss, train_op
###Output
_____no_output_____
###Markdown
eval_metrics.py
###Code
%%writefile wgan_module/trainer/eval_metrics.py
import tensorflow as tf
from .print_object import print_obj
def get_eval_metric_ops(fake_logits, real_logits, params):
"""Gets eval metric ops.
Args:
fake_logits: tensor, shape of [cur_batch_size, 1] that came from
critic having processed generator's output image.
real_logits: tensor, shape of [cur_batch_size, 1] that came from
critic having processed real image.
params: dict, user passed parameters.
Returns:
Dictionary of eval metric ops.
"""
func_name = "get_eval_metric_ops"
# Concatenate critic logits and labels.
critic_logits = tf.concat(
values=[real_logits, fake_logits],
axis=0,
name="critic_concat_logits"
)
print_obj("\n" + func_name, "critic_logits", critic_logits)
critic_labels = tf.concat(
values=[
tf.ones_like(tensor=real_logits),
tf.zeros_like(tensor=fake_logits)
],
axis=0,
name="critic_concat_labels"
)
print_obj(func_name, "critic_labels", critic_labels)
# Calculate critic probabilities.
critic_probabilities = tf.nn.sigmoid(
x=critic_logits, name="critic_probabilities"
)
print_obj(
func_name, "critic_probabilities", critic_probabilities
)
# Create eval metric ops dictionary.
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=critic_labels,
predictions=critic_probabilities,
name="critic_accuracy"
),
"precision": tf.metrics.precision(
labels=critic_labels,
predictions=critic_probabilities,
name="critic_precision"
),
"recall": tf.metrics.recall(
labels=critic_labels,
predictions=critic_probabilities,
name="critic_recall"
),
"auc_roc": tf.metrics.auc(
labels=critic_labels,
predictions=critic_probabilities,
num_thresholds=200,
curve="ROC",
name="critic_auc_roc"
),
"auc_pr": tf.metrics.auc(
labels=critic_labels,
predictions=critic_probabilities,
num_thresholds=200,
curve="PR",
name="critic_auc_pr"
)
}
print_obj(func_name, "eval_metric_ops", eval_metric_ops)
return eval_metric_ops
###Output
_____no_output_____
###Markdown
predict.py
###Code
%%writefile wgan_module/trainer/predict.py
import tensorflow as tf
from . import image_utils
from .print_object import print_obj
def get_predictions_and_export_outputs(features, generator, params):
"""Gets predictions and serving export outputs.
Args:
features: dict, feature tensors from serving input function.
generator: instance of `Generator`.
params: dict, user passed parameters.
Returns:
Predictions dictionary and export outputs dictionary.
"""
func_name = "get_predictions_and_export_outputs"
# Extract given latent vectors from features dictionary.
Z = features["Z"]
print_obj("\n" + func_name, "Z", Z)
# Get generated images from generator using latent vector.
generated_images = generator.get_fake_images(
Z=Z, mode=tf.estimator.ModeKeys.PREDICT, params=params
)
print_obj(func_name, "generated_images", generated_images)
# Resize generated images to match real image sizes.
generated_images = image_utils.resize_fake_images(
fake_images=generated_images, params=params
)
print_obj(func_name, "generated_images", generated_images)
# Create predictions dictionary.
predictions_dict = {
"generated_images": generated_images
}
print_obj(func_name, "predictions_dict", predictions_dict)
# Create export outputs.
export_outputs = {
"predict_export_outputs": tf.estimator.export.PredictOutput(
outputs=predictions_dict)
}
print_obj(func_name, "export_outputs", export_outputs)
return predictions_dict, export_outputs
###Output
_____no_output_____
###Markdown
wgan.py
###Code
%%writefile wgan_module/trainer/wgan.py
import tensorflow as tf
from . import critic
from . import eval_metrics
from . import generator
from . import predict
from . import train
from . import train_and_eval
from .print_object import print_obj
def wgan_model(features, labels, mode, params):
"""Wasserstein GAN custom Estimator model function.
Args:
features: dict, keys are feature names and values are feature tensors.
labels: tensor, label data.
mode: tf.estimator.ModeKeys with values of either TRAIN, EVAL, or
PREDICT.
params: dict, user passed parameters.
Returns:
Instance of `tf.estimator.EstimatorSpec` class.
"""
func_name = "wgan_model"
print_obj("\n" + func_name, "features", features)
print_obj(func_name, "labels", labels)
print_obj(func_name, "mode", mode)
print_obj(func_name, "params", params)
# Loss function, training/eval ops, etc.
predictions_dict = None
loss = None
train_op = None
eval_metric_ops = None
export_outputs = None
# Instantiate generator.
wgan_generator = generator.Generator(
kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(
scale_l1=params["generator_l1_regularization_scale"],
scale_l2=params["generator_l2_regularization_scale"]
),
bias_regularizer=None,
name="generator"
)
# Instantiate critic.
wgan_critic = critic.Critic(
kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(
scale_l1=params["critic_l1_regularization_scale"],
scale_l2=params["critic_l2_regularization_scale"]
),
bias_regularizer=None,
name="critic"
)
if mode == tf.estimator.ModeKeys.PREDICT:
# Get predictions and export outputs.
(predictions_dict,
export_outputs) = predict.get_predictions_and_export_outputs(
features=features, generator=wgan_generator, params=params
)
else:
# Get logits and losses from networks for train and eval modes.
(real_logits,
fake_logits,
generator_total_loss,
critic_total_loss) = train_and_eval.get_logits_and_losses(
features=features,
generator=wgan_generator,
critic=wgan_critic,
mode=mode,
params=params
)
if mode == tf.estimator.ModeKeys.TRAIN:
# Create variable and gradient histogram summaries.
train.create_variable_and_gradient_histogram_summaries(
loss_dict={
"generator": generator_total_loss,
"critic": critic_total_loss
},
params=params
)
# Get loss and train op for EstimatorSpec.
loss, train_op = train.get_loss_and_train_op(
generator_total_loss=generator_total_loss,
critic_total_loss=critic_total_loss,
params=params
)
else:
# Set eval loss.
loss = critic_total_loss
# Get eval metrics.
eval_metric_ops = eval_metrics.get_eval_metric_ops(
real_logits=real_logits,
fake_logits=fake_logits,
params=params
)
# Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs
)
###Output
_____no_output_____
###Markdown
serving.py
###Code
%%writefile wgan_module/trainer/serving.py
import tensorflow as tf
from .print_object import print_obj
def serving_input_fn(params):
"""Serving input function.
Args:
params: dict, user passed parameters.
Returns:
ServingInputReceiver object containing features and receiver tensors.
"""
func_name = "serving_input_fn"
# Create placeholders to accept data sent to the model at serving time.
    # shape = (batch_size, latent_size)
feature_placeholders = {
"Z": tf.placeholder(
dtype=tf.float32,
shape=[None, params["latent_size"]],
name="serving_input_placeholder_Z"
)
}
print_obj("\n" + func_name, "feature_placeholders", feature_placeholders)
# Create clones of the feature placeholder tensors so that the SavedModel
# SignatureDef will point to the placeholder.
features = {
key: tf.identity(
input=value,
name="{}_identity_placeholder_{}".format(func_name, key)
)
for key, value in feature_placeholders.items()
}
print_obj(func_name, "features", features)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=feature_placeholders
)
###Output
_____no_output_____
###Markdown
model.py
###Code
%%writefile wgan_module/trainer/model.py
import tensorflow as tf
from . import input
from . import serving
from . import wgan
from .print_object import print_obj
def train_and_evaluate(args):
"""Trains and evaluates custom Estimator model.
Args:
args: dict, user passed parameters.
Returns:
`Estimator` object.
"""
func_name = "train_and_evaluate"
print_obj("\n" + func_name, "args", args)
# Ensure filewriter cache is clear for TensorBoard events file.
tf.summary.FileWriterCache.clear()
# Set logging to be level of INFO.
tf.logging.set_verbosity(tf.logging.INFO)
# Create a RunConfig for Estimator.
config = tf.estimator.RunConfig(
model_dir=args["output_dir"],
save_summary_steps=args["save_summary_steps"],
save_checkpoints_steps=args["save_checkpoints_steps"],
keep_checkpoint_max=args["keep_checkpoint_max"]
)
# Create our custom estimator using our model function.
estimator = tf.estimator.Estimator(
model_fn=wgan.wgan_model,
model_dir=args["output_dir"],
config=config,
params=args
)
# Create train spec to read in our training data.
train_spec = tf.estimator.TrainSpec(
input_fn=input.read_dataset(
filename=args["train_file_pattern"],
mode=tf.estimator.ModeKeys.TRAIN,
batch_size=args["train_batch_size"],
params=args
),
max_steps=args["train_steps"]
)
# Create exporter to save out the complete model to disk.
exporter = tf.estimator.LatestExporter(
name="exporter",
serving_input_receiver_fn=lambda: serving.serving_input_fn(args)
)
# Create eval spec to read in our validation data and export our model.
eval_spec = tf.estimator.EvalSpec(
input_fn=input.read_dataset(
filename=args["eval_file_pattern"],
mode=tf.estimator.ModeKeys.EVAL,
batch_size=args["eval_batch_size"],
params=args
),
steps=args["eval_steps"],
start_delay_secs=args["start_delay_secs"],
throttle_secs=args["throttle_secs"],
exporters=exporter
)
# Create train and evaluate loop to train and evaluate our estimator.
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
return estimator
###Output
_____no_output_____
###Markdown
task.py
###Code
%%writefile wgan_module/trainer/task.py
import argparse
import json
import os
from . import model
def convert_string_to_bool(string):
"""Converts string to bool.
Args:
string: str, string to convert.
Returns:
Boolean conversion of string.
"""
return False if string.lower() == "false" else True
def convert_string_to_none_or_float(string):
"""Converts string to None or float.
Args:
string: str, string to convert.
Returns:
None or float conversion of string.
"""
return None if string.lower() == "none" else float(string)
def convert_string_to_none_or_int(string):
"""Converts string to None or int.
Args:
string: str, string to convert.
Returns:
None or int conversion of string.
"""
return None if string.lower() == "none" else int(string)
def convert_string_to_list_of_ints(string, sep):
"""Converts string to list of ints.
Args:
string: str, string to convert.
sep: str, separator string.
Returns:
List of ints conversion of string.
"""
if not string:
return []
return [int(x) for x in string.split(sep)]
def convert_string_to_list_of_floats(string, sep):
"""Converts string to list of floats.
Args:
string: str, string to convert.
sep: str, separator string.
Returns:
List of floats conversion of string.
"""
if not string:
return []
return [float(x) for x in string.split(sep)]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# File arguments.
parser.add_argument(
"--train_file_pattern",
help="GCS location to read training data.",
required=True
)
parser.add_argument(
"--eval_file_pattern",
help="GCS location to read evaluation data.",
required=True
)
parser.add_argument(
"--output_dir",
help="GCS location to write checkpoints and export models.",
required=True
)
parser.add_argument(
"--job-dir",
help="This model ignores this field, but it is required by gcloud.",
default="junk"
)
# Training parameters.
parser.add_argument(
"--train_batch_size",
help="Number of examples in training batch.",
type=int,
default=32
)
parser.add_argument(
"--train_steps",
help="Number of steps to train for.",
type=int,
default=100
)
parser.add_argument(
"--save_summary_steps",
help="How many steps to train before saving a summary.",
type=int,
default=100
)
parser.add_argument(
"--save_checkpoints_steps",
help="How many steps to train before saving a checkpoint.",
type=int,
default=100
)
parser.add_argument(
"--keep_checkpoint_max",
help="Max number of checkpoints to keep.",
type=int,
default=100
)
parser.add_argument(
"--input_fn_autotune",
help="Whether to autotune input function performance.",
type=str,
default="True"
)
# Eval parameters.
parser.add_argument(
"--eval_batch_size",
help="Number of examples in evaluation batch.",
type=int,
default=32
)
parser.add_argument(
"--eval_steps",
help="Number of steps to evaluate for.",
type=str,
default="None"
)
parser.add_argument(
"--start_delay_secs",
help="Number of seconds to wait before first evaluation.",
type=int,
default=60
)
parser.add_argument(
"--throttle_secs",
help="Number of seconds to wait between evaluations.",
type=int,
default=120
)
# Image parameters.
parser.add_argument(
"--height",
help="Height of image.",
type=int,
default=32
)
parser.add_argument(
"--width",
help="Width of image.",
type=int,
default=32
)
parser.add_argument(
"--depth",
help="Depth of image.",
type=int,
default=3
)
# Generator parameters.
parser.add_argument(
"--latent_size",
help="The latent size of the noise vector.",
type=int,
default=3
)
parser.add_argument(
"--generator_projection_dims",
help="The 3D dimensions to project latent noise vector into.",
type=str,
default="8,8,256"
)
parser.add_argument(
"--generator_num_filters",
help="Number of filters for generator conv layers.",
type=str,
default="128, 64"
)
parser.add_argument(
"--generator_kernel_sizes",
help="Kernel sizes for generator conv layers.",
type=str,
default="5,5"
)
parser.add_argument(
"--generator_strides",
help="Strides for generator conv layers.",
type=str,
default="1,2"
)
parser.add_argument(
"--generator_final_num_filters",
help="Number of filters for final generator conv layer.",
type=int,
default=3
)
parser.add_argument(
"--generator_final_kernel_size",
help="Kernel sizes for final generator conv layer.",
type=int,
default=5
)
parser.add_argument(
"--generator_final_stride",
help="Strides for final generator conv layer.",
type=int,
default=2
)
parser.add_argument(
"--generator_leaky_relu_alpha",
help="The amount of leakyness of generator's leaky relus.",
type=float,
default=0.2
)
parser.add_argument(
"--generator_final_activation",
help="The final activation function of generator.",
type=str,
default="None"
)
parser.add_argument(
"--generator_l1_regularization_scale",
help="Scale factor for L1 regularization for generator.",
type=float,
default=0.0
)
parser.add_argument(
"--generator_l2_regularization_scale",
help="Scale factor for L2 regularization for generator.",
type=float,
default=0.0
)
parser.add_argument(
"--generator_optimizer",
help="Name of optimizer to use for generator.",
type=str,
default="Adam"
)
parser.add_argument(
"--generator_learning_rate",
help="How quickly we train our model by scaling the gradient for generator.",
type=float,
default=0.1
)
parser.add_argument(
"--generator_adam_beta1",
help="Adam optimizer's beta1 hyperparameter for first moment.",
type=float,
default=0.9
)
parser.add_argument(
"--generator_adam_beta2",
help="Adam optimizer's beta2 hyperparameter for second moment.",
type=float,
default=0.999
)
parser.add_argument(
"--generator_adam_epsilon",
help="Adam optimizer's epsilon hyperparameter for numerical stability.",
type=float,
default=1e-8
)
parser.add_argument(
"--generator_rmsprop_decay",
help="RMSProp optimizer's decay hyperparameter for discounting factor for the history/coming gradient.",
type=float,
default=0.9
)
parser.add_argument(
"--generator_rmsprop_momentum",
help="RMSProp optimizer's momentum hyperparameter for first moment.",
type=float,
default=0.999
)
parser.add_argument(
"--generator_rmsprop_epsilon",
help="RMSProp optimizer's epsilon hyperparameter for numerical stability.",
type=float,
default=1e-8
)
parser.add_argument(
"--generator_clip_gradients",
help="Global clipping to prevent gradient norm to exceed this value for generator.",
type=str,
default="None"
)
parser.add_argument(
"--generator_clip_weights",
help="Clip weights within this range for generator.",
type=str,
default="None"
)
parser.add_argument(
"--generator_train_steps",
help="Number of steps to train generator for per cycle.",
type=int,
default=100
)
# Critic parameters.
parser.add_argument(
"--critic_num_filters",
help="Number of filters for critic conv layers.",
type=str,
default="64, 128"
)
parser.add_argument(
"--critic_kernel_sizes",
help="Kernel sizes for critic conv layers.",
type=str,
default="5,5"
)
parser.add_argument(
"--critic_strides",
help="Strides for critic conv layers.",
type=str,
default="1,2"
)
parser.add_argument(
"--critic_dropout_rates",
help="Dropout rates for critic dropout layers.",
type=str,
default="0.3,0.3"
)
parser.add_argument(
"--critic_leaky_relu_alpha",
help="The amount of leakyness of critic's leaky relus.",
type=float,
default=0.2
)
parser.add_argument(
"--critic_l1_regularization_scale",
help="Scale factor for L1 regularization for critic.",
type=float,
default=0.0
)
parser.add_argument(
"--critic_l2_regularization_scale",
help="Scale factor for L2 regularization for critic.",
type=float,
default=0.0
)
parser.add_argument(
"--critic_optimizer",
help="Name of optimizer to use for critic.",
type=str,
default="Adam"
)
parser.add_argument(
"--critic_learning_rate",
help="How quickly we train our model by scaling the gradient for critic.",
type=float,
default=0.1
)
parser.add_argument(
"--critic_adam_beta1",
help="Adam optimizer's beta1 hyperparameter for first moment.",
type=float,
default=0.9
)
parser.add_argument(
"--critic_adam_beta2",
help="Adam optimizer's beta2 hyperparameter for second moment.",
type=float,
default=0.999
)
parser.add_argument(
"--critic_adam_epsilon",
help="Adam optimizer's epsilon hyperparameter for numerical stability.",
type=float,
default=1e-8
)
parser.add_argument(
"--critic_rmsprop_decay",
help="RMSProp optimizer's decay hyperparameter for discounting factor for the history/coming gradient.",
type=float,
default=0.9
)
parser.add_argument(
"--critic_rmsprop_momentum",
help="RMSProp optimizer's momentum hyperparameter for first moment.",
type=float,
default=0.999
)
parser.add_argument(
"--critic_rmsprop_epsilon",
help="RMSProp optimizer's epsilon hyperparameter for numerical stability.",
type=float,
default=1e-8
)
parser.add_argument(
"--critic_clip_gradients",
help="Global clipping to prevent gradient norm to exceed this value for critic.",
type=str,
default="None"
)
parser.add_argument(
"--critic_clip_weights",
help="Clip weights within this range for critic.",
type=str,
default="None"
)
parser.add_argument(
"--critic_train_steps",
help="Number of steps to train critic for per cycle.",
type=int,
default=100
)
# Parse all arguments.
args = parser.parse_args()
arguments = args.__dict__
# Unused args provided by service.
arguments.pop("job_dir", None)
arguments.pop("job-dir", None)
# Fix input_fn_autotune.
arguments["input_fn_autotune"] = convert_string_to_bool(
string=arguments["input_fn_autotune"]
)
# Fix eval steps.
arguments["eval_steps"] = convert_string_to_none_or_int(
string=arguments["eval_steps"])
# Fix generator_projection_dims.
arguments["generator_projection_dims"] = convert_string_to_list_of_ints(
string=arguments["generator_projection_dims"], sep=","
)
# Fix num_filters.
arguments["generator_num_filters"] = convert_string_to_list_of_ints(
string=arguments["generator_num_filters"], sep=","
)
arguments["critic_num_filters"] = convert_string_to_list_of_ints(
string=arguments["critic_num_filters"], sep=","
)
# Fix kernel_sizes.
arguments["generator_kernel_sizes"] = convert_string_to_list_of_ints(
string=arguments["generator_kernel_sizes"], sep=","
)
arguments["critic_kernel_sizes"] = convert_string_to_list_of_ints(
string=arguments["critic_kernel_sizes"], sep=","
)
# Fix strides.
arguments["generator_strides"] = convert_string_to_list_of_ints(
string=arguments["generator_strides"], sep=","
)
arguments["critic_strides"] = convert_string_to_list_of_ints(
string=arguments["critic_strides"], sep=","
)
# Fix critic_dropout_rates.
arguments["critic_dropout_rates"] = convert_string_to_list_of_floats(
string=arguments["critic_dropout_rates"], sep=","
)
# Fix clip_gradients.
arguments["generator_clip_gradients"] = convert_string_to_none_or_float(
string=arguments["generator_clip_gradients"]
)
arguments["critic_clip_gradients"] = convert_string_to_none_or_float(
string=arguments["critic_clip_gradients"]
)
# Fix clip_weights.
arguments["generator_clip_weights"] = convert_string_to_list_of_floats(
string=arguments["generator_clip_weights"], sep=","
)
arguments["critic_clip_weights"] = convert_string_to_list_of_floats(
string=arguments["critic_clip_weights"], sep=","
)
# Append trial_id to path if we are doing hptuning.
# This code can be removed if you are not using hyperparameter tuning.
arguments["output_dir"] = os.path.join(
arguments["output_dir"],
json.loads(
os.environ.get(
"TF_CONFIG", "{}"
)
).get("task", {}).get("trial", ""))
# Run the training job.
model.train_and_evaluate(arguments)
###Output
_____no_output_____ |
posts/developing-a-hierarchical-bayesian-linear-regression-model.ipynb | ###Markdown
In an earlier [post](), I explained how to apply a Bayesian linear regression model to retrieve chlorophyll. Here I use the historically accurate dataset behind the development of NASA OBPG's chlorophyll algorithms.
###Code
import pandas as pd
import matplotlib.pyplot as pl
from sklearn.linear_model import LinearRegression
import re
import os
import numpy as np
import seaborn as sb
from mpl_toolkits.basemap import Basemap
import pymc3 as pm
import warnings
from cmocean import cm
warnings.filterwarnings('ignore')
%matplotlib inline
def ParseTextFile(textFileHandle, topickle=False, convert2DateTime=False, **kwargs):
"""
* topickle: pickle resulting DataFrame if True
* convert2DateTime: join date/time columns and convert entries to datetime objects
* kwargs:
pkl_fname: pickle file name to save DataFrame by, if topickle=True
"""
# Pre-compute some regex
columns = re.compile('^/fields=(.+)') # to get field/column names
units = re.compile('^/units=(.+)') # to get units -- optional
endHeader = re.compile('^/end_header') # to know when to start storing data
# Set some milestones
noFields = True
getData = False
# loop through the text data
for line in textFileHandle:
if noFields:
fieldStr = columns.findall(line)
if len(fieldStr)>0:
noFields = False
fieldList = fieldStr[0].split(',')
dataDict = dict.fromkeys(fieldList)
continue # nothing left to do with this line, keep looping
if not getData:
if endHeader.match(line):
# end of header reached, start acquiring data
getData = True
else:
dataList = line.split(',')
for field,datum in zip(fieldList, dataList):
if not dataDict[field]:
dataDict[field] = []
dataDict[field].append(datum)
df = pd.DataFrame(dataDict, columns=fieldList)
if convert2DateTime:
datetimelabels=['year', 'month', 'day', 'hour', 'minute', 'second']
df['Datetime']= pd.to_datetime(df[datetimelabels],
format='%Y-%m-%dT%H:%M:%S')
df.drop(datetimelabels, axis=1, inplace=True)
if topickle:
fname=kwargs.pop('pkl_fname', 'dfNomad2.pkl')
df.to_pickle(fname)
return df
def FindNaNs(df):
for col in df.columns:
sn = np.where(df[col].values=='NaN', True, False).sum()
        s9 = np.where(df[col].values == '-999', True, False).sum()
print("%s: %d NaNs & %d -999s" % (col, sn, s9))
def FitPoly(X,y, order=4, lin=False):
"""
Numpy regression. Returns coeffs.
kwargs:
        lin: if True, X and y are in linear space and are log-transformed before fitting; otherwise they are assumed to be log-transformed already."""
if lin:
X = np.log10(X)
y = np.log10(y)
coeffs = np.polyfit(X,y,deg=order)
return coeffs
savDir = '/accounts/ekarakoy/DEV-ALL/BLOGS/DataScienceCorner/posts/bayesianChl_stuff/'
with open('/accounts/ekarakoy/DATA/ocprep_v4_iop.txt') as fdata:
df = ParseTextFile(fdata, topickle=True, convert2DateTime=True,
pkl_fname=os.path.join(savDir, 'JeremyOCx_data'))
df.info() # skipping output which shows a lot of unnecessary features for this exercise
###Output
_____no_output_____
###Markdown
Select features I want for this modeling bit.
###Code
basicCols = ['cruise', 'lat', 'lon', 'type', 'chl', 'Datetime']
IwantCols = basicCols + [col for col in df.columns if 'rrs' in col]
dfRrs = df[IwantCols]
swflbls = ['rrs411','rrs443','rrs489','rrs510','rrs555','rrs670']
swfCols = basicCols + swflbls
dfSwf = dfRrs[swfCols]
savDir = '/accounts/ekarakoy/DEV-ALL/BLOGS/DataScienceCorner/posts/bayesianChl_stuff/'
df.to_pickle(os.path.join(savDir, 'dfOcPrepHistoric.pkl'))
dfRrs.to_pickle(os.path.join(savDir, 'dfOcPrepRrs.pkl'))
del df, dfRrs
dfSwf.info() # skipping the output which shows that most columns are object type...
FindNaNs(dfSwf)
dfSwf.replace(to_replace='NaN',value=np.NaN,inplace=True)
dfSwf.dropna(inplace=True)
numCols = ['chl','lat','lon','rrs411','rrs443','rrs489','rrs510','rrs555','rrs670']
dfSwf[numCols] = dfSwf[numCols].apply(pd.to_numeric)
dfSwf.info()
dfSwf['maxBlue'] = dfSwf[['rrs443', 'rrs489', 'rrs510']].max(axis=1)
dfSwf['OCxRatio'] = dfSwf.maxBlue/dfSwf.rrs555
dfLogOCx = pd.DataFrame(columns = ['OCxRatio','chl','type','cruise'])
dfLogOCx.OCxRatio = np.log10(dfSwf.OCxRatio)
dfLogOCx.chl = np.log10(dfSwf.chl)
dfLogOCx[['type','cruise']] = dfSwf[['type','cruise']]
dfSwf.to_pickle(os.path.join(savDir, 'dfSwf'))
dfLogOCx.to_pickle(os.path.join(savDir, 'dfLogOCx'))
sb.set(font_scale=1.5)
g = sb.PairGrid(dfLogOCx, hue='type', vars=['chl','OCxRatio'], size=5,
palette=sb.color_palette("cubehelix",2))
g = g.map_upper(pl.scatter, alpha=0.5, edgecolor='k',linewidth=2)
g = g.map_diag(sb.kdeplot, lw=3)
g = g.map_lower(sb.kdeplot,cmap="Reds_d")
g.add_legend();
f,ax2 = pl.subplots(ncols=2, figsize=(14,6))
sb.violinplot(x='OCxRatio',y='type',data=dfLogOCx, hue='type', ax=ax2[0])
sb.violinplot(x='chl', y='type', data=dfLogOCx, hue='type', ax=ax2[1]);
ax2[0].legend().set_visible(False)
ax2[1].legend().set_visible(False)
dfSwf.type.unique()
###Output
_____no_output_____
###Markdown
Pooled bayesian model:
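For reference (this summary is my own addition, written from the PyMC3 cell that follows): with $X = \log_{10}(\text{maxBlue}/\text{Rrs555})$ and $y = \log_{10}(\text{chl})$, the pooled model is
$$\mu = a_0 + a_1 X + a_2 X^2 + a_3 X^3 + a_4 X^4, \qquad y \sim \mathcal{N}(\mu, \epsilon),$$
with priors $a_i \sim \mathcal{N}(a_i^{\text{OC4v6}}, 10)$ centered on the OC4v6 coefficients and $\epsilon \sim \text{Uniform}(0, 10)$.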
###Code
logChlObs = dfLogOCx.chl.values
logOCxRatio = dfLogOCx.OCxRatio.values
OC4v6_coeffs = {'a0': 0.3272, 'a1': -2.9940, 'a2': 2.7218, 'a3': -1.2259, 'a4': -0.5683}
with pm.Model() as pooled_model:
a0 = pm.Normal('a0', mu=OC4v6_coeffs['a0'], sd=10)
a1 = pm.Normal('a1', mu=OC4v6_coeffs['a1'], sd=10)
a2 = pm.Normal('a2', mu=OC4v6_coeffs['a2'], sd=10)
a3 = pm.Normal('a3', mu=OC4v6_coeffs['a3'], sd=10)
a4 = pm.Normal('a4', mu=OC4v6_coeffs['a4'], sd=10)
epsilon = pm.Uniform('epsilon', lower=0, upper=10)
mu = a0 + a1 * logOCxRatio + a2 * logOCxRatio**2 + a3 *\
logOCxRatio**3 + a4 * logOCxRatio**4
logChlPred = pm.Normal('chlPred', mu=mu, sd=epsilon, observed=logChlObs)
start = pm.find_MAP()
step = pm.NUTS(scaling=start)
traceOCx_pooled = pm.sample(10000, step=step, start=start)
chainOCx_pooled = traceOCx_pooled[1000:]
varnames=['a%d' %d for d in range(5)] + ['epsilon']
#refvals = [chainOCx_pooles['a%d'] % d for d in arange(5)]
#refval = {'a%d' % d: rv for d,rv in zip(range(5), chainOCx_pooled['a%d'] )}
pm.traceplot(chainOCx_pooled,varnames=varnames, grid=True);
cfs = FitPoly(logOCxRatio,logChlObs)
{'a%d' %d:rv for d,rv in zip(range(5),cfs[::-1])}
OC4v6_coeffs
refvals = [chainOCx_pooled['a%d'% d].mean() for d in range(5)]
# bayes means with OC4_v6 mean normal priors
refvals
# bayes means with 0-mean normal priors
refvals
###Output
_____no_output_____ |
How to BRUTE-FORCE a Hash function.ipynb | ###Markdown
How to BRUTE-FORCE a Hash function
*Md. Abrar Jahin*, 2nd year, Khulna University of Engineering and Technology
To be a good hash function H(x), where y = H(x) is the hash value:
1. H must be efficient to compute
2. H must be deterministic
3. y must be random looking
4. H must be resistant to forgery
   * It should be very time consuming to find collisions
   * y should depend on every bit of the input
Using the standard library hashlib module I computed the MD5, SHA1 and SHA256 (that's SHA-2 with a hash size of n=256) of the string "Hello, world!"
###Code
import hashlib
md=hashlib.md5()
md.update(b"Hello, world!")
sha1=hashlib.sha1()
sha1.update(b"Hello, world!")
sha2= hashlib.sha256()
sha2.update(b"Hello, world!")
print(md.hexdigest())
print(sha1.hexdigest())
print(sha2.hexdigest())
###Output
6cd3556deb0da54bca060b4c39479839
943a702d06f34599aee1f8da8ef9f7296031d699
315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3
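###Markdown
As a small illustration of the "y should depend on every bit of the input" property listed above (this cell is my own addition): changing a single character of the message produces a completely different digest.
###Code
import hashlib

# Flip one character and compare the SHA256 digests.
for msg in (b"Hello, world!", b"Hello, world?"):
    print(msg, hashlib.sha256(msg).hexdigest())
###Output
_____no_output_____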
###Markdown
I implemented a hash function `simple_hash` that given a string `s`, computes its hash as follows: it starts with r = 7, and for every character in the string, multiplies r by 31, adds that character to r, and keeps everything modulo 2^16.
###Code
def simple_hash(s):
r = 7
for c in s:
r = (r * 31 + ord(c)) % 2**16
return r
###Output
_____no_output_____
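###Markdown
A quick worked example of the definition above (my own addition): for the two-character string "ab", with ord('a') = 97 and ord('b') = 98, the hash is ((7 * 31 + 97) * 31 + 98) mod 2^16 = 9832 mod 65536 = 9832.
###Code
# Added check: the hand computation matches the function.
print(((7 * 31 + 97) * 31 + 98) % 2**16, simple_hash("ab"))
###Output
_____no_output_____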
###Markdown
I'll now brute-force the hash function that I've just written in the above cell! I've implemented a function `crack` that, given a string s, loops until it finds a different string that collides with it, and returns the different string.
###Code
import random
import string
def get_random_string(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def crack(s):
hash1=simple_hash(s)
for i in range(10*2**16):
s2 = get_random_string(4) # log(2^16)/log(26) ~ 4
        if s2 != s and simple_hash(s2) == hash1:
break
# print(i)
return s2 # return s2 such that s != s2 and simple_hash(s) == simple_hash(s2)
print(crack('hello'))
###Output
myph
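###Markdown
To confirm the collision explicitly (added check): the cracked string differs from 'hello' but hashes to the same 16-bit value.
###Code
# Added verification of the collision returned by crack().
s2 = crack('hello')
print(s2, simple_hash('hello'), simple_hash(s2), simple_hash('hello') == simple_hash(s2))
###Output
_____no_output_____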
###Markdown
The function `weak_md5` is a "weaker" version of MD5, using only the first 5 bytes of the MD5 hash. This means its hash size is n=40 bits and it can be brute-forced rather easily. I implemented a function `find_collisions` that loops over all the possible strings until it finds an arbitrary collision - that is, two different strings whose hash is the same - and returns them (as a tuple).
###Code
import hashlib
import itertools
from itertools import product
import string
def weak_md5(s):
return hashlib.md5(s).digest()[:5]
def find_collisions():
chars = string.ascii_letters + '1234567890'
d = {}
for i in range(40):
generator = itertools.product(chars, repeat = i)
for password in generator:
password = ''.join(password)
h1 = weak_md5(password.encode('utf-8'))
if h1 not in d:
d[h1] = password
else:
return (password, d[h1])
# return (s1, s2) such that s1 != s2 and weak_md5(s1) == weak_md5(s2)
###Output
_____no_output_____
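###Markdown
Running the search (added cell): by the birthday bound, a 40-bit hash needs on the order of sqrt(2^40) = 2^20, i.e. about a million, attempts before a collision appears, so this finishes in a reasonable time.
###Code
# Added: run the search and verify that the returned pair really collides under weak_md5.
s1, s2 = find_collisions()
print(s1, s2, weak_md5(s1.encode('utf-8')) == weak_md5(s2.encode('utf-8')))
###Output
_____no_output_____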
###Markdown
To see how hard it is to brute force a real hash function, I tried running the function that I wrote in the previous cell, but using the full MD5.
###Code
import hashlib
def md5(s):
return hashlib.md5(s).digest()
def find_collisions():
chars = string.ascii_letters + '1234567890'
d = {}
for i in range(40):
generator = itertools.product(chars, repeat = i)
for password in generator:
password = ''.join(password)
            h1 = md5(password.encode('utf-8'))
if h1 not in d:
d[h1] = password
else:
return (password, d[h1])
###Output
_____no_output_____ |
Notebooks/Distribution of predictions.ipynb | ###Markdown
Distribution of predictions
Right now, sums are generated by randomly sampling `n_terms` numbers in the range \[0, 10^`n_digits` - 1\]. The problem with this is that sums summing "around the middle" occur most often. For example, if `n_digits=2` and `n_terms=2`, then sums are from 0+0 to 99+99, giving a range of 0 to 198. Thus sums summing to the midpoint of 99 occur the most often, and very few training examples are generated for sums summing to the lower or higher end. So the point of this notebook is to write functions that generate sums that are uniformly distributed with respect to the sum value.
###Code
import numpy as np
from matplotlib import pyplot as plt
import random
###Output
_____no_output_____
###Markdown
Baseline
###Code
def generate_sample(n_terms, n_digits):
    x = [np.random.randint(10 ** n_digits) for _ in range(n_terms)]
y = np.sum(x)
return x, y
sums = []
x_s = []
for _ in range(10**5):
x, y = generate_sample(3, 2)
x_s.extend(x)
sums.append(y)
plt.figure(figsize=(12, 8))
plt.hist(sums, bins=100);
plt.figure(figsize=(12, 8))
plt.hist(x_s, bins=100);
###Output
_____no_output_____
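###Markdown
The skew above is just counting (added illustration): with `n_terms=2` and `n_digits=2` there is exactly one pair (0, 0) that sums to 0, but 100 pairs that sum to 99.
###Code
from collections import Counter

# Count how many (a, b) pairs with 0 <= a, b <= 99 produce each sum.
pair_counts = Counter(a + b for a in range(100) for b in range(100))
print(pair_counts[0], pair_counts[99], pair_counts[198])  # 1 100 1
###Output
_____no_output_____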
###Markdown
Uniform sampling
###Code
def generate_uniform_sample(n_terms, n_digits, y):
    # Build n_terms numbers that sum exactly to the target y.
    x = []
    while len(x) < n_terms - 1:
        # Each term is capped both by what is left of the target and by the digit range.
        y_upper_bound = y - np.sum(x)
        n_digits_upper_bound = 10 ** n_digits - 1
        upper_bound = min([y_upper_bound, n_digits_upper_bound])
        if upper_bound > 0:
            x.append(np.random.randint(upper_bound+1))
        else:
            x.append(0)
    x.append(y - np.sum(x))  # the last term absorbs whatever remains of the target
    random.shuffle(x)
    return x, y
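# Quick illustrative check: the generated terms always add up to the requested sum.
# (Note: the final term is appended without clipping, so for targets near the
# maximum it can exceed the usual 0..10**n_digits - 1 range of a single term.)
_x, _y = generate_uniform_sample(n_terms=3, n_digits=2, y=150)
assert sum(_x) == _y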
def uniform_samples(n_terms, n_digits):
max_sum = (10**n_digits - 1) * n_terms
possible_sums = range(max_sum + 1)
sums = []
x_s = []
for _ in range(10**5):
x, y = generate_uniform_sample(n_terms, n_digits, np.random.choice(possible_sums))
sums.append(y)
x_s.extend(x)
return x_s, sums
x_s, sums = uniform_samples(n_terms=2, n_digits=2)
plt.figure(figsize=(12, 8))
plt.hist(sums, bins=100);
plt.figure(figsize=(12, 8))
plt.hist(x_s, bins=100);
###Output
_____no_output_____ |
notebooks/OrphanedBlocks.ipynb | ###Markdown
Orphaned Blocks Analyzer
Chart the distribution of orphaned blocks, show top winners and losers.
###Code
import glob
import json
from pandas import DataFrame
from pandas import json_normalize
import pandas
import requests
# Load blocks from disk into dataframe
def load_blocks_from_disk(path_to_blocks="./archive-blocks/"):
block_files = glob.glob(path_to_blocks + "*.json")
blocks = []
for file in block_files:
with open(file) as fp:
blocks.append(json.load(fp))
return blocks
blocks_query = '''
query BlocksQuery {
blocks(limit: 4000) {
protocolState {
consensusState {
slot
blockHeight
blockchainLength
}
}
canonical
creator
stateHash
receivedTime
dateTime
}
}
'''
def load_blocks_from_block_explorer(url="https://graphql.minaexplorer.com/", limit=100):
r = requests.post(url, json={'query': blocks_query})
payload = json.loads(r.text)
blocks = payload["data"]["blocks"]
cleaned = []
for block in blocks:
cleaned.append({
"slot": block["protocolState"]["consensusState"]["slot"],
"blockHeight": block["protocolState"]["consensusState"]["blockHeight"],
"canonical": block["canonical"],
"creator": block["creator"],
"stateHash": block["stateHash"],
"receivedTime": block["receivedTime"],
"dateTime": block["dateTime"],
})
return cleaned
blocks = load_blocks_from_block_explorer()
print(len(blocks))
df = DataFrame(blocks)
display(df)
vc = df["slot"].value_counts().reset_index(name="count")
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 1000)
vc
fullSlots = df.slot.unique()
handicap = 1000
nFullSlots = len(df.slot.unique())
max_slot = 4324
# max_slot - (count of unique slots) = nEmptySlots
emptySlots = max_slot - nFullSlots - handicap
ratioEmpty = emptySlots/(max_slot-handicap)
print(f"Total Slots: {max_slot}")
print(f"Slot Handicap: {handicap}")
print(f"Filled Slots: {nFullSlots}")
print(f"Empty Slots: {emptySlots}")
print(f"Ratio Empty: {ratioEmpty}")
import plotly.express as px
fig = px.bar(vc, x="index", y="count")
fig.show()
###Output
_____no_output_____ |
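###Markdown
The "top winners and losers" mentioned at the top can be summarised with a quick per-creator count. This is a sketch that assumes the `canonical` flag marks the block that stayed on the canonical chain, so non-canonical rows are the orphaned ones:
###Code
# Count canonical vs. orphaned blocks per block producer.
orphaned = df[~df["canonical"]]
canonical_blocks = df[df["canonical"]]
print("Most orphaned blocks (losers):")
print(orphaned["creator"].value_counts().head(10))
print("Most canonical blocks (winners):")
print(canonical_blocks["creator"].value_counts().head(10))
###Output
_____no_output_____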
Pretrain/pretrain.ipynb | ###Markdown
Creation of the environment
###Code
%tensorflow_version 2.x
!pip3 install --upgrade pip
#!pip install -qU t5
!pip3 install git+https://github.com/google-research/text-to-text-transfer-transformer.git #extra_id_x support
import functools
import os
import time
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import t5
#Set the base dir(Google cloud bucket)
BASE_DIR = "gs://bucket_code_completion"
if not BASE_DIR or BASE_DIR == "gs://":
raise ValueError("You must enter a BASE_DIR.")
ON_CLOUD = True
if ON_CLOUD:
import tensorflow_gcs_config
from google.colab import auth
# Set credentials for GCS reading/writing from Colab and TPU.
TPU_TOPOLOGY = "2x2"
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
TPU_ADDRESS = tpu.get_master()
print('Running on TPU:', TPU_ADDRESS)
except ValueError:
raise BaseException('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!')
auth.authenticate_user()
tf.config.experimental_connect_to_host(TPU_ADDRESS)
tensorflow_gcs_config.configure_gcs_from_colab_auth()
tf.disable_v2_behavior()
# Improve logging.
from contextlib import contextmanager
import logging as py_logging
if ON_CLOUD:
tf.get_logger().propagate = False
py_logging.root.setLevel('INFO')
@contextmanager
def tf_verbosity_level(level):
og_level = tf.logging.get_verbosity()
tf.logging.set_verbosity(level)
yield
tf.logging.set_verbosity(og_level)
###Output
Collecting pip
[?25l Downloading https://files.pythonhosted.org/packages/de/47/58b9f3e6f611dfd17fb8bd9ed3e6f93b7ee662fb85bdfee3565e8979ddf7/pip-21.0-py3-none-any.whl (1.5MB)
[K |████████████████████████████████| 1.5MB 6.9MB/s
[?25hInstalling collected packages: pip
Found existing installation: pip 19.3.1
Uninstalling pip-19.3.1:
Successfully uninstalled pip-19.3.1
Successfully installed pip-21.0
Collecting git+https://github.com/google-research/text-to-text-transfer-transformer.git
Cloning https://github.com/google-research/text-to-text-transfer-transformer.git to /tmp/pip-req-build-paaxxk36
Running command git clone -q https://github.com/google-research/text-to-text-transfer-transformer.git /tmp/pip-req-build-paaxxk36
Requirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (0.10.0)
Requirement already satisfied: babel in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (2.9.0)
Requirement already satisfied: gin-config in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (0.4.0)
Collecting mesh-tensorflow[transformer]>=0.1.13
Downloading mesh_tensorflow-0.1.18-py3-none-any.whl (361 kB)
[K |████████████████████████████████| 361 kB 6.7 MB/s
[?25hRequirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (3.2.5)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (1.19.5)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (1.1.5)
Collecting rouge-score
Downloading rouge_score-0.0.4-py2.py3-none-any.whl (22 kB)
Collecting sacrebleu
Downloading sacrebleu-1.5.0-py3-none-any.whl (65 kB)
[K |████████████████████████████████| 65 kB 3.7 MB/s
[?25hRequirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (0.22.2.post1)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (1.4.1)
Collecting sentencepiece
Downloading sentencepiece-0.1.95-cp36-cp36m-manylinux2014_x86_64.whl (1.2 MB)
[K |████████████████████████████████| 1.2 MB 18.2 MB/s
[?25hRequirement already satisfied: six>=1.14 in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (1.15.0)
Collecting tensorflow-text
Downloading tensorflow_text-2.4.3-cp36-cp36m-manylinux1_x86_64.whl (3.4 MB)
[K |████████████████████████████████| 3.4 MB 28.1 MB/s
[?25hCollecting tfds-nightly
Downloading tfds_nightly-4.2.0.dev202101280107-py3-none-any.whl (3.8 MB)
[K |████████████████████████████████| 3.8 MB 74.2 MB/s
[?25hRequirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (from t5==0.8.1) (1.7.0+cu101)
Collecting transformers>=2.7.0
Downloading transformers-4.2.2-py3-none-any.whl (1.8 MB)
[K |████████████████████████████████| 1.8 MB 90.4 MB/s
[?25hRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (0.16.0)
Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.6/dist-packages (from mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (4.0.1)
Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (4.41.1)
Requirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (0.8)
Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (3.4.0)
Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (3.0.12)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (2019.12.20)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (20.8)
Collecting sacremoses
Downloading sacremoses-0.0.43.tar.gz (883 kB)
[K |████████████████████████████████| 883 kB 77.6 MB/s
[?25hRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers>=2.7.0->t5==0.8.1) (2.23.0)
Collecting tokenizers==0.9.4
Downloading tokenizers-0.9.4-cp36-cp36m-manylinux2010_x86_64.whl (2.9 MB)
[K |████████████████████████████████| 2.9 MB 86.0 MB/s
[?25hRequirement already satisfied: pytz>=2015.7 in /usr/local/lib/python3.6/dist-packages (from babel->t5==0.8.1) (2018.9)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata->transformers>=2.7.0->t5==0.8.1) (3.7.4.3)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata->transformers>=2.7.0->t5==0.8.1) (3.4.0)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers>=2.7.0->t5==0.8.1) (2.4.7)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->t5==0.8.1) (2.8.1)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers>=2.7.0->t5==0.8.1) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers>=2.7.0->t5==0.8.1) (2020.12.5)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers>=2.7.0->t5==0.8.1) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers>=2.7.0->t5==0.8.1) (1.24.3)
Collecting portalocker
Downloading portalocker-2.1.0-py2.py3-none-any.whl (13 kB)
Requirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers>=2.7.0->t5==0.8.1) (7.1.2)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers>=2.7.0->t5==0.8.1) (1.0.0)
Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (0.27.0)
Requirement already satisfied: promise in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (2.3)
Requirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (20.3.0)
Requirement already satisfied: importlib-resources in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (5.1.0)
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (3.12.4)
Requirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (0.3.3)
Requirement already satisfied: termcolor in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (1.1.0)
Requirement already satisfied: dm-tree in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (0.1.5)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (51.3.3)
Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-metadata->tensorflow-datasets->mesh-tensorflow[transformer]>=0.1.13->t5==0.8.1) (1.52.0)
Requirement already satisfied: tensorflow-hub>=0.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-text->t5==0.8.1) (0.11.0)
Requirement already satisfied: tensorflow<2.5,>=2.4.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-text->t5==0.8.1) (2.4.1)
Requirement already satisfied: tensorboard~=2.4 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (2.4.1)
Requirement already satisfied: grpcio~=1.32.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.32.0)
Requirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.36.2)
Requirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.12)
Requirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.1.2)
Requirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (3.3.0)
Requirement already satisfied: tensorflow-estimator<2.5.0,>=2.4.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (2.4.0)
Requirement already satisfied: h5py~=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (2.10.0)
Requirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.6.3)
Requirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.2.0)
Requirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.3.3)
Requirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.12.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (3.3.3)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.0.1)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.4.2)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.17.2)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.8.0)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (4.7)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (4.2.1)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.2.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (1.3.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (0.4.8)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow<2.5,>=2.4.0->tensorflow-text->t5==0.8.1) (3.1.0)
Building wheels for collected packages: t5, sacremoses
Building wheel for t5 (setup.py) ... [?25l[?25hdone
Created wheel for t5: filename=t5-0.8.1-py3-none-any.whl size=219997 sha256=a2d8b5da8014968b2541565069260658b0545c00e8e4fb9fd48b026ec9c30c80
Stored in directory: /tmp/pip-ephem-wheel-cache-0_isxo0a/wheels/aa/e1/a1/847d16e451940b1fe89940aa88875c96ae2f7cc63e509e9226
Building wheel for sacremoses (setup.py) ... [?25l[?25hdone
Created wheel for sacremoses: filename=sacremoses-0.0.43-py3-none-any.whl size=893258 sha256=1e7ab957dc7fc3f191b19d8fae8dfa26f0d8b1d070a14fb46fa7d405ac07cc74
Stored in directory: /root/.cache/pip/wheels/49/25/98/cdea9c79b2d9a22ccc59540b1784b67f06b633378e97f58da2
Successfully built t5 sacremoses
Installing collected packages: tokenizers, sacremoses, portalocker, mesh-tensorflow, transformers, tfds-nightly, tensorflow-text, sentencepiece, sacrebleu, rouge-score, t5
Successfully installed mesh-tensorflow-0.1.18 portalocker-2.1.0 rouge-score-0.0.4 sacrebleu-1.5.0 sacremoses-0.0.43 sentencepiece-0.1.95 t5-0.8.1 tensorflow-text-2.4.3 tfds-nightly-4.2.0.dev202101280107 tokenizers-0.9.4 transformers-4.2.2
Running on TPU: grpc://10.108.201.82:8470
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
###Markdown
Path to the TSV file
This variable contains the path to the TSV file used for training, stored on the bucket. Please be sure to insert the correct path.
###Code
nq_tsv_path = {
"train":'gs://bucket_code_completion/T5_extension/data/code.tsv',
"validation":'gs://bucket_code_completion/T5_extension/data/code.tsv',
}
###Output
_____no_output_____
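###Markdown
The loader below parses each line of this TSV as two tab-separated fields (the masked input and the target). As an assumed illustration of that layout only -- using `<extra_id_N>` sentinel tokens as the masking scheme -- a minimal file could be written like this:
###Code
# Illustrative example of the expected two-column TSV layout (not the real dataset).
sample_rows = [
    ("def add ( a , b ) : return <extra_id_0>", "<extra_id_0> a + b"),
    ("for i in <extra_id_0> ( 10 ) : print ( i )", "<extra_id_0> range"),
]
with open("sample_code.tsv", "w") as fout:
    for masked_input, target in sample_rows:
        fout.write(masked_input + "\t" + target + "\n")
###Output
_____no_output_____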
###Markdown
Preprocessing the dataset
In this step we preprocess the dataset. You have to change the paths to the vocabulary files (*vocab_model_path* and *vocab_path*).
###Code
from t5.data import postprocessors as t5_postprocessors
from t5.seqio import Feature,SentencePieceVocabulary
# # Set the path of sentencepiece model and vocab files
vocab_model_path = 'gs://bucket_code_completion/T5_extension/code.model'
vocab_path = 'gs://bucket_code_completion/T5_extension/code.vocab'
TaskRegistry = t5.data.TaskRegistry
TfdsTask = t5.data.TfdsTask
def get_default_vocabulary():
return SentencePieceVocabulary(vocab_model_path, 100)
DEFAULT_OUTPUT_FEATURES = {
"inputs": Feature(
vocabulary=get_default_vocabulary(), add_eos=True, required=False),
"targets": Feature(
vocabulary=get_default_vocabulary(), add_eos=True)
}
def nq_dataset_fn(split, shuffle_files=True):
# We only have one file for each split.
del shuffle_files
# Load lines from the text file as examples.
ds = tf.data.TextLineDataset(nq_tsv_path[split])
ds = ds.map(
functools.partial(tf.io.decode_csv, record_defaults=["string","string"],
field_delim="\t", use_quote_delim=False),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.map(lambda *ex: dict(zip(["input", "output"], ex)))
return ds
print("A few raw train examples...")
for ex in tfds.as_numpy(nq_dataset_fn("train").take(5)):
print(ex)
def preprocessing(ds):
def to_inputs_and_targets(ex):
inputs = tf.strings.join([ ex['input']], separator=' ')
class_label = tf.strings.join([ex['output']], separator=' ')
return {'inputs': inputs, 'targets': class_label }
return ds.map(to_inputs_and_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE)
#Create a new training task
t5.data.TaskRegistry.remove('pretraining')
t5.data.TaskRegistry.add(
"pretraining",
dataset_fn=nq_dataset_fn,
splits=["train", "validation"],
text_preprocessor=[preprocessing],
output_features = DEFAULT_OUTPUT_FEATURES,
metric_fns=[t5.evaluation.metrics.accuracy],
)
nq_task = t5.data.TaskRegistry.get("pretraining")
ds = nq_task.get_dataset(split="train", sequence_length={"inputs": 256, "targets": 256})
print("A few preprocessed training examples...")
for ex in tfds.as_numpy(ds.take(5)):
print(ex)
###Output
/usr/local/lib/python3.6/dist-packages/t5/seqio/preprocessors.py:65: UserWarning: Creating resources inside a function passed to Dataset.map() is not supported. Create each resource outside the function, and capture it inside the function to use it.
_tokenize, num_parallel_calls=tf.data.experimental.AUTOTUNE)
###Markdown
Pretraining the model
You can pretrain the model by running the following two cells. Please set the correct paths for the variables *MODEL_DIR* (where the pretrained model will be saved) and *PATH_GIN_FILE* (the gin configuration file for the pre-training).
###Code
from mesh_tensorflow.transformer.learning_rate_schedules import learning_rate_schedule_noam
#See https://github.com/google-research/text-to-text-transfer-transformer if you want to scale up the model
MODEL_SIZE = "small"
MODEL_DIR = 'gs://bucket_code_completion/T5_extension/pretrained_with_masking'
model_parallelism, train_batch_size, keep_checkpoint_max = {
"small": (1, 256, 16),
"base": (2, 128, 8),
"large": (8, 64, 4),
"3B": (8, 16, 1),
"11B": (8, 16, 1)}[MODEL_SIZE]
tf.io.gfile.makedirs(MODEL_DIR)
model = t5.models.MtfModel(
model_dir=MODEL_DIR,
tpu=TPU_ADDRESS,
tpu_topology=TPU_TOPOLOGY,
model_parallelism=model_parallelism,
batch_size=train_batch_size,
sequence_length={"inputs": 256, "targets": 256},
learning_rate_schedule = learning_rate_schedule_noam,
save_checkpoints_steps=5000,
keep_checkpoint_max=keep_checkpoint_max if ON_CLOUD else None
)
PATH_GIN_FILE = 'gs://bucket_code_completion/T5_extension/pretrain_config/operative_config.gin'
import gin
with gin.unlock_config():
gin.parse_config_file(PATH_GIN_FILE)
TRAIN_STEPS = 200000
model.train("pretraining", steps=TRAIN_STEPS)
###Output
_____no_output_____ |
st_dfb_tests_8s_hmdd.ipynb | ###Markdown
###Code
#@title # Clone the repository and upgrade Keras {display-mode: "form"}
!git clone https://github.com/iamsoroush/DeepEEGAbstractor.git
!pip install --upgrade keras
!rm -r DeepEEGAbstractor
#@title # Imports {display-mode: "form"}
import os
import pickle
import sys
sys.path.append('DeepEEGAbstractor')
import numpy as np
from src.helpers import CrossValidator
from src.models import DeepEEGAbstractor
from src.dataset import DataLoader, Splitter, FixedLenGenerator
from google.colab import drive
drive.mount('/content/gdrive')
#@title # Set data path {display-mode: "form"}
#@markdown ---
#@markdown Type in the folder in your google drive that contains numpy _data_ folder:
parent_dir = 'soroush'#@param {type:"string"}
gdrive_path = os.path.abspath(os.path.join('gdrive/My Drive', parent_dir))
data_dir = os.path.join(gdrive_path, 'data')
cv_results_dir = os.path.join(gdrive_path, 'cross_validation')
if not os.path.exists(cv_results_dir):
os.mkdir(cv_results_dir)
print('Data directory: ', data_dir)
print('Cross validation results dir: ', cv_results_dir)
#@title ## Set Parameters
batch_size = 80
epochs = 100
k = 10
t = 10
instance_duration = 8
instance_overlap = 2
sampling_rate = 256
n_channels = 20
task = 'hmdd'
data_mode = 'cross_subject'
#@title ## DeepEEGAbstractor -Default params
model_name = 'Deep-EEG-Abstractor'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name)
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Without WN
model_name = 'Deep-EEG-Abstractor-NoWN'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
weight_norm=False)
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - BatchNormalization
model_name = 'Deep-EEG-Abstractor-BN'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
normalization='batch')
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - InstanceNormalization
model_name = 'Deep-EEG-Abstractor-IN'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
normalization='instance')
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Deeper
model_name = 'Deep-EEG-Abstractor-Deeper'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
n_kernels=(6, 6, 6, 4, 4))
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Wider
model_name = 'Deep-EEG-Abstractor-Wider'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
n_kernels=(6, 6, 8, 10))
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Attv1
model_name = 'Deep-EEG-Abstractor-Attv1'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
attention='v1')
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Attv2
model_name = 'Deep-EEG-Abstractor-Attv2'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
attention='v2')
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - Attv3
model_name = 'Deep-EEG-Abstractor-Attv3'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
attention='v3')
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - HDropout
model_name = 'Deep-EEG-Abstractor-HDropout'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
spatial_dropout_rate=0.2,
dropout_rate=0.5)
scores = validator.do_cv(model_obj,
data,
labels)
#@title ## DeepEEGAbstractor - InputDropout
model_name = 'Deep-EEG-Abstractor-InputDropout'
train_generator = FixedLenGenerator(batch_size=batch_size,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
duration=instance_duration,
overlap=instance_overlap,
sampling_rate=sampling_rate,
is_train=False)
params = {'task': task,
'data_mode': data_mode,
'main_res_dir': cv_results_dir,
'model_name': model_name,
'epochs': epochs,
'train_generator': train_generator,
'test_generator': test_generator,
't': t,
'k': k,
'channel_drop': True}
validator = CrossValidator(**params)
dataloader = DataLoader(data_dir,
task,
data_mode,
sampling_rate,
instance_duration,
instance_overlap)
data, labels = dataloader.load_data()
input_shape = (sampling_rate * instance_duration,
n_channels)
model_obj = DeepEEGAbstractor(input_shape,
model_name=model_name,
input_dropout=True)
scores = validator.do_cv(model_obj,
data,
labels)
###Output
_____no_output_____ |
kaala-mark2.ipynb | ###Markdown
Features/Attributes
1. Rainfall
2. Temperature
3. Vegetation
4. Potential evapotranspiration
5. Length of growing period as a function of rainfall.
6. Soil storage
7. Soil scape
8. Soil type
9. Current season
10. Companion crops
11. Time for plant to grow

Source: [How to determine the kinds of crops suitable to different types of soil? - ResearchGate](https://www.researchgate.net/post/How_to_determine_the_kinds_of_crops_suitable_to_different_types_of_soil)

Classes/Labels/Crops

CEREALS
1. Rice
2. Jowar (Cholam)
3. Bajra (Cumbu)
4. Ragi

PULSES
9. Bengalgram
10. Redgram

Source: [Season and Crop Report of Tamil Nadu](http://www.tn.gov.in/crop/AreaProduction.htm)

which gives us 6 classes.
###Code
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=(24*60*60*7), n_features=11, n_classes=6,n_informative=5, random_state=42)
pd.Series(y).value_counts()
X.shape
df = pd.DataFrame(X)
df['class'] = y
df.head()
df.shape
df.to_csv('kaala-init.csv', header=None, index=False)
###Output
_____no_output_____
###Markdown
Building the model.
###Code
# helper tools
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
###Output
_____no_output_____
###Markdown
Applying PCA
###Code
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
pca.fit(X)
X_dash = pca.transform(X)
X_train, X_test, y_train, y_test = train_test_split(X_dash, y, test_size=0.2, random_state=69)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors = 9)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print (accuracy_score(y_test, y_pred))
seed = np.random.randint(0, 1000)
seed
X_test[seed]
print(model.predict_proba(X_test[seed].reshape(1, -1)))
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors = 30)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print (accuracy_score(y_test, y_pred))
print(model.predict_proba(X_test[seed].reshape(1, -1)))
###Output
[[ 0. 0.96666667 0. 0. 0. 0.03333333]]
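###Markdown
Since PCA compresses the 11 synthetic features down to 5 components before the KNN step, a quick diagnostic is to check how much of the total variance those components retain:
###Code
# Fraction of total variance captured by the 5 retained principal components.
print(pca.explained_variance_ratio_)
print("total variance retained:", pca.explained_variance_ratio_.sum())
###Output
_____no_output_____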
###Markdown
Now testing with random sample from the dataframe
###Code
df.iloc[[seed]] # selects random observation from the df
pca.transform(df.iloc[[seed], :-1]) # passing only the features of random observation to the PCA to reduce it to 5 componenets
print(model.predict_proba(pca.transform(df.iloc[[seed], :-1])))
###Output
[[ 0. 0.03333333 0. 0. 0.96666667 0. ]]
|
MeanVarianceCorrelation.ipynb | ###Markdown
Expectations of test functions

The expected value of a function $\phi(X): \mathcal{X} \rightarrow \mathcal{R}$ is defined as
$$E[\phi(X)] = \int \phi(X) p(X) dx$$
* Data distribution: $p(X)$
* Test function: $\phi(X)$

Intuitively, this is the average value that the function $\phi$ takes when given random inputs $X$ with a distribution of $p(X)$. Some test functions are special.

Mean: $\phi(X) = X$
$$E[X] = \int p(X) X dx = \int X \mu(dx)$$

Variance: $\phi(X) = (X - E[X])^2$
$$Var[X] = E[(X - E[X])^2] = \int p(X) (X - E[X])^2 dx$$

Covariance: data distribution $p(X, Y)$
$$\phi = (X-E[X])(Y - E[Y])$$
$$Cov[X,Y] = E[(X-E[X])(Y - E[Y])]$$

Correlation Coefficient
$$\rho(X,Y) = \frac{Cov[X,Y]}{\sqrt{Var[X]Var[Y]}}$$
$$-1 \leq \rho \leq 1$$

Empirical distributions

Suppose we are given a dataset $X = \{x_1, x_2, \dots, x_N\}$
$$\tilde{p}(x) = \frac{1}{N}\sum_{i=1}^N \delta(x - x_i)$$

Empirical bivariate distribution

For a dataset of pairs $X = \{(x_1,y_1), (x_2,y_2), \dots, (x_N, y_N)\}$
$$\tilde{p}(x, y) = \frac{1}{N}\sum_{i=1}^N \delta(x - x_i)\delta(y - y_i)$$

Sample average and sample variance

Compute expectations with respect to the empirical distribution:
$$E[x] = \int x \tilde{p}(x) dx = \int x \frac{1}{N}\sum_{i=1}^N \delta(x - x_i) dx = \frac{1}{N}\sum_{i=1}^N x_i \equiv s_1/N$$
$$Var[x] = \int (x-E[x])^2 \tilde{p}(x) dx = E[x^2] - m^2 = \frac{1}{N}\sum_{i=1}^N x^2_i - \frac{1}{N^2}s_1^2 \equiv \frac{1}{N}s_2 - \frac{1}{N^2}s_1^2$$
Here, $m = E[x] = s_1/N$ is the sample mean, and $s_1 = \sum_{i=1}^N x_i$ and $s_2 = \sum_{i=1}^N x_i^2$ are known as the first and second (sample) moments, respectively.

Generative models

A generative model is a computational procedure with random inputs that describes how to simulate a dataset $X$. The model defines a joint distribution of the variables of the dataset and possibly additional hidden (unobserved) variables and parameters $H$ to aid the data generation mechanism, denoted as $p(X, H)$. A new terminology for a generative model is a _probabilistic program_. Given a generative model and a dataset, the posterior distribution over the hidden variables can be computed via Bayesian inference $P(H|X)$. The hidden variables and parameters provide explanations for the observed data.

Generative Model Example
\begin{eqnarray}
w & \sim & \mathcal{U}(0,1) \\
u & = & \cos(2\pi w)
\end{eqnarray}
###Code
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
N = 50
u = np.cos(2*np.pi*np.random.rand(N))
plt.figure(figsize=(6,2))
plt.plot(u, np.zeros_like(u), 'o')
plt.show()
N = 500
u = np.cos(2*np.pi*np.random.rand(N))
plt.figure(figsize=(6,2))
plt.hist(u, bins=30)
plt.show()
###Output
_____no_output_____
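###Markdown
Before moving on, a small numerical check of the sample-moment formulas above: the moments $s_1$ and $s_2$ computed from a random dataset reproduce NumPy's own mean and (biased) variance.
###Code
# Sample mean and (biased) sample variance via the moments s1 and s2.
xs = np.random.randn(1000)
N_s = len(xs)
s1, s2 = xs.sum(), (xs**2).sum()
print(s1 / N_s, np.mean(xs))                       # sample mean, two ways
print(s2 / N_s - (s1 / N_s)**2, np.var(xs))        # biased sample variance, two ways
###Output
_____no_output_____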
###Markdown
Generative Model Example \begin{eqnarray}w & \sim & \mathcal{U}(0,1) \\u & = & \cos(2\pi w) \\e & \sim & \mathcal{N}\left(0, (\sigma u)^2 \left(\begin{array}{cc} 1 & 0\\ 0 & 1\\\end{array}\right) \right) \\x & = & \left(\begin{array}{c} \theta_1 \\ \theta_2 \end{array} \right)u + e\end{eqnarray}
###Code
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
N = 100
sigma = 0.8
theta = np.mat([3,-1]).T
u = np.cos(2*np.pi*np.random.rand(1,N))
X = theta*u
X = X + sigma*u*np.random.randn(X.shape[0],X.shape[1])
plt.figure(figsize=(6,6))
plt.plot(X[0,:],X[1,:],'k.')
plt.show()
import seaborn as sns
import pandas as pd
sns.set(color_codes=True)
plt.figure(figsize=(5,5))
df = pd.DataFrame(X.T, columns=['x','y'])
sns.jointplot(x="x", y="y", data=df);
plt.show()
###Output
_____no_output_____
###Markdown
Generative Model Example\begin{eqnarray}w & \sim & \mathcal{U}(w; 0,2\pi) \\\epsilon & \sim & \mathcal{N}(\epsilon; 0, I) \\u & = & \left(\begin{array}{c} \mu_1 \\ \mu_2 \end{array}\right) + \left(\begin{array}{cc} s_1 & 0 \\ 0& s_2 \end{array}\right) \left(\begin{array}{c} \cos(w) \\ \sin(w) \end{array}\right) + \left(\begin{array}{cc} \sigma_1 & 0 \\ 0& \sigma_2 \end{array}\right) \left(\begin{array}{c} \epsilon_1 \\ \epsilon_2 \end{array}\right)\end{eqnarray}
###Code
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
N = 100
sigma_1 = 0.1
sigma_2 = 0.0
mu_1 = 5
mu_2 = 5
s_1 = 1
s_2 = 3
w = 2*np.pi*np.random.rand(1,N)
u1 = mu_1 + s_1*np.cos(w) + sigma_1*np.random.randn(1,N)
u2 = mu_2 + s_2*np.sin(w) + sigma_2*np.random.randn(1,N)
plt.figure(figsize=(6,6))
plt.plot(u1, u2,'k.')
plt.axis('equal')
plt.show()
for i in range(N):
print('%3.3f %3.3f' % (u1[0,i],u2[0,i] ))
###Output
_____no_output_____
###Markdown
Generative Model Example\begin{eqnarray}w & \sim & \mathcal{U}(0,1) \\u & = & 2 w - 1 \\x|u & \sim & \mathcal{N}\left(x; u^2, r \right) \end{eqnarray}
###Code
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
N = 100
r = 0.01
u = 2*np.random.rand(1,N)-1  # u = 2w - 1 with w ~ U(0,1), as in the model above
x = u**2 + np.sqrt(r)*np.random.randn(1,N)
plt.figure(figsize=(6,6))
plt.plot(u,x,'k.')
plt.xlabel('u')
plt.ylabel('x')
plt.show()
###Output
_____no_output_____
###Markdown
Generative Model Example (Principal Components Analysis)$h \in \mathbb{R}^{D_h}$, $x \in \mathbb{R}^{D_x}$, $A \in \mathbb{R}^{{D_x}\times {D_h}}$, $r\in \mathbb{R}^+$\begin{eqnarray}h & \sim & {\mathcal N}(h; 0, I) \\x|h & \sim & {\mathcal N}(x; A h, rI)\end{eqnarray}
###Code
%matplotlib inline
from IPython.display import display, Math, Latex
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from notes_utilities import pnorm_ball_points
from notes_utilities import mat2latex
import pandas as pd
import seaborn as sns
# Number of points
N = 30
# Parameters
A = np.mat('[3;-1]')
r = 0.1
Dh = 1
Dx = 2
h = np.random.randn(Dh, N)
y = A*h + np.sqrt(r)*np.random.randn(Dx, N)
#sns.jointplot(x=y[0,:], y=y[1,:]);
plt.figure(figsize=(5,5))
plt.scatter(y[0,:],y[1,:])
plt.xlabel('y_0')
plt.ylabel('y_1')
plt.show()
###Output
_____no_output_____
###Markdown
ExampleGenerate a data set as follows\begin{eqnarray}x & \sim & {\mathcal N}(x; 0, 1) \\y|x & \sim & {\mathcal N}(a x, R)\end{eqnarray}How is this model related to the PCA?
###Code
%matplotlib inline
from IPython.display import display, Math, Latex
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from notes_utilities import pnorm_ball_points
from notes_utilities import mat2latex
import pandas as pd
#import seaborn as sns
#sns.set(color_codes=True)
# Number of points
N = 10
# Parameters
a = -0.8
R = 0.1
x = np.random.randn(N)
y = a*x + np.sqrt(R)*np.random.randn(N)
sns.jointplot(x=x, y=y);
###Output
_____no_output_____
###Markdown
We can work out the joint distribution as:\begin{eqnarray}\left(\begin{array}{c} x \\ y \end{array}\right) \sim\mathcal{N}\left( \left(\begin{array}{c} 0 \\ 0 \end{array}\right) , \left(\begin{array}{cc} 1 & a\\ a & a^2 + R \end{array}\right)\right)\end{eqnarray}
###Code
%matplotlib inline
from IPython.display import display, Math, Latex
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from notes_utilities import pnorm_ball_points
from notes_utilities import mat2latex
import pandas as pd
#import seaborn as sns
#sns.set(color_codes=True)
# Number of points
N = 10
# Parameters
a = -0.8
R = 0.1
# Theoretical Covariance
Cov = np.mat([[1,a],[a, a**2+R]])
x = np.random.randn(N)
y = a*x + np.sqrt(R)*np.random.randn(N)
np.set_printoptions(precision=4)
X = np.c_[x,y].T
N = X.shape[1]
print('True Covariance')
display(Math(r'\mu='+mat2latex(np.mat('[0;0]'))))
display(Math(r'\Sigma='+mat2latex(Cov)))
print('The ML Estimates from Data')
mean_est = np.mean(X,axis=1,keepdims=True)
cov_est = np.cov(X,bias=True)
display(Math(r'\bar{m}='+mat2latex(mean_est)))
display(Math(r'\bar{S}='+mat2latex(cov_est)))
print('The estimate when we assume that we know the true mean')
cov2_est = X.dot(X.T)/N
display(Math(r'\bar{\Sigma}='+mat2latex(cov2_est)))
plt.figure(figsize=(8,8))
plt.plot(x, y, '.')
ax = plt.gca()
ax.axis('equal')
ax.set_xlabel('x')
ax.set_ylabel('y')
# True mean and Covariance
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov))
ln = plt.Line2D(dx,dy, color='r')
ln.set_label('True')
ax.add_line(ln)
ln = plt.Line2D([0],[0], color='r', marker='o')
ax.add_line(ln)
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov_est), mu=mean_est)  # ML covariance estimate, centred at the sample mean
ln = plt.Line2D(dx,dy, color='b')
ln.set_label('ML Estimate')
ax.add_line(ln)
ln = plt.Line2D(mean_est[0],mean_est[1], color='b', marker='o')
ax.add_line(ln)
# Estimate conditioned on knowing the true mean
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est))
ln = plt.Line2D(dx,dy, color='g')
ln.set_label('Conditioned on true mean')
ax.add_line(ln)
ln = plt.Line2D([0],[0], color='g', marker='o')
ax.add_line(ln)
Lim = 6
ax.set_ylim([-Lim,Lim])
ax.set_xlim([-Lim,Lim])
ax.legend()
plt.title('Covariance Matrix Estimates')
plt.show()
###Output
True Covariance
###Markdown
Frequentist approach to statistics
* Assume there is a true parameter that we don't know, for example the covariance $\Sigma$
* Construct an estimator (= a function that spits out a parameter value given data) $$\bar{\Sigma} = X^\top X/N$$
* (Conceptually) sample new random datasets from the same distribution for $i=1\dots K$ $$X^{(i)} \sim p(X)$$
* Study the distribution of the estimator -- the output of the estimator is random because the input data is random $$\bar{\Sigma}^{(i)} = {X^{(i)}}^\top X^{(i)}/N$$
###Code
EPOCH = 20
fig = plt.figure(figsize=(6,6))
ax = fig.gca()
Lim = 6
ax.set_ylim([-Lim,Lim])
ax.set_xlim([-Lim,Lim])
for i in range(EPOCH):
x = np.random.randn(N)
y = a*x + np.sqrt(R)*np.random.randn(N)
X = np.c_[x,y].T
cov2_est = X.dot(X.T)/N
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est))
ln = plt.Line2D(dx,dy, color='g')
ax.add_line(ln)
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov))
ln = plt.Line2D(dx,dy, color='r', linewidth=3)
ax.add_line(ln)
plt.show()
###Output
_____no_output_____
###Markdown
Every green ellipse corresponds to an estimated covariance $\Sigma^{(i)}$ from each new dataset $X^{(i)}$ sampled from the data distribution. The picture suggests that the true covariance could be somehow obtained as the average ellipse.An estimator is called unbiased, if the true parameter is exactly the expected value of the estimator. Otherwise, the estimator is called biased.The variance of the estimator is the amount of fluctuation around the mean. Ideally, we wish it to be small, in fact zero. However, obtaining a zero variance turns out to be impossible when the bias is zero. The variance is always greater or equal to a positive quantity called the Cramer-Rao bound. BootstrapIn practice, we have only a single dataset, so we need to approximate the data distribution $p(X)$. The effect of sampling new datasets can be done by sampling data points with replacement. This procedure is known as the bootstrap.Below, we use a dataset of $M+N$
###Code
EPOCH = 100
M = N
x = np.random.randn(N+M)
y = a*x + np.sqrt(R)*np.random.randn(N+M)
fig = plt.figure(figsize=(6,6))
ax = fig.gca()
Lim = 6
ax.set_ylim([-Lim,Lim])
ax.set_xlim([-Lim,Lim])
for i in range(EPOCH):
idx = np.random.permutation(N+M)
X = np.c_[x[idx[0:N]],y[idx[0:N]]].T
cov2_est = X.dot(X.T)/N
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est))
ln = plt.Line2D(dx,dy, color='g')
ax.add_line(ln)
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov))
ln = plt.Line2D(dx,dy, color='r', linewidth=3)
ax.add_line(ln)
plt.show()
###Output
_____no_output_____
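###Markdown
Note that the loop above draws size-$N$ subsets *without* replacement from the pooled $M+N$ points; a textbook bootstrap would instead sample indices *with* replacement, e.g. (a minimal sketch using the same pooled `x`, `y`):
###Code
# One with-replacement bootstrap resample of N points from the pooled data.
idx = np.random.choice(N + M, size=N, replace=True)
X_boot = np.c_[x[idx], y[idx]].T
print(X_boot.dot(X_boot.T) / N)   # covariance estimate from this resample
###Output
_____no_output_____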
###Markdown
Bayesian approach to statistics
- Assume there is only one dataset $X$ -- namely only the one that we have observed
- Postulate a prior for the parameter $p(\Sigma)$
- Compute the posterior $p(\Sigma|X)$
###Code
EPOCH = 20
fig = plt.figure(figsize=(6,6))
ax = fig.gca()
Lim = 6
ax.set_ylim([-Lim,Lim])
ax.set_xlim([-Lim,Lim])
x = np.random.randn(N)
y = a*x + np.sqrt(R)*np.random.randn(N)
X = np.c_[x,y].T
cov2_est = X.dot(X.T)/N
W = np.linalg.cholesky(cov2_est)
plt.plot(x,y,'.')
for i in range(EPOCH):
U = W.dot(np.random.randn(2,N))
S = U.dot(U.T)/N
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(S))
ln = plt.Line2D(dx,dy, color='k')
ax.add_line(ln)
dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov))
ln = plt.Line2D(dx,dy, color='r', linewidth=3)
ax.add_line(ln)
plt.show()
from notes_utilities import mat2latex
print(mat2latex(np.mat([[1,0],[0,1]])))
###Output
\left(\begin{array}{cc} 1 & 0\\ 0 & 1\\\end{array}\right)
|
personal/Lele/analysis notebooks/analysis_durations.ipynb | ###Markdown
First analysis. If you think there are more things to analyze in depth, or if anything isn't clear, just let me know. If you also find something useful that is not listed here, add it.
###Code
import numpy as np
import os
import pandas as pd
from scipy.sparse import *
from tqdm import tqdm
pl = pd.read_csv("../../../dataset/playlists.csv", sep='\t')
pl.head()
pl2 = pl[['pid','num_tracks','duration_ms']]
pl_np = np.squeeze(pl2.as_matrix())
import plotly.plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
# import matplotlib and allow it to plot inline
%matplotlib inline
# seaborn can generate several warnings, we ignore them
import warnings
warnings.filterwarnings("ignore")
sns.set(style="white", color_codes=True)
sns.set_context(rc={"font.family":'sans',"font.size":20,"axes.titlesize":4,"axes.labelsize":24})
num_playlists = [0] *251
duration_playlists = [None] *251
for i in range(251):
num_playlists[i] = len( pl2.loc[pl2['num_tracks'] == i])
duration_playlists[i] = pl2.loc[pl2['num_tracks'] == i]['duration_ms'].as_matrix().copy()
if num_playlists[i]!=len(duration_playlists[i]):
print("error")
duration_playlists
var1 = list()
mean1 = list()
std1 = list()
for i in range(len(num_playlists)):
    # per-track duration statistics; i = 0 yields NaN/inf since there are no tracks to divide by
    var1.append( np.var(duration_playlists[i]/i) )
    mean1.append( np.mean(duration_playlists[i]/i) )
    std1.append( np.std(duration_playlists[i]/i) )
var2 = list()
mean2 = list()
std2 = list()
duration_in_minutes = duration_playlists.copy()
for i in range(len(num_playlists)):
    duration_in_minutes[i] = duration_playlists[i]/1000/60/i  # ms -> minutes per track
    var2.append( np.var(duration_in_minutes[i]))
    mean2.append(np.mean(duration_in_minutes[i]))
    std2.append(np.std(duration_in_minutes[i]))
###Output
_____no_output_____
###Markdown
graphs of duration mean / variance / standard deviation
###Code
import matplotlib.pyplot as plt
plt.figure(dpi=130)
plt.plot(mean2)
plt.ylabel('mean dur in minutes')
plt.show()
import matplotlib.pyplot as plt
plt.figure(dpi=130)
plt.plot(var2)
plt.ylabel('var dur (mins)')
plt.show()
np.argmax(var1[5:251])
var1[211]
import matplotlib.pyplot as plt
plt.figure(dpi=130)
plt.plot(std2)
plt.ylabel('std')
plt.show()
###Output
_____no_output_____
###Markdown
Seems like there are a lot of jazz lovers with 211 songs in their playlists. We might check whether those are strange playlists. I tried a little, but it seems there isn't anything strange. Check the playlists with 211 elements.
###Code
durations_211 = sorted( np.array( pl2.loc[pl2['num_tracks'] == 211]['duration_ms']) /211/60/1000)
plt.hist(durations_211)
durations_99 = sorted( np.array( pl2.loc[pl2['num_tracks'] == 99]['duration_ms']) /99/60/1000)  # average track length (minutes) for 99-track playlists, as a comparison group
plt.hist(durations_99)
pl3 = pl[['pid','num_tracks','duration_ms']]
pl3.head()
pl3.loc[pl3['num_tracks'] == 211].sort_values('duration_ms')
pid_d = pl3.loc[pl3['num_tracks'] == 211].duration_ms
pid = pl3.loc[pl3['num_tracks'] == 211].pid
pid_dur = pid_d.apply( lambda x : x/211/1000/60)
long_211_pls = pd.DataFrame([pid,pid_dur ] ).T.sort_values('duration_ms')
long_211_pls.head()
long_211_pls.describe()
###Output
_____no_output_____ |
notebooks/Ch07 - Text Document Categorization/20_newsgrp_cnn_model.ipynb | ###Markdown
Load Data Sets for 20 News Group
###Code
# Project-specific helpers (Loader, remove_empty_docs, Preprocess, GloVe, DocumentModel,
# TrainingParameters, config) are assumed to come from this chapter's companion code modules.
# Standard imports used by the cells below:
import numpy as np
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import backend as K
from sklearn.manifold import TSNE

dataset = Loader.load_20newsgroup_data(subset='train')
corpus, labels = dataset.data, dataset.target
corpus, labels = remove_empty_docs(corpus, labels)
test_dataset = Loader.load_20newsgroup_data(subset='test')
test_corpus, test_labels = test_dataset.data, test_dataset.target
test_corpus, test_labels = remove_empty_docs(test_corpus, test_labels)
###Output
_____no_output_____
###Markdown
Mapping 20 Groups to 6 High level Categories
###Code
six_groups = {
'comp.graphics':0,'comp.os.ms-windows.misc':0,'comp.sys.ibm.pc.hardware':0,
'comp.sys.mac.hardware':0, 'comp.windows.x':0,
'rec.autos':1, 'rec.motorcycles':1, 'rec.sport.baseball':1, 'rec.sport.hockey':1,
'sci.crypt':2, 'sci.electronics':2,'sci.med':2, 'sci.space':2,
'misc.forsale':3,
'talk.politics.misc':4, 'talk.politics.guns':4, 'talk.politics.mideast':4,
'talk.religion.misc':5, 'alt.atheism':5, 'soc.religion.christian':5
}
map_20_2_6 = [six_groups[dataset.target_names[i]] for i in range(20)]
labels = [six_groups[dataset.target_names[i]] for i in labels]
test_labels = [six_groups[dataset.target_names[i]] for i in test_labels]
###Output
_____no_output_____
###Markdown
Pre-process Text to convert it to word index sequences
###Code
Preprocess.MIN_WD_COUNT=5
preprocessor = Preprocess(corpus=corpus)
corpus_to_seq = preprocessor.fit()
test_corpus_to_seq = preprocessor.transform(test_corpus)
###Output
_____no_output_____
###Markdown
Initialize Embeddings
###Code
glove=GloVe(50)
initial_embeddings = glove.get_embedding(preprocessor.word_index)
###Output
_____no_output_____
###Markdown
Build Model
###Code
newsgrp_model = DocumentModel(vocab_size=preprocessor.get_vocab_size(),
sent_k_maxpool = 5,
sent_filters = 20,
word_kernel_size = 5,
word_index = preprocessor.word_index,
num_sentences=Preprocess.NUM_SENTENCES,
embedding_weights=initial_embeddings,
conv_activation = 'relu',
train_embedding = True,
learn_word_conv = True,
learn_sent_conv = True,
sent_dropout = 0.4,
hidden_dims=64,
input_dropout=0.2,
hidden_gaussian_noise_sd=0.5,
final_layer_kernel_regularizer=0.1,
num_hidden_layers=2,
num_units_final_layer=6)
###Output
_____no_output_____
###Markdown
Save model parameters
###Code
train_params = TrainingParameters('6_newsgrp_largeclass',
model_file_path = config.MODEL_DIR+ '/20newsgroup/model_6_01.hdf5',
model_hyper_parameters = config.MODEL_DIR+ '/20newsgroup/model_6_01.json',
model_train_parameters = config.MODEL_DIR+ '/20newsgroup/model_6_01_meta.json',
num_epochs=20,
batch_size = 128,
validation_split=.10,
learning_rate=0.01)
train_params.save()
newsgrp_model._save_model(train_params.model_hyper_parameters)
###Output
_____no_output_____
###Markdown
Compile and run model
###Code
newsgrp_model._model.compile(loss="categorical_crossentropy",
optimizer=train_params.optimizer,
metrics=["accuracy"])
checkpointer = ModelCheckpoint(filepath=train_params.model_file_path,
verbose=1,
save_best_only=True,
save_weights_only=True)
early_stop = EarlyStopping(patience=2)
x_train = np.array(corpus_to_seq)
y_train = to_categorical(np.array(labels))
x_test = np.array(test_corpus_to_seq)
y_test = to_categorical(np.array(test_labels))
#Set LR
K.set_value(newsgrp_model.get_classification_model().optimizer.lr, train_params.learning_rate)
newsgrp_model.get_classification_model().fit(x_train, y_train,
batch_size=train_params.batch_size,
epochs=train_params.num_epochs,
verbose=2,
validation_split=train_params.validation_split,
callbacks=[checkpointer,early_stop])
newsgrp_model.get_classification_model().evaluate( x_test, y_test, verbose=2)
preds = newsgrp_model.get_classification_model().predict(x_test)
preds_test = np.argmax(preds, axis=1)
###Output
_____no_output_____
###Markdown
Evaluate Model Accuracy
###Code
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(classification_report(test_labels, preds_test))
print(confusion_matrix(test_labels, preds_test))
print(accuracy_score(test_labels, preds_test))
###Output
_____no_output_____
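###Markdown
For readability, the classification report can be repeated with names for the six merged categories (the labels below are descriptive names derived from the `six_groups` mapping, not part of the original dataset):
###Code
# Human-readable names for the six merged categories defined earlier.
six_names = ['computers', 'recreation', 'science', 'forsale', 'politics', 'religion']
print(classification_report(test_labels, preds_test, target_names=six_names))
###Output
_____no_output_____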
###Markdown
Visualization: Document Embeddings with tsne - what the model learned
###Code
from utils import scatter_plot
doc_embeddings = newsgrp_model.get_document_model().predict(x_test)
print(doc_embeddings.shape)
doc_proj = TSNE(n_components=2, random_state=42, ).fit_transform(doc_embeddings)
f, ax, sc, txts = scatter_plot(doc_proj, np.array(test_labels))
f.savefig('nws_grp_embd.png')
###Output
_____no_output_____ |
Drafts/Schedule.ipynb | ###Markdown
(As of September 3rd, this is just for organization and not at all complete or accurate.)

Week 0

Thursday:
* Basic navigation in the Jupyter notebook. Code cells vs markdown cells.
* Importing external libraries.
* Some basic data structures: range, lists, tuples, sets, numpy arrays. What are some similarities and differences?
* Two basic NumPy commands: arange and linspace.
* Timing in Jupyter.

Friday:
* Practice reading error messages and documentation.
* for loops and if statements. Importance of indentation.
* Iterators, Iterable via error messages
* NumPy arrays
* Slicing and indexing

Week 1

Topics:
* more data types: int, str, float, Boolean
* Introduce iterable, hashable, mutable, immutable via error messages and documentation.
* Documentation (the difference between extend and append, sorted, range, np.zeros, np.empty, difference between keyword arguments and positional arguments.)
* while loops
* Dictionaries
* Counters, dictionaries
* list comprehension
* Writing a function: square root, prime numbers, modular arithmetic
* Introduction to Jupyter Notebooks, lists and things similar to lists.
* Introduction to Jupyter Notebooks/Anaconda/Spyder/Terminal
* Comparison to Matlab and Mathematica
* for loops/while loops/if statements
* Lists
* Prime numbers
* Reading documentation: sorted, range, np.zeros, np.empty, difference between keyword arguments and positional arguments.
* Every built-in function in Python
* Loading external libraries
* Introduction to NumPy
* Dictionaries
* Counting in Python
* Probability simulation
* Image processing with Pillow and NumPy

Question 3:
Complete the following code so that the function `my_positive_root(x)` returns a value of y such that $|y - \sqrt[3]{x}| \leq .001.$ You are not allowed to import any libraries. You are only allowed to use integer exponents, like `a**3`, not `a**(1/3)`. You should assume $x > 0$.

def my_positive_root(x):
    a = 0
    while ???:
        a = a + .001
    return ???

Question 4:
Write a new function, `my_cube_root(x)`, that also works for negative values of x. Use an `if` statement and your function from up above. You should literally be typing `my_positive_root`; do not copy and paste your code.
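A minimal sketch of one possible solution to Questions 3 and 4 (assuming the intended stopping rule is to increase `a` until `a**3` reaches `x`):
###Code
def my_positive_root(x):
    # Step a upward in increments of .001 until a**3 reaches x,
    # so the true cube root lies within .001 of the returned value.
    a = 0
    while a**3 < x:
        a = a + .001
    return a

def my_cube_root(x):
    # Handle negative inputs by using the symmetry of the cube root.
    if x >= 0:
        return my_positive_root(x)
    else:
        return -my_positive_root(-x)
###Output
_____no_output_____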
###Code
def replace_elts(A):
m,n = A.shape
for i in range(m):
for j in range(n):
if A[i,j] < 10:
A[i,j] = -2
return A
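# Hypothetical usage example (not in the original draft): replace_elts(np.array([[3, 12], [25, 7]]))
# modifies the array in place and returns array([[-2, 12], [25, -2]]).
# A vectorized NumPy equivalent of the loops above would be: A[A < 10] = -2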
###Output
_____no_output_____ |
06.time-series-anomaly-detection-ecg.ipynb | ###Markdown
Time Series Anomaly Detection using LSTM Autoencoders with PyTorch in Python
###Code
!nvidia-smi
!pip install -qq arff2pandas
!pip install -q -U watermark
!pip install -qq -U pandas
%reload_ext watermark
%watermark -v -p numpy,pandas,torch,arff2pandas
import torch
import copy
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from torch import nn, optim
import torch.nn.functional as F
from arff2pandas import a2p
%matplotlib inline
%config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
###Output
_____no_output_____
###Markdown
In this tutorial, you'll learn how to detect anomalies in Time Series data using an LSTM Autoencoder. You're going to use real-world ECG data from a single patient with heart disease to detect abnormal heartbeats.- [Read the tutorial](https://www.curiousily.com/posts/time-series-anomaly-detection-using-lstm-autoencoder-with-pytorch-in-python/)- [Run the notebook in your browser (Google Colab)](https://colab.research.google.com/drive/1_J2MrBSvsJfOcVmYAN2-WSp36BtsFZCa)- [Read the Getting Things Done with Pytorch book](https://github.com/curiousily/Getting-Things-Done-with-Pytorch)By the end of this tutorial, you'll learn how to:- Prepare a dataset for Anomaly Detection from Time Series Data- Build an LSTM Autoencoder with PyTorch- Train and evaluate your model- Choose a threshold for anomaly detection- Classify unseen examples as normal or anomaly DataThe [dataset](http://timeseriesclassification.com/description.php?Dataset=ECG5000) contains 5,000 Time Series examples (obtained with ECG) with 140 timesteps. Each sequence corresponds to a single heartbeat from a single patient with congestive heart failure.> An electrocardiogram (ECG or EKG) is a test that checks how your heart is functioning by measuring the electrical activity of the heart. With each heart beat, an electrical impulse (or wave) travels through your heart. This wave causes the muscle to squeeze and pump blood from the heart. [Source](https://www.heartandstroke.ca/heart/tests/electrocardiogram)We have 5 types of heartbeats (classes):- Normal (N) - R-on-T Premature Ventricular Contraction (R-on-T PVC)- Premature Ventricular Contraction (PVC)- Supra-ventricular Premature or Ectopic Beat (SP or EB) - Unclassified Beat (UB).> Assuming a healthy heart and a typical rate of 70 to 75 beats per minute, each cardiac cycle, or heartbeat, takes about 0.8 seconds to complete the cycle.Frequency: 60–100 per minute (Humans)Duration: 0.6–1 second (Humans) [Source](https://en.wikipedia.org/wiki/Cardiac_cycle)The dataset is available on my Google Drive. Let's get it:
###Code
!gdown --id 16MIleqoIr1vYxlGk4GKnGmrsCPuWkkpT
!unzip -qq ECG5000.zip
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
The data comes in multiple formats. We'll load the `arff` files into Pandas data frames:
###Code
with open('ECG5000_TRAIN.arff') as f:
train = a2p.load(f)
with open('ECG5000_TEST.arff') as f:
test = a2p.load(f)
###Output
_____no_output_____
###Markdown
We'll combine the training and test data into a single data frame. This will give us more data to train our Autoencoder. We'll also shuffle it:
###Code
df = train.append(test)
df = df.sample(frac=1.0)
df.shape
df.head()
###Output
_____no_output_____
###Markdown
We have 5,000 examples. Each row represents a single heartbeat record. Let's name the possible classes:
###Code
CLASS_NORMAL = 1
class_names = ['Normal','R on T','PVC','SP','UB']
###Output
_____no_output_____
###Markdown
Next, we'll rename the last column to `target`, so it's easier to reference it:
###Code
new_columns = list(df.columns)
new_columns[-1] = 'target'
df.columns = new_columns
###Output
_____no_output_____
###Markdown
Exploratory Data AnalysisLet's check how many examples we have for each heartbeat class:
###Code
df.target.value_counts()
###Output
_____no_output_____
###Markdown
Let's plot the results:
###Code
ax = sns.countplot(df.target)
ax.set_xticklabels(class_names);
###Output
_____no_output_____
###Markdown
The normal class has, by far, the most examples. This is great because we'll use it to train our model.Let's have a look at an averaged (smoothed out with one standard deviation on top and bottom of it) Time Series for each class:
###Code
def plot_time_series_class(data, class_name, ax, n_steps=10):
time_series_df = pd.DataFrame(data)
smooth_path = time_series_df.rolling(n_steps).mean()
path_deviation = 2 * time_series_df.rolling(n_steps).std()
under_line = (smooth_path - path_deviation)[0]
over_line = (smooth_path + path_deviation)[0]
ax.plot(smooth_path, linewidth=2)
ax.fill_between(
path_deviation.index,
under_line,
over_line,
alpha=.125
)
ax.set_title(class_name)
classes = df.target.unique()
fig, axs = plt.subplots(
nrows=len(classes) // 3 + 1,
ncols=3,
sharey=True,
figsize=(14, 8)
)
for i, cls in enumerate(classes):
ax = axs.flat[i]
data = df[df.target == cls] \
.drop(labels='target', axis=1) \
.mean(axis=0) \
.to_numpy()
plot_time_series_class(data, class_names[i], ax)
fig.delaxes(axs.flat[-1])
fig.tight_layout();
###Output
_____no_output_____
###Markdown
It is very good that the normal class has a distinctly different pattern than all other classes. Maybe our model will be able to detect anomalies? LSTM AutoencoderThe [Autoencoder's](https://en.wikipedia.org/wiki/Autoencoder) job is to get some input data, pass it through the model, and obtain a reconstruction of the input. The reconstruction should match the input as much as possible. The trick is to use a small number of parameters, so your model learns a compressed representation of the data.In a sense, Autoencoders try to learn only the most important features (compressed version) of the data. Here, we'll have a look at how to feed Time Series data to an Autoencoder. We'll use a couple of LSTM layers (hence the LSTM Autoencoder) to capture the temporal dependencies of the data.To classify a sequence as normal or an anomaly, we'll pick a threshold above which a heartbeat is considered abnormal. Reconstruction LossWhen training an Autoencoder, the objective is to reconstruct the input as best as possible. This is done by minimizing a loss function (just like in supervised learning). This function is known as *reconstruction loss*. Cross-entropy loss and Mean squared error are common examples. Anomaly Detection in ECG DataWe'll use normal heartbeats as training data for our model and record the *reconstruction loss*. But first, we need to prepare the data: Data PreprocessingLet's get all normal heartbeats and drop the target (class) column:
###Code
normal_df = df[df.target == str(CLASS_NORMAL)].drop(labels='target', axis=1)
normal_df.shape
###Output
_____no_output_____
###Markdown
We'll merge all other classes and mark them as anomalies:
###Code
anomaly_df = df[df.target != str(CLASS_NORMAL)].drop(labels='target', axis=1)
anomaly_df.shape
###Output
_____no_output_____
###Markdown
We'll split the normal examples into train, validation and test sets:
###Code
train_df, val_df = train_test_split(
normal_df,
test_size=0.15,
random_state=RANDOM_SEED
)
val_df, test_df = train_test_split(
val_df,
test_size=0.33,
random_state=RANDOM_SEED
)
###Output
_____no_output_____
###Markdown
We need to convert our examples into tensors, so we can use them to train our Autoencoder. Let's write a helper function for that:
###Code
def create_dataset(df):
sequences = df.astype(np.float32).to_numpy().tolist()
dataset = [torch.tensor(s).unsqueeze(1).float() for s in sequences]
n_seq, seq_len, n_features = torch.stack(dataset).shape
return dataset, seq_len, n_features
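# Note: each element of `dataset` has shape (seq_len, n_features) = (140, 1) for this data;
# torch.stack above works because every ECG sequence has the same length.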
###Output
_____no_output_____
###Markdown
Each Time Series will be converted to a 2D Tensor in the shape *sequence length* x *number of features* (140x1 in our case).Let's create some datasets:
###Code
train_dataset, seq_len, n_features = create_dataset(train_df)
val_dataset, _, _ = create_dataset(val_df)
test_normal_dataset, _, _ = create_dataset(test_df)
test_anomaly_dataset, _, _ = create_dataset(anomaly_df)
###Output
_____no_output_____
###Markdown
LSTM Autoencoder*Sample Autoencoder Architecture [Image Source](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)* The general Autoencoder architecture consists of two components. An *Encoder* that compresses the input and a *Decoder* that tries to reconstruct it.We'll use the LSTM Autoencoder from this [GitHub repo](https://github.com/shobrook/sequitur) with some small tweaks. Our model's job is to reconstruct Time Series data. Let's start with the *Encoder*:
###Code
class Encoder(nn.Module):
def __init__(self, seq_len, n_features, embedding_dim=64):
super(Encoder, self).__init__()
self.seq_len, self.n_features = seq_len, n_features
self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim
self.rnn1 = nn.LSTM(
input_size=n_features,
hidden_size=self.hidden_dim,
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=self.hidden_dim,
hidden_size=embedding_dim,
num_layers=1,
batch_first=True
)
def forward(self, x):
x = x.reshape((1, self.seq_len, self.n_features))
x, (_, _) = self.rnn1(x)
x, (hidden_n, _) = self.rnn2(x)
return hidden_n.reshape((self.n_features, self.embedding_dim))
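# Shape walk-through for Encoder.forward (with embedding_dim=128 as instantiated later, so hidden_dim=256):
#   input x: (140, 1) -> reshaped to (1, 140, 1)
#   rnn1 output: (1, 140, 256); rnn2 final hidden state hidden_n: (1, 1, 128)
#   returned embedding: reshaped to (n_features, embedding_dim) = (1, 128)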
###Output
_____no_output_____
###Markdown
The *Encoder* uses two LSTM layers to compress the Time Series data input.Next, we'll decode the compressed representation using a *Decoder*:
###Code
class Decoder(nn.Module):
def __init__(self, seq_len, input_dim=64, n_features=1):
super(Decoder, self).__init__()
self.seq_len, self.input_dim = seq_len, input_dim
self.hidden_dim, self.n_features = 2 * input_dim, n_features
self.rnn1 = nn.LSTM(
input_size=input_dim,
hidden_size=input_dim,
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=input_dim,
hidden_size=self.hidden_dim,
num_layers=1,
batch_first=True
)
self.output_layer = nn.Linear(self.hidden_dim, n_features)
def forward(self, x):
x = x.repeat(self.seq_len, self.n_features)
x = x.reshape((self.n_features, self.seq_len, self.input_dim))
x, (hidden_n, cell_n) = self.rnn1(x)
x, (hidden_n, cell_n) = self.rnn2(x)
x = x.reshape((self.seq_len, self.hidden_dim))
return self.output_layer(x)
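# Shape walk-through for Decoder.forward (with input_dim=128, so hidden_dim=256):
#   embedding x: (1, 128) -> repeated and reshaped to (1, 140, 128)
#   rnn1 output: (1, 140, 128); rnn2 output: (1, 140, 256)
#   reshaped to (140, 256), then the linear layer maps it to (140, 1), the reconstructed sequence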
###Output
_____no_output_____
###Markdown
Our Decoder contains two LSTM layers and an output layer that gives the final reconstruction.Time to wrap everything into an easy to use module:
###Code
class RecurrentAutoencoder(nn.Module):
def __init__(self, seq_len, n_features, embedding_dim=64):
super(RecurrentAutoencoder, self).__init__()
self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
###Output
_____no_output_____
###Markdown
Our Autoencoder passes the input through the Encoder and Decoder. Let's create an instance of it:
###Code
model = RecurrentAutoencoder(seq_len, n_features, 128)
model = model.to(device)
###Output
_____no_output_____
###Markdown
TrainingLet's write a helper function for our training process:
###Code
def train_model(model, train_dataset, val_dataset, n_epochs):
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.L1Loss(reduction='sum').to(device)
history = dict(train=[], val=[])
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 10000.0
for epoch in range(1, n_epochs + 1):
model = model.train()
train_losses = []
for seq_true in train_dataset:
optimizer.zero_grad()
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred, seq_true)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
val_losses = []
model = model.eval()
with torch.no_grad():
for seq_true in val_dataset:
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred, seq_true)
val_losses.append(loss.item())
train_loss = np.mean(train_losses)
val_loss = np.mean(val_losses)
history['train'].append(train_loss)
history['val'].append(val_loss)
if val_loss < best_loss:
best_loss = val_loss
best_model_wts = copy.deepcopy(model.state_dict())
print(f'Epoch {epoch}: train loss {train_loss} val loss {val_loss}')
model.load_state_dict(best_model_wts)
return model.eval(), history
###Output
_____no_output_____
###Markdown
At each epoch, the training process feeds our model with all training examples and evaluates the performance on the validation set. Note that we're using a batch size of 1 (our model sees only 1 sequence at a time). We also record the training and validation set losses during the process.Note that we're minimizing the [L1Loss](https://pytorch.org/docs/stable/nn.html#l1loss), which measures the MAE (mean absolute error). Why? The reconstructions seem to be better than with MSE (mean squared error).We'll get the version of the model with the smallest validation error. Let's do some training:
###Code
model, history = train_model(
model,
train_dataset,
val_dataset,
n_epochs=150
)
ax = plt.figure().gca()
ax.plot(history['train'])
ax.plot(history['val'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'])
plt.title('Loss over training epochs')
plt.show();
###Output
_____no_output_____
###Markdown
Our model converged quite well. Seems like we might've needed a larger validation set to smooth out the results, but that'll do for now. Saving the modelLet's store the model for later use:
###Code
MODEL_PATH = 'model.pth'
torch.save(model, MODEL_PATH)
###Output
_____no_output_____
###Markdown
Uncomment the next lines, if you want to download and load the pre-trained model:
###Code
# !gdown --id 1jEYx5wGsb7Ix8cZAw3l5p5pOwHs3_I9A
# model = torch.load('model.pth')
# model = model.to(device)
###Output
_____no_output_____
###Markdown
Choosing a thresholdWith our model at hand, we can have a look at the reconstruction error on the training set. Let's start by writing a helper function to get predictions from our model:
###Code
def predict(model, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
model = model.eval()
for seq_true in dataset:
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred, seq_true)
predictions.append(seq_pred.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
###Output
_____no_output_____
###Markdown
Our function goes through each example in the dataset and records the predictions and losses. Let's get the losses and have a look at them:
###Code
_, losses = predict(model, train_dataset)
sns.distplot(losses, bins=50, kde=True);
THRESHOLD = 26
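# THRESHOLD = 26 is read off the loss histogram above. An alternative (an assumption,
# not from the original tutorial) is to derive it from the training-loss distribution, e.g.:
# THRESHOLD = np.quantile(losses, 0.99)  # 99th percentile of training reconstruction losses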
###Output
_____no_output_____
###Markdown
EvaluationUsing the threshold, we can turn the problem into a simple binary classification task:- If the reconstruction loss for an example is below the threshold, we'll classify it as a *normal* heartbeat- Alternatively, if the loss is higher than the threshold, we'll classify it as an anomaly Normal heartbeatsLet's check how well our model does on normal heartbeats. We'll use the normal heartbeats from the test set (our model hasn't seen those):
###Code
predictions, pred_losses = predict(model, test_normal_dataset)
sns.distplot(pred_losses, bins=50, kde=True);
###Output
_____no_output_____
###Markdown
We'll count the correct predictions:
###Code
correct = sum(l <= THRESHOLD for l in pred_losses)
print(f'Correct normal predictions: {correct}/{len(test_normal_dataset)}')
###Output
Correct normal predictions: 142/145
###Markdown
Anomalies We'll do the same with the anomaly examples, but their number is much higher. We'll get a subset that has the same size as the normal heartbeats:
###Code
anomaly_dataset = test_anomaly_dataset[:len(test_normal_dataset)]
###Output
_____no_output_____
###Markdown
Now we can take the predictions of our model for the subset of anomalies:
###Code
predictions, pred_losses = predict(model, anomaly_dataset)
sns.distplot(pred_losses, bins=50, kde=True);
###Output
_____no_output_____
###Markdown
Finally, we can count the number of examples above the threshold (considered as anomalies):
###Code
correct = sum(l > THRESHOLD for l in pred_losses)
print(f'Correct anomaly predictions: {correct}/{len(anomaly_dataset)}')
###Output
Correct anomaly predictions: 142/145
###Markdown
We have very good results. In the real world, you can tweak the threshold depending on what kind of errors you want to tolerate. In this case, you might want to have more false positives (normal heartbeats considered as anomalies) than false negatives (anomalies considered as normal). Looking at ExamplesWe can overlay the real and reconstructed Time Series values to see how close they are. We'll do it for some normal and anomaly cases:
###Code
def plot_prediction(data, model, title, ax):
predictions, pred_losses = predict(model, [data])
ax.plot(data, label='true')
ax.plot(predictions[0], label='reconstructed')
ax.set_title(f'{title} (loss: {np.around(pred_losses[0], 2)})')
ax.legend()
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharey=True,
sharex=True,
figsize=(22, 8)
)
for i, data in enumerate(test_normal_dataset[:6]):
plot_prediction(data, model, title='Normal', ax=axs[0, i])
for i, data in enumerate(test_anomaly_dataset[:6]):
plot_prediction(data, model, title='Anomaly', ax=axs[1, i])
fig.tight_layout();
###Output
_____no_output_____ |
Data Visualization with Python/DV0101EN-1-1-1-Introduction-to-Matplotlib-and-Line-Plots-py-v2.0.ipynb | ###Markdown
Introduction to Matplotlib and Line Plots IntroductionThe aim of these labs is to introduce you to data visualization with Python as concrete and as consistent as possible. Speaking of consistency, because there is no *best* data visualization library avaiblable for Python - up to creating these labs - we have to introduce different libraries and show their benefits when we are discussing new visualization concepts. Doing so, we hope to make students well-rounded with visualization libraries and concepts so that they are able to judge and decide on the best visualitzation technique and tool for a given problem _and_ audience.Please make sure that you have completed the prerequisites for this course, namely **Python for Data Science** and **Data Analysis with Python**, which are part of this specialization. **Note**: The majority of the plots and visualizations will be generated using data stored in *pandas* dataframes. Therefore, in this lab, we provide a brief crash course on *pandas*. However, if you are interested in learning more about the *pandas* library, detailed description and explanation of how to use it and how to clean, munge, and process data stored in a *pandas* dataframe are provided in our course **Data Analysis with Python**, which is also part of this specialization. ------------ Table of Contents1. [Exploring Datasets with *pandas*](0)1.1 [The Dataset: Immigration to Canada from 1980 to 2013](2)1.2 [*pandas* Basics](4) 1.3 [*pandas* Intermediate: Indexing and Selection](6) 2. [Visualizing Data using Matplotlib](8) 2.1 [Matplotlib: Standard Python Visualization Library](10) 3. [Line Plots](12) Exploring Datasets with *pandas* *pandas* is an essential data analysis toolkit for Python. From their [website](http://pandas.pydata.org/):>*pandas* is a Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python.The course heavily relies on *pandas* for data wrangling, analysis, and visualization. We encourage you to spend some time and familizare yourself with the *pandas* API Reference: http://pandas.pydata.org/pandas-docs/stable/api.html. The Dataset: Immigration to Canada from 1980 to 2013 Dataset Source: [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml).The dataset contains annual data on the flows of international immigrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. The current version presents data pertaining to 45 countries.In this lab, we will focus on the Canadian immigration data.For sake of simplicity, Canada's immigration data has been extracted and uploaded to one of IBM servers. You can fetch the data from [here](https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx).--- *pandas* Basics The first thing we'll do is import two key data analysis modules: *pandas* and **Numpy**.
###Code
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
###Output
_____no_output_____
###Markdown
Let's download and import our primary Canadian Immigration dataset using *pandas* `read_excel()` method. Normally, before we can do that, we would need to download a module which *pandas* requires to read in excel files. This module is **xlrd**. For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **xlrd** module:```!conda install -c anaconda xlrd --yes```
###Code
!conda install -c anaconda xlrd --yes
###Output
Solving environment: done
==> WARNING: A newer version of conda exists. <==
current version: 4.5.11
latest version: 4.7.12
Please update conda by running
$ conda update -n base -c defaults conda
## Package Plan ##
environment location: /home/jupyterlab/conda/envs/python
added / updated specs:
- xlrd
The following packages will be downloaded:
package | build
---------------------------|-----------------
openssl-1.1.1 | h7b6447c_0 5.0 MB anaconda
certifi-2019.9.11 | py36_0 154 KB anaconda
xlrd-1.2.0 | py36_0 188 KB anaconda
------------------------------------------------------------
Total: 5.4 MB
The following packages will be UPDATED:
certifi: 2019.6.16-py36_1 conda-forge --> 2019.9.11-py36_0 anaconda
openssl: 1.1.1c-h516909a_0 conda-forge --> 1.1.1-h7b6447c_0 anaconda
xlrd: 1.1.0-py37_1 --> 1.2.0-py36_0 anaconda
Downloading and Extracting Packages
openssl-1.1.1 | 5.0 MB | ##################################### | 100%
certifi-2019.9.11 | 154 KB | ##################################### | 100%
xlrd-1.2.0 | 188 KB | ##################################### | 100%
Preparing transaction: done
Verifying transaction: done
Executing transaction: done
###Markdown
Now we are ready to read in our data.
###Code
df_can = pd.read_excel('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Data_Files/Canada.xlsx',
sheet_name='Canada by Citizenship',
skiprows=range(20),
skipfooter=2)
print ('Data read into a pandas dataframe!')
###Output
Data read into a pandas dataframe!
###Markdown
Let's view the top 5 rows of the dataset using the `head()` function.
###Code
df_can.head()
# tip: You can specify the number of rows you'd like to see as follows: df_can.head(10)
###Output
_____no_output_____
###Markdown
We can also view the bottom 5 rows of the dataset using the `tail()` function.
###Code
df_can.tail()
###Output
_____no_output_____
###Markdown
When analyzing a dataset, it's always a good idea to start by getting basic information about your dataframe. We can do this by using the `info()` method.
###Code
df_can.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 195 entries, 0 to 194
Data columns (total 43 columns):
Type 195 non-null object
Coverage 195 non-null object
OdName 195 non-null object
AREA 195 non-null int64
AreaName 195 non-null object
REG 195 non-null int64
RegName 195 non-null object
DEV 195 non-null int64
DevName 195 non-null object
1980 195 non-null int64
1981 195 non-null int64
1982 195 non-null int64
1983 195 non-null int64
1984 195 non-null int64
1985 195 non-null int64
1986 195 non-null int64
1987 195 non-null int64
1988 195 non-null int64
1989 195 non-null int64
1990 195 non-null int64
1991 195 non-null int64
1992 195 non-null int64
1993 195 non-null int64
1994 195 non-null int64
1995 195 non-null int64
1996 195 non-null int64
1997 195 non-null int64
1998 195 non-null int64
1999 195 non-null int64
2000 195 non-null int64
2001 195 non-null int64
2002 195 non-null int64
2003 195 non-null int64
2004 195 non-null int64
2005 195 non-null int64
2006 195 non-null int64
2007 195 non-null int64
2008 195 non-null int64
2009 195 non-null int64
2010 195 non-null int64
2011 195 non-null int64
2012 195 non-null int64
2013 195 non-null int64
dtypes: int64(37), object(6)
memory usage: 65.6+ KB
###Markdown
To get the list of column headers we can call upon the dataframe's `.columns` parameter.
###Code
df_can.columns.values
###Output
_____no_output_____
###Markdown
Similarly, to get the list of indices we use the `.index` parameter.
###Code
df_can.index.values
###Output
_____no_output_____
###Markdown
Note: The default type of index and columns is NOT list.
###Code
print(type(df_can.columns))
print(type(df_can.index))
###Output
<class 'pandas.core.indexes.base.Index'>
<class 'pandas.core.indexes.range.RangeIndex'>
###Markdown
To get the index and columns as lists, we can use the `tolist()` method.
###Code
df_can.columns.tolist()
df_can.index.tolist()
print (type(df_can.columns.tolist()))
print (type(df_can.index.tolist()))
###Output
<class 'list'>
<class 'list'>
###Markdown
To view the dimensions of the dataframe, we use the `.shape` parameter.
###Code
# size of dataframe (rows, columns)
df_can.shape
###Output
_____no_output_____
###Markdown
Note: The main types stored in *pandas* objects are *float*, *int*, *bool*, *datetime64[ns]* and *datetime64[ns, tz] (in >= 0.17.0)*, *timedelta[ns]*, *category (in >= 0.15.0)*, and *object* (string). In addition these dtypes have item sizes, e.g. int64 and int32. Let's clean the data set to remove a few unnecessary columns. We can use *pandas* `drop()` method as follows:
###Code
# in pandas axis=0 represents rows (default) and axis=1 represents columns.
df_can.drop(['AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)
df_can.head(2)
###Output
_____no_output_____
###Markdown
Let's rename the columns so that they make sense. We can use `rename()` method by passing in a dictionary of old and new names as follows:
###Code
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent', 'RegName':'Region'}, inplace=True)
df_can.columns
###Output
_____no_output_____
###Markdown
We will also add a 'Total' column that sums up the total immigrants by country over the entire period 1980 - 2013, as follows:
###Code
df_can['Total'] = df_can.sum(axis=1)
###Output
_____no_output_____
###Markdown
We can check to see how many null objects we have in the dataset as follows:
###Code
df_can.isnull().sum()
###Output
_____no_output_____
###Markdown
Finally, let's view a quick summary of each column in our dataframe using the `describe()` method.
###Code
df_can.describe()
###Output
_____no_output_____
###Markdown
--- *pandas* Intermediate: Indexing and Selection (slicing) Select Column**There are two ways to filter on a column name:**Method 1: Quick and easy, but only works if the column name does NOT have spaces or special characters.```python df.column_name (returns series)```Method 2: More robust, and can filter on multiple columns.```python df['column'] (returns series)``````python df[['column 1', 'column 2']] (returns dataframe)```--- Example: Let's try filtering on the list of countries ('Country').
###Code
df_can.Country # returns a series
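# Equivalent bracket syntax (needed when a column name contains spaces or special characters):
# df_can['Country']    # returns a Series
# df_can[['Country']]  # returns a DataFrame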
###Output
_____no_output_____
###Markdown
Let's try filtering on the list of countries ('OdName') and the data for years: 1980 - 1985.
###Code
df_can[['Country', 1980, 1981, 1982, 1983, 1984, 1985]] # returns a dataframe
# notice that 'Country' is string, and the years are integers.
# for the sake of consistency, we will convert all column names to string later on.
###Output
_____no_output_____
###Markdown
Select RowThere are two main ways to select rows:```python df.loc[label] filters by the labels of the index/column df.iloc[index] filters by the positions of the index/column``` Before we proceed, notice that the default index of the dataset is a numeric range from 0 to 194. This makes it very difficult to do a query by a specific country. For example, to search for data on Japan, we need to know the corresponding index value.This can be fixed very easily by setting the 'Country' column as the index using the `set_index()` method.
###Code
df_can.set_index('Country', inplace=True)
# tip: The opposite of set is reset. So to reset the index, we can use df_can.reset_index()
df_can.head(3)
# optional: to remove the name of the index
df_can.index.name = None
###Output
_____no_output_____
###Markdown
Example: Let's view the number of immigrants from Japan (row 87) for the following scenarios: 1. The full row data (all columns) 2. For year 2013 3. For years 1980 to 1985
###Code
# 1. the full row data (all columns)
print(df_can.loc['Japan'])
# alternate methods
print(df_can.iloc[87])
print(df_can[df_can.index == 'Japan'].T.squeeze())
# 2. for year 2013
print(df_can.loc['Japan', 2013])
# alternate method
print(df_can.iloc[87, 36]) # year 2013 is the last column, with a positional index of 36
# 3. for years 1980 to 1985
print(df_can.loc['Japan', [1980, 1981, 1982, 1983, 1984, 1985]])
print(df_can.iloc[87, [3, 4, 5, 6, 7, 8]])
###Output
1980 701
1981 756
1982 598
1983 309
1984 246
1985 198
Name: Japan, dtype: object
1980 701
1981 756
1982 598
1983 309
1984 246
1985 198
Name: Japan, dtype: object
###Markdown
Column names that are integers (such as the years) might introduce some confusion. For example, when we are referencing the year 2013, one might confuse that with the 2013th positional index. To avoid this ambiguity, let's convert the column names into strings: '1980' to '2013'.
###Code
df_can.columns = list(map(str, df_can.columns))
# [print (type(x)) for x in df_can.columns.values] #<-- uncomment to check type of column headers
###Output
_____no_output_____
###Markdown
Since we converted the years to string, let's declare a variable that will allow us to easily call upon the full range of years:
###Code
# useful for plotting later on
years = list(map(str, range(1980, 2014)))
years
###Output
_____no_output_____
###Markdown
Filtering based on a criteriaTo filter the dataframe based on a condition, we simply pass the condition as a boolean vector. For example, Let's filter the dataframe to show the data on Asian countries (AreaName = Asia).
###Code
# 1. create the condition boolean series
condition = df_can['Continent'] == 'Asia'
print(condition)
# 2. pass this condition into the dataFrame
df_can[condition]
# we can pass mutliple criteria in the same line.
# let's filter for AreaNAme = Asia and RegName = Southern Asia
df_can[(df_can['Continent']=='Asia') & (df_can['Region']=='Southern Asia')]
# note: When using 'and' and 'or' operators, pandas requires we use '&' and '|' instead of 'and' and 'or'
# don't forget to enclose the two conditions in parentheses
###Output
_____no_output_____
###Markdown
Before we proceed: let's review the changes we have made to our dataframe.
###Code
print('data dimensions:', df_can.shape)
print(df_can.columns)
df_can.head(2)
###Output
data dimensions: (195, 38)
Index(['Continent', 'Region', 'DevName', '1980', '1981', '1982', '1983',
'1984', '1985', '1986', '1987', '1988', '1989', '1990', '1991', '1992',
'1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001',
'2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010',
'2011', '2012', '2013', 'Total'],
dtype='object')
###Markdown
--- Visualizing Data using Matplotlib Matplotlib: Standard Python Visualization LibraryThe primary plotting library we will explore in the course is [Matplotlib](http://matplotlib.org/). As mentioned on their website: >Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shell, the jupyter notebook, web application servers, and four graphical user interface toolkits.If you are aspiring to create impactful visualization with python, Matplotlib is an essential tool to have at your disposal. Matplotlib.PyplotOne of the core aspects of Matplotlib is `matplotlib.pyplot`. It is Matplotlib's scripting layer which we studied in details in the videos about Matplotlib. Recall that it is a collection of command style functions that make Matplotlib work like MATLAB. Each `pyplot` function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc. In this lab, we will work with the scripting layer to learn how to generate line plots. In future labs, we will get to work with the Artist layer as well to experiment first hand how it differs from the scripting layer. Let's start by importing `Matplotlib` and `Matplotlib.pyplot` as follows:
###Code
# we are using the inline backend
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
*optional: check if Matplotlib is loaded.
###Code
print ('Matplotlib version: ', mpl.__version__) # >= 2.0.0
###Output
Matplotlib version: 3.1.1
###Markdown
*optional: apply a style to Matplotlib.
###Code
print(plt.style.available)
mpl.style.use(['ggplot']) # optional: for ggplot-like style
###Output
['Solarize_Light2', '_classic_test', 'bmh', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark-palette', 'seaborn-dark', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'seaborn', 'tableau-colorblind10']
###Markdown
Plotting in *pandas*Fortunately, pandas has a built-in implementation of Matplotlib that we can use. Plotting in *pandas* is as simple as appending a `.plot()` method to a series or dataframe.Documentation:- [Plotting with Series](http://pandas.pydata.org/pandas-docs/stable/api.html#plotting)- [Plotting with Dataframes](http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-plotting) Line Plots (Series/Dataframe) **What is a line plot and why use it?**A line chart or line plot is a type of plot which displays information as a series of data points called 'markers' connected by straight line segments. It is a basic type of chart common in many fields.Use line plots when you have a continuous data set. These are best suited for trend-based visualizations of data over a period of time. **Let's start with a case study:**In 2010, Haiti suffered a catastrophic magnitude 7.0 earthquake. The quake caused widespread devastation and loss of life and about three million people were affected by this natural disaster. As part of Canada's humanitarian effort, the Government of Canada stepped up its effort in accepting refugees from Haiti. We can quickly visualize this effort using a `Line` plot:**Question:** Plot a line graph of immigration from Haiti using `df.plot()`. First, we will extract the data series for Haiti.
###Code
haiti = df_can.loc['Haiti', years] # passing in years 1980 - 2013 to exclude the 'total' column
haiti.head()
###Output
_____no_output_____
###Markdown
Next, we will plot a line plot by appending `.plot()` to the `haiti` dataframe.
###Code
mpl.style.use(['seaborn-bright'])
haiti.plot()
###Output
_____no_output_____
###Markdown
*pandas* automatically populated the x-axis with the index values (years), and the y-axis with the column values (population). However, notice how the years were not displayed because they are of type *string*. Therefore, let's change the type of the index values to *integer* for plotting.Also, let's add a title and label the x and y axes using `plt.title()`, `plt.ylabel()`, and `plt.xlabel()` as follows:
###Code
haiti.index = haiti.index.map(int) # let's change the index values of Haiti to type integer for plotting
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
plt.show() # need this line to show the updates made to the figure
###Output
_____no_output_____
###Markdown
We can clearly notice how the number of immigrants from Haiti spiked up from 2010 as Canada stepped up its efforts to accept refugees from Haiti. Let's annotate this spike in the plot by using the `plt.text()` method.
###Code
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
# annotate the 2010 Earthquake.
# syntax: plt.text(x, y, label)
plt.text(2000, 6000, '2010 Earthquake') # see note below
plt.show()
###Output
_____no_output_____
###Markdown
With just a few lines of code, you were able to quickly identify and visualize the spike in immigration!Quick note on x and y values in `plt.text(x, y, label)`: Since the x-axis (years) is type 'integer', we specified x as a year. The y axis (number of immigrants) is type 'integer', so we can just specify the value y = 6000. ```python plt.text(2000, 6000, '2010 Earthquake') # years stored as type int``` If the years were stored as type 'string', we would need to specify x as the index position of the year. E.g. the 20th index is year 2000 since it is the 20th year with a base year of 1980.```python plt.text(20, 6000, '2010 Earthquake') # years stored as type str``` We will cover advanced annotation methods in later modules. We can easily add more countries to the line plot to make meaningful comparisons of immigration from different countries. **Question:** Let's compare the number of immigrants from India and China from 1980 to 2013. Step 1: Get the data set for China and India, and display dataframe.
###Code
### type your answer here
india_china = df_can.loc[['India','China'],years]
print(india_china.head())
###Output
1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 ... \
India 8880 8670 8147 7338 5704 4211 7150 10189 11522 10343 ...
China 5123 6682 3308 1863 1527 1816 1960 2643 2758 4323 ...
2004 2005 2006 2007 2008 2009 2010 2011 2012 2013
India 28235 36210 33848 28742 28261 29456 34235 27509 30933 33087
China 36619 42584 33518 27642 30037 29622 30391 28502 33024 34129
[2 rows x 34 columns]
###Markdown
Double-click __here__ for the solution.<!-- The correct answer is:df_CI = df_can.loc[['India', 'China'], years]df_CI.head()--> Step 2: Plot graph. We will explicitly specify line plot by passing in `kind` parameter to `plot()`.
###Code
### type your answer here
india_china.plot(kind='line')
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- The correct answer is:df_CI.plot(kind='line')--> That doesn't look right...Recall that *pandas* plots the indices on the x-axis and the columns as individual lines on the y-axis. Since `df_CI` is a dataframe with the `country` as the index and `years` as the columns, we must first transpose the dataframe using `transpose()` method to swap the row and columns.
###Code
df_CI = india_china
df_CI = df_CI.transpose()
df_CI.head()
###Output
_____no_output_____
###Markdown
*pandas* will automatically plot the two countries on the same graph. Go ahead and plot the new transposed dataframe. Make sure to add a title to the plot and label the axes.
###Code
### type your answer here
df_CI.plot(kind='line')
plt.xlabel('Years')
plt.ylabel('Number of Immigrants')
plt.show()
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- The correct answer is:df_CI.index = df_CI.index.map(int) let's change the index values of df_CI to type integer for plottingdf_CI.plot(kind='line')--><!--plt.title('Immigrants from China and India')plt.ylabel('Number of Immigrants')plt.xlabel('Years')--><!--plt.show()--> From the above plot, we can observe that China and India have very similar immigration trends through the years. *Note*: How come we didn't need to transpose Haiti's dataframe before plotting (like we did for df_CI)?That's because `haiti` is a series as opposed to a dataframe, and has the years as its indices as shown below. ```pythonprint(type(haiti))print(haiti.head(5))```>class 'pandas.core.series.Series' >1980 1666 >1981 3692 >1982 3498 >1983 2860 >1984 1418 >Name: Haiti, dtype: int64 Line plot is a handy tool to display several dependent variables against one independent variable. However, it is recommended to plot no more than 5-10 lines on a single graph; any more than that and it becomes difficult to interpret. **Question:** Compare the trend of the top 5 countries that contributed the most to immigration to Canada.
###Code
### type your answer here
df_top_total = df_can[['Total']].sort_values('Total',ascending=False).index
df_top_total = df_can.loc[df_top_total[0:5],years].transpose()
df_top_total.plot(kind='line')
plt.title('Top 5 countries that contributed to immigration in Canada')
plt.xlabel('Years')
plt.ylabel('Number of immigrants')
plt.show()
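# A more concise alternative for picking the top 5 (a suggestion, not from the original lab):
# df_can.nlargest(5, 'Total')[years].transpose().plot(kind='line')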
###Output
_____no_output_____ |
cs109_hw5_submission.ipynb | ###Markdown
CS 109A/STAT 121A/AC 209A/CSCI E-109A: Homework 5 Logistic Regression and PCA **Harvard University****Fall 2017****Instructors**: Pavlos Protopapas, Kevin Rader, Rahul Dave, Margo Levine--- INSTRUCTIONS- To submit your assignment follow the instructions given in canvas.- Restart the kernel and run the whole notebook again before you submit. - Do not include your name(s) in the notebook if you are submitting as a group. - If you submit individually and you have worked with someone, please include the name of your [one] partner below. --- Your partner's name (if you submit separately):Enrollment Status (109A, 121A, 209A, or E109A): 109A Import libraries:
###Code
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.api import OLS
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
%matplotlib inline
###Output
_____no_output_____
###Markdown
Cancer Classification from Gene ExpressionsIn this homework assignment, we will build a classification model to distinguish between two related classes of cancer, acute lymphoblastic leukemia (ALL) and acute myeloid leukemia (AML), using gene expression measurements. The data set is provided in the file `dataset_hw5.csv`. Each row in this file corresponds to a tumor tissue sample from a patient with one of the two forms of Leukemia. The first column contains the cancer type, with 0 indicating the ALL class and 1 indicating the AML class. Columns 2-7130 contain expression levels of 7129 genes recorded from each tissue sample. In the following parts, we will use logistic regression to build a classification model for this data set. We will also use principal components analysis (PCA) to visualize the data and to reduce its dimensions. Part (a): Data Exploration1. First step is to split the observations into an approximate 50-50 train-test split. Below is some code to do this for you (we want to make sure everyone has the same splits).2. Take a peak at your training set: you should notice the severe differences in the measurements from one gene to the next (some are negative, some hover around zero, and some are well into the thousands). To account for these differences in scale and variability, normalize each predictor to vary between 0 and 1.3. Notice that the results training set contains more predictors than observations. Do you foresee a problem in fitting a classification model to such a data set?4. A convenient tool to visualize the gene expression data is a heat map. Arrange the rows of the training set so that the 'AML' rows are grouped together and the 'ALL' rows are together. Generate a heat map of the data with expression values from the following genes: `D49818_at`, `M23161_at`, `hum_alu_at`, `AFFX-PheX-5_at`, `M15990_at`. By observing the heat map, comment on which of these genes are useful in discriminating between the two classes.5. We can also visualize this data set in two dimensions using PCA. Find the top two principal components for the gene expression data. Generate a scatter plot using these principal components, highlighting the AML and ALL points in different colors. How well do the top two principal components discriminate between the two classes?
###Code
# train test split!
np.random.seed(9001)
df = pd.read_csv('dataset_hw5.csv')
msk = np.random.rand(len(df)) < 0.5
data_train = df[msk]
data_test = df[~msk]
###Output
_____no_output_____
###Markdown
In this section I standardize the training data first by doing scaler.fit(xtrain); I then transform both the train and the test sets with this fitted scaler, to ensure I do not allow test information to leak into training and to make sure the training scaling rules are applied to the test set. I opt for standardization because some gene expressions have outliers and these may be significant in later stages of the modeling (normalizing may scale down my predictors to very small numbers if I have large outliers).
###Code
# scaling entire dataset/train and test
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
newXtrain = pd.DataFrame.copy(data_train.loc[:,data_train.columns!='Cancer_type'])
scaler.fit(newXtrain)
Xtrain_scaled = pd.DataFrame(scaler.transform(newXtrain), columns=newXtrain.columns)
Xtrain_scaled=Xtrain_scaled.set_index(data_train.index)
Xtrain_scaled['Cancer_type'] = pd.DataFrame.copy(data_train['Cancer_type'])
newXtest = pd.DataFrame.copy(data_test.loc[:,data_test.columns!='Cancer_type'])
Xtest_scaled = pd.DataFrame(scaler.transform(newXtest), columns=newXtest.columns)
Xtest_scaled=Xtest_scaled.set_index(data_test.index)
Xtest_scaled['Cancer_type'] = pd.DataFrame.copy(data_test['Cancer_type'])
# plot heatmap using seaborn (much easier)
import seaborn as sns
zz = Xtrain_scaled.sort_values(by='Cancer_type', ascending=0)
fig, ax = plt.subplots(figsize=(5,10))
ax = sns.heatmap(zz[['D49818_at','M23161_at', 'hum_alu_at', 'AFFX-PheX-5_at', 'M15990_at']],cmap ='viridis')
ax.set_yticklabels(reversed(zz[['Cancer_type']].values[:,0]))
plt.title('Gene Expression Heatmap against ALL Response')
plt.ylabel('Actual Response (ALL=1, AML =0)')
# PCA section
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
respca = pca.fit_transform(zz.drop('Cancer_type',axis =1))
respca = pd.DataFrame(respca, columns = ['pca1','pca2'])
respca['Cancer_type'] = zz[['Cancer_type']].values
sns.lmplot(x = 'pca1',y='pca2',data = respca,hue = 'Cancer_type',fit_reg = False,size = 10)
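# Optional check (not part of the original submission): variance captured by the top 2 components
# print(pca.explained_variance_ratio_, pca.explained_variance_ratio_.sum())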
###Output
_____no_output_____
###Markdown
Q- Notice that the results training set contains more predictors than observations. Do you foresee a problem in fitting a classification model to such a data set?A- Yes, high probability of over fitting our data (hence high likelihood of large variance e.g. low test score and high training score where score refers to (1-misclassification rate)) Q- A convenient tool to visualize the gene expression data is a heat map. Arrange the rows of the training set so that the 'AML' rows are grouped together and the 'ALL' rows are together. Generate a heat map of the data with expression values from the following genes: D49818_at, M23161_at, hum_alu_at, AFFX-PheX-5_at, M15990_at. By observing the heat map, comment on which of these genes are useful in discriminating between the two classes. A- Out of interest, I ran the model using standardization and normalization and the only core difference between these models was the heatmap generated in this section. Therefore, it depends on the way you standardize/normalize data. When you $\textbf{normalize}$ the data 'M15990_at' and 'M23161_at' are dark almost everywhere so we do not think these two can be good predictors. The others don't seem to have clear demarcations but AFXX seems to be relatively good with darker regions with zeros and lighter with 1's.- When we $\textbf{standardize}$, M23161_at gives us lighter yellows for 0 and darker colors for 1's meaning this could be a good predictor. - Why this difference you ask? Well, we get this difference because when we scale with min max, we really are scaling by outliers in our data therefore, many of the points are being divided by these outliers. For the rest of the problem I have used standardization. Q- How do top 2 PCA components discriminate between 2 classesA- Top 2 pca components can discriminate a decent number of types. For example, in the lower left half (so low pca1 and pca2 values) we tend to get green dots and hence ALL. In the region to the top right of this, we generally get most of the blue dots with some level of misclassification. Therefore, 2 pca components aren't bad at prediction. We will see in a later section that this is quite true. Part (b): Linear Regression vs. Logistic RegressionBegin by analyzing the differences between using linear regression and logistic regression for classification. For this part, you shall work with a single gene predictor: `M23161_at`.1. Fit a simple linear regression model to the training set using the single gene predictor `D29963_at`. We could interpret the scores predicted by regression model interpreted for a patient as an estimate of the probability that the patient has the `ALL` type cancer (class 1). Is there a problem with this interpretation?2. The fitted linear regression model can be converted to a classification model (i.e. a model that predicts one of two binary labels 0 or 1) by classifying patients with predicted score greater than 0.5 into the `ALL` type (class 1), and the others into the `AML` type (class 0). Evaluate the classification accuracy (1 - misclassification rate) of the obtained classification model on both the training and test sets.3. Next, fit a simple logistic regression model to the training set. How does the training and test calssification accuracy of this model compare with the linear regression model? Remember, you need to set the regularization parameter for sklearn's logistic regression function to be a very large value in order not to regularize (use 'C=100000').4. 
Plot the quantitative output from the linear regression model and the probabilistic output from the logistic regression model (on the training set points) as a function of the gene predictor. Also, display the true binary response for the training set points in the same plot.Based on these plots, does one of the models appear better suited for binary classification than the other? Explain.
###Code
# part 1
# extract train and test response
ytrain = Xtrain_scaled[['Cancer_type']].values
ytest = Xtest_scaled[['Cancer_type']].values
# fit a linear regression
linreg = LinearRegression(fit_intercept = True)
linreg.fit(Xtrain_scaled[['D29963_at']],ytrain)
ypred = linreg.predict(Xtrain_scaled[['D29963_at']])
df = pd.DataFrame(np.c_[ypred,ytrain], columns = ['predicted%of1','actual'])
df.T
# use values as %'s and cast to 1's and 0's with 0.5
ypred = ypred[:,0]
ypred[ypred>0.5] = 1
ypred[ypred<=0.5] = 0
print('Train Classification accuracy (linear) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = linreg.predict(Xtest_scaled[['D29963_at']])
ypred = ypred[:,0]
ypred[ypred>0.5] = 1
ypred[ypred<=0.5] = 0
print('Test Classification accuracy (linear) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
# fit logistic regression to gene expre d29963
logreg = LogisticRegression(fit_intercept = True, C = 100000)
logreg.fit(Xtrain_scaled[['D29963_at']],np.ravel(ytrain))
ypred = logreg.predict(Xtrain_scaled[['D29963_at']])
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = logreg.predict(Xtest_scaled[['D29963_at']])
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
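# Equivalent accuracy via sklearn's built-in scorer (an alternative, not required):
# print(logreg.score(Xtest_scaled[['D29963_at']], np.ravel(ytest)))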
# plot probabilities of log and linear regression with True response
xvals = Xtrain_scaled[['D29963_at']]
ypredlin = linreg.predict(Xtrain_scaled[['D29963_at']])
ypredlog = logreg.predict_proba(Xtrain_scaled[['D29963_at']])
ytrue = Xtrain_scaled[['Cancer_type']]
plt.figure(figsize=(10,10))
plt.scatter(xvals,ytrue,label = 'true', c = 'b',alpha = 0.3)
plt.scatter(xvals,ypredlin,label = 'lin',c = 'r',alpha = 0.5)
plt.scatter(xvals,ypredlog[:,1],label = 'log',c='g',alpha = 0.6)
plt.xlabel('D29963_at expression')
plt.ylabel('probability of being ALL')
plt.legend()
###Output
_____no_output_____
###Markdown
Q- Fit a simple linear regression model to the training set using the single gene predictor D29963_at. We could interpret the scores predicted by regression model interpreted for a patient as an estimate of the probability that the patient has the ALL type cancer (class 1). Is there a problem with this interpretation?A- Our model is not restricted in any way to be between 0 and 1, we could have negative predictions or predictions greater than 1 which are obviously not probabilities so while this interpretation allows us to classify we may not generate probabilities. Furthermore, if we had more classification regions, then our predictions can definitely not be interpreted in this way since our response could be in any range (depending on values of response variables).Q- How does the training and test calssification accuracy of this model compare with the linear regression model?A- We can see from the results above that both models generate the same train and test scores. At first instance this seems odd since we were introduced with the notion that logistic regression is a classifier (and should do better) but when we dig deeper we can see that because the gene expression values aren't highly spread (e.g low expression values don't lead to 0's while high expression values don't lead to 1's) these two methods are comparable. A great source explaining the similarities can be found here: https://statisticalhorizons.com/linear-vs-logistic . Essentially, linear regression can often times do just as well if the probabilities don't have much spread and hence the log odds are linear.Q- Based on these plots, does one of the models appear better suited for binary classification than the other? Explain.A- In the center we can see there is similar performance but at the extrema, it depends so from this particular example neither model is 'better' at first glance. However, if the expression values were more spread out, logistic regression would be better since it can do better at boundaries if there is enough spread in expression. This goes back to the discussion listed in the answer to the previous question. Part (c): Multiple Logistic Regression1. Next, fit a multiple logistic regression model with all the gene predictors from the data set. How does the classification accuracy of this model compare with the models fitted in Part (b) with a single gene (on both the training and test sets)? 2. "Use the `visualize_prob` from `HW5_functions.py` to visualize the probabilties predicted by the fitted multiple logistic regression model on both the training and test data sets. The function creates a visualization that places the data points on a vertical line based on the predicted probabilities, with the `ALL` and `AML` classes shown in different colors, and with the 0.5 threshold highlighted using a dotted horizontal line. Is there a difference in the spread of probabilities in the training and test plots? Are there data points for which the predicted probability is close to 0.5? If so, what can you say about these points?"
###Code
# fit to all predictors
colz = Xtrain_scaled.columns[:-1]
xtrain = Xtrain_scaled[colz]
xtest = Xtest_scaled[colz]
multilr = LogisticRegression(fit_intercept = True,C = 10**6)
multilr.fit(xtrain,np.ravel(ytrain))
ypred = multilr.predict(xtrain)
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = multilr.predict(xtest)
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
#-------- visualize_prob
# A function to visualize the probabilities predicted by a Logistic Regression model
# Input:
# model (Logistic regression model)
# x (n x d array of predictors in training data)
# y (n x 1 array of response variable vals in training data: 0 or 1)
# ax (an axis object to generate the plot)
def visualize_prob(model, x, y, ax):
# Use the model to predict probabilities for
y_pred = model.predict_proba(x)
# Separate the predictions on the label 1 and label 0 points
ypos = y_pred[y==1]
yneg = y_pred[y==0]
# Count the number of label 1 and label 0 points
npos = ypos.shape[0]
nneg = yneg.shape[0]
# Plot the probabilities on a vertical line at x = 0,
# with the positive points in blue and negative points in red
pos_handle = ax.plot(np.zeros((npos,1)), ypos[:,1], 'bo', label = 'ALL')
neg_handle = ax.plot(np.zeros((nneg,1)), yneg[:,1], 'ro', label = 'AML')
# Line to mark prob 0.5
ax.axhline(y = 0.5, color = 'k', linestyle = '--')
# Add y-label and legend, do not display x-axis, set y-axis limit
ax.set_ylabel('Probability of ALL class')
ax.legend(loc = 'best')
ax.get_xaxis().set_visible(False)
ax.set_ylim([0,1])
fig,ax = plt.subplots(1,2,figsize=(14,7))
visualize_prob(multilr,xtrain,np.ravel(ytrain),ax[0])
ax[0].set_title('Training Classification')
visualize_prob(multilr,xtest,np.ravel(ytest),ax[1])
ax[1].set_title('Test Classification')
###Output
_____no_output_____
###Markdown
Q- Next, fit a multiple logistic regression model with all the gene predictors from the data set. How does the classification accuracy of this model compare with the models fitted in Part (b) with a single gene (on both the training and test sets)?A- Classification accuracy on both the training and test sets improves, with accuracy on the training set moving to 100% (indicative of some level of overfitting). The test score improves from 0.829 to 0.97, suggesting that the increase in feature space adds some more complexity and improves our test score.Q- "Use the visualize_prob from HW5_functions.py to visualize the probabilities predicted by the fitted multiple logistic regression model on both the training and test data sets. The function creates a visualization that places the data points on a vertical line based on the predicted probabilities, with the ALL and AML classes shown in different colors, and with the 0.5 threshold highlighted using a dotted horizontal line. Is there a difference in the spread of probabilities in the training and test plots? Are there data points for which the predicted probability is close to 0.5? If so, what can you say about these points?"A- There is a difference in the spread of probabilities between training and test. In the test set we can see a wider range of probabilities since our model is overfitting and is not handling 'unseen' data as well as the training data. We can also see some misclassification due to this: some values that are truly ALL are predicted with probabilities less than 0.5.There aren't any points close to 0.5 using standardization, but when I ran this with normalization I did find some points close to 0.5. In that case it seems like those points have an equal likelihood of being classed into AML or ALL even though they are distinctly one type. Therefore, we may want to consider how we could potentially change this probability if we cared about increased accuracy of ALL vs AML, for example. Part (d): Analyzing Significance of CoefficientsHow many of the coefficients estimated by the multiple logistic regression in the previous problem are significantly different from zero at a *significance level of 95%*? Hint: To answer this question, use *bootstrapping* with 100 bootstrap samples/iterations.
###Code
coefsig =[]
def sample(x, y, k):
n = x.shape[0] # No. of training points
# Choose random indices of size 'k'
subset_ind = np.random.choice(np.arange(n), k)
# Get predictors and reponses with the indices
x_subset = x[subset_ind, :]
y_subset = y[subset_ind]
return (x_subset, y_subset)
multilr = LogisticRegression(fit_intercept = True,C = 10**6)
from random import randint
for i in range(100):
xx, yy = sample(xtrain.values,ytrain,32)
multilr.fit(xx,np.ravel(yy))
coefsig.append(multilr.coef_)
coefsig = np.array(coefsig)
avgcoef = np.mean(coefsig,axis = 0)[0,:]
stdcoef = np.std(coefsig,axis = 0)[0,:]
z2 = avgcoef-2*stdcoef
z1 = avgcoef+2*stdcoef
print('Number of Statistically significant values:%s' %(np.shape(z2[z2>0])[0] + np.shape(z1[z1<0])[0]))
###Output
Number of Statistically significant values:1717
###Markdown
- In running the bootstrapping scheme we make no assumptions about our data; however, it is possible that there is correlation within our data, so in many cases a t-test is not identical to a 95% confidence interval from bootstrapping. However, for the purpose of this problem we assume we can use this method. Therefore we bootstrap 100 times, take the mean, and look at ±2 standard deviations; if zero falls inside that interval, the coefficient is not statistically significant. What we find is that 1100 coefficients are statistically significant at this confidence level. If I normalize the data in step 1, I find that the number of significant coefficients is 1690. This can already be used to help us narrow down our feature space. Part (e): Dimensionality Reduction using PCAA reasonable approach to reduce the dimensionality of the data is to use PCA and fit a logistic regression model on the first set of principal components contributing to 90% of the variance in the predictors.1. How do the classification accuracy values on both the training and test sets compare with the models fitted in Parts (c) and (d)? 2. Re-fit a logistic regression model using 5-fold cross-validation to choose the number of principal components, and comment on whether you get better test performance than the model fitted above (explain your observations). 3. Use the code provided in Part (c) to visualize the probabilities predicted by the fitted models on both the training and test sets. How does the spread of probabilities in these plots compare to those for the models in Part (c) and (d)?
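As a side note on the component-selection step below: scikit-learn's PCA can also choose the number of components for a target fraction of explained variance directly, which is a compact alternative to the manual loop. A sketch reusing `xtrain` from above (passing a float between 0 and 1 as `n_components` is standard scikit-learn behaviour):

```python
from sklearn.decomposition import PCA

# keep the smallest number of components whose cumulative explained
# variance reaches 90% (a float n_components requires the 'full' solver)
pca_90 = PCA(n_components=0.90, svd_solver='full')
pca_90.fit(xtrain)
print(pca_90.n_components_, pca_90.explained_variance_ratio_.sum())
```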
###Code
pcavar = []
i = 1
while True:
pca = PCA(n_components = i)
pca.fit(xtrain)
pcavar.append(pca.explained_variance_ratio_.sum())
if (pca.explained_variance_ratio_.sum()) >= 0.9:
break
i+=1
plt.figure(figsize=(10,8))
plt.plot(np.arange(1,25,1),pcavar)
plt.title('#PCA components vs total variance')
plt.ylabel('Variance')
plt.xlabel('# of PCA components')
###Output
_____no_output_____
###Markdown
I choose 24 PCA components since they contribute to 91% of the variance
###Code
multilr = LogisticRegression(fit_intercept = True, C = 10**6)
pca = PCA(n_components = 24)
pca.fit(xtrain)
xpca = pca.transform(xtrain)
xtst = pca.transform(xtest)
multilr.fit(xpca,np.ravel(ytrain))
ypred = multilr.predict(xpca)
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = multilr.predict(xtst)
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
fig,ax = plt.subplots(1,2,figsize=(14,7))
visualize_prob(multilr,xpca,np.ravel(ytrain),ax[0])
ax[0].set_title('Training Classification')
visualize_prob(multilr,xtst,np.ravel(ytest),ax[1])
ax[1].set_title('Test Classification')
lrcv = LogisticRegressionCV(Cs = [10**8],fit_intercept = True,cv = 5)
scores = []
stds = []
for i in range(23):
lrcv.fit(xpca[:,0:i+1],np.ravel(ytrain))
scores.append(lrcv.scores_)
scores = np.array(scores)
lrcv_means = [np.mean(scores[i][1]) for i in range(23)]
stds = [np.std(scores[i][1]) for i in range(23)]
xx = np.arange(1,24,1)
plt.figure(figsize=(12,10))
plt.title('cross validation score vs # of PCA components')
plt.errorbar(xx,lrcv_means,yerr = stds,marker='o',linestyle=None)
np.argmax(lrcv_means) +1
###Output
_____no_output_____
###Markdown
4 principal components yield the highest cross-validation accuracy
###Code
multilr = LogisticRegression(fit_intercept = True, C = 10**6)
multilr.fit(xpca[:,0:4],np.ravel(ytrain))
ypred = multilr.predict(xpca[:,0:4])
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = multilr.predict(xtst[:,0:4])
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
fig,ax = plt.subplots(1,2,figsize=(14,7))
visualize_prob(multilr,xpca[:,0:4],np.ravel(ytrain),ax[0])
ax[0].set_title('Training Classification')
visualize_prob(multilr,xtst[:,0:4],np.ravel(ytest),ax[1])
ax[1].set_title('Test Classification')
###Output
_____no_output_____ |
D60_PCA 觀察_使用手寫辨識資料集/Day_060_PCA.ipynb | ###Markdown
Using the handwritten digit recognition dataset to observe the PCA algorithm [Teaching objectives]- Classify the handwritten digit dataset with PCA + logistic regression and observe how accuracy changes with different numbers of components- Because the effect of unsupervised models is hard to see from simple examples, the examples and assignments for the unsupervised-learning days are mainly meant for observing the effect of unsupervised models; it is enough to get a feel for the model's effect without dwelling on every part of the code [Example highlights]- Using the handwritten digit dataset, observe how the PCA explained variance and the classification accuracy change as the number of PCA components varies (In[5], Out[5])
###Code
# Load required packages
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings("ignore")
# Define the PCA step and the subsequent logistic regression classifier
logistic = SGDClassifier(loss='log', penalty='l2', max_iter=10000, tol=1e-5, random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
# Load the handwritten digits dataset
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Run GridSearchCV first to find the best parameters
param_grid = {
'pca__n_components': [4, 10, 20, 30, 40, 50, 64],
'logistic__alpha': np.logspace(-4, 4, 5),
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5, return_train_score=False)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA explained variance for different numbers of components
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components, linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# Plot the classification accuracy at each sampled number of components
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score', legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
module1-regression-1/Day 21 Notes of Linear Regression.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 1*--- Regression 1- Begin with baselines for regression- Use scikit-learn to fit a linear regression- Explain the coefficients from a linear regression Brandon Rohrer wrote a good blog post, [“What questions can machine learning answer?”](https://brohrer.github.io/five_questions_data_science_answers.html)We’ll focus on two of these questions in Unit 2. These are both types of “supervised learning.”- “How Much / How Many?” (Regression)- “Is this A or B?” (Classification)This unit, you’ll build supervised learning models with “tabular data” (data in tables, like spreadsheets). Including, but not limited to:- Predict New York City real estate prices <-- **Today, we'll start this!**- Predict which water pumps in Tanzania need repairs- Choose your own labeled, tabular dataset, train a predictive model, and publish a blog post or web app with visualizations to explain your model! SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- ipywidgets- pandas- plotly- scikit-learnIf your **Plotly** visualizations aren't working:- You must have JavaScript enabled in your browser- You probably want to use Chrome or Firefox- You may need to turn off ad blockers- [If you're using Jupyter Lab locally, you need to install some "extensions"](https://plot.ly/python/getting-started/jupyterlab-support-python-35)
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
# else:
# DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Begin with baselines for regression Overview Predict how much a NYC condo costs 🏠💸Regression models output continuous numbers, so we can use regression to answer questions like "How much?" or "How many?" Often, the question is "How much will this cost? How many dollars?" For example, here's a fun YouTube video, which we'll use as our scenario for this lesson:[Amateurs & Experts Guess How Much a NYC Condo With a Private Terrace Costs](https://www.youtube.com/watch?v=JQCctBOgH9I)> Real Estate Agent Leonard Steinberg just sold a pre-war condo in New York City's Tribeca neighborhood. We challenged three people - an apartment renter, an apartment owner and a real estate expert - to try to guess how much the apartment sold for. Leonard reveals more and more details to them as they refine their guesses. The condo from the video is **1,497 square feet**, built in 1852, and is in a desirable neighborhood. According to the real estate agent, _"Tribeca is known to be one of the most expensive ZIP codes in all of the United States of America."_How can we guess what this condo sold for? Let's look at 3 methods:1. Heuristics2. Descriptive Statistics3. Predictive Model Follow Along 1. HeuristicsHeuristics are "rules of thumb" that people use to make decisions and judgments. The video participants discussed their heuristics: **Participant 1**, Chinwe, is a real estate amateur. She rents her apartment in New York City. Her first guess was \$8 million, and her final guess was \$15 million.[She said](https://youtu.be/JQCctBOgH9I?t=465), _"People just go crazy for numbers like 1852. You say **'pre-war'** to anyone in New York City, they will literally sell a kidney. They will just give you their children."_ **Participant 3**, Pam, is an expert. She runs a real estate blog. Her first guess was \$1.55 million, and her final guess was \$2.2 million.[She explained](https://youtu.be/JQCctBOgH9I?t=280) her first guess: _"I went with a number that I think is kind of the going rate in the location, and that's **a thousand bucks a square foot.**"_ **Participant 2**, Mubeen, is between the others in his expertise level. He owns his apartment in New York City. His first guess was \$1.7 million, and his final guess was also \$2.2 million. 2. Descriptive Statistics We can use data to try to do better than these heuristics. How much have other Tribeca condos sold for?Let's answer this question with a relevant dataset, containing most of the single residential unit, elevator apartment condos sold in Tribeca, from January through April 2019.We can get descriptive statistics for the dataset's `SALE_PRICE` column.How many condo sales are in this dataset? What was the average sale price? The median? Minimum? Maximum?
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
pd.options.display.float_format = '{:,.0f}'.format
df['SALE_PRICE'].describe()
###Output
_____no_output_____
###Markdown
On average, condos in Tribeca have sold for \$3.9 million. So that could be a reasonable first guess.In fact, here's the interesting thing: **we could use this one number as a "prediction", if we didn't have any data except for sales price...** Imagine we didn't have any other information about condos, then what would you tell somebody? If you had some sales prices like this but you didn't have any of these other columns. If somebody asked you, "How much do you think a condo in Tribeca costs?"You could say, "Well, I've got 90 sales prices here, and I see that on average they cost \$3.9 million."So we do this all the time in the real world. We use descriptive statistics for prediction. And that's not wrong or bad, in fact **that's where you should start. This is called the _mean baseline_.** **Baseline** is an overloaded term, with multiple meanings:1. [**The score you'd get by guessing**](https://twitter.com/koehrsen_will/status/1088863527778111488)2. [**Fast, first models that beat guessing**](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa) 3. **Complete, tuned "simpler" model** (Simpler mathematically, computationally. Or less work for you, the data scientist.)4. **Minimum performance that "matters"** to go to production and benefit your employer and the people you serve.5. **Human-level performance** Baseline type 1 is what we're doing now.(Linear models can be great for 2, 3, 4, and [sometimes even 5 too!](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.188.5825)) ---Let's go back to our mean baseline for Tribeca condos. If we just guessed that every Tribeca condo sold for \$3.9 million, how far off would we be, on average?
###Code
guess = df['SALE_PRICE'].mean()
errors = guess - df['SALE_PRICE']
errors
mean_absolute_error = errors.abs().mean()
print(f'If we just guessed every Tribeca condo sold for ${guess:,.0f},')
print(f'we would be off by ${mean_absolute_error:,.0f} on average.')
###Output
If we just guessed every Tribeca condo sold for $3,928,736,
we would be off by $2,783,380 on average.
###Markdown
That sounds like a lot of error! But fortunately, we can do better than this first baseline — we can use more data. For example, the condo's size.Could sale price be **dependent** on square feet? To explore this relationship, let's make a scatterplot, using [Plotly Express](https://plot.ly/python/plotly-express/):
###Code
import plotly.express as px
px.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE')
###Output
_____no_output_____
###Markdown
3. Predictive ModelTo go from a _descriptive_ [scatterplot](https://www.plotly.express/plotly_express/plotly_express.scatter) to a _predictive_ regression, just add a _line of best fit:_
###Code
px.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE', trendline='ols')
df['GROSS_SQUARE_FEET'].describe()
###Output
_____no_output_____
###Markdown
Roll over the Plotly regression line to see its equation and predictions for sale price, dependent on gross square feet.Linear Regression helps us **interpolate.** For example, in this dataset, there's a gap between 4016 sq ft and 4663 sq ft. There were no 4300 sq ft condos sold, but what price would you predict, using this line of best fit?Linear Regression also helps us **extrapolate.** For example, in this dataset, there were no 6000 sq ft condos sold, but what price would you predict? The line of best fit tries to summarize the relationship between our x variable and y variable in a way that enables us to use the equation for that line to make predictions. **Synonyms for "y variable"**- **Dependent Variable**- Response Variable- Outcome Variable - Predicted Variable- Measured Variable- Explained Variable- **Label**- **Target** **Synonyms for "x variable"**- **Independent Variable**- Explanatory Variable- Regressor- Covariate- Correlate- **Feature** The bolded terminology will be used most often by your instructors this unit. ChallengeIn your assignment, you will practice how to begin with baselines for regression, using a new dataset! Use scikit-learn to fit a linear regression Overview We can use visualization libraries to do simple linear regression ("simple" means there's only one independent variable). But during this unit, we'll usually use the scikit-learn library for predictive models, and we'll usually have multiple independent variables. In [_Python Data Science Handbook,_ Chapter 5.2: Introducing Scikit-Learn](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API), Jake VanderPlas explains **how to structure your data** for scikit-learn:> The best way to think about data within Scikit-Learn is in terms of tables of data. >> >>The features matrix is often stored in a variable named `X`. The features matrix is assumed to be two-dimensional, with shape `[n_samples, n_features]`, and is most often contained in a NumPy array or a Pandas `DataFrame`.>>We also generally work with a label or target array, which by convention we will usually call `y`. The target array is usually one dimensional, with length `n_samples`, and is generally contained in a NumPy array or Pandas `Series`. The target array may have continuous numerical values, or discrete classes/labels. >>The target array is the quantity we want to _predict from the data:_ in statistical terms, it is the dependent variable. VanderPlas also lists a **5 step process** for scikit-learn's "Estimator API":> Every machine learning algorithm in Scikit-Learn is implemented via the Estimator API, which provides a consistent interface for a wide range of machine learning applications.>> Most commonly, the steps in using the Scikit-Learn estimator API are as follows:>> 1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn.> 2. Choose model hyperparameters by instantiating this class with desired values.> 3. Arrange data into a features matrix and target vector following the discussion above.> 4. Fit the model to your data by calling the `fit()` method of the model instance.> 5. Apply the Model to new data: For supervised learning, often we predict labels for unknown data using the `predict()` method.Let's try it! Follow AlongFollow the 5 step process, and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
model = LinearRegression()
# 3. Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = ['SALE_PRICE']
x_train = df[features]
y_train = df[target]
x_train
y_train
# 4. Fit the model
model.fit(x_train, y_train)
# 5. Apply the model to new data
square_feet = 1497
x_test = [[ square_feet ]]
y_pred = model.predict(x_test)
y_pred
# x_test = [ [400],
# [1400],
# [2400] ]
# y_pred = model.predict(x_test)
# y_pred
###Output
_____no_output_____
###Markdown
So, we used scikit-learn to fit a linear regression, and predicted the sales price for a 1,497 square foot Tribeca condo, like the one from the video.Now, what did that condo actually sell for? ___The final answer is revealed in [the video at 12:28](https://youtu.be/JQCctBOgH9I?t=748)!___
###Code
y_test = [2800000]
###Output
_____no_output_____
###Markdown
What was the error for our prediction, versus the video participants?Let's use [scikit-learn's mean absolute error function](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html).
###Code
chinwe_final_guess = [15000000]
mubeen_final_guess = [2200000]
pam_final_guess = [2200000]
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_test, y_pred)
print ("Our model's error", mae)
y_pred
###Output
_____no_output_____
###Markdown
This [diagram](https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/tutorial/text_analytics/general_concepts.html#supervised-learning-model-fit-x-y) shows what we just did! Don't worry about understanding it all now. But can you start to match some of these boxes/arrows to the corresponding lines of code from above? Here's [another diagram](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/), which shows how machine learning is a "new programming paradigm":> A machine learning system is "trained" rather than explicitly programmed. It is presented with many "examples" relevant to a task, and it finds statistical structure in these examples which eventually allows the system to come up with rules for automating the task. —[Francois Chollet](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/) Wait, are we saying that *linear regression* could be considered a *machine learning algorithm*? Maybe it depends? What do you think? We'll discuss throughout this unit. ChallengeIn your assignment, you will use scikit-learn for linear regression with one feature. For a stretch goal, you can do linear regression with two or more features. Explain the coefficients from a linear regression OverviewWhat pattern did the model "learn", about the relationship between square feet & price? Follow Along To help answer this question, we'll look at the `coef_` and `intercept_` attributes of the `LinearRegression` object. (Again, [here's the documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).)
###Code
model.coef_
model.intercept_
###Output
_____no_output_____
###Markdown
We can repeatedly apply the model to new/unknown data, and explain the coefficient:
###Code
def predict(square_feet):
y_pred = model.predict([[square_feet]])
estimate = y_pred[0]
coefficient = model.coef_[0]
print (int(estimate), "is our estimated price for", int(square_feet), "square foot condo in Tribeca.")
print(predict(1497))
# What does the model predict for low square footage?
print(predict(500))
# For high square footage?
print(predict(10000))
###Output
29259112 is our estimated price for 10000 square foot condo in Tribeca.
None
|
examples/rbm_optimization.ipynb | ###Markdown
The setup Define a graph to run QAOA on:
###Code
G = nx.random_regular_graph(d=3, n=12, seed=12345)
nx.draw_kamada_kawai(G, node_color='gold', node_size=500)
###Output
_____no_output_____
###Markdown
For $p>1$, provided we have a small graph, we can find optimal angles exactly:
###Code
qaoa = QAOA(G, p=2)
%%time
angles, costs = qaoa.optimize(init=[np.pi/8, np.pi/8, -np.pi/8, -np.pi/8], tol=1e-4)
fig, ax = plt.subplots(figsize=[8,5])
ax.plot(costs)
ax.set_xlabel('Iteration step', fontsize=20)
ax.set_ylabel(r'$\langle \mathcal{C} \rangle $', fontsize=30)
gammas, betas = np.split(angles, 2)
gammas[0] # \gamma _1
gammas[1] # \gamma _2
betas[0] # \beta _1
betas[1] # \beta _2
###Output
_____no_output_____
###Markdown
Initialize an RBM ansatz with $N=12$ visible units, the same number as the underlying graph
###Code
logpsi = RBM(12)
###Output
_____no_output_____
###Markdown
Exactly apply $U_C (\gamma _1) = \exp \left( -i \gamma _1 \sum _{\langle i, j \rangle } Z_i Z_j \right)$
###Code
logpsi.UC(G, gamma=gammas[0], mask=False)
###Output
_____no_output_____
###Markdown
The process introduced a number of hidden units $n_h$ that's equal to the number of edges in the graph. (Plus 1 that was there by default when we initialized the RBM.)We can look at the numbers:
###Code
logpsi.nv, logpsi.nh
logpsi.alpha # = logpsi.nh / logpsi.nv
###Output
_____no_output_____
###Markdown
The first optimization Now, initialize the optimizer and approximately apply $U_B (\beta _1) = \exp \left( -i \beta _1 \sum _i X_i \right)$
###Code
optim = Optimizer(logpsi, n_steps=800, n_chains=4, warmup=800, step=12)
%%time
for n in range(len(G)):
params, history = optim.sr_rx(n=n, beta=betas[0], resample_phi=3, verbose=True)
optim.machine.params = params
print(f'Done with qubit #{n+1}, reached fidelity {history[-1]}')
logpsi.params = params
###Output
_____no_output_____
###Markdown
It's a good check to compare exact fidelities at this point:
###Code
psi_exact = QAOA(G, p=1).simulate(gammas[0], betas[0]).final_state_vector
psi_rbm = logpsi.get_state_vector(normalized=True)
exact_fidelity(psi_exact, psi_rbm)
###Output
_____no_output_____
###Markdown
Next, apply$$U_C (\gamma _2) = \exp \left( -i \gamma _2 \sum _{\langle i, j \rangle } Z_i Z_j \right)$$
###Code
logpsi.UC(G, gamma=gammas[1])
optim.machine = logpsi
###Output
_____no_output_____
###Markdown
However, this doubled the number of hidden units:
###Code
logpsi.alpha
###Output
_____no_output_____
###Markdown
The compression step We can keep the number of hidden units under control as we go to higher values of $p$ by performing a compression step, as described in the paper.Essentially, we define a smaller RBM with `RBM.alpha = 1.5` (the previous value or any we choose to compress to). Then, we optimize the parameters of the new RBM to describe the same quantum state as the larger one, obtaining a compressed representation of$$ \vert \psi \rangle = U_C (\gamma _2) \; U_B (\beta _1) \; U_C(\gamma _1) \; \vert + \rangle $$ A heuristically good choice for initial RBM parameters is those values that exactly describe the following quantum state:$$ \vert \psi _\text{init} \rangle = U_C \left( \frac{\gamma_1 + \gamma _2}{2} \right) \; \vert + \rangle $$
###Code
aux = RBM(len(G))
aux.UC(G, (gammas[0] + gammas[1])/2)
init_params = aux.params
###Output
_____no_output_____
###Markdown
Now, perform the compression:
###Code
%%time
params, history = optim.sr_compress(init=init_params, resample_phi=2, verbose=True)
###Output
Iteration 34 | Fidelity = 0.9950 | lr = 0.100 | Diff mean fidelity = 0.0061196
CPU times: user 21.6 s, sys: 1.39 s, total: 23 s
Wall time: 13.5 s
###Markdown
Let's plot the fidelity as a function of compression optimizer step:
###Code
fig, ax = plt.subplots(figsize=[8,5])
ax.plot(history)
ax.set_xlabel('Iteration step', fontsize=30)
ax.set_ylabel('Fidelity', fontsize=30)
###Output
_____no_output_____
###Markdown
Estimated fidelity reached:
###Code
history[-1]
logpsi = RBM(12, (len(params) - 12)//(12+1))
logpsi.params = params
logpsi.alpha
###Output
_____no_output_____
###Markdown
Finally, we can apply $U_B (\beta _2) = \exp \left( -i \beta _2 \sum _i X_i \right)$
###Code
optim.machine = logpsi
###Output
_____no_output_____
###Markdown
The second optimization
###Code
%%time
for n in range(len(G)):
params, history = optim.sr_rx(n=n, beta=betas[1], resample_phi=3, verbose=True)
optim.machine.params = params
print(f'Done with qubit #{n+1}, reached fidelity {history[-1]}')
###Output
Done with qubit #1, reached fidelity 0.9936483423240047
Done with qubit #2, reached fidelity 0.9945939003403254
Iteration 33 | Fidelity = 0.9952 | lr = 0.100 | Diff mean fidelity = 0.0023351
Done with qubit #3, reached fidelity 0.9929582465250073
Done with qubit #4, reached fidelity 0.9973711526104884
Iteration 33 | Fidelity = 0.9950 | lr = 0.100 | Diff mean fidelity = 0.0009768
Done with qubit #5, reached fidelity 0.9950055465841054
Iteration 31 | Fidelity = 0.9949 | lr = 0.100 | Diff mean fidelity = 0.0005597
Done with qubit #6, reached fidelity 0.994948074500038
Iteration 32 | Fidelity = 0.9940 | lr = 0.100 | Diff mean fidelity = 0.0004904
Done with qubit #7, reached fidelity 0.993982373382955
Done with qubit #8, reached fidelity 0.9927259787261872
Done with qubit #9, reached fidelity 0.9917465266431491
Done with qubit #10, reached fidelity 0.9851735331537615
Done with qubit #11, reached fidelity 0.9965867032532723
Done with qubit #12, reached fidelity 0.9897891989809056
CPU times: user 2min 34s, sys: 9.88 s, total: 2min 44s
Wall time: 1min 44s
###Markdown
And, compare the final output fidelity at $p=2$:
###Code
logpsi.params = params
psi_exact = QAOA(G, p=2).simulate(gammas, betas).final_state_vector
psi_rbm = logpsi.get_state_vector(normalized=True)
exact_fidelity(psi_exact, psi_rbm)
###Output
_____no_output_____ |
notebooks/coco_keypoints.ipynb | ###Markdown
COCO Keypoints A simple example of how to parse keypoints in the COCO annotation format. For demonstration purposes we will be using the samples present in the repo instead of the full COCO dataset.
###Code
from icevision.all import *
data_dir = Path('/home/lgvaz/git/icevision/samples')
class_map = ClassMap(['person'])
parser = parsers.COCOKeyPointsParser(annotations_filepath=data_dir/'keypoints_annotations.json', img_dir=data_dir/'images')
records = parser.parse(data_splitter=SingleSplitSplitter())[0]
record = records[1]
show_record(record, figsize=(10,10), class_map=class_map)
test_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(512), tfms.A.Normalize()])
test_ds = Dataset(records, test_tfms)
show_sample(test_ds[0], figsize=(10,10), display_bbox=False)
###Output
_____no_output_____ |
doc/notebook/02_1_amazonreview_multiclass_classification_sparse.ipynb | ###Markdown
1. DescriptionSentiment classification using the Amazon review dataset (multi-class classification).Dataset can be downloaded from https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Books_v1_02.tsv.gzThe consumer reviews serve as feedback for businesses in terms of performance, product quality, and customer service. An online review typically consists of free-form text and a star rating out of 5. The problem of predicting a user’s star rating for a product, given the user’s text review for that product, has lately become a popular, albeit hard, problem in machine learning. Using this dataset, we train a classifier to predict the product rating based on the review text.Predicting the rating based on the text is a particularly difficult task. The primary reason for the difficulty is that two people can provide different ratings despite writing similar reviews. As the rating scale grows (from a scale of 5 to a scale of 10), the task becomes increasingly difficult. 2. Data PreprocessingFor Amazon review classification we will perform some data preparation and data cleaning steps. We will generate feature vectors for the review text using sklearn's TF-IDF.
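The preprocessing below builds the TF-IDF features with a `CountVectorizer` followed by a `TfidfTransformer`; scikit-learn's `TfidfVectorizer` combines the two steps, so a more compact but equivalent sketch of the embedding function would be:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

def create_embed_tfidf(x_train, x_test):
    # fit the vocabulary and IDF weights on the training reviews only,
    # then reuse them to transform the test reviews
    tfidf = TfidfVectorizer()
    return tfidf.fit_transform(x_train), tfidf.transform(x_test)
```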
###Code
import os
import pandas as pd
from collections import OrderedDict
def create_embed(x_train, x_test):
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
count_vect = CountVectorizer()
x_train_counts = count_vect.fit_transform(x_train)
x_test_counts = count_vect.transform(x_test)
tfidf_transformer = TfidfTransformer()
x_train_tfidf = tfidf_transformer.fit_transform(x_train_counts)
x_test_tfidf = tfidf_transformer.transform(x_test_counts)
return x_train_tfidf, x_test_tfidf
def preprocess_data(fname):
df = pd.read_csv(fname, sep='\t', error_bad_lines=False)
df = df[["review_body", "star_rating"]]
    df = df.dropna().drop_duplicates().sample(frac=1)  # sample(frac=1) shuffles the rows
print("Dataset contains {} reviews".format(df.shape[0]))
rating_categories = df["star_rating"].value_counts()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df["review_body"],
df["star_rating"],
random_state = 42)
x_train, x_test = create_embed(x_train, x_test)
return x_train, x_test, y_train, y_test, rating_categories
#---- Data Preparation ----
# Please uncomment the below lines to download and unzip the dataset.
#!wget -N https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Books_v1_02.tsv.gz
#!gunzip amazon_reviews_us_Books_v1_02.tsv.gz
#!mv amazon_reviews_us_Books_v1_02.tsv datasets
DATA_FILE = "datasets/amazon_reviews_us_Books_v1_02.tsv/amazon_reviews_us_Books_v1_02.tsv"
x_train, x_test, y_train, y_test, rating_categories = preprocess_data(DATA_FILE)
print("shape of train data: {}".format(x_train.shape))
print("shape of test data: {}".format(x_test.shape))
# Label distribution summary
ax = rating_categories.plot(kind='bar', title='Label Distribution').\
set(xlabel="Rating Id's", ylabel="No. of reviewes")
###Output
_____no_output_____
###Markdown
3. Algorithm Evaluation
###Code
import time
from sklearn import metrics
train_time = []
test_time = []
accuracy = []
precision = []
recall = []
f1 = []
estimator_name = []
def evaluate(estimator, estimator_nm,
x_train, y_train,
x_test, y_test):
estimator_name.append(estimator_nm)
start_time = time.time()
estimator.fit(x_train, y_train)
train_time.append(round(time.time() - start_time, 4))
start_time = time.time()
pred_y = estimator.predict(x_test)
test_time.append(round(time.time() - start_time, 4))
accuracy.append(metrics.accuracy_score(y_test, pred_y))
precision.append(metrics.precision_score(y_test, pred_y, average='macro'))
recall.append(metrics.recall_score(y_test, pred_y, average='macro'))
f1.append(metrics.f1_score(y_test, pred_y, average='macro'))
target_names = ['rating 1.0', 'rating 2.0', 'rating 3.0', 'rating 4.0', 'rating 5.0']
return metrics.classification_report(y_test, pred_y, target_names=target_names)
###Output
_____no_output_____
###Markdown
3.1 Multinomial LogisticRegression
###Code
#1. Demo: Multinomial LogisticRegression
import frovedis
TARGET = "multinomial_logistic_regression"
from frovedis.exrpc.server import FrovedisServer
FrovedisServer.initialize("mpirun -np 8 " + os.environ["FROVEDIS_SERVER"])
from frovedis.mllib.linear_model import LogisticRegression as frovLogisticRegression
f_est = frovLogisticRegression(max_iter=3100, penalty='none', \
lr_rate=0.001, tol=1e-8)
E_NM = TARGET + "_frovedis_" + frovedis.__version__
f_report = evaluate(f_est, E_NM, \
x_train, y_train, x_test, y_test)
f_est.release()
FrovedisServer.shut_down()
import sklearn
from sklearn.linear_model import LogisticRegression as skLogisticRegression
s_est = skLogisticRegression(max_iter = 3100, penalty='none', \
tol = 1e-8, n_jobs = 12)
E_NM = TARGET + "_sklearn_" + sklearn.__version__
s_report = evaluate(s_est, E_NM, \
x_train, y_train, x_test, y_test)
# LogisticRegression: Precision, Recall and F1 score for each class
print("Frovedis LogisticRegression metrices: ")
print(f_report)
print("Sklearn LogisticRegression metrices: ")
print(s_report)
###Output
/opt/nec/nosupport/frovedis/x86/lib/python/frovedis/mllib/linear_model.py:108: UserWarning: fit: multinomial classification problem is detected... switching solver to 'sag'.
"detected... switching solver to 'sag'.\n")
/home/adityaw/virt1/lib64/python3.6/site-packages/sklearn/metrics/_classification.py:1245: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
/home/adityaw/virt1/lib64/python3.6/site-packages/sklearn/metrics/_classification.py:1245: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
/home/adityaw/virt1/lib64/python3.6/site-packages/sklearn/metrics/_classification.py:1245: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
/home/adityaw/virt1/lib64/python3.6/site-packages/sklearn/metrics/_classification.py:1245: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
###Markdown
3.2 MultinomialNB
###Code
#2. Demo: MultinomialNB
import frovedis
TARGET = "multinomial_naive_bayes"
from frovedis.exrpc.server import FrovedisServer
FrovedisServer.initialize("mpirun -np 8 " + os.environ["FROVEDIS_SERVER"])
from frovedis.mllib.naive_bayes import MultinomialNB as fMNB
f_est = fMNB()
E_NM = TARGET + "_frovedis_" + frovedis.__version__
f_report = evaluate(f_est, E_NM, \
x_train, y_train, x_test, y_test)
f_est.release()
FrovedisServer.shut_down()
import sklearn
from sklearn.naive_bayes import MultinomialNB as sMNB
s_est = sMNB()
E_NM = TARGET + "_sklearn_" + sklearn.__version__
s_report = evaluate(s_est, E_NM, \
x_train, y_train, x_test, y_test)
# MultinomialNB: Precision, Recall and F1 score for each class
print("Frovedis MultinomialNB metrices: ")
print(f_report)
print("Sklearn MultinomialNB metrices: ")
print(s_report)
###Output
Frovedis MultinomialNB metrices:
precision recall f1-score support
rating 1.0 0.85 0.01 0.01 59054
rating 2.0 0.00 0.00 0.00 41208
rating 3.0 0.14 0.00 0.00 61924
rating 4.0 0.31 0.00 0.00 145240
rating 5.0 0.60 1.00 0.75 460417
accuracy 0.60 767843
macro avg 0.38 0.20 0.15 767843
weighted avg 0.49 0.60 0.45 767843
Sklearn MultinomialNB metrices:
precision recall f1-score support
rating 1.0 0.85 0.01 0.01 59054
rating 2.0 0.00 0.00 0.00 41208
rating 3.0 0.14 0.00 0.00 61924
rating 4.0 0.31 0.00 0.00 145240
rating 5.0 0.60 1.00 0.75 460417
accuracy 0.60 767843
macro avg 0.38 0.20 0.15 767843
weighted avg 0.49 0.60 0.45 767843
###Markdown
3.3 Bernoulli Naive Bayes
###Code
# Demo: Bernoulli Naive Bayes
import frovedis
TARGET = "bernoulli_naive_bayes"
from frovedis.exrpc.server import FrovedisServer
FrovedisServer.initialize("mpirun -np 8 " + os.environ["FROVEDIS_SERVER"])
from frovedis.mllib.naive_bayes import BernoulliNB as frovNB
f_est = frovNB(alpha=1.0)
E_NM = TARGET + "_frovedis_" + frovedis.__version__
f_report = evaluate(f_est, E_NM, \
x_train, y_train, x_test, y_test)
f_est.release()
FrovedisServer.shut_down()
import sklearn
from sklearn.naive_bayes import BernoulliNB as skNB
s_est = skNB(alpha=1.0)
E_NM = TARGET + "_sklearn_" + sklearn.__version__
s_report = evaluate(s_est, E_NM, \
x_train, y_train, x_test, y_test)
# Precision, Recall and F1 score for each class
print("Frovedis Bernoulli Naive Bayes metrices: ")
print(f_report)
print("Sklearn Bernoulli Naive Bayes metrices: ")
print(s_report)
###Output
Frovedis Bernoulli Naive Bayes metrices:
precision recall f1-score support
rating 1.0 0.53 0.41 0.46 59054
rating 2.0 0.30 0.09 0.13 41208
rating 3.0 0.22 0.27 0.25 61924
rating 4.0 0.29 0.26 0.28 145240
rating 5.0 0.70 0.77 0.73 460417
accuracy 0.57 767843
macro avg 0.41 0.36 0.37 767843
weighted avg 0.55 0.57 0.55 767843
Sklearn Bernoulli Naive Bayes metrices:
precision recall f1-score support
rating 1.0 0.53 0.41 0.46 59054
rating 2.0 0.30 0.09 0.13 41208
rating 3.0 0.22 0.27 0.25 61924
rating 4.0 0.29 0.26 0.28 145240
rating 5.0 0.70 0.77 0.73 460417
accuracy 0.57 767843
macro avg 0.41 0.36 0.37 767843
weighted avg 0.55 0.57 0.55 767843
###Markdown
4. Performance summary
###Code
summary = pd.DataFrame(OrderedDict({ "estimator": estimator_name,
"train time": train_time,
"test time": test_time,
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"f1-score": f1
}))
summary
###Output
_____no_output_____ |
Week 2/Non-negative least squares.ipynb | ###Markdown
In this module, we fit a linear model with positive constraints on the regression coefficients and compare the estimated coefficients to a classic linear regression.
###Code
!pip install -U scikit-learn
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###Output
_____no_output_____
###Markdown
Generate some random data
###Code
np.random.seed(42)
n_samples, n_features = 200, 50
X = np.random.randn(n_samples, n_features)
true_coef = 3 * np.random.randn(n_features)
# Threshold coefficients to render them non-negative
true_coef[true_coef < 0] = 0
y = np.dot(X, true_coef)
# Add some noise
y += 5 * np.random.normal(size=(n_samples,))
###Output
_____no_output_____
###Markdown
Split the data in train set and test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
###Output
_____no_output_____
###Markdown
Fit the Non-Negative least squares.
###Code
from sklearn.linear_model import LinearRegression
reg_nnls = LinearRegression(positive=True)
y_pred_nnls = reg_nnls.fit(X_train, y_train).predict(X_test)
r2_score_nnls = r2_score(y_test, y_pred_nnls)
print("NNLS R2 score", r2_score_nnls)
###Output
NNLS R2 score 0.8225220806196525
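For reference, `LinearRegression(positive=True)` solves a non-negative least squares problem; the same kind of fit can be obtained directly from SciPy's classic NNLS solver. A minimal sketch reusing the arrays above (note that `scipy.optimize.nnls` fits no intercept, so the coefficients can differ slightly from the estimator above, which fits one by default):

```python
from scipy.optimize import nnls

# nnls minimizes ||X_train @ coef - y_train|| subject to coef >= 0
coef_nnls, residual_norm = nnls(X_train, y_train)
print(coef_nnls[:5], residual_norm)
```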
###Markdown
Fit an OLS.
###Code
reg_ols = LinearRegression()
y_pred_ols = reg_ols.fit(X_train, y_train).predict(X_test)
r2_score_ols = r2_score(y_test, y_pred_ols)
print("OLS R2 score", r2_score_ols)
###Output
OLS R2 score 0.7436926291700348
###Markdown
Comparing the regression coefficients between OLS and NNLS, we can observe that they are highly correlated (the dashed line is the identity relation), but the non-negative constraint shrinks some of them to 0. Non-negative least squares inherently yields sparse results.
###Code
fig, ax = plt.subplots()
ax.plot(reg_ols.coef_, reg_nnls.coef_, linewidth=0, marker=".")
low_x, high_x = ax.get_xlim()
low_y, high_y = ax.get_ylim()
low = max(low_x, low_y)
high = min(high_x, high_y)
ax.plot([low, high], [low, high], ls="--", c=".3", alpha=0.5)
ax.set_xlabel("OLS regression coefficients", fontweight="bold")
ax.set_ylabel("NNLS regression coefficients", fontweight="bold")
###Output
_____no_output_____ |
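To put a number on the sparsity mentioned above, a quick check (reusing `reg_nnls`, `reg_ols` and `true_coef` from this notebook) counts how many coefficients each approach sets exactly to zero:

```python
import numpy as np

print("true zero coefficients:", np.sum(true_coef == 0))
print("NNLS zero coefficients:", np.sum(reg_nnls.coef_ == 0))
print("OLS zero coefficients: ", np.sum(reg_ols.coef_ == 0))  # typically none
```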
Chapter06/CHapter6_QDraw_TF2_alpha.ipynb | ###Markdown
Acquire The Data
###Code
batch_size = 128
img_rows, img_cols = 28, 28 # image dims
#load npy arrays
data_path = "data_files/" # folder for image files
for (dirpath, dirnames, filenames) in walk(data_path):
pass # file names accumulate in list 'filenames'
print(filenames)
num_images = 1000000 ### was 100000, reduce this number if memory issues.
num_files = len(filenames) # *** we have 10 files ***
images_per_category = num_images//num_files
seed = np.random.randint(1, 10e7)
i=0
print(images_per_category)
for file in filenames:
file_path = data_path + file
x = np.load(file_path)
x = x.astype('float32') ##normalise images
x /= 255.0
y = [i] * len(x) # create numeric label for this image
x = x[:images_per_category] # get our sample of images
y = y[:images_per_category] # get our sample of labels
if i == 0:
x_all = x
y_all = y
else:
x_all = np.concatenate((x,x_all), axis=0)
y_all = np.concatenate((y,y_all), axis=0)
i += 1
#split data arrays into train and test segments
x_train, x_test, y_train, y_test = train_test_split(x_all, y_all, test_size=0.2, random_state=42)
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
y_train = tf.keras.utils.to_categorical(y_train, num_files)
y_test = tf.keras.utils.to_categorical(y_test, num_files)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=42)
###Output
x_train shape: (800000, 28, 28, 1)
800000 train samples
200000 test samples
###Markdown
Create the model
###Code
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(num_files, activation='softmax'))
print("Compiling...........")
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.Adadelta(),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Train the model
###Code
epochs=1 # for testing, for training use 25
callbacks=[tf.keras.callbacks.TensorBoard(log_dir = "./tb_log_dir", histogram_freq = 0)]
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
verbose=1,
validation_data=(x_valid, y_valid))
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
###Output
200000/200000 [==============================] - 10s 50us/sample - loss: 2.0684 - accuracy: 0.3971
Test loss: 2.068418756465912
Test accuracy: 0.39709
###Markdown
Test The Model
###Code
#_test
import os
labels = [os.path.splitext(file)[0] for file in filenames]
print(labels)
print("\nFor each pair in the following, the first label is predicted, second is actual\n")
for i in range(20):
t = np.random.randint(len(x_test) )
x1= x_test[t]
x1 = x1.reshape(1,28,28,1)
p = model.predict(x1)
print("-------------------------")
print(labels[np.argmax(p)])
print(labels[np.argmax(y_test[t])])
print("-------------------------")
###Output
['broom', 'aircraft_carrier', 'alarm_clock', 'ant', 'cell_phone', 'baseball', 'asparagus', 'dolphin', 'crocodile', 'bee']
For each pair in the following, the first label is predicted, second is actual
-------------------------
cell_phone
alarm_clock
-------------------------
-------------------------
baseball
baseball
-------------------------
-------------------------
asparagus
broom
-------------------------
-------------------------
bee
cell_phone
-------------------------
-------------------------
bee
bee
-------------------------
-------------------------
alarm_clock
cell_phone
-------------------------
-------------------------
cell_phone
cell_phone
-------------------------
-------------------------
asparagus
broom
-------------------------
-------------------------
cell_phone
baseball
-------------------------
-------------------------
aircraft_carrier
aircraft_carrier
-------------------------
-------------------------
cell_phone
cell_phone
-------------------------
-------------------------
aircraft_carrier
crocodile
-------------------------
-------------------------
aircraft_carrier
aircraft_carrier
-------------------------
-------------------------
aircraft_carrier
ant
-------------------------
-------------------------
cell_phone
bee
-------------------------
-------------------------
baseball
baseball
-------------------------
-------------------------
cell_phone
baseball
-------------------------
-------------------------
aircraft_carrier
ant
-------------------------
-------------------------
alarm_clock
dolphin
-------------------------
-------------------------
bee
dolphin
-------------------------
###Markdown
Save, Reload and Retest the Model
###Code
model.save("./QDrawModel.h5")
del model
from tensorflow.keras.models import load_model
import numpy as np
model = load_model('./QDrawModel.h5')
model.summary()
print("For each pair, first is predicted, second is actual")
for i in range(20):
t = np.random.randint(len(x_test))
x1= x_test[t]
x1 = x1.reshape(1,28,28,1)
p = model.predict(x1)
print("-------------------------")
print(labels[np.argmax(p)])
print(labels[np.argmax(y_test[t])])
print("-------------------------")
###Output
For each pair, first is predicted, second is actual
-------------------------
broom
broom
-------------------------
-------------------------
cell_phone
alarm_clock
-------------------------
-------------------------
crocodile
dolphin
-------------------------
-------------------------
alarm_clock
alarm_clock
-------------------------
-------------------------
cell_phone
aircraft_carrier
-------------------------
-------------------------
bee
crocodile
-------------------------
-------------------------
cell_phone
alarm_clock
-------------------------
-------------------------
cell_phone
cell_phone
-------------------------
-------------------------
bee
crocodile
-------------------------
-------------------------
cell_phone
asparagus
-------------------------
-------------------------
broom
broom
-------------------------
-------------------------
cell_phone
alarm_clock
-------------------------
-------------------------
aircraft_carrier
crocodile
-------------------------
-------------------------
aircraft_carrier
aircraft_carrier
-------------------------
-------------------------
aircraft_carrier
dolphin
-------------------------
-------------------------
cell_phone
cell_phone
-------------------------
-------------------------
broom
broom
-------------------------
-------------------------
bee
ant
-------------------------
-------------------------
aircraft_carrier
dolphin
-------------------------
-------------------------
cell_phone
cell_phone
-------------------------
|
foreign_languages/Sara_danish.ipynb | ###Markdown
Exploring Foreign LanguagesSo far, we have been learning about general ways to explore texts through manipulating strings and regular expressions. Today, we will be focusing on what we can do when texts are in languages other than English. This will just be an introduction to some of the many different modules that can be used for these tasks. The goal is to learn some tools, including Polyglot and translation, that can be jumping off points to see what you may or may not need going forward. Lesson Outline:- Q&A about what we've gone over so far- Examples (with Sara's data)- Practice! InstallationsUncomment and run the cell below!
###Code
#!pip install translation
#!pip install py-translate
#!pip install morfessor
#!pip install polyglot
#!pip install pycld2
#!brew install intltool icu4c gettext
#!brew link icu4c gettext --force
#!CFLAGS=-I/usr/local/opt/icu4c/include LDFLAGS=-L/usr/local/opt/icu4c/lib pip3 install pyicu
###Output
_____no_output_____
###Markdown
Importing Text
###Code
import codecs
with codecs.open('Skyggebilleder af en Reise til Harzen.txt', 'r', encoding='utf-8', errors='ignore') as f:
read_text = f.read()
read_text
# pulling out a subsection of text for our examples
text_snippet = read_text[20000:23000]
###Output
_____no_output_____
###Markdown
Translating TextThere are many different ways that you could go about translating text within Python, but one of the easiest is the package `translation`. `translation` makes use of existing online translators. The module used to include a method for Google Translate, but the site no longer allows easy access. Bing is probably the most useful method for it.**Pros:*** Easy to set up* Runs quickly**Cons:*** Not always accurate* Internet connection needed* Language limitationsThe documentation (or lack thereof): https://pypi.python.org/pypi/translation
###Code
import translation
translation.bing(text_snippet, dst = 'en')
###Output
_____no_output_____
###Markdown
Other alternatives for translating your text include:* `py-translate` * Makes use of Google Translate * Often returns errors / gets blocked * Can be used from the command line * Documentation: https://pypi.python.org/pypi/py-translate* API calls to Google Translate * Takes a little more set-up * Can be customized a little bit more * Can translate a LOT of text (a rough sketch of this option appears after the py-translate example below)
###Code
# using py-translate
from translate import translator
# calling tranlator function, telling it that the
translator('da', 'en',text_snippet[:200])
###Output
_____no_output_____
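The last alternative listed above, calling the Google Translate API directly, is not demonstrated in this notebook. As a rough sketch of what it might look like with the official `google-cloud-translate` client (this assumes the package is installed and billing/credentials are configured, e.g. via `GOOGLE_APPLICATION_CREDENTIALS`, so treat it as illustrative rather than something that runs out of the box):

```python
# Sketch only: requires `pip install google-cloud-translate` and valid credentials
from google.cloud import translate_v2 as translate

client = translate.Client()
result = client.translate(text_snippet[:200], target_language='en')
print(result['translatedText'])
```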
###Markdown
PolyglotPolyglot is "a natural language pipeline that supports massive multilingual applications," in other words, it does a lot of stuff. It is a sort of one-stop-shop for many different functions that you may want to apply to you text, and supports many different languages. We are going to run through some of its functionalities.Docs: http://polyglot.readthedocs.io/en/latest/ Language Detection
###Code
from polyglot.detect import Detector
# create a detector object that contains read_text
# and assigning it to DETECTED
detected = Detector(read_text)
# the .language attribute returns the language that most of the
# text is written in (the one the detector is most confident about)
print(detected.language)
# sometimes there will be multiple languages within
# the text, and you will want to see all of them
for language in detected.languages:
print(language)
# if you try to pass in a string that is too short
# for the system to get a good read on, it will throw
# an error, alerting you to this fact
Detector("4")
# we can override that with the optional argument 'quiet=True'
print(Detector("4", quiet=True))
# here are all of the languages supported for language detection
from polyglot.utils import pretty_list
print(pretty_list(Detector.supported_languages()))
###Output
_____no_output_____
###Markdown
TokenizationSimilar to what we saw with NLTK, Polyglot can break our text up into words and sentences. Polyglot has the advantage of spanning multiple languages, and thus is more likely to identify proper breakpoint in languages other than English.
###Code
from polyglot.text import Text
# creating a Text object that analyzes our text_snippet
text = Text(text_snippet)
# Text also has a language instance variable
print(text.language)
# here, we are looking at text_snippet tokenized into words
text.words
# now we are looking at text_snippet broken down into sentences
text.sentences
###Output
_____no_output_____
###Markdown
Side Notes: Important Package InformationNot all of the packages are downloaded for all functionalities for all languages in Polyglot. Instead of forcing you to download a lot of files in the beginning, the creators decided that it would be better for language extensions to be downloaded on an 'as-necessary' basis. You will occasionally be told that you're lacking a package, and you will need to download it. You can either do that with the built-in downloader, or from the command line.
###Code
# staying within python
from polyglot.downloader import downloader
downloader.download("embeddings2.en")
# alternate command line method
!polyglot download embeddings2.da pos2.da
###Output
_____no_output_____
###Markdown
Also, if you're working with a language and want to know what Polyglot lets you do with a language, it provides a `supported_tasks` method.
###Code
# tasks available for english
downloader.supported_tasks(lang="en")
# tasks available for danish
downloader.supported_tasks(lang="da")
###Output
_____no_output_____
###Markdown
Part of Speech TaggingPolyglot supports POS tagging for several languages.
###Code
# languages that polyglot supports for part of speech tagging
print(downloader.supported_languages_table("pos2"))
text.pos_tags
###Output
_____no_output_____
###Markdown
Named Entity RecognitionPolyglot can tag names and groups them into three main categories:* Locations (Tag: I-LOC): cities, countries, regions, continents, neighborhoods, administrative divisions ...* Organizations (Tag: I-ORG): sports teams, newspapers, banks, universities, schools, non-profits, companies, ...* Persons (Tag: I-PER): politicians, scientists, artists, athletes ...
###Code
# languages that polyglot supports for named entity recognition
print(downloader.supported_languages_table("ner2", 3))
#!polyglot download ner2.da
text.entities
###Output
_____no_output_____
###Markdown
Other Features of Polyglot* Nearest Neighbors -- http://polyglot.readthedocs.io/en/latest/Embeddings.html* Morpheme Generation -- http://polyglot.readthedocs.io/en/latest/MorphologicalAnalysis.html* Sentiment Analysis -- http://polyglot.readthedocs.io/en/latest/Sentiment.html* Transliteration -- http://polyglot.readthedocs.io/en/latest/Transliteration.html Code Summary: Translation:* `translation.bing(your_string, dst = 'en')` Polyglot:* `.language`* `.languages`* `.words`* `.sentences`* `.pos_tags`* `.entities` Extra
###Code
# importing some more packages
from datascience import *
%matplotlib inline
import seaborn as sns
# analyzing our text with a Polyglot Text object
whole_text = Text(read_text)
# the language of our text
print(whole_text.language)
# getting the part of speech tags for our corpus
print(whole_text.pos_tags)
words_and_poss = list(whole_text.pos_tags)
# putting those word / part of speech pairs into a table
wrd = Table(['Word', 'Part of Speech']).with_rows(words_and_poss)
# grouping those by part of speech to get the most commonly occurring parts of speech
df = wrd.group('Part of Speech').sort('count', descending=True).to_df()
df
# plotting the counts for each part of speech using seaborn
sns.barplot(x='Part of Speech', y='count', data=df)
# getting the most popular word for each part of speech type
wrd_counts = wrd.group('Word').join('Word', wrd).sort('count', descending=True)
wrd_counts.group(2, lambda x: x.item(0)).show(16)
# that's not very informative, so let's pull out the stop words
# using a list from http://snowball.tartarus.org/algorithms/danish/stop.txt
danish_stop_words = """og,
i,
jeg,
det,
at,
en,
den,
til,
er,
som,
på,
de,
med,
han,
af,
for,
ikke,
der,
var,
mig,
sig,
men,
et,
har,
om,
vi,
min,
havde,
ham,
hun,
nu,
over,
da,
fra,
du,
ud,
sin,
dem,
os,
op,
man,
hans,
hvor,
eller,
hvad,
skal,
selv,
her,
alle,
vil,
blev,
kunne,
ind,
når,
være,
dog,
noget,
ville,
jo,
deres,
efter,
ned,
skulle,
denne,
end,
dette,
mit,
også,
under,
have,
dig,
anden,
hende,
mine,
alt,
meget,
sit,
sine,
vor,
mod,
disse,
hvis,
din,
nogle,
hos,
blive,
mange,
ad,
bliver,
hendes,
været,
thi,
jer,
sådan"""
splt = danish_stop_words.split(',\n')
print(splt)
# determining which rows to keep (membership is checked against the split list
# splt, not the raw string, so substring matches don't slip through)
not_in_stop_words = [x not in splt for x in wrd_counts['Word']]
# most common words for each part of speech no longer including the stop words
wrd_counts.where(not_in_stop_words).group(2, lambda x: x.item(0)).show(16)
# retrieving all of the named entities that Polyglot detected
ner = str(whole_text.entities).split('I-')[1:]
ner[:5]
# splitting up the type and the name
split_type = [x.split('([') for x in ner]
split_type[:5]
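# a cleaner alternative sketch to the string splitting above: polyglot entity
# chunks expose a .tag attribute and behave like lists of words, so the
# (type, name) pairs can be built straight from the objects (assuming the
# documented polyglot API)
entity_pairs = [(ent.tag, ' '.join(ent)) for ent in whole_text.entities]
entity_pairs[:5]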
# making a table out of that
entities = Table(['Type', 'Name']).with_rows(split_type)
entities
# how many of each type of entity there are
entities.group('Type')
# finding the most commonly occurring entities
entities.group('Name').sort('count', descending=True)
# possibly the most common names of people
entities.where('Type', 'PER').group('Name').sort('count', True)
###Output
_____no_output_____ |
Question Classifier.ipynb | ###Markdown
As can be observed, the train set contains some duplicate questions (81 to be exact). The number of unique Coarse:Fine classes is 50, of which only 42 appear in the test set. The number of fine classes overall is 47, of which 39 appear in the test set.
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(pd.Series(train.QType.tolist() + test.QType.tolist()).values)
train['QType'] = le.transform(train.QType.values)
test['QType'] = le.transform(test.QType.values)
le2 = LabelEncoder()
le2.fit(pd.Series(train['QType-Coarse'].tolist() + test['QType-Coarse'].tolist()).values)
train['QType-Coarse'] = le2.transform(train['QType-Coarse'].values)
test['QType-Coarse'] = le2.transform(test['QType-Coarse'].values)
le3 = LabelEncoder()
le3.fit(pd.Series(train['QType-Fine'].tolist() + test['QType-Fine'].tolist()).values)
train['QType-Fine'] = le3.transform(train['QType-Fine'].values)
test['QType-Fine'] = le3.transform(test['QType-Fine'].values)
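# quick sanity check (sketch): the fitted LabelEncoders can map the integer
# codes back to the original class strings, handy when inspecting predictions
print(le2.inverse_transform([0, 1, 2]))  # a few coarse class names
print(le3.inverse_transform([0, 1, 2]))  # a few fine class names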
train.head()
all_corpus = pd.Series(train.Question.tolist() + test.Question.tolist()).astype(str)
###Output
_____no_output_____
###Markdown
Obtaining Dotwords.Also, performing text cleaning and pre-processing in the next two blocks
###Code
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
# dot_words = []
# for row in all_corpus:
# for word in row.split():
# if '.' in word and len(word)>2:
# dot_words.append(word)
def text_clean(corpus, keep_list):
'''
Purpose : Function to keep only alphabets, digits and certain words (punctuations, qmarks, tabs etc. removed)
Input : Takes a text corpus, 'corpus' to be cleaned along with a list of words, 'keep_list', which have to be retained
even after the cleaning process
Output : Returns the cleaned text corpus
'''
cleaned_corpus = pd.Series()
for row in corpus:
qs = []
for word in row.split():
if word not in keep_list:
p1 = re.sub(pattern='[^a-zA-Z0-9]',repl=' ',string=word)
p1 = p1.lower()
qs.append(p1)
else : qs.append(word)
cleaned_corpus = cleaned_corpus.append(pd.Series(' '.join(qs)))
return cleaned_corpus
def preprocess(corpus, keep_list, cleaning = True, stemming = False, stem_type = None, lemmatization = False, remove_stopwords = True):
'''
Purpose : Function to perform all pre-processing tasks (cleaning, stemming, lemmatization, stopwords removal etc.)
Input :
'corpus' - Text corpus on which pre-processing tasks will be performed
'keep_list' - List of words to be retained during cleaning process
'cleaning', 'stemming', 'lemmatization', 'remove_stopwords' - Boolean variables indicating whether a particular task should
be performed or not
'stem_type' - Choose between Porter stemmer or Snowball(Porter2) stemmer. Default is "None", which corresponds to Porter
Stemmer. 'snowball' corresponds to Snowball Stemmer
Note : Either stemming or lemmatization should be used. There's no benefit of using both of them together
Output : Returns the processed text corpus
'''
if cleaning == True:
corpus = text_clean(corpus, keep_list)
if remove_stopwords == True:
wh_words = ['who', 'what', 'when', 'why', 'how', 'which', 'where', 'whom']
stop = set(stopwords.words('english'))
for word in wh_words:
stop.remove(word)
corpus = [[x for x in x.split() if x not in stop] for x in corpus]
else :
corpus = [[x for x in x.split()] for x in corpus]
if lemmatization == True:
lem = WordNetLemmatizer()
corpus = [[lem.lemmatize(x, pos = 'v') for x in x] for x in corpus]
if stemming == True:
if stem_type == 'snowball':
stemmer = SnowballStemmer(language = 'english')
corpus = [[stemmer.stem(x) for x in x] for x in corpus]
else :
stemmer = PorterStemmer()
corpus = [[stemmer.stem(x) for x in x] for x in corpus]
corpus = [' '.join(x) for x in corpus]
return corpus
common_dot_words = ['U.S.', 'St.', 'Mr.', 'Mrs.', 'D.C.']
all_corpus = preprocess(all_corpus, keep_list = common_dot_words, remove_stopwords = True)
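# quick sanity check (sketch): run the pipeline on a single made-up question
# (not from the dataset) to see the effect of keep_list and stopword removal
print(preprocess(["What is the capital of the U.S. ?"], keep_list = common_dot_words))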
###Output
_____no_output_____
###Markdown
Splitting the preprocessed combined corpus again into train and test set
###Code
train_corpus = all_corpus[0:train.shape[0]]
test_corpus = all_corpus[train.shape[0]:]
###Output
_____no_output_____
###Markdown
Loading the English model for Spacy.NLTK version for the same performs too slowly, hence opting for Spacy.
###Code
nlp = spacy.load('en')
###Output
_____no_output_____
###Markdown
Obtaining Features from Train Data, which would be fed to CountVectorizerCreating list of Named Entities, Lemmas, POS Tags, Syntactic Dependency Relation and Orthographic Features using shape.Later, these would be used as features for our model.
###Code
all_ner = []
all_lemma = []
all_tag = []
all_dep = []
all_shape = []
for row in train_corpus:
doc = nlp(row)
present_lemma = []
present_tag = []
present_dep = []
present_shape = []
present_ner = []
#print(row)
for token in doc:
present_lemma.append(token.lemma_)
present_tag.append(token.tag_)
#print(present_tag)
present_dep.append(token.dep_)
present_shape.append(token.shape_)
all_lemma.append(" ".join(present_lemma))
all_tag.append(" ".join(present_tag))
all_dep.append(" ".join(present_dep))
all_shape.append(" ".join(present_shape))
for ent in doc.ents:
present_ner.append(ent.label_)
all_ner.append(" ".join(present_ner))
###Output
_____no_output_____
###Markdown
Converting the attributes obtained above into vectors using CountVectorizer.
###Code
count_vec_ner = CountVectorizer(ngram_range=(1, 2)).fit(all_ner)
ner_ft = count_vec_ner.transform(all_ner)
count_vec_lemma = CountVectorizer(ngram_range=(1, 2)).fit(all_lemma)
lemma_ft = count_vec_lemma.transform(all_lemma)
count_vec_tag = CountVectorizer(ngram_range=(1, 2)).fit(all_tag)
tag_ft = count_vec_tag.transform(all_tag)
count_vec_dep = CountVectorizer(ngram_range=(1, 2)).fit(all_dep)
dep_ft = count_vec_dep.transform(all_dep)
count_vec_shape = CountVectorizer(ngram_range=(1, 2)).fit(all_shape)
shape_ft = count_vec_shape.transform(all_shape)
###Output
_____no_output_____
###Markdown
Combining the features obtained into 1 matrix
###Code
#x_all_ft_train = hstack([ner_ft, lemma_ft, tag_ft, dep_ft, shape_ft])
x_all_ft_train = hstack([ner_ft, lemma_ft, tag_ft])
x_all_ft_train
###Output
_____no_output_____
###Markdown
Converting from COOrdinate format to Compressed Sparse Row format for easier mathematical computations.
###Code
x_all_ft_train = x_all_ft_train.tocsr()
x_all_ft_train
###Output
_____no_output_____
###Markdown
Now we will obtain the Feature vectors for the test set using the CountVectorizers Obtained from the Training Corpus
###Code
all_test_ner = []
all_test_lemma = []
all_test_tag = []
all_test_dep = []
all_test_shape = []
for row in test_corpus:
doc = nlp(row)
present_lemma = []
present_tag = []
present_dep = []
present_shape = []
present_ner = []
#print(row)
for token in doc:
present_lemma.append(token.lemma_)
present_tag.append(token.tag_)
#print(present_tag)
present_dep.append(token.dep_)
present_shape.append(token.shape_)
all_test_lemma.append(" ".join(present_lemma))
all_test_tag.append(" ".join(present_tag))
all_test_dep.append(" ".join(present_dep))
all_test_shape.append(" ".join(present_shape))
for ent in doc.ents:
present_ner.append(ent.label_)
all_test_ner.append(" ".join(present_ner))
ner_test_ft = count_vec_ner.transform(all_test_ner)
lemma_test_ft = count_vec_lemma.transform(all_test_lemma)
tag_test_ft = count_vec_tag.transform(all_test_tag)
dep_test_ft = count_vec_dep.transform(all_test_dep)
shape_test_ft = count_vec_shape.transform(all_test_shape)
#x_all_ft_test = hstack([ner_test_ft, lemma_test_ft, tag_test_ft, dep_test_ft, shape_test_ft])
x_all_ft_test = hstack([ner_test_ft, lemma_test_ft, tag_test_ft])
x_all_ft_test
x_all_ft_test = x_all_ft_test.tocsr()
x_all_ft_test
###Output
_____no_output_____
###Markdown
Model TrainingLiterature over the years has shown that a linear SVM performs best in this use case.
###Code
model = svm.LinearSVC()
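# optional sketch: a quick 5-fold cross-validation on the training features to
# corroborate the choice of a linear SVM before fitting on the full train set
# (cross_val_score is scikit-learn's; cv=5 is an arbitrary choice)
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(svm.LinearSVC(), x_all_ft_train, train['QType-Coarse'].values, cv=5)
print(cv_scores.mean())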
###Output
_____no_output_____
###Markdown
First Modelling for Coarse Classes
###Code
model.fit(x_all_ft_train, train['QType-Coarse'].values)
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
preds = model.predict(x_all_ft_test)
preds
accuracy_score(test['QType-Coarse'].values, preds)
###Output
_____no_output_____
###Markdown
Glad to announce, Feature Engineering has enabled us to achieve an Accuracy of 88.2% on the validation set.The obtained accuracy is way higher than the 73% accuracy obtained without feature engineering Next, we will obtain accuracies for Coarse:Fine combinations
###Code
model.fit(x_all_ft_train, train['QType'].values)
preds = model.predict(x_all_ft_test)
accuracy_score(test['QType'].values, preds)
###Output
_____no_output_____
###Markdown
Woah, up to 81.4% accuracy from 68% obtained earlier when modelled without Feature Engineering. Finally, we would evaluate our performance for the fine classes
###Code
model.fit(x_all_ft_train, train['QType-Fine'].values)
preds = model.predict(x_all_ft_test)
accuracy_score(test['QType-Fine'].values, preds)
###Output
_____no_output_____ |
notebooks/compare_ddpg.ipynb | ###Markdown
Set things up
###Code
import numpy as np
import tensorflow as tf
from nn_policy import FeedForwardCritic
from nn_policy import FeedForwardPolicy
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.exploration_strategies.ou_strategy import OUStrategy
from sandbox.rocky.tf.algos.ddpg import DDPG as ShaneDDPG
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.deterministic_mlp_policy import \
DeterministicMLPPolicy
from sandbox.rocky.tf.q_functions.continuous_mlp_q_function import \
ContinuousMLPQFunction
from ddpg import DDPG as MyDDPG
from testing_utils import are_np_arrays_equal
env = TfEnv(HalfCheetahEnv())
action_dim = env.action_dim
obs_dim = env.observation_space.low.shape[0]
batch_size = 2
rewards = np.random.rand(batch_size)
terminals = (np.random.rand(batch_size) > 0.5).astype(np.int)
obs = np.random.rand(batch_size, obs_dim)
actions = np.random.rand(batch_size, action_dim)
next_obs = np.random.rand(batch_size, obs_dim)
ddpg_params = dict(
batch_size=64,
n_epochs=0,
epoch_length=0,
eval_samples=0,
discount=0.99,
qf_learning_rate=1e-3,
policy_learning_rate=1e-4,
soft_target_tau=0.001,
replay_pool_size=1000000,
min_pool_size=1000,
scale_reward=0.1,
)
discount = ddpg_params['discount']
print(rewards)
print(terminals)
print(obs)
print(actions)
print(next_obs)
###Output
[ 0.15005835 0.81457649]
[0 1]
[[ 0.43511439 0.21486068 0.43619294 0.66923761 0.20440605 0.82207058
0.83291033 0.72373561 0.89668103 0.67410786 0.80799981 0.64763201
0.01083204 0.4382325 0.93362274 0.55795521 0.63737658 0.7260999
0.9175968 0.17842764]
[ 0.41534872 0.5935848 0.63982088 0.23709139 0.9229585 0.80080515
0.99038569 0.92861875 0.28002253 0.97068026 0.24973167 0.93388785
0.99066874 0.4360376 0.57956691 0.67015587 0.19678966 0.18611555
0.22873158 0.39150123]]
[[ 0.04384032 0.64044176 0.06986806 0.99731914 0.78400959 0.12711896]
[ 0.90925847 0.96190726 0.1259375 0.01973137 0.47221903 0.60472708]]
[[ 0.29052842 0.92648082 0.00907505 0.4897972 0.45359199 0.36603501
0.26034967 0.76724245 0.64317068 0.36499064 0.72187408 0.24276138
0.22878558 0.8248953 0.64472811 0.08181222 0.31025709 0.35683179
0.68326028 0.1779539 ]
[ 0.93819824 0.93290809 0.15855846 0.27508406 0.55827918 0.51646106
0.30439037 0.35100247 0.65420072 0.16924955 0.09570054 0.53530208
0.23401812 0.57407776 0.31642575 0.36555799 0.50138211 0.34332719
0.62882041 0.24917595]]
###Markdown
Create my stuff
###Code
sess_me = tf.Session()
with sess_me.as_default():
es = OUStrategy(env_spec=env.spec)
ddpg_params['Q_weight_decay'] = 0.
qf_params = dict(
embedded_hidden_sizes=(100, ),
observation_hidden_sizes=(100, ),
hidden_nonlinearity=tf.nn.relu,
)
policy_params = dict(
observation_hidden_sizes=(100, 100),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh,
)
qf = FeedForwardCritic(
"critic",
env.observation_space.flat_dim,
env.action_space.flat_dim,
**qf_params
)
policy = FeedForwardPolicy(
"actor",
env.observation_space.flat_dim,
env.action_space.flat_dim,
**policy_params
)
my_algo = MyDDPG(
env,
es,
policy,
qf,
**ddpg_params
)
my_policy = my_algo.actor
my_qf = my_algo.critic
my_target_policy = my_algo.target_actor
my_target_qf = my_algo.target_critic
###Output
_____no_output_____
###Markdown
Set up Shane
###Code
sess_shane = tf.Session()
with sess_shane.as_default():
es = OUStrategy(env_spec=env.spec)
policy = DeterministicMLPPolicy(
name="init_policy",
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh,
)
qf = ContinuousMLPQFunction(
name="qf",
env_spec=env.spec,
hidden_sizes=(100, 100),
)
ddpg_params.pop('Q_weight_decay')
shane_algo = ShaneDDPG(
env,
policy,
qf,
es,
**ddpg_params
)
sess_shane.run(tf.initialize_all_variables())
shane_algo.init_opt()
# This initializes the optimizer parameters
sess_shane.run(tf.initialize_all_variables())
f_train_policy = shane_algo.opt_info['f_train_policy']
f_train_qf = shane_algo.opt_info['f_train_qf']
shane_target_qf = shane_algo.opt_info["target_qf"]
shane_target_policy = shane_algo.opt_info["target_policy"]
shane_policy = shane_algo.policy
shane_qf = shane_algo.qf
###Output
_____no_output_____
###Markdown
Measure stuff from Shane's algo
###Code
with sess_shane.as_default():
shane_policy_param_values = shane_policy.flat_to_params(
shane_policy.get_param_values()
)
shane_qf_param_values = shane_qf.flat_to_params(
shane_qf.get_param_values()
)
# TODO(vpong): why are these two necessary?
shane_target_policy.set_param_values(shane_policy.get_param_values())
shane_target_qf.set_param_values(shane_qf.get_param_values())
shane_actions, _ = shane_policy.get_actions(obs)
shane_qf_out = shane_qf.get_qval(obs, actions)
shane_next_actions, _ = shane_target_policy.get_actions(next_obs)
shane_next_target_qf_values = shane_target_qf.get_qval(next_obs, shane_next_actions)
shane_ys = rewards + (1. - terminals) * discount * shane_next_target_qf_values
###Output
_____no_output_____
###Markdown
Copy things to my algo
###Code
with sess_me.as_default():
my_policy.set_param_values(shane_policy_param_values)
my_target_policy.set_param_values(shane_policy_param_values)
my_qf.set_param_values(shane_qf_param_values)
my_target_qf.set_param_values(shane_qf_param_values)
###Output
_____no_output_____
###Markdown
Measure stuff from my algo
###Code
feed_dict = my_algo._update_feed_dict(rewards, terminals, obs,
actions, next_obs)
my_actions = sess_me.run(
my_policy.output,
feed_dict=feed_dict
)
my_qf_out = sess_me.run(
my_qf.output,
feed_dict=feed_dict
).flatten()
my_next_actions = sess_me.run(
my_target_policy.output,
feed_dict=feed_dict
)
my_next_target_qf_values = sess_me.run(
my_algo.target_critic.output,
feed_dict=feed_dict).flatten()
my_ys = sess_me.run(my_algo.ys, feed_dict=feed_dict).flatten()
my_policy_loss = sess_me.run(
my_algo.actor_surrogate_loss,
feed_dict=feed_dict)
my_qf_loss = sess_me.run(
my_algo.critic_loss,
feed_dict=feed_dict)
###Output
_____no_output_____
###Markdown
Check that Shane and my params stayed the same
###Code
shane_policy = shane_algo.policy
shane_qf = shane_algo.qf
with sess_shane.as_default():
shane_policy_param_values_new = shane_policy.flat_to_params(
shane_policy.get_param_values()
)
shane_qf_param_values_new = shane_qf.flat_to_params(
shane_qf.get_param_values()
)
shane_target_policy_param_values_new = shane_target_policy.flat_to_params(
shane_target_policy.get_param_values()
)
shane_target_qf_param_values_new = shane_target_qf.flat_to_params(
shane_target_qf.get_param_values()
)
my_policy_params_values_new = my_algo.actor.get_param_values()
my_qf_params_values_new = my_algo.critic.get_param_values()
my_target_policy_params_values_new = my_algo.target_actor.get_param_values()
my_target_qf_params_values_new = my_algo.target_critic.get_param_values()
print(all((a==b).all() for a, b in zip(shane_policy_param_values, shane_policy_param_values_new)))
print(all((a==b).all() for a, b in zip(shane_policy_param_values, my_policy_params_values_new)))
print(all((a==b).all() for a, b in zip(shane_policy_param_values, shane_target_policy_param_values_new)))
print(all((a==b).all() for a, b in zip(shane_policy_param_values, my_target_policy_params_values_new)))
print(all((a==b).all() for a, b in zip(shane_qf_param_values, shane_qf_param_values_new)))
print(all((a==b).all() for a, b in zip(shane_qf_param_values, my_qf_params_values_new)))
print(all((a==b).all() for a, b in zip(shane_qf_param_values, shane_target_qf_param_values_new)))
print(all((a==b).all() for a, b in zip(shane_qf_param_values, my_target_qf_params_values_new)))
###Output
True
True
True
True
True
True
True
True
###Markdown
Check critic outputs are the same
###Code
W1, b1, W2, b2, W3, b3 = shane_qf_param_values
output = np.matmul(obs, W1) + b1
output = np.maximum(output, 0)
output = np.hstack((output, actions))
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_qf_out = output.flatten()
print(my_qf_out)
print(shane_qf_out)
print(expected_qf_out)
###Output
[-0.07917806 0.00283957]
[-0.07917806 0.00283957]
[-0.07917813 0.00283952]
###Markdown
Check actor outputs are the same
###Code
W1, b1, W2, b2, W3, b3 = shane_policy_param_values
output = np.matmul(obs, W1) + b1
output = np.maximum(output, 0)
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_action = output
print(my_actions)
print(shane_actions)
print(expected_action)
###Output
[[-0.20947778 0.04484395 0.08546824 0.01056851 0.00029767 0.0958475 ]
[ 0.01458523 -0.0430692 0.10159081 -0.15388419 -0.06008253 0.18279688]]
[[-0.20947778 0.04484395 0.08546824 0.01056851 0.00029767 0.0958475 ]
[ 0.01458523 -0.0430692 0.10159081 -0.15388419 -0.06008253 0.18279688]]
[[-0.21262505 0.04487398 0.0856773 0.01056885 0.00029774 0.09614267]
[ 0.01458626 -0.04309584 0.10194247 -0.15511645 -0.06015505 0.18487474]]
###Markdown
Check that next action outputs are the same
###Code
W1, b1, W2, b2, W3, b3 = shane_policy_param_values
output = np.matmul(next_obs, W1) + b1
output = np.maximum(output, 0)
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_next_action = output
print(my_next_actions)
print(shane_next_actions)
print(expected_next_action)
###Output
[[-0.086945 -0.01997953 0.02840678 0.09882895 0.02658396 0.11652762]
[ 0.01991368 -0.0152898 0.01624201 0.11547601 -0.00939338 0.18017189]]
[[-0.086945 -0.01997953 0.02840678 0.09882895 0.02658396 0.11652762]
[ 0.01991368 -0.0152898 0.01624201 0.11547601 -0.00939338 0.18017189]]
[[-0.08716509 -0.01998221 0.02841444 0.09915265 0.02659021 0.11705939]
[ 0.0199163 -0.015291 0.01624345 0.1159935 -0.00939367 0.18216033]]
###Markdown
Check next critic outputs are the same
###Code
W1, b1, W2, b2, W3, b3 = shane_qf_param_values
output = np.matmul(next_obs, W1) + b1
output = np.maximum(output, 0)
output = np.hstack((output, expected_next_action))
output = np.matmul(output, W2) + b2
output = np.maximum(output, 0)
output = np.matmul(output, W3) + b3
expected_target_qf_values = output.flatten()
print(shane_next_target_qf_values)
print(my_next_target_qf_values)
print(expected_target_qf_values)
my_expected_ys = rewards + (1. - terminals) * discount * my_next_target_qf_values
shane_expected_ys = rewards + (1. - terminals) * discount * shane_next_target_qf_values
expected_ys = rewards + (1. - terminals) * discount * expected_target_qf_values
print(shane_ys)
print(shane_expected_ys)
print(my_ys)
print(my_expected_ys)
print(expected_ys)
###Output
[ 0.11367485 0.81457649]
[ 0.11367485 0.81457649]
[ 0.11367485 0.81457651]
[ 0.11367485 0.81457649]
[ 0.11369999 0.81457649]
###Markdown
Check losses are the sameOnly do this once since it changes the params!
###Code
with sess_shane.as_default():
shane_policy_loss, _ = f_train_policy(obs)
shane_qf_loss, qval, _ = f_train_qf(shane_ys, obs, actions)
print(my_policy_loss)
print(shane_policy_loss)
print(shane_qf_loss)
print(my_qf_loss)
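# sketch of a by-hand check: assuming the critic loss is the mean squared
# Bellman error (ignoring any weight-decay term), recompute it in numpy from
# the hand-computed values above and compare with the reported losses
expected_qf_loss = np.mean((expected_qf_out - expected_ys) ** 2)
print(expected_qf_loss)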
sess_me.close()
sess_shane.close()
###Output
_____no_output_____ |
Week_1/Assignment5_Boston_Marathon.ipynb | ###Markdown
###Code
!git clone https://github.com/llimllib/bostonmarathon
###Output
fatal: destination path 'bostonmarathon' already exists and is not an empty directory.
###Markdown
Import library
###Code
# libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pickle
# kmean and elbow curve
from sklearn.cluster import KMeans
from yellowbrick.cluster import KElbowVisualizer
# min-max scaler
from sklearn.preprocessing import MinMaxScaler
plt.rcParams['figure.figsize'] = (15, 5)
df_org = pd.read_csv('/content/bostonmarathon/results/2014/results.csv')
###Output
_____no_output_____
###Markdown
Phase 1: Build Unsupervised model Create dataframe that includes general features
###Code
df = df_org[['name','gender','age','state','country','city']]
df
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 31984 entries, 0 to 31983
Data columns (total 6 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 name 31984 non-null object
1 gender 31984 non-null object
2 age 31984 non-null int64
3 state 29408 non-null object
4 country 31984 non-null object
5 city 31983 non-null object
dtypes: int64(1), object(5)
memory usage: 1.5+ MB
###Markdown
Check null values
###Code
null_percent = pd.DataFrame(df.isnull().sum() / len(df), columns=['null_percent'])
null_percent
###Output
_____no_output_____
###Markdown
We remove features whose null fraction exceeds the 0.3 threshold
###Code
temp = []
for feature in null_percent.iterrows():
if float(feature[1]) > 0.3:
temp.append(feature[0])
temp
###Output
_____no_output_____
###Markdown
No features exceed the threshold, so none need to be removed
###Code
df = df.dropna()
###Output
_____no_output_____
###Markdown
check the fraction of distinct values for each feature
###Code
temp = [df.name, df.state, df.country, df.city]
for i in temp:
temp2 = len(i.unique()) / len(i)
print(temp2)
###Output
0.9979597388465724
0.002312295973884657
6.800870511425463e-05
0.1587323177366703
###Markdown
we remove the feature `name` since it has too many distinct values (99%). It's not useful for an unsupervised model
###Code
df = df.drop('name',axis=1)#.reset_index(drop=True)
df
###Output
_____no_output_____
###Markdown
find outliers
###Code
df2 = df.copy()
###Output
_____no_output_____
###Markdown
change `gender`, `state`, `country`, `city` to category values
###Code
df2['gender'] = df.gender.astype('category').cat.codes
df2['state'] = df.state.astype('category').cat.codes
df2['country'] = df.country.astype('category').cat.codes
df2['city'] = df.city.astype('category').cat.codes
df2
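# side note (sketch): the integer codes above can be mapped back to the
# original labels through the categorical's categories, e.g. for gender
gender_codes = dict(enumerate(df.gender.astype('category').cat.categories))
print(gender_codes)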
###Output
_____no_output_____
###Markdown
find outliers
###Code
Q1 = df2.quantile(0.25)
Q3 = df2.quantile(0.75)
IQR = Q3 - Q1
print(IQR)
count_outlier = []
for i in range(len(IQR)):
index = df2[(df2[IQR.index[i]] < (Q1[i] - 1.5 * IQR[i])) |
(df2[IQR.index[i]] > (Q3[i] + 1.5 * IQR[i]))].index
count_outlier.append(len(index))
percent = []
for count in count_outlier:
percent.append(100*(count/(df2.shape[0])))
outlier = pd.DataFrame({'count': count_outlier, 'percent':percent}, index=IQR.index)
outlier
###Output
_____no_output_____
###Markdown
we can't remove the outliers of `country`, since this feature contains only USA and Canada, so those 2175 outliers are simply the Canadian runners; removing them would make the feature useless. We just remove the outliers of `age`
###Code
IQR.index[1]
index = list(df2[(df2[IQR.index[1]] < (Q1[1] - 1.5 * IQR[1])) |
(df2[IQR.index[1]] > (Q3[1] + 1.5 * IQR[1]))].index)
#drop indexes of df_train and df_supervise
df.drop(index,inplace=True)
###Output
_____no_output_____
###Markdown
final `df`
###Code
df
###Output
_____no_output_____
###Markdown
milestone `df3`
###Code
df3 = df.copy()
df3
###Output
_____no_output_____
###Markdown
create one-hot encoded features for `gender` and `country`
###Code
df_gender = pd.get_dummies(df3.gender, prefix='gender')
df_country = pd.get_dummies(df3.country, prefix='country')
df_gender
df3 = df3.join([df_gender, df_country]).drop(['gender','country'],axis=1)
df3['state'] = df3.state.astype('category').cat.codes
df3['city'] = df3.city.astype('category').cat.codes
df3
df3.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 29378 entries, 9 to 31983
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 29378 non-null int64
1 state 29378 non-null int8
2 city 29378 non-null int16
3 gender_F 29378 non-null uint8
4 gender_M 29378 non-null uint8
5 country_CAN 29378 non-null uint8
6 country_USA 29378 non-null uint8
dtypes: int16(1), int64(1), int8(1), uint8(4)
memory usage: 1.9 MB
###Markdown
check distribution and normalization
###Code
def histplot(data, nums_feature):
''' data: dataframe
nums_feature: number of features (must be even) '''
w = int(np.ceil(nums_feature / 2))
fig,ax = plt.subplots(w, 2, figsize=(14,12))
i=0
try:
for x in range(w):
for y in range(2):
sns.histplot(data[data.columns[i]], kde=True, ax=ax[x,y])
i += 1
except IndexError:
pass
plt.tight_layout()
plt.show()
histplot(df3, np.ceil(len(df3.columns)))
###Output
_____no_output_____
###Markdown
milestone `df_final`
###Code
df_final = df3.copy()
###Output
_____no_output_____
###Markdown
minmaxtransform for `age`, `state`, `city`
###Code
df_final[['age','state','city']] = MinMaxScaler(feature_range= (0,1)).fit_transform(df3[['age','state','city']])
df_final
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
kmeans
score = [kmeans[i].fit(df_final).score(df_final) for i in range(len(kmeans))]
score #Opposite of the value of X on the K-means objective.
plt.plot(Nc,score)
plt.xlabel('Number of Clusters')
plt.ylabel('Score')
plt.title('Elbow Curve')
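# complementary sketch: the silhouette score gives another view on cluster
# quality (scikit-learn's silhouette_score; a random sample keeps it fast on
# ~29k rows, and the k values tried here are an arbitrary choice)
from sklearn.metrics import silhouette_score
for k in [2, 3, 4, 5]:
    labels = KMeans(n_clusters=k).fit_predict(df_final)
    print(k, silhouette_score(df_final, labels, sample_size=5000, random_state=0))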
###Output
_____no_output_____
###Markdown
Based on the chart, we will choose 3 clusters `clusters` = 3
###Code
cluster_model = KMeans(n_clusters=3).fit(df_final)
cluster_model.labels_
df['cluster'] = cluster_model.labels_
df
###Output
_____no_output_____
###Markdown
check_insight
###Code
def check_insight(data):
columns = ['country','gender','state','city']
fig, ax = plt.subplots(4, figsize=(18,35))
j=0
for i in columns:
temp = data[i].reset_index().drop(['unique','top'], axis=1)
df_temp = temp.melt('cluster', var_name='cols', value_name='samples')
#plot
sns.barplot(data=df_temp, y='samples', x='cluster',hue='cols',
ci="sd", palette="Blues_d",errwidth=0.3, alpha=0.4, ax=ax[j],).set_title(str('{}'.format(i)).upper())
j+=1
insight = df.groupby('cluster')[['country','gender','state','city']].describe()
insight
check_insight(insight)
###Output
_____no_output_____
###Markdown
We can see that for the `country` and `gender` features, the count of the most frequent value equals the total number of samples in certain clusters. Checking the dataframe `insight`, we can see that: 1. cluster 0 only includes `gender` = Female and `country` = USA; 2. cluster 2 only includes `gender` = Male and `country` = USA. This reduces the importance of other features like `state` or `city` `n_clusters` = 8
###Code
cluster_model = KMeans(n_clusters=8).fit(df_final)
cluster_model.labels_
df['cluster'] = cluster_model.labels_
insight = df.groupby('cluster')[['country','gender','state','city']].describe()
insight
check_insight(insight)
###Output
_____no_output_____
###Markdown
We get the same result, so we suspect that changing the algorithm might solve this problem (a rough sketch of one alternative is appended to the next code cell) save model We will save the model with `clusters` = 3
###Code
pickle.dump(cluster_model, open("/content/drive/MyDrive/CBD_Robotic/boston_marathon/cluster_model.pkl", "wb"))
# cd /content/drive/MyDrive/CBD_Robotic/boston_marathon
# cat model_requirements.txt
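# sketch of the "different algorithm" idea mentioned above: k-prototypes (from
# the third-party kmodes package, not used elsewhere in this notebook) clusters
# mixed numeric/categorical data directly, so the binary gender/country columns
# would not dominate the way they do with plain k-means. Everything below is an
# illustrative assumption, not a tested pipeline.
# from kmodes.kprototypes import KPrototypes
# kproto = KPrototypes(n_clusters=3, init='Cao', random_state=0)
# cat_cols = [df_final.columns.get_loc(c) for c in
#             ['gender_F', 'gender_M', 'country_CAN', 'country_USA']]
# labels = kproto.fit_predict(df_final.values, categorical=cat_cols)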
###Output
/content/drive/MyDrive/CBD_Robotic/boston_marathon
###Markdown
Phase 2: Using the Unsupervised model in the Boston Marathon project Load model & Import libraries
###Code
# libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pickle
# kmean and elbow curve
from sklearn.cluster import KMeans
from yellowbrick.cluster import KElbowVisualizer
# min-max scaler
from sklearn.preprocessing import MinMaxScaler
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.
warnings.warn(message, FutureWarning)
###Markdown
The data used to predict clusters must follow this feature order: `gender`: one-hot encoded as Male and Female; `country`: one-hot encoded as USA and CAN; `age`, `state` and `city` scaled into the range (0,1) 
###Code
model = pickle.load(open("/content/drive/MyDrive/CBD_Robotic/boston_marathon/cluster_model.pkl", "rb"))
###Output
_____no_output_____
###Markdown
Boston Marathon Project
###Code
!git clone https://github.com/llimllib/bostonmarathon
plt.rcParams['figure.figsize'] = (15, 5)
df = pd.read_csv('/content/bostonmarathon/results/2014/results.csv')
df
###Output
_____no_output_____
###Markdown
Check null values
###Code
null_percent = pd.DataFrame(df.isnull().sum() / len(df), columns=['null_percent'])
null_percent
###Output
_____no_output_____
###Markdown
We remove features whose null fraction exceeds the 0.3 threshold
###Code
temp = []
for feature in null_percent.iterrows():
if float(feature[1]) > 0.3:
temp.append(feature[0])
temp
###Output
_____no_output_____
###Markdown
No features exceed the threshold, so none need to be removed
###Code
df = df.drop(temp, axis=1)
df = df.dropna()
df = df.reset_index(drop=True)
df
###Output
_____no_output_____
###Markdown
replace invalid values and convert some features to numeric We can see some invalid placeholder values ('-') in `10k`, `20k`, etc...
###Code
df['10k'][2828]
###Output
_____no_output_____
###Markdown
remove and convert
###Code
temp = ['5k','10k','20k','25k','30k','35k','40k','half']
for i in temp:
#drop weird values
df.drop(df[df[i] == '-'].index, inplace=True)
#convert object type to numeric
df[i] = pd.to_numeric(df[i])
df
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 29103 entries, 0 to 29407
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 10k 29103 non-null float64
1 name 29103 non-null object
2 division 29103 non-null int64
3 25k 29103 non-null float64
4 gender 29103 non-null object
5 age 29103 non-null int64
6 official 29103 non-null float64
7 bib 29103 non-null object
8 genderdiv 29103 non-null int64
9 35k 29103 non-null float64
10 overall 29103 non-null int64
11 pace 29103 non-null float64
12 state 29103 non-null object
13 30k 29103 non-null float64
14 5k 29103 non-null float64
15 half 29103 non-null float64
16 20k 29103 non-null float64
17 country 29103 non-null object
18 city 29103 non-null object
19 40k 29103 non-null float64
dtypes: float64(10), int64(4), object(6)
memory usage: 4.7+ MB
###Markdown
plot some insights
###Code
df
df['official'].groupby(pd.cut(df['age'], range(15,90,5))).aggregate(np.average).plot(kind="bar", title="Average time by age group")
###Output
_____no_output_____
###Markdown
We can see that the age range with the best average times is from 20 to 40; after that, the average time increases with age
###Code
df['official'].groupby(pd.cut(df['age'], range(15,90,5))).aggregate(len).plot(kind="bar", title="# of runners by age group")
###Output
_____no_output_____
###Markdown
The number of runners aged 15 to 20, or above 65, is very small; these groups may contain outliers. find outliers
###Code
df2 = df.copy()
df2
df_temp = df2.drop(['name','gender','bib','country'], axis=1)
#convert categories
df_temp['state'] = df_temp.state.astype('category').cat.codes
df_temp['city'] = df_temp.city.astype('category').cat.codes
df_temp
#find IQR
Q1 = df_temp.quantile(0.25)
Q3 = df_temp.quantile(0.75)
IQR = Q3 - Q1
print(IQR)
#find outliers
count_outlier = []
for i in range(len(IQR)):
index = df_temp[(df_temp[IQR.index[i]] < (Q1[i] - 1.5 * IQR[i])) |
(df_temp[IQR.index[i]] > (Q3[i] + 1.5 * IQR[i]))].index
count_outlier.append(len(index))
percent = []
for count in count_outlier:
percent.append(100*(count/(df_temp.shape[0])))
#outliers
outlier = pd.DataFrame({'count': count_outlier, 'percent':percent}, index=IQR.index)
outlier
for i in range(len(IQR)):
index = list(df_temp[(df_temp[IQR.index[i]] < (Q1[i] - 1.5 * IQR[i])) |
(df_temp[IQR.index[i]] > (Q3[i] + 1.5 * IQR[i]))].index)
df_temp.drop(index, inplace=True)
df_temp
###Output
_____no_output_____
###Markdown
milestone `df3`
###Code
df3 = df2.drop(index=df2.drop(index=df_temp.index).index).reset_index(drop=True)
df3
###Output
_____no_output_____
###Markdown
divide into training features and supervised (target) features
###Code
df_train = df3[['name','gender','age','country','city','state']]
df_train
df_supervise = df3.drop(df_train.columns, axis=1)
df_supervise
###Output
_____no_output_____
###Markdown
create one-hot datasets for `gender` and `country`
###Code
df_gender = pd.get_dummies(df_train.gender, prefix='gender')
df_country = pd.get_dummies(df_train.country, prefix='country')
df_gender
###Output
_____no_output_____
###Markdown
Join onehot features to `df_train`
###Code
df_train = df_train.join([df_gender, df_country]).drop(['gender','country'],axis=1)
df_train['state'] = df_train.state.astype('category').cat.codes
df_train['city'] = df_train.city.astype('category').cat.codes
#drop `name`
df_train = df_train.drop(['name'],axis=1)
df_train
###Output
_____no_output_____
###Markdown
apply MinMaxScaler to `age`, `state`, `city`
###Code
df_train[['age','state','city']] = MinMaxScaler(feature_range= (0,1)).fit_transform(df_train[['age','state','city']])
df_train
###Output
_____no_output_____
###Markdown
Predict clusters Recall the feature order expected by the model:  We can see that the current column order does not match, so we should reorder the columns:
###Code
df_train2 = df_train.reindex(columns=['age','state','city','gender_F','gender_M','country_CAN','country_USA'])
df_train2
###Output
_____no_output_____
###Markdown
Everything is OK now and we are ready to predict
###Code
predict = model.predict(df_train2)
###Output
_____no_output_____
###Markdown
milestone `df4`
###Code
df4 = df3.copy()
df4['cluster'] = predict
df4
###Output
_____no_output_____
###Markdown
create `percent_cluster` and add it to `df4`
###Code
temp = df4.cluster.value_counts(normalize=True) * 100
percent_cluster = temp.to_frame().reset_index()
percent_cluster.rename(columns={"cluster": "percent_cluster"}, inplace=True)
percent_cluster
df4 = pd.merge(df4, percent_cluster, left_on='cluster', right_on='index', how='left').drop(['index'], axis=1)
df4
###Output
_____no_output_____
###Markdown
analyse the supervised data We add `age`, `cluster`, `percent_cluster` to `df_supervise`
###Code
df_supervise[['age','cluster','percent_cluster']] = df4[['age','cluster','percent_cluster']]
#remove `bib`
df_supervise.drop(['bib'], axis=1, inplace=True)
df_supervise
df_supervise.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 27361 entries, 0 to 27360
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 10k 27361 non-null float64
1 division 27361 non-null int64
2 25k 27361 non-null float64
3 official 27361 non-null float64
4 genderdiv 27361 non-null int64
5 35k 27361 non-null float64
6 overall 27361 non-null int64
7 pace 27361 non-null float64
8 30k 27361 non-null float64
9 5k 27361 non-null float64
10 half 27361 non-null float64
11 20k 27361 non-null float64
12 40k 27361 non-null float64
13 age 27361 non-null int64
14 cluster 27361 non-null int32
15 percent_cluster 27361 non-null float64
dtypes: float64(11), int32(1), int64(4)
memory usage: 3.2 MB
###Markdown
group columns by cluster and calculate mean of all columns
###Code
df_bycluster = df_supervise.groupby("cluster").mean()
df_bycluster = df_bycluster.reset_index()
#fill color for values
cm = sns.light_palette("green", as_cmap=True)
df_bycluster2 = df_bycluster.style.background_gradient(cmap=cm)
df_bycluster2
###Output
_____no_output_____
###Markdown
We see that cluster 0 usually has a higher time to finish the race, even though `age` in this cluster is the lowest (mean = 39.8). Cluster 1 and cluster 2 don't differ much in finish time at any distance; most features of cluster 2 are a bit higher, in contrast with `age`
###Code
def barplot(data, nums_feature):
''' data: dataframe
nums_feature: number of features (must be even) '''
w = int(np.ceil(nums_feature / 2))
fig,ax = plt.subplots(w, 2, figsize=(14,20))
i=1
try:
for x in range(w):
for y in range(2):
sns.barplot(data=data, x='cluster', y=data[data.columns[i]] ,
ci="sd", palette="dark", alpha=0.4, ax=ax[x,y])
i += 1
except IndexError:
pass
plt.tight_layout()
plt.show()
barplot(df_bycluster, np.ceil(len(df_bycluster.columns)))
###Output
_____no_output_____
###Markdown
check_insight
###Code
def check_insight(data):
columns = ['country','gender','state','city']
fig, ax = plt.subplots(4, figsize=(18,35))
j=0
for i in columns:
temp = data[i].reset_index().drop(['unique','top'], axis=1)
df_temp = temp.melt('cluster', var_name='cols', value_name='samples')
#plot
sns.barplot(data=df_temp, y='samples', x='cluster',hue='cols',
ci="sd", palette="Blues_d",errwidth=0.3, alpha=0.4, ax=ax[j],).set_title(str('{}'.format(i)).upper())
j+=1
insight = df4.groupby('cluster')[['country','gender','state','city']].describe()
insight
check_insight(insight)
###Output
_____no_output_____
###Markdown
We can see that for the `country` and `gender` features, the count of the most frequent value equals the total number of samples in certain clusters. Checking the dataframe `insight`, we can see that: 1. cluster 0 only includes `gender` = Female and `country` = USA; 2. cluster 2 only includes `gender` = Male and `country` = USA. This reduces the importance of other features like `state` or `city` Use for prediction We have enough ingredients to predict a lot of marathon-related information about a person Assume that we have this data about a person:
###Code
df_person_info = df4[['name','gender','age','country','city','state']]
df_person_info
###Output
_____no_output_____
###Markdown
After using the model `cluster_model`, we will have labeled data:
###Code
df_person_info[['cluster','percent_cluster']] = df4[['cluster','percent_cluster']]
df_person_info
###Output
_____no_output_____
###Markdown
Recall that we have `df_bycluster2`, which contains the mean values of all supervised features:
###Code
df_bycluster2
###Output
_____no_output_____
###Markdown
Finally, we join the two tables on `cluster`:
###Code
df_bycluster.rename(columns={'age' : 'age_avg'}, inplace=True)
df_final = pd.merge(df_person_info, df_bycluster.drop('percent_cluster',axis=1), left_on='cluster', right_on='cluster', how='left')
df_final
###Output
_____no_output_____ |
Transfer learning cats and dogs.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Train in a remote VM (MLC managed DSVM)* Create Workspace* Create Experiment* Upload data to a blob in workspace* Configure ACI run config* Submit the experiment in ACI* Register the retrained model PrerequisitesMake sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't. Install Azure ML SDK* !pip install azureml-core* !pip install azureml-contrib-iot* !pip install azure-mgmt-containerregistry Check the conda environmentMake sure you have started the notebook from the correct conda environment
###Code
import os
print(os.__file__)
# Check core SDK version number
import azureml.core as azcore
print("SDK version:", azcore.VERSION)
###Output
SDK version: 1.0.2
###Markdown
Initialize WorkspaceInitialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config('./aml_config/config.json')
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
Found the config file in: /home/arun/Documents/tensorflow-for-poets-2/aml_config/config.json
peabody
peabody
eastus
54646fde-e2bd-4f13-bb8a-2eb1174d1240
###Markdown
Create Experiment**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
experiment_name = 'cats_dogs'
from azureml.core import Experiment
exp = Experiment(workspace = ws, name = experiment_name)
###Output
_____no_output_____
###Markdown
Upload data files into datastoreRegister your existing azure storage as a new datastore with the workspace. The datastore should be backed by the Azure blob storage account. We can use it to transfer data from local to the cloud, and access it from the compute target.
###Code
from azureml.core.datastore import Datastore
ds = Datastore.register_azure_blob_container(workspace=ws,
datastore_name='mycatdog',
container_name='cat-dog',
account_name='mytraindata',
account_key='TPYHA0FQYymwr0it/Vubn/aAC8hYcuGNrp6TmicH9JidTI1PnwYeL9DZ51UnF5xN8oW26+eAWUnQOLkURa++Ig==',
create_if_not_exists=False)
data_path = "training_images" # This is the path to the folder in the blob container. Set this to None to get all the contents.
print(ds.name, ds.datastore_type, ds.account_name, ds.container_name)
###Output
mycatdog AzureBlob mytraindata cat-dog
###Markdown
Configure for using ACILinux-based ACI is available in `West US`, `East US`, `West Europe`, `North Europe`, `West US 2`, `Southeast Asia`, `Australia East`, `East US 2`, and `Central US` regions. See details [here](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-quotasregion-availability). Create a `DataReferenceConfiguration` object to inform the system what data folder to download to the compute target.
###Code
from azureml.core.runconfig import DataReferenceConfiguration
dr = DataReferenceConfiguration(datastore_name=ds.name,
path_on_datastore=data_path,
mode='download', # download files from datastore to compute target
overwrite=True)
###Output
_____no_output_____
###Markdown
Set the system to build a conda environment based on the run configuration. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "cpucluster3"
try:
compute_target = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing compute target.')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_D3', max_nodes=2)
# create the cluster
compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# Use the 'status' property to get a detailed status for the current AmlCompute.
print(compute_target.status.serialize())
from azureml.core.runconfig import RunConfiguration, DEFAULT_CPU_IMAGE
from azureml.core.conda_dependencies import CondaDependencies
# create a new runconfig object
run_config = RunConfiguration(framework = "python")
# Set compute target
run_config.target = compute_target.name
# set the data reference of the run configuration
run_config.data_references = {ds.name: dr}
# enable Docker
run_config.environment.docker.enabled = True
# set Docker base image to the default CPU-based image
run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE
# use conda_dependencies.yml to create a conda environment in the Docker image for execution
run_config.environment.python.user_managed_dependencies = False
# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.auto_prepare_environment = True
# specify CondaDependencies obj
run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['tensorflow==1.8.0'])
###Output
_____no_output_____
###Markdown
Submit the ExperimentSubmit script to run in the Docker image in the remote VM. If you run this for the first time, the system will download the base image, layer in packages specified in the `conda_dependencies.yml` file on top of the base image, create a container and then execute the script in the container.
###Code
from azureml.core import Run
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory = './scripts', script = 'retrain.py', run_config = run_config,
# pass the datastore reference as a parameter to the training script
arguments=['--image_dir', str(ds.as_download()),
'--architecture', 'mobilenet_1.0_224',
'--output_graph', 'outputs/retrained_graph.pb',
'--output_labels', 'outputs/output_labels.txt',
'--model_download_url', 'https://raw.githubusercontent.com/rakelkar/models/master/model_output/',
'--model_file_name', 'imagenet_2_frozen.pb'
])
run = exp.submit(config=src)
###Output
_____no_output_____
###Markdown
View run history details
###Code
run
run.wait_for_completion(show_output=True)
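# once the run completes, register the retrained graph from ./outputs with the
# workspace -- the "Register the retrained model" step from the outline above.
# A minimal sketch; the model name below is an arbitrary choice.
model = run.register_model(model_name='cats_dogs_retrained',
                           model_path='outputs/retrained_graph.pb')
print(model.name, model.version)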
###Output
RunId: cats_dogs_1547169681085
Streaming azureml-logs/60_control_log.txt
=========================================
Streaming log file azureml-logs/60_control_log.txt
Streaming log file azureml-logs/80_driver_log.txt
Streaming azureml-logs/80_driver_log.txt
========================================
https://raw.githubusercontent.com/rakelkar/models/master/model_output/mobilenet_v1_1.0_224_frozen.tgz
/tmp/imagenet
/tmp/imagenet/mobilenet_v1_1.0_224_frozen.tgz
>> Downloading mobilenet_v1_1.0_224_frozen.tgz (repetitive progress lines; log truncated at 10.4%)
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 10.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 11.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 12.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 13.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 14.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 15.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 16.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 17.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 18.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 19.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 20.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 21.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 22.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 23.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 24.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 25.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 26.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 27.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 28.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 29.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 30.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 31.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 32.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 33.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 34.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 35.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 36.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 37.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 38.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 39.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 40.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 41.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 42.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 43.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 44.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 45.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 46.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 47.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 48.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 49.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 50.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 51.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 52.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 53.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 54.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 55.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 56.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 57.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 58.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.5%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.6%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.7%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.8%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 59.9%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.0%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.1%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.2%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.3%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.4%
>> Downloading mobilenet_v1_1.0_224_frozen.tgz 60.5%
###Markdown
Register the Model
###Code
from azureml.core.model import Model
model = run.register_model(model_name = experiment_name, model_path = 'outputs/')
print(model.name, model.url, model.version, model.id, model.created_time)
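# Optional check (a minimal sketch, assuming `ws` is the same Workspace used above):
# fetch the registered model back by name to confirm the registration is visible.
registered = Model(ws, name = experiment_name)
print(registered.name, registered.version)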
###Output
_____no_output_____
###Markdown
Convert Model
###Code
from azureml.contrib.iot.model_converters import SnpeConverter
# submit a compile request
compile_request = SnpeConverter.convert_tf_model(
ws,
source_model=model,
input_node="input",
input_dims="1,224,224,3",
outputs_nodes = ["final_result"],
allow_unconsumed_nodes = True)
print(compile_request._operation_id)
# wait for the request to complete
compile_request.wait_for_completion(show_output=True)
# get the compiled model
compiled_model = compile_request.result
print(compiled_model.name, compiled_model.url, compiled_model.version, compiled_model.id, compiled_model.created_time)
compiled_model.download(target_dir="./converted/", exist_ok=True)
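# Quick look at what the conversion produced in ./converted/ (the exact file
# names depend on the SNPE converter output, so this is just a listing).
import os
print(os.listdir("./converted/"))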
###Output
_____no_output_____
###Markdown
Create Docker Image. Show the sample application file.
###Code
with open('./main.py', 'r') as f:
print(f.read())
from azureml.core.image import Image
from azureml.contrib.iot import IotContainerImage
image_config = IotContainerImage.image_configuration(
architecture="arm32v7",
execution_script="main.py",
dependencies=["cameraapi.py","iot.py","ipcprovider.py","utility.py"],
docker_file="Dockerfile",
tags = ["mobilenet"],
description = "MobileNet based demo module")
image = Image.create(name = "peabodymobilenet",
# this is the model object
models = [compiled_model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
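# Optional: report the build outcome; creation_state is assumed to be exposed
# by azureml.core.image.Image alongside the image location used below.
print(image.creation_state, image.image_location)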
###Output
_____no_output_____
###Markdown
Enter your container registry credentials. List the image to get its URI.
###Code
container_reg = ws.get_details()["containerRegistry"]
reg_name=container_reg.split("/")[-1]
resource_group_name = ws.resource_group
container_url = "\"" + image.image_location + "\","
subscription_id = ws.subscription_id
print('{}'.format(image.image_location))
print('{}'.format(reg_name))
print('{}'.format(subscription_id))
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.mgmt import containerregistry
client = ContainerRegistryManagementClient(ws._auth,subscription_id)
result= client.registries.list_credentials(resource_group_name, reg_name, custom_headers=None, raw=False)
username = result.username
password = result.passwords[0].value
###Output
_____no_output_____
###Markdown
Build your Deployment.json file
###Code
%%writefile ./deploymentpb.json
{
"modulesContent": {
"$edgeAgent": {
"properties.desired": {
"schemaVersion": "1.0",
"runtime": {
"type": "docker",
"settings": {
"minDockerVersion": "v1.25",
"loggingOptions": "",
"registryCredentials": {
#Automatically adding your acr details
acr_details = "\"" + reg_name +"\": {\n\t\t\t\"username\": \""+ username + "\",\n\t\t\t" + "\"password\":\"" + password + "\",\n\t\t\t" + "\"address\":\"" + reg_name + ".azurecr.io\"" + ",\n\t\t}"
print('{}'.format(acr_details))
%store acr_details >> deploymentpb.json
%%writefile -a ./deploymentpb.json
}
}
},
"systemModules": {
"edgeAgent": {
"type": "docker",
"settings": {
"image": "mcr.microsoft.com/azureiotedge-agent:1.0",
"createOptions": "{}",
"env": {
"UpstreamProtocol": {
"value": "MQTT"
}
}
}
},
"edgeHub": {
"type": "docker",
"status": "running",
"restartPolicy": "always",
"settings": {
"image": "mcr.microsoft.com/azureiotedge-hub:1.0",
"createOptions": "{\"User\":\"root\",\"HostConfig\":{\"PortBindings\":{\"5671/tcp\":[{\"HostPort\":\"5671\"}], \"8883/tcp\":[{\"HostPort\":\"8883\"}],\"443/tcp\":[{\"HostPort\":\"443\"}]}}}",
"env": {
"UpstreamProtocol": {
"value": "MQTT "
}
}
}
}
},
"modules": {
"VisionSampleModule": {
"version": "1.0",
"type": "docker",
"status": "running",
"restartPolicy": "always",
"settings": {
"image":
#adding your container URL
%store container_url >> deploymentpb.json
%%writefile -a ./deploymentpb.json
"createOptions": "{\"HostConfig\":{\"Binds\":[\"/data/misc/camera:/app/vam_model_folder\"],\"NetworkMode\":\"host\"},\"NetworkingConfig\":{\"EndpointsConfig\":{\"host\":{}}}}"
}
}
}
}
},
"$edgeHub": {
"properties.desired": {
"schemaVersion": "1.0",
"routes": {
"route": "FROM /messages/* INTO $upstream"
},
"storeAndForwardConfiguration": {
"timeToLiveSecs": 7200
}
}
}
}
}
###Output
_____no_output_____
###Markdown
Deploy image as an IoT module Set subscription to the same as your workspace
###Code
%%writefile ./setsub
az account set --subscription
iot_sub=ws.subscription_id
%store iot_sub >> setsub
!sh setsub
print ('{}'.format(iot_sub))
###Output
_____no_output_____
###Markdown
Provision Azure IoT Hub
###Code
#RG and location to create hub
iot_rg="vaidk_"+resource_group_name
iot_location=ws.get_details()["location"]
#temp to delete
iot_location="eastus2"
iot_hub_name="iothub-"+ ws.get_details()["name"]
iot_device_id="vadik_"+ ws.get_details()["name"]
iot_deployment_id="dpl"+ "cstmvaidk"
print('{}'.format(iot_hub_name))
%%writefile ./create
#Command to create hub and device
# Adding initialization steps
regcommand="\n echo Installing Extension ... \naz extension add --name azure-cli-iot-ext \n"+ "\n echo CREATING RG "+iot_rg+"... \naz group create --name "+ iot_rg +" --location "+ iot_location+ "\n" +"\n echo CREATING HUB "+iot_hub_name+"... \naz iot hub create --name "+ iot_hub_name + " --resource-group "+ iot_rg +" --sku S1"
#print('{}'.format(regcommand))
%store regcommand >> create
###Output
_____no_output_____
###Markdown
Create Identity for your device
###Code
#Adding Device ID
create_device="\n echo CREATING DEVICE ID "+iot_device_id+"... \n az iot hub device-identity create --device-id "+ iot_device_id + " --hub-name " + iot_hub_name +" --edge-enabled"
#print('{}'.format(create_device))
%store create_device >> create
# Create command and configure device
!sh create
###Output
_____no_output_____
###Markdown
Create Deployment
###Code
%%writefile ./deploy
# Commands for the IoT Edge deployment
#Add deployment command
deploy_device="\necho DELETING "+iot_deployment_id+" ... \naz iot edge deployment delete --deployment-id \"" + iot_deployment_id +"\" --hub-name \"" + iot_hub_name +"\"\necho DEPLOYING "+iot_deployment_id+" ... \naz iot edge deployment create --deployment-id \"" + iot_deployment_id + "\" --content \"deploymentpb.json\" --hub-name \"" + iot_hub_name +"\" --target-condition \"deviceId='"+iot_device_id+"'\" --priority 1"
print('{}'.format(deploy_device))
%store deploy_device >> deploy
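# Optional sanity check (not in the original notebook): the string templating above is easy
# to get wrong (quoting, trailing commas), so verify that the assembled deploymentpb.json
# actually parses before running the deployment script below.
import json
try:
    with open('deploymentpb.json') as f:
        json.load(f)
    print('deploymentpb.json parses as valid JSON')
except ValueError as err:
    print('deploymentpb.json is NOT valid JSON:', err)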
#run deployment to stage all work for when the model is ready
!sh deploy
###Output
_____no_output_____
###Markdown
Use this connection string on your camera to initialize it
###Code
%%writefile ./showdetails
# Command to show the device connection string
get_string="\n echo THIS IS YOUR CONNECTION STRING ... \naz iot hub device-identity show-connection-string --device-id \"" + iot_device_id + "\" --hub-name \"" + iot_hub_name+"\""
#print('{}'.format(get_string))
%store get_string >> showdetails
!sh showdetails
###Output
_____no_output_____ |
udacity/data-scientist-nanodegree/sparkify/.ipynb_checkpoints/final-model-checkpoint.ipynb | ###Markdown
Final model: Train the best model on the bigger dataset and evaluate it once more.
###Code
# Imports
import findspark
findspark.init()
findspark.find()
import pyspark
# Imports for creating spark session
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
conf = pyspark.SparkConf().setAppName('sparkify-capstone-model').setMaster('local')
sc = pyspark.SparkContext(conf=conf)
spark = SparkSession(sc)
# Imports for modelling, tuning and evaluation
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
# Imports for visualization and output
import matplotlib.pyplot as plt
from IPython.display import HTML, display
# Read in dataset
conf.set("spark.driver.maxResultSize", "0")
path = "out/features.parquet"
df = spark.read.parquet(path)
def createSubset(df, factor):
"""
INPUT:
df: The dataset to split
factor: How much of the dataset to return
OUTPUT:
df_subset: The split subset
"""
df_subset, df_dummy = df.randomSplit([factor, 1 - factor])
return df_subset
def printConfusionMatrix(tp, fp, tn, fn):
""" Simple function to output a confusion matrix from f/t/n/p values as html table.
INPUT:
data: The array to print as table
OUTPUT:
Prints the array as html table.
"""
html = "<table><tr><td></td><td>Act. True</td><td>False</td></tr>"
html += "<tr><td>Pred. Pos.</td><td>{}</td><td>{}</td></tr>".format(tp, fp)
html += "<tr><td>Negative</td><td>{}</td><td>{}</td></tr>".format(fn, tn)
html += "</table>"
display(HTML(html))
def showEvaluationMetrics(predictions):
""" Calculate and print the some evaluation metrics for the passed predictions.
INPUT:
predictions: The predictions to evaluate and print
OUTPUT:
Just prints the evaluation metrics
"""
# Calculate true, false positives and negatives to calculate further metrics later:
tp = predictions[(predictions.churn == 1) & (predictions.prediction == 1)].count()
tn = predictions[(predictions.churn == 0) & (predictions.prediction == 0)].count()
fp = predictions[(predictions.churn == 0) & (predictions.prediction == 1)].count()
fn = predictions[(predictions.churn == 1) & (predictions.prediction == 0)].count()
printConfusionMatrix(tp, fp, tn, fn)
# Calculate and print metrics
f1 = MulticlassClassificationEvaluator(labelCol = "churn", metricName = "f1") \
.evaluate(predictions)
accuracy = float((tp + tn) / (tp + tn + fp + fn))
recall = float(tp / (tp + fn))
precision = float(tp / (tp + fp))
print("F1: ", f1)
print("Accuracy: ", accuracy)
print("Recall: ", recall)
print("Precision: ", precision)
def printAUC(predictions, labelCol = "churn"):
""" Print the area under curve for the predictions.
INPUT:
predictions: The predictions to get and print the AUC for
OUTPU:
Prints the AUC
"""
print("Area under curve: ", BinaryClassificationEvaluator(labelCol = labelCol).evaluate(predictions))
def undersampleNegatives(df, ratio, labelCol = "churn"):
"""
Undersample the negatives (0's) in the given dataframe by ratio.
NOTE: The "selection" method here is of course very crude and in a real version should be randomized and shuffled.
INPUT:
df: dataframe to undersample negatives from
ratio: Undersampling ratio
        labelCol: Label column name in the input dataframe
OUTPUT:
A new dataframe with negatives undersampled by ratio
"""
zeros = df.filter(df[labelCol] == 0)
ones = df.filter(df[labelCol] == 1)
zeros = createSubset(zeros, ratio)
return zeros.union(ones)
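# A hedged alternative (not in the original notebook): stratified sampling with
# DataFrame.sampleBy avoids the crude split-based selection above by sampling the
# negative class at `ratio` and keeping all positives. This assumes the churn label
# is stored as an integer 0/1 column; `seed` is illustrative.
def undersampleNegativesStratified(df, ratio, labelCol="churn", seed=42):
    return df.sampleBy(labelCol, fractions={0: ratio, 1: 1.0}, seed=seed)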
def gbtPredictions(df_train, df_test, maxIter = 10, labelCol = "churn", featuresCol = "features"):
""" Fit, evaluate and show results for GBTClassifier
INPUT:
df_train: The training data set.
df_test: The testing data set.
        maxIter: Maximum number of iterations for the gradient boosting.
        labelCol: The label column name, "churn" by default.
        featuresCol: The features column name, "features" by default.
OUTPUT:
predictions: The model's predictions
"""
# Fit and train model
gbt = GBTClassifier(labelCol = labelCol, featuresCol = featuresCol, maxIter = maxIter).fit(df_train)
return gbt.transform(df_test)
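# Illustrative usage of the helper above (a sketch, not executed here; the final model
# in the cells below is fitted directly with tuned parameters instead):
# predictions = gbtPredictions(df_train, df_test, maxIter=20)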
df_train, df_test = df.randomSplit([0.9, 0.1])
gbt = GBTClassifier(labelCol = "churn", featuresCol = "features", maxIter = 120, maxDepth = 5).fit(undersampleNegatives(df_train, .7))
predictions = gbt.transform(df_test)
showEvaluationMetrics(predictions)
printAUC(predictions)
gbt.save("out/model")
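# Optional (not in the original notebook): featureImportances on the fitted GBT model shows
# which entries of the assembled feature vector drive the churn predictions.
print(gbt.featureImportances)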
# Output the notebook to an html file
from subprocess import call
call(['python', '-m', 'nbconvert', 'final-model.ipynb'])
###Output
_____no_output_____ |
Kaggle/iWildCam 2020/iwildcam_2020_demo_kernel.ipynb | ###Markdown
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import json
import os
from IPython.display import FileLink
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json') as f:
train_data = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json') as f:
test_data = json.load(f)
train_data.keys()
train = pd.DataFrame(train_data['annotations'])
train.head()
train.rename(columns={'count': 'cnt'}, inplace=True)
train[train.cnt > 1].describe()
train.describe()
train_img = pd.DataFrame(train_data['images'])
indices1 = []
indices2 = []
indices1.append( train[ train['image_id'] == '896c1198-21bc-11ea-a13a-137349068a90' ].index )
indices1.append( train[ train['image_id'] == '8792549a-21bc-11ea-a13a-137349068a90' ].index )
indices1.append( train[ train['image_id'] == '87022118-21bc-11ea-a13a-137349068a90' ].index )
indices1.append( train[ train['image_id'] == '98a295ba-21bc-11ea-a13a-137349068a90' ].index )
indices2.append( train_img[ train_img['id'] == '896c1198-21bc-11ea-a13a-137349068a90' ].index )
indices2.append( train_img[ train_img['id'] == '8792549a-21bc-11ea-a13a-137349068a90' ].index )
indices2.append( train_img[ train_img['id'] == '87022118-21bc-11ea-a13a-137349068a90' ].index )
indices2.append( train_img[ train_img['id'] == '98a295ba-21bc-11ea-a13a-137349068a90' ].index )
for _id in train_img[train_img['location'] == 537]['id'].values:
indices1.append( train[ train['image_id'] == _id ].index )
indices2.append(train_img[ train_img['id'] == _id ].index)
for the_index in indices1:
train = train.drop(train.index[the_index])
for the_index in indices2:
train_img = train_img.drop(train_img.index[the_index])
train_img.head()
fig = plt.figure(figsize=(19, 4))
ax = sns.distplot(train['category_id'])
plt.title('distribution of number of data per category')
fig = plt.figure(figsize=(30, 4))
ax = sns.barplot(x="category_id", y="cnt",data=train)
plt.title('distribution of count per id')
fig = plt.figure(figsize=(30, 4))
ax = sns.countplot(train_img['location'])
plt.title('distribution of number of animals by location')
labels_month = sorted(list(set(train_img['datetime'].map(lambda str: str[5:7]))))
# fig, ax = plt.subplots(1,2, figsize=(20,7)
plt.title('Count of train data per month')
ax = sns.countplot(train_img['datetime'].map(lambda str: str[5:7] ), order=labels_month)
ax.set(xlabel='Month', ylabel='count')
# ax.set(ylim=(0,55000))
train_img.describe()
train.describe()
train_img = train_img
train = train
train_img['category'] = train['category_id']
train_img.drop(train_img.columns.difference(['file_name','category']), 1, inplace=True)
train_img['category'] = train_img['category'].apply(str)
train_img.head()
train_img[ train_img['file_name'] == '883572ba-21bc-11ea-a13a-137349068a90.jpg' ].index
train_img.drop(123658,inplace=True)
train_img.drop(123651,inplace=True)
train_img.drop(123653,inplace=True)
# !pip install tensorflow-gpu==1.14.0
# !pip install keras==2.2.4
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split
# import pickle
import dill
from tqdm import tqdm
from os import makedirs
from os.path import expanduser, exists, join
train_datagen = ImageDataGenerator(
rescale=1./255,
horizontal_flip = True,
zoom_range = 0.3,
width_shift_range = 0.3,
height_shift_range=0.3,
rotation_range = 40,
shear_range = 0.3,
channel_shift_range=150.0,
fill_mode='nearest',
brightness_range=(0.2, 0.9)
)
# (max_rotate=20, max_zoom=1.3, max_lighting=0.4, max_warp=0.4,
# p_affine=1., p_lighting=1.
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_img[90000:120000],
directory='/kaggle/input/iwildcam-2020-fgvc7/train',
x_col="file_name",
y_col="category",
target_size=(150,150),
batch_size=256,
classes = train_img['category'].unique().tolist(),
class_mode='categorical')
labels = (train_generator.class_indices)
labels = dict((v,k) for k,v in labels.items())
print(labels)
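# Optional sanity check (not in the original notebook): pull a single batch to confirm the
# generator yields the expected shapes; this loads one batch of images, so it can take a moment.
x_batch, y_batch = next(train_generator)
print(x_batch.shape, y_batch.shape)  # (batch_size, 150, 150, 3) and (batch_size, n_classes)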
# cache_dir = expanduser(join('~', '.keras'))
# if not exists(cache_dir):
# makedirs(cache_dir)
# models_dir = join(cache_dir, 'models')
# if not exists(models_dir):
# makedirs(models_dir)
# !cp ../input/keras-pretrained-models/*notop* ~/.keras/models/
# !cp ../input/keras-pretrained-models/imagenet_class_index.json ~/.keras/models/
# !cp ../input/keras-pretrained-models/resnet50* ~/.keras/models/
!ls ../input/keras-pretrained-models/
# !git clone https://github.com/qubvel/efficientnet.git
# import efficientnet.efficientnet.tfkeras as efn
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Dropout,BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
pre_trained_model = tf.keras.applications.InceptionV3(include_top=False,input_shape = (150, 150, 3),
weights='../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
# pre_trained_model = efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(96, 96, 3))
for layer in pre_trained_model.layers:
layer.trainable = False
# x = pre_trained_model.output
# predictions = Dense(573, activation="softmax")(x)
# model = Model(inputs=pre_trained_model.input, outputs=predictions)
model = Sequential()
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(216,activation='softmax'))
pretrainedInput = pre_trained_model.input
pretrainedOutput = pre_trained_model.output
output = model(pretrainedOutput)
model = Model(pretrainedInput, output)
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
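# A hedged suggestion (not in the original notebook): checkpointing and early stopping
# callbacks that could be passed to fit_generator below via callbacks=callbacks.
# 'accuracy' is monitored because no validation generator is defined in this kernel.
callbacks = [
    tf.keras.callbacks.ModelCheckpoint('model_checkpoint.h5', monitor='accuracy', save_best_only=True),
    tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=2, restore_best_weights=True)
]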
history = model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n//train_generator.batch_size+1,
epochs=5,
shuffle = True,
verbose = 1)
import matplotlib.pyplot as plt
acc = history.history['accuracy']
loss = history.history['loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.title('Training accuracy vs epochs')
plt.legend(loc=0)
plt.figure()
plt.show()
model.save('Modeln.h5')
FileLink('Modeln.h5')
test = pd.DataFrame(test_data['images'])
test.head()
test.describe()
test_data.keys()
test_datagen = ImageDataGenerator(rescale = 1./255.)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test,
directory='/kaggle/input/iwildcam-2020-fgvc7/test',
x_col="file_name",
target_size=(150, 150),
batch_size=64,class_mode=None)
new_model = tf.keras.models.load_model('/kaggle/input/model-1/Modeln.h5')
preds = new_model.predict_generator(test_generator,
steps=test_generator.n//test_generator.batch_size+1,
verbose=1)
predicted_class_indices=np.argmax(preds,axis=1)
labels = (train_generator.class_indices)
labels = dict((v,k) for k,v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
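# Optional sanity check (not in the original notebook): a quick look at the distribution of
# predicted categories before building the submission file.
print(pd.Series(predictions).value_counts().head(10))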
Id=test.id
results=pd.DataFrame({"Id":Id,
"Category":predictions})
submission = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
submission = submission.drop(['Category'], axis=1)
submission = submission.merge(results, on='Id')
submission.to_csv('modeln.csv', index=False)
FileLink('modeln.csv')
# results.to_csv("results.csv",index=False)
###Output
_____no_output_____ |
eopf-notebooks/eopf_product_data_structure/EOPF_S2_MSI_v1.2.ipynb | ###Markdown
EOPF S2 MSI L1C Product Data Structure Proposal
###Code
import os
import xarray as xr
import glob
import rasterio
from IPython.core.display import HTML
import glob
import re
from utils import display
from EOProductDataStructure import EOProductBuilder, EOVariableBuilder, EOGroupBuilder
from lxml import etree
variable_chunks = { 'B01': 192,
'B02': 1024, 'B03': 1024, 'B04': 1024,
'B05': 640, 'B06': 640, 'B07': 640, 'B08': 640, 'B8A': 640,
'B09': 192, 'B10': 192,
'B11': 640, 'B12': 640,
'TCI': 256 }
def get_jp2_ds(path_to_product, glob_patterns, var_pattern, resolution):
variables = {}
coordinates = {}
attributes = {}
for glob_pattern in glob_patterns:
files = glob.glob(path_to_product + '/' + glob_pattern)
for file in files:
var = re.match(var_pattern, file[file.rfind('/')+1:]).group(1)
chunks = variable_chunks[var]
ds1 = xr.open_dataset(file, chunks=chunks, engine='rasterio', mask_and_scale=False)
if var == 'TCI':
variables['red'] = ds1.get('band_data')[0].drop('band')
variables['green'] = ds1.get('band_data')[1].drop('band')
variables['blue'] = ds1.get('band_data')[2].drop('band')
else:
variables[var] = ds1.get('band_data')[0].drop('band')
for attr in ds1.attrs:
if attr not in attributes:
attributes[attr] = ds1.attrs[attr]
ds = xr.Dataset(data_vars=variables, coords=coordinates, attrs=attributes).rename({'x': 'x_'+resolution, 'y': 'y_'+resolution}).drop(['spatial_ref', 'x_'+resolution, 'y_'+resolution])
return ds
def get_coord_ds(path_to_product, glob_patterns, resolutions):
variables = {}
coordinates = {}
attributes = {}
for glob_pattern, resolution in zip(glob_patterns, resolutions):
files = glob.glob(path_to_product + '/' + glob_pattern)
for file in files:
ds1 = xr.open_dataset(file, engine='rasterio', mask_and_scale=False).rename({'x': 'x_'+resolution, 'y': 'y_'+resolution})
variables['x_' + resolution] = ds1['x_' + resolution]
variables['y_' + resolution] = ds1['y_' + resolution]
if 'spatial_ref' in ds1 and 'spatial_ref' not in variables:
variables['spatial_ref'] = ds1['spatial_ref']
for attr in ds1.attrs:
if attr not in attributes:
attributes[attr] = ds1.attrs[attr]
ds = xr.Dataset(data_vars=variables, coords=coordinates, attrs=attributes)
return ds
band_names = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12']
def get_values(dom, xpath):
list = dom.xpath(xpath, namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})
array = [[float(i) for i in x.text.split()] for x in list]
da = xr.DataArray(array, dims=['y_tiepoints', 'x_tiepoints'])
return da
def get_shape(dom, xpath):
list = dom.xpath(xpath, namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})
return [len(list), len(list[0].text.split())]
def parse_xml(path_to_product, glob_pattern):
path = glob.glob(path_to_product + '/' + glob_pattern)[0]
dom = etree.parse(path)
return dom
def get_angles_ds(path_to_product, glob_pattern):
dom = parse_xml(path_to_product, glob_pattern)
sza = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/Values_List/VALUES')
saa = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Azimuth/Values_List/VALUES')
bands = {'sza': sza, 'saa': saa}
for band_id in range(13):
for detector_id in range(1,7):
vza = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Viewing_Incidence_Angles_Grids[@bandId="{}" and @detectorId="{}"]/Zenith/Values_List/VALUES'
.format(band_id, detector_id))
vaa = get_values(dom, 'n1:Geometric_Info/Tile_Angles/Viewing_Incidence_Angles_Grids[@bandId="{}" and @detectorId="{}"]/Azimuth/Values_List/VALUES'
.format(band_id, detector_id))
bands['vza_{}_{}'.format(band_names[band_id], detector_id)] = vza
bands['vaa_{}_{}'.format(band_names[band_id], detector_id)] = vaa
ds = xr.Dataset(bands)
return ds
def get_tiepoints_ds(path_to_product, glob_pattern):
dom = parse_xml(path_to_product, glob_pattern)
shape_y_x = get_shape(dom, 'n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/Values_List/VALUES')
ymax = float(dom.xpath('n1:Geometric_Info/Tile_Geocoding/Geoposition[@resolution="10"]/ULY',
namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})[0].text)
xmin = float(dom.xpath('n1:Geometric_Info/Tile_Geocoding/Geoposition[@resolution="10"]/ULX',
namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})[0].text)
ystep = float(dom.xpath('n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/ROW_STEP',
namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})[0].text)
xstep = float(dom.xpath('n1:Geometric_Info/Tile_Angles/Sun_Angles_Grid/Zenith/COL_STEP',
namespaces={'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-1C_Tile_Metadata.xsd'})[0].text)
y = [ymax - i * ystep - ystep / 2 for i in range(shape_y_x[0])]
x = [xmin + i * xstep + xstep / 2 for i in range(shape_y_x[1])]
ds = xr.Dataset({'y_tiepoints': y, 'x_tiepoints': x})
return ds
path_to_product = glob.glob("data/S2?_MSIL1C*.SAFE")[0]
# Groups definition
groups = {}
groups['coordinates'] = get_coord_ds(path_to_product, ["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B02','B05','B01']], ['10m', '20m', '60m']) # extensional coordinates, metric and geographic
groups['tiepoints'] = get_tiepoints_ds(path_to_product, "GRANULE/*/MTD_TL.xml")
#groups['crs'] = get_crs_ds(path_to_product, [""]) # utm zone, geographic footprint, metric corners, metric resolutions, parameters to feed proj
groups['measurements_10m'] = get_jp2_ds(path_to_product,["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B02','B03','B04','B08']], '.*_(...).jp2', '10m')
groups['measurements_20m'] = get_jp2_ds(path_to_product,["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B05','B06','B07','B8A','B11','B12']], '.*_(...).jp2', '20m')
groups['measurements_60m'] = get_jp2_ds(path_to_product,["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['B01','B09','B10']], '.*_(...).jp2', '60m')
groups['quicklook_tci'] = get_jp2_ds(path_to_product,["GRANULE/*/IMG_DATA/*_%s.jp2" % r for r in ['TCI']], '.*_(...).jp2', '10m')
groups['geometry'] = get_angles_ds(path_to_product,"GRANULE/*/MTD_TL.xml") # angles on tiepoint raster
#groups['instrument'] = get_xml_ds(path_to_product,["MTD_MSIL1C.xml"]) # band characteristics, gains
#groups['meteo'] = get_ds(path_to_product,["tie_meteo"])
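# Optional sanity check (not in the original notebook): report the dimensions of each group
# before assembling the EOProduct structure below.
for name, ds in groups.items():
    print(name, dict(ds.dims))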
# Create a new EOProduct instance
product_name = os.path.basename("S2_MSIL1C")
product = EOProductBuilder("S2_MSIL1C")
# do the same work as before
product.metadatas = ["MTD_MSIL1C.xml"]
# ==================== Product groups setting ========================
for group_name, ds in groups.items():
group = EOGroupBuilder(group_name)
group.attrs["description"] = f"{group_name} Data Group"
group.dims = ds.dims
for v, var in ds.variables.items():
variable = EOVariableBuilder(v, default_attrs = False)
variable.dtype = var.dtype
variable.dimensions = var.dims
variable.attrs = var.attrs
group.variables.append(variable)
product.groups.append(group)
product.attrs['metadata_files'] = '[xfdumanifest.xml]'
print("inputs read")
display(product.compute())
###Output
_____no_output_____ |