path | concatenated_notebook |
---|---|
ipynb/test_perm.ipynb | ###Markdown
A notebook to test permuting a list of things.
###Code
import os
import numpy as np
import pprint
import scipy.stats as stats
import yaml
import itertools as ite
def print_iter(iterable):
for i, x in enumerate(iterable):
print('{:2}: {}'.format(i+1, x))
###Output
_____no_output_____
###Markdown
Self Cartesian product
###Code
print_iter(ite.product('abc', repeat=2))
###Output
_____no_output_____
###Markdown
Cartesian product
###Code
print_iter(ite.product('ABC', 'de', [0, 1]))
print_iter(ite.permutations('abc'))
class A(object):
pass
str(A)
import collections
d = collections.OrderedDict()
d['a'] = 1
d['c'] = 3
d['b'] = 2
for x in d.items():
print(x)
for x in ite.zip_longest( ('a','b','c'), (1, 2)):
print(x)
###Output
_____no_output_____ |
work/Geodescriber.ipynb | ###Markdown
Developing the Geodescriber
* Title: load a geostore via LMIPy and use OSM reverse geocoding to generate data for the bounds of the geometry; create a title based on the agreement between the bounds.
* EE query.
* Dynamic paragraph creation.
* Translate to a different language: use the translation service to return the response in a target language.
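A minimal, self-contained sketch of the titling idea (the dictionaries and place names below are invented reverse-geocoding results, not output from this notebook):
###Code
# Illustrative only: pick the finest admin level on which the two corners of a
# geometry's bounding box agree, and build a title from it.
def sketch_title(corner_a, corner_b):
    for level in ['region', 'county', 'country', 'continent']:
        if corner_a.get(level) and corner_a.get(level) == corner_b.get(level):
            return f"Area in {corner_a[level]}"
    return "Area of interest"

corner_a = {'region': None, 'county': 'Navarra', 'country': 'Spain', 'continent': 'Europe'}
corner_b = {'region': None, 'county': 'Huesca', 'country': 'Spain', 'continent': 'Europe'}
sketch_title(corner_a, corner_b)  # -> 'Area in Spain'
###Output
_____no_output_____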
###Code
#!pip install geocoder
#!pip install googletrans
#!pip install LMIPy
#!pip install earthengine-api
#!pip install oauth2client
import geocoder # https://geocoder.readthedocs.io/
from googletrans import Translator #https://py-googletrans.readthedocs.io/en/latest/#googletrans-languages
import LMIPy
# Area between spain and france
atts={'geojson': {'type': 'FeatureCollection',
'features': [{'type': 'Feature',
'properties': {},
'geometry': {'type': 'Polygon',
'coordinates': [[[-0.87890625, 43.329173667843904],
[-1.6149902343749998, 42.75104599038353],
[-1.1865234375, 42.35854391749705],
[-0.6427001953125, 42.755079545072135],
[-0.45043945312499994, 42.9524020856897],
[-0.87890625, 43.329173667843904]]]}}]}}
g1 = LMIPy.Geometry(attributes=atts)#, server='http://localhost:9000')
g1
g1.shape()[0]
# Weird area in Spain
atts= {'geojson': {'type': 'FeatureCollection',
'features': [{'type': 'Feature',
'properties': {},
'geometry': {'type': 'Polygon',
'coordinates': [[[-4.866943359375, 41.69752591075902],
[-5.756835937499999, 41.31907562295139],
[-5.592041015625, 41.08763212467916],
[-4.89990234375, 41.13729606112276],
[-4.7021484375, 41.08763212467916],
[-4.41650390625, 40.57224011776902],
[-4.72412109375, 40.12849105685408],
[-5.042724609375, 39.926588421909436],
[-5.218505859375, 39.58029027440865],
[-4.801025390625, 39.36827914916014],
[-4.02099609375, 39.37677199661635],
[-3.7902832031250004, 40.07807142745009],
[-4.02099609375, 40.522150985623796],
[-4.515380859375, 41.104190944576466],
[-4.383544921875, 41.376808565702355],
[-4.32861328125, 41.65649719441145],
[-4.866943359375, 41.69752591075902]]]}}]}}
g2 = LMIPy.Geometry(attributes=atts) #, server='http://localhost:9000')
g2
g2.shape()[0]
# Worker functions for generating a title
def reverse_geocode_a_geostore(geostore):
""" Take an LMIPy.Geostore object and return geocoding results on the min/max coordinate locations"""
s = geostore.shape()[0]
min_coords = [s.bounds[1], s.bounds[0]]
max_coords = [s.bounds[3], s.bounds[2]]
geocode_results = []
for coords in [min_coords, max_coords]:
geocode_results.append(geocoder.osm(coords, method='reverse'))
return geocode_results
def create_title_elements(geostore):
"""Take revsere geocoding results for upper and lower corners of a polygons bounds,
Extract the region, county, country, continent attributes of the locations.
Use the overlap to set an appropriate title.
"""
geocode_results = reverse_geocode_a_geostore(geostore)
key_locations = []
for result in geocode_results:
d = {}
try:
d['region'] = result.geojson.get('features')[0].get('properties').get('region')
except:
d['region'] = None
try:
d['county'] = result.geojson.get('features')[0].get('properties').get('county')
except:
d['county'] = None
try:
d['country'] = result.geojson.get('features')[0].get('properties').get('country')
except:
d['country'] = None
try:
d['continent'] = continent_lookup[iso_to_continent[result.geojson.get('features')[0].get('properties').get('country_code').upper()]]
except:
d['continent'] = None
key_locations.append(d)
# Check for overlap between upper and lower bounds
same_region = key_locations[0].get('region') == key_locations[1].get('region')
same_county = key_locations[0].get('county') == key_locations[1].get('county')
same_country = key_locations[0].get('country') == key_locations[1].get('country')
same_continent = key_locations[0].get('continent') == key_locations[1].get('continent')
# Set a title
if same_region:
title= [key_locations[0]['region'], key_locations[0]['county']]
elif same_county:
title= [key_locations[0]['county'], key_locations[0]['country']]
elif same_country:
title = [key_locations[0]['country'], key_locations[0]['continent']]
elif same_continent:
title = [key_locations[0]['continent']]
else:
title = None
return title
def create_title(title_elements):
"""Create a string(title) from a list input."""
if len(title_elements) == 2:
return f"Area in {title_elements[0]}, {title_elements[1]}"
elif len(title_elements) == 1:
return f"Area in {title_elements[0]}"
else:
return None
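# Quick illustration of create_title on hypothetical title_elements lists
# (not notebook output): two elements give "Area in Navarre, Spain",
# one element gives "Area in Europe".
create_title(['Navarre', 'Spain'])
create_title(['Europe'])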
# geocode_results = reverse_geocode_a_geostore(g)
# geocode_results
# g1 = LMIPy.Geometry('f6726c97139f362ca9a10d70dc686375', server='http://localhost:9000')
g1
title_elements = create_title_elements(g1)
title = create_title(title_elements)
title
###Output
_____no_output_____
###Markdown
Check speed of response and translation. N.B. the title and paragraph should be translated together and then split, to save on requests.
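A sketch of that batched pattern (assuming googletrans is installed and reachable; the strings below are placeholders). It mirrors the single-request call used in main() further down:
###Code
# Translate the title and description together in one request, then split the results.
from googletrans import Translator

translator = Translator()
results = translator.translate(['Area in Navarra, Spain',
                                'Area of 1,000 ha located in a lowland area.'],
                               dest='es', src='en')
translated_title, translated_description = [r.text for r in results]
###Output
_____no_output_____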
###Code
%%time
title_elements = create_title_elements(g2)
title = create_title(title_elements)
print(title)
create_title(title_elements)
%%time
title = create_title(create_title_elements(g2))
title
translator = Translator()
translation = translator.translate(title, dest="es")
translation
translation = translator.translate(title, dest="fr")
translation
geostore2 = LMIPy.Geometry('f6726c97139f362ca9a10d70dc686375', server='http://localhost:9000')
geostore2
# Test of geometry in RW API
g = LMIPy.Geometry('37bd82f55b0a98dca94a46ad7789e2a3')
title = create_title(create_title_elements(g))
title
translator.translate(title, dest="fr")
###Output
_____no_output_____
###Markdown
Earth Engine portion
* Step 1 - Build a layer with multiple bands which we can intersect against in EE
* Step 2 - Run a zonal stats reducer on the area
* Step 3 - Create a decision tree and dynamic sentence (see the sketch below)
* Include: amount of tree cover area, elevation (split into categories), biogeophysical regions? land-cover classes? protected areas?
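For reference, a minimal sketch of step 3 on a made-up frequencyHistogram-style result (the pixel counts are invented, not Earth Engine output, and the thresholds are applied to the fraction of total pixels, a slight simplification of the ratio test used below):
###Code
# Illustrative only: turn a per-class pixel-count histogram into a proportion
# and pick a sentence with simple thresholds.
fake_stats = {'isMountain': {'0': 300.0, '1': 700.0}}  # invented pixel counts

counts = fake_stats['isMountain']
mountain_fraction = counts.get('1', 0) / sum(counts.values())

if mountain_fraction > 0.75:
    sentence = "a mountainous area"
elif mountain_fraction > 0.5:
    sentence = "a mix of lowland and mountain areas"
else:
    sentence = "a predominantly lowland area"
sentence  # -> 'a mix of lowland and mountain areas'
###Output
_____no_output_____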
###Code
import ee
ee.Initialize()
def get_region(geom):
"""Take a valid geojson object, iterate over all features in that object.
Build up a list of EE Polygons, and finally return an EE Feature
collection. New as of 19th Sep 2017 (needed to fix a bug where the old
function ignored multipolys)
"""
polygons = []
for feature in geom.get('features'):
shape_type = feature.get('geometry').get('type')
coordinates = feature.get('geometry').get('coordinates')
if shape_type == 'MultiPolygon':
polygons.append(ee.Geometry.MultiPolygon(coordinates))
elif shape_type == 'Polygon':
polygons.append(ee.Geometry.Polygon(coordinates))
else:
pass
return ee.FeatureCollection(polygons)
g = LMIPy.Geometry(id_hash="c9d9da7b63f1983ff8d274e9f15efbf7") # area in spain with no Intact forest
g = LMIPy.Geometry(id_hash="9d7a5615df0543881a0f710fa61a1382") # area in georgia with Intact Forest
# Grab the layer
img = ee.Image('users/benlaken/geodesriber-asset')
# Create an EE feature from a geostore object
region = get_region(g2.attributes.get('geojson'))
stats = img.reduceRegion(**{
'reducer': ee.Reducer.frequencyHistogram(),
'geometry': region,
'bestEffort': True,
}).getInfo()
stats
# Some sentences will either have a null case or be None. If they are none, they should not be used to build the para.
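# Illustrative sketch (not from the original notebook): None sentences can simply
# be dropped when the final paragraph is assembled, e.g.
_example_parts = ["This region contains Intact Forest.", None, "It has a polar tundra climate."]
" ".join(part for part in _example_parts if part)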
not_intact = stats.get('intact2016').get('0', None)
is_intact = stats.get('intact2016').get('1', None)
intact_sentence = None
if is_intact:
# compare the ratio of intact to non-intact pixel counts
if is_intact/not_intact > 0.75:
intact_sentence = "This region contains a large amount of Intact Forest."
elif is_intact/not_intact > 0.5:
intact_sentence = "This region contains Intact Forest."
else:
intact_sentence = "This region contains some Intact Forest."
else:
intact_sentence = 'This region has no Intact Forest.'
intact_sentence
is_mountain = stats.get('isMountain').get('1')
not_mountain = stats.get('isMountain').get('0')
mountain_sentence = None
if is_mountain:
if is_mountain/not_mountain > 0.75:
mountain_sentence = "a mountainous area"
elif is_mountain/not_mountain > 0.5:
mountain_sentence = "a mix of lowland and mountains areas"
else:
mountain_sentence = "a predominanty lowland area"
else:
mountain_sentence = "A lowland area."
mountain_sentence
# koppen_sentence = None
# total = 0
# for item in stats.get('koppen'):
# total += stats.get('koppen').get(item)
# for item in stats.get('koppen'):
# tmp_description = koppen_translated[item]
# tmp_proportion = stats.get('koppen').get(item)/ total
# print(tmp_description, tmp_proportion)
# if tmp_proportion > 0.75:
# koppen_sentence = f"The majority of the area has a {tmp_description}."
# koppen_sentence
def give_sorted_d(lookup_dic, key):
"""Return a dic with keys as integer percentage of coverage proportion."""
total = 0
for item in stats.get(key):
total += stats.get(key).get(item)
tmp_d = {}
for item in stats.get(key):
tmp_proportion = int((stats.get(key).get(item)/ total) * 100)
#print(item, tmp_proportion)
tmp_d[tmp_proportion] = lookup_dic[item]
s_dic = {}
for kk in sorted(tmp_d,reverse=True):
s_dic[kk] = tmp_d[kk]
return s_dic
# create a sorted list of items to deal with possibilities of different Koppen climates
tmp_d = give_sorted_d(lookup_dic=koppen_translated, key='koppen')
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
koppen_sentence = f"The area has a predominantly {tmp_d[proportion_list[0]]}."
elif proportion_list[0] > 50:
koppen_sentence = f"The majority of the region has {tmp_d[proportion_list[0]]}. It also has areas of {tmp_d[proportion_list[1]]}."
else:
koppen_sentence = f"The area has mixed environmental conditions, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}."
koppen_sentence
stats
## Need to extract the mapping between the biome number and biome name and ecoregion number and name from the shapefile
ecoregion_sentence = None
tmp_d = give_sorted_d(ecoid_to_ecoregion,'ecoregion')
tmp_d
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
ecoregion_sentence = f"The region's habitat is comprised of {tmp_d[proportion_list[0]]}."
elif proportion_list[0] > 50:
ecoregion_sentence = f"The majority of the regions habitat is comprised of {tmp_d[proportion_list[0]]}. It also includes areas of {tmp_d[proportion_list[1]]}."
else:
ecoregion_sentence = f"The region is made up of different habitats, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}"
ecoregion_sentence
biome_sentence = None
tmp_d = give_sorted_d(biomeNum_2_biomeName,'biome')
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
biome_sentence = f"It is part of the {tmp_d[proportion_list[0]]} biome."
elif proportion_list[0] > 50:
biome_sentence = f"The majority of the region is comprised of {tmp_d[proportion_list[0]]}. It also includes areas of {tmp_d[proportion_list[1]]}."
else:
biome_sentence = f"The region is made up of several types of biomes, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}."
biome_sentence
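# Note: areaHa is in hectares; multiplying by 0.01 converts it to km² (1 ha = 0.01 km²).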
area_sentence = f"Area of {g.attributes.get('areaHa') * 0.01:3,.0f}km² located in {mountain_sentence} in {title_elements[0]}."
area_sentence
description = f"{area_sentence} {biome_sentence} {koppen_sentence} {ecoregion_sentence} {intact_sentence}"
description
translator= Translator(to_lang="fr")
title_translation = translator.translate(title)
description_translation = translator.translate(description)
print(title_translation)
print(description_translation)
translator= Translator(to_lang="es")
title_translation = translator.translate(title)
description_translation = translator.translate(description)
print(title_translation)
print(description_translation)
translator= Translator(to_lang="ro")
title_translation = translator.translate(title)
description_translation = translator.translate(description)
print(title_translation)
print(description_translation)
translator= Translator(to_lang="de")
title_translation = translator.translate(title)
description_translation = translator.translate(description)
print(title_translation)
print(description_translation)
translator= Translator(to_lang="nl")
title_translation = translator.translate(title)
description_translation = translator.translate(description)
print(title_translation)
print(description_translation)
#print(f"{area_sentence} {koppen_sentence} {mountain_sentence} {ecoregion_sentence} {intact_sentence} {biome_sentence}")
###Output
_____no_output_____
###Markdown
App version. Input: a geostore ID, plus optional app and language arguments. Output: a serialized title, description, and dictionary of data.
###Code
import geocoder
from googletrans import Translator #https://py-googletrans.readthedocs.io/en/latest/#googletrans-languages
import LMIPy
import ee
ee.Initialize()
def reverse_geocode_a_geostore(geostore):
""" Take an LMIPy.Geostore object and return geocoding results on the min/max coordinate locations"""
s = geostore.shape()[0]
min_coords = [s.bounds[1], s.bounds[0]]
max_coords = [s.bounds[3], s.bounds[2]]
geocode_results = []
for coords in [min_coords, max_coords]:
geocode_results.append(geocoder.osm(coords, method='reverse', lang_code='en'))
return geocode_results
def create_title_elements(geostore):
"""Take revsere geocoding results for upper and lower corners of a polygons bounds,
Extract the region, county, country, continent attributes of the locations.
Use the overlap to set an appropriate title.
"""
geocode_results = reverse_geocode_a_geostore(geostore)
key_locations = []
for result in geocode_results:
d = {}
try:
d['region'] = result.geojson.get('features')[0].get('properties').get('region')
except:
d['region'] = None
try:
d['county'] = result.geojson.get('features')[0].get('properties').get('county')
except:
d['county'] = None
try:
d['country'] = result.geojson.get('features')[0].get('properties').get('country')
except:
d['country'] = None
try:
d['continent'] = continent_lookup[iso_to_continent[result.geojson.get('features')[0].get('properties').get('country_code').upper()]]
except:
d['continent'] = None
key_locations.append(d)
# Check for overlap between upper and lower bounds
same_region = check_equivence(key_locations[0].get('region'), key_locations[1].get('region'))
same_county = check_equivence(key_locations[0].get('county'), key_locations[1].get('county'))
same_country = check_equivence(key_locations[0].get('country'), key_locations[1].get('country'))
same_continent = check_equivence(key_locations[0].get('continent'), key_locations[1].get('continent'))
# Set a title
if same_region:
return [key_locations[0]['region'], key_locations[0]['county']]
elif same_county:
return [key_locations[0]['county'], key_locations[0]['country']]
elif same_country:
return [key_locations[0]['country'], key_locations[0]['continent']]
elif same_continent:
return [key_locations[0]['continent']]
elif key_locations[0]['continent'] is not None and key_locations[1]['continent'] is not None:
return [key_locations[0]['continent'], key_locations[1]['continent'], True]
else:
return None
def check_equivence(item1, item2):
"""Check to see if the two items are equal and neither is equal to None"""
if item1 is None or item2 is None:
return None
else:
return item1 == item2
def create_title(title_elements):
"""Create a string(title) from a list input."""
if not title_elements:
return "Area of interest"
if len(title_elements) == 3:
return f"Area between {title_elements[0]} and {title_elements[1]}"
elif len(title_elements) == 2:
return f"Area in {title_elements[0]}, {title_elements[1]}"
elif len(title_elements) == 1:
return f"Area in {title_elements[0]}"
else:
return "Area of Interest"
def get_region(geom):
"""Take a valid geojson object, iterate over all features in that object.
Build up a list of EE Polygons, and finally return an EE Feature
collection. New as of 19th Sep 2017 (needed to fix a bug where the old
function ignored multipolys)
"""
polygons = []
for feature in geom.get('features'):
shape_type = feature.get('geometry').get('type')
coordinates = feature.get('geometry').get('coordinates')
if shape_type == 'MultiPolygon':
polygons.append(ee.Geometry.MultiPolygon(coordinates))
elif shape_type == 'Polygon':
polygons.append(ee.Geometry.Polygon(coordinates))
else:
pass
return ee.FeatureCollection(polygons)
def give_sorted_d(lookup_dic, key, stats):
"""Return a dic with keys as integer percentage of coverage proportion."""
total = 0
for item in stats.get(key):
total += stats.get(key).get(item)
tmp_d = {}
for item in stats.get(key):
tmp_proportion = int((stats.get(key).get(item)/ total) * 100)
#print(item, tmp_proportion)
tmp_d[tmp_proportion] = lookup_dic[item]
s_dic = {}
for kk in sorted(tmp_d,reverse=True):
s_dic[kk] = tmp_d[kk]
return s_dic
def gen_ecoregion_sentence(stats):
ecoregion_sentence = None
tmp_d = give_sorted_d(ecoid_to_ecoregion, 'ecoregion', stats)
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
ecoregion_sentence = f"The region's habitat is comprised of {tmp_d[proportion_list[0]]}."
elif proportion_list[0] > 50:
ecoregion_sentence = f"The majority of the regions habitat is comprised of {tmp_d[proportion_list[0]]}. It also includes areas of {tmp_d[proportion_list[1]]}."
else:
ecoregion_sentence = f"The region is made up of different habitats, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}"
return ecoregion_sentence
def gen_intact_sentence(stats):
not_intact = stats.get('intact2016').get('0', None)
is_intact = stats.get('intact2016').get('1', None)
intact_sentence = None
if is_intact:
if is_intact/not_intact > 0.75:
intact_sentence = "This region contains a large amount of Intact Forest."
elif is_intact/not_intact > 0.5:
intact_sentence = "This region contains Intact Forest."
else:
intact_sentence = "This region contains some Intact Forest."
else:
intact_sentence = 'This region has no Intact Forest.'
return intact_sentence
def gen_mountain_sentence(stats):
is_mountain = stats.get('isMountain').get('1')
not_mountain = stats.get('isMountain').get('0')
mountain_sentence = None
if is_mountain:
if is_mountain/not_mountain > 0.75:
mountain_sentence = "a mountainous area"
elif is_mountain/not_mountain > 0.5:
mountain_sentence = "a mix of lowland and mountains areas"
else:
mountain_sentence = "a predominanty lowland area"
else:
mountain_sentence = "A lowland area."
return mountain_sentence
def gen_koppen_sentence(stats):
# create a sorted list of items to deal with possibilities of different Koppen climates
tmp_d = give_sorted_d(lookup_dic=koppen_translated, key='koppen',stats=stats)
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
koppen_sentence = f"The area has a predominantly {tmp_d[proportion_list[0]]}."
elif proportion_list[0] > 50:
koppen_sentence = f"The majority of the region has {tmp_d[proportion_list[0]]}. It also has areas of {tmp_d[proportion_list[1]]}."
else:
koppen_sentence = f"The most common environmental conditions of the area are {tmp_d[proportion_list[0]]}."
return koppen_sentence
def gen_ecoregion_sentence(stats):
ecoregion_sentence = None
tmp_d = give_sorted_d(ecoid_to_ecoregion, 'ecoregion', stats)
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
ecoregion_sentence = f"The region's habitat is comprised of {tmp_d[proportion_list[0]]}."
elif proportion_list[0] > 50:
ecoregion_sentence = f"The majority of the regions habitat is comprised of {tmp_d[proportion_list[0]]}. It also includes areas of {tmp_d[proportion_list[1]]}."
else:
ecoregion_sentence = f"The region is made up of different habitats, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}."
return ecoregion_sentence
def gen_biome_sentence(stats):
biome_sentence = None
tmp_d = give_sorted_d(biomeNum_2_biomeName,'biome', stats)
proportion_list = list(tmp_d.keys())
if proportion_list[0] > 75:
biome_sentence = f"It is part of the {tmp_d[proportion_list[0]]} biome."
elif proportion_list[0] > 50:
biome_sentence = f"The majority of the region is comprised of {tmp_d[proportion_list[0]]}. It also includes areas of {tmp_d[proportion_list[1]]}."
else:
biome_sentence = f"The region is made up of several types of biomes, including {tmp_d[proportion_list[0]]}, and {tmp_d[proportion_list[1]]}."
return biome_sentence
def human_format(num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%.2f%s' % (num, ['', 'k', 'M', 'G', 'T', 'P'][magnitude])
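# e.g. human_format(1234567) -> '1.23M'; human_format(950) -> '950.00'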
def gen_area_sentence(g, app, mountain_sentence, title_elements):
if app == 'gfw':
area_sentence = f"Area of {human_format(g.attributes.get('areaHa'))}ha located in {mountain_sentence} in {title_elements[0]}."
else:
area_sentence = f"Area of {g.attributes.get('areaHa') * 0.01:3,.0f}km² located in {mountain_sentence} in {title_elements[0]}."
return area_sentence
def main(geostore_id, lang='en', app='gfw'):
g = LMIPy.Geometry(geostore_id)
title_elements = create_title_elements(g)
title = create_title(title_elements)
img = ee.Image('users/benlaken/geodesriber-asset') # Grab the layer
region = get_region(g.attributes.get('geojson')) # Create an EE feature from a geostore object
stats = img.reduceRegion(**{'reducer': ee.Reducer.frequencyHistogram(),
'geometry': region,
'bestEffort': True,
}).getInfo()
ecoregion_sentence = gen_ecoregion_sentence(stats)
intact_sentence = gen_intact_sentence(stats)
mountain_sentence = gen_mountain_sentence(stats)
koppen_sentence = gen_koppen_sentence(stats)
ecoregion_sentence = gen_ecoregion_sentence(stats)
biome_sentence = gen_biome_sentence(stats)
area_sentence = gen_area_sentence(g=g, app=app, mountain_sentence=mountain_sentence, title_elements=title_elements)
description = f"{area_sentence} {biome_sentence} {koppen_sentence} {ecoregion_sentence} {intact_sentence}"
if lang != 'en':
translator = Translator()
r = translator.translate(text=[title, description], dest=lang, src='en')
title = r[0].text
description = r[1].text
return {'title':title, 'description':description, 'lang': lang, 'stats': stats}
create_title_elements(g) ## <<---- need to investigate the reverse geocoder (should be returning English translations)
from pprint import pprint
%%time
main(gg.id)
gx = LMIPy.Geometry(id_hash='fb47822bbb56e89be6ac07b9ed52a28d')
%%time
main(gx.id)
%%time
response = main(geostore_id='fb47822bbb56e89be6ac07b9ed52a28d', lang='en')
pprint(response)
%%time
pprint(main('cf1874cd07c7e5f6adcf2f969bdd8e27', lang='en'))
%%time
#pprint(main('489e154c4b463835691c2da1e12910a6'))
%%time
#pprint(main('6f78821e65d893842606fead7c2d7924', lang='es', app='soilwatch'))
###Output
_____no_output_____
###Markdown
Dependent/lookup info
###Code
continent_lookup = {'AF':'Africa',
'AN':'Antarctica',
'AS':'Asia',
'EU':'Europe',
'NA':'North America',
'OC':'Oceania',
'SA':'South America'}
iso_to_continent = {'AD':'EU',
'AE':'AS',
'AF':'AS',
'AG':'NA',
'AI':'NA',
'AL':'EU',
'AM':'AS',
'AO':'AF',
'AP':'AS',
'AN':'NA',
'AQ':'AN',
'AR':'SA',
'AS':'OC',
'AT':'EU',
'AU':'OC',
'AW':'NA',
'AX':'EU',
'AZ':'AS',
'BA':'EU',
'BB':'NA',
'BD':'AS',
'BE':'EU',
'BF':'AF',
'BG':'EU',
'BH':'AS',
'BI':'AF',
'BJ':'AF',
'BL':'NA',
'BM':'NA',
'BN':'AS',
'BO':'SA',
'BR':'SA',
'BS':'NA',
'BT':'AS',
'BV':'AN',
'BW':'AF',
'BY':'EU',
'BZ':'NA',
'CA':'NA',
'CC':'AS',
'CD':'AF',
'CF':'AF',
'CG':'AF',
'CH':'EU',
'CI':'AF',
'CK':'OC',
'CL':'SA',
'CM':'AF',
'CN':'AS',
'CO':'SA',
'CR':'NA',
'CU':'NA',
'CV':'AF',
'CX':'AS',
'CY':'AS',
'CZ':'EU',
'DE':'EU',
'DJ':'AF',
'DK':'EU',
'DM':'NA',
'DO':'NA',
'DZ':'AF',
'EC':'SA',
'EE':'EU',
'EG':'AF',
'EH':'AF',
'ER':'AF',
'ES':'EU',
'ET':'AF',
'EU':'EU',
'FI':'EU',
'FJ':'OC',
'FK':'SA',
'FM':'OC',
'FO':'EU',
'FR':'EU',
'FX':'EU',
'GA':'AF',
'GB':'EU',
'GD':'NA',
'GE':'AS',
'GF':'SA',
'GG':'EU',
'GH':'AF',
'GI':'EU',
'GL':'NA',
'GM':'AF',
'GN':'AF',
'GP':'NA',
'GQ':'AF',
'GR':'EU',
'GS':'AN',
'GT':'NA',
'GU':'OC',
'GW':'AF',
'GY':'SA',
'HK':'AS',
'HM':'AN',
'HN':'NA',
'HR':'EU',
'HT':'NA',
'HU':'EU',
'ID':'AS',
'IE':'EU',
'IL':'AS',
'IM':'EU',
'IN':'AS',
'IO':'AS',
'IQ':'AS',
'IR':'AS',
'IS':'EU',
'IT':'EU',
'JE':'EU',
'JM':'NA',
'JO':'AS',
'JP':'AS',
'KE':'AF',
'KG':'AS',
'KH':'AS',
'KI':'OC',
'KM':'AF',
'KN':'NA',
'KP':'AS',
'KR':'AS',
'KW':'AS',
'KY':'NA',
'KZ':'AS',
'LA':'AS',
'LB':'AS',
'LC':'NA',
'LI':'EU',
'LK':'AS',
'LR':'AF',
'LS':'AF',
'LT':'EU',
'LU':'EU',
'LV':'EU',
'LY':'AF',
'MA':'AF',
'MC':'EU',
'MD':'EU',
'ME':'EU',
'MF':'NA',
'MG':'AF',
'MH':'OC',
'MK':'EU',
'ML':'AF',
'MM':'AS',
'MN':'AS',
'MO':'AS',
'MP':'OC',
'MQ':'NA',
'MR':'AF',
'MS':'NA',
'MT':'EU',
'MU':'AF',
'MV':'AS',
'MW':'AF',
'MX':'NA',
'MY':'AS',
'MZ':'AF',
'NA':'AF',
'NC':'OC',
'NE':'AF',
'NF':'OC',
'NG':'AF',
'NI':'NA',
'NL':'EU',
'NO':'EU',
'NP':'AS',
'NR':'OC',
'NU':'OC',
'NZ':'OC',
'O1':'--',
'OM':'AS',
'PA':'NA',
'PE':'SA',
'PF':'OC',
'PG':'OC',
'PH':'AS',
'PK':'AS',
'PL':'EU',
'PM':'NA',
'PN':'OC',
'PR':'NA',
'PS':'AS',
'PT':'EU',
'PW':'OC',
'PY':'SA',
'QA':'AS',
'RE':'AF',
'RO':'EU',
'RS':'EU',
'RU':'EU',
'RW':'AF',
'SA':'AS',
'SB':'OC',
'SC':'AF',
'SD':'AF',
'SE':'EU',
'SG':'AS',
'SH':'AF',
'SI':'EU',
'SJ':'EU',
'SK':'EU',
'SL':'AF',
'SM':'EU',
'SN':'AF',
'SO':'AF',
'SR':'SA',
'ST':'AF',
'SV':'NA',
'SY':'AS',
'SZ':'AF',
'TC':'NA',
'TD':'AF',
'TF':'AN',
'TG':'AF',
'TH':'AS',
'TJ':'AS',
'TK':'OC',
'TL':'AS',
'TM':'AS',
'TN':'AF',
'TO':'OC',
'TR':'EU',
'TT':'NA',
'TV':'OC',
'TW':'AS',
'TZ':'AF',
'UA':'EU',
'UG':'AF',
'UM':'OC',
'US':'NA',
'UY':'SA',
'UZ':'AS',
'VA':'EU',
'VC':'NA',
'VE':'SA',
'VG':'NA',
'VI':'NA',
'VN':'AS',
'VU':'OC',
'WF':'OC',
'WS':'OC',
'YE':'AS',
'YT':'AF',
'ZA':'AF',
'ZM':'AF',
'ZW':'AF'}
koppen = {
'11': 'Af',
'12': 'Am',
'13': 'As',
'14': 'Aw',
'21': 'BWk',
'22': 'BWh',
'26': 'BSk',
'27': 'BSh',
'31': 'Cfa',
'32': 'Cfb',
'33': 'Cfc',
'34': 'Csa',
'35': 'Csb',
'36': 'Csc',
'37': 'Cwa',
'38': 'Cwb',
'39': 'Cwc',
'41': 'Dfa',
'42': 'Dfb',
'43': 'Dfc',
'44': 'Dfd',
'45': 'Dsa',
'46': 'Dsb',
'47': 'Dsc',
'48': 'Dsd',
'49': 'Dwa',
'50': 'Dwb',
'51': 'Dwc',
'52': 'Dwd',
'61': 'EF',
'62': 'ET'
}
koppen_translated = {
'11': 'equatorial, humid climate',
'12': 'equatorial, with monsoonal rainfall',
'13': 'equatorial climate with dry summers',
'14': 'equatorial climate with dry winters',
'21': 'arid desert climate with cold temperatures',
'22': 'arid desert climate with hot temperatures',
'26': 'semi-arid climate with cold temperatures',
'27': 'semi-arid climate with hot temperatures',
'31': 'warm and temperate climate with high humidity and hot summers',
'32': 'warm and temperate climate with high humidity and warm summers',
'33': 'warm and temperate climate with high humidity and cool summers',
'34': 'warm and temperate climate with dry, hot summers',
'35': 'warm and temperate climate with dry summers',
'36': 'warm and temperate climate with dry, cool summers',
'37': 'warm and temperate climate with dry winters and hot summers',
'38': 'warm and temperate climate with dry winters and warm summers',
'39': 'warm and temperate climate with dry winters and cool summers',
'41': 'snowy, humid climate with hot summers',
'42': 'snowy, humid climate with warm summers',
'43': 'snowy, humid climate with cool summers',
'44': 'snowy, humid, and continental climate',
'45': 'snowy climate with dry, hot summers',
'46': 'snowy climate with dry, warm summers',
'47': 'snowy climate with dry, cool summers',
'48': 'snowy climate with dry summers and extremely continental temperatures',
'49': 'snowy climate with dry winters and hot summers',
'50': 'snowy climate with dry winters and warm summers',
'51': 'snowy climate with dry winters and cool summers',
'52': 'snowy climate with dry winters and extremely continental temperatures',
'61': 'polar, perpetual frost climate',
'62': 'polar tundra climate'
}
#http://koeppen-geiger.vu-wien.ac.at/data/legend.txt
ecoid_to_ecoregion = {'0': 'rock and ice',
'1': 'Albertine Rift montane forests',
'2': 'Cameroon Highlands forests',
'3': 'Central Congolian lowland forests',
'4': 'Comoros forests',
'5': 'Congolian coastal forests',
'6': 'Cross-Niger transition forests',
'7': 'Cross-Sanaga-Bioko coastal forests',
'8': 'East African montane forests',
'9': 'Eastern Arc forests',
'10': 'Eastern Congolian swamp forests',
'11': 'Eastern Guinean forests',
'12': 'Ethiopian montane forests',
'13': 'Granitic Seychelles forests',
'14': 'Guinean montane forests',
'15': 'Knysna-Amatole montane forests',
'16': 'Kwazulu Natal-Cape coastal forests',
'17': 'Madagascar humid forests',
'18': 'Madagascar subhumid forests',
'19': 'Maputaland coastal forests and woodlands',
'20': 'Mascarene forests',
'21': 'Mount Cameroon and Bioko montane forests',
'22': 'Niger Delta swamp forests',
'23': 'Nigerian lowland forests',
'24': 'Northeast Congolian lowland forests',
'25': 'Northern Swahili coastal forests',
'26': 'Northwest Congolian lowland forests',
'27': 'São Tomé, Príncipe, and Annobón forests',
'28': 'Southern Swahili coastal forests and woodlands',
'29': 'Western Congolian swamp forests',
'30': 'Western Guinean lowland forests',
'31': 'Cape Verde Islands dry forests',
'32': 'Madagascar dry deciduous forests',
'33': 'Zambezian evergreen dry forests',
'34': 'Angolan mopane woodlands',
'35': 'Angolan scarp savanna and woodlands',
'36': 'Angolan wet miombo woodlands',
'37': 'Ascension scrub and grasslands',
'38': 'Central bushveld',
'39': 'Central Zambezian wet miombo woodlands',
'40': 'Drakensberg Escarpment savanna and thicket',
'41': 'Drakensberg grasslands',
'42': 'Dry miombo woodlands',
'43': 'East Sudanian savanna',
'44': 'Guinean forest-savanna',
'45': 'Horn of Africa xeric bushlands',
'46': 'Itigi-Sumbu thicket',
'47': 'Kalahari Acacia woodlands',
'48': 'Limpopo lowveld',
'49': 'Mandara Plateau woodlands',
'50': 'Masai xeric grasslands and shrublands',
'51': 'Northern Acacia-Commiphora bushlands and thickets',
'52': 'Northern Congolian Forest-Savanna',
'53': 'Sahelian Acacia savanna',
'54': 'Serengeti volcanic grasslands',
'55': 'Somali Acacia-Commiphora bushlands and thickets',
'56': 'South Arabian fog woodlands, shrublands, and dune',
'57': 'Southern Acacia-Commiphora bushlands and thickets',
'58': 'Southern Congolian forest-savanna',
'59': 'Southwest Arabian montane woodlands and grasslands',
'60': 'St. Helena scrub and woodlands',
'61': 'Victoria Basin forest-savanna',
'62': 'West Sudanian savanna',
'63': 'Western Congolian forest-savanna',
'64': 'Zambezian Baikiaea woodlands',
'65': 'Zambezian mopane woodlands',
'66': 'Zambezian-Limpopo mixed woodlands',
'67': 'Amsterdam-Saint Paul Islands temperate grasslands',
'68': 'Tristan Da Cunha-Gough Islands shrub and grasslands',
'69': 'East African halophytics',
'70': 'Etosha Pan halophytics',
'71': 'Inner Niger Delta flooded savanna',
'72': 'Lake Chad flooded savanna',
'73': 'Makgadikgadi halophytics',
'74': 'Sudd flooded grasslands',
'75': 'Zambezian coastal flooded savanna',
'76': 'Zambezian flooded grasslands',
'77': 'Angolan montane forest-grassland',
'78': 'East African montane moorlands',
'79': 'Ethiopian montane grasslands and woodlands',
'80': 'Ethiopian montane moorlands',
'81': 'Highveld grasslands',
'82': 'Jos Plateau forest-grassland',
'83': 'Madagascar ericoid thickets',
'84': 'Mulanje Montane forest-grassland',
'85': 'Nyanga-Chimanimani Montane forest-grassland',
'86': 'Rwenzori-Virunga montane moorlands',
'87': 'Southern Rift Montane forest-grassland',
'88': 'Albany thickets',
'89': 'Fynbos shrubland',
'90': 'Renosterveld shrubland',
'91': 'Aldabra Island xeric scrub',
'92': 'Djibouti xeric shrublands',
'93': 'Eritrean coastal desert',
'94': 'Gariep Karoo',
'95': 'Hobyo grasslands and shrublands',
'96': 'Ile Europa and Bassas da India xeric scrub',
'97': 'Kalahari xeric savanna',
'98': 'Kaokoveld desert',
'99': 'Madagascar spiny thickets',
'100': 'Madagascar succulent woodlands',
'101': 'Nama Karoo shrublands',
'102': 'Namaqualand-Richtersveld steppe',
'103': 'Namib Desert',
'104': 'Namibian savanna woodlands',
'105': 'Socotra Island xeric shrublands',
'106': 'Somali montane xeric woodlands',
'107': 'Southwest Arabian coastal xeric shrublands',
'108': 'Southwest Arabian Escarpment shrublands and woodlands',
'109': 'Southwest Arabian highland xeric scrub',
'110': 'Succulent Karoo xeric shrublands',
'111': 'Central African mangroves',
'112': 'East African mangroves',
'113': 'Guinean mangroves',
'114': 'Madagascar mangroves',
'115': 'Red Sea mangroves',
'116': 'Southern Africa mangroves',
'117': 'Adelie Land tundra',
'118': 'Central South Antarctic Peninsula tundra',
'119': 'Dronning Maud Land tundra',
'120': 'East Antarctic tundra',
'121': 'Ellsworth Land tundra',
'122': 'Ellsworth Mountains tundra',
'123': 'Enderby Land tundra',
'124': 'Marie Byrd Land tundra',
'125': 'North Victoria Land tundra',
'126': 'Northeast Antarctic Peninsula tundra',
'127': 'Northwest Antarctic Peninsula tundra',
'128': 'Prince Charles Mountains tundra',
'129': 'Scotia Sea Islands tundra',
'130': 'South Antarctic Peninsula tundra',
'131': 'South Orkney Islands tundra',
'132': 'South Victoria Land tundra',
'133': 'Southern Indian Ocean Islands tundra',
'134': 'Transantarctic Mountains tundra',
'135': 'Admiralty Islands lowland rain forests',
'136': 'Banda Sea Islands moist deciduous forests',
'137': 'Biak-Numfoor rain forests',
'138': 'Buru rain forests',
'139': 'Central Range Papuan montane rain forests',
'140': 'Halmahera rain forests',
'141': 'Huon Peninsula montane rain forests',
'142': 'Lord Howe Island subtropical forests',
'143': 'Louisiade Archipelago rain forests',
'144': 'New Britain-New Ireland lowland rain forests',
'145': 'New Britain-New Ireland montane rain forests',
'146': 'New Caledonia rain forests',
'147': 'Norfolk Island subtropical forests',
'148': 'Northern New Guinea lowland rain and freshwater swamp forests',
'149': 'Northern New Guinea montane rain forests',
'150': 'Queensland tropical rain forests',
'151': 'Seram rain forests',
'152': 'Solomon Islands rain forests',
'153': 'Southeast Papuan rain forests',
'154': 'Southern New Guinea freshwater swamp forests',
'155': 'Southern New Guinea lowland rain forests',
'156': 'Sulawesi lowland rain forests',
'157': 'Sulawesi montane rain forests',
'158': 'Trobriand Islands rain forests',
'159': 'Vanuatu rain forests',
'160': 'Vogelkop montane rain forests',
'161': 'Vogelkop-Aru lowland rain forests',
'162': 'Yapen rain forests',
'163': 'Lesser Sundas deciduous forests',
'164': 'New Caledonia dry forests',
'165': 'Sumba deciduous forests',
'166': 'Timor and Wetar deciduous forests',
'167': 'Chatham Island temperate forests',
'168': 'Eastern Australian temperate forests',
'169': 'Fiordland temperate forests',
'170': 'Nelson Coast temperate forests',
'171': 'New Zealand North Island temperate forests',
'172': 'New Zealand South Island temperate forests',
'173': 'Northland temperate kauri forests',
'174': 'Rakiura Island temperate forests',
'175': 'Richmond temperate forests',
'176': 'Southeast Australia temperate forests',
'177': 'Tasmanian Central Highland forests',
'178': 'Tasmanian temperate forests',
'179': 'Tasmanian temperate rain forests',
'180': 'Westland temperate forests',
'181': 'Arnhem Land tropical savanna',
'182': 'Brigalow tropical savanna',
'183': 'Cape York Peninsula tropical savanna',
'184': 'Carpentaria tropical savanna',
'185': 'Einasleigh upland savanna',
'186': 'Kimberly tropical savanna',
'187': 'Mitchell Grass Downs',
'188': 'Trans Fly savanna and grasslands',
'189': 'Victoria Plains tropical savanna',
'190': 'Canterbury-Otago tussock grasslands',
'191': 'Eastern Australia mulga shrublands',
'192': 'Southeast Australia temperate savanna',
'193': 'Australian Alps montane grasslands',
'194': 'New Zealand South Island montane grasslands',
'195': 'Papuan Central Range sub-alpine grasslands',
'196': 'Antipodes Subantarctic Islands tundra',
'197': 'Coolgardie woodlands',
'198': 'Esperance mallee',
'199': 'Eyre and York mallee',
'200': 'Flinders-Lofty montane woodlands',
'201': 'Hampton mallee and woodlands',
'202': 'Jarrah-Karri forest and shrublands',
'203': 'Murray-Darling woodlands and mallee',
'204': 'Naracoorte woodlands',
'205': 'Southwest Australia savanna',
'206': 'Southwest Australia woodlands',
'207': 'Carnarvon xeric shrublands',
'208': 'Central Ranges xeric scrub',
'209': 'Gibson desert',
'210': 'Great Sandy-Tanami desert',
'211': 'Great Victoria desert',
'212': 'Nullarbor Plains xeric shrublands',
'213': 'Pilbara shrublands',
'214': 'Simpson desert',
'215': 'Tirari-Sturt stony desert',
'216': 'Western Australian Mulga shrublands',
'217': 'New Guinea mangroves',
'218': 'Andaman Islands rain forests',
'219': 'Borneo lowland rain forests',
'220': 'Borneo montane rain forests',
'221': 'Borneo peat swamp forests',
'222': 'Brahmaputra Valley semi-evergreen forests',
'223': 'Cardamom Mountains rain forests',
'224': 'Chao Phraya freshwater swamp forests',
'225': 'Chao Phraya lowland moist deciduous forests',
'226': 'Chin Hills-Arakan Yoma montane forests',
'227': 'Christmas and Cocos Islands tropical forests',
'228': 'East Deccan moist deciduous forests',
'229': 'Eastern Java-Bali montane rain forests',
'230': 'Eastern Java-Bali rain forests',
'231': 'Greater Negros-Panay rain forests',
'232': 'Hainan Island monsoon rain forests',
'233': 'Himalayan subtropical broadleaf forests',
'234': 'Irrawaddy freshwater swamp forests',
'235': 'Irrawaddy moist deciduous forests',
'236': 'Jian Nan subtropical evergreen forests',
'237': 'Kayah-Karen montane rain forests',
'238': 'Lower Gangetic Plains moist deciduous forests',
'239': 'Luang Prabang montane rain forests',
'240': 'Luzon montane rain forests',
'241': 'Luzon rain forests',
'242': 'Malabar Coast moist forests',
'243': 'Maldives-Lakshadweep-Chagos Archipelago tropical moist forests',
'244': 'Meghalaya subtropical forests',
'245': 'Mentawai Islands rain forests',
'246': 'Mindanao montane rain forests',
'247': 'Mindanao-Eastern Visayas rain forests',
'248': 'Mindoro rain forests',
'249': 'Mizoram-Manipur-Kachin rain forests',
'250': 'Myanmar coastal rain forests',
'251': 'Nansei Islands subtropical evergreen forests',
'252': 'Nicobar Islands rain forests',
'253': 'North Western Ghats moist deciduous forests',
'254': 'North Western Ghats montane rain forests',
'255': 'Northern Annamites rain forests',
'256': 'Northern Indochina subtropical forests',
'257': 'Northern Khorat Plateau moist deciduous forests',
'258': 'Northern Thailand-Laos moist deciduous forests',
'259': 'Northern Triangle subtropical forests',
'260': 'Northern Vietnam lowland rain forests',
'261': 'Orissa semi-evergreen forests',
'262': 'Palawan rain forests',
'263': 'Peninsular Malaysian montane rain forests',
'264': 'Peninsular Malaysian peat swamp forests',
'265': 'Peninsular Malaysian rain forests',
'266': 'Red River freshwater swamp forests',
'267': 'South China Sea Islands',
'268': 'South China-Vietnam subtropical evergreen forests',
'269': 'South Taiwan monsoon rain forests',
'270': 'South Western Ghats moist deciduous forests',
'271': 'South Western Ghats montane rain forests',
'272': 'Southern Annamites montane rain forests',
'273': 'Southwest Borneo freshwater swamp forests',
'274': 'Sri Lanka lowland rain forests',
'275': 'Sri Lanka montane rain forests',
'276': 'Sulu Archipelago rain forests',
'277': 'Sumatran freshwater swamp forests',
'278': 'Sumatran lowland rain forests',
'279': 'Sumatran montane rain forests',
'280': 'Sumatran peat swamp forests',
'281': 'Sundaland heath forests',
'282': 'Sundarbans freshwater swamp forests',
'283': 'Taiwan subtropical evergreen forests',
'284': 'Tenasserim-South Thailand semi-evergreen rain forests',
'285': 'Tonle Sap freshwater swamp forests',
'286': 'Tonle Sap-Mekong peat swamp forests',
'287': 'Upper Gangetic Plains moist deciduous forests',
'288': 'Western Java montane rain forests',
'289': 'Western Java rain forests',
'290': 'Central Deccan Plateau dry deciduous forests',
'291': 'Central Indochina dry forests',
'292': 'Chhota-Nagpur dry deciduous forests',
'293': 'East Deccan dry-evergreen forests',
'294': 'Irrawaddy dry forests',
'295': 'Khathiar-Gir dry deciduous forests',
'296': 'Narmada Valley dry deciduous forests',
'297': 'North Deccan dry deciduous forests',
'298': 'South Deccan Plateau dry deciduous forests',
'299': 'Southeast Indochina dry evergreen forests',
'300': 'Southern Vietnam lowland dry forests',
'301': 'Sri Lanka dry-zone dry evergreen forests',
'302': 'Himalayan subtropical pine forests',
'303': 'Luzon tropical pine forests',
'304': 'Northeast India-Myanmar pine forests',
'305': 'Sumatran tropical pine forests',
'306': 'Eastern Himalayan broadleaf forests',
'307': 'Northern Triangle temperate forests',
'308': 'Western Himalayan broadleaf forests',
'309': 'Eastern Himalayan subalpine conifer forests',
'310': 'Western Himalayan subalpine conifer forests',
'311': 'Terai-Duar savanna and grasslands',
'312': 'Rann of Kutch seasonal salt marsh',
'313': 'Kinabalu montane alpine meadows',
'314': 'Aravalli west thorn scrub forests',
'315': 'Deccan thorn scrub forests',
'316': 'Godavari-Krishna mangroves',
'317': 'Indus Valley desert',
'318': 'Thar desert',
'319': 'Indochina mangroves',
'320': 'Indus River Delta-Arabian Sea mangroves',
'321': 'Myanmar Coast mangroves',
'322': 'Sunda Shelf mangroves',
'323': 'Sundarbans mangroves',
'324': 'Sonoran-Sinaloan subtropical dry forest',
'325': 'Bermuda subtropical conifer forests',
'326': 'Sierra Madre Occidental pine-oak forests',
'327': 'Sierra Madre Oriental pine-oak forests',
'328': 'Allegheny Highlands forests',
'329': 'Appalachian mixed mesophytic forests',
'330': 'Appalachian Piedmont forests',
'331': 'Appalachian-Blue Ridge forests',
'332': 'East Central Texas forests',
'333': 'Eastern Canadian Forest-Boreal transition',
'334': 'Eastern Great Lakes lowland forests',
'335': 'Gulf of St. Lawrence lowland forests',
'336': 'Interior Plateau US Hardwood Forests',
'337': 'Mississippi lowland forests',
'338': 'New England-Acadian forests',
'339': 'Northeast US Coastal forests',
'340': 'Ozark Highlands mixed forests',
'341': 'Ozark Mountain forests',
'342': 'Southern Great Lakes forests',
'343': 'Upper Midwest US forest-savanna transition',
'344': 'Western Great Lakes forests',
'345': 'Alberta-British Columbia foothills forests',
'346': 'Arizona Mountains forests',
'347': 'Atlantic coastal pine barrens',
'348': 'Blue Mountains forests',
'349': 'British Columbia coastal conifer forests',
'350': 'Central British Columbia Mountain forests',
'351': 'Central Pacific Northwest coastal forests',
'352': 'Central-Southern Cascades Forests',
'353': 'Colorado Rockies forests',
'354': 'Eastern Cascades forests',
'355': 'Fraser Plateau and Basin conifer forests',
'356': 'Great Basin montane forests',
'357': 'Klamath-Siskiyou forests',
'358': 'North Cascades conifer forests',
'359': 'Northern California coastal forests',
'360': 'Northern Pacific Alaskan coastal forests',
'361': 'Northern Rockies conifer forests',
'362': 'Okanogan dry forests',
'363': 'Piney Woods',
'364': 'Puget lowland forests',
'365': 'Queen Charlotte Islands conifer forests',
'366': 'Sierra Nevada forests',
'367': 'South Central Rockies forests',
'368': 'Wasatch and Uinta montane forests',
'369': 'Alaska Peninsula montane taiga',
'370': 'Central Canadian Shield forests',
'371': 'Cook Inlet taiga',
'372': 'Copper Plateau taiga',
'373': 'Eastern Canadian forests',
'374': 'Eastern Canadian Shield taiga',
'375': 'Interior Alaska-Yukon lowland taiga',
'376': 'Mid-Canada Boreal Plains forests',
'377': 'Midwest Canadian Shield forests',
'378': 'Muskwa-Slave Lake taiga',
'379': 'Northern Canadian Shield taiga',
'380': 'Northern Cordillera forests',
'381': 'Northwest Territories taiga',
'382': 'Southern Hudson Bay taiga',
'383': 'Watson Highlands taiga',
'384': 'Western Gulf coastal grasslands',
'385': 'California Central Valley grasslands',
'386': 'Canadian Aspen forests and parklands',
'387': 'Central US forest-grasslands transition',
'388': 'Central Tallgrass prairie',
'389': 'Central-Southern US mixed grasslands',
'390': 'Cross-Timbers savanna-woodland',
'391': 'Edwards Plateau savanna',
'392': 'Flint Hills tallgrass prairie',
'393': 'Mid-Atlantic US coastal savannas',
'394': 'Montana Valley and Foothill grasslands',
'395': 'Nebraska Sand Hills mixed grasslands',
'396': 'Northern Shortgrass prairie',
'397': 'Northern Tallgrass prairie',
'398': 'Palouse prairie',
'399': 'Southeast US conifer savannas',
'400': 'Southeast US mixed woodlands and savannas',
'401': 'Texas blackland prairies',
'402': 'Western shortgrass prairie',
'403': 'Willamette Valley oak savanna',
'404': 'Ahklun and Kilbuck Upland Tundra',
'405': 'Alaska-St. Elias Range tundra',
'406': 'Aleutian Islands tundra',
'407': 'Arctic coastal tundra',
'408': 'Arctic foothills tundra',
'409': 'Beringia lowland tundra',
'410': 'Beringia upland tundra',
'411': 'Brooks-British Range tundra',
'412': 'Canadian High Arctic tundra',
'413': 'Canadian Low Arctic tundra',
'414': 'Canadian Middle Arctic Tundra',
'415': 'Davis Highlands tundra',
'416': 'Interior Yukon-Alaska alpine tundra',
'417': 'Kalaallit Nunaat Arctic steppe',
'418': 'Kalaallit Nunaat High Arctic tundra',
'419': 'Ogilvie-MacKenzie alpine tundra',
'420': 'Pacific Coastal Mountain icefields and tundra',
'421': 'Torngat Mountain tundra',
'422': 'California coastal sage and chaparral',
'423': 'California interior chaparral and woodlands',
'424': 'California montane chaparral and woodlands',
'425': 'Santa Lucia Montane Chaparral and Woodlands',
'426': 'Baja California desert',
'427': 'Central Mexican matorral',
'428': 'Chihuahuan desert',
'429': 'Colorado Plateau shrublands',
'430': 'Great Basin shrub steppe',
'431': 'Gulf of California xeric scrub',
'432': 'Meseta Central matorral',
'433': 'Mojave desert',
'434': 'Snake-Columbia shrub steppe',
'435': 'Sonoran desert',
'436': 'Tamaulipan matorral',
'437': 'Tamaulipan mezquital',
'438': 'Wyoming Basin shrub steppe',
'439': 'Alto Paraná Atlantic forests',
'440': 'Araucaria moist forests',
'441': 'Atlantic Coast restingas',
'442': 'Bahia coastal forests',
'443': 'Bahia interior forests',
'444': 'Bolivian Yungas',
'445': 'Caatinga Enclaves moist forests',
'446': 'Caqueta moist forests',
'447': 'Catatumbo moist forests',
'448': 'Cauca Valley montane forests',
'449': 'Cayos Miskitos-San Andrés and Providencia moist forests',
'450': 'Central American Atlantic moist forests',
'451': 'Central American montane forests',
'452': 'Chiapas montane forests',
'453': 'Chimalapas montane forests',
'454': 'Chocó-Darién moist forests',
'455': 'Cocos Island moist forests',
'456': 'Cordillera La Costa montane forests',
'457': 'Cordillera Oriental montane forests',
'458': 'Costa Rican seasonal moist forests',
'459': 'Cuban moist forests',
'460': 'Eastern Cordillera Real montane forests',
'461': 'Eastern Panamanian montane forests',
'462': 'Fernando de Noronha-Atol das Rocas moist forests',
'463': 'Guianan freshwater swamp forests',
'464': 'Guianan Highlands moist forests',
'465': 'Guianan lowland moist forests',
'466': 'Guianan piedmont moist forests',
'467': 'Gurupa várzea',
'468': 'Hispaniolan moist forests',
'469': 'Iquitos várzea',
'470': 'Isthmian-Atlantic moist forests',
'471': 'Isthmian-Pacific moist forests',
'472': 'Jamaican moist forests',
'473': 'Japurá-Solimões-Negro moist forests',
'474': 'Juruá-Purus moist forests',
'475': 'Leeward Islands moist forests',
'476': 'Madeira-Tapajós moist forests',
'477': 'Magdalena Valley montane forests',
'478': 'Magdalena-Urabá moist forests',
'479': 'Marañón dry forests',
'480': 'Marajó várzea',
'481': 'Mato Grosso tropical dry forests',
'482': 'Monte Alegre várzea',
'483': 'Napo moist forests',
'484': 'Negro-Branco moist forests',
'485': 'Northeast Brazil restingas',
'486': 'Northwest Andean montane forests',
'487': 'Oaxacan montane forests',
'488': 'Orinoco Delta swamp forests',
'489': 'Pantanos de Centla',
'490': 'Pantepui forests and shrublands',
'491': 'Pernambuco coastal forests',
'492': 'Pernambuco interior forests',
'493': 'Peruvian Yungas',
'494': 'Petén-Veracruz moist forests',
'495': 'Puerto Rican moist forests',
'496': 'Purus várzea',
'497': 'Purus-Madeira moist forests',
'498': 'Rio Negro campinarana',
'499': 'Santa Marta montane forests',
'500': 'Serra do Mar coastal forests',
'501': 'Sierra de los Tuxtlas',
'502': 'Sierra Madre de Chiapas moist forests',
'503': 'Solimões-Japurá moist forests',
'504': 'Southern Andean Yungas',
'505': 'Southwest Amazon moist forests',
'506': 'Talamancan montane forests',
'507': 'Tapajós-Xingu moist forests',
'508': 'Tocantins/Pindare moist forests',
'509': 'Trindade-Martin Vaz Islands tropical forests',
'510': 'Trinidad and Tobago moist forest',
'511': 'Uatumã-Trombetas moist forests',
'512': 'Ucayali moist forests',
'513': 'Venezuelan Andes montane forests',
'514': 'Veracruz moist forests',
'515': 'Veracruz montane forests',
'516': 'Western Ecuador moist forests',
'517': 'Windward Islands moist forests',
'518': 'Xingu-Tocantins-Araguaia moist forests',
'519': 'Yucatán moist forests',
'520': 'Apure-Villavicencio dry forests',
'521': 'Bajío dry forests',
'522': 'Balsas dry forests',
'523': 'Bolivian montane dry forests',
'524': 'Brazilian Atlantic dry forests',
'525': 'Caatinga',
'526': 'Cauca Valley dry forests',
'527': 'Central American dry forests',
'528': 'Chiapas Depression dry forests',
'529': 'Chiquitano dry forests',
'530': 'Cuban dry forests',
'531': 'Ecuadorian dry forests',
'532': 'Hispaniolan dry forests',
'533': 'Islas Revillagigedo dry forests',
'534': 'Jalisco dry forests',
'535': 'Jamaican dry forests',
'536': 'Lara-Falcón dry forests',
'537': 'Lesser Antillean dry forests',
'538': 'Magdalena Valley dry forests',
'539': 'Maracaibo dry forests',
'540': 'Maranhão Babaçu forests',
'541': 'Panamanian dry forests',
'542': 'Patía valley dry forests',
'543': 'Puerto Rican dry forests',
'544': 'Sierra de la Laguna dry forests',
'545': 'Sinaloan dry forests',
'546': 'Sinú Valley dry forests',
'547': 'Southern Pacific dry forests',
'548': 'Trinidad and Tobago dry forest',
'549': 'Tumbes-Piura dry forests',
'550': 'Veracruz dry forests',
'551': 'Yucatán dry forests',
'552': 'Bahamian pineyards',
'553': 'Central American pine-oak forests',
'554': 'Cuban pine forests',
'555': 'Hispaniolan pine forests',
'556': 'Sierra de la Laguna pine-oak forests',
'557': 'Sierra Madre de Oaxaca pine-oak forests',
'558': 'Sierra Madre del Sur pine-oak forests',
'559': 'Trans-Mexican Volcanic Belt pine-oak forests',
'560': 'Juan Fernández Islands temperate forests',
'561': 'Magellanic subpolar forests',
'562': 'San Félix-San Ambrosio Islands temperate forests',
'563': 'Valdivian temperate forests',
'564': 'Belizian pine savannas',
'565': 'Beni savanna',
'566': 'Campos Rupestres montane savanna',
'567': 'Cerrado',
'568': 'Clipperton Island shrub and grasslands',
'569': 'Dry Chaco',
'570': 'Guianan savanna',
'571': 'Humid Chaco',
'572': 'Llanos',
'573': 'Miskito pine forests',
'574': 'Uruguayan savanna',
'575': 'Espinal',
'576': 'Humid Pampas',
'577': 'Low Monte',
'578': 'Patagonian steppe',
'579': 'Cuban wetlands',
'580': 'Enriquillo wetlands',
'581': 'Everglades flooded grasslands',
'582': 'Guayaquil flooded grasslands',
'583': 'Orinoco wetlands',
'584': 'Pantanal',
'585': 'Paraná flooded savanna',
'586': 'Southern Cone Mesopotamian savanna',
'587': 'Central Andean dry puna',
'588': 'Central Andean puna',
'589': 'Central Andean wet puna',
'590': 'Cordillera Central páramo',
'591': 'Cordillera de Merida páramo',
'592': 'High Monte',
'593': 'Northern Andean páramo',
'594': 'Santa Marta páramo',
'595': 'Southern Andean steppe',
'596': 'Chilean Matorral',
'597': 'Araya and Paria xeric scrub',
'598': 'Atacama desert',
'599': 'Caribbean shrublands',
'600': 'Cuban cactus scrub',
'601': 'Galápagos Islands xeric scrub',
'602': 'Guajira-Barranquilla xeric scrub',
'603': 'La Costa xeric shrublands',
'604': 'Malpelo Island xeric scrub',
'605': 'Motagua Valley thornscrub',
'606': 'Paraguaná xeric scrub',
'607': 'San Lucan xeric scrub',
'608': 'Sechura desert',
'609': 'St. Peter and St. Paul Rocks',
'610': 'Tehuacán Valley matorral',
'611': 'Amazon-Orinoco-Southern Caribbean mangroves',
'612': 'Bahamian-Antillean mangroves',
'613': 'Mesoamerican Gulf-Caribbean mangroves',
'614': 'Northern Mesoamerican Pacific mangroves',
'615': 'South American Pacific mangroves',
'616': 'Southern Atlantic Brazilian mangroves',
'617': 'Southern Mesoamerican Pacific mangroves',
'618': 'Carolines tropical moist forests',
'619': 'Central Polynesian tropical moist forests',
'620': 'Cook Islands tropical moist forests',
'621': 'Eastern Micronesia tropical moist forests',
'622': 'Fiji tropical moist forests',
'623': "Hawai'i tropical moist forests",
'624': 'Kermadec Islands subtropical moist forests',
'625': 'Marquesas tropical moist forests',
'626': 'Ogasawara subtropical moist forests',
'627': 'Palau tropical moist forests',
'628': 'Rapa Nui and Sala y Gómez subtropical forests',
'629': 'Samoan tropical moist forests',
'630': 'Society Islands tropical moist forests',
'631': 'Tongan tropical moist forests',
'632': 'Tuamotu tropical moist forests',
'633': 'Tubuai tropical moist forests',
'634': 'Western Polynesian tropical moist forests',
'635': 'Fiji tropical dry forests',
'636': "Hawai'i tropical dry forests",
'637': 'Marianas tropical dry forests',
'638': 'Yap tropical dry forests',
'639': "Hawai'i tropical high shrublands",
'640': "Hawai'i tropical low shrublands",
'641': "Northwest Hawai'i scrub",
'642': 'Guizhou Plateau broadleaf and mixed forests',
'643': 'Yunnan Plateau subtropical evergreen forests',
'644': 'Appenine deciduous montane forests',
'645': 'Azores temperate mixed forests',
'646': 'Balkan mixed forests',
'647': 'Baltic mixed forests',
'648': 'Cantabrian mixed forests',
'649': 'Caspian Hyrcanian mixed forests',
'650': 'Caucasus mixed forests',
'651': 'Celtic broadleaf forests',
'652': 'Central Anatolian steppe and woodlands',
'653': 'Central China Loess Plateau mixed forests',
'654': 'Central European mixed forests',
'655': 'Central Korean deciduous forests',
'656': 'Changbai Mountains mixed forests',
'657': 'Changjiang Plain evergreen forests',
'658': 'Crimean Submediterranean forest complex',
'659': 'Daba Mountains evergreen forests',
'660': 'Dinaric Mountains mixed forests',
'661': 'East European forest steppe',
'662': 'Eastern Anatolian deciduous forests',
'663': 'English Lowlands beech forests',
'664': 'European Atlantic mixed forests',
'665': 'Euxine-Colchic broadleaf forests',
'666': 'Hokkaido deciduous forests',
'667': 'Huang He Plain mixed forests',
'668': 'Madeira evergreen forests',
'669': 'Manchurian mixed forests',
'670': 'Nihonkai evergreen forests',
'671': 'Nihonkai montane deciduous forests',
'672': 'North Atlantic moist mixed forests',
'673': 'Northeast China Plain deciduous forests',
'674': 'Pannonian mixed forests',
'675': 'Po Basin mixed forests',
'676': 'Pyrenees conifer and mixed forests',
'677': 'Qin Ling Mountains deciduous forests',
'678': 'Rodope montane mixed forests',
'679': 'Sarmatic mixed forests',
'680': 'Sichuan Basin evergreen broadleaf forests',
'681': 'Southern Korea evergreen forests',
'682': 'Taiheiyo evergreen forests',
'683': 'Taiheiyo montane deciduous forests',
'684': 'Tarim Basin deciduous forests and steppe',
'685': 'Ussuri broadleaf and mixed forests',
'686': 'Western European broadleaf forests',
'687': 'Western Siberian hemiboreal forests',
'688': 'Zagros Mountains forest steppe',
'689': 'Alps conifer and mixed forests',
'690': 'Altai montane forest and forest steppe',
'691': 'Caledon conifer forests',
'692': 'Carpathian montane forests',
'693': 'Da Hinggan-Dzhagdy Mountains conifer forests',
'694': 'East Afghan montane conifer forests',
'695': 'Elburz Range forest steppe',
'696': 'Helanshan montane conifer forests',
'697': 'Hengduan Mountains subalpine conifer forests',
'698': 'Hokkaido montane conifer forests',
'699': 'Honshu alpine conifer forests',
'700': 'Khangai Mountains conifer forests',
'701': 'Mediterranean conifer and mixed forests',
'702': 'Northeast Himalayan subalpine conifer forests',
'703': 'Northern Anatolian conifer and deciduous forests',
'704': 'Nujiang Langcang Gorge alpine conifer and mixed forests',
'705': 'Qilian Mountains conifer forests',
'706': 'Qionglai-Minshan conifer forests',
'707': 'Sayan montane conifer forests',
'708': 'Scandinavian coastal conifer forests',
'709': 'Tian Shan montane conifer forests',
'710': 'East Siberian taiga',
'711': 'Iceland boreal birch forests and alpine tundra',
'712': 'Kamchatka taiga',
'713': 'Kamchatka-Kurile meadows and sparse forests',
'714': 'Northeast Siberian taiga',
'715': 'Okhotsk-Manchurian taiga',
'716': 'Sakhalin Island taiga',
'717': 'Scandinavian and Russian taiga',
'718': 'Trans-Baikal conifer forests',
'719': 'Urals montane forest and taiga',
'720': 'West Siberian taiga',
'721': 'Alai-Western Tian Shan steppe',
'722': 'Al-Hajar foothill xeric woodlands and shrublands',
'723': 'Al-Hajar montane woodlands and shrublands',
'724': 'Altai steppe and semi-desert',
'725': 'Central Anatolian steppe',
'726': 'Daurian forest steppe',
'727': 'Eastern Anatolian montane steppe',
'728': 'Emin Valley steppe',
'729': 'Faroe Islands boreal grasslands',
'730': 'Gissaro-Alai open woodlands',
'731': 'Kazakh forest steppe',
'732': 'Kazakh steppe',
'733': 'Kazakh upland steppe',
'734': 'Mongolian-Manchurian grassland',
'735': 'Pontic steppe',
'736': 'Sayan Intermontane steppe',
'737': 'Selenge-Orkhon forest steppe',
'738': 'South Siberian forest steppe',
'739': 'Syrian xeric grasslands and shrublands',
'740': 'Tian Shan foothill arid steppe',
'741': 'Amur meadow steppe',
'742': 'Bohai Sea saline meadow',
'743': 'Nenjiang River grassland',
'744': 'Nile Delta flooded savanna',
'745': 'Saharan halophytics',
'746': 'Suiphun-Khanka meadows and forest meadows',
'747': 'Tigris-Euphrates alluvial salt marsh',
'748': 'Yellow Sea saline meadow',
'749': 'Altai alpine meadow and tundra',
'750': 'Central Tibetan Plateau alpine steppe',
'751': 'Eastern Himalayan alpine shrub and meadows',
'752': 'Ghorat-Hazarajat alpine meadow',
'753': 'Hindu Kush alpine meadow',
'754': 'Karakoram-West Tibetan Plateau alpine steppe',
'755': 'Khangai Mountains alpine meadow',
'756': 'Kopet Dag woodlands and forest steppe',
'757': 'Kuh Rud and Eastern Iran montane woodlands',
'758': 'Mediterranean High Atlas juniper steppe',
'759': 'North Tibetan Plateau-Kunlun Mountains alpine desert',
'760': 'Northwestern Himalayan alpine shrub and meadows',
'761': 'Ordos Plateau steppe',
'762': 'Pamir alpine desert and tundra',
'763': 'Qilian Mountains subalpine meadows',
'764': 'Sayan alpine meadows and tundra',
'765': 'Southeast Tibet shrublands and meadows',
'766': 'Sulaiman Range alpine meadows',
'767': 'Tian Shan montane steppe and meadows',
'768': 'Tibetan Plateau alpine shrublands and meadows',
'769': 'Western Himalayan alpine shrub and meadows',
'770': 'Yarlung Zanbo arid steppe',
'771': 'Cherskii-Kolyma mountain tundra',
'772': 'Chukchi Peninsula tundra',
'773': 'Kamchatka tundra',
'774': 'Kola Peninsula tundra',
'775': 'Northeast Siberian coastal tundra',
'776': 'Northwest Russian-Novaya Zemlya tundra',
'777': 'Novosibirsk Islands Arctic desert',
'778': 'Russian Arctic desert',
'779': 'Russian Bering tundra',
'780': 'Scandinavian Montane Birch forest and grasslands',
'781': 'Taimyr-Central Siberian tundra',
'782': 'Trans-Baikal Bald Mountain tundra',
'783': 'Wrangel Island Arctic desert',
'784': 'Yamal-Gydan tundra',
'785': 'Aegean and Western Turkey sclerophyllous and mixed forests',
'786': 'Anatolian conifer and deciduous mixed forests',
'787': 'Canary Islands dry woodlands and forests',
'788': 'Corsican montane broadleaf and mixed forests',
'789': 'Crete Mediterranean forests',
'790': 'Cyprus Mediterranean forests',
'791': 'Eastern Mediterranean conifer-broadleaf forests',
'792': 'Iberian conifer forests',
'793': 'Iberian sclerophyllous and semi-deciduous forests',
'794': 'Illyrian deciduous forests',
'795': 'Italian sclerophyllous and semi-deciduous forests',
'796': 'Mediterranean Acacia-Argania dry woodlands and succulent thickets',
'797': 'Mediterranean dry woodlands and steppe',
'798': 'Mediterranean woodlands and forests',
'799': 'Northeast Spain and Southern France Mediterranean forests',
'800': 'Northwest Iberian montane forests',
'801': 'Pindus Mountains mixed forests',
'802': 'South Apennine mixed montane forests',
'803': 'Southeast Iberian shrubs and woodlands',
'804': 'Southern Anatolian montane conifer and deciduous forests',
'805': 'Southwest Iberian Mediterranean sclerophyllous and mixed forests',
'806': 'Tyrrhenian-Adriatic sclerophyllous and mixed forests',
'807': 'Afghan Mountains semi-desert',
'808': 'Alashan Plateau semi-desert',
'809': 'Arabian desert',
'810': 'Arabian sand desert',
'811': 'Arabian-Persian Gulf coastal plain desert',
'812': 'Azerbaijan shrub desert and steppe',
'813': 'Badghyz and Karabil semi-desert',
'814': 'Baluchistan xeric woodlands',
'815': 'Caspian lowland desert',
'816': 'Central Afghan Mountains xeric woodlands',
'817': 'Central Asian northern desert',
'818': 'Central Asian riparian woodlands',
'819': 'Central Asian southern desert',
'820': 'Central Persian desert basins',
'821': 'East Arabian fog shrublands and sand desert',
'822': 'East Sahara Desert',
'823': 'East Saharan montane xeric woodlands',
'824': 'Eastern Gobi desert steppe',
'825': 'Gobi Lakes Valley desert steppe',
'826': 'Great Lakes Basin desert steppe',
'827': 'Junggar Basin semi-desert',
'828': 'Kazakh semi-desert',
'829': 'Kopet Dag semi-desert',
'830': 'Mesopotamian shrub desert',
'831': 'North Arabian desert',
'832': 'North Arabian highland shrublands',
'833': 'North Saharan Xeric Steppe and Woodland',
'834': 'Paropamisus xeric woodlands',
'835': 'Qaidam Basin semi-desert',
'836': 'Red Sea coastal desert',
'837': 'Red Sea-Arabian Desert shrublands',
'838': 'Registan-North Pakistan sandy desert',
'839': 'Saharan Atlantic coastal desert',
'840': 'South Arabian plains and plateau desert',
'841': 'South Iran Nubo-Sindian desert and semi-desert',
'842': 'South Sahara desert',
'843': 'Taklimakan desert',
'844': 'Tibesti-Jebel Uweinat montane xeric woodlands',
'845': 'West Sahara desert',
'846': 'West Saharan montane xeric woodlands'}
biomeNum_2_biomeName = {
'1': 'Tropical and Subtropical Moist Broadleaf Forests',
'2': 'Tropical and Subtropical Dry Broadleaf Forests',
'3': 'Tropical and Subtropical Coniferous Forests',
'4': 'Temperate Broadleaf and Mixed Forests',
'5': 'Temperate Conifer Forests',
'6': 'Boreal Forests/Taiga',
'7': 'Tropical and Subtropical Grasslands, Savannas and Shrublands',
'8': 'Temperate Grasslands, Savannas and Shrublands',
'9': 'Flooded Grasslands and Savannas',
'10': 'Montane Grasslands and Shrublands',
'11': 'Tundra',
'12': 'Mediterranean Forests, Woodlands and Scrub',
'13': 'Deserts and Xeric Shrublands',
'14': 'Mangroves'}
###Output
_____no_output_____ |
notebooks/tils-project.ipynb | ###Markdown
TILs-project: This notebook is a copy of `4. Investestigating Survival/Recurrance` with changes to match the endpoints studied by Felicia Leion.
###Code
import pandas as pd
from itertools import combinations
import regex as re
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
from numba import jit
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
sns.set()
sns.set_style("whitegrid", {'axes.grid' : False})
matplotlib.rcParams['font.family'] = "sans"
df_pat = pd.read_excel('../data/tnbc2/256_TNBC__F_LEION_till_arvid.xlsx')
df_pat["nodes"] = df_pat["Positive nodes"]
df_pat["age"] = df_pat["Age at diagnosis"]
df_pat["size"] = df_pat["Size (mm)"]
df_pat = df_pat.replace(-0.99, np.NaN)
df_pat = df_pat.replace("N/D", np.NaN)
df_pat = df_pat.replace("x", np.NaN)
#df_pat = df_pat[~df_pat["TILs helsnitt"].isna()]
df_pat["treated"] = df_pat["Chemo (schema)"].apply(lambda x: x != "None")
df_pat["relapse"] = df_pat["Relapse yes 1 no 0"].astype(np.bool)
df_pat["dead"] = df_pat["Dead yes 1 no 0"].astype(np.bool)
df_pat["distant_relapse"] = (df_pat["Months_surgery_distant_relapse"] > 0)
df_pat["OS"] = ~df_pat["dead"] # Overall Survival
df_pat["IDFS"] = ~df_pat["relapse"] # Invasive Disease Free Survival (not relapse)
df_pat["DRFI"] = ~df_pat["distant_relapse"] # Distant Relapse Free Survival (not distant relapse)
df_pat = df_pat[df_pat["treated"]]
df_pat["IDFS"].value_counts()
def pat_id_wsi(image_id):
try:
return int(re.findall(r"\d+", image_id)[0])
except:
return np.NaN
def pat_id_tma(image_name):
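    # Map a TMA core image name (Block_<b>..._<row>_<letter>_) to its patient id:
    # each block holds a 10x12 grid of cores and every patient occupies two adjacent cores.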
block, nr, letter = re.findall(r"Block_(\d).*_(.*)_([A-Z])_", image_name)[0]
block_start = [1, 59, 113, 172, 210]
start = block_start[int(block)-1]
letter = letter.lower()
key = np.array([i for i in range(start, start + int(10*12/2)) for n in range(2)]).reshape((10,12))
pat_id = key[int(nr)-1][11 - (ord(letter) - 97)]
return pat_id
def _tma_id(patient_id):
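    # Inverse of pat_id_tma: given a patient id, return the TMA block and the
    # core letters/numbers (two cores per patient) where that patient appears.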
block_start = [1, 59, 113, 172, 210]
start = [s for s in block_start if patient_id >= s][-1]
block = block_start.index(start) + 1
key = np.array([i for i in range(start, start + int(10*12/2)) for n in range(2)]).reshape((10,12))
Y, X = np.where(key == patient_id)
letters = [chr(11 - (x - 97)).upper() for x in X]
numbers = list(Y + 1)
return block, letters, numbers
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
pass
#print(df_wsi.sort_values("TMAid"))
#print(df_wsi["TMAid"].value_counts())
from joblib import Memory
memory = Memory('./cache/')
def extract_features(path):
df = pd.read_feather(path)
return df
def merge_patient_data_wsi(df_wsi, df_pat):
df_pat["STR"] = df_pat["TILs helsnitt"]
df_wsi["TMAid"] = df_wsi["image_id"].apply(pat_id_wsi)
df_mean = df_wsi.groupby("TMAid").mean().reset_index()
df_all = pd.merge(df_pat, df_mean, how='left', on=["TMAid"])
return df_all.sort_values("TMAid")
def merge_patient_data_tma(df_tma, df_pat):
df_pat["STR"] = df_pat["TILs TMA"]
df_tma["TMAid"] = df_tma["image_id"].apply(pat_id_tma)
df_mean = df_tma.groupby("TMAid").mean().reset_index()
df_all = pd.merge(df_pat, df_mean, how='left', on=["TMAid"])
return df_all.sort_values("TMAid")
def tma_df():
df_tma = pd.read_feather('./tma_quip2_0.2_5_1.0.feather')
df_pat["STR"] = df_pat["TILs TMA"]
df_tma["TMAid"] = df_tma["image_id"].apply(pat_id_tma)
df_mean = df_tma.groupby("TMAid").mean().reset_index()
df_all = pd.merge(df_pat, df_mean, how='left', on=["TMAid"])
df_all = merge_patient_data_tma(df_tma, df_pat)
return df_all
def wsi_df():
df_wsi = pd.read_feather('./wsi_quip2_0.2_5_1.0_100.feather')
df_pat["STR"] = df_pat["TILs helsnitt"]
df_wsi["TMAid"] = df_wsi["image_id"].apply(pat_id_wsi)
df_mean = df_wsi.groupby("TMAid").mean().reset_index()
df_all = pd.merge(df_pat, df_mean, how='left', on=["TMAid"])
df_all = merge_patient_data_wsi(df_wsi, df_pat)
return df_all
# Agrees with Felicia's report
df = tma_df()
print(df["OS"].value_counts())
print(df["IDFS"].value_counts())
print(df["DRFI"].value_counts())
print()
df = wsi_df()
print(df["OS"].value_counts())
print(df["IDFS"].value_counts())
print(df["DRFI"].value_counts())
len(df[["IDFS", "n_immune"]]["n_immune"].dropna())
import statsmodels.api as sm
from numpy import mean
from numpy import std
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
from numpy.linalg import LinAlgError
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from sklearn.neural_network import MLPClassifier
def _results_to_pandas(summary):
return pd.read_html(summary.tables[1].as_html(), header=0, index_col=0)[0]
def logit(x_train, y_train, x_val, y_val):
try:
model = sm.Logit(y_train, x_train).fit(disp=False)
return model.predict(x_val), model
except (LinAlgError, PerfectSeparationError):
return np.random.rand(*y_val.shape), None
def cross_validation(y, X, model = logit):
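    # Standardize features, add an intercept column, then run stratified 5x5 nested CV;
    # predictions are collected on the inner validation folds and the per-fold logit
    # summaries are averaged into a single coefficient table.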
if len(y.shape) > 1:
y = y.iloc[:,0]
X = (X-X.mean())/X.std()
X = pd.DataFrame(X)
X["Intercept"] = 1.0
true, pred, = ([], [])
summaries = []
cv_outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
for train_val_idx, test_idx in cv_outer.split(X, y):
X_train_val, X_test = X.iloc[train_val_idx], X.iloc[test_idx]
y_train_val, y_test = y.iloc[train_val_idx], y.iloc[test_idx]
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
for train_idx, val_idx in cv_inner.split(X_train_val, y_train_val):
x_train, x_val = X_train_val.iloc[train_idx], X_train_val.iloc[val_idx]
y_train, y_val = y_train_val.iloc[train_idx], y_train_val.iloc[val_idx]
#x_train, y_train = pipeline.fit_resample(x_train, y_train)
y_pred, m = model(x_train, y_train, x_val, y_val)
true.extend(list(y_val))
pred.extend(list(y_pred))
if m:
summaries.append(_results_to_pandas(m.summary()))
if summaries:
result = sum(summaries) / len(summaries)
else:
result = None
return true, pred, result
def cross_validation_test(y, X, model = logit):
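    # Same preprocessing as cross_validation, but the model is fit on each outer
    # train+validation split and evaluated on the held-out outer test fold.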
if len(y.shape) > 1:
y = y.iloc[:,0]
X = (X-X.mean())/X.std()
X["Intercept"] = 1.0
true, pred, = ([], [])
summaries = []
cv_outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
for train_val_idx, test_idx in cv_outer.split(X, y):
X_train_val, X_test = X.iloc[train_val_idx], X.iloc[test_idx]
y_train_val, y_test = y.iloc[train_val_idx], y.iloc[test_idx]
y_pred, m = model(X_train_val, y_train_val, X_test, y_test)
true.extend(list(y_test))
pred.extend(list(y_pred))
if m:
summaries.append(_results_to_pandas(m.summary()))
if summaries:
result = sum(summaries) / len(summaries)
else:
result = None
return true, pred, result
from sklearn.metrics import roc_curve, roc_auc_score
def plot_roc(df, endpoint, feature, label, ax):
df = df[[endpoint, feature]].dropna()
n = len(df)
true, pred, summary = cross_validation(df[endpoint], df[feature])
auc = round(roc_auc_score(true, pred), 2)
fpr, tpr, _ = roc_curve(true, pred)
ax.plot(fpr, tpr, label=f"{label} (AUC={auc})", linewidth=4)
ax.legend(loc="lower right")
wsi = wsi_df()
tma = tma_df()
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(12,8), sharex=True, sharey=True)
plt.tight_layout()
pad = 5
ax = axs[0,0]
ax.set_title("OS", size="large")
ax.annotate("WSI", xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
plot_roc(wsi, "OS", "n_immune", "AI-counted TILs", ax)
plot_roc(wsi, "OS", "STR", "estimated TILS", ax)
ax = axs[1,0]
ax.annotate("TMA", xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
plot_roc(tma, "OS", "n_immune", "AI-counted TILs", ax)
plot_roc(tma, "OS", "STR", "estimated TILS", ax)
ax = axs[0,1]
ax.set_title("DRFI", size="large")
plot_roc(wsi, "DRFI", "n_immune", "AI-counted TILs", ax)
plot_roc(wsi, "DRFI", "STR", "estimated TILS", ax)
ax = axs[1,1]
plot_roc(tma, "DRFI", "n_immune", "AI-counted TILs", ax)
plot_roc(tma, "DRFI", "STR", "estimated TILS", ax)
ax = axs[0,2]
ax.set_title("IDFS", size="large")
plot_roc(wsi, "IDFS", "n_immune", "AI-counted TILs", ax)
plot_roc(wsi, "IDFS", "STR", "estimated TILS", ax)
ax = axs[1,2]
plot_roc(tma, "IDFS", "n_immune", "AI-counted TILs", ax)
plot_roc(tma, "IDFS", "STR", "estimated TILS", ax)
from itertools import combinations, chain, product, permutations
from tqdm import tqdm
from patsy import dmatrices, dmatrix
from scipy.stats import pearsonr
group = ["distant_relapse"]
image_features = set([
"STR",
"n_immune",
"n_tumor",
"tumor_area",
"immune_area",
'tumor_k_100',
'immune_k_100',
"t_tils_100",
"s_tils_100",
])
pat_features = set(['age', 'nodes', "size"])
all_features = image_features.union(pat_features)
def label(feature):
r = re.findall("\d{1,3}", feature)
if feature == "n_immune":
return "$N_i$"
elif feature == "n_tumor":
return "$N_t$"
elif feature == "immune_area":
return "$A_i$"
elif feature == "tumor_area":
return "$A_t$"
elif feature.startswith("s_tils"):
return "$N_{is}(" + r[0] + ")$"
elif feature.startswith("t_tils"):
return "$N_{it}(" + r[0] + ")$"
elif feature.startswith("immune_k"):
return "$K_{i}(" + r[0] + ")$"
elif feature.startswith("tumor_k"):
return "$K_{t}(" + r[0] + ")$"
elif feature == "nodes":
return "$N_n$"
elif feature == "STR":
return feature
else:
return feature.title()
pd.options.mode.chained_assignment = None
def try_interactions(data, features, target, n_features = [1], test=False):
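    # Fit a logistic model for every combination of `n_features` features and record
    # the cross-validated (or test) ROC curve and AUC for predicting `target`.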
d = []
for f in tqdm(list(chain(*[combinations(features, i) for i in n_features]))):
f = list(f)
nona = data[f + [target]].dropna()
y = nona[target]
X = nona[f]
if test:
true, pred, results = cross_validation_test(y, X, logit)
else:
true, pred, results = cross_validation(y, X, logit)
auc = roc_auc_score(true, pred)
fpr, tpr, thresholds = roc_curve(true, pred)
d.append({
"formula" : f,
"AUC" : auc,
"tpr" : tpr,
"fpr" : fpr,
"thresh" : thresholds,
"model" : results,
"target" : target
})
return pd.DataFrame(d).sort_values("AUC", ascending=False).reset_index()
def best_features(df,features, target, n=10):
result = []
for f in features:
y, X = dmatrices(f"{target} ~ {f}", df, NA_action='drop', return_type='dataframe')
true, pred, _ = cross_validation(y, X, logit)
auc = roc_auc_score(true, pred)
result.append((f, auc))
return [f[0] for f in sorted(result, key = lambda x: x[1], reverse=True)[:n]]
def filter_correlated(df, corr_limit = 0.9):
corr_matrix = df.corr().abs()
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] > corr_limit)]
return df.drop(to_drop, axis=1)
def high_correlation(df, corr_limit):
return [(a,b) for a, b in combinations(df.columns, 2)
if df[[a,b]].corr().iloc[0,1] > corr_limit]
def worse_predictor(df, feature_pairs, target):
f = []
for a, b in feature_pairs:
f.append(min(a,b, key = lambda x: auc_formula(df, f"{target} ~ {x}")))
return set(f)
def auc_formula(data, formula, test=False):
y, X = dmatrices(formula, data, NA_action='drop', return_type='dataframe')
if test:
true, pred, m = cross_validation_test(y, X, logit)
else:
true, pred, m = cross_validation(y, X, logit)
auc = roc_auc_score(true, pred)
fpr, tpr, _ = roc_curve(true, pred)
return fpr, tpr, auc, m
# All single image features
test = True
n_features = [1]
feat = image_features
#df_treated = df_all[df_all["treated"]==True]
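# NOTE: `df_all`, `df_treated` and `IMAGE_TYPE` are assumed to come from earlier cells of
# the original notebook this one was copied from; they are not defined above this point.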
distant_img = try_interactions(df_treated, feat, "distant_relapse", n_features, test=test)
local_img = try_interactions(df_treated, feat, "local_relapse", n_features, test=test)
dead_img = try_interactions(df_treated, feat, "dead", n_features, test=test)
def format_df(df):
df["label"] = df["formula"].apply(lambda x: label(x[0]))
df["coef"] = df["model"].apply(lambda x: x.iloc[0]["coef"])
df = df.round(2)
return df[["label", "AUC", "coef"]].add_prefix(df["target"].iloc[0]+"_")
latex = pd.concat([format_df(distant_img),
format_df(local_img),
format_df(dead_img)], axis=1).to_latex(index=False, escape=False)
print(IMAGE_TYPE)
print(latex)
sns.scatterplot(data=df_all, x="immune_k_100", y="n_immune")
plt.xscale('log')
plt.yscale('log')
# Best features according to validation score
test = False
features = all_features
n_features = range(1,len(all_features)+1)
df_treated = df_all[df_all["treated"]]
distant_treated = try_interactions(df_treated, features, "distant_relapse", n_features, test=test)
local_treated = try_interactions(df_treated,features, "local_relapse", n_features, test=test)
dead_treated = try_interactions(df_treated, features, "dead", n_features, test=test)
pd.concat([distant_treated,
local_treated,
dead_treated]).to_pickle(f"combinations_{IMAGE_TYPE}.pickle")
# All single patient data predictors
test = True
n_features = [1]
df_treated_pat = df_pat[df_pat["treated"]]
distant_pat = try_interactions(df_treated_pat, pat_features, "distant_relapse", n_features, test=test)
local_pat = try_interactions(df_treated_pat, pat_features, "local_relapse", n_features, test=test)
dead_pat = try_interactions(df_treated_pat, pat_features, "dead", n_features, test=test)
latex = pd.concat([format_df(distant_pat),
format_df(local_pat),
format_df(dead_pat)], axis=1).to_latex(index=False, escape=False)
print(latex)
def plot_roc(df, df_treated, img_type):
r = lambda x: round(x, 2)
selected_all = df["formula"].iloc[0]
selected_model = df[df["formula"].apply(lambda x: "STR" not in x and not set(x).intersection(pat_features))]["formula"].iloc[0]
selected_pat = df[df["formula"].apply(lambda x: not set(x).intersection(image_features))]["formula"].iloc[0]
formula_all = f"{df['target'].iloc[0]} ~ -1 + {'+'.join(selected_all)}"
formula_model = f"{df['target'].iloc[0]} ~ -1 + {'+'.join(selected_model)}"
formula_pat = f"{df['target'].iloc[0]} ~ -1 + {'+'.join(selected_pat)}"
formula_str = f"{df['target'].iloc[0]} ~ -1 + STR"
_, _, auc_val_all, _ = auc_formula(df_treated, formula_all, test=False)
_, _, auc_val_model, _ = auc_formula(df_treated, formula_model, test=False)
_, _, auc_val_pat, _ = auc_formula(df_treated, formula_pat, test=False)
_, _, auc_val_str, _ = auc_formula(df_treated, formula_str, test=False)
print("all", [label(f) for f in selected_all], round(auc_val_all, 2))
print("model", [label(f) for f in selected_model], round(auc_val_model,2 ))
print("pat", [label(f) for f in selected_pat], round(auc_val_pat, 2))
print("str", ["STR"], formula_str, round(auc_val_str, 2))
fpr_all, tpr_all, auc_all, res = auc_formula(df_treated, formula_all, test=True)
fpr_model, tpr_model, auc_model, res = auc_formula(df_treated, formula_model, test=True)
fpr_pat, tpr_pat, auc_pat, res = auc_formula(df_treated, formula_pat, test=True)
fpr_str, tpr_str, auc_str, res = auc_formula(df_treated, formula_str, test=True)
plt.plot(fpr_model, tpr_model, label=f"Computed metrics ({img_type}), AUC: {r(auc_model)}", linewidth=4)
plt.plot(fpr_str, tpr_str, label=f"Estimated stromal TILs ({img_type}), AUC: {r(auc_str)}", linewidth=4)
plt.plot(fpr_pat, tpr_pat, label=f"Patient data, AUC: {r(auc_pat)}", linewidth=4)
plt.plot(fpr_all, tpr_all, label=f"All features, AUC: {r(auc_all)}", linewidth=5, linestyle=':')
plt.ylabel("True positive rate", fontsize=15)
plt.xlabel("False positive rate", fontsize=15)
plt.legend(fontsize=14, title = "Feature set", loc = "lower right")
fig_options = {
'bbox_inches' : 'tight'
}
IMAGE_TYPE = "WSI"
print(IMAGE_TYPE)
font_size_title = 16
df = pd.read_pickle(f"./combinations_{IMAGE_TYPE}.pickle")
df_treated = pd.read_pickle(f"./df_treated_{IMAGE_TYPE}.pickle")
plt.figure(figsize=(8,8))
plt.title(f"ROC Distant relapse from {IMAGE_TYPE}", fontsize=font_size_title)
target = "distant_relapse"
print(target)
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
plt.figure(figsize=(8,8))
plt.title(f"ROC Local relapse from {IMAGE_TYPE}", fontsize=font_size_title)
target = "local_relapse"
print(target)
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
plt.figure(figsize=(8,8))
plt.title(f"ROC Mortality from {IMAGE_TYPE}", fontsize=font_size_title)
target = "dead"
print(target)
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
IMAGE_TYPE = "TMA"
print("\n" + IMAGE_TYPE)
font_size_title = 16
print(target)
df = pd.read_pickle(f"./combinations_{IMAGE_TYPE}.pickle")
df_treated = pd.read_pickle(f"./df_treated_{IMAGE_TYPE}.pickle")
plt.figure(figsize=(8,8))
plt.title(f"ROC Distant relapse from {IMAGE_TYPE}", fontsize=font_size_title)
target = "distant_relapse"
print(target)
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
plt.figure(figsize=(8,8))
plt.title(f"ROC Local relapse from {IMAGE_TYPE}", fontsize=font_size_title)
target = "local_relapse"
print(target)
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
plt.figure(figsize=(8,8))
plt.title(f"ROC Mortality from {IMAGE_TYPE}", fontsize=font_size_title)
target = "dead"
plot_roc(df[df["target"] == target], df_treated, IMAGE_TYPE)
plt.savefig(f"../docs/roc_{IMAGE_TYPE}_{target}.svg", **fig_options)
formula_distant = f"distant_relapse ~ -1 + {distant_treated['formula'].iloc[0]}"
formula_local = f"local_relapse ~ -1 + {local_treated['formula'].iloc[0]}"
formula_dead = f"dead ~ -1 + {dead_treated['formula'].iloc[0]}"
print(formula_distant)
print(formula_local)
print(formula_dead)
def plot_roc(fpr, tpr, **kwargs):
sns.lineplot(x=fpr, y=tpr, linewidth=4, estimator=None, **kwargs)
plt.xlabel("False positive rate")
def legend(df):
terms = df["formula"].iloc[0].split('+')
plt.legend(loc='lower right', title = f"{'+'.join([label(term) for term in terms])}")
plt.figure(figsize=(15, 5))
plt.suptitle("Best predictors using WSIs and patient data", fontsize=16, y=1)
plt.tight_layout()
matplotlib.rcParams['font.size'] = 20
plt.subplot(131)
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_distant, test=True)
plot_roc(fpr, tpr, label="Test AUC: " + str(round(auc,2)))
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_distant, test=False)
plot_roc(fpr, tpr, label="Validation AUC: " + str(round(auc,2)))
legend(distant_treated)
plt.title("Distant relapse", fontsize=15)
plt.ylabel("True positive rate")
print("\tDISTANT RELAPSE:")
print(res)
plt.subplot(132)
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_local, test=True)
plot_roc(fpr, tpr, label="Test AUC: " + str(round(auc,2)))
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_local, test=False)
plot_roc(fpr, tpr, label="Validation AUC: " + str(round(auc,2)))
legend(local_treated)
plt.title("Local relapse", fontsize=15)
print("\tLOCAL RELAPSE:")
print(res)
plt.subplot(133)
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_dead, test=True)
plot_roc(fpr, tpr, label="Test AUC: " + str(round(auc,2)))
fpr, tpr, auc, res = auc_formula(df_all[df_all["treated"]], formula_dead, test=False)
plot_roc(fpr, tpr, label="Validation AUC: " + str(round(auc,2)))
legend(dead_treated)
plt.title("Fatality", fontsize=15)
print("\tOVER ALL SURVIVAL:")
print(res)
plt.savefig("../docs/roc_best_predictors_wsi.svg", bbox_inches='tight')
# Without intercept
# With intercept
plt.figure(figsize=(8,8))
plt.title("Local relapse, 800 tiles")
for n, row in df_distant.iloc[0:20].iterrows():
plt.plot(*row["roc"], label=f"{round(row['AUC'],3)} {row['formula']}")
plt.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
def test_samples(df_wsi, formula):
d = []
n_max = df_wsi["image_id"].value_counts().min()
for n in tqdm(np.logspace(0, np.log10(n_max), num=1000)[::-1]):
for i in range(1):
df_wsi_sample = df_wsi.groupby("image_id").sample(int(n), replace=False)
df = merge_patient_data_wsi(df_wsi_sample, df_pat[df_pat["treated"]])
y, X = dmatrices(formula, df, NA_action = "drop", return_type="dataframe")
true, pred, _ = cross_validation(y, X, logit)
auc = roc_auc_score(true, pred)
tpr, fpr, thresholds = roc_curve(true, pred)
d.append({
"Number of samples" : n,
"formula" : formula,
"AUC" : auc,
"roc" : (tpr, fpr),
"thresh" : thresholds,
})
return pd.DataFrame(d)
#results_distant = test_samples(df_wsi, df_distant["formula"].iloc[0])
results_distant = test_samples(df_wsi, "distant_relapse ~ n_immune")
results_local = test_samples(df_wsi, "local_relapse ~ n_immune + s_tils_100 + t_tils_100")
results_dead = test_samples(df_wsi, "dead ~ n_immune + t_tils_100 + n_tumor")
results_distant["Outcome"] = "Distant relapse"
results_local["Outcome"] = "Local relapse"
results_dead["Outcome"] = "Fatality"
df = pd.concat([results_distant, results_local, results_dead])
plt.figure(figsize=(8,8))
sns.scatterplot(data=df, x_jitter=True,y="AUC", x="Number of samples", hue="Outcome", style="Outcome", s=40)
plt.xscale('log')
plt.xlabel("Number of WSI-samples")
plt.title("AUC vs. WSI-sample size", fontsize=16)
plt.savefig("../docs/auc_sample_size.svg", **fig_options)
plt.xlim((1,600))
results_local = test_samples(df_wsi, df_local["formula"].iloc[0])
plt.figure()
sns.boxplot(data=results_distant, y="AUC", x="n_samples")
formula = "distant_relapse ~ n_tumor*(tumor_tils1+tumor_cluster)"
print(results_distant["formula"].iloc[0])
df_wsi_sample = df_wsi.groupby("image_id").sample(400, replace=False)
df = merge_patient_data_wsi(df_wsi_sample, df_pat[df_pat["treated"] == 1])
df = df[features + ["distant_relapse"]].replace([np.inf, -np.inf], np.nan).dropna()
y, X = dmatrices(formula, df, NA_action="drop", return_type="dataframe")
true, pred, _ = cross_validation(y, X, logit)
auc = roc_auc_score(true, pred)
tpr, fpr, thresholds = roc_curve(true, pred)
print(auc)
df_all["nodes"].isna().value_counts()
import os
from shutil import copyfile
path = "../data/tnbc_wsi/images/"
for image_name in os.listdir(path):
pat_id = pat_id_wsi(image_name)
if pat_id in df_pat.index and df_pat.loc[pat_id]["treated"]:
copyfile(os.path.join(path, image_name), "../data/tnbc_wsi/treated/" + image_name)
df_pat["treated"].value_counts()
###Output
_____no_output_____ |
notebooks/02-tuning/01-tune-with-ice.ipynb | ###Markdown
Import data
###Code
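# Assumed setup (not shown in this notebook): pandas as pd, numpy as np, matplotlib.pyplot as plt,
# sklearn's train_test_split and RandomForestClassifier, plus the project-specific `processed_root`
# path helper and the `ICE` class, imported in an earlier cell.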
X = pd.read_csv(processed_root('cervical_cancer_risks/X.csv'))
y = pd.read_csv(processed_root('cervical_cancer_risks/y.csv'))['Biopsy']
###Output
_____no_output_____
###Markdown
Train-validation split
###Code
X_train, X_val, y_train, y_val = train_test_split(X, y,
train_size = 0.9, test_size = 0.1,
random_state = 42)
###Output
_____no_output_____
###Markdown
Functions
###Code
def brier_score(y_pred, y_true):
    # mean squared difference between prediction and true outcome
    return np.mean((y_pred - y_true) ** 2)
###Output
_____no_output_____
###Markdown
Base model
###Code
rf = RandomForestClassifier(n_estimators = 500)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_val)
print(f"Brier score: {brier_score(y_pred, y_val):.5f}")
np.mean(y_pred==y_val)
###Output
_____no_output_____
###Markdown
Tune model
###Code
ice = ICE("binary", time = False)
feature = "Age"
max_depths = [i for i in range(1,50,1)]
val_loss = []
val_accuracy = []
rf_fi = []
ice_fi = []
ice_fi_normalized = []
for md in max_depths:
rf = RandomForestClassifier(max_depth = md, n_estimators = 500)
rf.fit(X_train, y_train)
# val loss
y_pred = rf.predict_proba(X_val)[:,1]
val_loss.append(brier_score(y_pred, y_val))
# val accuracy
y_pred = rf.predict(X_val)
val_accuracy.append(np.mean(y_pred == y_val))
# rf feature importance
rf_fi.append(rf.feature_importances_[X_train.columns == feature].item())
# ice feature impact
ice.fit_single_feature(X, rf, "Age")
fis = ice.get_feature_impact("Age")
ice_fi.append(fis['fi'])
ice_fi_normalized.append(fis['fi_normalized'])
tune_results = pd.DataFrame({'max_depth':max_depths,
'brier':val_loss,
'accuracy':val_accuracy,
'rf_fi':rf_fi,
'ice_fi':ice_fi,
'ice_fi_normalized':ice_fi_normalized})
fig, ax = plt.subplots()
ax.plot('max_depth', 'rf_fi', data = tune_results, label = 'Random Forest FI')
ax.plot('max_depth', 'ice_fi_normalized', data = tune_results, label = 'ICE FI')
ax.legend()
###Output
_____no_output_____ |
save_and_load.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Save and load models Model progress can be saved during—and after—training. This means a model can resume where it left off and avoid long training times. Saving also means you can share your model and others can recreate your work. When publishing research models and techniques, most machine learning practitioners share:* code to create the model, and* the trained weights, or parameters, for the model. Sharing this data helps others understand how the model works and try it themselves with new data. Caution: Be careful with untrusted code—TensorFlow models are code. See [Using TensorFlow Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for details. Options: There are different ways to save TensorFlow models—depending on the API you're using. This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For other approaches, see the TensorFlow [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide or [Saving in eager](https://www.tensorflow.org/guide/eager#object-based_saving). Setup: Installs and imports. Install and import TensorFlow and dependencies:
###Code
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
!pip install pyyaml h5py # Required to save models in HDF5 format
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
print(tf.version.VERSION)
###Output
_____no_output_____
###Markdown
Get an example datasetTo demonstrate how to save and load weights, you'll use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). To speed up these runs, use the first 1000 examples:
###Code
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
###Output
_____no_output_____
###Markdown
Define a model Start by building a simple sequential model:
###Code
# Define a simple sequential model
def create_model():
model = tf.keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
# Create a basic model instance
model = create_model()
# Display the model's architecture
model.summary()
###Output
_____no_output_____
###Markdown
Save checkpoints during training You can use a trained model without having to retrain it, or pick-up training where you left off—in case the training process was interrupted. The `tf.keras.callbacks.ModelCheckpoint` callback allows to continually save the model both *during* and at *the end* of training. Checkpoint callback usageCreate a `tf.keras.callbacks.ModelCheckpoint` callback that saves weights only during training:
###Code
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=10,
validation_data=(test_images,test_labels),
callbacks=[cp_callback]) # Pass callback to training
# This may generate warnings related to saving the state of the optimizer.
# These warnings (and similar warnings throughout this notebook)
# are in place to discourage outdated usage, and can be ignored.
###Output
_____no_output_____
###Markdown
This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch:
###Code
!ls {checkpoint_dir}
###Output
_____no_output_____
###Markdown
Create a new, untrained model. When restoring a model from weights-only, you must have a model with the same architecture as the original model. Since it's the same model architecture, you can share weights despite that it's a different *instance* of the model.Now rebuild a fresh, untrained model, and evaluate it on the test set. An untrained model will perform at chance levels (~10% accuracy):
###Code
# Create a basic model instance
model = create_model()
# Evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
###Output
_____no_output_____
###Markdown
Then load the weights from the checkpoint and re-evaluate:
###Code
# Loads the weights
model.load_weights(checkpoint_path)
# Re-evaluate the model
loss,acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
###Output
_____no_output_____
###Markdown
Checkpoint callback optionsThe callback provides several options to provide unique names for checkpoints and adjust the checkpointing frequency.Train a new model, and save uniquely named checkpoints once every five epochs:
###Code
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
period=5)
# Create a new model instance
model = create_model()
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=50,
callbacks=[cp_callback],
validation_data=(test_images,test_labels),
verbose=0)
###Output
_____no_output_____
###Markdown
Now, look at the resulting checkpoints and choose the latest one:
###Code
!ls {checkpoint_dir}
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Note: the default tensorflow format only saves the 5 most recent checkpoints.To test, reset the model and load the latest checkpoint:
###Code
# Create a new model instance
model = create_model()
# Load the previously saved weights
model.load_weights(latest)
# Re-evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
###Output
_____no_output_____
###Markdown
What are these files? The above code stores the weights to a collection of [checkpoint](https://www.tensorflow.org/guide/saved_modelsave_and_restore_variables)-formatted files that contain only the trained weights in a binary format. Checkpoints contain:* One or more shards that contain your model's weights.* An index file that indicates which weights are stored in a which shard.If you are only training a model on a single machine, you'll have one shard with the suffix: `.data-00000-of-00001` Manually save weightsYou saw how to load the weights into a model. Manually saving them is just as simple with the `Model.save_weights` method. By default, `tf.keras`—and `save_weights` in particular—uses the TensorFlow [checkpoint](../../guide/checkpoint.ipynb) format with a `.ckpt` extension (saving in [HDF5](https://js.tensorflow.org/tutorials/import-keras.html) with a `.h5` extension is covered in the [Save and serialize models](../../guide/keras/save_and_serializeweights-only_saving_in_savedmodel_format) guide):
###Code
# Save the weights
model.save_weights('./checkpoints/my_checkpoint')
# Create a new model instance
model = create_model()
# Restore the weights
model.load_weights('./checkpoints/my_checkpoint')
# Evaluate the model
loss,acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
###Output
_____no_output_____
###Markdown
Save the entire modelCall [`model.save`](https://www.tensorflow.org/api_docs/python/tf/keras/Modelsave) to save the a model's architecture, weights, and training configuration in a single file/folder. This allows you to export a model so it can be used without access to the original Python code*. Since the optimizer-state is recovered, you can resume training from exactly where you left off.Saving a fully-functional model is very useful—you can load them in TensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/tutorials/import-saved-model.html)) and then train and run them in web browsers, or convert them to run on mobile devices using TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_apiexporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_apiexporting_a_savedmodel_))\*Custom objects (e.g. subclassed models or layers) require special attention when saving and loading. See the **Saving custom objects** section below HDF5 formatKeras provides a basic save format using the [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) standard.
###Code
# Create and train a new model instance.
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# Save the entire model to a HDF5 file.
# The '.h5' extension indicates that the model should be saved to HDF5.
model.save('my_model.h5')
###Output
_____no_output_____
###Markdown
Now, recreate the model from that file:
###Code
# Recreate the exact same model, including its weights and the optimizer
new_model = tf.keras.models.load_model('my_model.h5')
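# If the model had been compiled with a tf.train optimizer (whose state is not saved to HDF5),
# it would need to be re-compiled after loading, for example:
# new_model.compile(optimizer='adam',
#                   loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
#                   metrics=['accuracy'])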
# Show the model architecture
new_model.summary()
###Output
_____no_output_____
###Markdown
Check its accuracy:
###Code
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
###Output
_____no_output_____
###Markdown
This technique saves everything:* The weight values* The model's configuration(architecture)* The optimizer configurationKeras saves models by inspecting the architecture. Currently, it is not able to save TensorFlow optimizers (from `tf.train`). When using those you will need to re-compile the model after loading, and you will lose the state of the optimizer. SavedModel format The SavedModel format is another way to serialize models. Models saved in this format can be restored using `tf.keras.models.load_model` and are compatible with TensorFlow Serving. The [SavedModel guide](https://www.tensorflow.org/guide/saved_model) goes into detail about how to serve/inspect the SavedModel. The section below illustrates the steps to saving and restoring the model.
###Code
# Create and train a new model instance.
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# Save the entire model as a SavedModel.
!mkdir -p saved_model
model.save('saved_model/my_model')
###Output
_____no_output_____
###Markdown
The SavedModel format is a directory containing a protobuf binary and a Tensorflow checkpoint. Inspect the saved model directory:
###Code
# my_model directory
!ls saved_model
# Contains an assets folder, saved_model.pb, and variables folder.
!ls saved_model/my_model
###Output
_____no_output_____
###Markdown
Reload a fresh Keras model from the saved model:
###Code
new_model = tf.keras.models.load_model('saved_model/my_model')
# Check its architecture
new_model.summary()
###Output
_____no_output_____
###Markdown
The restored model is compiled with the same arguments as the original model. Try running evaluate and predict with the loaded model:
###Code
# Evaluate the restored model
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
print(new_model.predict(test_images).shape)
###Output
_____no_output_____ |
Jupyter Notebooks/Data Extraction - Preferred.ipynb | ###Markdown
New way of data extraction 3. Extracting the title, id, url and post content of submissions for each flair obtained above. Extracting 500 posts of each flair.
###Code
newDataset = []
for flair in possibleflairs:
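    # Note: this passes the flair text as a free-text search query; to restrict results to the
    # flair itself, Reddit search also accepts a query of the form f'flair:"{flair}"'.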
for sub in india_subreddit.search(flair, limit=500):
newDataset.append([sub.title, sub.id, sub.url, sub.selftext, flair])
newDataset = pd.DataFrame(newDataset, columns=['Title', 'id', 'url', 'content', 'flair'])
newDataset
###Output
_____no_output_____
###Markdown
4. save the data in a csv
###Code
newDataset.to_csv('newdataset.csv', index=False)
#index=False will prevent the row numbers from being saved as an independent column/attribute in the saved csv file
###Output
_____no_output_____
###Markdown
The second method to extract more features (like comments)
###Code
newDataset = { 'Title' : [],
'ID' : [],
'Url' : [],
'Content' : [],
'Comments' : [],
'Flair' : []
}
for flair in possibleflairs:
subreddits = india_subreddit.search(flair, limit=10)
for sub in subreddits:
newDataset['Title'].append(sub.title)
newDataset['ID'].append(sub.id)
newDataset['Url'].append(sub.url)
newDataset['Content'].append(sub.selftext)
newDataset['Flair'].append(flair)
# newDataset.append([sub.title, sub.id, sub.url, sub.selftext, flair])
sub.comments.replace_more(limit=None)
comment = ''
for top_level_comment in sub.comments:
comment = comment + ' ' + top_level_comment.body
newDataset["Comments"].append(comment)
newDataset = pd.DataFrame(newDataset)
newDataset
newDataset.to_csv('morefeaturesDataset.csv', index=False)
###Output
_____no_output_____
###Markdown
Preferred form of data extraction, that extracts data based on post flairs and not the hottest posts (as done in Data Extraction file)
###Code
import pandas as pd
import praw #PRAW is the API being used to scrap data from Reddit
### Creating a reddit instance by authenticating ourselves
reddit = praw.Reddit(client_id='', client_secret='', user_agent='', password='', username='')
###Output
_____no_output_____
###Markdown
Note: The client_id, client_secret and password are anonymised to preserve privacy. 1. get subreddit info of india subreddit
###Code
india_subreddit = reddit.subreddit('India')
###Output
_____no_output_____
###Markdown
2. Extracting the possible flairs
###Code
def get_possible_flairs(subreddit):
possibleflairs = []
for template in subreddit.flair.link_templates:
possibleflairs.append(template["text"])
return possibleflairs
possibleflairs = get_possible_flairs(india_subreddit)
possibleflairs
###Output
_____no_output_____ |
ASCAD_variable_key/ASCAD_variable_key.ipynb | ###Markdown
DataLoader
###Code
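# Assumed imports (not shown here): torch, torch.nn as nn, torch.utils.data (Dataset, DataLoader),
# torchvision.transforms, torch.autograd.Variable, numpy as np, random, matplotlib.pyplot as plt,
# and sklearn's preprocessing and confusion_matrix.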
### handle the dataset
class TorchDataset(Dataset):
def __init__(self, trs_file, label_file, trace_num, trace_offset, trace_length):
self.trs_file = trs_file
self.label_file = label_file
self.trace_num = trace_num
self.trace_offset = trace_offset
self.trace_length = trace_length
self.ToTensor = transforms.ToTensor()
def __getitem__(self, i):
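        # Return one trace (cropped to trace_length and shaped 1 x trace_length) and its label.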
index = i % self.trace_num
trace = self.trs_file[index,:]
label = self.label_file[index]
trace = trace[self.trace_offset:self.trace_offset+self.trace_length]
trace = np.reshape(trace,(1,-1))
trace = self.ToTensor(trace)
trace = np.reshape(trace, (1,-1))
label = torch.tensor(label, dtype=torch.long)
return trace.float(), label
def __len__(self):
return self.trace_num
### data loader for training
def load_training(batch_size, kwargs):
data = TorchDataset(**kwargs)
train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=1, pin_memory=True)
return train_loader
### data loader for testing
def load_testing(batch_size, kwargs):
data = TorchDataset(**kwargs)
test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=1, pin_memory=True)
return test_loader
###Output
_____no_output_____
###Markdown
Arrays and Functions
###Code
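# AES S-box (SubBytes) lookup table used to compute the attacked intermediate value Sbox[p ^ k].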
Sbox = [99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, 202, 130, 201, 125, 250, 89, 71,
240, 173, 212, 162, 175, 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216,
49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90, 160,
82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208,
239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81, 163, 64, 143, 146, 157, 56, 245, 188,
182, 218, 33, 16, 255, 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, 96,
129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194, 211,
172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, 186,
120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102, 72, 3, 246, 14, 97,
53, 87, 185, 134, 193, 29, 158, 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, 140,
161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22]
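# Hamming weight of every byte value 0..255 (HW_byte[v] == bin(v).count("1")), used for 'hw' labeling.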
HW_byte = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2,
3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3,
3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3,
4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,
3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5,
6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4,
4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8]
### To train a network
def train(epoch, model, scheduler):
"""
- epoch : the current epoch
- model : the current model
    - scheduler : the learning-rate scheduler (stepped once per batch)
"""
# enter training mode
model.train()
# Instantiate the Iterator
iter_ = iter(train_loader)
# get the number of batches
num_iter = len(train_loader)
clf_criterion = nn.CrossEntropyLoss()
# train on each batch of data
for i in range(1, num_iter+1):
        data, label = next(iter_)
if cuda:
data, label = data.cuda(), label.cuda()
data, label = Variable(data), Variable(label)
optimizer.zero_grad()
prediction = model(data)
loss = clf_criterion(prediction, label)
preds = prediction.data.max(1, keepdim=True)[1]
correct_batch = preds.eq(label.data.view_as(preds)).sum()
# optimzie the cross-entropy loss
loss.backward()
optimizer.step()
scheduler.step()
if i % log_interval == 0:
print('Train Epoch {}: [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAcc: {:.6f}%'.format(
epoch, i * len(data), len(train_loader) * batch_size,
100. * i / len(train_loader), loss.data, float(correct_batch) * 100. /batch_size))
### validation
def validation(model):
# enter evaluation mode
model.eval()
valid_loss = 0
# the number of correct prediction
correct_valid = 0
clf_criterion = nn.CrossEntropyLoss()
for data, label in valid_loader:
if cuda:
data, label = data.cuda(), label.cuda()
data, label = Variable(data), Variable(label)
valid_preds = model(data)
# sum up batch loss
valid_loss += clf_criterion(valid_preds, label)
# get the index of the max probability
pred = valid_preds.data.max(1)[1]
# get the number of correct prediction
correct_valid += pred.eq(label.data.view_as(pred)).cpu().sum()
valid_loss /= len(valid_loader)
valid_acc = 100. * correct_valid / len(valid_loader.dataset)
print('Validation: loss: {:.4f}, accuracy: {}/{} ({:.6f}%)'.format(
valid_loss.data, correct_valid, len(valid_loader.dataset),
valid_acc))
return valid_loss, valid_acc
### test/attack
def test(model, disp_GE=True, model_flag='pretrained'):
"""
- model : the current model
- disp_GE : whether to attack/calculate guessing entropy (GE)
- model_flag : a string for naming GE result
"""
# enter evaluation mode
model.eval()
test_loss = 0
# the number of correct prediction
correct = 0
epoch = 0
clf_criterion = nn.CrossEntropyLoss()
# Initialize the prediction and label lists(tensors)
predlist=torch.zeros(0,dtype=torch.long, device='cpu')
lbllist=torch.zeros(0,dtype=torch.long, device='cpu')
test_preds_all = torch.zeros((test_num, class_num), dtype=torch.float, device='cpu')
for data, label in test_loader:
if cuda:
data, label = data.cuda(), label.cuda()
data, label = Variable(data), Variable(label)
test_preds = model(data)
# sum up batch loss
test_loss += clf_criterion(test_preds, label)
# get the index of the max probability
pred = test_preds.data.max(1)[1]
# get the softmax results for attack/showing guessing entropy
softmax = nn.Softmax(dim=1)
test_preds_all[epoch*batch_size:(epoch+1)*batch_size, :] =softmax(test_preds)
# get the predictions (predlist) and real labels (lbllist) for showing confusion matrix
predlist=torch.cat([predlist,pred.view(-1).cpu()])
lbllist=torch.cat([lbllist,label.view(-1).cpu()])
# get the number of correct prediction
correct += pred.eq(label.data.view_as(pred)).cpu().sum()
epoch += 1
test_loss /= len(test_loader)
print('test loss: {:.4f}, test accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss.data, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# get the confusion matrix
confusion_mat = confusion_matrix(lbllist.numpy(), predlist.numpy())
# show the confusion matrix
# plot_sonfusion_matrix(confusion_mat, classes = range(class_num))
# show the guessing entropy and success rate
if disp_GE:
plot_guessing_entropy(test_preds_all.numpy(), real_key, model_flag)
### show the guessing entropy and success rate
def plot_guessing_entropy(preds, real_key, model_flag):
"""
- preds : the probability for each class (n*256 for a byte, n*9 for Hamming weight)
- real_key : the key of the target device
- model_flag : a string for naming GE result
"""
# GE/SR is averaged over 100 attacks
num_averaged = 100
# max trace num for attack
trace_num_max = 500
# the step trace num increases
step = 1
if trace_num_max > 400 and trace_num_max < 1000:
step = 2
if trace_num_max >= 1000 and trace_num_max < 5000:
step = 4
if trace_num_max >= 5000 and trace_num_max < 10000:
step = 5
guessing_entropy = np.zeros((num_averaged, int(trace_num_max/step)))
# attack multiples times for average
for time in range(num_averaged):
# select the attack traces randomly
random_index = list(range(plaintext.shape[0]))
random.shuffle(random_index)
random_index = random_index[0:trace_num_max]
# initialize score matrix
score_mat = np.zeros((trace_num_max, 256))
for key_guess in range(0, 256):
for i in range(0, trace_num_max):
initialState = plaintext[random_index[i]] ^ key_guess
sout = Sbox[initialState]
if labeling_method == 'identity':
label = sout
elif labeling_method == 'hw':
label = HW_byte[sout]
score_mat[i, key_guess] = preds[random_index[i], label]
score_mat = np.log(score_mat + 1e-40)
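        # Accumulate log-likelihoods over a growing number of traces; the guessing entropy
        # at each point is the rank of the real key among all 256 guesses.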
for i in range(0, int(trace_num_max/step)):
log_likelihood = np.sum(score_mat[0:i*step+1,:], axis=0)
ranked = np.argsort(log_likelihood)[::-1]
guessing_entropy[time,i] = list(ranked).index(real_key)
guessing_entropy = np.mean(guessing_entropy,axis=0)
plt.figure(figsize=(20,10))
plt.subplot(1, 1, 1)
plt.grid(True)
x = range(0, trace_num_max, step)
p1, = plt.plot(x, guessing_entropy[0:int(trace_num_max/step)],color='red')
plt.xlabel('Number of trace')
plt.ylabel('Guessing entropy')
#np.save('./results/bilinear_entropy_'+ labeling_method + '_ascad_fixed_' + model_flag + '_'+ desync, guessing_entropy)
plt.show()
### show the confusion matrix
def plot_sonfusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.ylim((len(classes)-0.5, -0.5))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predict label')
plt.show()
# correlation between two matrices
def Matrix_Cor(X, Y):
if X.shape[0] != Y.shape[0]:
print("X and Y have wrong shape")
return
# print(
# "Start calculating the correlation between power trace and intermedia data_file_path"
# )
N, col_X = X.shape
col_Y = Y.shape[1]
Sum_of_X = X.sum(axis=0)
# print(Sum_of_X.shape)
Sum_of_Y = Y.sum(axis=0)
# print(Sum_of_Y.shape)
Sum_of_X2 = (X * X).sum(axis=0)
# print(Sum_of_X2.shape)
Sum_of_Y2 = (Y * Y).sum(axis=0)
# print(Sum_of_Y2.shape)
Sum_of_XY = (X.T).dot(Y)
# print(Sum_of_XY.shape)
r = N * Sum_of_XY - Sum_of_X.reshape(
(col_X, 1)).dot(Sum_of_Y.reshape(1, col_Y))
r = r / np.sqrt(
(N * Sum_of_X2 - Sum_of_X * Sum_of_X).reshape(col_X, 1).dot(
(N * Sum_of_Y2 - Sum_of_Y * Sum_of_Y).reshape(1, col_Y)))
    # print(
    #     "Finished. The correlation matrix is returned.\nPlease run CPA(result=result) to see the key rank."
    # )
return r.T
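# --- Minimal usage sketch (illustrative only; X_demo / Y_demo are made-up names) ---
# Correlate 1000 synthetic "traces" (5 samples each) against one target column;
# the result has shape (1, 5) and column 1 carries the planted dependency.
X_demo = np.random.randn(1000, 5)
Y_demo = 2 * X_demo[:, [1]] + np.random.randn(1000, 1)
demo_corr = Matrix_Cor(X_demo, Y_demo)  # |demo_corr[0, 1]| should be the largest entry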
###Output
_____no_output_____
###Markdown
Setups
###Code
real_key = 34 # key value
labeling_method = 'identity' # labeling of trace
preprocess = 'feature_standardization' # preprocess method
batch_size = 600
total_epoch = 50
lr = 0.005 # learning rate
log_interval = 100 # epoch interval to log training information
train_num = 195000
valid_num = 5000
test_num = 100000
trace_offset = 0
trace_length = 1400
file_path = './Data/ASCAD_var/'
desync = 'desync_0'
no_cuda =False
cuda = not no_cuda and torch.cuda.is_available()
seed = 8
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
if labeling_method == 'identity':
class_num = 256
elif labeling_method == 'hw':
class_num = 9
# to load traces and labels
X_train = np.load(file_path + 'X_train.npy')
Y_train = np.load(file_path + 'Y_train.npy')
X_attack = np.load(file_path + 'X_attack.npy')
Y_attack = np.load(file_path + 'Y_attack.npy')
# to load plaintexts
plaintext = np.load(file_path + 'plaintexts_attack.npy')
plaintext = plaintext[0:test_num,2]
# preprocess of traces
if preprocess == 'feature_standardization':
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_attack = scaler.transform(X_attack)
elif preprocess == 'feature_scaling':
scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
X_train = scaler.fit_transform(X_train)
X_attack = scaler.transform(X_attack)
elif preprocess == 'horizontal_standardization':
mn = np.repeat(np.mean(X_train, axis=1, keepdims=True), X_train.shape[1], axis=1)
std = np.repeat(np.std(X_train, axis=1, keepdims=True), X_train.shape[1], axis=1)
X_train = (X_train - mn)/std
mn = np.repeat(np.mean(X_attack, axis=1, keepdims=True), X_attack.shape[1], axis=1)
std = np.repeat(np.std(X_attack, axis=1, keepdims=True), X_attack.shape[1], axis=1)
X_attack = (X_attack - mn)/std
elif preprocess == 'horizontal_scaling':
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1)).fit(X_train.T)
X_train = scaler.transform(X_train.T).T
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1)).fit(X_attack.T)
X_attack = scaler.transform(X_attack.T).T
# parameters of data loader
kwargs_train = {
'trs_file': X_train[0:train_num,:],
'label_file': Y_train[0:train_num],
'trace_num':train_num,
'trace_offset':trace_offset,
'trace_length':trace_length,
}
kwargs_valid = {
'trs_file': X_train[train_num:train_num+valid_num,:],
'label_file': Y_train[train_num:train_num+valid_num],
'trace_num':valid_num,
'trace_offset':trace_offset,
'trace_length':trace_length,
}
kwargs_test = {
'trs_file': X_attack[0:test_num,:],
'label_file': Y_attack[0:test_num],
'trace_num':test_num,
'trace_offset':trace_offset,
'trace_length':trace_length,
}
train_loader = load_training(batch_size, kwargs_train)
valid_loader = load_training(batch_size, kwargs_valid)
test_loader = load_testing(batch_size, kwargs_test)
print('Load data complete!')
###Output
Load data complete!
###Markdown
Models
###Code
### the pre-trained model
class Net(nn.Module):
def __init__(self, num_classes=class_num):
super(Net, self).__init__()
# the encoder part
self.features = nn.Sequential(
nn.Conv1d(1, 2, kernel_size=1),
nn.SELU(),
nn.BatchNorm1d(2),
nn.AvgPool1d(kernel_size=2, stride=2),
nn.Flatten()
)
# the fully-connected layer 1
self.classifier_1 = nn.Sequential(
nn.Linear(1400, 20),
nn.SELU(),
)
# the fully-connected layer 2
self.classifier_2 = nn.Sequential(
nn.Linear(400, 20),
nn.SELU()
)
# the output layer
self.final_classifier = nn.Sequential(
nn.Linear(20, num_classes)
)
# how the network runs
def forward(self, input):
x1 = self.features(input)
x1 = x1.view(x1.size(0), -1)
x1 = self.classifier_1(x1)
#x1 = x1-torch.mean(x1,0,True)
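        # bilinear (outer-product) pooling: pair every activation of x1 with every other,
        # giving a (batch, 20, 20) matrix that is flattened to 400 features below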
x = torch.bmm(x1.unsqueeze(2), x1.unsqueeze(1))
x = x.view(-1, x1.size(1) **2)
x = self.classifier_2(x)
output = self.final_classifier(x)
return output
###Output
_____no_output_____
###Markdown
Train
###Code
# create a network
model = Net(num_classes=class_num)
print('Construct model complete')
if cuda:
model.cuda()
# initialize a big enough loss
min_loss = 1000
optimizer = optim.Adam([
{'params': model.features.parameters()},
{'params': model.classifier_1.parameters()},
{'params': model.classifier_2.parameters()},
{'params': model.final_classifier.parameters()}
], lr=lr)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=lr, steps_per_epoch=len(train_loader), pct_start=0.2,anneal_strategy='linear', cycle_momentum=False,epochs=total_epoch, div_factor=10, verbose=False)
# training loop: train, validate, and checkpoint the best model so far
for epoch in range(1, total_epoch + 1):
print(f'Train Epoch {epoch}:')
train(epoch, model,scheduler)
with torch.no_grad():
valid_loss, _ = validation(model)
# save the model that achieves the lowest validation loss
if valid_loss < min_loss:
min_loss = valid_loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, './models/ASCAD_variable_key.pth')
###Output
Construct model complete
Train Epoch 1:
Train Epoch 1: [60000/195000 (31%)] Loss: 5.560851 Acc: 0.166667%
Train Epoch 1: [120000/195000 (62%)] Loss: 5.559626 Acc: 0.833333%
Train Epoch 1: [180000/195000 (92%)] Loss: 5.552015 Acc: 0.666667%
Validation: loss: 5.5423, accuracy: 29/5000 (0.580000%)
Train Epoch 2:
Train Epoch 2: [60000/195000 (31%)] Loss: 5.430151 Acc: 0.166667%
Train Epoch 2: [120000/195000 (62%)] Loss: 5.435457 Acc: 0.166667%
Train Epoch 2: [180000/195000 (92%)] Loss: 5.381231 Acc: 1.166667%
Validation: loss: 5.3691, accuracy: 33/5000 (0.660000%)
Train Epoch 3:
Train Epoch 3: [60000/195000 (31%)] Loss: 5.330430 Acc: 1.000000%
Train Epoch 3: [120000/195000 (62%)] Loss: 7.039520 Acc: 1.166667%
Train Epoch 3: [180000/195000 (92%)] Loss: 5.289044 Acc: 0.833333%
Validation: loss: 5.3189, accuracy: 54/5000 (1.080000%)
Train Epoch 4:
Train Epoch 4: [60000/195000 (31%)] Loss: 5.239972 Acc: 0.666667%
Train Epoch 4: [120000/195000 (62%)] Loss: 5.295114 Acc: 1.166667%
Train Epoch 4: [180000/195000 (92%)] Loss: 5.311929 Acc: 1.166667%
Validation: loss: 5.2976, accuracy: 50/5000 (1.000000%)
Train Epoch 5:
Train Epoch 5: [60000/195000 (31%)] Loss: 5.264466 Acc: 1.000000%
Train Epoch 5: [120000/195000 (62%)] Loss: 5.251741 Acc: 1.833333%
Train Epoch 5: [180000/195000 (92%)] Loss: 5.263451 Acc: 1.333333%
Validation: loss: 5.2529, accuracy: 52/5000 (1.040000%)
Train Epoch 6:
Train Epoch 6: [60000/195000 (31%)] Loss: 5.251872 Acc: 1.000000%
Train Epoch 6: [120000/195000 (62%)] Loss: 5.231488 Acc: 1.166667%
Train Epoch 6: [180000/195000 (92%)] Loss: 5.236364 Acc: 1.166667%
Validation: loss: 5.2272, accuracy: 60/5000 (1.200000%)
Train Epoch 7:
Train Epoch 7: [60000/195000 (31%)] Loss: 5.169016 Acc: 0.666667%
Train Epoch 7: [120000/195000 (62%)] Loss: 5.188619 Acc: 1.000000%
Train Epoch 7: [180000/195000 (92%)] Loss: 5.182228 Acc: 1.666667%
Validation: loss: 5.1942, accuracy: 62/5000 (1.240000%)
Train Epoch 8:
Train Epoch 8: [60000/195000 (31%)] Loss: 5.156321 Acc: 1.000000%
Train Epoch 8: [120000/195000 (62%)] Loss: 5.156400 Acc: 1.000000%
Train Epoch 8: [180000/195000 (92%)] Loss: 5.221623 Acc: 1.500000%
Validation: loss: 5.1847, accuracy: 64/5000 (1.280000%)
Train Epoch 9:
Train Epoch 9: [60000/195000 (31%)] Loss: 5.166508 Acc: 0.500000%
Train Epoch 9: [120000/195000 (62%)] Loss: 5.146281 Acc: 2.166667%
Train Epoch 9: [180000/195000 (92%)] Loss: 5.101658 Acc: 1.833333%
Validation: loss: 5.1647, accuracy: 57/5000 (1.140000%)
Train Epoch 10:
Train Epoch 10: [60000/195000 (31%)] Loss: 5.111825 Acc: 2.166667%
Train Epoch 10: [120000/195000 (62%)] Loss: 5.120124 Acc: 1.333333%
Train Epoch 10: [180000/195000 (92%)] Loss: 5.155893 Acc: 1.500000%
Validation: loss: 5.2003, accuracy: 56/5000 (1.120000%)
Train Epoch 11:
Train Epoch 11: [60000/195000 (31%)] Loss: 5.129882 Acc: 1.500000%
Train Epoch 11: [120000/195000 (62%)] Loss: 5.123697 Acc: 2.166667%
Train Epoch 11: [180000/195000 (92%)] Loss: 5.129713 Acc: 1.166667%
Validation: loss: 5.1506, accuracy: 66/5000 (1.320000%)
Train Epoch 12:
Train Epoch 12: [60000/195000 (31%)] Loss: 5.132351 Acc: 1.833333%
Train Epoch 12: [120000/195000 (62%)] Loss: 5.072290 Acc: 2.500000%
Train Epoch 12: [180000/195000 (92%)] Loss: 5.148057 Acc: 1.500000%
Validation: loss: 5.1646, accuracy: 84/5000 (1.680000%)
Train Epoch 13:
Train Epoch 13: [60000/195000 (31%)] Loss: 5.055867 Acc: 1.833333%
Train Epoch 13: [120000/195000 (62%)] Loss: 5.105156 Acc: 1.666667%
Train Epoch 13: [180000/195000 (92%)] Loss: 5.051926 Acc: 2.500000%
Validation: loss: 5.1318, accuracy: 75/5000 (1.500000%)
Train Epoch 14:
Train Epoch 14: [60000/195000 (31%)] Loss: 5.125892 Acc: 2.333333%
Train Epoch 14: [120000/195000 (62%)] Loss: 5.127582 Acc: 2.000000%
Train Epoch 14: [180000/195000 (92%)] Loss: 5.022996 Acc: 2.000000%
Validation: loss: 5.1276, accuracy: 81/5000 (1.620000%)
Train Epoch 15:
Train Epoch 15: [60000/195000 (31%)] Loss: 5.103314 Acc: 1.666667%
Train Epoch 15: [120000/195000 (62%)] Loss: 5.013259 Acc: 2.333333%
Train Epoch 15: [180000/195000 (92%)] Loss: 5.053484 Acc: 2.500000%
Validation: loss: 5.1178, accuracy: 89/5000 (1.780000%)
Train Epoch 16:
Train Epoch 16: [60000/195000 (31%)] Loss: 5.018335 Acc: 1.500000%
Train Epoch 16: [120000/195000 (62%)] Loss: 5.067611 Acc: 1.166667%
Train Epoch 16: [180000/195000 (92%)] Loss: 5.066584 Acc: 3.833333%
Validation: loss: 5.0978, accuracy: 78/5000 (1.560000%)
Train Epoch 17:
Train Epoch 17: [60000/195000 (31%)] Loss: 5.034590 Acc: 2.500000%
Train Epoch 17: [120000/195000 (62%)] Loss: 5.094223 Acc: 1.666667%
Train Epoch 17: [180000/195000 (92%)] Loss: 5.037110 Acc: 3.333333%
Validation: loss: 5.1191, accuracy: 73/5000 (1.460000%)
Train Epoch 18:
Train Epoch 18: [60000/195000 (31%)] Loss: 4.950844 Acc: 3.500000%
Train Epoch 18: [120000/195000 (62%)] Loss: 5.054138 Acc: 2.166667%
Train Epoch 18: [180000/195000 (92%)] Loss: 4.983550 Acc: 1.833333%
Validation: loss: 5.0850, accuracy: 100/5000 (2.000000%)
Train Epoch 19:
Train Epoch 19: [60000/195000 (31%)] Loss: 5.010904 Acc: 1.166667%
Train Epoch 19: [120000/195000 (62%)] Loss: 4.990295 Acc: 3.333333%
Train Epoch 19: [180000/195000 (92%)] Loss: 5.060635 Acc: 2.666667%
Validation: loss: 5.0801, accuracy: 93/5000 (1.860000%)
Train Epoch 20:
Train Epoch 20: [60000/195000 (31%)] Loss: 5.002995 Acc: 3.166667%
Train Epoch 20: [120000/195000 (62%)] Loss: 5.050232 Acc: 2.500000%
Train Epoch 20: [180000/195000 (92%)] Loss: 5.216767 Acc: 1.500000%
Validation: loss: 5.0789, accuracy: 99/5000 (1.980000%)
Train Epoch 21:
Train Epoch 21: [60000/195000 (31%)] Loss: 5.037951 Acc: 2.333333%
Train Epoch 21: [120000/195000 (62%)] Loss: 4.991344 Acc: 2.666667%
Train Epoch 21: [180000/195000 (92%)] Loss: 5.009448 Acc: 1.166667%
Validation: loss: 5.1027, accuracy: 96/5000 (1.920000%)
Train Epoch 22:
Train Epoch 22: [60000/195000 (31%)] Loss: 5.020790 Acc: 1.666667%
Train Epoch 22: [120000/195000 (62%)] Loss: 5.078711 Acc: 3.500000%
Train Epoch 22: [180000/195000 (92%)] Loss: 4.992603 Acc: 2.833333%
Validation: loss: 5.1492, accuracy: 91/5000 (1.820000%)
Train Epoch 23:
Train Epoch 23: [60000/195000 (31%)] Loss: 4.963354 Acc: 3.333333%
Train Epoch 23: [120000/195000 (62%)] Loss: 4.998185 Acc: 2.333333%
Train Epoch 23: [180000/195000 (92%)] Loss: 4.994598 Acc: 2.333333%
Validation: loss: 5.0829, accuracy: 100/5000 (2.000000%)
Train Epoch 24:
Train Epoch 24: [60000/195000 (31%)] Loss: 4.967025 Acc: 2.500000%
Train Epoch 24: [120000/195000 (62%)] Loss: 4.977130 Acc: 2.833333%
Train Epoch 24: [180000/195000 (92%)] Loss: 5.018489 Acc: 2.000000%
Validation: loss: 5.0972, accuracy: 90/5000 (1.800000%)
Train Epoch 25:
Train Epoch 25: [60000/195000 (31%)] Loss: 4.968870 Acc: 2.333333%
Train Epoch 25: [120000/195000 (62%)] Loss: 4.953188 Acc: 3.333333%
Train Epoch 25: [180000/195000 (92%)] Loss: 5.030626 Acc: 1.666667%
Validation: loss: 5.0669, accuracy: 109/5000 (2.180000%)
Train Epoch 26:
Train Epoch 26: [60000/195000 (31%)] Loss: 4.988792 Acc: 3.000000%
Train Epoch 26: [120000/195000 (62%)] Loss: 4.946746 Acc: 1.166667%
Train Epoch 26: [180000/195000 (92%)] Loss: 4.929563 Acc: 1.500000%
Validation: loss: 5.0528, accuracy: 101/5000 (2.020000%)
Train Epoch 27:
Train Epoch 27: [60000/195000 (31%)] Loss: 5.011306 Acc: 3.166667%
Train Epoch 27: [120000/195000 (62%)] Loss: 4.950402 Acc: 2.333333%
Train Epoch 27: [180000/195000 (92%)] Loss: 4.954709 Acc: 4.166667%
Validation: loss: 5.1368, accuracy: 90/5000 (1.800000%)
Train Epoch 28:
Train Epoch 28: [60000/195000 (31%)] Loss: 4.836417 Acc: 3.666667%
Train Epoch 28: [120000/195000 (62%)] Loss: 4.971429 Acc: 2.333333%
Train Epoch 28: [180000/195000 (92%)] Loss: 4.949065 Acc: 1.666667%
Validation: loss: 5.0566, accuracy: 104/5000 (2.080000%)
Train Epoch 29:
Train Epoch 29: [60000/195000 (31%)] Loss: 4.945239 Acc: 2.333333%
Train Epoch 29: [120000/195000 (62%)] Loss: 4.894031 Acc: 3.000000%
Train Epoch 29: [180000/195000 (92%)] Loss: 4.957749 Acc: 3.000000%
Validation: loss: 5.0450, accuracy: 98/5000 (1.960000%)
Train Epoch 30:
Train Epoch 30: [60000/195000 (31%)] Loss: 4.953151 Acc: 1.833333%
Train Epoch 30: [120000/195000 (62%)] Loss: 4.883019 Acc: 3.666667%
Train Epoch 30: [180000/195000 (92%)] Loss: 4.953509 Acc: 2.833333%
Validation: loss: 5.0686, accuracy: 98/5000 (1.960000%)
Train Epoch 31:
Train Epoch 31: [60000/195000 (31%)] Loss: 4.950002 Acc: 2.833333%
Train Epoch 31: [120000/195000 (62%)] Loss: 4.892151 Acc: 2.666667%
Train Epoch 31: [180000/195000 (92%)] Loss: 4.968844 Acc: 3.333333%
Validation: loss: 5.0467, accuracy: 108/5000 (2.160000%)
Train Epoch 32:
Train Epoch 32: [60000/195000 (31%)] Loss: 4.887580 Acc: 1.666667%
Train Epoch 32: [120000/195000 (62%)] Loss: 4.925264 Acc: 3.666667%
Train Epoch 32: [180000/195000 (92%)] Loss: 4.942373 Acc: 2.833333%
Validation: loss: 5.1494, accuracy: 107/5000 (2.140000%)
Train Epoch 33:
Train Epoch 33: [60000/195000 (31%)] Loss: 4.924540 Acc: 2.333333%
Train Epoch 33: [120000/195000 (62%)] Loss: 4.937187 Acc: 2.666667%
Train Epoch 33: [180000/195000 (92%)] Loss: 4.939903 Acc: 3.166667%
Validation: loss: 5.0482, accuracy: 108/5000 (2.160000%)
Train Epoch 34:
Train Epoch 34: [60000/195000 (31%)] Loss: 4.959404 Acc: 2.166667%
Train Epoch 34: [120000/195000 (62%)] Loss: 4.969062 Acc: 3.000000%
Train Epoch 34: [180000/195000 (92%)] Loss: 5.003931 Acc: 1.666667%
Validation: loss: 5.1075, accuracy: 100/5000 (2.000000%)
Train Epoch 35:
Train Epoch 35: [60000/195000 (31%)] Loss: 4.930809 Acc: 2.500000%
Train Epoch 35: [120000/195000 (62%)] Loss: 4.904979 Acc: 3.000000%
Train Epoch 35: [180000/195000 (92%)] Loss: 4.870503 Acc: 3.166667%
Validation: loss: 5.0689, accuracy: 105/5000 (2.100000%)
Train Epoch 36:
Train Epoch 36: [60000/195000 (31%)] Loss: 4.912104 Acc: 3.166667%
Train Epoch 36: [120000/195000 (62%)] Loss: 4.878937 Acc: 3.666667%
Train Epoch 36: [180000/195000 (92%)] Loss: 4.952935 Acc: 2.833333%
Validation: loss: 5.0603, accuracy: 101/5000 (2.020000%)
Train Epoch 37:
Train Epoch 37: [60000/195000 (31%)] Loss: 4.899093 Acc: 3.000000%
Train Epoch 37: [120000/195000 (62%)] Loss: 4.938381 Acc: 2.166667%
Train Epoch 37: [180000/195000 (92%)] Loss: 4.900822 Acc: 2.000000%
Validation: loss: 5.0450, accuracy: 112/5000 (2.240000%)
Train Epoch 38:
Train Epoch 38: [60000/195000 (31%)] Loss: 4.898347 Acc: 2.833333%
Train Epoch 38: [120000/195000 (62%)] Loss: 4.978992 Acc: 3.166667%
Train Epoch 38: [180000/195000 (92%)] Loss: 4.931381 Acc: 2.166667%
Validation: loss: 5.0334, accuracy: 110/5000 (2.200000%)
Train Epoch 39:
Train Epoch 39: [60000/195000 (31%)] Loss: 4.896255 Acc: 2.166667%
Train Epoch 39: [120000/195000 (62%)] Loss: 4.827150 Acc: 2.666667%
Train Epoch 39: [180000/195000 (92%)] Loss: 4.938774 Acc: 2.166667%
Validation: loss: 5.0380, accuracy: 104/5000 (2.080000%)
Train Epoch 40:
Train Epoch 40: [60000/195000 (31%)] Loss: 4.915246 Acc: 2.833333%
Train Epoch 40: [120000/195000 (62%)] Loss: 4.893751 Acc: 3.833333%
Train Epoch 40: [180000/195000 (92%)] Loss: 4.840287 Acc: 3.333333%
Validation: loss: 5.0329, accuracy: 116/5000 (2.320000%)
Train Epoch 41:
Train Epoch 41: [60000/195000 (31%)] Loss: 4.894638 Acc: 2.500000%
Train Epoch 41: [120000/195000 (62%)] Loss: 4.946701 Acc: 3.000000%
Train Epoch 41: [180000/195000 (92%)] Loss: 4.883231 Acc: 2.833333%
Validation: loss: 5.0334, accuracy: 107/5000 (2.140000%)
Train Epoch 42:
Train Epoch 42: [60000/195000 (31%)] Loss: 4.847127 Acc: 4.500000%
Train Epoch 42: [120000/195000 (62%)] Loss: 4.870208 Acc: 3.166667%
Train Epoch 42: [180000/195000 (92%)] Loss: 4.941588 Acc: 2.833333%
Validation: loss: 5.0430, accuracy: 96/5000 (1.920000%)
Train Epoch 43:
Train Epoch 43: [60000/195000 (31%)] Loss: 4.801641 Acc: 2.666667%
Train Epoch 43: [120000/195000 (62%)] Loss: 4.955671 Acc: 2.500000%
Train Epoch 43: [180000/195000 (92%)] Loss: 4.832190 Acc: 2.666667%
Validation: loss: 5.0345, accuracy: 110/5000 (2.200000%)
Train Epoch 44:
Train Epoch 44: [60000/195000 (31%)] Loss: 4.843873 Acc: 5.500000%
Train Epoch 44: [120000/195000 (62%)] Loss: 4.849736 Acc: 4.166667%
Train Epoch 44: [180000/195000 (92%)] Loss: 4.952663 Acc: 3.000000%
Validation: loss: 5.0255, accuracy: 111/5000 (2.220000%)
Train Epoch 45:
Train Epoch 45: [60000/195000 (31%)] Loss: 4.896338 Acc: 3.000000%
Train Epoch 45: [120000/195000 (62%)] Loss: 4.913451 Acc: 2.666667%
Train Epoch 45: [180000/195000 (92%)] Loss: 4.892143 Acc: 2.333333%
Validation: loss: 5.0679, accuracy: 109/5000 (2.180000%)
Train Epoch 46:
Train Epoch 46: [60000/195000 (31%)] Loss: 4.903991 Acc: 2.666667%
Train Epoch 46: [120000/195000 (62%)] Loss: 4.849306 Acc: 3.666667%
Train Epoch 46: [180000/195000 (92%)] Loss: 4.853780 Acc: 3.666667%
Validation: loss: 5.0412, accuracy: 105/5000 (2.100000%)
Train Epoch 47:
Train Epoch 47: [60000/195000 (31%)] Loss: 4.835373 Acc: 4.000000%
Train Epoch 47: [120000/195000 (62%)] Loss: 4.852568 Acc: 4.333333%
Train Epoch 47: [180000/195000 (92%)] Loss: 4.829763 Acc: 4.500000%
Validation: loss: 5.0320, accuracy: 116/5000 (2.320000%)
Train Epoch 48:
Train Epoch 48: [60000/195000 (31%)] Loss: 4.879048 Acc: 4.500000%
Train Epoch 48: [120000/195000 (62%)] Loss: 4.831172 Acc: 4.166667%
Train Epoch 48: [180000/195000 (92%)] Loss: 4.786196 Acc: 6.000000%
Validation: loss: 5.0315, accuracy: 105/5000 (2.100000%)
Train Epoch 49:
Train Epoch 49: [60000/195000 (31%)] Loss: 4.866763 Acc: 4.666667%
Train Epoch 49: [120000/195000 (62%)] Loss: 4.837421 Acc: 4.166667%
Train Epoch 49: [180000/195000 (92%)] Loss: 4.855717 Acc: 3.333333%
Validation: loss: 5.0291, accuracy: 109/5000 (2.180000%)
Train Epoch 50:
Train Epoch 50: [60000/195000 (31%)] Loss: 4.730161 Acc: 4.166667%
Train Epoch 50: [120000/195000 (62%)] Loss: 4.907151 Acc: 2.833333%
Train Epoch 50: [180000/195000 (92%)] Loss: 4.907317 Acc: 4.000000%
Validation: loss: 5.0344, accuracy: 110/5000 (2.200000%)
###Markdown
Results of trained model
###Code
# create a network
model = Net(num_classes=class_num)
print('Construct model complete')
if cuda:
model.cuda()
# load the pre-trained network
checkpoint = torch.load('./models/ASCAD_variable_key.pth')
pretrained_dict = checkpoint['model_state_dict']
model_dict = pretrained_dict
model.load_state_dict(model_dict)
# evaluate the trained model
with torch.no_grad():
print('Epoch:{}'.format(checkpoint['epoch']))
test(model, model_flag='pretrained_source')
###Output
Construct model complete
Epoch:44
test loss: 4.9892, test accuracy: 2274/100000 (2.27%)
###Markdown
Size of the network
###Code
model = Net(num_classes=class_num)
sum(p.numel() for p in model.parameters() if p.requires_grad)
###Output
_____no_output_____
###Markdown
Layer-wise correlation (LWC)
###Code
def get_layer_out(model):
model.eval()
iter_ = iter(train_loader_wo_shuffle)
num_iter = len(train_loader_wo_shuffle)
x0_output = np.zeros((train_num,1400),dtype=np.float64)
x1_output = np.zeros((train_num,20),dtype=np.float64)
bilinear_output = np.zeros((train_num,400),dtype=np.float64)
x3_output = np.zeros((train_num,20),dtype=np.float64)
final_output = np.zeros((train_num,class_num),dtype=np.float64)
for i in range(1, num_iter+1):
        data, _ = next(iter_)  # use built-in next(); loader iterators have no .next() in Python 3
if cuda:
data= data.cuda()
data = Variable(data)
x0_o, x1_o, bilinear_x2_o, x3_o, final_o = model(data)
x0_output[batch_size*(i-1):batch_size*i,:] = x0_o.cpu()
x1_output[batch_size*(i-1):batch_size*i,:] = x1_o.cpu()
bilinear_output[batch_size*(i-1):batch_size*i,:] = bilinear_x2_o.cpu()
x3_output[batch_size*(i-1):batch_size*i,:] = x3_o.cpu()
final_output[batch_size*(i-1):batch_size*i,:] = final_o.cpu()
return x0_output, x1_output, bilinear_output,x3_output, final_output
### network for getting layer output
class Net_Layer_out(nn.Module):
def __init__(self, num_classes=class_num):
super(Net_Layer_out, self).__init__()
# the encoder part
self.features = nn.Sequential(
nn.Conv1d(1, 2, kernel_size=1),
nn.SELU(),
nn.BatchNorm1d(2),
nn.AvgPool1d(kernel_size=2, stride=2),
nn.Flatten()
)
# the fully-connected layer 1
self.classifier_1 = nn.Sequential(
nn.Linear(1400, 20),
nn.SELU(),
)
# the fully-connected layer 2
self.classifier_2 = nn.Sequential(
nn.Linear(400, 20),
nn.SELU()
)
# the output layer
self.final_classifier = nn.Sequential(
nn.Linear(20, num_classes)
)
# how the network runs
def forward(self, input):
x0 = self.features(input)
x0 = x0.view(x0.size(0), -1)
x1 = self.classifier_1(x0)
bilinear_x2 = torch.bmm(x1.unsqueeze(2), x1.unsqueeze(1))
bilinear_x2 = bilinear_x2.view(-1, x1.size(1) **2)
x3 = self.classifier_2(bilinear_x2)
output = self.final_classifier(x3)
return x0, x1, bilinear_x2, x3, output
# create a network
model = Net_Layer_out(num_classes=class_num)
train_loader_wo_shuffle = load_testing(batch_size, kwargs_train)
print('Construct model complete')
if cuda:
model.cuda()
# load the pre-trained network
checkpoint = torch.load('./models/ASCAD_variable_key.pth')
pretrained_dict = checkpoint['model_state_dict']
model_dict = pretrained_dict
model.load_state_dict(model_dict)
# get the layer output
with torch.no_grad():
x0_out,x1_out,bilinear_x2_out,x3_out, final_out = get_layer_out(model)
corr_layer0_vs_trs = Matrix_Cor(x1_out, X_train[0:train_num,:])
fig = plt.figure(figsize=(20,10))
fig1 = fig.add_subplot(1,1,1)
fig_labels = []
font1 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 18,
}
fig1.set_xlabel("Time samples",font1)
fig1.set_ylabel(r"$LWC_{sel}$",font1)
for i in range(x1_out.shape[1]):
fig1.plot(abs(corr_layer0_vs_trs[:,i]))
fig_labels.append(r'Neuron %d' % (i))
fig1.legend(fig_labels, ncol=5, loc='upper center',
bbox_to_anchor=[0.5, 1.0],
columnspacing=1.0, labelspacing=0.0,
handletextpad=0.1, handlelength=1.5,
fancybox=True, shadow=True, fontsize=14)
fig1.set_ylim(0,1)
fig1.margins(0,0)
plt.yticks(size = 14)
plt.xticks(size = 14)
plt.show()
def LWC_comb(sbox_out, layer_output):
median_mat = np.zeros((sbox_out.shape[0],1), dtype=np.float64, order='C')
for i in range(0,sbox_out.shape[0]):
temp = HW_byte[sbox_out[i]]
median_mat[i,0] = temp
corr_mat = Matrix_Cor(median_mat, layer_output)
return corr_mat
layer_wise_corr = np.zeros((6), dtype = np.float64)
layer_wise_corr[0] = np.max(LWC_comb(Y_train[0:train_num], X_train[0:train_num,:]))
layer_wise_corr[1] = np.max(LWC_comb(Y_train[0:train_num], x0_out))
layer_wise_corr[2] = np.max(LWC_comb(Y_train[0:train_num], x1_out))
layer_wise_corr[3] = np.max(LWC_comb(Y_train[0:train_num], bilinear_x2_out))
layer_wise_corr[4] = np.max(LWC_comb(Y_train[0:train_num], x3_out))
layer_wise_corr[5] = np.max(LWC_comb(Y_train[0:train_num], final_out))
fig = plt.figure(figsize=(20,10))
fig3 = fig.add_subplot(1,1,1)
font1 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 18,
}
ticks = [r'$input$',r'$flatten$',r'$fc_1$',r'$bilinear$',r'$fc_2$',r'$output$']
fig3.set_xlabel("Layers",font1)
fig3.set_ylabel(r"$LWC_{comb}$",font1)
labels = []
trace_num_max = 500
x = range(0, 6)
fig3.plot(x,layer_wise_corr, 'blue',marker='x')
plt.yticks(fontproperties = 'Times New Roman', size = 14)
plt.xticks(x, ticks, rotation=0, fontproperties = 'Times New Roman', size = 14)
fig3.set_ylim(0,0.28)
fig3.grid()
plt.show()
###Output
findfont: Font family ['Times New Roman'] not found. Falling back to DejaVu Sans.
|
notebooks/check_GL_area.ipynb | ###Markdown
First, let's check the different dz mosaics for consistent cell area
###Code
# assumed imports for this notebook: pointCollection (pc), numpy, and matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pointCollection as pc

thedir='/Volumes/ice2/ben/ATL14_test/rel002_new/'
dz_1km = pc.grid.data().from_h5(thedir+'/dz.h5', group='dz')
dz_10km = pc.grid.data().from_h5(thedir+'/dz_10km.h5', group='avg_dz_10000m')
dz_20km = pc.grid.data().from_h5(thedir+'/dz_20km.h5', group='avg_dz_20000m')
dz_40km = pc.grid.data().from_h5(thedir+'/dz_40km.h5', group='avg_dz_40000m')
(np.nansum(dz_40km.cell_area)-np.nansum(dz_1km.cell_area))/1.e6
! h5ls {thedir+'/z0.h5/z0'}
import h5py
with h5py.File(thedir+'/z0.h5','r') as h5f:
ca_100m = np.array(h5f['/z0/cell_area'])
mask_100m = np.array(h5f['/z0/mask'])
(np.nansum(dz_1km.cell_area)-np.nansum(ca_100m*mask_100m))/1.e6
fig=plt.figure();
hax=fig.subplots(1,2, sharex=True, sharey=True)
hax[0].imshow(dz_1km.cell_area, extent=dz_1km.extent, origin='lower')
hax[1].imshow(dz_10km.cell_area, extent=dz_10km.extent, origin='lower')
dzi = dz_1km.interp(dz_10km.x, dz_10km.y, gridded=True, field='cell_area')
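# Build an 11x11 boxcar kernel with half-weight edges: convolving the 1 km cell
# areas with this approximates the area summed over a 10 km cell, with boundary
# cells split between neighbouring cells (compared against dz_10km further down).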
K=np.ones([11, 11])
K[0,:]/=2
K[-1,:]/=2
K[:,0]/=2
K[:,-1]/=2
plt.figure()
plt.imshow(K)
from scipy.ndimage import convolve
ca_fill=dz_1km.cell_area.copy()
ca_fill[~np.isfinite(ca_fill)]=0
ca_sm_10km = convolve(ca_fill, K, mode='constant' )
[ca_sm_10km.shape,
dz_1km.x.shape]
ca_interp=pc.grid.data().from_dict({'x':dz_1km.x-1000, 'y':dz_1km.y-1000,'z':ca_sm_10km}).interp(dz_10km.x, dz_10km.y, gridded=True)
plt.figure(); plt.imshow(dz_10km.cell_area - ca_interp, origin='lower'); plt.colorbar()
[dz_10km.cell_area[150, 75], dzi[150, 75]*np.sum(K), dz_10km.cell_area[150, 75] - dzi[150, 75]*np.sum(K)]
np.sum(np.isfinite(dz_10km.cell_area))*5000/1.e6
! h5ls {thedir+'/dz_10km.h5'}
thefile='/home/ben/git_repos/surfaceChange/ATL15.h5'
thefile='/Volumes/ice2/ben/ATL14_test/001/ATL15.h5'
V0={}
dV0={}
A0={}
for group in ['', '_10km','_20km', '_40km']:
D15=pc.grid.data().from_h5(thefile, group='height_change'+group)
field='cell_area'+group
A=getattr(D15, field)
dh=getattr(D15, 'delta_h'+group)
dhdt=getattr(D15, 'dhdt_lag1'+group)
A[A>1.e16]=np.NaN
A0[group]=np.nansum(A)
V0[group] = np.array([np.nansum(A*dh[:,:,ii]) for ii in range(dh.shape[2])])
dV0[group] = np.array([np.nansum(A*dhdt[:,:,ii]) for ii in range(dhdt.shape[2])])
Acell = {'':1.e3**2, '_10km':1.e4**2, '_20km':2.e4**2, '_40km':4.e4**2}
{key:(A0[key]-A0[''])/Acell[key] for key in A0}
dhdt.shape
{key:V0[key]-V0[''] for key in V0}
{key:dV0[key]-dV0[''] for key in V0}
plt.figure();
plt.plot(dV0['']/np.nansum(A0['']))
plt.plot(dV0['_10km']/np.nansum(A0['_10km']))
plt.plot(dV0['_20km']/np.nansum(A0['_20km']))
plt.plot(dV0['_40km']/np.nansum(A0['_40km']))
plt.legend(['1km', '10km', '20km', '40km'])
hfig=plt.figure();
hax=hfig.subplots(1, 4, sharex=True, sharey=True)
for ii, av in enumerate(A0.keys()):
D15=pc.grid.data().from_h5(thefile, group='height_change'+av)
temp=getattr(D15, 'cell_area'+av)
temp[temp>1.e15]=np.NaN
hax[ii].imshow(getattr(D15, 'cell_area'+av), extent=D15.extent, origin='lower')
D15=pc.grid.data().from_h5(thefile, group='height_change'+group)
D15
av
! h5ls /home/ben/git_repos/surfaceChange/ATL15.h5/height_change_10km
###Output
_____no_output_____ |
PythonDataScienceHandbook/notebooks/01.06-Errors-and-Debugging.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* Errors and Debugging Code development and data analysis always require a bit of trial and error, and IPython contains tools to streamline this process.This section will briefly cover some options for controlling Python's exception reporting, followed by exploring tools for debugging errors in code. Controlling Exceptions: ``%xmode``Most of the time when a Python script fails, it will raise an Exception.When the interpreter hits one of these exceptions, information about the cause of the error can be found in the *traceback*, which can be accessed from within Python.With the ``%xmode`` magic function, IPython allows you to control the amount of information printed when the exception is raised.Consider the following code:
###Code
def func1(a, b):
return a / b
def func2(x):
a = x
b = x - 1
return func1(a, b)
func2(1)
###Output
_____no_output_____
###Markdown
Calling ``func2`` results in an error, and reading the printed trace lets us see exactly what happened.By default, this trace includes several lines showing the context of each step that led to the error.Using the ``%xmode`` magic function (short for *Exception mode*), we can change what information is printed.``%xmode`` takes a single argument, the mode, and there are three possibilities: ``Plain``, ``Context``, and ``Verbose``.The default is ``Context``, and gives output like that just shown before.``Plain`` is more compact and gives less information:
###Code
%xmode Plain
func2(1)
###Output
_____no_output_____
###Markdown
The ``Verbose`` mode adds some extra information, including the arguments to any functions that are called:
###Code
%xmode Verbose
func2(1)
###Output
_____no_output_____
###Markdown
This extra information can help narrow-in on why the exception is being raised.So why not use the ``Verbose`` mode all the time?As code gets complicated, this kind of traceback can get extremely long.Depending on the context, sometimes the brevity of ``Default`` mode is easier to work with. Debugging: When Reading Tracebacks Is Not EnoughThe standard Python tool for interactive debugging is ``pdb``, the Python debugger.This debugger lets the user step through the code line by line in order to see what might be causing a more difficult error.The IPython-enhanced version of this is ``ipdb``, the IPython debugger.There are many ways to launch and use both these debuggers; we won't cover them fully here.Refer to the online documentation of these two utilities to learn more.In IPython, perhaps the most convenient interface to debugging is the ``%debug`` magic command.If you call it after hitting an exception, it will automatically open an interactive debugging prompt at the point of the exception.The ``ipdb`` prompt lets you explore the current state of the stack, explore the available variables, and even run Python commands!Let's look at the most recent exception, then do some basic tasks–print the values of ``a`` and ``b``, and type ``quit`` to quit the debugging session:
###Code
%debug
###Output
> [0;32m<ipython-input-1-d849e34d61fb>[0m(2)[0;36mfunc1[0;34m()[0m
[0;32m 1 [0;31m[0;32mdef[0m [0mfunc1[0m[0;34m([0m[0ma[0m[0;34m,[0m [0mb[0m[0;34m)[0m[0;34m:[0m[0;34m[0m[0m
[0m[0;32m----> 2 [0;31m [0;32mreturn[0m [0ma[0m [0;34m/[0m [0mb[0m[0;34m[0m[0m
[0m[0;32m 3 [0;31m[0;34m[0m[0m
[0m
ipdb> print(a)
1
ipdb> print(b)
0
ipdb> quit
###Markdown
The interactive debugger allows much more than this, though–we can even step up and down through the stack and explore the values of variables there:
###Code
%debug
###Output
> [0;32m<ipython-input-1-d849e34d61fb>[0m(2)[0;36mfunc1[0;34m()[0m
[0;32m 1 [0;31m[0;32mdef[0m [0mfunc1[0m[0;34m([0m[0ma[0m[0;34m,[0m [0mb[0m[0;34m)[0m[0;34m:[0m[0;34m[0m[0m
[0m[0;32m----> 2 [0;31m [0;32mreturn[0m [0ma[0m [0;34m/[0m [0mb[0m[0;34m[0m[0m
[0m[0;32m 3 [0;31m[0;34m[0m[0m
[0m
ipdb> up
> [0;32m<ipython-input-1-d849e34d61fb>[0m(7)[0;36mfunc2[0;34m()[0m
[0;32m 5 [0;31m [0ma[0m [0;34m=[0m [0mx[0m[0;34m[0m[0m
[0m[0;32m 6 [0;31m [0mb[0m [0;34m=[0m [0mx[0m [0;34m-[0m [0;36m1[0m[0;34m[0m[0m
[0m[0;32m----> 7 [0;31m [0;32mreturn[0m [0mfunc1[0m[0;34m([0m[0ma[0m[0;34m,[0m [0mb[0m[0;34m)[0m[0;34m[0m[0m
[0m
ipdb> print(x)
1
ipdb> up
> [0;32m<ipython-input-6-b2e110f6fc8f>[0m(1)[0;36m<module>[0;34m()[0m
[0;32m----> 1 [0;31m[0mfunc2[0m[0;34m([0m[0;36m1[0m[0;34m)[0m[0;34m[0m[0m
[0m
ipdb> down
> [0;32m<ipython-input-1-d849e34d61fb>[0m(7)[0;36mfunc2[0;34m()[0m
[0;32m 5 [0;31m [0ma[0m [0;34m=[0m [0mx[0m[0;34m[0m[0m
[0m[0;32m 6 [0;31m [0mb[0m [0;34m=[0m [0mx[0m [0;34m-[0m [0;36m1[0m[0;34m[0m[0m
[0m[0;32m----> 7 [0;31m [0;32mreturn[0m [0mfunc1[0m[0;34m([0m[0ma[0m[0;34m,[0m [0mb[0m[0;34m)[0m[0;34m[0m[0m
[0m
ipdb> quit
###Markdown
This allows you to quickly find out not only what caused the error, but what function calls led up to the error.If you'd like the debugger to launch automatically whenever an exception is raised, you can use the ``%pdb`` magic function to turn on this automatic behavior:
###Code
%xmode Plain
%pdb on
func2(1)
###Output
Exception reporting mode: Plain
Automatic pdb calling has been turned ON
|
module-4-select-important-features/.ipynb_checkpoints/LS_DS_244_Feature_Selection-checkpoint.ipynb | ###Markdown
_Lambda School Data Science - Model Validation_ Feature SelectionObjectives:* Feature importance* Feature selection Yesterday we saw that... Less isn't always more (but sometimes it is) More isn't always better (but sometimes it is) Saavas, Ando [Feature Selection (4 parts)](https://blog.datadive.net/selecting-good-features-part-i-univariate-selection/)>There are in general two reasons why feature selection is used:1. Reducing the number of features, to reduce overfitting and improve the generalization of models.2. To gain a better understanding of the features and their relationship to the response variables.>These two goals are often at odds with each other and thus require different approaches: depending on the data at hand a feature selection method that is good for goal (1) isn’t necessarily good for goal (2) and vice versa. What seems to happen often though is that people use their favourite method (or whatever is most conveniently accessible from their tool of choice) indiscriminately, especially methods more suitable for (1) for achieving (2). While they are not always mutually exclusive, here's a little bit about what's going on with these two goals Goal 1: Reducing Features, Reducing Overfitting, Improving Generalization of ModelsThis is when you're actually trying to engineer a packaged, machine learning pipeline that is streamlined and highly generalizable to novel data as more is collected, and you don't really care "how" it works as long as it does work. Approaches that are good at this tend to fail at Goal 2 because they handle multicollinearity by (sometime randomly) choosing/indicating just one of a group of strongly correlated features. This is good to reduce redundancy, but bad if you want to interpret the data. Goal 2: Gaining a Better Understanding of the Features and their RelationshipsThis is when you want a good, interpretable model or you're doing data science more for analysis than engineering. Company asks you "How do we increase X?" and you can tell them all the factors that correlate to it and their predictive power.Approaches that are good at this tend to fail at Goal 1 because, well, they *don't* handle the multicollinearity problem. If three features are all strongly correlated to each other as well as the output, they will all have high scores. But including all three features in a model is redundant. Each part in Saavas's Blog series describes an increasingly complex (and computationally costly) set of methods for feature selection and interpretation.The ultimate comparison is completed using an adaptation of a dataset called Friedman's 1 regression dataset from Friedman, Jerome H.'s '[Multivariate Adaptive Regression Splines](http://www.stat.ucla.edu/~cocteau/stat204/readings/mars.pdf).>The data is generated according to formula $y=10sin(πX_1X_2)+20(X_3–0.5)^2+10X_4+5X_5+ϵ$, where the $X_1$ to $X_5$ are drawn from uniform distribution and ϵ is the standard normal deviate N(0,1). Additionally, the original dataset had five noise variables $X_6,…,X_{10}$, independent of the response variable. We will increase the number of variables further and add four variables $X_{11},…,X_{14}$ each of which are very strongly correlated with $X_1,…,X_4$, respectively, generated by $f(x)=x+N(0,0.01)$. This yields a correlation coefficient of more than 0.999 between the variables. This will illustrate how different feature ranking methods deal with correlations in the data.**Okay, that's a lot--here's what you need to know:**1. 
$X_1$ and $X_2$ have the same non-linear relationship to $Y$ -- though together they do have a not-quite-linear relationship to $Y$ (with sinusoidal noise--but the range of the values doesn't let it get negative)2. $X_3$ has a quadratic relationship with $Y$3. $X_4$ and $X_5$ have linear relationships to $Y$, with $X_4$ being weighted twice as heavily as $X_5$4. $X_6$ through $X_{10}$ are random and have NO relationship to $Y$5. $X_{11}$ through $X_{14}$ correlate strongly to $X_1$ through $X_4$ respectively (and thus have the same respective relationships with $Y$)This will help us see the difference between the models in selecting features and interpreting features* how well they deal with multicollinearity (5)* how well they identify noise (4)* how well they identify different kinds of relationships* how well they identify/interpret predictive power of individual variables.
###Code
# import
import numpy as np
# Create the dataset
# from https://blog.datadive.net/selecting-good-features-part-iv-stability-selection-rfe-and-everything-side-by-side/
np.random.seed(42)
size = 1500 # I increased the size from what's given in the link
Xs = np.random.uniform(0, 1, (size, 14))
# Changed variable name to Xs to use X later
#"Friedamn #1” regression problem
Y = (10 * np.sin(np.pi*Xs[:,0]*Xs[:,1]) + 20*(Xs[:,2] - .5)**2 +
10*Xs[:,3] + 5*Xs[:,4] + np.random.normal(0,1))
#Add 4 additional correlated variables (correlated with X1-X4)
Xs[:,10:] = Xs[:,:4] + np.random.normal(0, .025, (size,4))
names = ["X%s" % i for i in range(1,15)]
# Putting it into pandas--because... I like pandas. And usually you'll be
# working with dataframes not arrays (you'll care what the column titles are)
import pandas as pd
friedmanX = pd.DataFrame(data=Xs, columns=names)
friedmanY = pd.Series(data=Y, name='Y')
friedman = friedmanX.join(friedmanY)
friedman.head()
###Output
_____no_output_____
###Markdown
We want to be able to look at classification problems too, so let's bin the Y values into a categorical target. It should have *roughly* the same relationships to the X features as Y does.
###Code
# First, let's take a look at what Y looks like
import matplotlib.pyplot as plt
import seaborn as sns
sns.distplot(friedmanY);
###Output
/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_axes.py:6521: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
alternative="'density'", removal="3.1")
###Markdown
That's pretty normal, let's make two binary categories--one balanced, one unbalanced, to see the difference.* balanced binary variable will be split evenly in half* unbalanced binary variable will indicate whether $Y <5$.
###Code
friedman['Y_bal'] = friedman['Y'].apply(lambda y: 1 if (y < friedman.Y.median()) else 0)
friedman['Y_un'] = friedman['Y'].apply(lambda y: 1 if (y < 5) else 0)
print(friedman.Y_bal.value_counts(), '\n\n', friedman.Y_un.value_counts())
friedman.head()
# Finally, let's put it all into our usual X and y's
# (I already have the X dataframe as friedmanX, but I'm working backward to
# follow a usual flow)
X = friedman.drop(columns=['Y', 'Y_bal', 'Y_un'])
y = friedman.Y
y_bal = friedman.Y_bal
y_un = friedman.Y_un
###Output
_____no_output_____
###Markdown
Alright! Let's get to it! Remember, with each part, we are increasing complexity of the analysis and thereby increasing the computational costs and runtime. So even before univariate selection--which compares each feature to the output feature one by one--there is a [VarianceThreshold](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.VarianceThreshold.htmlsklearn.feature_selection.VarianceThreshold) object in sklearn.feature_selection. It defaults to getting rid of any features that are the same across all samples. Great for cleaning data in that respect. The `threshold` parameter defaults to `0` to show the above behavior. if you change it, make sure you have good reason. Use with caution. Part 1: univariate selection* Best for goal 2 - getting "a better understanding of the data, its structure and characteristics"* unable to remove redundancy (for example selecting only the best feature among a subset of strongly correlated features)* Super fast - can be used for baseline models or just after baseline[sci-kit's univariariate feature selection objects and techniques](https://scikit-learn.org/stable/modules/feature_selection.htmlunivariate-feature-selection) Y (continuous output)options (they do what they sound like they do)* SelectKBest* SelectPercentileboth take the same parameter options for `score_func`* `f_regression`: scores by correlation coefficient, f value, p value--basically automates what you can do by looking at a correlation matrix except without the ability to recognize collinearity* `mutual_info_regression`: can capture non-linear correlations, but doesn't handle noise wellLet's take a look at mutual information (MI)
###Code
import sklearn.feature_selection as fe
MIR = fe.SelectKBest(fe.mutual_info_regression, k='all').fit(X, y)
MIR_scores = pd.Series(data=MIR.scores_, name='MI_Reg_Scores', index=names)
MIR_scores
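# For comparison (a quick sketch, not part of the original run): the other score_func
# mentioned above, f_regression, ranks each feature by a univariate linear F-test.
F_reg = fe.SelectKBest(fe.f_regression, k='all').fit(X, y)
F_reg_scores = pd.Series(data=F_reg.scores_, name='F_Reg_Scores', index=names)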
###Output
_____no_output_____
###Markdown
Y_bal (balanced binary output)options* SelectKBest* SelectPercentilethese options will cut out features with error rates above a certain tolerance level, define in parameter -`alpha`* SelectFpr (false positive rate--false positives predicted/total negatives in dataset)* SelectFdr (false discovery rate--false positives predicted/total positives predicted)* ~~SelectFwe (family-wise error--for multinomial classification tasks)~~all have the same optons for parameter `score_func`* `chi2`* `f_classif`* `mutual_info_classif`
###Code
MIC_b = fe.SelectFpr(fe.mutual_info_classif).fit(X, y_bal)
MIC_b_scores = pd.Series(data=MIC_b.scores_,
name='MIC_Bal_Scores', index=names)
MIC_b_scores
###Output
_____no_output_____
###Markdown
Y_un (unbalanced binary output)
###Code
MIC_u = fe.SelectFpr(fe.mutual_info_classif).fit(X, y_un)
MIC_u_scores = pd.Series(data=MIC_u.scores_,
name='MIC_Unbal_Scores', index=names)
MIC_u_scores
###Output
_____no_output_____
###Markdown
Part 2: linear models and regularization* L1 Regularization (Lasso for regression) is best for goal 1: "produces sparse solutions and as such is very useful selecting a strong subset of features for improving model performance" (forces coefficients to zero, telling you which you could remove--but doesn't handle multicollinearity)* L2 Regularization (Ridge for regression) is best for goal 2: "can be used for data interpretation due to its stability and the fact that useful features tend to have non-zero coefficients* Also fast[sci-kit's L1 feature selection](https://scikit-learn.org/stable/modules/feature_selection.htmll1-based-feature-selection) (can easily be switched to L2 using the parameter `penalty='l2'` for categorical targets or using `Ridge` instead of Lasso for continuous targets)We won't do this here, because1. You know regression2. The same principles apply as shown in Part 3 below with `SelectFromModel`3. There's way cooler stuff coming up Part 3: random forests* Best for goal 1, not 2 because: * strong features can end up with low scores * biased towards variables with many categories* "require very little feature engineering and parameter tuning"* Takes a little more time depending on your dataset - but a popular technique[sci-kit's implementation of tree-based feature selection](https://scikit-learn.org/stable/modules/feature_selection.htmltree-based-feature-selection) Y
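(For reference, the Part 2 approach skipped above would look roughly like this -- a sketch only, assuming the `X`, `y`, and `names` defined earlier and an arbitrarily chosen `alpha`; it is not part of the original analysis.)

```python
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso

# L1 regularization drives weak/redundant coefficients to exactly zero
lasso = Lasso(alpha=0.01).fit(X, y)
l1_mask = SelectFromModel(lasso, prefit=True).get_support()
l1_selected = [n for n, keep in zip(names, l1_mask) if keep]
```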
###Code
from sklearn.ensemble import RandomForestRegressor as RFR
# Fitting a random forest regression
rfr = RFR().fit(X, y)
# Creating scores from feature_importances_ ranking (some randomness here)
rfr_scores = pd.Series(data=rfr.feature_importances_, name='RFR', index=names)
rfr_scores
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/ensemble/forest.py:246: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
"10 in version 0.20 to 100 in 0.22.", FutureWarning)
###Markdown
Y_bal
###Code
from sklearn.ensemble import RandomForestClassifier as RFC
# Fitting a Random Forest Classifier
rfc_b = RFC().fit(X, y_bal)
# Creating scores from feature_importances_ ranking (some randomness here)
rfc_b_scores = pd.Series(data=rfc_b.feature_importances_, name='RFC_bal',
index=names)
rfc_b_scores
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/ensemble/forest.py:246: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
"10 in version 0.20 to 100 in 0.22.", FutureWarning)
###Markdown
Y_un
###Code
# Fitting a Random Forest Classifier
rfc_u = RFC().fit(X, y_un)
# Creating scores from feature_importances_ ranking (some randomness here)
rfc_u_scores = pd.Series(data=rfc_u.feature_importances_,
name='RFC_unbal', index=names)
rfc_u_scores
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/ensemble/forest.py:246: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
"10 in version 0.20 to 100 in 0.22.", FutureWarning)
###Markdown
SelectFromModel is a meta-transformer that can be used along with any estimator that has a `coef_` or `feature_importances_` attribute after fitting. The features are considered unimportant and removed, if the corresponding `coef_` or `feature_importances_` values are below the provided `threshold` parameter. Apart from specifying the `threshold` numerically, there are built-in heuristics for finding a `threshold` using a string argument. Available heuristics are `'mean'`, `'median'` and float multiples of these like `'0.1*mean'`.
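For instance, a stricter cut than the default could be requested like this (a sketch reusing the fitted `rfr` from above; `'1.5*mean'` is just an example threshold):

```python
strict_transform = fe.SelectFromModel(rfr, prefit=True, threshold='1.5*mean')
X_strict = strict_transform.transform(X)
```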
###Code
# Random forest regression transformation of X (elimination of least important
# features)
rfr_transform = fe.SelectFromModel(rfr, prefit=True)
X_rfr = rfr_transform.transform(X)
# Random forest classifier transformation of X_bal (elimination of least important
# features)
rfc_b_transform = fe.SelectFromModel(rfc_b, prefit=True)
X_rfc_b = rfc_b_transform.transform(X)
# Random forest classifier transformation of X_un (elimination of least important
# features)
rfc_u_transform = fe.SelectFromModel(rfc_u, prefit=True)
X_rfc_u = rfc_u_transform.transform(X)
RF_comparisons = pd.DataFrame(data=np.array([rfr_transform.get_support(),
rfc_b_transform.get_support(),
rfc_u_transform.get_support()]).T,
columns=['RF_Regressor', 'RF_balanced_classifier',
'RF_unbalanced_classifier'],
index=names)
RF_comparisons
###Output
_____no_output_____
###Markdown
Part 4: stability selection, RFE, and everything side by side* These methods take longer since they are *wrapper methods* and build multiple ML models before giving results. "They both build on top of other (model based) selection methods such as regression or SVM, building models on different subsets of data and extracting the ranking from the aggregates."* Stability selection is good for both goal 1 and 2: "among the top performing methods for many different datasets and settings" * For categorical targets * ~~[RandomizedLogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RandomizedLogisticRegression.html)~~ (Deprecated) use [RandomizedLogisticRegression](https://thuijskens.github.io/stability-selection/docs/randomized_lasso.htmlstability_selection.randomized_lasso.RandomizedLogisticRegression) * [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.htmlsklearn.ensemble.ExtraTreesClassifier) * For continuous targets * ~~[RandomizedLasso](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RandomizedLasso.html)~~ (Deprecated) use [RandomizedLasso](https://thuijskens.github.io/stability-selection/docs/randomized_lasso.htmlstability_selection.randomized_lasso.RandomizedLogisticRegression) * [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.htmlsklearn.ensemble.ExtraTreesRegressor) Welcome to open-source, folks! [Here](https://github.com/scikit-learn/scikit-learn/issues/8995) is the original discussion to deprecate `RandomizedLogisticRegression` and `RandomizedLasso`. [Here](https://github.com/scikit-learn/scikit-learn/issues/9657) is a failed attempt to resurrect it. It looks like it'll be gone for good soon. So we shouldn't get dependent on it. The alternatives from the deprecated scikit objects come from an official scikit-learn-contrib module called [stability_selection](https://github.com/scikit-learn-contrib/stability-selection). They also have a `StabilitySelection` object that acts similarly scikit's `SelectFromModel`.* recursive feature elimination (RFE) is best for goal 1 * [sci-kit's RFE and RFECV (RFE with built-in cross-validation)](https://scikit-learn.org/stable/modules/feature_selection.htmlrecursive-feature-elimination)
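(A quick illustrative sketch of plain RFE, which keeps a fixed number of features; the `RFECV` used below instead picks that number by cross-validation. Not part of the original analysis.)

```python
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

# recursively drop the weakest feature until only 5 remain
rfe5 = RFE(LinearRegression(), n_features_to_select=5).fit(X, y)
rfe5_selected = [n for n, keep in zip(names, rfe5.support_) if keep]
```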
###Code
!pip install git+https://github.com/scikit-learn-contrib/stability-selection.git
###Output
Collecting git+https://github.com/scikit-learn-contrib/stability-selection.git
Cloning https://github.com/scikit-learn-contrib/stability-selection.git to /tmp/pip-req-build-r3yzun5t
Requirement already satisfied: nose>=1.1.2 in /usr/local/lib/python3.6/dist-packages (from stability-selection==0.0.1) (1.3.7)
Requirement already satisfied: scikit-learn>=0.19 in /usr/local/lib/python3.6/dist-packages (from stability-selection==0.0.1) (0.20.2)
Requirement already satisfied: matplotlib>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from stability-selection==0.0.1) (3.0.2)
Requirement already satisfied: numpy>=1.8.0 in /usr/local/lib/python3.6/dist-packages (from stability-selection==0.0.1) (1.14.6)
Requirement already satisfied: scipy>=0.13.3 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.19->stability-selection==0.0.1) (1.1.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->stability-selection==0.0.1) (0.10.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->stability-selection==0.0.1) (2.5.3)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->stability-selection==0.0.1) (2.3.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->stability-selection==0.0.1) (1.0.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib>=2.0.0->stability-selection==0.0.1) (1.11.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib>=2.0.0->stability-selection==0.0.1) (40.7.0)
Building wheels for collected packages: stability-selection
Building wheel for stability-selection (setup.py) ... [?25ldone
[?25h Stored in directory: /tmp/pip-ephem-wheel-cache-h8uk030e/wheels/58/be/39/79880712b91ffa56e341ff10586a1956527813437ddd759473
Successfully built stability-selection
Installing collected packages: stability-selection
Successfully installed stability-selection-0.0.1
###Markdown
Okay, I tried this package... it seems to have some problems... hopefully a good implementation of stability selection for Lasso and Logistic Regression will be created soon! In the meantime, scikit's RandomLasso and RandomLogisticRegression have not been removed, so you can fiddle some! Just alter the commented out code!* import from scikit instead of stability-selection* use scikit's `SelectFromModel` as shown above!Ta Da! Y
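(For completeness, a sketch of the adaptation described above, reading `scores_` directly instead of going through `SelectFromModel`. It only runs on older scikit-learn releases where `RandomizedLasso` still exists -- it was deprecated and later removed -- so treat it as illustrative.)

```python
from sklearn.linear_model import RandomizedLasso  # deprecated; removed in later releases

rl = RandomizedLasso().fit(X, y)
# scores_ holds the fraction of resampled lasso fits that kept each feature
rl_scores = pd.Series(rl.scores_, name='Stability_RandLasso', index=names)
```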
###Code
'''from stability_selection import (RandomizedLogisticRegression,
RandomizedLasso, StabilitySelection,
plot_stability_path)
# Stability selection using randomized lasso method
rl = RandomizedLasso(max_iter=2000)
rl_selector = StabilitySelection(base_estimator=rl, lambda_name='alpha',
n_jobs=2)
rl_selector.fit(X, y);
'''
from sklearn.ensemble import ExtraTreesRegressor as ETR
# Stability selection using randomized decision trees
etr = ETR(n_estimators=50).fit(X, y)
# Creating scores from feature_importances_ ranking (some randomness here)
etr_scores = pd.Series(data=etr.feature_importances_,
name='ETR', index=names)
etr_scores
from sklearn.linear_model import LinearRegression
# Recursive feature elimination with cross-validation using linear regression
# as the model
lr = LinearRegression()
# rank all features, i.e continue the elimination until the last one
rfe = fe.RFECV(lr)
rfe.fit(X, y)
rfe_score = pd.Series(data=(-1*rfe.ranking_), name='RFE', index=names)
rfe_score
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.
warnings.warn(CV_WARNING, FutureWarning)
###Markdown
Y_bal
###Code
# stability selection using randomized logistic regression
'''rlr_b = RandomizedLogisticRegression()
rlr_b_selector = StabilitySelection(base_estimator=rlr_b, lambda_name='C',
n_jobs=2)
rlr_b_selector.fit(X, y_bal);'''
from sklearn.ensemble import ExtraTreesClassifier as ETC
# Stability selection using randomized decision trees
etc_b = ETC(n_estimators=50).fit(X, y_bal)
# Creating scores from feature_importances_ ranking (some randomness here)
etc_b_scores = pd.Series(data=etc_b.feature_importances_,
name='ETC_bal', index=names)
etc_b_scores
from sklearn.linear_model import LogisticRegression
# Recursive feature elimination with cross-validation using logistic regression
# as the model
logr_b = LogisticRegression(solver='lbfgs')
# rank all features, i.e continue the elimination until the last one
rfe_b = fe.RFECV(logr_b)
rfe_b.fit(X, y_bal)
rfe_b_score = pd.Series(data=(-1*rfe_b.ranking_), name='RFE_bal', index=names)
rfe_b_score
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.
warnings.warn(CV_WARNING, FutureWarning)
###Markdown
Y_un
###Code
# stability selection uisng randomized logistic regression
'''rlr_u = RandomizedLogisticRegression(max_iter=2000)
rlr_u_selector = StabilitySelection(base_estimator=rlr_u, lambda_name='C')
rlr_u_selector.fit(X, y_un);'''
# Stability selection using randomized decision trees
etc_u = ETC(n_estimators=50).fit(X, y_un)
# Creating scores from feature_importances_ ranking (some randomness here)
etc_u_scores = pd.Series(data=etc_u.feature_importances_,
name='ETC_unbal', index=names)
etc_u_scores
# Recursive feature elimination with cross-validation using logistic regression
# as the model
logr_u = LogisticRegression(solver='lbfgs')
# rank all features, i.e continue the elimination until the last one
rfe_u = fe.RFECV(logr_u)
rfe_u.fit(X, y_un)
rfe_u_score = pd.Series(data=(-1*rfe_u.ranking_), name='RFE_unbal', index=names)
rfe_u_score
'''RL_comparisons = pd.DataFrame(data=np.array([rl_selector.get_support(),
rlr_b_selector.get_support(),
rlr_u_selector.get_support()]).T,
columns=['RandomLasso', 'RandomLog_bal',
'RandomLog_unbal'],
index=names)
RL_comparisons'''
comparisons = pd.concat([MIR_scores, MIC_b_scores, MIC_u_scores, rfr_scores,
rfc_b_scores, rfc_u_scores, etr_scores, etc_b_scores,
etc_u_scores, rfe_score, rfe_b_score, rfe_u_score],
axis=1)
comparisons
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled_df = scaler.fit_transform(comparisons)
scaled_comparisons = pd.DataFrame(scaled_df, columns=comparisons.columns,
index=names)
scaled_comparisons
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/data.py:323: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by MinMaxScaler.
return self.partial_fit(X, y)
###Markdown
What do you notice from the diagram below?
###Code
sns.heatmap(scaled_comparisons);
###Output
_____no_output_____ |
word2vec_example.ipynb | ###Markdown
word2vec
###Code
import pandas as pd
data = pd.read_csv('../web_crawler_hyeom/KoreaNewsCrawler/OUTPUT/output/other_categories/Article_사회_202001_202003.csv', header=None, error_bad_lines=False)
data.columns = ['date', 'category', 'source', 'title', 'content', 'url']
print(data.shape)
sample = data.sample(n=10000)
sample.head()
!pip install konlpy
from konlpy.tag import Okt
from tqdm import tqdm
okt = Okt()
sample['content'][262504]
okt.pos(sample['content'][262504])
all_tokens = [
okt.pos(text) for text in tqdm(sample['content'], desc='tokenize...')
]
['a','a']
new_all_tokens = []
for tokens in tqdm(all_tokens):
tmp = []
for t in tokens:
tmp.append('/'.join(t))
new_all_tokens.append(tmp)
###Output
0%| | 0/10000 [00:00<?, ?it/s][A
6%|▋ | 645/10000 [00:00<00:01, 6447.90it/s][A
13%|█▎ | 1292/10000 [00:00<00:01, 6090.95it/s][A
20%|█▉ | 1985/10000 [00:00<00:01, 6102.03it/s][A
27%|██▋ | 2678/10000 [00:00<00:01, 6166.81it/s][A
34%|███▎ | 3371/10000 [00:01<00:01, 4984.32it/s][A
38%|███▊ | 3800/10000 [00:01<00:02, 2388.25it/s][A
41%|████▏ | 4133/10000 [00:02<00:04, 1392.32it/s][A
44%|████▍ | 4387/10000 [00:02<00:06, 820.93it/s] [A
48%|████▊ | 4782/10000 [00:02<00:04, 1043.79it/s][A
50%|█████ | 5001/10000 [00:02<00:05, 934.25it/s] [A
59%|█████▉ | 5889/10000 [00:02<00:03, 1277.05it/s][A
66%|██████▋ | 6630/10000 [00:02<00:01, 1698.82it/s][A
74%|███████▍ | 7388/10000 [00:02<00:01, 2214.15it/s][A
80%|████████ | 8045/10000 [00:02<00:00, 2763.70it/s][A
87%|████████▋ | 8669/10000 [00:03<00:00, 3318.12it/s][A
100%|██████████| 10000/10000 [00:03<00:00, 3077.20it/s][A
###Markdown
```python
import torch
torch.save(new_all_tokens, 'save_dir/all_tokens.torch')
```
###Code
%%time
from gensim.models import Word2Vec
model = Word2Vec(
sentences = new_all_tokens,
size = 300,
workers = 10
)
###Output
CPU times: user 1min 21s, sys: 554 ms, total: 1min 22s
Wall time: 20.3 s
###Markdown
```python
model.save('save_dir/w2v.model')
```
###Code
## once training is finished, unload memory that is no longer needed
model.init_sims(replace=True)
'코로나/Noun' in list(model.wv.vocab.keys())
for i in sample['content'][:5]:
print(i, end='\n\n')
model.wv.most_similar('코로나/Noun')
model.wv.most_similar('마스크/Noun')
model.wv.get_vector('코로나/Noun')
class First(object):
def __init__(self):
super(First, self).__init__()
print("first")
class Second(object):
def __init__(self):
super(Second, self).__init__()
print("second")
class Third(First, Second):
def __init__(self):
super(Third, self).__init__()
print("third")
###Output
_____no_output_____
###Markdown
다시시작
###Code
import torch
new_all_tokens = torch.load('save_dir/all_tokens.torch')
from gensim.models import Word2Vec
model = Word2Vec.load('save_dir/w2v.model')
vocabs = list(model.wv.vocab.keys())
# keep only noun tokens before extracting vectors, so that vocabs and
# word_vectors stay aligned for the plotting below
vocabs = [v for v in vocabs if v.split('/')[-1] == 'Noun']
word_vectors = [model.wv[v] for v in vocabs]
len(vocabs)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
xys = pca.fit_transform(word_vectors)
xs = xys[:,0]
ys = xys[:,1]
import matplotlib.pyplot as plt
plt.rc('font', family='NanumGothic')
def plot_2d_graph(vocabs, xs, ys):
plt.figure(figsize=(8,6))
plt.scatter(xs, ys, marker='o')
for i,v in enumerate(vocabs):
plt.annotate(v, xy=(xs[i], ys[i]))
from numpy.random import choice
idxs = choice(range(len(vocabs)), size=50, replace=False)
plot_2d_graph(
[v for i,v in enumerate(vocabs) if i in idxs],
[x for i,x in enumerate(xs) if i in idxs],
[y for i,y in enumerate(ys) if i in idxs]
)
###Output
/opt/conda/envs/finbert/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py:214: RuntimeWarning: Glyph 8722 missing from current font.
font.set_text(s, 0.0, flags=flags)
/opt/conda/envs/finbert/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py:183: RuntimeWarning: Glyph 8722 missing from current font.
font.set_text(s, 0, flags=flags)
|
02.bike_count/04.bike-count-RNN.ipynb | ###Markdown
Bike count forecasting using RNN
###Code
import pandas as pd
import numpy as np
import os
import time
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# preprocessing methods
from sklearn.preprocessing import StandardScaler
# accuracy measures and data spliting
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# deep learning libraries
from keras.models import Input, Model
from keras.models import Sequential
from keras.layers import LSTM, Dense, GRU, SimpleRNN
from keras.layers import Conv1D, MaxPooling1D
from keras import layers
from keras import losses
from keras import optimizers
from keras import metrics
from keras import callbacks
from keras import initializers
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = 15, 7
###Output
_____no_output_____
###Markdown
1. Data import and basic analysis
###Code
DATADIR = '../data/bike/'
MODELDIR = '../checkpoints/bike-sharing/rnn/'
data_path = os.path.join(DATADIR, 'bike-sharing-processed.csv')
data = pd.read_csv(data_path)
data.set_index('date', inplace=True)
data.sort_index(inplace=True)
data.head()
plt.plot(data.cnt, '.')
plt.title('Bike sharing count')
plt.xlabel('sample id')
plt.ylabel('count')
plt.show()
###Output
_____no_output_____
###Markdown
2. Data preparation
###Code
y = data[['cnt']].copy()
X = data.drop(columns=['cnt'], axis=1)
print(f'X and y shape:')
print(X.shape, y.shape)
# date selection
datelist = data.index.unique()
# two months of data for the test set
print(f'Test start date: {datelist[-61]}')
# Train test split : last 60 days for test set
X_train = X[X.index < datelist[-61]]
X_test = X[X.index >= datelist[-61]]
y_train = y[y.index < datelist[-61]]
y_test = y[y.index >= datelist[-61]]
print(f'Size of train and test set respectively:')
print(X_train.shape,X_test.shape, y_train.shape, y_test.shape)
timesteps = 1
features = X_train.shape[1]
xavier = initializers.glorot_normal()
X_train = np.reshape(X_train.values, (X_train.shape[0], timesteps, features))
X_test = np.reshape(X_test.values, (X_test.shape[0], timesteps, features))
X_train.shape, X_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
3. Model building
###Code
def model_evaluation(y_train, y_test, y_train_pred, y_test_pred):
# MAE and NRMSE calculation
train_rmse = np.sqrt(mean_squared_error(y_train, y_train_pred))
train_mae = mean_absolute_error(y_train, y_train_pred)
train_nrmse = train_rmse/np.std(y_train.values)
test_rmse = np.sqrt(mean_squared_error(y_test, y_test_pred))
test_mae = mean_absolute_error(y_test, y_test_pred)
test_nrmse = test_rmse/np.std(y_test.values)
print(f'Training MAE: {np.round(train_mae, 3)}')
print(f'Training NRMSE: {np.round(train_nrmse, 3)}')
print()
print(f'Test MAE: {np.round(test_mae)}')
print(f'Test NRMSE: {np.round(test_nrmse)}')
return
def model_training(X_train, X_test, y_train, model, batch=8, name='m'):
start = time.time()
loss = losses.mean_squared_error
opt = optimizers.Adam()
metric = [metrics.mean_absolute_error]
model.compile(loss=loss, optimizer=opt, metrics=metric)
callbacks_list = [callbacks.ReduceLROnPlateau(monitor='loss', factor=0.2,
patience=5, min_lr=0.001)]
history = model.fit(X_train, y_train,
epochs=100,
batch_size=batch,
verbose=0,
shuffle=False,
callbacks=callbacks_list
)
# save the model and its weights
if os.path.exists(MODELDIR):
pass
else:
os.makedirs(MODELDIR)
m_name = name + str('.h5')
w_name = name + str('_w.h5')
model.save(os.path.join(MODELDIR, m_name))
model.save_weights(os.path.join(MODELDIR, w_name))
# prediction
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
end = time.time()
time_taken = np.round((end-start), 3)
print(f'Time taken to complete the process: {time_taken} seconds')
return y_train_pred, y_test_pred, history
###Output
_____no_output_____
###Markdown
RNN - v1
###Code
model = Sequential()
model.add(SimpleRNN(3, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=8, name='rnn-v1')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_1 (SimpleRNN) (None, 3) 48
_________________________________________________________________
dense_1 (Dense) (None, 1) 4
=================================================================
Total params: 52
Trainable params: 52
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 9.143 seconds
Training MAE: 3132.46
Training NRMSE: 1.835
Test MAE: 2698.0
Test NRMSE: 2.0
###Markdown
RNN - v2
###Code
model = Sequential()
model.add(SimpleRNN(3, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(3, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(3, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=8, name='rnn-v2')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_6 (SimpleRNN) (None, 1, 3) 48
_________________________________________________________________
simple_rnn_7 (SimpleRNN) (None, 1, 3) 21
_________________________________________________________________
simple_rnn_8 (SimpleRNN) (None, 3) 21
_________________________________________________________________
dense_2 (Dense) (None, 1) 4
=================================================================
Total params: 94
Trainable params: 94
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 15.411 seconds
Training MAE: 4494.103
Training NRMSE: 2.496
Test MAE: 4523.0
Test NRMSE: 3.0
###Markdown
RNN - v3
###Code
model = Sequential()
model.add(SimpleRNN(8, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=8, name='rnn-v3')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_9 (SimpleRNN) (None, 1, 8) 168
_________________________________________________________________
simple_rnn_10 (SimpleRNN) (None, 1, 16) 400
_________________________________________________________________
simple_rnn_11 (SimpleRNN) (None, 8) 200
_________________________________________________________________
dense_3 (Dense) (None, 1) 9
=================================================================
Total params: 777
Trainable params: 777
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 15.659 seconds
Training MAE: 844.919
Training NRMSE: 0.549
Test MAE: 1711.0
Test NRMSE: 1.0
###Markdown
RNN - v4
###Code
model = Sequential()
model.add(SimpleRNN(8, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=16, name='rnn-v4')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_12 (SimpleRNN) (None, 1, 8) 168
_________________________________________________________________
simple_rnn_13 (SimpleRNN) (None, 1, 16) 400
_________________________________________________________________
simple_rnn_14 (SimpleRNN) (None, 8) 200
_________________________________________________________________
dense_4 (Dense) (None, 1) 9
=================================================================
Total params: 777
Trainable params: 777
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 8.583 seconds
Training MAE: 1255.245
Training NRMSE: 0.793
Test MAE: 1798.0
Test NRMSE: 1.0
###Markdown
RNN - v5
###Code
model = Sequential()
model.add(SimpleRNN(8, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=4, name='rnn-v5')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_15 (SimpleRNN) (None, 1, 8) 168
_________________________________________________________________
simple_rnn_16 (SimpleRNN) (None, 1, 16) 400
_________________________________________________________________
simple_rnn_17 (SimpleRNN) (None, 8) 200
_________________________________________________________________
dense_5 (Dense) (None, 1) 9
=================================================================
Total params: 777
Trainable params: 777
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 30.338 seconds
Training MAE: 704.593
Training NRMSE: 0.471
Test MAE: 1275.0
Test NRMSE: 1.0
###Markdown
RNN - v6 (final model)
###Code
model = Sequential()
model.add(SimpleRNN(8, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=2, name='rnn-v6')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_22 (SimpleRNN) (None, 1, 8) 168
_________________________________________________________________
simple_rnn_23 (SimpleRNN) (None, 1, 16) 400
_________________________________________________________________
simple_rnn_24 (SimpleRNN) (None, 1, 16) 528
_________________________________________________________________
simple_rnn_25 (SimpleRNN) (None, 8) 200
_________________________________________________________________
dense_7 (Dense) (None, 1) 9
=================================================================
Total params: 1,305
Trainable params: 1,305
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 72.353 seconds
Training MAE: 651.45
Trainig NRMSE: 0.454
Test MAE: 1253.0
Test NRMSE: 1.0
###Markdown
RNN-v7
###Code
model = Sequential()
model.add(SimpleRNN(8, input_shape = (timesteps, features), kernel_initializer=xavier,
activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(16, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu', return_sequences=True))
model.add(SimpleRNN(8, kernel_initializer=xavier, activation='relu'))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
y_train_pred, y_test_pred, history = model_training(X_train, X_test, y_train, model, batch=2, name='rnn-v7')
model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using RNN')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn_26 (SimpleRNN) (None, 1, 8) 168
_________________________________________________________________
simple_rnn_27 (SimpleRNN) (None, 1, 16) 400
_________________________________________________________________
simple_rnn_28 (SimpleRNN) (None, 1, 16) 528
_________________________________________________________________
simple_rnn_29 (SimpleRNN) (None, 1, 8) 200
_________________________________________________________________
simple_rnn_30 (SimpleRNN) (None, 8) 136
_________________________________________________________________
dense_8 (Dense) (None, 1) 9
=================================================================
Total params: 1,441
Trainable params: 1,441
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 88.76 seconds
Training MAE: 763.082
Trainig NRMSE: 0.529
Test MAE: 1373.0
Test NRMSE: 1.0
|
Data Science/05. Plotting in detail/03. Grid, Axes, and Labels.ipynb | ###Markdown
Grid Axes Labels
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
print(plt.axis())
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
plt.axis([0, 2, 0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.title('Plot Title')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, label='x**2')
plt.plot(x, x**3, label='x**3')
plt.plot(x, 2*x, label='2*x')
plt.plot(x, 2**x, label='2**x')
plt.legend()
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.title('Plot Title')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.legend(['x**2', 'x**3', '2*x', '2**x'])
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.title('Plot Title')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.legend(['x**2', 'x**3', '2*x', '2**x'], loc='upper center')
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.title('Plot Title')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.show()
x = np.arange(3)
plt.plot(x, x**2, x, x**3, x, 2*x, 2**x)
plt.legend(['x**2', 'x**3', '2*x', '2**x'], loc='upper center')
plt.grid(True)
plt.xlabel('x = np.arange(3)')
plt.ylabel('y= f(x)')
plt.title('Plot Title')
plt.xlim([0, 2])
plt.ylim([0, 8])
plt.savefig('test.png') #save as png
plt.show()
###Output
_____no_output_____ |
demos/Record Disambiguation - People.ipynb | ###Markdown
Record Disambiguation In this notebook we perform entity disambiguation on records, specifically person records.
###Code
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
import sys
sys.path.append("..")
from heritageconnector.disambiguation.helpers import load_training_data, plot_performance_curves
from heritageconnector.disambiguation.pipelines import Disambiguator
from heritageconnector.disambiguation.postprocessing import filter_max_wikidata_links, enforce_correct_type
from heritageconnector.utils.wikidata import get_sparql_results, url_to_qid
from heritageconnector.utils.generic import paginate_list
from heritageconnector.config import config
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
1. Load data This data has already been generated using `Disambiguator.save_training_data_to_folder` and `Disambiguator.save_test_data_to_folder`.
###Code
train_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/people_281020/train/"
test_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/people_281020/test/"
X, y, pairs, pids = load_training_data(train_dir)
X_new, pairs_new, pids_new = load_training_data(test_dir)
pids, pids_new
X.sum(axis=0), X_new.sum(axis=0)
pairs.head()
###Output
_____no_output_____
###Markdown
2. Train classifier The disambiguator wraps `sklearn.tree.DecisionTreeClassifier` and takes its parameters as inputs. 2a. Test classifier performance We'll perform a train/test split on the labelled data to quickly test the classifier's performance using its `score` method. The `score` method here returns [balanced accuracy](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html): accuracy weighted so that each class is considered evenly.
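As a quick, self-contained illustration of why balanced accuracy matters for imbalanced link/no-link data (the labels below are made up for the example, not taken from this dataset):

```python
from sklearn.metrics import accuracy_score, balanced_accuracy_score

# 9 negatives and 1 positive: predicting "no link" everywhere still scores
# 90% plain accuracy, but balanced accuracy averages per-class recall.
y_true = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
y_hat = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print(accuracy_score(y_true, y_hat))           # 0.9
print(balanced_accuracy_score(y_true, y_hat))  # (1.0 + 0.0) / 2 = 0.5
```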
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1)
clf = Disambiguator('PERSON').fit(X_train, y_train)
for threshold in [0.5, 0.6, 0.7, 0.8, 0.9]:
print(str(threshold) + " --- \n" + clf.score(X_test, y_test, threshold))
###Output
0.5 ---
balanced accuracy score: 0.9794740146913499
precision score: 0.9054054054054054
recall score: 0.9640287769784173
0.6 ---
balanced accuracy score: 0.9794740146913499
precision score: 0.9054054054054054
recall score: 0.9640287769784173
0.7 ---
balanced accuracy score: 0.9794740146913499
precision score: 0.9054054054054054
recall score: 0.9640287769784173
0.8 ---
balanced accuracy score: 0.9794740146913499
precision score: 0.9054054054054054
recall score: 0.9640287769784173
0.9 ---
balanced accuracy score: 0.9796554699626254
precision score: 0.9115646258503401
recall score: 0.9640287769784173
###Markdown
2b. Use classifier to predict new Wikidata links
###Code
clf = Disambiguator('PERSON').fit(X, y)
y_pred = clf.predict(X_new, threshold=0.9)
y_pred_proba = clf.predict_proba(X_new)
print(f"{np.unique(y_pred, return_counts=True)[1][1]} potential new links found")
pairs_new = clf.get_predictions_table(X_new, pairs_new, threshold=0.9)
display(Markdown("The graph below shows the distribution of the number of predicted matches per SMG ID. Around 75% have a unique match, and most of the remainder have two matches."))
sns.distplot(pairs_new.loc[pairs_new["y_pred"] == True, "internal_id"].value_counts(), kde=False, norm_hist=True).set_ylabel('proportion')
plt.gca().set_title('Count of Number of SMG IDs per True Prediction');
###Output
_____no_output_____
###Markdown
2c. Returning top-ranked links only We can filter some of the duplicate Wikidata candidates for each SMG item found above by _only returning the top-ranked positive matches_. `clf.get_top_ranked_pairs` (used in the cell below) does this.
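For intuition, the same idea can be sketched directly with pandas. This is only an illustrative sketch, not the library's implementation, and it assumes the predictions table carries a per-pair probability column (the name `y_pred_proba` below is an assumption):

```python
# Keep, for each SMG record, only the positive candidate with the highest
# predicted probability. The column name 'y_pred_proba' is assumed -- adapt
# it to whatever get_predictions_table actually returns.
positives = pairs_new[pairs_new["y_pred"] == True]
top_ranked = positives.loc[positives.groupby("internal_id")["y_pred_proba"].idxmax()]
```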
###Code
pairs_true = clf.get_top_ranked_pairs(pairs_new)
print(f"No. new links: {len(pairs_true)}")
print(f"No. SMG items with new links: {len(pairs_true['internal_id'].unique())}")
pairs_true.head(20)
###Output
No. new links: 2355
No. SMG items with new links: 2271
###Markdown
2d. Filter matches By type and by the maximum number of Wikidata links per record
###Code
max_links_per_record = 4
pairs_true_filtered = enforce_correct_type(pairs_true)
pairs_true_filtered = filter_max_wikidata_links(pairs_true_filtered, max_links_per_record)
print("-- After Filtering --")
print(f"No. new links: {len(pairs_true_filtered)}")
print(f"No. SMG items with new links: {len(pairs_true_filtered['internal_id'].unique())}")
###Output
-- After Filtering --
No. new links: 2345
No. SMG items with new links: 2267
###Markdown
3. Explain classifier We can see that the classifier prioritises P569/P570 (birth and death dates), P21 (gender), label similarity, and occupation. It's interesting to note that P31 (instance of), which tells the classifier whether the Wikidata record is a human, is not used. This is likely because P569/P570/P106/P21 are qualities which only humans can have. P31 is likely to be much more prevalent when classifying objects, and distinguishing between e.g. paintings and posters.
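A complementary, rough way to see which features dominate is to fit a plain `sklearn` decision tree on the same training matrix and inspect `feature_importances_`. This is only a sketch: it mirrors, but is not identical to, the tree inside the `Disambiguator`, and it assumes `pids` lines up with the columns of `X` (as it does for `print_tree`).

```python
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

# Fit a plain decision tree on the same features and rank importances.
plain_tree = DecisionTreeClassifier(random_state=42).fit(X, y)
importances = pd.Series(plain_tree.feature_importances_, index=pids)
print(importances.sort_values(ascending=False))
```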
###Code
clf.print_tree(feature_names=pids)
###Output
|--- P569 <= 1.00
| |--- P106 <= 0.50
| | |--- P570 <= 1.00
| | | |--- label <= 0.99
| | | | |--- P735 <= 0.03
| | | | | |--- class: False
| | | | |--- P735 > 0.03
| | | | | |--- class: False
| | | |--- label > 0.99
| | | | |--- P21 <= 0.50
| | | | | |--- class: False
| | | | |--- P21 > 0.50
| | | | | |--- class: False
| | |--- P570 > 1.00
| | | |--- label <= 0.94
| | | | |--- class: False
| | | |--- label > 0.94
| | | | |--- P734 <= 0.97
| | | | | |--- class: False
| | | | |--- P734 > 0.97
| | | | | |--- class: True
| |--- P106 > 0.50
| | |--- label <= 0.95
| | | |--- label <= 0.87
| | | | |--- P570 <= 0.28
| | | | | |--- class: False
| | | | |--- P570 > 0.28
| | | | | |--- class: False
| | | |--- label > 0.87
| | | | |--- P569 <= 0.90
| | | | | |--- class: True
| | | | |--- P569 > 0.90
| | | | | |--- class: False
| | |--- label > 0.95
| | | |--- P569 <= 0.42
| | | | |--- P734 <= 0.92
| | | | | |--- class: True
| | | | |--- P734 > 0.92
| | | | | |--- class: True
| | | |--- P569 > 0.42
| | | | |--- P569 <= 0.99
| | | | | |--- class: False
| | | | |--- P569 > 0.99
| | | | | |--- class: True
|--- P569 > 1.00
| |--- label <= 0.86
| | |--- class: False
| |--- label > 0.86
| | |--- P569 <= 1.00
| | | |--- P570 <= 1.00
| | | | |--- P106 <= 0.50
| | | | | |--- class: False
| | | | |--- P106 > 0.50
| | | | | |--- class: True
| | | |--- P570 > 1.00
| | | | |--- P735 <= 0.60
| | | | | |--- class: True
| | | | |--- P735 > 0.60
| | | | | |--- class: True
| | |--- P569 > 1.00
| | | |--- P569 <= 1.00
| | | | |--- P570 <= 1.00
| | | | | |--- class: False
| | | | |--- P570 > 1.00
| | | | | |--- class: True
| | | |--- P569 > 1.00
| | | | |--- label <= 0.95
| | | | | |--- class: True
| | | | |--- label > 0.95
| | | | | |--- class: True
###Markdown
4. Export model and final predictions
###Code
clf.save_classifier_to_disk("/Volumes/Kalyan_SSD/SMG/disambiguation/people_281020/clf.pkl")
pairs_true_filtered.to_csv("/Volumes/Kalyan_SSD/SMG/disambiguation/people_281020/people_preds_positive.csv", index=False)
###Output
_____no_output_____
###Markdown
You can also use the below cell to export a sample of positive and negative samples to an Excel document for manual review
###Code
pairs_pos_sample = pairs_new[pairs_new['y_pred'] == True].sample(30, random_state=42)
pairs_neg_sample = pairs_new[pairs_new['y_pred'] == False].sample(30, random_state=42)
pairs_sample = pd.concat([pairs_pos_sample, pairs_neg_sample], ignore_index=False)
pairs_sample = pairs_sample.copy()
pairs_sample['wikidata_id'] = "https://www.wikidata.org/entity/" + pairs_sample['wikidata_id']
pairs_sample.to_excel("people_classifier_sample_for_review.xlsx")
###Output
_____no_output_____ |
problems/0041/solution.ipynb | ###Markdown
Problem 41 Pandigital prime We shall say that an $n$-digit number is pandigital if it makes use of all the digits $1$ to $n$ exactly once. For example, $2143$ is a $4$-digit pandigital and is also prime. What is the largest $n$-digit pandigital prime that exists? Solution The digits $1$ to $8$ sum to $36$ and $1$ to $9$ sum to $45$, so every 8- and 9-digit pandigital is divisible by $3$ and cannot be prime; it is therefore enough to sieve primes up to $7654321$.
###Code
from euler.primes import prime_numbers
def compute() -> int:
for prime in reversed(list(prime_numbers(7_654_321))):
str_prime = str(prime)
if set(str_prime) == set(map(str, range(1, len(str_prime) + 1))):
return prime
compute()
%timeit -n 100 -r 1 -p 6 compute()
###Output
969.678 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 100 loops each)
|
docs/source/Introduction/libraries/Numpy tutorial.ipynb | ###Markdown
The NumPy Library Try me [](https://colab.research.google.com/github/ffraile/operations-research-notebooks/blob/main/docs/source/Introduction/libraries/Numpy%20tutorial.ipynb)[](https://mybinder.org/v2/gh/ffraile/operations-research-notebooks/main?labpath=docs%2Fsource%2FIntroduction%2Flibraries%2FNumpy%20tutorial.ipynb) The [Numpy](https://numpy.org/) (Numerical Python) package provides numerical functions to work effectively with multidimensional data structures in Python. In plain Python it is possible to represent multidimensional structures (arrays and matrices) with nested lists, but this is not efficient. The Numpy library defines the numpy array object as an efficient and convenient way to work with multidimensional structures. To use Numpy in your Notebooks and programs, you first need to import the package (in this example we use the alias np):
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
The Numpy Array The numpy array uses a similar structure to a Python list, although as mentioned above, it provides additional functionality to easily create and manipulate multidimensional data structures. The data in an array are called elements and they are accessed using brackets, just as with Python lists. The dimensions of a numpy array are called **axes**. The elements within an axis are separated using commas and surrounded by brackets. Axes are also separated by brackets, so that a numpy array is represented as a nested Python list. The **rank** is the number of axes of an array. The **shape** is a list representing the number of elements in each axis. The elements of a numpy array can be of any numerical type.
###Code
b = np.array([[1,2,3,4],[5,6,7,8]]) #This creates a 2-dimensional (rank 2) 2x4 array
print("My first Numpy array:")
print(b)
print("element in position (1,2) is:")
print(b[1,2])
print("Number of dimensions:")
print(b.ndim) #number of dimensions or rank
print("Shape of array:")
print(b.shape) #shape (eg n rows, m columns)
print("Total number of elements:")
print(b.size) #number of elements
###Output
My first Numpy array:
[[1 2 3 4]
[5 6 7 8]]
element in position (1,2) is:
7
Number of dimensions:
2
Shape of array:
(2, 4)
Total number of elements:
8
###Markdown
Create Numpy Arrays Numpy includes several functions for creating numpy arrays of a given rank and shape, initialized with constant or random values. **Some examples:**
###Code
o = np.ones((3,2)) # array of 3x2 1s
print(o)
b=np.zeros((3,4)) # array of 3x4 zeroes
print(b)
c=np.random.random(3) #array of 3x1 random numbers
print(c)
d=np.full((2,2),12) # array of 2x2 12s
print(d)
identity = np.eye(3,3) # identity array of size 3x3 (renamed to avoid shadowing the built-in id)
print(identity)
###Output
[[1. 1.]
[1. 1.]
[1. 1.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
[0.71574091 0.54968971 0.72723399]
[[12 12]
[12 12]]
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
###Markdown
Creating sequences Some useful functions for creating arrays are **arange** and **linspace**: - **arange(start, end, step)**: creates a numpy array with elements ranging from **start** up to (but not including) **end**, incrementing by **step**. Only end is required; using only end creates an evenly spaced range from 0 to end. - **linspace(start,end,numvalues)**: creates a numpy array with **numvalues** evenly distributed values ranging from **start** to **end**. The increment is calculated by the function so that the resulting number of elements matches the numvalues input parameter.
###Code
a = np.arange(0, 10, 2)
print(a)
b=np.linspace(0,10,6)
print(b)
###Output
[0 2 4 6 8]
[ 0. 2. 4. 6. 8. 10.]
###Markdown
Arithmetic operations You can apply element-wise **arithmetic** and **logical** calculations to numpy arrays using arithmetic or logical operators. The functions np.**exp()**, np.**sqrt()**, or np.**log()** are other examples of functions that operate on the elements of a numpy array. You can check the entire list of available functions in the official [Numpy documentation]( https://numpy.org/doc/). **Some examples:**
###Code
x =np.array([[1,2,3,4],[5,6,7,8]])
y =np.array([[9,10,11,12],[13,14,15,16]])
print(x+y)
print(y-x)
print(np.sqrt(y))
print(np.log(x))
print(x**2)
print(x+5)
###Output
[[10 12 14 16]
[18 20 22 24]]
[[8 8 8 8]
[8 8 8 8]]
[[3. 3.16227766 3.31662479 3.46410162]
[3.60555128 3.74165739 3.87298335 4. ]]
[[0. 0.69314718 1.09861229 1.38629436]
[1.60943791 1.79175947 1.94591015 2.07944154]]
[[ 1 4 9 16]
[25 36 49 64]]
[[ 6 7 8 9]
[10 11 12 13]]
###Markdown
Note that in the last examples, we are adding a scalar value to a numpy array. In general, we can apply arithmetic operations on arrays of different shapes, provided that, for each dimension, the sizes either match or one of them is one. When this condition is met, numpy will expand the smaller array to match the shape of the larger array with an operation called **broadcasting**, as illustrated in the sketch below. Array functions Numpy also provides an extensive list of array functions: - **sum()**: Returns the sum of all elements. - **min()**: Returns the minimum value within the array - **max()**: Returns the maximum value within the array - **mean()**: Returns the mean of an array - **median()**: Returns the median value of the array - **cumsum()**: Returns the cumulative sum of the elements of the array. All of the functions above support the additional **axis** parameter to work on a specific dimension.
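A quick illustration of the broadcasting rule described above (the arrays here are new, made-up examples):

```python
a = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8]])      # shape (2, 4)
row = np.array([10, 20, 30, 40])  # shape (4,) - broadcast across both rows
col = np.array([[100], [200]])    # shape (2, 1) - broadcast across all columns

print(a + row)
print(a + col)
```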
###Code
x =np.array([[1,2,3,4],[5,6,7,8]])
y =np.array([[9,10,11,12],[13,14,15,16]])
print("sum of all elements in x:")
print(np.sum(x))
print("mean value of y:")
print(np.mean(y))
###Output
sum of all elements in x:
36
mean value of y:
12.5
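The cell above applies sum and mean over the whole array; the **axis** argument mentioned earlier restricts them to one dimension. A small follow-up sketch (the array is redefined here so the snippet is self-contained):

```python
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(np.sum(x, axis=0))   # sum down each column: [ 6  8 10 12]
print(np.sum(x, axis=1))   # sum across each row: [10 26]
print(x.cumsum(axis=1))    # cumulative sum along each row
```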
###Markdown
Other functions take two arrays as arguments and perform element-wise operations: - minimum(): Returns an array with the element-wise minimum of the two inputs - maximum(): Returns an array with the element-wise maximum of the two inputs
###Code
b=np.linspace(0,1,10)
r = np.random.random(10)
print(np.minimum(b,r))
###Output
[0. 0.11111111 0.22222222 0.0069694 0.44444444 0.3403326
0.19167794 0.71257103 0.78045669 0.64287305]
|
jw300_bpe_en_xh_masakhane.ipynb | ###Markdown
Masakhane - Machine Translation for African Languages (Using JoeyNMT) Note before beginning: - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running - If you actually want to have a clue what you're doing, read the text and peek at the links - With 100 epochs, it should take around 7 hours to run in Google Colab - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected] - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) Retrieve your data & make a parallel corpus If you want to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and converting them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details. Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe.
###Code
from google.colab import drive
drive.mount('/content/drive')
# TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here:
# These will also become the suffix's of all vocab and corpus files used throughout
import os
source_language = "en"
target_language = "xh"
lc = False # If True, lowercase the data.
seed = 42 # Random seed for shuffling.
tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
os.environ["tag"] = tag
# This will save it to a folder in our gdrive instead!
!mkdir -p "/content/drive/My Drive/masakhane/$src-$tgt-$tag"
os.environ["gdrive_path"] = "/content/drive/My Drive/masakhane/%s-%s-%s" % (source_language, target_language, tag)
!echo $gdrive_path
# Install opus-tools
! pip install opustools-pkg
# Downloading our corpus
! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q
# extract the corpus file
! gunzip JW300_latest_xml_$src-$tgt.xml.gz
# Download the global test set.
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en
# And the specific test set for this language pair.
os.environ["trg"] = target_language
os.environ["src"] = source_language
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en
! mv test.en-$trg.en test.en
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg
! mv test.en-$trg.$trg test.$trg
# Read the test data to filter from train and dev splits.
# Store english portion in set for quick filtering checks.
en_test_sents = set()
filter_test_sents = "test.en-any.en"
j = 0
with open(filter_test_sents) as f:
for line in f:
en_test_sents.add(line.strip())
j += 1
print('Loaded {} global test sentences to filter from the training/dev data.'.format(j))
import pandas as pd
# Read the moses-format parallel files into a dataframe
source_file = 'jw300.' + source_language
target_file = 'jw300.' + target_language
source = []
target = []
skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion.
with open(source_file) as f:
for i, line in enumerate(f):
# Skip sentences that are contained in the test set.
if line.strip() not in en_test_sents:
source.append(line.strip())
else:
skip_lines.append(i)
with open(target_file) as f:
for j, line in enumerate(f):
# Only add to corpus if corresponding source was not skipped.
if j not in skip_lines:
target.append(line.strip())
print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i))
df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence'])
# if you get "TypeError: data argument can't be an iterator", it is due to your zip version; run the line below instead
#df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence'])
df.head(3)
df[df.duplicated()]
###Output
_____no_output_____
###Markdown
Pre-processing and export It is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned. In addition, we will split our data into dev/test/train and export to the filesystem.
###Code
print("Length of Data before Removing duplicate: ",len(df))
df = df.drop_duplicates()
print("Length of Data after Removing duplicate: ",len(df))
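# Optional extra step (a sketch, not part of the original notebook): the drop
# above only removes exact (source, target) duplicates. "Conflicting"
# translations -- the same source sentence mapped to several different
# targets -- can be located like this:
conflicts = df.groupby("source_sentence")["target_sentence"].transform("nunique") > 1
print("Rows with conflicting translations:", conflicts.sum())
# df = df[~conflicts]  # uncomment to actually drop them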
# This section does the split between train/dev for the parallel corpora then saves them as separate files
# We use 1000 dev test and the given test set.
import csv
# Do the split between dev/train and create parallel corpora
num_dev_patterns = 1000
# Optional: lower case the corpora - this will make it easier to generalize, but without proper casing.
if lc: # Julia: making lowercasing optional
df["source_sentence"] = df["source_sentence"].str.lower()
df["target_sentence"] = df["target_sentence"].str.lower()
# Julia: test sets are already generated
dev = df.tail(num_dev_patterns) # Herman: Error in original
stripped = df.drop(df.tail(num_dev_patterns).index)
with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file:
for index, row in stripped.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file:
for index, row in dev.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
#stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere
#stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks.
#dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False)
#dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False)
# Doublecheck the format below. There should be no extra quotation marks or weird characters.
! head train.*
! head dev.*
! head test.*
! cat train.en | wc -l
! cat train.xh | wc -l
! cat dev.en | wc -l
! cat dev.xh | wc -l
! cat test.en | wc -l
! cat test.xh | wc -l
###Output
816515
816515
1000
1000
2717
2717
###Markdown
--- Installation of JoeyNMT JoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io)
###Code
# Install JoeyNMT
! git clone https://github.com/joeynmt/joeynmt.git
! cd joeynmt; pip3 install .
###Output
Cloning into 'joeynmt'...
remote: Enumerating objects: 15, done.[K
remote: Counting objects: 6% (1/15)[K
remote: Counting objects: 13% (2/15)[K
remote: Counting objects: 20% (3/15)[K
remote: Counting objects: 26% (4/15)[K
remote: Counting objects: 33% (5/15)[K
remote: Counting objects: 40% (6/15)[K
remote: Counting objects: 46% (7/15)[K
remote: Counting objects: 53% (8/15)[K
remote: Counting objects: 60% (9/15)[K
remote: Counting objects: 66% (10/15)[K
remote: Counting objects: 73% (11/15)[K
remote: Counting objects: 80% (12/15)[K
remote: Counting objects: 86% (13/15)[K
remote: Counting objects: 93% (14/15)[K
remote: Counting objects: 100% (15/15)[K
remote: Counting objects: 100% (15/15), done.[K
remote: Compressing objects: 100% (12/12), done.[K
remote: Total 2199 (delta 4), reused 5 (delta 3), pack-reused 2184[K
Receiving objects: 100% (2199/2199), 2.60 MiB | 2.80 MiB/s, done.
Resolving deltas: 100% (1525/1525), done.
Processing /content/joeynmt
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.16.0)
Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (4.3.0)
Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.17.4)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (41.6.0)
Requirement already satisfied: torch>=1.1 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.3.1)
Requirement already satisfied: tensorflow>=1.14 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: torchtext in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.3.1)
Collecting sacrebleu>=1.3.6
Downloading https://files.pythonhosted.org/packages/0e/e5/93d252182f7cbd4b59bb3ec5797e2ce33cfd6f5aadaf327db170cf4b7887/sacrebleu-1.4.2-py3-none-any.whl
Collecting subword-nmt
Downloading https://files.pythonhosted.org/packages/74/60/6600a7bc09e7ab38bc53a48a20d8cae49b837f93f5842a41fe513a694912/subword_nmt-0.3.7-py2.py3-none-any.whl
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (3.1.1)
Requirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.9.0)
Collecting pyyaml>=5.1
[?25l Downloading https://files.pythonhosted.org/packages/e3/e8/b3212641ee2718d556df0f23f78de8303f068fe29cdaa7a91018849582fe/PyYAML-5.1.2.tar.gz (265kB)
[K |████████████████████████████████| 266kB 7.9MB/s
[?25hCollecting pylint
[?25l Downloading https://files.pythonhosted.org/packages/e9/59/43fc36c5ee316bb9aeb7cf5329cdbdca89e5749c34d5602753827c0aa2dc/pylint-2.4.4-py3-none-any.whl (302kB)
[K |████████████████████████████████| 307kB 42.4MB/s
[?25hRequirement already satisfied: six==1.12 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.12.0)
Requirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow->joeynmt==0.0.1) (0.46)
Requirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.0.8)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.1.0)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.2.2)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.11.2)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.8.1)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.1.8)
Requirement already satisfied: tensorboard<1.16.0,>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.33.6)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: tensorflow-estimator==1.15.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.1)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.8.0)
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.10.0)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (4.28.1)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (2.21.0)
Collecting portalocker
Downloading https://files.pythonhosted.org/packages/91/db/7bc703c0760df726839e0699b7f78a4d8217fdc9c7fcb1b51b39c5a22a4e/portalocker-1.5.2-py2.py3-none-any.whl
Requirement already satisfied: typing in /usr/local/lib/python3.6/dist-packages (from sacrebleu>=1.3.6->joeynmt==0.0.1) (3.6.6)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.4.5)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (0.10.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.6.1)
Requirement already satisfied: pandas>=0.15.2 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (0.25.3)
Requirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (1.3.2)
Collecting astroid<2.4,>=2.3.0
[?25l Downloading https://files.pythonhosted.org/packages/ad/ae/86734823047962e7b8c8529186a1ac4a7ca19aaf1aa0c7713c022ef593fd/astroid-2.3.3-py3-none-any.whl (205kB)
[K |████████████████████████████████| 215kB 44.3MB/s
[?25hCollecting mccabe<0.7,>=0.6
Downloading https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl
Collecting isort<5,>=4.2.5
[?25l Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB)
[K |████████████████████████████████| 51kB 7.0MB/s
[?25hRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow>=1.14->joeynmt==0.0.1) (2.8.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.1)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow>=1.14->joeynmt==0.0.1) (0.16.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2019.9.11)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2.8)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (1.24.3)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (3.0.4)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.15.2->seaborn->joeynmt==0.0.1) (2018.9)
Collecting typed-ast<1.5,>=1.4.0; implementation_name == "cpython" and python_version < "3.8"
[?25l Downloading https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl (736kB)
[K |████████████████████████████████| 737kB 44.4MB/s
[?25hCollecting lazy-object-proxy==1.4.*
[?25l Downloading https://files.pythonhosted.org/packages/0b/dd/b1e3407e9e6913cf178e506cd0dee818e58694d9a5cd1984e3f6a8b9a10f/lazy_object_proxy-1.4.3-cp36-cp36m-manylinux1_x86_64.whl (55kB)
[K |████████████████████████████████| 61kB 8.5MB/s
[?25hBuilding wheels for collected packages: joeynmt, pyyaml
Building wheel for joeynmt (setup.py) ... [?25l[?25hdone
Created wheel for joeynmt: filename=joeynmt-0.0.1-cp36-none-any.whl size=72136 sha256=f89096f6dc4b35697dc6609bc1739a621f9e13a0ecf58ad0e963439e575bc81b
Stored in directory: /tmp/pip-ephem-wheel-cache-49iy698r/wheels/db/01/db/751cc9f3e7f6faec127c43644ba250a3ea7ad200594aeda70a
Building wheel for pyyaml (setup.py) ... [?25l[?25hdone
Created wheel for pyyaml: filename=PyYAML-5.1.2-cp36-cp36m-linux_x86_64.whl size=44104 sha256=357da8d77f9d8c15bd8f7ea1d8cc2df424f11fb8eccf15864e225a0255897b69
Stored in directory: /root/.cache/pip/wheels/d9/45/dd/65f0b38450c47cf7e5312883deb97d065e030c5cca0a365030
Successfully built joeynmt pyyaml
Installing collected packages: portalocker, sacrebleu, subword-nmt, pyyaml, typed-ast, lazy-object-proxy, astroid, mccabe, isort, pylint, joeynmt
Found existing installation: PyYAML 3.13
Uninstalling PyYAML-3.13:
Successfully uninstalled PyYAML-3.13
Successfully installed astroid-2.3.3 isort-4.3.21 joeynmt-0.0.1 lazy-object-proxy-1.4.3 mccabe-0.6.1 portalocker-1.5.2 pylint-2.4.4 pyyaml-5.1.2 sacrebleu-1.4.2 subword-nmt-0.3.7 typed-ast-1.4.0
###Markdown
Preprocessing the Data into Subword BPE Tokens- One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909).- It was also shown that by optimizing the number of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)- Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable.
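If you prefer to see from Python what the CLI calls below do, here is a minimal sketch. It assumes the `subword_nmt` package exposes `apply_bpe.BPE` with a `process_line` method, and that the codes file `bpe.codes.4000` has already been produced by the cell below; double-check both against the installed version's documentation before relying on it.

```python
# A sketch only: segment one sentence with previously learned BPE codes.
from subword_nmt.apply_bpe import BPE

with open("bpe.codes.4000", encoding="utf-8") as codes_file:
    bpe = BPE(codes_file)  # assumes the codes were learned by the CLI step below

print(bpe.process_line("This is a test sentence ."))
```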
###Code
# One of the huge boosts in NMT performance was to use a different method of tokenizing.
# Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance
# Do subword NMT
from os import path
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
# Learn BPEs on the training data.
os.environ["data_path"] = path.join("joeynmt", "data", source_language + target_language) # Herman!
! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt
# Apply BPE splits to the development and test data.
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt
# Create directory, move everyone we care about to the correct location
! mkdir -p $data_path
! cp train.* $data_path
! cp test.* $data_path
! cp dev.* $data_path
! cp bpe.codes.4000 $data_path
! ls $data_path
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
# Create that vocab using build_vocab
! sudo chmod 777 joeynmt/scripts/build_vocab.py
! joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path joeynmt/data/$src$tgt/vocab.txt
# Some output
! echo "BPE Xhosa Sentences"
! tail -n 5 test.bpe.$tgt
! echo "Combined BPE Vocab"
! tail -n 10 joeynmt/data/$src$tgt/vocab.txt # Herman
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
###Output
bpe.codes.4000 dev.en test.bpe.xh test.xh train.en
dev.bpe.en dev.xh test.en train.bpe.en train.xh
dev.bpe.xh test.bpe.en test.en-any.en train.bpe.xh
###Markdown
Creating the JoeyNMT Config
JoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults with it that you may play with!
- We used the Transformer architecture
- We set our dropout to be reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021))
Things worth playing with:
- The batch size (also recommended to change for low-resourced languages)
- The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes)
- The decoder options (beam_size, alpha)
- Evaluation metrics (BLEU versus chrF)
###Code
# This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update
# (You can of course play with all the parameters if you'd like!)
name = '%s%s' % (source_language, target_language)
gdrive_path = os.environ["gdrive_path"]
# Create the config
config = """
name: "{name}_transformer"
data:
src: "{source_language}"
trg: "{target_language}"
train: "data/{name}/train.bpe"
dev: "data/{name}/dev.bpe"
test: "data/{name}/test.bpe"
level: "bpe"
lowercase: False
max_sent_length: 100
src_vocab: "data/{name}/vocab.txt"
trg_vocab: "data/{name}/vocab.txt"
testing:
beam_size: 5
alpha: 1.0
training:
#load_model: "{gdrive_path}/models/{name}_transformer/1.ckpt" # if uncommented, load a pre-trained model from this checkpoint
random_seed: 42
optimizer: "adam"
normalization: "tokens"
adam_betas: [0.9, 0.999]
scheduling: "plateau" # TODO: try switching from plateau to Noam scheduling
patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds.
learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer)
learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer)
decrease_factor: 0.7
loss: "crossentropy"
learning_rate: 0.0003
learning_rate_min: 0.00000001
weight_decay: 0.0
label_smoothing: 0.1
batch_size: 4096
batch_type: "token"
eval_batch_size: 3600
eval_batch_type: "token"
batch_multiplier: 1
early_stopping_metric: "ppl"
epochs: 50 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all
validation_freq: 1000 # TODO: Set to at least once per epoch.
logging_freq: 100
eval_metric: "bleu"
model_dir: "models/{name}_transformer"
overwrite: False # TODO: Set to True if you want to overwrite possibly existing models.
shuffle: True
use_cuda: True
max_output_length: 100
print_valid_sents: [0, 1, 2, 3]
keep_last_ckpts: 3
model:
initializer: "xavier"
bias_initializer: "zeros"
init_gain: 1.0
embed_initializer: "xavier"
embed_init_gain: 1.0
tied_embeddings: True
tied_softmax: True
encoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
decoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
""".format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language)
with open("joeynmt/configs/transformer_{name}.yaml".format(name=name),'w') as f:
f.write(config)
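# Optional sanity check (a sketch, assuming PyYAML is available as installed above):
# parse the config string back with yaml.safe_load to catch indentation or typo errors before training.
import yaml
cfg = yaml.safe_load(config)
print(cfg["training"]["epochs"], cfg["model"]["encoder"]["num_layers"])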
###Output
_____no_output_____
###Markdown
*Tensorboard*
JoeyNMT additionally uses TensorboardX to visualize training and validation curves and attention matrices during training. Launch Tensorboard (requires an installation that is not included in JoeyNMT's requirements) like this:
###Code
# Restart runtime using 'Runtime' -> 'Restart runtime...'
%tensorflow_version 1.x
import tensorflow as tf
print(tf.__version__)
%load_ext tensorboard
%tensorboard --logdir "$gdrive_path/models/enxh_transformer/tensorboard"
###Output
_____no_output_____
###Markdown
Train the Model
This single line of JoeyNMT runs the training using the config we made above.
###Code
# # Train the model
# # You can press Ctrl-C to stop. And then run the next cell to save your checkpoints!
# !cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml
# Train the model
# You can press Ctrl-C to stop. And then run the next cell to save your checkpoints!
!cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml
# Copy the created models from the notebook storage to google drive for persistant storage
!mkdir -p "$gdrive_path/models/${src}${tgt}_transformer/" # Herman
!cp -r joeynmt/models/${src}${tgt}_transformer/* "$gdrive_path/models/${src}${tgt}_transformer/"
# Output our validation accuracy
! cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt"
# Test our model
! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${src}${tgt}_transformer/config.yaml"
###Output
_____no_output_____
###Markdown
Plot Perplexity and Bleu
###Code
#Plot Perplexity
! python3 joeynmt/scripts/plot_validations.py "$gdrive_path/models/${src}${tgt}_transformer" \
--plot_values PPL \
--output_path "$gdrive_path/models/${src}${tgt}_transformer/ppl.png"
# from IPython.display import Image
from IPython.display import Image, display
display(Image("$gdrive_path/models/${src}${tgt}_transformer/ppl.png"))
#Plot Bleu Score
! python3 joeynmt/scripts/plot_validations.py "$gdrive_path/models/${src}${tgt}_transformer" \
--plot_values bleu \
--output_path "$gdrive_path/models/${src}${tgt}_transformer"/bleu.png
# from IPython.display import Image
from IPython.display import Image, display
display(Image("$gdrive_path/models/${src}${tgt}_transformer/bleu.png"))
###Output
_____no_output_____
###Markdown
Copy model from virtual drive
###Code
#Remove Afterwards
# !cp -r "/content/drive/My Drive/masakhane/en-yo-baseline/" /content/
###Output
_____no_output_____
###Markdown
NMT Attention Alignment Visualizations
###Code
# # Install keras attention
# ! cd content;
# ! git clone https://github.com/thushv89/attention_keras.git
# ! cd /content/attention_keras;
# ! cd /content;
# ! git clone https://github.com/M4t1ss/SoftAlignments.git
# Reference implementations for attention visualization:
# https://github.com/zhaocq-nlp/Attention-Visualization
# https://github.com/shreydesai/attention-viz
###Output
_____no_output_____
###Markdown
vizseq
###Code
! pip install vizseq
! pip install -U tqdm
! pip3 install -U nltk
import vizseq
from glob import glob
root = '/content/drive/My Drive/Colab Notebooks/MyJoeyNMT'
src, ref, hypo = glob(f'{root}/navy_xhen/word/test.xh'), glob(f'{root}/navy_xhen/word/test.en'), glob(f'{root}/models/transformer_xhen/predictions.test')
#First, load the vizseq package:
vizseq.view_stats(src, ref)
vizseq.view_n_grams(src)
# To view corpus-level scores (BLEU and METEOR):
vizseq.view_scores(ref, hypo, ['bleu', 'meteor'])
vizseq.available_scorers()
# import vizseq.VizSeqSortingType
vizseq.view_examples(src, ref, hypo, ['bleu', 'meteor'], page_sz=2, page_no=12)
# Google Translate Integration
vizseq.set_google_credential_path('path to google credential json file')
vizseq.view_examples(src, ref, hypo, ['bleu'], need_g_translate=True)
from vizseq.ipynb import fairseq_viz as vizseq_fs
log_path = 'examples/data/wmt14_fr_en_test.fairseq_generate.log'
vizseq_fs.view_stats(log_path)
vizseq_fs.view_examples(log_path, ['bleu', 'meteor'], need_g_translate=True)
vizseq_fs.view_scores(log_path, ['bleu', 'meteor'])
vizseq_fs.view_n_grams(log_path)
# Further reading on transformer/attention visualization:
# https://docs.dgl.ai/en/latest/tutorials/models/4_old_wines/7_transformer.html
# https://ronxin.github.io/wevi/
# https://vcg.seas.harvard.edu/publications/seq2seq-vis-a-visual-debugging-tool-for-sequence-to-sequence-models
###Output
_____no_output_____ |
LRModel.ipynb | ###Markdown
SMOTE ENN
###Code
# Imports needed below (assumption: X_train, X_test, y_train, y_test come from an earlier preprocessing step)
import numpy as np; import pandas as pd
import seaborn as sns; import matplotlib.pyplot as plt
from imblearn.combine import SMOTEENN
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, roc_auc_score
r1 = X_train.shape[0] ; r2 = X_test.shape[0]
c1 = X_train.shape[1]; c2 = X_test.shape[1]
print("Train Data has {0} number of rows and {1} of columns".format(r1,c1))
print("Test Data has {0} number of rows and {1} of columns".format(r2,c2))
smote = SMOTEENN(random_state=0)
X_train_over, y_train_over = smote.fit_resample(X_train, y_train)
print('Train feature/label set before applying SMOTE: ', X_train.shape, y_train.shape)
print('Label distribution before applying SMOTE: \n', pd.Series(y_train).value_counts())
print('Train feature/label set after applying SMOTE: ', X_train_over.shape, y_train_over.shape)
print('Label distribution after applying SMOTE: \n', pd.Series(y_train_over).value_counts())
df = pd.DataFrame(y_train_over)
sns.countplot(x = 'DLQ_YN', data=df)
plt.show()
###Output
_____no_output_____
###Markdown
MODELING
###Code
from sklearn.linear_model import LogisticRegression
log_clf = LogisticRegression()
parameters = {
'C':[0.3, 0.5, 0.8, 1.20],
'max_iter':[1000],
'penalty': ['l2']
}
grid_log = GridSearchCV(log_clf, param_grid=parameters, cv=3, refit=True, return_train_score=True)
# Sequentially train/evaluate each hyperparameter combination in param_grid.
grid_log.fit(X_train_over, y_train_over)
# GridSearchCV results are stored in the cv_results_ dictionary; convert it to a DataFrame
scores_df = pd.DataFrame(grid_log.cv_results_)
scores_df[['params', 'mean_test_score', 'rank_test_score',
'split0_test_score', 'split1_test_score', 'split2_test_score']]
print('GridSearchCV best parameters:', grid_log.best_params_)
print('GridSearchCV best accuracy: {0:.4f}'.format(grid_log.best_score_))
# Since refit=True, the fitted GridSearchCV object holds the best retrained estimator, so predict() can be called on it directly.
pred = grid_log.predict(X_test)
pred_prob = grid_log.predict_proba(X_test)[:,1]
print('Test set accuracy: {0:.4f}'.format(accuracy_score(y_test,pred)))
best_model = grid_log.best_estimator_
def get_clf_eval(y_test, pred=None, pred_proba=None):
confusion = confusion_matrix( y_test, pred)
accuracy = accuracy_score(y_test , pred)
precision = precision_score(y_test , pred)
recall = recall_score(y_test , pred)
f1 = f1_score(y_test,pred)
    # add ROC-AUC
    roc_auc = roc_auc_score(y_test, pred_proba)
    print('Confusion matrix')
    print(confusion)
    # also print ROC-AUC
    print('Accuracy: {0:.4f}, Precision: {1:.4f}, Recall: {2:.4f}, F1: {3:.4f}, AUC: {4:.4f}'.format(accuracy, precision, recall, f1, roc_auc))
get_clf_eval(y_test, pred, pred_prob)
from sklearn.metrics import roc_curve
def roc_curve_plot(y_test , pred_proba_c1):
    # Get the FPR and TPR values for each threshold.
fprs , tprs , thresholds = roc_curve(y_test ,pred_proba_c1)
    # Plot the ROC curve.
plt.plot(fprs , tprs, label='ROC')
    # Draw the diagonal reference line (random classifier).
plt.plot([0, 1], [0, 1], 'k--', label='Random')
    # Set the FPR x-axis ticks in 0.1 steps and label the axes.
start, end = plt.xlim()
plt.xticks(np.round(np.arange(start, end, 0.1),2))
plt.xlim(0,1); plt.ylim(0,1)
plt.xlabel('FPR( 1 - Sensitivity )'); plt.ylabel('TPR( Recall )')
plt.legend()
plt.show()
roc_curve_plot(y_test, best_model.predict_proba(X_test)[:, 1] )
importance = best_model.coef_[0]
print(importance)
coefs = best_model.coef_[0]
indices = np.argsort(coefs)[::-1]
plt.figure()
plt.title("Feature importances (Logistic Regression)")
plt.bar(range(10), coefs[indices[:10]],
color="r", align="center")
plt.xticks(range(10), X_train_over.columns[indices[:10]], rotation=45, ha='right')
plt.subplots_adjust(bottom=0.3)
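# The bar chart above shows only the largest positive coefficients; as a complementary check
# (a hypothetical addition, reusing the coefs computed above), the most negative coefficients
# indicate features that push predictions towards the negative class (label 0).
indices_neg = np.argsort(coefs)[:10]
print(X_train_over.columns[indices_neg])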
###Output
_____no_output_____
###Markdown
MODEL SAVE
###Code
best_logreg_model = grid_log.best_estimator_
import joblib
filename = 'LOGISTIC_MODEL.pkl'
joblib.dump(best_logreg_model, filename)
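# To reuse the persisted model later (a minimal sketch; assumes the same feature columns as X_test):
loaded_model = joblib.load(filename)
print(loaded_model.predict_proba(X_test)[:5, 1])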
###Output
_____no_output_____ |
finding-relationships-data-python/02/demos/demo-04-CalculatingAndVisualizingAutocorrelation.ipynb | ###Markdown
Loading Data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
bikesharing_data = pd.read_csv('datasets/bike_sharing_hourly.csv', index_col=0)
bikesharing_data.head(10)
bikesharing_data[['temp', 'hum']].describe()
###Output
_____no_output_____
###Markdown
* The autocorrelation is used to find how similar a signal, or function, is to itself at a certain time difference
###Code
bikesharing_data[['temp', 'hum']].corr()
bikesharing_data['temp'].autocorr(lag=2)
bikesharing_data['temp'].autocorr(lag=12)
bikesharing_data['temp'].autocorr(lag=102)
bikesharing_data['temp'].autocorr(lag=1002)
bikesharing_data['hum'].autocorr(lag=12)
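# For reference (a small illustrative sketch): pandas Series.autocorr(lag) is simply the Pearson
# correlation between the series and a lag-shifted copy of itself.
temp = bikesharing_data['temp']
print(temp.corr(temp.shift(12)))  # should match temp.autocorr(lag=12) above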
###Output
_____no_output_____
###Markdown
Autocorrelation Plot
###Code
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 14))
ax1.acorr(bikesharing_data['temp'],
maxlags=12, color='green')
ax1.title.set_text('Temperature')
ax1.set_xlabel('Lags', fontsize=15)
ax2.acorr(bikesharing_data['hum'],
maxlags=12, color='red')
ax2.title.set_text('Humidity')
ax2.set_xlabel('Lags', fontsize=15)
plt.show()
bikesharing_data['temp'].autocorr(lag=24)
bikesharing_data['hum'].autocorr(lag=24)
###Output
_____no_output_____
###Markdown
* Here we check the humidity for one day (24 hours)
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
ax1.acorr(bikesharing_data['temp'],
maxlags=24, color='deeppink')
ax1.title.set_text('Temperature')
ax1.set_xlabel('Lags', fontsize=12)
ax2.acorr(bikesharing_data['hum'],
maxlags=24, color='blue')
ax2.title.set_text('Humidity')
ax2.set_xlabel('Lags', fontsize=12)
plt.suptitle('Autocorrelation')
plt.show()
###Output
_____no_output_____
###Markdown
* Let's check the humidity for 48 hours
* From here we can see the difference between day and night temperature and humidity
###Code
bikesharing_data['temp'].autocorr(lag=48)
bikesharing_data['hum'].autocorr(lag=48)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
ax1.acorr(bikesharing_data['temp'],
maxlags=48, color='red')
ax1.title.set_text('Temperature')
ax1.set_xlabel('Lags', fontsize=12)
ax2.acorr(bikesharing_data['hum'],
maxlags=48, color='black')
ax2.title.set_text('Humidity')
ax2.set_xlabel('Lags', fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
* Let's also check the autocorrelation of windspeed for two days
###Code
bikesharing_data['hum'].autocorr(lag=48)
bikesharing_data['windspeed'].autocorr(lag=48)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
ax1.acorr(bikesharing_data['hum'],
maxlags=48, color='red')
ax1.title.set_text('Humidity')
ax1.set_xlabel('Lags', fontsize=12)
ax2.acorr(bikesharing_data['windspeed'],
maxlags=48, color='black')
ax2.title.set_text('Windspeed')
ax2.set_xlabel('Lags', fontsize=12)
plt.show()
###Output
_____no_output_____ |
Amazon Fine Food Reviews Analysis_Logistic Regression.ipynb | ###Markdown
Amazon Fine Food Reviews Analysis

Data Source: https://www.kaggle.com/snap/amazon-fine-food-reviews
EDA: https://nycdatascience.com/blog/student-works/amazon-fine-foods-visualization/

The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon.
Number of reviews: 568,454
Number of users: 256,059
Number of products: 74,258
Timespan: Oct 1999 - Oct 2012
Number of Attributes/Columns in data: 10

Attribute Information:
1. Id
2. ProductId - unique identifier for the product
3. UserId - unique identifier for the user
4. ProfileName
5. HelpfulnessNumerator - number of users who found the review helpful
6. HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not
7. Score - rating between 1 and 5
8. Time - timestamp for the review
9. Summary - brief summary of the review
10. Text - text of the review

Objective:
Given a review, determine whether the review is positive (rating of 4 or 5) or negative (rating of 1 or 2).

[Q] How to determine if a review is positive or negative?
[Ans] We could use the Score/Rating. A rating of 4 or 5 can be considered a positive review, and a rating of 1 or 2 a negative one. A review with a rating of 3 is considered neutral and such reviews are ignored from our analysis. This is an approximate and proxy way of determining the polarity (positivity/negativity) of a review.

[1]. Reading Data

[1.1] Loading the data
The dataset is available in two forms:
1. .csv file
2. SQLite database
In order to load the data, we have used the SQLite dataset as it is easier to query and visualise the data efficiently.
Here, as we only want the global sentiment of the recommendations (positive or negative), we will purposefully ignore all scores equal to 3. If the score is above 3, the recommendation will be set to "positive"; otherwise, it will be set to "negative".
###Code
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import sqlite3
import pandas as pd
import numpy as np
import nltk
import string
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import os
# using SQLite Table to read data.
con = sqlite3.connect('database.sqlite')
# filtering only positive and negative reviews i.e.
# not taking into consideration those reviews with Score=3
# SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000, will give top 500000 data points
# you can change the number to any other number based on your computing power
# filtered_data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000""", con)
# for Logistic Regresion assignment I am taking 100k points
filtered_data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score != 3 LIMIT 100000""", con)
# Give reviews with Score>3 a positive rating(1), and reviews with a score<3 a negative rating(0).
def partition(x):
if x < 3:
return 0
return 1
# mapping reviews with score greater than 3 to positive (1) and less than 3 to negative (0)
actualScore = filtered_data['Score']
positiveNegative = actualScore.map(partition)
filtered_data['Score'] = positiveNegative
print("Number of data points in our data", filtered_data.shape)
filtered_data.head(3)
display = pd.read_sql_query("""
SELECT UserId, ProductId, ProfileName, Time, Score, Text, COUNT(*)
FROM Reviews
GROUP BY UserId
HAVING COUNT(*)>1
""", con)
print(display.shape)
display.head()
display[display['UserId']=='AZY10LLTJ71NX']
display['COUNT(*)'].sum()
###Output
_____no_output_____
###Markdown
[2] Exploratory Data Analysis

[2.1] Data Cleaning: Deduplication
It is observed (as shown in the table below) that the reviews data had many duplicate entries. Hence it was necessary to remove duplicates in order to get unbiased results for the analysis of the data. Following is an example:
###Code
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND UserId="AR5J8UI46CURR"
ORDER BY ProductID
""", con)
display.head()
###Output
_____no_output_____
###Markdown
As can be seen above, the same user has multiple reviews with the same values for HelpfulnessNumerator, HelpfulnessDenominator, Score, Time, Summary and Text. On analysis it was found that ProductId=B000HDOPZG was Loacker Quadratini Vanilla Wafer Cookies, 8.82-Ounce Packages (Pack of 8), ProductId=B000HDL1RQ was Loacker Quadratini Lemon Wafer Cookies, 8.82-Ounce Packages (Pack of 8), and so on. It was inferred that reviews with the same parameters other than ProductId belonged to the same product, just with a different flavour or quantity. Hence, in order to reduce redundancy, it was decided to eliminate the rows having the same parameters.

The method used was to first sort the data according to ProductId and then keep only the first review among similar product reviews and delete the others (e.g. in the example above only the review for ProductId=B000HDL1RQ remains). This ensures that there is only one representative for each product; deduplication without sorting could leave different representatives for the same product.
###Code
#Sorting data according to ProductId in ascending order
sorted_data=filtered_data.sort_values('ProductId', axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
#Deduplication of entries
final=sorted_data.drop_duplicates(subset={"UserId","ProfileName","Time","Text"}, keep='first', inplace=False)
final.shape
#Checking to see how much % of data still remains
(final['Id'].size*1.0)/(filtered_data['Id'].size*1.0)*100
###Output
_____no_output_____
###Markdown
Observation:
- It was also seen that in the two rows given below the value of HelpfulnessNumerator is greater than HelpfulnessDenominator, which is not practically possible; hence these two rows are also removed from the calculations.
###Code
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND Id=44737 OR Id=64422
ORDER BY ProductID
""", con)
display.head()
final=final[final.HelpfulnessNumerator<=final.HelpfulnessDenominator]
#Before starting the next phase of preprocessing lets see the number of entries left
print(final.shape)
#How many positive and negative reviews are present in our dataset?
final['Score'].value_counts()
###Output
(87773, 10)
###Markdown
[3] Preprocessing

[3.1] Preprocessing Review Text
Now that we have finished deduplication, our data requires some preprocessing before we go on further with analysis and making the prediction model.

Hence in the preprocessing phase we do the following, in the order below:
1. Begin by removing the HTML tags
2. Remove any punctuation or limited set of special characters like , or . etc.
3. Check if the word is made up of English letters and is not alpha-numeric
4. Check to see if the length of the word is greater than 2 (as it was researched that there is no adjective in 2 letters)
5. Convert the word to lowercase
6. Remove stopwords
7. Finally, Snowball-stem the word (it was observed to be better than Porter stemming)

After which we collect the words used to describe positive and negative reviews.
###Code
# printing some random reviews
sent_0 = final['Text'].values[0]
print(sent_0)
print("="*50)
sent_1000 = final['Text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = final['Text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = final['Text'].values[4900]
print(sent_4900)
print("="*50)
# remove urls from text python: https://stackoverflow.com/a/40823105/4084039
sent_0 = re.sub(r"http\S+", "", sent_0)
sent_1000 = re.sub(r"http\S+", "", sent_1000)
sent_1500 = re.sub(r"http\S+", "", sent_1500)
sent_4900 = re.sub(r"http\S+", "", sent_4900)
print(sent_0)
# https://stackoverflow.com/questions/16206380/python-beautifulsoup-how-to-remove-all-tags-from-an-element
from bs4 import BeautifulSoup
soup = BeautifulSoup(sent_0, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1000, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1500, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_4900, 'lxml')
text = soup.get_text()
print(text)
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
sent_1500 = decontracted(sent_1500)
print(sent_1500)
print("="*50)
#remove words with numbers python: https://stackoverflow.com/a/18082370/4084039
sent_0 = re.sub("\S*\d\S*", "", sent_0).strip()
print(sent_0)
#remove spacial character: https://stackoverflow.com/a/5843547/4084039
sent_1500 = re.sub('[^A-Za-z0-9]+', ' ', sent_1500)
print(sent_1500)
# https://gist.github.com/sebleier/554280
# we are removing the words from the stop words list: 'no', 'nor', 'not'
# <br /><br /> ==> after the above steps, we are getting "br br"
# we are including them into stop words list
# instead of <br /> if we have <br/> these tags would have revmoved in the 1st step
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"])
# Combining all the above steps
from tqdm import tqdm
preprocessed_reviews = []
# tqdm is for printing the status bar
for sentance in tqdm(final['Text'].values):
sentance = re.sub(r"http\S+", "", sentance)
sentance = BeautifulSoup(sentance, 'lxml').get_text()
sentance = decontracted(sentance)
sentance = re.sub("\S*\d\S*", "", sentance).strip()
sentance = re.sub('[^A-Za-z]+', ' ', sentance)
# https://gist.github.com/sebleier/554280
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
preprocessed_reviews.append(sentance.strip())
preprocessed_reviews[1500]
###Output
_____no_output_____
###Markdown
[3.2] Preprocessing Review Summary
###Code
## Similarly, you can do preprocessing for the review summary as well; a minimal sketch follows below.
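# A hedged sketch (an assumption/illustration, not used later in this notebook): apply the same
# cleaning steps defined above to the 'Summary' column.
preprocessed_summaries = []
for summary in tqdm(final['Summary'].values):
    summary = re.sub(r"http\S+", "", str(summary))
    summary = BeautifulSoup(summary, 'lxml').get_text()
    summary = decontracted(summary)
    summary = re.sub("\S*\d\S*", "", summary).strip()
    summary = re.sub('[^A-Za-z]+', ' ', summary)
    summary = ' '.join(e.lower() for e in summary.split() if e.lower() not in stopwords)
    preprocessed_summaries.append(summary.strip())
print(preprocessed_summaries[1500])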
###Output
_____no_output_____
###Markdown
[4] Featurization [4.1] BAG OF WORDS
###Code
#BoW
count_vect = CountVectorizer() #in scikit-learn
count_vect.fit(preprocessed_reviews)
print("some feature names ", count_vect.get_feature_names()[:10])
print('='*50)
final_counts = count_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_counts))
print("the shape of out text BOW vectorizer ",final_counts.get_shape())
print("the number of unique words ", final_counts.get_shape()[1])
###Output
some feature names ['aa', 'aahhhs', 'aback', 'abandon', 'abates', 'abbott', 'abby', 'abdominal', 'abiding', 'ability']
==================================================
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text BOW vectorizer (4986, 12997)
the number of unique words 12997
###Markdown
[4.2] Bi-Grams and n-Grams.
###Code
#bi-gram, tri-gram and n-gram
#removing stop words like "not" should be avoided before building n-grams
# count_vect = CountVectorizer(ngram_range=(1,2))
# please do read the CountVectorizer documentation http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# you can choose these numbers min_df=10, max_features=5000 as per your choice
count_vect = CountVectorizer(ngram_range=(1,2), min_df=10, max_features=5000)
final_bigram_counts = count_vect.fit_transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_bigram_counts))
print("the shape of out text BOW vectorizer ",final_bigram_counts.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_bigram_counts.get_shape()[1])
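# A quick peek (an illustrative addition) at a few of the learned bigram features,
# to confirm the vocabulary now contains word pairs as well as single words.
print("some bigram features ", [f for f in count_vect.get_feature_names() if ' ' in f][:10])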
###Output
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text BOW vectorizer (4986, 3144)
the number of unique words including both unigrams and bigrams 3144
###Markdown
[4.3] TF-IDF
###Code
tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), min_df=10)
tf_idf_vect.fit(preprocessed_reviews)
print("some sample features(unique words in the corpus)",tf_idf_vect.get_feature_names()[0:10])
print('='*50)
final_tf_idf = tf_idf_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_tf_idf))
print("the shape of out text TFIDF vectorizer ",final_tf_idf.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_tf_idf.get_shape()[1])
###Output
some sample features(unique words in the corpus) ['ability', 'able', 'able find', 'able get', 'absolute', 'absolutely', 'absolutely delicious', 'absolutely love', 'absolutely no', 'according']
==================================================
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text TFIDF vectorizer (4986, 3144)
the number of unique words including both unigrams and bigrams 3144
###Markdown
[4.4] Word2Vec
###Code
# Train your own Word2Vec model using your own text corpus
i=0
list_of_sentance=[]
for sentance in preprocessed_reviews:
list_of_sentance.append(sentance.split())
# Using Google News Word2Vectors
# in this project we are using a pretrained model by google
# it's a 3.3GB file; once you load it into memory
# it occupies ~9GB, so please do this step only if you have >12GB of RAM
# we will provide a pickle file which contains a dict,
# with all our corpus words as keys and model[word] as values
# To use this code-snippet, download "GoogleNews-vectors-negative300.bin"
# from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# it's 1.9GB in size.
# http://kavita-ganesan.com/gensim-word2vec-tutorial-starter-code/#.W17SRFAzZPY
# you can comment this whole cell
# or change these varible according to your need
is_your_ram_gt_16g=False
want_to_use_google_w2v = False
want_to_train_w2v = True
if want_to_train_w2v:
# min_count = 5 considers only words that occured atleast 5 times
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
elif want_to_use_google_w2v and is_your_ram_gt_16g:
if os.path.isfile('GoogleNews-vectors-negative300.bin'):
w2v_model=KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
print(w2v_model.wv.most_similar('great'))
print(w2v_model.wv.most_similar('worst'))
else:
        print("you don't have Google's word2vec file; keep want_to_train_w2v = True to train your own w2v")
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:50])
###Output
number of words that occured minimum 5 times 3817
sample words ['product', 'available', 'course', 'total', 'pretty', 'stinky', 'right', 'nearby', 'used', 'ca', 'not', 'beat', 'great', 'received', 'shipment', 'could', 'hardly', 'wait', 'try', 'love', 'call', 'instead', 'removed', 'easily', 'daughter', 'designed', 'printed', 'use', 'car', 'windows', 'beautifully', 'shop', 'program', 'going', 'lot', 'fun', 'everywhere', 'like', 'tv', 'computer', 'really', 'good', 'idea', 'final', 'outstanding', 'window', 'everybody', 'asks', 'bought', 'made']
###Markdown
[4.4.1] Converting text into vectors using Avg W2V, TFIDF-W2V [4.4.1.1] Avg W2v
###Code
# average Word2Vec
# compute average word2vec for each review.
sent_vectors = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(list_of_sentance): # for each review/sentence
    sent_vec = np.zeros(50) # initialize a zero vector of length 50; change this to 300 if you use Google's w2v
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors.append(sent_vec)
print(len(sent_vectors))
print(len(sent_vectors[0]))
###Output
100%|████████████████████████████████████████████████████████████████████████████| 4986/4986 [00:03<00:00, 1330.47it/s]
###Markdown
[4.4.1.2] TFIDF weighted W2v
###Code
# S = ["abc def pqr", "def def def abc", "pqr pqr def"]
model = TfidfVectorizer()
tf_idf_matrix = model.fit_transform(preprocessed_reviews)
# we are building a dictionary with word as key and its idf as value
dictionary = dict(zip(model.get_feature_names(), list(model.idf_)))
# TF-IDF weighted Word2Vec
tfidf_feat = model.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
tfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(list_of_sentance): # for each review/sentence
sent_vec = np.zeros(50) # as word vectors are of zero length
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
# tf_idf = tf_idf_matrix[row, tfidf_feat.index(word)]
            # to reduce the computation we use:
            # dictionary[word] = idf value of word in the whole corpus
            # sent.count(word) = tf value of word in this review
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors.append(sent_vec)
row += 1
###Output
100%|█████████████████████████████████████████████████████████████████████████████| 4986/4986 [00:20<00:00, 245.63it/s]
###Markdown
[5] Assignment 5: Apply Logistic Regression

Apply Logistic Regression on these feature sets:
- SET 1: Review text, preprocessed and converted into vectors using BOW
- SET 2: Review text, preprocessed and converted into vectors using TFIDF
- SET 3: Review text, preprocessed and converted into vectors using AVG W2V
- SET 4: Review text, preprocessed and converted into vectors using TFIDF W2V

Hyperparameter tuning (find the best hyperparameters for the algorithm that you choose):
- Find the best hyperparameter which will give the maximum AUC value.
- Find the best hyperparameter using k-fold cross validation or simple cross validation data.
- Use GridSearchCV or RandomizedSearchCV, or you can also write your own for loops to do this task of hyperparameter tuning.

Perturbation Test:
- Get the weights W after fitting your model on the data X.
- Add noise to X (X' = X + e) to get the new data set X' (if X is a sparse matrix, X.data += e).
- Fit the model again on data X' and get the weights W'.
- Add a small eps value (to eliminate the division-by-zero error) to W and W', i.e. W = W + 10^-6 and W' = W' + 10^-6.
- Now find the % change between W and W': (|(W - W') / W|) * 100.
- Calculate the 0th, 10th, 20th, 30th, ..., 100th percentiles and observe any sudden rise in the values of the percentage_change_vector. Ex: if your 99th percentile is 1.3 and your 100th percentile is 34.6, there is a sudden rise from 1.3 to 34.6; now calculate the 99.1, 99.2, 99.3, ..., 100th percentile values and find the proper value after which there is a sudden rise, assume it is 2.5.
- Print the feature names whose % change is more than a threshold x (in our example it's 2.5).

Sparsity:
- Calculate sparsity on the weight vector obtained after using L1 regularization.
- NOTE: Do sparsity and multicollinearity for any one of the vectorizers; BOW or TF-IDF is recommended.

Feature importance:
- Get the top 10 important features for both positive and negative classes separately.

Feature engineering:
- To increase the performance of your model, you can also experiment with feature engineering, like taking the length of reviews as another feature, or considering some features from the review summary as well.

Representation of results:
- You need to plot the performance of the model on both train data and cross validation data for each hyperparameter, as shown in the figure.
- Once you have found the best hyperparameter, train your model with it, find the AUC on test data, and plot the ROC curve on both train and test.
- Along with plotting the ROC curve, print the confusion matrix with predicted and original labels of the test data points. Please visualize your confusion matrices using seaborn heatmaps.

Conclusion:
- Summarize the results at the end of the notebook in table format. To print out a table please refer to this prettytable library link.

Note: Data Leakage
1. There will be an issue of data leakage if you vectorize the entire data and then split it into train/cv/test.
2. To avoid the issue of data leakage, make sure to split your data first and then vectorize it.
3. While vectorizing your data, apply the method fit_transform() on your train data, and apply the method transform() on cv/test data.
4. For more details please go through this link.

Applying Logistic Regression
###Code
#Import Required libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score,auc
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from seaborn import heatmap
from sklearn import preprocessing
# Splitting the data
X_train,X_test,y_train,y_test=train_test_split(preprocessed_reviews,final['Score'].values,test_size=0.3,random_state=0)
X_train, X_cv, y_train, y_cv = train_test_split(X_train, y_train, test_size=0.3)
print(len(X_train),len(X_cv),len(X_test))
###Output
43008 18433 26332
###Markdown
[5.1] Logistic Regression on BOW, SET 1 [5.1.1] Applying Logistic Regression with L1 regularization on BOW, SET 1
###Code
vectorizer = CountVectorizer()
vectorizer.fit(X_train) # fit has to happen only on train data
X_train_bow = vectorizer.transform(X_train)
X_cv_bow=vectorizer.transform(X_cv)
X_test_bow = vectorizer.transform(X_test)
print("After vectorizations")
print(X_train_bow.shape, y_train.shape)
print(X_cv_bow.shape,y_cv.shape)
print(X_test_bow.shape, y_test.shape)
print("="*100)
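# Feature-engineering sketch from the assignment brief (an illustrative assumption, not used in the
# models below): the review length could be appended as an extra column to the sparse BOW matrix.
from scipy.sparse import hstack, csr_matrix
train_lens = csr_matrix(np.array([len(r.split()) for r in X_train]).reshape(-1, 1))
X_train_bow_len = hstack([X_train_bow, train_lens]).tocsr()
print("BOW + review-length feature shape:", X_train_bow_len.shape)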
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, C values much beyond 10 gave no further improvement and start to overfit
C=np.sort(np.random.uniform(0.0001,3,100))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l1')
clf.fit(X_train_bow,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_bow)[:,1]
y_cv_pred = clf.predict_proba(X_cv_bow)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L1-LR-BOW")
plt.show()
#Test Phase
best_C=0.246
optimal_model=LogisticRegression(C=best_C,penalty='l1')
optimal_model.fit(X_train_bow,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_bow)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_bow)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L1-BOW")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_bow)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_bow)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.1.1.1] Calculating sparsity on weight vector obtained using L1 regularization on BOW, SET 1
###Code
w=optimal_model.coef_.flatten()
np.count_nonzero(w)
# Calculating sparsity as the fraction of zero weights: #zero-cells / #total-cells
print("Sparsity: ",(len(w)-np.count_nonzero(w))/len(w))
###Output
Sparsity: 0.9633748214650072
###Markdown
[5.1.2] Applying Logistic Regression with L2 regularization on BOW, SET 1
###Code
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, C values much beyond 10 gave no further improvement and start to overfit
C=np.sort(np.random.uniform(0.0001,3,200))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l2')#Changing the penalty to l2 here
clf.fit(X_train_bow,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_bow)[:,1]
y_cv_pred = clf.predict_proba(X_cv_bow)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L2-LR-BOW")
plt.show()
#Test Phase
best_C=0.15
optimal_model=LogisticRegression(C=best_C,penalty='l2')
optimal_model.fit(X_train_bow,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_bow)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_bow)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L2-BOW")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_bow)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_bow)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.1.2.1] Performing perturbation test (multicollinearity check) on BOW, SET 1
###Code
W=optimal_model.coef_
print(W)
# Adding a simple noise of 1 to X_train_bow
X_train_bow.data+=1
#Fitting the optimal model according to new data
optimal_model.fit(X_train_bow,y_train)
#New Weights
W_new=optimal_model.coef_
print(W_new)
# Check that there are no zero elements in the weight vector W (so the % change below is well defined)
print(np.nonzero(W==0))
# Calculate the percentage change in W
percentage_change_error=np.abs((W-W_new)/W)*100
print(percentage_change_error)
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html
percentiles=np.arange(0,110,10)
np.percentile(percentage_change_error,percentiles)
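# Drilling down as described in the assignment (a sketch: the 2.5 threshold below is an assumed,
# illustrative cut-off -- read the real one off the point where the percentiles jump).
print(np.percentile(percentage_change_error, np.linspace(99, 100, 11)))
feat_names = np.array(vectorizer.get_feature_names())
changed = percentage_change_error.flatten() > 2.5
print("features whose weights changed by more than 2.5%:", feat_names[changed][:20])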
###Output
_____no_output_____
###Markdown
Perturbation Test conclusion
In calculating the percentage change error we saw percentage changes as large as 3.46e+05, suggesting that the BOW vectorization here suffers from acute multicollinearity. Using weight vectors for feature selection here may not be a good idea; we could do forward feature selection instead.

[5.1.3] Feature Importance on BOW, SET 1

[5.1.3.1] Top 10 important features of positive class from SET 1
###Code
# Based on the multicollinearity check, using the weights W to define important features may not be a good idea.
# For the purpose of the assignment, however, we'll use the weight vectors to interpret feature importance.
# Getting the top 10 features
# Ref: https://stackoverflow.com/questions/34226400/find-the-index-of-the-k-smallest-values-of-a-numpy-array
a=optimal_model.coef_.flatten() # flatten to a 1-D array
#Getting the indices of the top 10 highest coefficients
ind = np.argpartition(a, -10)[-10:]
#Most important feature first at index 0
list_of_feat=np.array(vectorizer.get_feature_names())
mos_pos_feat=np.sort(list_of_feat[ind])[::-1]
print("Top 10 positive features:")
print(mos_pos_feat)
###Output
Top 10 positive features:
['yummy' 'pleasantly' 'perfect' 'hooked' 'excellent' 'delicious'
'complaint' 'beat' 'awesome' 'addicted']
###Markdown
[5.1.3.2] Top 10 important features of negative class from SET 1
###Code
# Ref : https://stackoverflow.com/questions/34226400/find-the-index-of-the-k-smallest-values-of-a-numpy-array
a=optimal_model.coef_.flatten() # flatten to a 1-D array
#Getting the indices of the top 10 lowest coefficients
ind = np.argpartition(a, 10)[:10]
#Most important feature first at index 0
list_of_feat=np.array(vectorizer.get_feature_names())
mos_neg_feat=np.sort(list_of_feat[ind])
print("Top 10 negative features:")
print(mos_neg_feat)
###Output
Top 10 negative features:
['awful' 'badly' 'died' 'disappointing' 'disappointment' 'poor' 'rip'
'tasteless' 'terrible' 'worst']
###Markdown
[5.2] Logistic Regression on TFIDF, SET 2 [5.2.1] Applying Logistic Regression with L1 regularization on TFIDF, SET 2
###Code
# Please write all the code with proper documentation
vectorizer = TfidfVectorizer()
vectorizer.fit(X_train) # fit has to happen only on train data
X_train_tfidf = vectorizer.transform(X_train)
X_cv_tfidf=vectorizer.transform(X_cv)
X_test_tfidf = vectorizer.transform(X_test)
print("After vectorizations")
print(X_train_tfidf.shape, y_train.shape)
print(X_cv_tfidf.shape,y_cv.shape)
print(X_test_tfidf.shape, y_test.shape)
print("="*100)
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, C values much beyond 10 gave no further improvement and start to overfit
C=np.sort(np.random.uniform(0.0001,3,150))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l1')
clf.fit(X_train_tfidf,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_tfidf)[:,1]
y_cv_pred = clf.predict_proba(X_cv_tfidf)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L1-LR-TFIDF")
plt.show()
#Test Phase
best_C=0.463
optimal_model=LogisticRegression(C=best_C,penalty='l1')
optimal_model.fit(X_train_tfidf,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_tfidf)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_tfidf)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L1-TFIDF")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_tfidf)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_tfidf)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.2.2] Applying Logistic Regression with L2 regularization on TFIDF, SET 2
###Code
# The following link explains why applying standardization to a SciPy sparse matrix would be a bad idea
# https://stackoverflow.com/questions/20240068/scaling-issues-with-scipy-sparse-matrix-while-using-scikit
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html
# Need to Normalize the X_train_tfidf, X_cv_tfidf and X_test_tfidf as L2 logistic regression is failing to converge
# on the TFIDF data
normalizer=preprocessing.Normalizer(norm='l2',copy=False)
normalizer.fit_transform(X_train_tfidf)
normalizer.fit_transform(X_cv_tfidf)
normalizer.fit_transform(X_test_tfidf)
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, C values much beyond 10 gave no further improvement and start to overfit
C=np.sort(np.random.uniform(0.0001,5,150))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l2')
clf.fit(X_train_tfidf,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_tfidf)[:,1]
y_cv_pred = clf.predict_proba(X_cv_tfidf)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L2-LR-TFIDF")
plt.show()
#Test Phase
best_C=0.48
optimal_model=LogisticRegression(C=best_C,penalty='l2')
optimal_model.fit(X_train_tfidf,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_tfidf)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_tfidf)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L2-TFIDF")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_tfidf)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_tfidf)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.2.3] Feature Importance on TFIDF, SET 2 [5.2.3.1] Top 10 important features of positive class from SET 2
###Code
# Getting the top 10 features
# Ref: https://stackoverflow.com/questions/34226400/find-the-index-of-the-k-smallest-values-of-a-numpy-array
a=optimal_model.coef_.flatten() # flatten to a 1-D array
#Getting the indices of the top 10 highest coefficients
ind = np.argpartition(a, -10)[-10:]
#Most important feature first at index 0
list_of_feat=np.array(vectorizer.get_feature_names())
mos_pos_feat=np.sort(list_of_feat[ind])[::-1]
print("Top 10 positive features:")
print(mos_pos_feat)
###Output
Top 10 positive features:
['wonderful' 'perfect' 'nice' 'loves' 'love' 'great' 'good' 'excellent'
'delicious' 'best']
###Markdown
[5.2.3.2] Top 10 important features of negative class from SET 2
###Code
# Ref : https://stackoverflow.com/questions/34226400/find-the-index-of-the-k-smallest-values-of-a-numpy-array
a=optimal_model.coef_.flatten() # flatten to a 1-D array
#Getting the indices of the top 10 lowest coefficients
ind = np.argpartition(a, 10)[:10]
#Most important feature first at index 0
list_of_feat=np.array(vectorizer.get_feature_names())
mos_neg_feat=np.sort(list_of_feat[ind])
print("Top 10 negative features:")
print(mos_neg_feat)
###Output
Top 10 negative features:
['awful' 'disappointed' 'disappointing' 'horrible' 'money' 'not' 'stale'
'terrible' 'unfortunately' 'worst']
###Markdown
[5.3] Logistic Regression on AVG W2V, SET 3
###Code
# Defining a utility function to split a list of sentences into a list of lists of words
def sent_split(X):
list_of_words=[]
for sentance in X:
list_of_words.append(sentance.split())
return list_of_words
# Word2vec model should be only be built on train data, not on CV and test data
list_of_words_train=sent_split(X_train)
w2v_model=Word2Vec(list_of_words_train,min_count=5,size=50, workers=2)
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:10])
# average Word2Vec
# compute average word2vec for each review in X based on word2vec model trained on X_train
def avg_word2vec(X):
sent_vectors = [] # the avg-w2v for each sentence/review is stored in this list
list_of_w=sent_split(X)
for sent in tqdm(list_of_w): # for each review/sentence
sent_vec = np.zeros(50) # as word vectors are of zero length 50
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors.append(sent_vec)
return sent_vectors
# Convert reviews in avg-word2vec
X_train_avg_w2v=avg_word2vec(X_train)
X_cv_avg_w2v=avg_word2vec(X_cv)
X_test_avg_w2v=avg_word2vec(X_test)
###Output
100%|███████████████████████████████████████████████████████████████████████████| 43008/43008 [01:13<00:00, 582.64it/s]
100%|███████████████████████████████████████████████████████████████████████████| 18433/18433 [00:32<00:00, 561.61it/s]
100%|███████████████████████████████████████████████████████████████████████████| 26332/26332 [00:47<00:00, 557.28it/s]
###Markdown
[5.3.1] Applying Logistic Regression with L1 regularization on AVG W2V SET 3
###Code
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, C values much beyond 10 gave no further improvement and start to overfit
# In the case of Avg-Word2Vec, the AUC score did not improve for C values beyond 1
C=np.sort(np.random.uniform(0.00001,0.5,150))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l1')
clf.fit(X_train_avg_w2v,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_avg_w2v)[:,1]
y_cv_pred = clf.predict_proba(X_cv_avg_w2v)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L1-LR-AVGW2V")
plt.show()
#Test Phase
best_C=0.052
optimal_model=LogisticRegression(C=best_C,penalty='l1')
optimal_model.fit(X_train_avg_w2v,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_avg_w2v)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_avg_w2v)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L1-AVGW2V")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_avg_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_avg_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.3.2] Applying Logistic Regression with L2 regularization on AVG W2V, SET 3
###Code
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, it was found that values even beyond 10 result in gradual underfitting
C=np.sort(np.random.uniform(0.0001,0.5,150))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l2')
clf.fit(X_train_avg_w2v,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_avg_w2v)[:,1]
y_cv_pred = clf.predict_proba(X_cv_avg_w2v)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L2-LR-BOW")
plt.show()
#Test Phase
best_C=0.018
optimal_model=LogisticRegression(C=best_C,penalty='l2')
optimal_model.fit(X_train_avg_w2v,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_avg_w2v)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_avg_w2v)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L2-AVGW2V")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_avg_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_avg_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.4] Logistic Regression on TFIDF W2V, SET 4
###Code
# Again, TF-IDF-W2V needs to be fit only on the train data.
model = TfidfVectorizer()
tf_idf_matrix = model.fit_transform(list(X_train))
# build a dictionary with each word as key and its idf as value
dictionary = dict(zip(model.get_feature_names(), list(model.idf_)))
# TF-IDF weighted Word2Vec
tfidf_feat = model.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
def tfidf_word2vec(X):
tfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
list_of_w=sent_split(X)
row=0;
for sent in tqdm(list_of_w): # for each review/sentence
sent_vec = np.zeros(50) # as word vectors are of zero length
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
#tf_idf = tf_idf_matrix[row, tfidf_feat.index(word)]
# to reduce the computation we use:
# dictionary[word] = idf value of the word in the whole corpus
# sent.count(word)/len(sent) = tf value of the word in this review
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors.append(sent_vec)
row += 1
return tfidf_sent_vectors
# Convert reviews in tfidf-word2vec
X_train_tfidf_w2v=tfidf_word2vec(X_train)
X_cv_tfidf_w2v=tfidf_word2vec(X_cv)
X_test_tfidf_w2v=tfidf_word2vec(X_test)
###Output
100%|████████████████████████████████████████████████████████████████████████████| 43008/43008 [32:14<00:00, 22.24it/s]
100%|████████████████████████████████████████████████████████████████████████████| 18433/18433 [13:43<00:00, 22.37it/s]
100%|████████████████████████████████████████████████████████████████████████████| 26332/26332 [19:51<00:00, 22.09it/s]
###Markdown
[5.4.1] Applying Logistic Regression with L1 regularization on TFIDF W2V, SET 4
###Code
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated quick training runs of Logistic Regression, it was found that values even beyond 10 result in gradual underfitting
C=np.sort(np.random.uniform(0.00001,1,100))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l1')
clf.fit(X_train_tfidf_w2v,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_tfidf_w2v)[:,1]
y_cv_pred = clf.predict_proba(X_cv_tfidf_w2v)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L1-LR-TFIDFW2V")
plt.show()
#Test Phase
best_C=0.081
optimal_model=LogisticRegression(C=best_C,penalty='l1')
optimal_model.fit(X_train_tfidf_w2v,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_tfidf_w2v)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_tfidf_w2v)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L1-TFIDFW2V")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_tfidf_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_tfidf_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[5.4.2] Applying Logistic Regression with L2 regularization on TFIDF W2V, SET 4
###Code
# Training stage
# Using np.random.uniform to generate values for C hyperparameter
# Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html
# On repeated fast training of Logistic Reg, it was found that from values even beyond 10 results in gradual underfitting,
C=np.sort(np.random.uniform(0.00001,0.5,150))
print("Min value picked: ",np.min(C)," Max value picked: ",np.max(C))
# Rest is mostly a borrowed code from KNN assignment
train_auc = []
cv_auc = []
for i in tqdm(C):
clf=LogisticRegression(C=i,penalty='l2')
clf.fit(X_train_tfidf_w2v,y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = clf.predict_proba(X_train_tfidf_w2v)[:,1]
y_cv_pred = clf.predict_proba(X_cv_tfidf_w2v)[:,1]
train_auc.append(roc_auc_score(y_train,y_train_pred))
cv_auc.append(roc_auc_score(y_cv, y_cv_pred))
plt.plot(C, train_auc, label='Train AUC')
plt.plot(C, cv_auc, label='CV AUC')
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-L2-LR-TFIDFW2V")
plt.show()
#Test Phase
best_C=0.025
optimal_model=LogisticRegression(C=best_C,penalty='l2')
optimal_model.fit(X_train_tfidf_w2v,y_train)
train_fpr, train_tpr, thresholds = roc_curve(y_train, optimal_model.predict_proba(X_train_tfidf_w2v)[:,1])
test_fpr, test_tpr, thresholds = roc_curve(y_test, optimal_model.predict_proba(X_test_tfidf_w2v)[:,1])
plt.plot(train_fpr, train_tpr, label="train AUC ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC ="+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("AUC")
plt.title("ERROR PLOTS-LR-L2-TFIDFW2V")
plt.show()
print("="*100)
# Ref : https://seaborn.pydata.org/generated/seaborn.heatmap.html
print("Train confusion matrix")
heatmap(confusion_matrix(y_train, optimal_model.predict(X_train_tfidf_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
print("Test confusion matrix")
heatmap(confusion_matrix(y_test, optimal_model.predict(X_test_tfidf_w2v)),xticklabels=['Predicted: NO','Predicted: Yes'],yticklabels=['Actual: NO', 'Actual: Yes'],annot=True,fmt='d',cmap="YlGnBu")
plt.show()
###Output
_____no_output_____
###Markdown
[6] Conclusions
###Code
# Source: http://zetcode.com/python/prettytable/
from prettytable import PrettyTable
pt = PrettyTable()
pt.field_names = ["Vectorizer", "Model", "Hyperparameter", "AUC"]
C_list=[0.246,0.15,0.463,0.48,0.052,0.018,0.081,0.025]
vect=['BOW']*2+['TFIDF']*2+['AVG-W2V']*2+['TFIDF-W2V']*2
AUC_LIST=[0.933,0.937,0.936,0.946,0.896,0.89,0.87,0.871,]
mod=['L1-LR','L2-LR']*4
for a,b,c,d in zip(vect,mod,C_list,AUC_LIST):
pt.add_row([a,b,c,d])
print(pt)
###Output
+------------+-------+----------------+-------+
| Vectorizer | Model | Hyperparameter | AUC |
+------------+-------+----------------+-------+
| BOW | L1-LR | 0.246 | 0.933 |
| BOW | L2-LR | 0.15 | 0.937 |
| TFIDF | L1-LR | 0.463 | 0.936 |
| TFIDF | L2-LR | 0.48 | 0.946 |
| AVG-W2V | L1-LR | 0.052 | 0.896 |
| AVG-W2V | L2-LR | 0.018 | 0.89 |
| TFIDF-W2V | L1-LR | 0.081 | 0.87 |
| TFIDF-W2V | L2-LR | 0.025 | 0.871 |
+------------+-------+----------------+-------+
|
examples/Visualization_Examples.ipynb | ###Markdown
Constants used to control the examples shown
###Code
# Set to one of: "iris", "breast_cancer", or "wine"
DATASET_TESTED = "iris"
###Output
_____no_output_____
###Markdown
Method to load data
###Code
def get_iris():
iris = load_iris()
X, y = iris.data, iris.target
X = pd.DataFrame(X, columns=iris['feature_names'])
y = pd.Series(y)
return X, y
def get_breast_cancer():
X, y = load_breast_cancer(return_X_y=True, as_frame=True)
return X,y
def get_wine():
X, y = load_wine(return_X_y=True, as_frame=True)
return X,y
###Output
_____no_output_____
###Markdown
Example using RotationFeatures with a decision tree on the Iris dataset
###Code
if DATASET_TESTED == "iris":
X,y = get_iris()
elif DATASET_TESTED == "breast_cancer":
X,y = get_breast_cancer()
elif DATASET_TESTED == "wine":
X,y = get_wine()
else:
assert False, "Not a valid test dataset"
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
rota = RotationFeatures(degree_increment=30)
rota.fit(X_train)
X_train_extended = rota.transform(X_train)
#!!!!!!!!!!!!!!!!!!!!!!!!!!
# todo: seems to be bug with this line -- issue if pass pd df ?
#X_train_extended = pd.DataFrame(X_train_extended, index=X_train.index)
X_test_extended = rota.transform(X_test)
dt = tree.DecisionTreeClassifier(max_depth=5, random_state=42)
dt.fit(X_train_extended,y_train)
y_pred = dt.predict(X_test_extended)
###Output
_____no_output_____
###Markdown
Presenting the features generated
###Code
display(X_train_extended)
###Output
_____no_output_____
###Markdown
Example Visualizing a Single Node
###Code
tree_viewer = GraphTwoDimTree(tree=dt,
X_orig=X_train,
X_extended=X_train_extended,
y=y_train,
rota=rota)
tree_viewer.graph_node(node_idx=0,
row=X_train_extended.iloc[10],
show_log_scale=True)
###Output
_____no_output_____
###Markdown
Example Visualizing the Decision Path for a Single Prediction
###Code
tree_viewer.graph_decision_path(row=X_train_extended.iloc[2], show_log_scale=False)
###Output
Decision Path: [0 2 4 5 6]
###Markdown
Example Showing Incorrect Predictions
###Code
tree_viewer.graph_incorrect_rows(X_test_extended, y_test, y_pred, max_rows_shown=5)
###Output
Number of rows: 38. Number of incorrect: 1. Percent incorrect: 3
****************************************************************
Displaying decision path for row 83. Predicted: 2. Actual: 1
****************************************************************
Decision Path: [0 2 4 5 6]
###Markdown
Visualize a full Decision Tree
###Code
tree_viewer.graph_tree(show_log_scale=False, show_combined_2d_space=True)
###Output
_____no_output_____ |
week11_1/myproject/02_Query.ipynb | ###Markdown
Query Pattern
* What is the total rental cost between 13/03/2014 and 24/03/2014?
* How much money was collected from the car with id=2?

Getting a record by id
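For context before running the queries, the examples in this notebook assume roughly the models sketched below. This is a reconstruction from the generated SQL shown in the outputs (table and column names match); the exact field types, `max_length` values, and `on_delete` arguments are assumptions:

```python
# myapp/models.py (illustrative sketch reconstructed from the SQL in the outputs below)
from django.db import models

class Customer(models.Model):
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    Address = models.CharField(max_length=200)
    postcode = models.CharField(max_length=20)
    telephone = models.CharField(max_length=20)
    email = models.EmailField()

class Car(models.Model):
    maker = models.CharField(max_length=100)
    model = models.CharField(max_length=100)
    year = models.IntegerField()
    price = models.DecimalField(max_digits=10, decimal_places=2)

class Rent(models.Model):
    rent_date = models.DateTimeField()
    return_date = models.DateTimeField()
    cost = models.DecimalField(max_digits=10, decimal_places=2)
    car = models.ForeignKey(Car, on_delete=models.CASCADE)            # on_delete is an assumption
    customer = models.ForeignKey(Customer, on_delete=models.CASCADE)  # on_delete is an assumption
```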
###Code
c=Customer.objects.get(id=2)
print(c)
###Output
Customer object (2)
###Markdown
Getting all records from table Customer
###Code
Customer.objects.all()
# SQL command
print Customer.objects.all().query
###Output
SELECT "myapp_customer"."id", "myapp_customer"."first_name", "myapp_customer"."last_name", "myapp_customer"."Address", "myapp_customer"."postcode", "myapp_customer"."telephone", "myapp_customer"."email" FROM "myapp_customer"
###Markdown
Filter records within range
###Code
from datetime import datetime
import pytz
utc=pytz.timezone('UTC')
start_date = utc.localize( datetime.strptime('2014-03-13','%Y-%m-%d') )
stop_date = utc.localize( datetime.strptime('2014-03-24','%Y-%m-%d') )
Rent.objects.filter(rent_date__range=[start_date, stop_date])
# SQL command
print Rent.objects.filter(rent_date__range=[start_date, stop_date ]).query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."rent_date" BETWEEN 2014-03-13 00:00:00 AND 2014-03-24 00:00:00
###Markdown
Filter less_than_or_equal (__lte)
###Code
# rents that happened on or before 13 March 2014
Rent.objects.filter(rent_date__lte=start_date)
# SQL command
print Rent.objects.filter(rent_date__lte=start_date).query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."rent_date" <= 2014-03-13 00:00:00
###Markdown
Filter greater than (__gt)
###Code
# rents that happened after 13 March 2014
Rent.objects.filter(rent_date__gt=start_date)
# SQL command
print Rent.objects.filter(rent_date__gt=start_date).query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."rent_date" > 2014-03-13 00:00:00
###Markdown
What is the total rental cost between 13/03/2014 and 24/03/2014? Naive solution (but slow)
###Code
%%timeit -n10
total=0
q=Rent.objects.filter(rent_date__range=[start_date, stop_date])
for i in q:
total=total + i.cost
###Output
10 loops, best of 3: 2.33 ms per loop
###Markdown
Better by using "aggregate()"
###Code
%%timeit -n10
from django.db.models import Sum, Max, Min, Avg
Rent.objects.filter(rent_date__range=[start_date, stop_date]).aggregate(Sum('cost'))
q=Rent.objects.filter(rent_date__range=[start_date, stop_date])
r=q.aggregate(Sum('cost'))
r
Rent.objects.filter(rent_date__range=[start_date, stop_date]).aggregate(Max('cost'))
###Output
_____no_output_____
###Markdown
Annotate Count
###Code
from django.db.models import Count
q=Car.objects.annotate(Count("rent"))
q[0].rent__count
for i in q:
print "rent__count:%s car:%s"%(i.rent__count, i)
print Car.objects.annotate(Count("rent")).query
###Output
SELECT "myapp_car"."id", "myapp_car"."maker", "myapp_car"."price", "myapp_car"."model", "myapp_car"."year", COUNT("myapp_rent"."id") AS "rent__count" FROM "myapp_car" LEFT OUTER JOIN "myapp_rent" ON ("myapp_car"."id" = "myapp_rent"."car_id") GROUP BY "myapp_car"."id", "myapp_car"."maker", "myapp_car"."price", "myapp_car"."model", "myapp_car"."year"
###Markdown
Reverse relation
###Code
Car.objects.get(id=2)
Car.objects.get(id=2).rent_set.all()
# SQL command
print Car.objects.get(id=2).rent_set.all().query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."car_id" = 2
###Markdown
How much money was collected from the car with id=2? Reverse relation (slow)
###Code
%%timeit -n1
sum_cost=Car.objects.get(id=2).rent_set.all().aggregate(Sum('cost'))
print sum_cost
print Car.objects.get(id=2).rent_set.all().query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."car_id" = 2
###Markdown
Forward relation
###Code
%%timeit -n1
sum_cost=Rent.objects.filter(car__id=2).aggregate(Sum('cost'))
print sum_cost
print Rent.objects.filter(car__id=2).query
###Output
SELECT "myapp_rent"."id", "myapp_rent"."rent_date", "myapp_rent"."return_date", "myapp_rent"."cost", "myapp_rent"."car_id", "myapp_rent"."customer_id" FROM "myapp_rent" WHERE "myapp_rent"."car_id" = 2
###Markdown
Find total income for each car
###Code
q=Car.objects.annotate(Sum("rent__cost"))
for i in q:
print "income:%s car:%s"%(i.rent__cost__sum,i)
###Output
income:1529.50 car:id: 1, Mitsubishi L200
income:1525.00 car:id: 2, Mini Cooper
income:2240.00 car:id: 3, TVR Tuscan
income:1119.95 car:id: 4, BMW Z3
income:480.00 car:id: 5, Toyota Celica
income:699.95 car:id: 6, Audi TT
income:514.85 car:id: 7, Mercedes E320
###Markdown
Q: Why do we need to use the reverse relation? A: Sometimes we need to iterate over all cars to get the total cost of each car.
###Code
%%timeit -n1
for i in Car.objects.all():
print "%s\n %s"%( i, i.rent_set.all().aggregate(Sum('cost')) )
###Output
id: 1, Mitsubishi L200
{'cost__sum': Decimal('1529.50')}
id: 2, Mini Cooper
{'cost__sum': Decimal('1525.00')}
id: 3, TVR Tuscan
{'cost__sum': Decimal('2240.00')}
id: 4, BMW Z3
{'cost__sum': Decimal('1119.95')}
id: 5, Toyota Celica
{'cost__sum': Decimal('480.00')}
id: 6, Audi TT
{'cost__sum': Decimal('699.95')}
id: 7, Mercedes E320
{'cost__sum': Decimal('514.85')}
id: 1, Mitsubishi L200
{'cost__sum': Decimal('1529.50')}
id: 2, Mini Cooper
{'cost__sum': Decimal('1525.00')}
id: 3, TVR Tuscan
{'cost__sum': Decimal('2240.00')}
id: 4, BMW Z3
{'cost__sum': Decimal('1119.95')}
id: 5, Toyota Celica
{'cost__sum': Decimal('480.00')}
id: 6, Audi TT
{'cost__sum': Decimal('699.95')}
id: 7, Mercedes E320
{'cost__sum': Decimal('514.85')}
id: 1, Mitsubishi L200
{'cost__sum': Decimal('1529.50')}
id: 2, Mini Cooper
{'cost__sum': Decimal('1525.00')}
id: 3, TVR Tuscan
{'cost__sum': Decimal('2240.00')}
id: 4, BMW Z3
{'cost__sum': Decimal('1119.95')}
id: 5, Toyota Celica
{'cost__sum': Decimal('480.00')}
id: 6, Audi TT
{'cost__sum': Decimal('699.95')}
id: 7, Mercedes E320
{'cost__sum': Decimal('514.85')}
1 loop, best of 3: 8.94 ms per loop
###Markdown
Better solution by using "annotate()"
###Code
%%timeit -n1
cars=Car.objects.all().annotate(Sum('rent__cost'))
for i in cars:
print "%s\n %s"%( i, i.rent__cost__sum )
print Car.objects.all().annotate(Sum('rent__cost')).query
###Output
SELECT "myapp_car"."id", "myapp_car"."maker", "myapp_car"."price", "myapp_car"."model", "myapp_car"."year", CAST(SUM("myapp_rent"."cost") AS NUMERIC) AS "rent__cost__sum" FROM "myapp_car" LEFT OUTER JOIN "myapp_rent" ON ("myapp_car"."id" = "myapp_rent"."car_id") GROUP BY "myapp_car"."id", "myapp_car"."maker", "myapp_car"."price", "myapp_car"."model", "myapp_car"."year"
|
Course_Material/Day1_PartB.ipynb | ###Markdown
Day 1, Part B: More on Reward Design
Learning goals
- Further examine the effects of reward function changes

Definitions
- **Simulation environment**: Notice that this is not the same as the python/conda environment. The simulation environment is the simulated world where the reinforcement learning takes place. It provides opportunities for an agent to learn and explore, and ideally provides challenges that aid in efficient learning.
- **Agent (aka actor or policy)**: An entity in the simulation environment that performs actions. The agent could be a person, a robot, a car, a thermostat, etc.
- **State variable**: An observed variable in the simulation environment. They can be coordinates of objects or entities, an amount of fuel in a tank, air temperature, wind speed, etc.
- **Action variable**: An action that the agent can perform. Examples: step forward, increase velocity to 552.5 knots, push object left with force of 212.3 N, etc.
- **Reward**: A value given to the agent for doing something considered to be 'good'. Reward is commonly assigned at each time step and cumulated during a learning episode.
- **Episode**: A learning event consisting of multiple steps in which the agent can explore. It starts with the unmodified environment and continues until the goal is achieved or something prevents further progress, such as a robot getting stuck in a hole. Multiple episodes are typically run in loops until the model is fully trained.
- **Model (aka policy or agent)**: An RL model is composed of the modeling architecture (e.g., neural network) and parameters or weights that define the unique behavior of the model.
- **Policy (aka model or agent)**: The parameters of a model that encode the best choices to make in an environment. The choices are not necessarily good ones until the model undergoes training. The policy (or model) is the "brain" of the agent.
- **Replay Buffer**: A place in memory to store state, action, reward and other variables describing environmental state transitions. It is effectively the agent's memory of past experiences.

Modify the CartPole Reward
We will work on reward modifications later in this course. For now, you can try modifying the reward here. We've included `MyCartPole.py` in the Course_Material folder, a subclassed step function from the main environment definition. Because of the register step below, you'll need to restart the kernel every time you modify the reward, so we've included the imports cell below for easy access.
To try different reward functions with the code below, your workflow should look like the following:
1. Modify the reward section (below line 40 in the .py), *remember to save.*
2. Restart your kernel and clear all outputs (you probably don't want to rerun all the training above).
3. Run the following 5 cells below to retrain and look at reward.
4. Feel free to play around with total_timesteps=25000 if you want a shorter/longer test of your new reward.
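For orientation, here is a minimal sketch of the kind of subclass `MyCartPole.py` defines. This is not the actual file: only the `MyCartPole:MyCartPoleEnv` entry point is taken from the `register()` call in the next cell, and the reward expression below is an assumption used purely for illustration.

```python
# Illustrative sketch only; NOT the actual Course_Material/MyCartPole.py.
from gym.envs.classic_control.cartpole import CartPoleEnv

class MyCartPoleEnv(CartPoleEnv):
    def step(self, action):
        # Run the standard CartPole dynamics first (old gym API: 4-tuple return).
        obs, reward, done, info = super().step(action)
        x, x_dot, theta, theta_dot = obs
        # Example reward shaping: favor small pole angles and staying near the track center.
        reward = 1.0 - abs(theta) - 0.1 * abs(x)
        return obs, reward, done, info
```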
###Code
import os
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.utils import set_random_seed
from tqdm import trange
import hvplot.pandas # This adds HoloViews plotting capability directly from a Pandas dataframe
import pandas as pd
from gym.envs.registration import registry, make, spec
def register(id, *args, **kwargs):
if id in registry.env_specs:
return
else:
return gym.envs.registration.register(id, *args, **kwargs)
register(id='MyCartPole-v1',
entry_point='MyCartPole:MyCartPoleEnv',
max_episode_steps=1000,
reward_threshold=2500.0)
log_dir = "tmp/"
os.makedirs(log_dir, exist_ok=True)
env = gym.make("MyCartPole-v1")
env = Monitor(env, log_dir)
model = PPO('MlpPolicy', env, verbose=0)
model.learn(total_timesteps=25000)
training_reward = pd.DataFrame(pd.to_numeric(pd.read_csv("tmp/monitor.csv")[1:].reset_index()['index'])).reset_index()
training_reward.rename(columns={'level_0':"Episode",'index':"Reward"},inplace=True)
training_reward.hvplot(x="Episode",y="Reward")
reward_list = []
episode_reward = 0
obs = env.reset()
for _ in trange(1000):
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
episode_reward += reward
env.render()
if done:
reward_list.append(episode_reward)
episode_reward = 0
env.reset()
env.env.viewer.close()
env.close()
###Output
_____no_output_____ |
.ipynb_checkpoints/fizyr_keras_retinanet-checkpoint.ipynb | ###Markdown
Object Detection in Google Colab with Fizyr RetinaNet
A Jupyter notebook providing the steps to train a Keras/TensorFlow model for object detection on a custom dataset.
It runs in Google Colab using the [Fizyr implementation](https://github.com/fizyr/keras-retinanet) of RetinaNet in Keras.
The only requirements are the dataset images and an annotations file made in [LabelImg](https://github.com/tzutalin/labelImg).
Colab Runtime type: Python3, GPU enabled.
Environment Setup
Download and install the required packages in Colab and import libraries.
###Code
!git clone https://github.com/fizyr/keras-retinanet.git
%cd keras-retinanet/
!pip install .
!python setup.py build_ext --inplace
import os
import shutil
import zipfile
import urllib
import xml.etree.ElementTree as ET
import numpy as np
import csv
import pandas
from google.colab import drive
from google.colab import files
###Output
/media/vy/DATA/projects/retinanet/env/lib/python3.6/site-packages/IPython/utils/traitlets.py:5: UserWarning: IPython.utils.traitlets has moved to a top-level traitlets package.
warn("IPython.utils.traitlets has moved to a top-level traitlets package.")
###Markdown
Making the Dataset
Download the training dataset from Drive and convert it to the Fizyr annotations format.
Beforehand, upload to Google Drive a zip file containing the annotations and images for the training dataset, with the following layout (check the zip sample):
```
objdet_reduced_dataset.zip
|- img1.jpg
|- img1.xml
|- img2.jpg
|- img2.xml
...
```
Then change DATASET_DRIVEID accordingly.
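The conversion cell below writes the two CSV files in the format expected by keras-retinanet's CSV generator: `annotations.csv` with one `path,x1,y1,x2,y2,class_name` row per bounding box, and `classes.csv` with one `class_name,id` row per class. For example, with purely illustrative paths, coordinates, and class names, the rows could look like:
```
dataset/img1.jpg,96,13,424,391,helmet
dataset/img2.jpg,10,20,120,210,vest
```
```
helmet,0
vest,1
```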
###Code
DATASET_DRIVEID = '1YgTANSod7X5Yf-3YvsrbJPSwvESxq2b2'
DATASET_DIR = 'dataset'
ANNOTATIONS_FILE = 'annotations.csv'
CLASSES_FILE = 'classes.csv'
drive_url = 'https://drive.google.com/uc?export=download&id=' + DATASET_DRIVEID
file_name = DATASET_DRIVEID + '.zip'
urllib.request.urlretrieve(drive_url, file_name)
print('Download completed!')
os.makedirs(DATASET_DIR, exist_ok=True)
with zipfile.ZipFile(file_name, 'r') as zip_ref:
zip_ref.extractall(DATASET_DIR)
os.remove(file_name)
print('Extract completed!')
annotations = []
classes = set([])
for xml_file in [f for f in os.listdir(DATASET_DIR) if f.endswith(".xml")]:
tree = ET.parse(os.path.join(DATASET_DIR, xml_file))
root = tree.getroot()
file_name = None
for elem in root:
if elem.tag == 'filename':
file_name = os.path.join(DATASET_DIR, elem.text)
if elem.tag == 'object':
obj_name = None
coords = []
for subelem in elem:
if subelem.tag == 'name':
obj_name = subelem.text
if subelem.tag == 'bndbox':
for subsubelem in subelem:
coords.append(subsubelem.text)
item = [file_name] + coords + [obj_name]
annotations.append(item)
classes.add(obj_name)
with open(ANNOTATIONS_FILE, 'w') as f:
writer = csv.writer(f)
writer.writerows(annotations)
with open(CLASSES_FILE, 'w') as f:
for i, line in enumerate(classes):
f.write('{},{}\n'.format(line,i))
###Output
_____no_output_____
###Markdown
Training Model
Download pretrained model and run training.
In the next cell choose one option:
1. download Fizyr Resnet50 pretrained model
2. download your custom pretrained model, to continue previous training epochs
In the last cell optionally export trained model to Google Drive.
###Code
PRETRAINED_MODEL = './snapshots/_pretrained_model.h5'
#### OPTION 1: DOWNLOAD INITIAL PRETRAINED MODEL FROM FIZYR ####
URL_MODEL = 'https://github.com/fizyr/keras-retinanet/releases/download/0.5.1/resnet50_coco_best_v2.1.0.h5'
urllib.request.urlretrieve(URL_MODEL, PRETRAINED_MODEL)
#### OPTION 2: DOWNLOAD CUSTOM PRETRAINED MODEL FROM GOOGLE DRIVE. CHANGE DRIVE_MODEL VALUE. USE THIS TO CONTINUE PREVIOUS TRAINING EPOCHS ####
#drive.mount('/content/gdrive')
#DRIVE_MODEL = '/content/gdrive/My Drive/Colab Notebooks/objdet_tensorflow_colab/resnet50_csv_10.h5'
#shutil.copy(DRIVE_MODEL, PRETRAINED_MODEL)
print('Downloaded pretrained model to ' + PRETRAINED_MODEL)
!keras_retinanet/bin/train.py --freeze-backbone --random-transform --weights {PRETRAINED_MODEL} --batch-size 8 --steps 500 --epochs 10 csv annotations.csv classes.csv
#### OPTIONAL: EXPORT TRAINED MODEL TO DRIVE ####
#drive.mount('/content/gdrive')
#COLAB_MODEL = './snapshots/resnet50_csv_10.h5'
#DRIVE_DIR = '/content/gdrive/My Drive/Colab Notebooks/objdet_tensorflow_colab/'
#shutil.copy(COLAB_MODEL, DRIVE_DIR)
###Output
_____no_output_____
###Markdown
Inference
Run inference with uploaded image on trained model.
###Code
THRES_SCORE = 0.8
# show images inline
%matplotlib inline
# automatically reload modules when they have changed
%reload_ext autoreload
%autoreload 2
# import keras
import keras
# import keras_retinanet
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
model_path = os.path.join('snapshots', sorted(os.listdir('snapshots'), reverse=True)[0])
print(model_path)
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
model = models.convert_model(model)
# load label to names mapping for visualization purposes
labels_to_names = pandas.read_csv(CLASSES_FILE,header=None).T.loc[0].to_dict()
def img_inference(img_path):
image = read_image_bgr(img_infer)
# copy to draw on
draw = image.copy()
draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
# preprocess image for network
image = preprocess_image(image)
image, scale = resize_image(image)
# process image
start = time.time()
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
print("processing time: ", time.time() - start)
# correct for image scale
boxes /= scale
# visualize detections
for box, score, label in zip(boxes[0], scores[0], labels[0]):
# scores are sorted so we can break
if score < THRES_SCORE:
break
color = label_color(label)
b = box.astype(int)
draw_box(draw, b, color=color)
caption = "{} {:.3f}".format(labels_to_names[label], score)
draw_caption(draw, b, caption)
plt.figure(figsize=(10, 10))
plt.axis('off')
plt.imshow(draw)
plt.show()
uploaded = files.upload()
img_infer = list(uploaded)[0]
print('Running inference on: ' + img_infer)
img_inference(img_infer)
###Output
_____no_output_____ |
Problog/Monty Hall example analysis.ipynb | ###Markdown
Monty Hall problem
Probabilistic programming model analysis for ascending door number
###Code
import timeit
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
import networkx as nx
from problog import get_evaluatable
from problog.program import PrologString
from problog.formula import LogicFormula, LogicDAG
from problog.ddnnf_formula import DDNNF
from problog.cnf_formula import CNF
###Output
_____no_output_____
###Markdown
Import the ProbLog model (with embedded BP) from `modelT.pl`
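`modelT.pl` itself is not shown in this notebook. As a rough illustration of the kind of program being loaded, here is a hard-coded three-door Monty Hall toy evaluated with the same ProbLog calls used below; it is an assumption-laden sketch, not the actual `modelT.pl` (which templates the door count via a `{door_num}` placeholder):

```python
# Toy stand-in only; NOT the contents of modelT.pl.
from problog import get_evaluatable
from problog.program import PrologString

toy_model = PrologString(r"""
1/3::prize(1); 1/3::prize(2); 1/3::prize(3).
pick(1).
1/2::open_door(2); 1/2::open_door(3) :- prize(1).
open_door(3) :- prize(2).
open_door(2) :- prize(3).
win_keep   :- pick(D), prize(D).
win_switch :- pick(D), open_door(O), prize(P), P \== D, P \== O.
query(win_keep).
query(win_switch).
""")

# Same evaluation pattern as the cells below; expect roughly 1/3 for keeping and 2/3 for switching.
print(get_evaluatable().create_from(toy_model).evaluate())
```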
###Code
with open('modelT.pl') as model:
m = model.read()
lfs = []
dags = []
cnfs = []
###Output
_____no_output_____
###Markdown
Evaluate the model for door numbers from $3$ to $11$
###Code
times = []
door_num = range(3, 12)
for i in door_num:
start = timeit.default_timer()
model = m.format(door_num=i)
p = PrologString(model)
formula = get_evaluatable().create_from(p)
print(formula.evaluate())
stop = timeit.default_timer()
times.append(stop - start)
for i in door_num:
model = m.format(door_num=i)
p = PrologString(model)
lf = LogicFormula.create_from(p)
lfs.append(lf)
dag = LogicDAG.create_from(lf)
dags.append(dag)
cnf = CNF.create_from(dag)
cnfs.append(cnf)
pd.DataFrame(data={'Number Of Doors': door_num,
'Solving time': times,
'Lines in LF':[len(str(lf).split('\n')) for lf in lfs]})
print(lfs[0])
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
plt.plot(door_num, times)
plt.title('Monty Hall ascending door number',fontsize=16)
plt.ylabel('Time (sec)',fontsize=14)
plt.xlabel('# of doors ',fontsize=14)
plt.savefig('Monty Hall ascending door number.png')
plt.show()
G = nx.DiGraph()
G.add_edges_from(
[('A', 'B'), ('A', 'C'), ('D', 'B'), ('E', 'C'), ('E', 'F'),
('B', 'H'), ('B', 'G'), ('B', 'F'), ('C', 'G')])
val_map = {'A': 1.0,
'D': 0.5714285714285714,
'H': 0.0}
values = [val_map.get(node, 0.25) for node in G.nodes()]
# Specify the edges you want here
red_edges = [('A', 'C'), ('E', 'C')]
edge_colours = ['black' if not edge in red_edges else 'red'
for edge in G.edges()]
black_edges = [edge for edge in G.edges() if edge not in red_edges]
# Need to create a layout when doing
# separate calls to draw nodes and edges
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('jet'),
node_color = values, node_size = 500)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r', arrows=True)
nx.draw_networkx_edges(G, pos, edgelist=black_edges, arrows=False)
plt.show()
###Output
_____no_output_____ |
code/Parameter Tuning.ipynb | ###Markdown
Histogram of message length by type
###Code
sms.groupby("Label").Length.plot(kind = "hist", alpha = 0.5, bins = 100)
to_process = sms["Text"].copy()
to_process = to_process.str.lower()
stop_words = set(stopwords.words("english"))
def clean_message(text):
# remove punctuation, then drop English stopwords
text = text.translate(str.maketrans("", "", string.punctuation))
text = [word for word in text.split() if word not in stopwords.words("english")]
return " ".join(text)
text_cleaned = to_process.apply(clean_message)
from collections import Counter
freqSpam = Counter(" ".join(text_cleaned[sms["Label"] == "spam"]).split()).most_common(20)
freqHam = Counter(" ".join(text_cleaned[sms["Label"] == "ham"]).split()).most_common(20)
import matplotlib.pyplot as plt
labels, ys = zip(*freqHam)
xs = np.arange(len(labels))
width = 0.5
plt.bar(xs, ys, width, align='center')
plt.xticks(xs, labels)
plt.xticks(rotation=70)
plt.title("Top 20 Most Frequent Words for Ham")
plt.ylabel("Frequency")
plt.show()
labels, ys = zip(*freqSpam)
xs = np.arange(len(labels))
width = 0.5
plt.bar(xs, ys, width, align='center')
plt.xticks(xs, labels)
plt.xticks(rotation=70)
plt.title("Top 20 Most Frequent Words for Spam")
plt.ylabel("Frequency")
plt.show()
vectorizer = TfidfVectorizer("english")
features = vectorizer.fit_transform(text_cleaned)
import matplotlib.pyplot as plt
from scipy.sparse import coo_matrix
import matplotlib
def plot_coo_matrix(m):
if not isinstance(m, coo_matrix):
m = coo_matrix(m)
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='black')
ax.plot(m.col, m.row, 's', color='white', ms=1)
ax.set_xlim(0, m.shape[1])
ax.set_ylim(0, m.shape[0])
ax.set_aspect('equal')
for spine in ax.spines.values():
spine.set_visible(False)
ax.invert_yaxis()
ax.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
return ax
ax = plot_coo_matrix(features)
ax.figure.show()
###Output
/usr/local/lib/python3.6/site-packages/matplotlib/cbook.py:136: MatplotlibDeprecationWarning: The axisbg attribute was deprecated in version 2.0. Use facecolor instead.
warnings.warn(message, mplDeprecation, stacklevel=1)
/usr/local/lib/python3.6/site-packages/matplotlib/figure.py:402: UserWarning: matplotlib is currently using a non-GUI backend, so cannot show the figure
"matplotlib is currently using a non-GUI backend, "
###Markdown
Split Data
###Code
featureTrain, featureTest, labelTrain, labelTest = train_test_split(features, sms["Label"],
test_size = 0.2, random_state = 1234)
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import accuracy_score
###Output
_____no_output_____
###Markdown
Choose different kernels and bandwidth for SVC
###Code
kernels = {'rbf' : 'rbf','polynominal' : 'poly', 'sigmoid': 'sigmoid'}
predScore = []
for k, v in kernels.items():
for i in np.linspace(0.05, 1, num = 20):
svc = SVC(kernel = v, gamma = i)
svc.fit(featureTrain, labelTrain)
pred = svc.predict(featureTest)
predScore.append((k, [i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore,orient='index', columns=['Gamma','Score'])
df['Score'].plot(kind='line', figsize=(10,5), ylim=(0.8,1.0),y = "Score")
###Output
_____no_output_____
###Markdown
The `sigmoid` kernel with `gamma = 1` produces an accuracy of 97.8475%.
Elastic net
Expected to perform better than using only $\ell_1$ or $\ell_2$ regularization.
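For reference, with `penalty='elasticnet'` the `SGDClassifier` regularization term mixes the two norms through `l1_ratio` ($\rho$); up to scikit-learn's exact scaling conventions (worth confirming in the docs for your version), it has the form

$$\alpha \left( \rho \, \lVert w \rVert_1 + \frac{1-\rho}{2} \, \lVert w \rVert_2^2 \right),$$

so `l1_ratio=1` recovers a pure $\ell_1$ penalty and `l1_ratio=0` a pure $\ell_2$ penalty.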
###Code
labelTrain2 = labelTrain == "ham"
labelTest2 = labelTest == "spam"
predScore = []
for j in np.linspace(0.01, 1, num = 20):
eln = SGDClassifier(loss = 'log', penalty = 'elasticnet', alpha = 0.0001, l1_ratio = j)
eln.fit(featureTrain, labelTrain)
pred = eln.predict(featureTest)
predScore.append((i, [j, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ['l1_ratio', 'Score'])
df.plot(x = 'l1_ratio', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
When `l1_ratio = 0.05`, the prediction has an accuracy of 96.9507%. Decision Tree
###Code
predScore = []
for i in np.arange(5,31):
dtc = DecisionTreeClassifier(min_samples_split = i, random_state = 2345)
dtc.fit(featureTrain, labelTrain)
pred = dtc.predict(featureTest)
predScore.append((i,[i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["min_samples_split", "Score"])
df.plot(x = 'min_samples_split', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
Multinomial Naive Bayes
###Code
predScore = []
for i in np.linspace(0.05, 1, num = 20):
mnb = MultinomialNB(alpha = i)
mnb.fit(featureTrain, labelTrain)
pred = mnb.predict(featureTest)
predScore.append((i,[i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["alpha", "Score"])
df.plot(x = 'alpha', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
K-Nearest Neighbor
###Code
predScore = []
for i in np.arange(20, 51):
knc = KNeighborsClassifier(n_neighbors = i)
knc.fit(featureTrain, labelTrain)
pred = knc.predict(featureTest)
predScore.append((i,[i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["n_neighbors", "Score"])
df.plot(x = 'n_neighbors', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
Random Forest
###Code
predScore = []
for i in np.arange(20, 71):
rfc = RandomForestClassifier(n_estimators = i, random_state = 2345)
rfc.fit(featureTrain, labelTrain)
pred = rfc.predict(featureTest)
predScore.append((i,[i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["n_estimators", "Score"])
df.plot(x = 'n_estimators', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
Adaboost
###Code
predScore = []
for i in np.arange(20, 51):
abc = AdaBoostClassifier(n_estimators = i, random_state = 2345)
abc.fit(featureTrain, labelTrain)
pred = abc.predict(featureTest)
predScore.append((i, [i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["n_estimators", "Score"])
df.plot(x = 'n_estimators', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
Bagging
###Code
predScore = []
for i in np.arange(20, 51):
bgc = BaggingClassifier(n_estimators = i, random_state = 2345)
bgc.fit(featureTrain, labelTrain)
pred = bgc.predict(featureTest)
predScore.append((i, [i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["n_estimators", "Score"])
df.plot(x = 'n_estimators', y = 'Score', kind = "line")
###Output
_____no_output_____
###Markdown
ExtraTrees
###Code
predScore = []
for i in np.arange(20, 51):
etc = ExtraTreesClassifier(n_estimators = i, random_state = 2345)
etc.fit(featureTrain, labelTrain)
pred = etc.predict(featureTest)
predScore.append((i, [i, accuracy_score(labelTest, pred)]))
df = pd.DataFrame.from_items(predScore, orient = "index", columns = ["n_estimators", "Score"])
df.plot(x = 'n_estimators', y = 'Score', kind = "line")
###Output
_____no_output_____ |
Dia_2/grupo1/Edwin.ipynb | ###Markdown
###Code
from IPython.display import YouTubeVideo, HTML
YouTubeVideo('VIxciS1B9eo')
mi_lista = [0,1,2,3,4,5,6,7,8,10]
for i in mi_lista:
print (i)
dias_semana = ['lunes','martes','miercoles','jueves','viernes','sabado','domingo']
dia = 'martes'
if dia == 'domingo' or dia == 'sabado':
print ('me lebanto tarde a las 10am')
else:
print ('me levanto temprano a las 7am')
dias_semana = ['lunes','martes','miercoles','jueves','viernes','sabado','domingo']
for dia in dias_semana:
if dia == 'domingo' or dia == 'sabado':
print (dia, 'me lebanto tarde a las 10am')
else:
print (dia, 'me levanto temprano a las 7am')
###Output
lunes me levanto temprano a las 7am
martes me levanto temprano a las 7am
miercoles me levanto temprano a las 7am
jueves me levanto temprano a las 7am
viernes me levanto temprano a las 7am
sabado me lebanto tarde a las 10am
domingo me lebanto tarde a las 10am
###Markdown
Functions
Functions make programming easier because we do not have to rewrite all the code of a routine that we are going to reuse.
A function is defined in Python as:
```python
def mi_funcion(var1, var2):
    # the algorithm
    return x
```
###Code
# example
def mi_funcion(x,y):
return x+y
print (mi_funcion(4,5))
# example
def contar_letras(texto):
n = len(texto)
return n
def contar_palabras(texto):
lista = texto.split(' ')
n = len(lista)
return n
def contar_palabras_letras(texto):
palabras = contar_palabras(texto)
letras = contar_letras(texto)
return [palabras, letras]
print (contar_palabras_letras('contar palabras y letras'))
contar_palabras_letras('clubes de ciencia 2017 univecidad de los andes')
def hora_me_levanto(dia):
if dia =='domingo' or dia == 'sabado':
resultado = 'me levanto a las 12 am'
else:
resultado = 'me levanto a la 5 am'
return resultado
hora_me_levanto('sabado')
# example
def potencia(x,n):
a = 1
for i in range(n): # range(n) genera una lista de numeros de 0 a n-1 de 1 en 1
a = a*x
return a
def factorial(n):
if n == 0:
return 1
if n < 0:
return 'valor negativo'
factorial = 1
for i in range(1,n+1):
factorial = factorial*i
return factorial
print (potencia(3,3))
print (factorial(4))
###Output
27
24
###Markdown
Programming Challenge
- Build a function that returns the name of one of your group members when their identity card number is entered
```python
def encontrar_nombre(numero_identidad):
    # code
    return nombre_completo
```
- The Fibonacci series is very important in several areas of knowledge. It is defined as:
$$f_{0} = 0 ,$$ $$f_{1} = 1,$$ $$f_{n} = f_{n-1} + f_{n-2}$$
That is, the next value is the sum of the two previous ones.
$$ f_{2} = 1 + 0,$$ $$f_{3} = 1 + 1,$$ $$f_{4} = 2 + 1$$
Write a function that returns the Fibonacci series for some number $n$.
For example, for $n=4$ the function should return the list [0,1,1,2,3]
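One possible reference sketch for the Fibonacci part (the function name is an illustrative choice; there are many equally valid ways to write it):

```python
def fibonacci_series(n):
    """Return the list [f0, f1, ..., fn] of the Fibonacci sequence."""
    series = [0, 1]
    for i in range(2, n + 1):
        series.append(series[i - 1] + series[i - 2])
    return series[:n + 1]   # also handles n = 0 and n = 1

fibonacci_series(4)   # [0, 1, 1, 2, 3]
```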
###Code
def encontrar_nombre(numero_identidad):
nombre_completo = {'1003712136':'edwin balaguera','1009339849':'juan mape','1000065444':'maria galvis','100028707':'paula galvis'}
return nombre_completo [numero_identidad]
encontrar_nombre('1003712136')
###Output
_____no_output_____
###Markdown
Libraries
Libraries contain functions that help us solve complex problems and make programming easier.
```python
import pandas      # Pandas lets us read Excel files, filter, and compute statistics over tables
import numpy       # NumPy contains functions for mathematical operations and matrix algebra
import matplotlib  # Matplotlib is a library that helps us plot data and mathematical functions
```
###Code
# example: the current server time
import datetime
print (datetime.datetime.now())
# example: transpose of a matrix
import numpy as np
A = np.matrix([[1, 2, -3],
[1, -2, 3],
[1, -2, 3]])
print (A.shape) # the dimensions of the matrix
print (A.transpose()) # transpose of matrix A
import matplotlib.pylab as plt
plt.figure()
x = [1,2,3,4,5]
y = [1,2,3,4,5]
plt.scatter(x,y,c ='black',s=100)
plt.show()
%matplotlib notebook
# example: plot of y = x**2
import matplotlib.pylab as plt
x = list(range(-50,50))
y = [i**2 for i in x]
plt.figure()
plt.scatter(x,y)
plt.title('$y = x^{2}$') # title
plt.xlabel('x') # x-axis label
plt.ylabel('y') # y-axis label
plt.show()
x = np.linspace(0, 2 * np.pi, 500)
y1 = np.sin(x)
y2 = np.sin(3 * x)
fig, ax = plt.subplots()
ax.fill(x, y1, 'b', x, y2, 'r', alpha=0.3)
plt.show()
# example: create a data table of your classmates
import pandas as pd
nombres = ['Jocelyn', 'Laura','Luis Alejandro']
apellidos = ['Kshi', 'Diaz', 'Mahecha']
pais = ['Estados Unidos', 'Colombia', 'Colombia']
pd.DataFrame({'nombre': nombres, 'apellido': apellidos, 'pais': pais})
###Output
_____no_output_____
###Markdown
Programming Challenge
Create a dataframe or table with the following columns: t, a, v, y:
- t is the time and goes from 0 to 100
- a is the acceleration of gravity, a = 10
- v is the velocity, a function of t: $v = 20 - at$
- y is a function of t: $y = -5t^{2}$
Plot y, v, a as a function of t.
Pandas and Data Tables
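One possible solution sketch for the challenge above (the column formulas come from its statement; the exact plotting style is up to you):

```python
import pandas as pd
import matplotlib.pylab as plt

t = list(range(0, 101))           # t from 0 to 100
a = [10] * len(t)                 # acceleration of gravity
v = [20 - 10 * ti for ti in t]    # v = 20 - a*t
y = [-5 * ti**2 for ti in t]      # y = -5*t^2

df = pd.DataFrame({'t': t, 'a': a, 'v': v, 'y': y})

for col in ['y', 'v', 'a']:
    plt.figure()
    plt.plot(df['t'], df[col])
    plt.xlabel('t')
    plt.ylabel(col)
    plt.show()
```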
###Code
temperatura_global = pd.read_csv('GlobalTemperatures.csv')
###Output
_____no_output_____
###Markdown
Temperature Analysis
https://www.dkrz.de/Nutzerportal-en/doku/vis/sw/python-matplotlib/matplotlib-sourcecode/python-matplotlib-example-contour-filled-plot
https://data.giss.nasa.gov/gistemp/maps/
###Code
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import matplotlib.cm as mpl_cm
import matplotlib.pyplot as plt
import iris
import iris.quickplot as qplt
fname = iris.sample_data_path('air_temp.pp')
temperature_cube = iris.load_cube(fname)
# Load a Cynthia Brewer palette.
brewer_cmap = mpl_cm.get_cmap('brewer_OrRd_09')
# Draw the contour with 25 levels.
plt.figure()
qplt.contourf(temperature_cube, 25)
# Add coastlines to the map created by contourf.
plt.gca().coastlines()
plt.show()
# Draw the contours, with n-levels set for the map colours (9).
# NOTE: needed as the map is non-interpolated, but matplotlib does not provide
# any special behaviour for these.
plt.figure()
qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap)
# Add coastlines to the map created by contourf.
plt.gca().coastlines()
plt.show()
###Output
_____no_output_____ |
Data-Science-HYD-2k19/ASSIGNMENTS/.ipynb_checkpoints/Assignment - IV-checkpoint.ipynb | ###Markdown
Task - I
1. Given a sequence of n values x1, x2, ..., xn and a window size k>0, the k-th moving average of the given sequence is defined as follows:
The moving average sequence has n-k+1 elements as shown below.
The moving averages with k=4 of a ten-value sequence (n=10) is shown below
i     1  2  3  4  5  6  7  8  9  10
===== == == == == == == == == == ==
Input 10 20 30 40 50 60 70 80 90 100
y1 25 = (10+20+30+40)/4
y2 35 = (20+30+40+50)/4
y3 45 = (30+40+50+60)/4
y4 55 = (40+50+60+70)/4
y5 65 = (50+60+70+80)/4
y6 75 = (60+70+80+90)/4
y7 85 = (70+80+90+100)/4
Thus, the moving average sequence has n-k+1=10-4+1=7 values.
Problem Statement:
Write a function to find moving average in an array over a window:
Test it over [3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150] and window of 3.
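Not required by the assignment, but as a cross-check for the hand-rolled version in the next cell, NumPy can compute the same k-window moving average in one vectorized step:

```python
import numpy as np

x = [3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150]
k = 3
# mode='valid' returns exactly n - k + 1 window averages
moving_avg = np.convolve(x, np.ones(k) / k, mode='valid')
print(moving_avg)   # first value: (3 + 5 + 7) / 3 = 5.0
```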
###Code
import numpy as np
n = int(input("Enter n:"))
l = []
for i in range(n):
temp = int(input())
l.append(temp)
k = int(input("Enter k: "))
def summ(l):
res = 0
for i in l:
res+=i
return res
def movavg(l,k):
cnt = 0
mv = []
while(cnt<=len(l)-k):
temp = l[cnt:cnt+k]
temp = summ(temp)/k
mv.append(temp)
cnt+=1
return mv
res = []
res = movavg(l,k)
res
###Output
_____no_output_____
###Markdown
Task - II
1. How-to-count-distance-to-the-previous-zero
For each value, count the difference back to the previous zero (or the start of the Series, whichever is closer)
create a new column 'Y'
Consider a DataFrame df where there is an integer column 'X'
import pandas as pd
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
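One common vectorized way to produce the `Y` column (a sketch; it uses the same `np.r_` trick that the next cell explores, together with `np.searchsorted`):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})

x = df['X'].values
izero = np.r_[-1, np.flatnonzero(x == 0)]   # positions of the zeros, with a sentinel before the start
idx = np.arange(len(x))
df['Y'] = idx - izero[np.searchsorted(izero, idx, side='right') - 1]
print(df['Y'].tolist())   # expected: [1, 2, 0, 1, 2, 3, 4, 0, 1, 2]
```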
###Code
import pandas as pd
import numpy as np
df = pd.DataFrame({'X':[7,2,0,3,4,2,5,0,3,4]})
df
#Working of np.r_
#np.r_ is the row wise stacking of the arrays
a = np.array([1,2,3,4,5])
b = np.array([6,7,8,9,10])
print(np.r_[a,b])
print(np.r_[a[0:3],b[0:3],a[3:],b[3:]])
[df["X"].loc[i] for i in range(10)]
[0]*10
###Output
_____no_output_____ |
preprocess/run_blanks_from_training.ipynb | ###Markdown
Classify blanks (train + dev) and save to files
###Code
# # TRAINING DATASET # #
# # Pick out a subset of articles
art = arts_train[:]
# art = arts_train[14:15]
from utils_SQuAD import classify_blanks_from_answers
maxWords_per_FITB = 2
art3 = classify_blanks_from_answers(art,maxWords_per_FITB=2,return_full=False)
# Do a test print
print(art3[0]['title'])
print(art3[0]['paragraphs'][0]['context_blanked'])
# # Save the file
from utils import get_foldername, save_data
foldername = get_foldername('sq_pp_training')
save_data(art3,'train.json',foldername);
# # DEV DATASET # #
# # Pick out a subset of articles
art = arts_dev[:]
from utils_SQuAD import classify_blanks_from_answers
maxWords_per_FITB = 2
arts3dev = classify_blanks_from_answers(art,maxWords_per_FITB=2,return_full=False)
# Do a test print
print(arts3dev[0]['title'])
print(arts3dev[0]['paragraphs'][0]['context_blanked'])
# # Save the file
from utils import get_foldername, save_data
foldername = get_foldername('sq_pp_training')
save_data(arts3dev,'dev.json',foldername);
###Output
File /home/davestanley/src/animated-succotash/data/SQuAD_pp_trainingblanks/dev.json exists...skipping.
###Markdown
Re-load the data, merge, and run quick test
###Code
# Load in the data
from utils import load_data
foldername = get_foldername('sq_pp_training')
artstrain_blanks = load_data('train.json',foldername)
artsdev_blanks = load_data('dev.json',foldername)
print("Narticles train=" + str(len(artstrain_blanks)))
# Merge it with original data to get full dataset
from utils_SQuAD import merge_arts_paragraph_fields
# Training + test data
list_of_fields = ['context_blanked','blank_classification']
arts_train = merge_arts_paragraph_fields(arts_train,artstrain_blanks,list_of_fields)
arts_dev = merge_arts_paragraph_fields(arts_dev,artsdev_blanks,list_of_fields)
# Do a test print
print(arts_train[0]['paragraphs'][0]['context_blanked'])
###Output
Beyoncé Giselle Knowles-Carter (/biːˈjɒnseɪ/ bee-YON-say) (born September 4, 1981) is an American singer, songwriter, record producer and actress. Born and raised in ______ Texas, she performed in various ______ and ______ competitions as a child, and rose to fame in the ______ ______ as ______ ______ of R&B girl-group ______ Child. Managed by her father, ______ Knowles, the group became one of the world's best-selling girl groups of all time. Their hiatus saw the release of Beyoncé's debut album, ______ in ______ (2003), which established her as a solo artist worldwide, earned ______ Grammy Awards and featured the Billboard Hot 100 number-one singles "Crazy in Love" and "Baby Boy".
###Markdown
Display paragraph containing blanks
###Code
print(arts_train[0]['title'])
p = arts_train[0]['paragraphs'][3]
c = p['context']
cs = c.split()
bc = p['blank_classification']
for i in range(len(bc)):
if bc[i]:
print('Blank at word #' + str(i) + ' ' + cs[i])
print( p['context'])
print( p['context_blanked'])
###Output
Beyoncé
Blank at word #18 salon
Blank at word #24 Xerox
Blank at word #40 Solange
Blank at word #50 Destiny's
Blank at word #84 Joseph
Blank at word #85 Broussard.
Blank at word #91 Methodist
Beyoncé Giselle Knowles was born in Houston, Texas, to Celestine Ann "Tina" Knowles (née Beyincé), a hairdresser and salon owner, and Mathew Knowles, a Xerox sales manager. Beyoncé's name is a tribute to her mother's maiden name. Beyoncé's younger sister Solange is also a singer and a former member of Destiny's Child. Mathew is African-American, while Tina is of Louisiana Creole descent (with African, Native American, French, Cajun, and distant Irish and Spanish ancestry). Through her mother, Beyoncé is a descendant of Acadian leader Joseph Broussard. She was raised in a Methodist household.
Beyoncé Giselle Knowles was born in Houston, Texas, to Celestine Ann "Tina" Knowles (née Beyincé), a hairdresser and ______ owner, and Mathew Knowles, a ______ sales manager. Beyoncé's name is a tribute to her mother's maiden name. Beyoncé's younger sister ______ is also a singer and a former member of ______ Child. Mathew is African-American, while Tina is of Louisiana Creole descent (with African, Native American, French, Cajun, and distant Irish and Spanish ancestry). Through her mother, Beyoncé is a descendant of Acadian leader ______ ______ She was raised in a ______ household.
|
S Analysis.ipynb | ###Markdown
Sales Analysis Import Necessary Libraries
###Code
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
Task 1 : Merging 12 months of sales data into a single file
###Code
files = [file for file in os.listdir('.\Pandas-Data-Science-Tasks-master\SalesAnalysis\Sales_Data')]
all_months_data = pd.DataFrame()
for file in files:
df = pd.read_csv(".\Pandas-Data-Science-Tasks-master\SalesAnalysis\Sales_Data/"+file)
all_months_data=pd.concat([all_months_data,df])
all_months_data.to_csv("all_data.csv", index=False)
###Output
_____no_output_____
###Markdown
Read in updated dataframe
###Code
all_data=pd.read_csv("all_data.csv")
all_data.head()
###Output
_____no_output_____
###Markdown
Clean Up the Data! Drop rows of NAN
###Code
nan_df = all_data[all_data.isnull().any(axis=1)]
nan_df.head()
all_data = all_data.dropna(how = "all")
all_data.head()
###Output
_____no_output_____
###Markdown
Find 'or' and delete it
###Code
all_data = all_data[all_data['Order Date'].str[0:2] != 'Or']
###Output
_____no_output_____
###Markdown
Convert columns to the correct type
###Code
all_data['Quantity Ordered'] = pd.to_numeric(all_data['Quantity Ordered'])
all_data['Price Each'] = pd.to_numeric(all_data['Price Each'])
###Output
_____no_output_____
###Markdown
Augment data with additional columns Task 2 : Add Month Column
###Code
all_data['Months'] = all_data['Order Date'].str[0:2]
all_data['Months'] = all_data['Months'].astype('int32')
all_data.head()
###Output
_____no_output_____
###Markdown
Task 3 : Add a sales column
###Code
all_data['Sales'] = all_data['Quantity Ordered']*all_data['Price Each']
all_data.head()
###Output
_____no_output_____
###Markdown
Task 4 : Add city column
###Code
# let's use .apply()
def get_city(address):
return address.split(',')[1]
def get_state(address):
return address.split(',')[2].split(' ')[1]
all_data['City'] = all_data['Purchase Address'].apply(lambda x : f"{get_city(x)} ({get_state(x)})")
all_data.head()
###Output
_____no_output_____
###Markdown
Question 1 : what was the best month for sales? How much was earned that month?
###Code
results = all_data.groupby('Months').sum()
import matplotlib.pyplot as plt
months = range(1,13)
plt.bar(months, results['Sales'])
plt.xticks(months)
plt.ylabel('Sales in USD ($)')
plt.xlabel('Months number')
plt.show()
###Output
_____no_output_____
###Markdown
Question 2 :What city had the highest number of sales?
###Code
results = all_data.groupby('City').sum()
results
import matplotlib.pyplot as plt
cities = [city for city ,df in all_data.groupby('City')]
plt.bar(cities, results['Sales'])
plt.xticks(cities, rotation = 'vertical', size=8)
plt.ylabel('Sales in USD ($)')
plt.xlabel('City Name')
plt.show()
###Output
_____no_output_____
###Markdown
 Question 3: At what time should we display advertisements to maximize the likelihood of customers buying products?
###Code
all_data['Order Date'] = pd.to_datetime(all_data['Order Date'])
all_data['Hour'] = all_data['Order Date'].dt.hour
all_data['Minute'] = all_data['Order Date'].dt.minute
hours = [hours for hours, df in all_data.groupby('Hour')]
plt.plot(hours, all_data.groupby(['Hour']).count())
plt.xticks(hours)
plt.xlabel('Hours')
plt.ylabel('Number of Orders')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
 Question 4: What products are most often sold together?
###Code
df = all_data[all_data['Order ID'].duplicated(keep=False)]
df['Grouped'] = df.groupby('Order ID')['Product'].transform(lambda x: ','.join(x))
df = df[['Order ID', 'Grouped']].drop_duplicates()
df.head()
from itertools import combinations
from collections import Counter
count = Counter()
for row in df['Grouped']:
row_list = row.split(',')
count.update(Counter(combinations(row_list, 2)))
for key, value in count.most_common(10):
print(key,value)
###Output
('iPhone', 'Lightning Charging Cable') 1005
('Google Phone', 'USB-C Charging Cable') 987
('iPhone', 'Wired Headphones') 447
('Google Phone', 'Wired Headphones') 414
('Vareebadd Phone', 'USB-C Charging Cable') 361
('iPhone', 'Apple Airpods Headphones') 360
('Google Phone', 'Bose SoundSport Headphones') 220
('USB-C Charging Cable', 'Wired Headphones') 160
('Vareebadd Phone', 'Wired Headphones') 143
('Lightning Charging Cable', 'Wired Headphones') 92
###Markdown
 Question 5: What product sold the most? Why do you think it sold the most?
###Code
product_group = all_data.groupby('Product')
quantity_ordered = product_group.sum()['Quantity Ordered']
products = [product for product, df in product_group]
plt.bar(products, quantity_ordered)
plt.xticks(products, rotation='vertical', size=8)
plt.xlabel('products')
plt.ylabel('Quantity Ordered')
plt.show()
prices = all_data.groupby('Product').mean()['Price Each']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.bar(products, quantity_ordered, color='g')
ax2.plot(products, prices, 'b-')
ax1.set_xlabel('product name')
ax1.set_ylabel('quantity ordered', color='g')
ax2.set_ylabel('price ($)', color='b')
ax1.set_xticklabels(products, rotation='vertical', size=8)
plt.show()
###Output
_____no_output_____ |
dynamicProgramming/decodeNums.ipynb | ###Markdown
 Title: Decode Ways. Chapter: Dynamic Programming. Link: [YouTube](https://youtu.be/SGicalYx4wE). Chapter link: [PlayList](https://www.youtube.com/playlist?list=PLDV-cCQnUlIa0owhTLK-VT994Qh6XTy4v). Problem: count the number of ways to decode a string of digits.
###Code
def decodingBtmUp(s: str) -> int:
str_length = len(s)
if str_length == 0 :
return 0
dp = [None]*(str_length+1)
dp[-1] = 1
last_char = s[-1]
if int(last_char) == 0:
dp[str_length-1] = 0
else:
dp[str_length-1] = 1
for idx in range(str_length-2,-1,-1):
single_num = int(s[idx])
single_count = 0
if 0<single_num:
single_count = dp[idx+1]
double_num = int(s[idx:idx+2])
double_count = 0
if 10<=double_num<=26:
double_count = dp[idx+2]
count = single_count + double_count
dp[idx] = count
return dp[0]
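
# quick sanity check (added illustration): "226" can be decoded as
# (2,2,6), (2,26) or (22,6), so the expected answer is 3
print(decodingBtmUp("226"))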
decodingBtmUp("212325")
###Output
_____no_output_____ |
notebooks/5_01--DFM_Modeling_example.ipynb | ###Markdown
 5. Optimizing and sampling the kernel in OJ 287 I. Introduction to pymc3, xo, and gradients. Adapted from [a notebook provided by Dan Foreman-Mackey](https://gist.github.com/dfm/f84d0ee1af2425fc29efefe29a7e934d).
###Code
%matplotlib inline
%config InlineBackend.figure_format = "retina"
import matplotlib.pyplot as plt
plt.style.use("default")
plt.rcParams["savefig.dpi"] = 100
plt.rcParams["figure.dpi"] = 100
plt.rcParams["font.size"] = 16
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Liberation Sans"]
plt.rcParams["mathtext.fontset"] = "custom"
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import os
import lightkurve as lk
# MAST appears to have gone down, but I downloaded it once before it died
# so I'll used the cached file
epicid = 211991001
# lcf = lk.search_lightcurvefile("EPIC {0}".format(epicid), campaign=5)[0].download()
lcf = lk.KeplerLightCurveFile(os.path.expanduser(
"~/.lightkurve-cache/mastDownload/K2/ktwo211991001-c05_lc/ktwo211991001-c05_llc.fits"))
lc = lcf.PDCSAP_FLUX.remove_nans().normalize().remove_outliers()
lc.plot();
import astropy.units as u
pg = lc.to_periodogram(normalization="psd", freq_unit=u.Hz)
pg.plot()
plt.yscale("log")
plt.xscale("log");
import numpy as np
import pymc3 as pm
import theano.tensor as tt
import exoplanet as xo
# Make sure that all the data have the right type
x = np.ascontiguousarray(lc.time, dtype=np.float64)
y = np.ascontiguousarray(lc.flux - 1, dtype=np.float64)
yerr = np.ascontiguousarray(lc.flux_err, dtype=np.float64)
# Build the model in PyMC3
with pm.Model() as gp_model:
# The mean stellar flux (relative to the normalized baseline) in ppm
mean = pm.Normal("mean", mu=0, sd=np.std(y))
# A jitter term to capture underestimated error bars and model misspecification
logs2 = pm.Normal("logs2", mu=np.log(np.mean(yerr**2)), sd=10.0)
# Two SHO terms with two parameters each:
# 1. The amplitude of the variability, and
# 2. The turnover (angular) frequency in 1/d
logw_init = np.log(2*np.pi) - np.log([5, 10])
loga = pm.Normal("loga", mu=np.log(np.var(y)), sd=10.0, shape=2)
logw0 = pm.Normal("logw0", mu=logw_init, sd=10.0, shape=2)
logS0 = pm.Deterministic("logS0", loga-logw0+0.5*np.log(2))
kernel = xo.gp.terms.SHOTerm(log_S0=logS0[0], log_w0=logw0[0], Q=1/np.sqrt(2))
kernel += xo.gp.terms.SHOTerm(log_S0=logS0[1], log_w0=logw0[1], Q=1/np.sqrt(2))
# We put this together and evaluate the GP likelihood
gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2))
pm.Potential("loglike", gp.log_likelihood(y - mean))
# Then we maximize the log posterior to find an estimate of the maximum
# a posteriori (map) parameters. Note: The order of these optimizations
# has been chosen to work for this problem using a combination of intution
# and trial and error.
gp_map_soln = gp_model.test_point
gp_map_soln = xo.optimize(gp_map_soln, vars=[mean])
gp_map_soln = xo.optimize(gp_map_soln, vars=[logs2])
gp_map_soln = xo.optimize(gp_map_soln, vars=[loga, logw0])
gp_map_soln = xo.optimize(gp_map_soln)
with gp_model:
K = xo.eval_in_model(gp.kernel.value(x[None, :] - x[:, None]), gp_map_soln)
sim = np.random.multivariate_normal(
np.zeros_like(y)+gp_map_soln["mean"], K, size=50)
plt.plot(x, y, "k", lw=0.5, label="data")
plt.plot(x, sim[0], lw=0.5, label="simulation")
plt.ylabel("relative flux")
plt.xlabel("time [days]")
plt.title("EPIC {0}".format(epicid), fontsize=14)
plt.legend(fontsize=10);
###Output
_____no_output_____ |
clothin.ipynb | ###Markdown
 Clothing-Classifier This notebook uses tf.keras, a high-level API to build and train models in TensorFlow.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
###Output
2.0.0
###Markdown
 Fashion MNIST is a dataset of clothing images. Below, 60,000 images are used to train the network and 10,000 images to evaluate how accurately it classifies images. Import and load Fashion MNIST directly from TensorFlow.
###Code
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 1us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 1s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
###Markdown
 We store the class names here since they are not included with the dataset. Each image is mapped to a single label. The labels are an array of integers ranging from 0 to 9, as shown below.
###Code
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
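###Markdown
 As a small added illustration of this integer-to-name mapping (using the data loaded above), a single label can be translated into its class name like this:
###Code
# e.g. look up the class name of the first training label
print(train_labels[0], '->', class_names[train_labels[0]])
###Output
_____no_output_____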
###Markdown
 Exploring the data There are 60,000 images in the training set, each represented as 28 x 28 pixels
###Code
train_images.shape
###Output
_____no_output_____
###Markdown
Length of training set
###Code
len(train_labels)
###Output
_____no_output_____
###Markdown
Integer between 0 and 9:
###Code
train_labels
###Output
_____no_output_____
###Markdown
10,000 images in the test set.
###Code
test_images.shape
###Output
_____no_output_____
###Markdown
 Preprocess the data Using matplotlib (plt) to visualize an image; pixel values fall in the range of 0 to 255.
###Code
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
###Output
_____no_output_____
###Markdown
 Display the first 25 images from the training set with their class names, using a for loop.
###Code
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
###Output
_____no_output_____
###Markdown
 Build the model We need to set up the layers, which are the basic building blocks of a neural network.
###Code
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
###Output
_____no_output_____
###Markdown
 Compile the model The model needs a few settings before it is ready for training: the loss function, the optimizer, and the metrics. * loss function - measures how accurate the model is during training * optimizer - how the model is updated based on the data it sees * metrics - monitor the training and testing steps
###Code
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
np.argmax(predictions_single[0])
###Output
_____no_output_____ |
002-Image Classification with Pre-trained Squeezenet (Async).ipynb | ###Markdown
Read Images
###Code
image_paths = glob.glob('data/images/cats_dogs/*')
image_paths
###Output
_____no_output_____
###Markdown
OpenVINO Inference
###Code
# Get SqueezeNet Labels
# https://github.com/runwayml/model-squeezenet/blob/master/labels.json
with open('data/sqNet_labels.txt','r') as f:
labels_map = f.read()
labels = eval(labels_map)
labels
###Output
_____no_output_____
###Markdown
Prepare Model
###Code
# Model Path
model_xml = "intel_models/squeezenet1.1.xml"
model_bin = "intel_models/squeezenet1.1.bin"
"""
After a few tests, setting device to GPU does not necessarily improve FPS
"""
# Device
device = 'GPU' # Options include CPU, GPU, MYRIAD, [HDDL or HETERO] I am not familiar with the last two
def PrepareAsyncNetWork(model_xml,model_bin,device,num_requests):
ie = IECore()
net = ie.read_network(model = model_xml,weights = model_bin)
####################### Very Important $$$$
# Check to make sure that the plugin has support for all layers in the model
supported_layers = ie.query_network(net,device_name = device)
unsupported_layers = [layer for layer in supported_layers.values() if layer!= device]
if len(unsupported_layers)>0:
raise Exception(f"Number of unsupported layers {len(unsupported_layers)}")
####################################################################################
exec_net = ie.load_network(network=net,num_requests = num_requests, device_name = device)
# Store name of input and output blobs
input_blob = next(iter(net.input_info))
output_blob = next(iter(net.outputs))
# Extract Dimension (n:batch, c:color channel,h: height, w: width )
n, c ,h ,w = net.input_info[input_blob].input_data.shape
print('Extract Model Input Dimension:',n,c,h,w)
return input_blob, output_blob, exec_net, (n,c,h,w)
def PrepareInputImages(input_paths):
images = []
for path in input_paths:
image = cv2.imread(path)
input_height,input_width = image.shape[:2]
# Resize
in_frame = cv2.resize(image,(w,h))
in_frame = in_frame.transpose((2,0,1)) # Moving color channels to head
in_frame = in_frame.reshape((n,c,h,w))
images.append(in_frame)
return images
num_requests = len(image_paths)
input_blob, output_blob, execution_network,dimensions = PrepareAsyncNetWork(model_xml,
model_bin,
device,
num_requests=num_requests)
n,c,h,w = dimensions
frames = PrepareInputImages(image_paths)
def MakeAsyncPrediction(execution_network, input_blob,output_blob, inference_frames):
results = []
st_time = time.time()
for idx,frame in enumerate(inference_frames):
infer_request_handle = execution_network.start_async(request_id=idx, inputs={input_blob: frame})
infer_status = infer_request_handle.wait()
res = infer_request_handle.output_blobs[output_blob]
res = res.buffer
results.append(res)
ed_time = time.time()
time_sp = ed_time-st_time
FPS = np.round((len(inference_frames)/time_sp),4)
print(f"FPS: {FPS}")
return FPS, results
FPS_records = []
for _ in tqdm(range(100)):
FPS,results = MakeAsyncPrediction(execution_network,input_blob,output_blob,frames)
FPS_records.append(FPS)
np.mean(FPS_records)
###Output
_____no_output_____ |
advanced_model.ipynb | ###Markdown
Dataset
###Code
TEXT = Field(
sequential=True,
use_vocab=True,
tokenize=word_tokenize,
lower=True,
batch_first=True,
)
LABEL = Field(
sequential=False,
use_vocab=False,
batch_first=True,
)
cola_train_data, cola_valid_data, cola_test_data = TabularDataset.splits(
path=DATA_PATH,
train="cola_train.tsv",
validation="cola_valid.tsv",
test="cola_test.tsv",
format="tsv",
fields=[("text", TEXT), ("label", LABEL)],
skip_header=1
)
TEXT.build_vocab(cola_train_data, min_freq=2)
cola_train_iterator, cola_valid_iterator, cola_test_iterator = BucketIterator.splits(
(cola_train_data, cola_valid_data, cola_test_data),
batch_size=32,
device=None,
sort=False,
)
sat_train_data, sat_valid_data, sat_test_data = TabularDataset.splits(
path=DATA_PATH,
train="sat_train.tsv",
validation="sat_valid.tsv",
test="sat_test.tsv",
format="tsv",
fields=[("text", TEXT), ("label", LABEL)],
skip_header=1
)
sat_train_iterator, sat_valid_iterator, sat_test_iterator = BucketIterator.splits(
(sat_train_data, sat_valid_data, sat_test_data),
batch_size=8,
device=None,
sort=False,
)
###Output
_____no_output_____
###Markdown
LSTM Pooling Classifier
###Code
class LSTMPoolingClassifier(nn.Module):
def __init__(self, num_embeddings, embedding_dim, hidden_size, num_layers, pad_idx):
super(LSTMPoolingClassifier, self).__init__()
self.embed_layer = nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, padding_idx=pad_idx)
self.hidden_size = hidden_size
self.embedding_dim = embedding_dim
self.num_layers = num_layers
self.ih2h = nn.LSTM(embedding_dim, hidden_size, num_layers=num_layers,
bidirectional=True, batch_first=True, dropout=0.5)
self.pool2o = nn.Linear(2 * hidden_size, 1)
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax()
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.embed_layer(x)
o, _ = self.ih2h(x)
pool = nn.functional.max_pool1d(o.transpose(1, 2), x.shape[1])
pool = pool.transpose(1, 2).squeeze()
pool = self.dropout(pool)
output = self.sigmoid(self.pool2o(pool))
return output.squeeze()
def train(model: nn.Module,
iterator: Iterator,
optimizer: torch.optim.Optimizer,
criterion: nn.Module,
device: str):
model.train()
epoch_loss = 0
for _, batch in enumerate(iterator):
optimizer.zero_grad()
text = batch.text
if text.shape[0] > 1:
label = batch.label.type(torch.FloatTensor)
text = text.to(device)
label = label.to(device)
output = model(text).flatten()
loss = criterion(output, label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model: nn.Module,
iterator: Iterator,
criterion: nn.Module,
device: str):
model.eval()
epoch_loss = 0
with torch.no_grad():
for _, batch in enumerate(iterator):
text = batch.text
label = batch.label.type(torch.FloatTensor)
text = text.to(device)
label = label.to(device)
output = model(text).flatten()
loss = criterion(output, label)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def test(
model: nn.Module,
iterator: Iterator,
device: str):
with torch.no_grad():
y_real = []
y_pred = []
model.eval()
for batch in iterator:
text = batch.text
label = batch.label.type(torch.FloatTensor)
text = text.to(device)
output = model(text).flatten().cpu()
y_real += [label]
y_pred += [output]
y_real = torch.cat(y_real)
y_pred = torch.cat(y_pred)
fpr, tpr, _ = roc_curve(y_real, y_pred)
auroc = auc(fpr, tpr)
return auroc
def epoch_time(start_time: int,
end_time: int):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
###Output
_____no_output_____
###Markdown
Pretrain with cola dataset
###Code
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
N_EPOCHS = 20
lstm_pool_classifier = LSTMPoolingClassifier(
num_embeddings=len(TEXT.vocab),
embedding_dim=100,
hidden_size=200,
num_layers=4,
pad_idx=PAD_IDX,
)
if torch.cuda.is_available():
device = "cuda:0"
else:
device = "cpu"
_ = lstm_pool_classifier.to(device)
optimizer = torch.optim.Adam(lstm_pool_classifier.parameters())
bce_loss_fn = nn.BCELoss()
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(lstm_pool_classifier, cola_train_iterator, optimizer, bce_loss_fn, device)
valid_loss = evaluate(lstm_pool_classifier, cola_valid_iterator, bce_loss_fn, device)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.5f}')
print(f'\t Val. Loss: {valid_loss:.5f}')
test_auroc = test(lstm_pool_classifier, cola_test_iterator, device)
print(f"| CoLA Dataset Test AUROC: {test_auroc:.5f}")
before_tuning_lstm_pool_classifier = deepcopy(lstm_pool_classifier)
pool_sat_test_auroc = test(lstm_pool_classifier, sat_test_iterator, device)
print(f'| SAT Dataset Test AUROC: {pool_sat_test_auroc:.5f}')
###Output
| SAT Dataset Test AUROC: 0.69231
###Markdown
Fine Tuning
###Code
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
N_EPOCHS = 20
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(lstm_pool_classifier, sat_train_iterator, optimizer, bce_loss_fn, device)
valid_loss = evaluate(lstm_pool_classifier, sat_valid_iterator, bce_loss_fn, device)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.5f}')
print(f'\t Val. Loss: {valid_loss:.5f}')
pool_tuned_test_auroc = test(lstm_pool_classifier, sat_test_iterator, device)
print(f"| SAT Dataset Test AUROC: {pool_tuned_test_auroc:.5f}")
_ = before_tuning_lstm_pool_classifier.cpu()
_ = lstm_pool_classifier.cpu()
pool_sat_test_auroc = test(before_tuning_lstm_pool_classifier, sat_test_iterator, "cpu")
pool_tuned_test_auroc = test(lstm_pool_classifier, sat_test_iterator, "cpu")
print(f"Before fine-tuning SAT Dataset Test AUROC: {pool_sat_test_auroc:.5f}")
print(f"After fine-tuning SAT Dataset Test AUROC: {pool_tuned_test_auroc:.5f}")
with open("advanced_before_tuning_model.dill", "wb") as f:
model = {
"TEXT": TEXT,
"LABEL": LABEL,
"classifier": before_tuning_lstm_pool_classifier
}
dill.dump(model, f)
with open("advanced_after_tuning_model.dill", "wb") as f:
model = {
"TEXT": TEXT,
"LABEL": LABEL,
"classifier": lstm_pool_classifier
}
dill.dump(model, f)
###Output
_____no_output_____ |
section_4/analyse_results.ipynb | ###Markdown
 Fig. 8 This notebook loads and transforms the correlation results generated by the script `correlation.py`. Cavity predictions and simulations are compared.
###Code
import numpy as np
import itertools
import matplotlib.pyplot as plt
from collections import Counter
import random
import pickle
import re
import pandas as pd
from os import listdir
from collections import defaultdict
import os
import sys
sys.path.insert(0, "../lib") # add the library folder to the path I look for modules
import latexify
%matplotlib inline
###Output
_____no_output_____
###Markdown
$$C_{ij}= \left\langle\tanh\left(\frac{\beta}{2}h_i\right)\tanh\left(\frac{\beta}{2}h_j\right) \right\rangle_{\mathbf{n}\partial_i\mathbf{n}\partial_j}$$
###Code
dir_list = next(os.walk('.'))[1]# select only first subdirectories
folder_pattern = re.compile("kin=[0-9]+")
folder_names=[name for name in dir_list if folder_pattern.match(name)]# select only folder with specific names
pattern2 = re.compile("\d+.\d+|\d+")
matching_folders=[[n for n in pattern2.findall(folder)] for folder in folder_names]
print("Select one of this folder\ngamma")
for el in matching_folders:
print(str(el[0]))
kin = 2
def load_obj(folder,name ):
with open(folder+'/data/dic-' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def load_and_hist(T,kin,**kwargs):
    dic=load_obj('kin='+str(kin),"T="+str(T))
N=dic["N"]
kin=dic["kin"]
T=dic["T"]
J=dic["J"]
P_sim=dic["P_sim"]
C_sim=dic["C_sim"]
P_cav=dic["P_cav"]
C_cav=dic["C_cav"]
    h_dyn,b_dyn=np.histogram(P_sim,**kwargs,density=True)
    h_cavity,b_cavity=np.histogram(P_cav,bins=b_dyn,density=True)
#plot(bins[:-1],h_cavity,"-",mfc="w",label="cavity"+str(T))
return b_dyn,h_dyn,h_cavity
folder="kin="+str(kin)
filenames=listdir(folder+"/data")
pattern = re.compile("dic-T=\d*\.\d+|\d+.pkl")
dictnames=[name for name in filenames if pattern.match(name)]# select only dictionary files
pattern2 = re.compile("\d*\.\d+|\d+")
sim_params=[[n for n in pattern2.findall(dic)] for dic in dictnames]
print("Simulation available for")
for el in sim_params:
print(el[0])
def triu_todense(data,N):
A= np.zeros((N,N))
i,j = np.triu_indices(N,k=1)
A[i,j]=data
np.fill_diagonal(A,1)
return A
###Output
Simulation available for
0.2
0.4
###Markdown
Load data
###Code
T = 0.4
dic=load_obj(folder,"T="+str(T))
N=dic["N"]
kin=dic["kin"]
T=dic["T"]
J=dic["J"]
P_sim=dic["P_sim"]
C_sim=dic["C_sim"]
P_cav=dic["P_cav"]
C_cav=dic["C_cav"]
new_C_cav = dic['new_C_cav']
m = 2*P_cav-1
C_sim = triu_todense(C_sim,N)
C_cav = triu_todense(C_cav,N)
new_C_cav = triu_todense(new_C_cav,N)
cov_cav = (C_cav-np.outer(m,m))/np.sqrt(np.outer(1-m**2,1-m**2))
cov_cav_new = (new_C_cav-np.outer(m,m))/np.sqrt(np.outer(1-m**2,1-m**2))
cov_sim = (C_sim-np.outer(2*P_sim-1,2*P_sim-1))/np.sqrt(np.outer(4*P_sim*(1-P_sim),4*P_sim*(1-P_sim)))
cov_sim[np.isnan(cov_sim)]=0
cov_sim[np.isinf(cov_sim)]=0
np.fill_diagonal(cov_sim,1)
print('N',N,'N. replics:',dic['N_replics'],'N iterations',dic['N_iterations'])
latexify.latexify(columns = 2)
h,b,_ = plt.hist(cov_sim[np.triu_indices(N)],400,density=True,label = 'sim')
h,b = np.histogram(cov_cav[np.triu_indices(N)],b,density=True)
plt.plot((b[1:]+b[:-1])/2,h,label = 'cav 1',alpha = 1,lw = 0.8)
plt.semilogy()
plt.legend()
plt.xlabel('corr.coefficient,$\\rho$',fontsize= 12)
plt.ylabel('$\Pi(\\rho)$',fontsize= 12)
plt.tight_layout()
#plt.savefig('corr_coeff.pdf')
plt.figure()
h,b,_ = plt.hist(cov_sim[np.triu_indices(N)],500,density=True,label = 'sim')
h,b = np.histogram(cov_cav_new[np.triu_indices(N)],b,density=True)
plt.plot((b[1:]+b[:-1])/2,h,label = 'cav 2',alpha = 1,lw = 0.8)
plt.semilogy()
plt.legend()
plt.xlabel('corr.coefficient,$\\rho$',fontsize= 12)
plt.ylabel('$\Pi(\\rho)$',fontsize= 12)
plt.tight_layout()
print('T=',T)
#plt.savefig('corr_coeff_corrected.pdf')
###Output
T= 0.4
###Markdown
 Quantify accuracy Let me call $Y = |C_{cav}-C_{sim}|$. This matrix captures how much the correlation values returned by the cavity method and by the simulation differ.
###Code
Y = np.triu(np.abs(cov_cav_new-cov_sim))
i,j = np.where(np.abs(Y)>1e-2)
i,j,_ = zip(*sorted(zip(i,j,np.abs(Y)[np.abs(Y)>1e-2]),key =lambda x:x[2],reverse=True) )
h,b = np.histogram(Y[np.triu_indices(N,k=1)],bins = 1000)
plt.bar(b[:-1],np.cumsum(h)/(N*(N-1)/2),np.diff(b))
plt.xlabel('$\epsilon$,error between cavity and simulation')
plt.ylabel('$\mathrm{Prob}$(err$<\epsilon$)')
###Output
_____no_output_____ |
site/public/courses/DS-2.4/Notebooks/Network_Analysis/basic-network-analysis-tutorial.ipynb | ###Markdown
**Basic Network Analysis Tutorial** *08.08.2017* Update: added Community Detection, fixed formatting issues, added Elevation and Temporal Travel Dependencies, fixed some minor errors, added formulas. Table of Contents: 1. Introduction, 2. Fundamental Graph Theory, 3. Network Properties, 4. Network Robustness, 5. Community Detection, 6. Application: Competition, 7. Summary and Outlook, 8. References. 1. Introduction Welcome to this short introduction on how to use network analysis for this competition. Gain a deeper understanding of why certain taxis may have a longer trip duration than others and how to extract some useful features for your machine learning algorithm, e.g., calculate the shortest path between the pickup and dropoff point and, given that, determine which Boroughs & Neighborhoods the taxi traverses. Are there any 'sensitive' roads on the path of a given taxi which may cause a longer trip time? These and more questions can be addressed by a network analysis. This notebook uses graph data from this [dataset](https://www.kaggle.com/crailtap/street-network-of-new-york-in-graphml); specifically, it makes use of the Manhattan subgraph, because computation times on the full graph would be too long for Kaggle Kernels. I would also like to encourage you to check out the awesome [OSMNX package](https://github.com/gboeing/osmnx), from which I extracted the dataset and from which I make use of some functions. It is not available on Kaggle because it needs an Internet connection to download the graphs. The rest of the notebook is structured as follows: first we take a look at some basic properties of the network, like how big it is, and then dig deeper to explore the generative process of the network and which roads may be important in the sense of high traffic volume and under the aspect of road closures aka attacks. Finally we will calculate useful features for the competition, like shortest paths and which boroughs they pass. Here is a sneak peek of the raw New York City Street Network!  2. Fundamental Graph Theory In this and the following sections, we will introduce some basic terminology of graph theory and try to illustrate it on the New York City Street Network, or rather the Manhattan subgraph. We will start with the fundamental definition: what is a graph? A graph **G = (V, E)** consists of a set of **nodes** V (or vertices, points) and a set of **edges** E (links, lines) which illustrate how the nodes in the network interact with each other. Edges can be **directed** or **undirected**. The number of nodes **N** is often called the **size** of the network and states the number of objects in the model. In this example nodes are represented by way points in the OSM map from which the graph was generated, e.g., crossings, and edges are roads or sub-parts of roads between two nodes. Each node or edge can hold different attributes, e.g., nodes can be assigned to different types like crossings or dead-ends, and edges might have a certain numerical attribute like a speed limit. Edge attributes, in the case of numerical attributes, are called weights. A graph with weighted edges is called a **weighted graph**. A first measurement for a node in the graph is the so-called **degree**, which stands for the number of edges it has to other nodes, denoted by *k*. One might also ask what the average degree in the network is. But wait a second...
 if the degree is the number of edges a node has to other nodes, don't we have to distinguish between directed and undirected edges to calculate the degree? Indeed, we need to distinguish between the **In-degree** and the **Out-degree** of a node, simply measuring how many edges are leaving a node and how many edges are coming in. This of course depends on whether the graph is directed or not. In the case of an undirected graph we can calculate the **average degree** by the following formula: $$ (k) = \frac{1}{N} \sum_{i = 1}^N k_i = \frac{2E}{N}$$ Similarly, this can be done separately for the in- and out-degree: $$ (k^{in}) = \frac{1}{N} \sum_{i = 1}^N k^{in}_i = (k^{out}) = \frac{1}{N} \sum_{i = 1}^N k^{out}_i = \frac{E}{N}$$ because $$k_i = k^{out}_i + k^{in}_i $$ Let's have a first look at the network (or you can call it a graph) and its basic properties. But first we need to load the graph and import a library which is capable of doing so. In this notebook we use networkx as the graph computing library, but there are many more: [igraph](http://igraph.org/redirect.html), [osmnx](https://github.com/gboeing/osmnx) or [SNAP](http://snap.stanford.edu/).
###Code
#For later use
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import networkx as nx
warnings.filterwarnings('ignore')
#load the graph with nx.read_graphml
G = nx.read_graphml('../input/street-network-of-new-york-in-graphml/newyork.graphml')
nx.info(G)
###Output
_____no_output_____
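###Markdown
 As a small added cross-check of the average-degree formulas above (using the node and edge counts of the directed graph `G` loaded in the previous cell):
###Code
# average in-degree = average out-degree = E / N for a directed graph
N = G.number_of_nodes()
E = G.number_of_edges()
print(E / float(N))
###Output
_____no_output_____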
###Markdown
 The graph has *4426* nodes and *9626* edges, so the size of the network is *4426*, and it states that it is a MultiDiGraph, which means the edges of the graph are **directed**, so they point to a specific node in the network. Why is this so? Because our network is the Manhattan street network, and one property of it is that it contains one-way streets, which can only be modeled as directed edges. Because we represent the edges as directed, each 'normal' street is now also modeled with directed edges, which means we need to introduce two edges for each normal street, one for each direction of the street. On average the in-degree is *2.17* and the out-degree is also *2.17*; both are the same, just as discussed. Average in-degree and out-degree are always the same, but the distribution of the individual degrees can vary. Worth mentioning here is that, just like for the degree, some graph properties are defined only on directed or only on undirected graphs and some can be translated to both, so in order to calculate some measurements we also provide an undirected graph for the Manhattan network, simply by calling the built-in Graph constructor from networkx. A graph is often called a **simple graph** if it contains no self-loops and no directed or parallel edges.
###Code
G_simple = nx.Graph(G)
nx.info(G_simple)
###Output
_____no_output_____
###Markdown
 Interesting! The number of nodes is still *4426* but the number of edges is decreased to only *788* edges. Also the average degree went up to *3.56*. One should not be surprised that the new degree is not just in-degree + out-degree: the simplified network merged multiple edges between two nodes to reduce itself into an undirected network, but for a directed network one can always state degree, in-degree and out-degree! Indeed this degree seems more convenient because of the grid-like structure of Manhattan. So let's have a closer look at the distribution of the node degrees of our graph, for the simplified network as well as for the directed case.
###Code
from collections import Counter
degree_dic = Counter(G.degree().values())
degree_hist = pd.DataFrame({"degree": list(degree_dic.values()),
"Number of Nodes": list(degree_dic.keys())})
plt.figure(figsize=(20,10))
sns.barplot(y = 'degree', x = 'Number of Nodes',
data = degree_hist,
color = 'darkblue')
plt.xlabel('Node Degree', fontsize=30)
plt.ylabel('Number of Nodes', fontsize=30)
plt.tick_params(axis='both', which='major', labelsize=20)
plt.show()
###Output
_____no_output_____
###Markdown
 Ok, so most of the nodes tend to have a degree of *3* or *4*, which comes from the grid-like structure of Manhattan. And this is confirmed by plotting the distributions for the directed case: most nodes with two outgoing edges also have two incoming edges!
###Code
ins = list((G.in_degree()).values())
outs = list((G.out_degree()).values())
degrees = pd.DataFrame({"in-degree": ins, "out-degree": outs})
fig = sns.jointplot(x="in-degree",y="out-degree",data=degrees,kind="kde", color = 'darkblue',size=8)
###Output
_____no_output_____
###Markdown
 Given the number of nodes and edges one can ask: what is the structure of the network and what does it look like? A first measure of the structure of a network is the so-called **density**, which measures how many links from all possible links within the network are realized. The density is *0* if there are no edges, called an **empty graph**, and *1* if we have a **complete graph**, where all possible links between nodes are established. $$dens_{undirected} = \frac{2E}{N(N-1)}$$$$dens_{directed} = \frac{E}{N(N-1)}$$
###Code
nx.density(G)
###Output
_____no_output_____
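###Markdown
 As a quick added check that the directed density formula above matches what networkx computes:
###Code
# directed density: E / (N * (N - 1))
N = G.number_of_nodes()
E = G.number_of_edges()
print(E / float(N * (N - 1)))
###Output
_____no_output_____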
###Markdown
Having a density of *0.00049* makes sense because in a street network not all nodes can be connected to all other nodes. Enough text for now, let's plot the graph!
###Code
#import osmnx
#ox.plot_graph(G,fig_height= 12, node_size=10, node_zorder=2, node_color = '#808080')
###Output
_____no_output_____
###Markdown
 Nice! This gives us a nice overview of how Manhattan looks. But such awesome figures like this, made with osmnx, are not always the case. If we plot the graph with the built-in draw function from networkx, our nodes are just plotted according to some layout we choose:
###Code
nx.draw(G, pos=nx.spring_layout(G), node_size=0.01, width=0.1)
###Output
_____no_output_____
###Markdown
 Wow, doesn't look much like the Manhattan street network, right? One should keep in mind to never trust a graph visualization, as it can lead to false impressions of the properties of the graph. Talking about properties, what attributes do our nodes have?
###Code
# we cannot just access the nodes with G(0) or so; we must call them by their id
# G.nodes() returns a list of all node ids, e.g., '42459137'
G[G.nodes()[1]]
###Output
_____no_output_____
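###Markdown
 To make these attributes a bit more concrete, here is a small added peek at the data dictionary of a single edge (which keys are present varies from edge to edge, and this indexing style assumes the same networkx version used above):
###Code
# inspect the attribute dictionary of one edge; keys such as 'name',
# 'length' or 'oneway' may or may not be present on every edge
u, v, data = G.edges(data=True)[0]
print(u, v)
print(data)
###Output
_____no_output_____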
###Markdown
 Each node is a dictionary containing the nodes to which it is connected, with properties such as how many lanes the street has, whether it is a one-way street or not, the name of the street, and in some cases even the maximum allowed speed. 3. Network Properties In this section we will talk about some basic measurements which will give us some feedback about the structure of the graph. This will include what the average shortest path distance between nodes is, in which way the nodes in the network are connected to each other, and how strong the connection between a node and its neighbours is. We will start by defining what the **shortest path** between two nodes *i* and *j* in the network is. The shortest path *d(i,j)*, as the name suggests, is just the path in the network between nodes *i* and *j* which has the fewest edges. In the case of an undirected network, the shortest path between *i* and *j* is always the same regardless of which node we start from; however, in a directed network this does not hold true and the shortest path between the nodes can vary depending on which node we start from. On the basis of the shortest path we can define many more measurements, e.g., the longest shortest path in the network is called the **diameter** of the graph and gives us a feeling of how far things are separated in the graph. We will compute the diameter on the simple graph to keep the computation time down.
###Code
nx.diameter(G_simple)
###Output
_____no_output_____
###Markdown
 The function returns *88*: the longest shortest path in the network contains 88 edges. Besides the longest shortest path we can also ask what the average shortest path length is, denoted by: $$ a = \sum_{i \neq j \in V} \frac{d(i,j)}{N(N-1)}$$, where *d(i,j)* is the shortest path length between nodes *i* and *j*.
###Code
nx.average_shortest_path_length(G_simple)
###Output
_____no_output_____
###Markdown
 Coming back to the question of what the structure of our network is, one can ask what the generative process behind the network is. Is the network random? Or does it follow some underlying laws on how it is created? Here we introduce the **Scale-Free Property**, which states that 'real' networks have a certain underlying creation process, like the WWW, where some nodes get more attention than others and therefore manage to build many more edges than other nodes, resulting in some nodes which have a much higher degree compared to other nodes. These nodes with a very high degree in the network are called **hubs**. One can think of Twitter as a social network where prominent people represent hubs, having many more edges to other nodes than the average user. But does our network follow the Scale-Free Property because it is a 'real' network? Let's plot the degree distributions to find out!
###Code
from collections import Counter
import collections
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
in_degrees = G.in_degree()
in_h = Counter(in_degrees.values())
in_dic = collections.OrderedDict(sorted(in_h.items()))
in_hist = list(in_dic.values())
in_values =list(in_dic.keys())
out_degrees = G.out_degree()
out_h = Counter(out_degrees.values())
out_dic = collections.OrderedDict(sorted(out_h.items()))
out_hist = list(out_dic.values())
out_values =list(out_dic.keys())
mu = 2.17
sigma = sp.sqrt(mu)
mu_plus_sigma = mu + sigma
x = range(0,10)
prob = stats.poisson.pmf(x, mu)*4426
plt.figure(figsize=(12, 8))
plt.grid(True)
plt.loglog(out_values,out_hist,'ro-') # in-degree
plt.loglog(in_values,in_hist,'bv-') # in-degree
plt.plot(x, prob, "o-", color="black")
plt.legend(['In-degree','Out-degree','Poission'])
plt.xlabel('Degree')
plt.ylabel('Number of nodes')
plt.title('Manhatten Street Network')
plt.xlim([0,2*10**2])
plt.show()
###Output
_____no_output_____
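###Markdown
 As a rough added sanity check on the shape of the degree distribution: for a Poisson-distributed quantity the variance is close to the mean, which is easy to compare for the observed in-degrees:
###Code
# mean and variance of the in-degree distribution
in_degs = np.array(list(G.in_degree().values()))
print('mean:', in_degs.mean(), 'variance:', in_degs.var())
###Output
_____no_output_____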
###Markdown
 If a graph's degree distribution follows the scale-free property, then on a log-log scale plot like the one above the data points should form approximately a straight line, indicating the presence of hubs. In our figure in the cell above this is clearly not the case. As already plotted, the degree distributions follow a Poisson distribution, which is typical for a random network. So what can we say about the Manhattan street network? It has more edges than nodes, and the fact that it is not scale-free means the absence of hub nodes, and it follows a Poisson distribution like random networks do. Now we can ask the question: is it good for a road network that its degree distribution does not have the scale-free property and is even random? How does this influence the robustness of the network, in the sense of what happens if specific roads are closed and how this influences the traffic flow? 4. Network Robustness What does it take to break down all the movement in Manhattan? Which roads are sensitive, in the sense that if these roads are closed the impact on the whole network traffic flow is drastic? Network robustness tries to define measurements which capture how robust a network is to attacks, failures or something like a traffic jam. In this section some basic measurements will be introduced and tested on the Manhattan subgraph. Node Connectivity The **Node Connectivity** describes the number of nodes we must delete from the graph G until it is **disconnected**. **Connected** means that every node in our graph G can reach any other node in the network via edges. If this is not the case the graph is disconnected. An important property of any graph should be that it is not easy to disconnect. This is a somewhat vague definition, especially for a road network, as there might be dead-end roads: removing the connecting node of a dead-end would immediately make our graph G disconnected. Here it is also time to introduce the notion of a **simple graph**, which is a graph without directed edges or self-loops. Many measurements in libraries are only calculated on simple graphs, because it simplifies calculations or the measurements are just not defined on directed graphs. For the next few sections we treat our graph as undirected to illustrate these measurements:
###Code
#create two simple graphs from our original directed graph
G_simple = nx.Graph(G)
G_simple2 = nx.Graph(G)
nx.node_connectivity(G_simple)
###Output
_____no_output_____
###Markdown
 As expected, the output of the node connectivity function is 1, meaning our graph is disconnected after removing just 1 node. But does this matter? No, because the size of the removed subgraph is just a single node and the rest of the network is still connected. If, however, the size of the resulting disconnected part is relatively big, this indicates a problem in the structure of the network. Algebraic Connectivity Basically our network is nothing other than a matrix containing 1's where two nodes are connected to each other. Graphs can be defined in different ways as matrices, and one of these matrices is the so-called Laplacian matrix, which has special properties in the eigenspace. Its eigenvalues are non-negative and, if ordered, the smallest eigenvalue is zero. The second smallest eigenvalue of the Laplacian matrix is called the **algebraic connectivity** or the **Fiedler value**. It is a direct indicator for the robustness of the network and has the properties that: 1. The algebraic connectivity is equal to zero if and only if the graph is disconnected. 2. The algebraic connectivity of a graph is not greater than the node connectivity.
###Code
nx.algebraic_connectivity(G_simple)
###Output
_____no_output_____
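###Markdown
 To connect this number back to the definition, here is a small added toy example (kept on a tiny graph, since a full eigendecomposition of the Manhattan network would be expensive) showing that the second-smallest Laplacian eigenvalue matches `nx.algebraic_connectivity`:
###Code
# the Fiedler value of a small path graph, computed two ways
toy = nx.path_graph(10)
eigenvalues = sorted(nx.laplacian_spectrum(toy))
print(eigenvalues[1], nx.algebraic_connectivity(toy))
###Output
_____no_output_____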
###Markdown
 According to these properties we can say that the graph is connected, because the algebraic connectivity is *0.00034*, which is greater than zero and, as expected, not greater than the node connectivity. Betweenness Centrality Betweenness centrality can be measured for nodes or edges and is defined as the fraction of all shortest paths in the network passing through the edge/node for which it is calculated. Roads with a very high betweenness centrality lie on many shortest paths in the network and should be considered important roads which may have increased traffic volume.
###Code
#compute the betweenness centrality on one of the simple graphs, this can take a while
between = nx.betweenness_centrality(G_simple)
###Output
_____no_output_____
###Markdown
 In the cells above we created two simple graphs and calculated the betweenness centrality for each node in the network. We can now tell which nodes in the network play an important role, as they are traversed more often. Let's find out which node lies on the most shortest paths in the network:
###Code
#G_projected = ox.project_graph(G)
#max_node, max_bc = max(between.items(), key=lambda x: x[1])
#max_node, max_bc
###Output
_____no_output_____
###Markdown
 (42431099, 0.2170387058765219) In Manhattan the node with ID 42431099 has the highest betweenness centrality, with 21.7% of all shortest paths running through it. This needs to be plotted! 
###Code
G['42431099']
###Output
_____no_output_____
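###Markdown
 The centrality map referred to below was generated with osmnx outside of Kaggle; a rough, untested sketch of how such a plot could be produced (left commented out because osmnx is not available here) might look like this:
###Code
# sketch only -- osmnx cannot be imported in this environment
#import osmnx as ox
#import matplotlib.cm as cm
#bc = [between[node] for node in G.nodes()]
#norm = plt.Normalize(min(bc), max(bc))
#node_colors = [cm.viridis(norm(value)) for value in bc]
#ox.plot_graph(G, fig_height=12, node_size=10, node_zorder=2, node_color=node_colors)
###Output
_____no_output_____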
###Markdown
 So the node with the highest betweenness centrality is located in West End! Now it may be interesting to see how all nodes' betweenness centrality looks on one map, and maybe there are some patterns to detect! We plot the centrality for each node from low (dark violet) to high (light yellow).   Network Attacks Now we know some basic robustness measurements, so it is time to see how robust our network really is. For this we will attack the network's nodes with two approaches: 1. Delete nodes according to the calculated betweenness centrality, going from high-scoring nodes to low-scoring ones. 2. Random node failures, deleting nodes at random. Deleting nodes will have the effect that the **giant component**, the largest connected component in the graph, will shrink, and some nodes might have a specific role in this process which causes a drastic shrinkage of the giant component.
###Code
'''
import operator
from random import shuffle
from random import randrange
from random import randint
import random
import matplotlib.ticker as mtick
sorted_x = sorted(between.items(), key=operator.itemgetter(1), reverse=True)
rand_x = list(range(0,4426 ))
random.shuffle(rand_x)
between_giant = []
between_rand = []
avg_degs = []
for x in range(3000):
remove = sorted_x[x]
remove2 = sorted_x[rand_x[x]]
G_simple.remove_nodes_from(remove)
G_simple2.remove_nodes_from(remove2)
giant = len(max(nx.connected_component_subgraphs(G_simple), key=len))
giant2 = len(max(nx.connected_component_subgraphs(G_simple2), key=len))
between_giant.append(giant)
between_rand.append(giant2)
y1 = between_giant
y2 = between_giant
y1= y1[ :-1]
y2= y2[1: ]
perc = np.linspace(0,100,len(between_giant))
fig = plt.figure(1, (12,8))
ax = fig.add_subplot(1,1,1)
ax.plot(perc, between_giant)
ax.plot(perc, between_rand)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
ax.xaxis.set_major_formatter(xticks)
ax.set_xlabel('Fraction of Nodes Removed')
ax.set_ylabel('Giant Component Size')
ax.legend(['betweenness','random'])
plt.show()
'''
###Output
_____no_output_____
###Markdown
 Ok, what does the figure above tell us? First of all, deleting nodes which play an important role in the network leads to a faster shrinkage of the giant component than just deleting nodes at random! But only beyond a given percentage level! At the beginning it doesn't matter whether the nodes are picked at random or by their importance, which indicates the robustness of the network. However, at the point where about 10 percent of the nodes are removed, deleting specific important nodes leads to a much faster reduction in the giant component's size. So these nodes must play an important role in holding the nodes of the network together! Interestingly, after deleting only about 50% of the nodes the size of the giant component rapidly reaches almost zero nodes. Would the network be more robust if it contained hubs? Or would this make the network even more prone to attacks? Leave a comment below what you think! 5. Community Detection This section introduces Community Detection, one of my favorite topics in network analysis. The goal of community detection is to find subgraphs aka communities in a given graph which we want to analyse. We start by defining what exactly a community is. Well, there is no 'one' or 'right' definition of community, because it really depends on the kind of graph you want to analyse and what question you want to answer. A common definition based on the graph's structure is that a community is a group of nodes which are highly connected within this group, but are less connected to other nodes which are not in this group. But as said, this is not the only definition you can use; sometimes you can define communities based on a given node attribute or a combination of both, graph-based and attributes. For this section we will use the infamous [Zachary's karate club](https://en.wikipedia.org/wiki/Zachary%27s_karate_club) network, because it is less computationally expensive and also very easy to draw. The short story behind the network is that a conflict between an instructor and an admin led to the split of the club into two separate ones. Because the networkx library is not so convenient for community detection, we will switch to igraph for this section, which has more algorithms for this topic, but first we have a look at the network!
###Code
import networkx as nx
import matplotlib.pyplot as plt
import igraph as ig
import random
np.random.seed(3)
G1=nx.karate_club_graph()
nx.draw_spring(G1)
###Output
_____no_output_____
###Markdown
Most of the existing community detection algorithms work only on undirected graphs, so we will convert the networkx graph to igraph and also make it undirected.
###Code
#convert from networkx to igraph
G2 = ig.Graph.Adjacency((nx.to_numpy_matrix(G1) > 0).tolist())
#make the igraph graph undirected :D
G2.to_undirected()
###Output
_____no_output_____
###Markdown
In the following we will discuss a bunch of algorithms which are more or less used in practice. Girvan–Newman algorithm
###Code
np.random.seed(3)
dendrogram = G2.community_edge_betweenness()
# convert it into a flat clustering
clusters = dendrogram.as_clustering(2)
# get the membership vector
membership = clusters.membership
nx.draw_spring(G1, cmap = plt.get_cmap('jet'), node_color = membership, node_size=120, with_labels=False)
###Output
_____no_output_____
###Markdown
Modularity Maximization
###Code
np.random.seed(3)
dendrogram = G2.community_fastgreedy()
# convert it into a flat clustering
clusters = dendrogram.as_clustering(2)
# get the membership vector
membership = clusters.membership
nx.draw_spring(G1, cmap = plt.get_cmap('jet'), node_color = membership, node_size=120, with_labels=False)
###Output
_____no_output_____
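###Markdown
 As an added side note (reusing the membership vector from the cell above), igraph can also report the modularity score of this split, which is the quantity the fastgreedy algorithm greedily maximises:
###Code
# modularity of the two-community partition found by fastgreedy
print(G2.modularity(membership))
###Output
_____no_output_____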
###Markdown
Leading Eigenvector
###Code
np.random.seed(3)
dendrogram = G2.community_leading_eigenvector(2)
#get membership
membership = dendrogram.membership
nx.draw_spring(G1, cmap = plt.get_cmap('jet'), node_color = membership, node_size=120, with_labels=False)
###Output
_____no_output_____
###Markdown
6. Application: Competition In this last Section we will see how to compute the shortest path for our taxi trip data and how to add one could possible make use of all kind of centrality measures as features. Shortest PathsFirst of all we need two functions which will compute the nearest node in the network for a given taxi pick-up and or drop-off point
###Code
#taken from. https://github.com/gboeing/osmnx
def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):
phi1 = np.deg2rad(90 - lat1)
phi2 = np.deg2rad(90 - lat2)
theta1 = np.deg2rad(lng1)
theta2 = np.deg2rad(lng2)
cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2))
arc = np.arccos(cos)
distance = arc * earth_radius
return distance
def get_nearest_node(G, point, return_dist=False):
coords = np.array([[node, data['x'], data['y']] for node, data in G.nodes(data=True)])
df = pd.DataFrame(coords, columns=['node', 'x', 'y']).set_index('node')
df['reference_y'] = point[0]
df['reference_x'] = point[1]
distances = great_circle_vec(lat1=df['reference_y'],
lng1=df['reference_x'],
lat2=df['x'].astype('float'),
lng2=df['y'].astype('float'))
nearest_node = int(distances.idxmin())
if return_dist:
return nearest_node, distances.loc[nearest_node]
else:
return nearest_node
#load the training data
train = pd.read_csv('../input/nyc-taxi-trip-duration/train.csv')
#go through the dataset and calculate the shortest path
for index, row in train[24:25].iterrows():
pick_point = ( row['pickup_longitude'],row['pickup_latitude'])
drop_point = ( row['dropoff_longitude'],row['dropoff_latitude'])
pick_node = get_nearest_node(G, pick_point)
drop_node = get_nearest_node(G, drop_point)
try:
route = nx.shortest_path(G, str(pick_node), str(drop_node))
#plot the shortest path on the graph
#fig, ax = ox.plot_graph_route(G, route,fig_height=15, node_size=1)
print("Shortest Path:")
print(route)
gsub = G.subgraph(route)
s_len = sum([float(d['length']) for u, v, d in gsub.edges(data=True)])
print("Length in Km:")
print(s_len/1000)
except:
print("Some Error")
#handle error
pass
#the corresponding node betweenness scores for each edge in the shortest path
print("Betweenness Centrality for each node on the path")
node_bet = []
for node in route:
node_bet.append(between[node])
print(node_bet)
print(np.asarray(node_bet).sum())
print("betweeness sum")
print(sum(node_bet))
print("have to check why this is not < 1 ")
###Output
_____no_output_____ |
pputidame/solve_demo.ipynb | ###Markdown
Postprocessing
###Code
with open('./me_models/solution.pickle', 'rb') as solution:
me = pickle.load(solution)
with open('./me_models/ecoli_solution.pickle', 'rb') as solution:
ecome = pickle.load(solution)
b
me.solution
# Exchange
df_m = exchange_single_model(bsub)
df = exchange_single_model(me)
df.join(df_m['flux'],lsuffix='_me',rsuffix='_m')
# Solution summary
summary_df = solution_summary(me)
summary_m_df = solution_summary(bsub)
summary_df.to_csv('./solution_summary.csv')
summary_df = solution_summary(me)
###Output
_____no_output_____
###Markdown
Compare M/ME with ecoli
###Code
_,rxn_id_dict = homogenize_reactions(model=bsub,ref_model=eco)
# M - ME comparison of metabolic fluxes in Bacillus
flux_dict = me.get_metabolic_flux()
me_flux_df = pd.DataFrame.from_dict({'flux':flux_dict}).rename(index=rxn_id_dict)
comparison_df = summary_m_df.join(me_flux_df,lsuffix='_m',rsuffix='_me')
comparison_df[abs(comparison_df.flux_m)>0]
# M - ME comparison of metabolic fluxes in E. coli
summary_df_ecoli = solution_summary(ecome)
summary_m_df_ecoli = solution_summary(eco)
flux_dict_ecoli = ecome.get_metabolic_flux()
comparison_df_ecoli = summary_m_df_ecoli.join(\
pd.DataFrame.from_dict({'flux':flux_dict_ecoli}),lsuffix='_m',rsuffix='_me')
summary_m_df[summary_m_df.flux > 0]
import matplotlib.pyplot as plt
threshold = 100
temp_df = comparison_df[abs(comparison_df.flux_m)<threshold]
temp_df_ecoli = comparison_df_ecoli[abs(comparison_df_ecoli.flux_m)<threshold]
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.scatter(temp_df['flux_m'],temp_df['flux_me'])
plt.xlabel('M simulation')
plt.ylabel('ME simulation')
plt.title('Bacillus')
plt.subplot(1,2,2)
plt.scatter(temp_df_ecoli['flux_m'],temp_df_ecoli['flux_me'])
plt.xlabel('M simulation')
plt.ylabel('ME simulation')
plt.title('E. coli')
# Store results to visualize in Escher
comparison_df['flux_m'].to_csv('fluxdist_m_bsub.csv',header=False)
comparison_df['flux_me'].to_csv('fluxdist_me_bsub.csv',header=False)
comparison_df_ecoli['flux_m'].to_csv('fluxdist_m_ecoli.csv',header=False)
comparison_df_ecoli['flux_me'].to_csv('fluxdist_me_ecoli.csv',header=False)
with open('./me_models/ecoli_solution.pickle', 'rb') as f:
ecome = pickle.load(f)
aminoacids = [m.id for m in bsub.metabolites if '__L_c' in m.id and len(m.id) == 8]
all_mets = [m.id for m in bsub.metabolites]
sp_dict = {}
for aa in aminoacids:
idx = all_mets.index(aa)
sp = bsub.solution.y[idx]
sp_dict[aa] = sp
pd.DataFrame.from_dict({'sp':sp_dict}).sort_values(by='sp')
###Output
_____no_output_____ |
ipynb/deprecated/energy/EnergyModel_ClusterEnergy.ipynb | ###Markdown
 Energy Model Building Flow - Example 1 for platforms supporting per-cluster energy meters This notebook shows how to build an energy model of a JUNO platform running a Linux kernel. It can be used as a reference implementation of an energy model building flow for platforms where it's possible to measure the energy consumption of each frequency domain. For JUNO, Linux kernel hardware monitors will be used to measure energy. Hardware monitors measure energy in microJoules [$\mu$J]. Note: this requires `scipy`. You can install it with `sudo -H pip install scipy` or similar.
###Code
import logging
from conf import LisaLogging
LisaLogging.setup()
%matplotlib inline
import devlib
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import trappy
from collections import namedtuple, OrderedDict
from csv import DictWriter
from devlib.utils.misc import ranges_to_list
from env import TestEnv
from matplotlib.ticker import FormatStrFormatter, MaxNLocator
from scipy.stats import linregress
from time import sleep
from trappy.plotter.ColorMap import ColorMap
###Output
_____no_output_____
###Markdown
Configuration
###Code
# Setup a target configuration
my_conf = {
# Target platform and board
"platform" : 'linux',
"board" : 'juno',
# Target board IP/MAC address
"host" : '10.1.211.18',
# Login credentials
"username" : 'root',
"password" : '',
# Tools required by the experiments
"tools" : ['trace-cmd'],
"modules" : ['hwmon', 'bl', 'cpufreq', 'cpuidle', 'hotplug', 'cgroups'],
# Energy meters description
"emeter" : {
'instrument' : 'hwmon',
'conf' : {
'sites' : [ 'BOARDLITTLE', 'BOARDBIG' ],
'kinds' : [ 'energy' ],
},
'channel_map' : {
'little' : 'BOARDLITTLE',
'big' : 'BOARDBIG',
}
},
# FTrace events to collect for all the tests configuration which have
# the "ftrace" flag enabled
"ftrace" : {
"events" : [
"cpu_frequency",
"cpu_idle",
"sched_switch"
],
"buffsize" : 10 * 1024,
},
}
# Initialize a test environment using:
# the provided target configuration (my_conf)
te = TestEnv(target_conf=my_conf, force_new=True)
target = te.target
###Output
2016-09-08 19:27:12,272 INFO : Target - Using base path: /data/lisa
2016-09-08 19:27:12,273 INFO : Target - Loading custom (inline) target configuration
2016-09-08 19:27:12,274 INFO : Target - Devlib modules to load: ['cgroups', 'hwmon', 'cpufreq', 'bl', 'hotplug', 'cpuidle']
2016-09-08 19:27:12,275 INFO : Target - Connecting linux target:
2016-09-08 19:27:12,276 INFO : Target - username : root
2016-09-08 19:27:12,276 INFO : Target - host : 10.1.211.18
2016-09-08 19:27:12,277 INFO : Target - password :
2016-09-08 19:27:12,278 INFO : Target - Connection settings:
2016-09-08 19:27:12,279 INFO : Target - {'username': 'root', 'host': '10.1.211.18', 'password': ''}
2016-09-08 19:27:54,035 INFO : Target - Initializing target workdir:
2016-09-08 19:27:54,037 INFO : Target - /root/devlib-target
2016-09-08 19:28:10,999 INFO : Target - Topology:
2016-09-08 19:28:11,001 INFO : Target - [[0, 3, 4, 5], [1, 2]]
2016-09-08 19:28:12,634 INFO : Platform - Loading default EM:
2016-09-08 19:28:12,636 INFO : Platform - /data/lisa/libs/utils/platforms/juno.json
2016-09-08 19:28:13,839 INFO : FTrace - Enabled tracepoints:
2016-09-08 19:28:13,841 INFO : FTrace - cpu_frequency
2016-09-08 19:28:13,842 INFO : FTrace - cpu_idle
2016-09-08 19:28:13,843 INFO : FTrace - sched_switch
2016-09-08 19:28:13,844 INFO : HWMon - Scanning for HWMON channels, may take some time...
2016-09-08 19:28:13,850 INFO : HWMon - Channels selected for energy sampling:
2016-09-08 19:28:13,851 INFO : HWMon - a57_energy
2016-09-08 19:28:13,852 INFO : HWMon - a53_energy
2016-09-08 19:28:13,853 INFO : TestEnv - Set results folder to:
2016-09-08 19:28:13,854 INFO : TestEnv - /data/lisa/results/20160908_192813
2016-09-08 19:28:13,855 INFO : TestEnv - Experiment results available also in:
2016-09-08 19:28:13,856 INFO : TestEnv - /data/lisa/results_latest
###Markdown
Energy Model Parameters (CPUs, OPPs and Idle States)
###Code
# The EM reports capacity and energy consumption for each frequency domain.
# The frequency domains to be considered by the following EM building flow
# are described by the parameters of this named tuple
ClusterDescription = namedtuple('ClusterDescription',
['name', 'emeter_ch', 'core_name',
'cpus', 'freqs', 'idle_states'])
# List of frequency domains (i.e. clusters) to be considered for the EM
clusters = [
ClusterDescription(
# Name of the cluster
name = "big",
# Name of the energy meter channel as specified in the target configuration
emeter_ch = "big",
# Name of the cores in the cluster
core_name = target.big_core,
# List of cores in the cluster
cpus = target.bl.bigs,
# List of frequencies available in the cluster
freqs = target.bl.list_bigs_frequencies(),
# List of idle states available in the cluster
idle_states = range(len(target.cpuidle.get_states()))
),
ClusterDescription("little",
"little",
target.little_core,
target.bl.littles,
target.bl.list_littles_frequencies(),
range(len(target.cpuidle.get_states()))
)
]
clusters
# Mapping between cluster names and cluster IDs
cluster_ids = OrderedDict([(0, 'little'), (1, 'big')])
###Output
_____no_output_____
###Markdown
Benchmark example
###Code
class Sysbench(object):
"""
Sysbench benchmark class.
:param duration: maximum workload duration in seconds
:type duration: int
"""
sysbench_path = "/data/local/tmp/bin/sysbench"
def __init__(self, target, duration):
self.target = target
self.duration = duration
def run(self, cgroup, threads):
"""
Run benchmark using the specified number of 'threads'
to be executed under the specified 'cgroup'.
:param cgroup: cgroup where to run the benchmark on
:type cgroup: str
:param threads: number of threads to spawn
:type threads: int
:returns: float - performance score
"""
bench_out = self.target.cgroups.run_into(
cgroup,
"{} --test=cpu --num-threads={} --max-time={} run"
.format(self.sysbench_path, threads, self.duration)
)
match = re.search(r'(total number of events:\s*)([\d.]*)', bench_out)
return float(match.group(2))
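# Illustrative check of the score-parsing regex on a made-up output line
# (real sysbench output may differ slightly between versions):
_demo_out = "    total number of events:              12345"
assert float(re.search(r'(total number of events:\s*)([\d.]*)', _demo_out).group(2)) == 12345.0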
###Output
_____no_output_____
###Markdown
Utility Functions
###Code
def linfit(x, y):
slope, intercept, r, p, stderr = linregress(x, y)
return slope, intercept
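# Quick illustrative check: a perfectly linear series y = 2x + 1
# should give back slope 2 and intercept 1
_s, _i = linfit([0, 1, 2, 3], [1, 3, 5, 7])
assert abs(_s - 2.0) < 1e-6 and abs(_i - 1.0) < 1e-6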
###Output
_____no_output_____
###Markdown
Energy Model Building Active States Profiling
###Code
def compute_power_perf(clusters, loop_cnt, benchmark, bkp_file='pstates.csv'):
"""
Perform P-States profiling on each input cluster.
This method requires a `benchmark` object with the following
characteristics:
- duration, attribute that tells the workload duration in seconds
- run(cgroup, threads), run the benchmark into the specified 'cgroup',
spawning the specified number of 'threads',
and return a performance score of their execution.
Data will be saved into a CSV file at each iteration such that, if something
    goes wrong, the user can restart the experiment considering only the OPPs
that had not yet been profiled.
:param clusters: list of clusters to profile
:type clusters: list(namedtuple(ClusterDescription))
:param loop_cnt: number of iterations for each experiment
:type loop_cnt: int
:param benchmark: benchmark object
:type benchmark: int
:param bkp_file: CSV file name
:type bkp_file: str
"""
# Make sure all CPUs are online
target.hotplug.online_all()
# Set cpufreq governor to userpace to allow manual frequency scaling
target.cpufreq.set_all_governors('userspace')
bkp_file = os.path.join(te.res_dir, bkp_file)
with open(bkp_file, 'w') as csvfile:
writer = DictWriter(csvfile,
fieldnames=['cluster', 'cpus', 'freq',
'perf', 'energy', 'power'])
# A) For each cluster (i.e. frequency domain) to profile...
power_perf = []
for cl in clusters:
target_cg, _ = target.cgroups.isolate(cl.cpus)
# P-States profiling requires to plug in CPUs one at the time
for cpu in cl.cpus:
target.hotplug.offline(cpu)
# B) For each additional cluster's plugged in CPU...
on_cpus = []
for cnt, cpu in enumerate(cl.cpus):
# Hotplug ON one more CPU
target.hotplug.online(cpu)
on_cpus.append(cpu)
# Ensure online CPUs are part of the target cgroup
# (in case hotplug OFF removes it)
target_cg.set(cpus=on_cpus)
cl_cpus = set(target.list_online_cpus()).intersection(set(cl.cpus))
logging.info('Cluster {:8} (Online CPUs : {})'\
.format(cl.name, list(cl_cpus)))
# C) For each OPP supported by the current cluster
for freq in cl.freqs:
# Set frequency to freq for current CPUs
target.cpufreq.set_frequency(cpu, freq)
# Run the benchmark for the specified number of iterations each time
# collecting a sample of energy consumption and reported performance
energy = 0
perf = 0
for i in xrange(loop_cnt):
te.emeter.reset()
# Run benchmark into the target cgroup
perf += benchmark.run(target_cg.name, cnt + 1)
nrg = te.emeter.report(te.res_dir).channels
energy += nrg[cl.emeter_ch]
sleep(10)
# Compute average energy and performance for the current number of
# active CPUs all running at the current OPP
perf = perf / loop_cnt
energy = energy / loop_cnt
power = energy / benchmark.duration
logging.info(' avg_prf: {:7.3}, avg_pwr: {:7.3}'
.format(perf, power))
# Keep track of this new P-State profiling point
new_row = {'cluster': cl.name,
'cpus': cnt + 1,
'freq': freq,
'perf': perf,
'energy' : energy,
'power': power}
power_perf.append(new_row)
# Save data in a CSV file
writer.writerow(new_row)
# C) profile next P-State
# B) add one more CPU (for the current frequency domain)
# A) Profile next cluster (i.e. frequency domain)
target.hotplug.online_all()
power_perf_df = pd.DataFrame(power_perf)
return power_perf_df.set_index(['cluster', 'freq', 'cpus'])\
.sort_index(level='cluster')
sysbench = Sysbench(target, 10)
loop_cnt = 5
power_perf_df = compute_power_perf(clusters, loop_cnt, sysbench)
def plot_pstates(power_perf_df, cluster):
"""
Plot P-States profiling for the specified cluster.
:param power_perf_df: DataFrame reporting power and performance values
:type power_perf_df: :mod:`pandas.DataFrame`
:param cluster: cluster description
:type cluster: namedtuple(ClusterDescription)
"""
cmap = ColorMap(len(cluster.freqs))
color_map = map(cmap.cmap, range(len(cluster.freqs)))
color_map = dict(zip(cluster.freqs, color_map))
fig, ax = plt.subplots(1, 1, figsize=(16, 10))
grouped = power_perf_df.loc[cluster.name].groupby(level='freq')
for freq, df in grouped:
x = df.index.get_level_values('cpus').tolist()
y = df.power.tolist()
slope, intercept = linfit(x, y)
x.insert(0, 0)
y.insert(0, intercept)
# Plot linear fit of the points
ax.plot(x, [slope*i + intercept for i in x], color=color_map[freq])
# Plot measured points
ax.scatter(x, y, color=color_map[freq], label='{} kHz'.format(freq))
ax.set_title('JUNO {} cluster P-States profiling'.format(cluster.name),
fontsize=16)
ax.legend()
ax.set_xlabel('Active cores')
ax.set_ylabel('Power [$\mu$W]')
ax.set_xlim(-0.5, len(cluster.cpus)+1)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.grid(True)
big_cl = clusters[0]
plot_pstates(power_perf_df, big_cl)
little_cl = clusters[1]
plot_pstates(power_perf_df, little_cl)
###Output
_____no_output_____
###Markdown
Statistics
###Code
def power_perf_stats(power_perf_df):
"""
For each cluster compute per-OPP power and performance statistics.
:param power_perf_df: dataframe containing power and performance numbers
:type power_perf_df: :mod:`pandas.DataFrame`
"""
clusters = power_perf_df.index.get_level_values('cluster')\
.unique().tolist()
stats = []
for cl in clusters:
cl_power_df = power_perf_df.loc[cl].reset_index()
grouped = cl_power_df.groupby('freq')
for freq, df in grouped:
perf = df['perf'] / df['cpus']
power = df['power'] / df['cpus']
energy = df['energy'] / df['cpus']
avg_row = {'cluster': cl,
'freq': freq,
'stats': 'avg',
'perf': perf.mean(),
'power': power.mean(),
'energy': energy.mean()
}
std_row = {'cluster': cl,
'freq': freq,
'stats': 'std',
'perf': perf.std(),
'power': power.std(),
'energy': energy.std()
}
min_row = {'cluster': cl,
'freq': freq,
'stats': 'min',
'perf': perf.min(),
'power': power.min(),
'energy': energy.min()
}
max_row = {'cluster' : cl,
'freq' : freq,
'stats' : 'max',
'perf' : perf.max(),
'power' : power.max(),
'energy': energy.max()
}
c99_row = {'cluster' : cl,
'freq' : freq,
'stats' : 'c99',
'perf' : perf.quantile(q=0.99),
'power' : power.quantile(q=0.99),
'energy': energy.quantile(q=0.99)
}
stats.append(avg_row)
stats.append(std_row)
stats.append(min_row)
stats.append(max_row)
stats.append(c99_row)
stats_df = pd.DataFrame(stats).set_index(['cluster', 'freq', 'stats'])\
.sort_index(level='cluster')
return stats_df.unstack()
pp_stats = power_perf_stats(power_perf_df)
###Output
_____no_output_____
###Markdown
Plots
###Code
def plot_power_perf(pp_stats, clusters):
cmap = ColorMap(len(clusters) + 1)
color_map = map(cmap.cmap, range(len(clusters) + 1))
fig, ax = plt.subplots(1, 1, figsize=(16, 10))
max_perf = pp_stats.perf['avg'].max()
max_power = pp_stats.power['avg'].max()
for i, cl in enumerate(clusters):
cl_df = pp_stats.loc[cl.name]
norm_perf_df = cl_df.perf['avg'] * 100.0 / max_perf
norm_power_df = cl_df.power['avg'] * 100.0 / max_power
x = norm_perf_df.values.tolist()
y = norm_power_df.values.tolist()
ax.plot(x, y, color=color_map[i], marker='o', label=cl.name)
norm_perf_df = cl_df.perf['max'] * 100.0 / max_perf
norm_power_df = cl_df.power['max'] * 100.0 / max_power
x = norm_perf_df.values.tolist()
y = norm_power_df.values.tolist()
ax.plot(x, y, '--', color=color_map[-1])
norm_perf_df = cl_df.perf['min'] * 100.0 / max_perf
norm_power_df = cl_df.power['min'] * 100.0 / max_power
x = norm_perf_df.values.tolist()
y = norm_power_df.values.tolist()
ax.plot(x, y, '--', color=color_map[-1])
ax.set_title('JUNO Power VS Performance curves', fontsize=16)
ax.legend()
ax.set_xlabel('Performance [%]')
ax.set_ylabel('Power [%]')
ax.set_xlim(0, 120)
ax.set_ylim(0, 120)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.grid(True)
plot_power_perf(pp_stats, clusters)
###Output
_____no_output_____
###Markdown
Idle States Profiling
###Code
def compute_idle_power(clusters, loop_cnt, sleep_duration, bkp_file='cstates.csv'):
"""
Perform C-States profiling on each input cluster.
Data will be saved into a CSV file at each iteration such that if something
goes wrong the user can restart the experiment considering only idle_states
that had not been processed.
:param clusters: list of clusters to profile
:type clusters: list(namedtuple(ClusterDescription))
:param loop_cnt: number of loops for each experiment
:type loop_cnt: int
:param sleep_duration: sleep time in seconds
:type sleep_duration: int
:param bkp_file: CSV file name
:type bkp_file: str
"""
# Make sure all CPUs are online
target.hotplug.online_all()
with open(bkp_file, 'w') as csvfile:
writer = DictWriter(csvfile,
fieldnames=['cluster', 'cpus',
'idle_state', 'energy', 'power'])
# Disable frequency scaling by setting cpufreq governor to userspace
target.cpufreq.set_all_governors('userspace')
# A) For each cluster (i.e. frequency domain) to profile...
idle_power = []
for cl in clusters:
target.cgroups.isolate(cl.cpus)
# C-States profiling requires to plug in CPUs one at the time
for cpu in cl.cpus:
target.hotplug.offline(cpu)
# B) For each additional cluster's plugged in CPU...
for cnt, cpu in enumerate(cl.cpus):
# Hotplug ON one more CPU
target.hotplug.online(cpu)
cl_cpus = set(target.list_online_cpus()).intersection(set(cl.cpus))
logging.info('Cluster {:8} (Online CPUs : {})'\
.format(cl.name, list(cl_cpus)))
# C) For each OPP supported by the current cluster
for idle in cl.idle_states:
# Disable all idle states but the current one
for c in cl.cpus:
target.cpuidle.disable_all(cpu=c)
target.cpuidle.enable(idle, cpu=c)
# Sleep for the specified duration each time collecting a sample
# of energy consumption and reported performance
energy = 0
for i in xrange(loop_cnt):
te.emeter.reset()
sleep(sleep_duration)
nrg = te.emeter.report(te.res_dir).channels
energy += nrg[cl.emeter_ch]
# Compute average energy and performance for the current number of
# active CPUs all idle at the current OPP
energy = energy / loop_cnt
                    power = energy / sleep_duration
logging.info(' avg_pwr: {:7.3}'
.format(power))
# Keep track of this new C-State profiling point
new_row = {'cluster': cl.name,
'cpus': cnt + 1,
'idle_state': idle,
'energy': energy,
'power': power}
idle_power.append(new_row)
# Save data in a CSV file
writer.writerow(new_row)
# C) profile next C-State
# B) add one more CPU (for the current frequency domain)
# A) profile next cluster (i.e. frequency domain)
target.hotplug.online_all()
idle_df = pd.DataFrame(idle_power)
return idle_df.set_index(['cluster', 'idle_state', 'cpus']).sort_index(level='cluster')
SLEEP_DURATION = 10
loop_cnt = 5
idle_df = compute_idle_power(clusters, loop_cnt, SLEEP_DURATION)
###Output
2016-09-08 20:02:42,283 INFO : Cluster big - Online CPUs : set([1])
2016-09-08 20:06:04,376 INFO : Cluster big - Online CPUs : set([1, 2])
2016-09-08 20:09:23,459 INFO : Cluster little - Online CPUs : set([0])
2016-09-08 20:13:10,543 INFO : Cluster little - Online CPUs : set([0, 3])
2016-09-08 20:16:36,396 INFO : Cluster little - Online CPUs : set([0, 3, 4])
2016-09-08 20:20:02,271 INFO : Cluster little - Online CPUs : set([0, 3, 4, 5])
###Markdown
Statistics
###Code
WFI = 0
CORE_OFF = 1
def idle_power_stats(idle_df):
"""
For each cluster compute per idle state power statistics.
:param idle_df: dataframe containing power numbers
:type idle_df: :mod:`pandas.DataFrame`
"""
stats = []
for cl in clusters:
cl_df = idle_df.loc[cl.name].reset_index()
# Start from deepest idle state
cl_df = cl_df.sort_values('idle_state', ascending=False)
grouped = cl_df.groupby('idle_state', sort=False)
for state, df in grouped:
energy = df.energy
power = df.power
state_name = "C{}_CLUSTER".format(state)
if state == CORE_OFF:
core_off_nrg_avg = energy.mean()
core_off_pwr_avg = power.mean()
if state == WFI:
energy = df.energy.diff()
energy[0] = df.energy[0] - core_off_nrg_avg
power = df.power.diff()
power[0] = df.power[0] - core_off_pwr_avg
state_name = "C0_CORE"
avg_row = {'cluster': cl.name,
'idle_state': state_name,
'stats': 'avg',
'energy': energy.mean(),
'power': power.mean()
}
std_row = {'cluster': cl.name,
'idle_state': state_name,
'stats': 'std',
'energy': energy.std(),
'power': power.std()
}
min_row = {'cluster' : cl.name,
'idle_state' : state_name,
'stats' : 'min',
'energy' : energy.min(),
'power' : power.min()
}
max_row = {'cluster' : cl.name,
'idle_state' : state_name,
'stats' : 'max',
'energy' : energy.max(),
'power' : power.max()
}
c99_row = {'cluster' : cl.name,
'idle_state' : state_name,
'stats' : 'c99',
'energy' : energy.quantile(q=0.99),
'power' : power.quantile(q=0.99)
}
stats.append(avg_row)
stats.append(std_row)
stats.append(min_row)
stats.append(max_row)
stats.append(c99_row)
stats_df = pd.DataFrame(stats).set_index(
['cluster', 'idle_state', 'stats']).sort_index(level='cluster')
return stats_df.unstack()
idle_stats = idle_power_stats(idle_df)
###Output
_____no_output_____
###Markdown
Plots
###Code
def plot_cstates(idle_power_df, cluster):
"""
Plot C-States profiling for the specified cluster.
:param idle_power_df: dataframe reporting power values in each idle state
:type idle_power_df: :mod:`pandas.DataFrame`
:param cluster: cluster description
:type cluster: namedtuple(ClusterDescription)
"""
n_cpus = len(cluster.cpus)
cmap = ColorMap(len(cluster.idle_states))
color_map = map(cmap.cmap, cluster.idle_states)
color_map = [c for c in color_map for i in xrange(n_cpus)]
cl_df = idle_power_df.loc[cluster.name]
ax = cl_df.power.plot.bar(figsize=(16,8), color=color_map, alpha=0.5,
legend=False, table=True)
idx = 0
grouped = cl_df.groupby(level=0)
for state, df in grouped:
x = df.index.get_level_values('cpus').tolist()
y = df.power.tolist()
slope, intercept = linfit(x, y)
y = [slope * v + intercept for v in x]
x = range(n_cpus * idx, n_cpus * (idx + 1))
ax.plot(x, y, color=color_map[idx*n_cpus], linewidth=4)
idx += 1
ax.grid(True)
ax.get_xaxis().set_visible(False)
ax.set_ylabel("Idle Power [$\mu$W]")
ax.set_title("JUNO {} cluster C-states profiling"\
.format(cluster.name), fontsize=16)
little = clusters[1]
plot_cstates(idle_df, little)
big = clusters[0]
plot_cstates(idle_df, big)
###Output
_____no_output_____
###Markdown
Energy Model Generation
###Code
def pstates_model_df(clusters, pp_stats, power_perf_df, metric='avg'):
"""
Build two data frames containing data to create the energy model for each
cluster given as input.
:param clusters: list of clusters to profile
:type clusters: list(namedtuple(ClusterDescription))
:param pp_stats: power and performance statistics
:type pp_stats: :mod:`pandas.DataFrame`
:param power_perf_df: power and performance data
:type power_perf_df: :mod:`pandas.DataFrame`
"""
max_score = pp_stats.perf[metric].max()
core_cap_energy = []
cluster_cap_energy = []
for cl in clusters:
# ACTIVE Energy
grouped = power_perf_df.loc[cl.name].groupby(level='freq')
for freq, df in grouped:
# Get average energy at OPP freq for 1 CPU
energy_freq_1 = pp_stats.loc[cl.name].loc[freq]['energy'][metric]
# Get cluster energy at OPP freq
x = df.index.get_level_values('cpus').tolist()
y = df.energy.tolist()
slope, intercept = linfit(x, y)
# Energy can't be negative but the regression line may intercept the
# y-axis at a negative value. Im this case cluster energy can be
# assumed to be 0.
cluster_energy = intercept if intercept >= 0.0 else 0.0
core_energy = energy_freq_1 - cluster_energy
# Get score at OPP freq
score_freq = pp_stats.loc[cl.name].loc[freq]['perf'][metric]
capacity = int(score_freq * 1024 / max_score)
core_cap_energy.append({'cluster' : cl.name,
'core': cl.core_name,
'freq': freq,
'cap': capacity,
'energy': core_energy})
cluster_cap_energy.append({'cluster': cl.name,
'freq': freq,
'cap': capacity,
'energy': cluster_energy})
core_cap_nrg_df = pd.DataFrame(core_cap_energy)
cluster_cap_nrg_df = pd.DataFrame(cluster_cap_energy)
return core_cap_nrg_df, cluster_cap_nrg_df
core_cap_nrg_df, cluster_cap_nrg_df = pstates_model_df(clusters,
pp_stats,
power_perf_df)
core_cap_nrg_df
cluster_cap_nrg_df
def energy_model_dict(clusters, core_cap_nrg_df, cluster_cap_nrg_df, metric='avg'):
n_states = len(clusters[0].idle_states)
nrg_dict = {}
grouped = core_cap_nrg_df.groupby('cluster')
for cl, df in grouped:
nrg_dict[cl] = {
"opps" : {},
"core": {
"name": df.core.iloc[0],
"busy-cost": OrderedDict(),
"idle-cost": OrderedDict()
},
"cluster": {
"busy-cost": OrderedDict(),
"idle-cost": OrderedDict()
}
}
# Core COSTS
# ACTIVE costs
for row in df.iterrows():
nrg_dict[cl]["opps"][row[1].cap] = row[1].freq
nrg_dict[cl]["core"]["busy-cost"][row[1].cap] = int(row[1].energy)
# IDLE costs
wfi_nrg = idle_stats.loc[cl].energy[metric][0]
# WFI
nrg_dict[cl]["core"]["idle-cost"][0] = int(wfi_nrg)
# All remaining states are zeroes
for i in xrange(1, n_states):
nrg_dict[cl]["core"]["idle-cost"][i] = 0
# Cluster COSTS
cl_data = cluster_cap_nrg_df[cluster_cap_nrg_df.cluster == cl]
# ACTIVE costs
for row in cl_data.iterrows():
nrg_dict[cl]["cluster"]["busy-cost"][row[1].cap] = int(row[1].energy)
# IDLE costs
# Core OFF is the first valid idle cost for cluster
idle_data = idle_stats.loc[cl].energy[metric]
# WFI (same as Core OFF)
nrg_dict[cl]["cluster"]["idle-cost"][0] = int(idle_data[1])
# All other idle states (from CORE OFF down)
for i in xrange(1, n_states):
nrg_dict[cl]["cluster"]["idle-cost"][i] = int(idle_data[i])
return nrg_dict
nrg_dict = energy_model_dict(clusters, core_cap_nrg_df, cluster_cap_nrg_df)
###Output
_____no_output_____
###Markdown
Device Tree EM Format
###Code
def dump_device_tree(nrg_dict, outfile='sched-energy.dtsi'):
"""
Generate device tree energy model file.
:param nrg_dict: dictionary describing the energy model
:type nrg_dict: dict
:param outfile: output file name
:type outfile: str
"""
with open(os.path.join(te.res_dir, outfile), 'w') as out:
out.write("energy-costs {\n")
idx = 0
for cl_name in nrg_dict.keys():
core = nrg_dict[cl_name]["core"]
# Dump Core costs
out.write("\tCPU_COST_{}: core_cost{} {}\n"\
.format(core["name"], idx, '{'))
# ACTIVE costs
out.write("\t\tbusy-cost-data = <\n")
for cap, nrg in core["busy-cost"].iteritems():
out.write("\t\t\t{} {}\n".format(cap, nrg))
out.write("\t\t>;\n")
# IDLE costs
out.write("\t\tidle-cost-data = <\n")
# arch idle
out.write("\t\t\t{}\n".format(core["idle-cost"][0]))
for nrg in core["idle-cost"].values():
out.write("\t\t\t{}\n".format(nrg))
out.write("\t\t>;\n")
out.write("\t};\n")
# Dump Cluster costs
cl = nrg_dict[cl_name]["cluster"]
out.write("\tCLUSTER_COST_{}: cluster_cost{} {}\n"\
.format(cl_name, idx, '{'))
# ACTIVE costs
out.write("\t\tbusy-cost-data = <\n")
for cap, nrg in cl["busy-cost"].iteritems():
out.write("\t\t\t{} {}\n".format(cap, nrg))
out.write("\t\t>;\n")
# IDLE costs
out.write("\t\tidle-cost-data = <\n")
# arch idle
out.write("\t\t\t{}\n".format(cl["idle-cost"][0]))
for nrg in cl["idle-cost"].values():
out.write("\t\t\t{}\n".format(nrg))
out.write("\t\t>;\n")
out.write("\t};\n")
idx += 1
out.write("};")
###Output
_____no_output_____
###Markdown
C Code EM Format
###Code
def dump_c_code(nrg_dict, cluster_ids, outfile='energy_model.c'):
"""
Generate C code energy model file.
:param nrg_dict: dictionary describing the energy model
:type nrg_dict: dict
:param cluster_ids: mapping between cluster names and cluster IDs
:type cluster_ids: dict
:param outfile: output file name
:type outfile: str
"""
with open(os.path.join(te.res_dir, outfile), 'w') as o:
core_names = []
for cl_name in nrg_dict.keys():
# Dump Core data
core = nrg_dict[cl_name]["core"]
core_names.append(core["name"])
o.write("static struct capacity_state cap_states_core_{}[] = {}\n"\
.format(core["name"], '{'))
o.write("\t/* Power per CPU */\n")
for cap, nrg in core["busy-cost"].iteritems():
o.write("\t {{ .cap = {:5d}, .power = {:5d}, }},\n"\
.format(cap, nrg))
o.write("\t};\n")
o.write("\n")
o.write("static struct idle_state idle_states_core_{}[] = {}\n"\
.format(core["name"], '{'))
# arch idle (same as WFI)
o.write("\t {{ .power = {:5d}, }},\n".format(core["idle-cost"][0]))
for nrg in core["idle-cost"].values():
o.write("\t {{ .power = {:5d}, }},\n".format(nrg))
o.write("\t};\n")
o.write("\n")
# Dump Cluster data
cl = nrg_dict[cl_name]["cluster"]
o.write("static struct capacity_state cap_states_cluster_{}[] = {}\n"\
.format(cl_name, '{'))
o.write("\t/* Power per cluster */\n")
for cap, nrg in cl["busy-cost"].iteritems():
o.write("\t {{ .cap = {:5d}, .power = {:5d}, }},\n"\
.format(cap, nrg))
o.write("\t};\n")
o.write("\n")
o.write("static struct idle_state idle_states_cluster_{}[] = {}\n"\
.format(cl_name, '{'))
# arch idle (same as Core OFF)
o.write("\t {{ .power = {:5d}, }},\n".format(cl["idle-cost"][0]))
for nrg in cl["idle-cost"].values():
o.write("\t {{ .power = {:5d}, }},\n".format(nrg))
o.write("\t};\n")
o.write("\n")
o.write("static struct sched_group_energy energy_cluster_{} = {}\n"\
.format(core["name"], '{'))
o.write("\t.nr_idle_states = ARRAY_SIZE(idle_states_cluster_{}),\n"\
.format(core["name"]))
o.write("\t.idle_states = idle_states_cluster_{},\n"\
.format(core["name"]))
o.write("\t.nr_cap_states = ARRAY_SIZE(cap_states_cluster_{}),\n"\
.format(core["name"]))
o.write("\t.cap_states = cap_states_cluster_{},\n"\
.format(core["name"]))
o.write("};\n")
o.write("\n")
# Array of pointers to CORE sched_group_energy structs
o.write("static struct sched_group_energy *energy_cores[] = {\n")
for cl_name in cluster_ids.values():
o.write("\t&energy_core_{},\n"\
.format(nrg_dict[cl_name]["core"]["name"]))
o.write("};\n")
o.write("\n")
# Array of pointers to CLUSTER sched_group_energy structs
o.write("static struct sched_group_energy *energy_clusters[] = {\n")
for name in cluster_ids.values():
o.write("\t&energy_cluster_{},\n".format(name))
o.write("};\n")
o.write("\n")
o.write("static inline\n")
o.write("const struct sched_group_energy * const cpu_core_energy(int cpu)\n")
o.write("{\n")
o.write("\treturn energy_cores[cpu_topology[cpu].cluster_id];\n")
o.write("}\n")
o.write("\n")
o.write("static inline\n")
o.write("const struct sched_group_energy * const cpu_cluster_energy(int cpu)\n")
o.write("{\n")
o.write("\treturn energy_clusters[cpu_topology[cpu].cluster_id];\n")
o.write("}\n")
###Output
_____no_output_____
###Markdown
JSON EM Format
###Code
def dump_json(nrg_dict, outfile='energy_model.json'):
"""
Generate JSON energy model file.
:param nrg_dict: dictionary describing the energy model
:type nrg_dict: dict
:param outfile: output file name
:type outfile: str
"""
with open(os.path.join(te.res_dir, outfile), 'w') as ofile:
json.dump(nrg_dict, ofile, sort_keys=True, indent=4)
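# The three generators can then be invoked as, e.g.:
# dump_device_tree(nrg_dict)
# dump_c_code(nrg_dict, cluster_ids)
# dump_json(nrg_dict)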
###Output
_____no_output_____ |
003-学习 Pandas/Q&A 问答合集/005-rename-columns.ipynb | ###Markdown
005 - How do I rename columns in pandas? > How do I rename columns in a pandas DataFrame?
###Code
import pandas as pd
ufo = pd.read_csv('https://bit.ly/ufo4cda')
ufo.head()
ufo.columns
ufo.rename(columns={'Colors Reported':'Colors_Reported', 'Shape Reported':'Shape_Reported'}, inplace=True)
ufo.columns
ufo_cols = ['city', 'colors reported', 'shape reported', 'state', 'time']
ufo.columns = ufo_cols
ufo.head()
ufo = pd.read_csv('https://bit.ly/ufo4cda', names = ufo_cols, header=0)
ufo.head()
ufo.columns
ufo.columns = ufo.columns.str.replace(' ','_')
ufo.columns
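# Two more common renaming one-liners (for reference; both are standard pandas API):
# ufo.columns = ufo.columns.str.lower()  # lower-case every column name
# ufo = ufo.add_prefix('ufo_')           # add a prefix to every column name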
###Output
_____no_output_____ |
Notebooks/1-MLPs/3-Exercise_1.ipynb | ###Markdown
Multilayer Perceptrons for Multiclass Classification (Exercise) The DataWe will be using the Glass Identification dataset from [UCI Machine Learning Repository](https://archive.ics.uci.edu/):https://archive.ics.uci.edu/ml/datasets/Glass+Identification**Information**From USA Forensic Science Service, 5 types of glass defined in terms of their oxide content (i.e. Na, Fe, K, etc).The study of classification of types of glass was motivated by criminological investigation. At the scene of the crime, the glass left can be used as evidence... if it is correctly identified!**Attributes**1. RI: refractive index2. Na: Sodium (unit measurement: weight percent in corresponding oxide, as are attributes 4-10)3. Mg: Magnesium4. Al: Aluminum5. Si: Silicon6. K: Potassium7. Ca: Calcium8. Ba: Barium9. Fe: Iron10. Type of glass: (class attribute) - 1. building windows - 2. vehicle windows - 3. containers - 4. tableware - 5. headlamps Table of Contents- [Python libraries](libraries)- [Data Exploration and Feature Engineering](exploration) - [Read the data](read) - [Descriptive statistics](statistics) - [Class balance](balance) - [Correlations](correlation) - [Remove outliers](outliers)- [Label and One-Hot encoding](one-hot) - [Create the `X` and `y` variables](variables) - [Encode target labels](encoders)- [Split the Data](split)- [Normalize the Data](normalize)- [Create the Model](model)- [Train the Model](training) - [Choosing too many epochs and overfit](overfit) - [Early Stopping](early_stop)- [Evaluate the Model](evaluation)- [Predictions](predictions) Python libraries
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data Exploration and Feature Engineering Read the data
###Code
df = pd.read_csv('../../Data/glass.csv').drop(columns='Unnamed: 0')
df.head()
glass_types = {"building_windows": 1,
"vehicle_windows": 2,
"containers": 3,
"tableware": 4,
"headlamps": 5}
df['glass_type_id'] = df['glass_type'].apply(lambda x: glass_types[x])
df.head()
###Output
_____no_output_____
###Markdown
Descriptive statistics**TASK: Show the descriptive statistics of each column.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Class balance**TASK: Check if the classes are balanced. Create a countplot as shown below.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Correlations**TASK: Show the correlation between different attributes. Create a heatmap of the pairwise correlation of columns.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Create a bar plot showing the correlation of the numeric attributes to the new `glass_type_id` column.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Remove outliersSeaborn uses [inter-quartile range](https://en.wikipedia.org/wiki/Interquartile_range) to detect the outliers. What we need to do is to reproduce the same function in the column you want to drop the outliers. We can do that by using the next function.
###Code
def remove_outliers(df, column):
Q1 = df[column].quantile(0.25)
Q3 = df[column].quantile(0.75)
IQR = Q3 - Q1 #IQR is interquartile range.
filter = (df[column] >= Q1 - 1.5 * IQR) & (df[column] <= Q3 + 1.5 *IQR)
return df.loc[filter]
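# Example call, e.g. on the refractive index column inspected below:
# df = remove_outliers(df, 'RI')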
###Output
_____no_output_____
###Markdown
Refractive index**TASK: Create a boxplot showing the relationship between the `glass_type_id` and the `RI` columns.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Remove the outliers using the `remove_outliers` function and create again the boxplot.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Potassium**TASK: Create a boxplot showing the relationship between the `glass_type_id` and the `K` columns.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Remove the outliers using the `remove_outliers` function and create again the boxplot.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Label and One-Hot encoding. For multiclass classification we have to represent categorical data in the form of binary vectors. Create the `X` and `y` variables. **TASK: Create the `X` and `y` variables by taking the `.values` of the numerical features and labels, respectively. Take as labels the `glass_type` column.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Display the shapes of the `X` and `y` variables and the first 5 labels.**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____
###Markdown
Encode target labels **TASK: Import [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) and [`OneHotEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder) from `sklearn`.**
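A minimal toy illustration of these two encoders on made-up labels (not the exercise solution):
###Code
# Toy example only: three classes encoded first as integers, then as one-hot rows
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
toy_labels = ['containers', 'headlamps', 'containers', 'tableware']
toy_int = LabelEncoder().fit_transform(toy_labels)            # [0, 1, 0, 2]
toy_onehot = OneHotEncoder().fit_transform(toy_int.reshape(-1, 1)).toarray()
print(toy_int)
print(toy_onehot)
###Output
_____no_output_____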
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Label Encoder**TASK: Use a `LabelEncoder` to encode target labels in `y` with value between `0` and `n_classes-1`. Display the shape of the new `y` variable and the first 5 values.**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____
###Markdown
One-Hot Encoder**TASK: Use a `OneHotEncoder` to encode the new categorical features of `y` as a one-hot numeric array. Display the shape of the new `y` variable and the first 5 values.**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____
###Markdown
Split the Data**TASK: Import [`train_test_split`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) from `sklearn`.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Perform a train/test split with `test_size=0.25` and a `random_state=42`. Display the shapes of the `X_train` and `y_train` variables.**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____
###Markdown
Normalize the Data**TASK: Import [`MinMaxScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) from `sklearn`.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Use a `MinMaxScaler` to normalize the `X_train` and `X_test` values.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Create the Model**TASK: Import [`Sequential`](https://www.tensorflow.org/guide/keras/sequential_model) model and [`Dense`](https://keras.io/api/layers/core_layers/dense/) layer from `tensorflow.keras`.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Build a sequential model with a dense hidden layer of 10 neurons and a dense output layer of 5 neurons. As we are dealing with a multiclass classification task use the [`softmax`](https://en.wikipedia.org/wiki/Softmax_function) activation function in the output layer and the `categorical_crossentropy` loss. Add also the `accuracy` as an [additional metric](https://keras.io/api/metrics/).**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Train the Model Choosing too many epochs and overfit **TASK: Train the model for 2000 epochs. Don't forget to include the validation data.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Check if the model overfits:**TASK: Plot the training and validation loss.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Plot the training and validation accuracy.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Early Stopping. Let's use early stopping to track the val_loss and stop training once it begins increasing too much! **TASK: Import [`EarlyStopping`](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping) callback from `tensorflow.keras`.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Create the `EarlyStopping` callback.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Train the model for 2000 epochs with the `EarlyStopping` callback.**
###Code
model = Sequential()
model.add(Dense(10,activation='relu'))
model.add(Dense(5,activation='softmax'))
# For a multi-class classification problem
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Plot the training and validation loss.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: Plot the training and validation accuracy.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
**TASK: OPTIONAL: Save your model.**
###Code
# CODE HERE
###Output
_____no_output_____
###Markdown
Evaluate the ModelCheck common classification metrics [here](https://scikit-learn.org/stable/modules/model_evaluation.htmlclassification-metrics).**TASK: Create predictions from the `X_test` dataset and display a `classification_report` and `confusion_matrix` for the `X_test` dataset. Notice that the predictions are not one-hot encoded**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____
###Markdown
**[Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall)** **[Confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix)** Predictions **TASK: Predict the glass type of the last value in the `DataFrame`. Did the prediction match the actual result?**
###Code
# CODE HERE
# CODE HERE
# CODE HERE
# CODE HERE
###Output
_____no_output_____ |
src/transformers/models/bart/BART-Extended_Draft.ipynb | ###Markdown
BART-Extended architecture The goal of this Notebook is to evaluate how to load BART extended model
###Code
import transformers.models.bart.modeling_bart_edited as BartExtended
from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
pretrained_bart = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough."
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
bart_extended = BartExtended.BartExtendedForConditionalGeneration(pretrained_bart.config)
# Generate Summary
summary_ids = pretrained_bart.generate(inputs['input_ids'], num_beams=4, max_length=20, early_stopping=True)
print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
# Generate Summary
summary_ids = bart_extended.generate(inputs['input_ids'], num_beams=4, max_length=20, early_stopping=True)
print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
def init_bart_extended_layer(bart_extended_layer, bart_layer):
# copy weights
bart_extended_layer.self_attn.load_state_dict(bart_layer.self_attn.state_dict())
bart_extended_layer.self_attn_layer_norm.load_state_dict(bart_layer.self_attn_layer_norm.state_dict())
bart_extended_layer.encoder_source_attn.load_state_dict(bart_layer.encoder_attn.state_dict())
bart_extended_layer.encoder_source_attn_layer_norm.load_state_dict(bart_layer.encoder_attn_layer_norm.state_dict())
bart_extended_layer.encoder_knowledge_attn.load_state_dict(bart_layer.encoder_attn.state_dict())
bart_extended_layer.encoder_knowledge_attn_layer_norm.load_state_dict(bart_layer.encoder_attn_layer_norm.state_dict())
bart_extended_layer.fc1.load_state_dict(bart_layer.fc1.state_dict())
bart_extended_layer.fc2.load_state_dict(bart_layer.fc2.state_dict())
bart_extended_layer.final_layer_norm.load_state_dict(bart_layer.final_layer_norm.state_dict())
def init_bart_extended_decoder(extended_decoder, pretrained_decoder):
# Initializing Embedding layers
extended_decoder.embed_tokens.load_state_dict(pretrained_decoder.embed_tokens.state_dict())
extended_decoder.embed_positions.load_state_dict(pretrained_decoder.embed_positions.state_dict())
# Initializing layers
for extended_layer, pretrained_layer in zip(extended_decoder.layers, pretrained_decoder.layers):
init_bart_extended_layer(extended_layer, pretrained_layer)
# Initializing Layer normalization layer
extended_decoder.layernorm_embedding.load_state_dict(pretrained_decoder.layernorm_embedding.state_dict())
extended_decoder = BartExtended.BartExtendedDecoder(pretrained_bart.config)
init_bart_extended_decoder(extended_decoder, pretrained_bart.model.decoder)
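# Optional sanity check (illustrative, requires `import torch`): a copied tensor
# should now match its source, e.g.
# torch.allclose(extended_decoder.embed_tokens.weight,
#                pretrained_bart.model.decoder.embed_tokens.weight)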
# Scratch version of the two encoders at module level (no `self` outside a class);
# note the pretrained encoder lives under `pretrained_bart.model.encoder`
encoder_source = BartExtended.BartEncoder(pretrained_bart.config)
encoder_source.load_state_dict(pretrained_bart.model.encoder.state_dict())
encoder_knowledge = BartExtended.BartEncoder(pretrained_bart.config)
encoder_knowledge.load_state_dict(pretrained_bart.model.encoder.state_dict())
import torch.nn as nn

class Bart_Extended_Model(nn.Module):
    def __init__(self, config: BartConfig):
        super().__init__()  # nn.Module.__init__ takes no config argument
        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        # Two copies of the pretrained encoder: one for the source document,
        # one for the external knowledge input
        self.encoder_source = BartExtended.BartEncoder(pretrained_bart.config)
        self.encoder_source.load_state_dict(pretrained_bart.model.encoder.state_dict())
        self.encoder_knowledge = BartExtended.BartEncoder(pretrained_bart.config)
        self.encoder_knowledge.load_state_dict(pretrained_bart.model.encoder.state_dict())
        # Extended decoder, initialized from the pretrained decoder weights
        self.decoder = BartExtended.BartExtendedDecoder(pretrained_bart.config)
        init_bart_extended_decoder(self.decoder, pretrained_bart.model.decoder)

    def forward(self, x):
        # Placeholder left over from a CNN example in the draft; the real forward
        # pass should run both encoders and feed their outputs to the decoder.
        raise NotImplementedError
# NOTE: this import only works if the extended classes have been registered
# inside the local transformers installation (see modeling_bart_edited above)
from transformers import BartTokenizer, BartExtendedForConditionalGeneration, BartConfig
BART_Extended_model = BartExtendedForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough."
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
# Generate Summary
summary_ids = BART_Extended_model.generate(inputs['input_ids'], num_beams=4, max_length=20, early_stopping=True)
print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
# Inspect the copied weights (assumes the edited module exposes `encoder_source`
# and `encoder_knowledge` attributes on the extended model)
model = bart_extended.model
fc_kk = model.encoder_source.layers[0].fc1
fc_kk.weight
bart_extended.model.encoder_knowledge.layers[0].fc1.weight
###Output
_____no_output_____
###Markdown
- Initialize the BART model with cnn_dm pretrained model- Define a function to copy layers from the pretrained BART model to the BART_Extended model- Make sure that the architecture is properly created https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/modeling_bart.py
###Code
import copy

class BART_Extended(nn.Module):
    def __init__(self):
        super(BART_Extended, self).__init__()
        # nn.Module has no .copy(); deep-copy the pretrained encoder so the two
        # encoders can later be fine-tuned independently
        self.encoder_source = copy.deepcopy(pretrained_bart.model.encoder)
        self.encoder_knowledge = copy.deepcopy(pretrained_bart.model.encoder)
        # Extended decoder, initialized from the pretrained decoder weights
        self.decoder = BartExtended.BartExtendedDecoder(pretrained_bart.config)
        init_bart_extended_decoder(self.decoder, pretrained_bart.model.decoder)

    def forward(self, x):
        # Placeholder (copied from a CNN example in the draft); the real forward
        # should run both encoders and feed their outputs to the decoder.
        raise NotImplementedError
copy.deepcopy(pretrained_bart.model.encoder)
# from transformers import BartForConditionalGeneration, BartTokenizer
#
# model = BartForConditionalGeneration.from_pretrained("facebook/bart-large")#, force_bos_token_to_be_generated=True)
# tok = BartTokenizer.from_pretrained("facebook/bart-large")
# example_english_phrase = "UN Chief Says There Is No <mask> in Syria"
# batch = tok(example_english_phrase, return_tensors='pt')
# generated_ids = model.generate(batch['input_ids'])
#assert tok.batch_decode(generated_ids, skip_special_tokens=True) == ['UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria']
# tok.batch_decode(generated_ids, skip_special_tokens=True)
###Output
_____no_output_____ |
PyTorch_examples/pytorch-Deep-Learning-master/12-regularization.ipynb | ###Markdown
Regularisation in NNs Before we start doing anything, I think it's important to understand, for NLP, the intuitive process we follow when processing our data in the IMDB dataset: 1. Tokenization: break sentence into individual words - Before: `"PyTorch seems really easy to use!"` - After: `["PyTorch", "seems", "really", "easy", "to", "use", "!"]` 2. Building vocabulary: build an index of words associated with unique numbers - Before: `["PyTorch", "seems", "really", "easy", "to", "use", "!"]` - After: `{"Pytorch": 0, "seems": 1, "really": 2, ...}` 3. Convert to numerals: map words to unique numbers (indices) - Before: `{"Pytorch": 0, "seems": 1, "really": 2, ...}` - After: `[0, 1, 2, ...]` 4. Embedding look-up: map sentences (indices now) to fixed matrices - ```[[0.1, 0.4, 0.3], [0.8, 0.1, 0.5], ...]```
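A minimal, library-free sketch of steps 1-3 above (torchtext will handle all of this for us below):
###Code
# Illustrative only: naive whitespace tokenization, vocabulary building and numericalization
sentence = "PyTorch seems really easy to use !"
tokens = sentence.split()                                            # 1. tokenization
vocab = {tok: idx for idx, tok in enumerate(dict.fromkeys(tokens))}  # 2. build vocabulary
indices = [vocab[tok] for tok in tokens]                             # 3. convert to numerals
print(vocab)
print(indices)
###Output
_____no_output_____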
###Code
# Critical plotting imports
import matplotlib.pyplot as plt
%matplotlib inline
# PyTorch imports
from torchtext import data, datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
# Checking for iterable objects
import collections
import random
# Set seed
torch.manual_seed(1337)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1337)
# Set plotting style
plt.style.use(('dark_background', 'bmh'))
plt.rc('axes', facecolor='none')
plt.rc('figure', figsize=(16, 4))
# Create instances of fields
# The important field here is fix_length: all examples using this field will be padded to, or None for flexible sequence lengths
# We are fixing this because we will be using a FNN not an LSTM/RNN/GRU where we can go through uneven sequence lengths
max_len = 80
text = data.Field(sequential=True, fix_length=max_len, batch_first=True, lower=True, dtype=torch.long)
label = data.LabelField(sequential=False, dtype=torch.float)
# Calling splits() class method of datasets.IMDB to return a torchtext.data.Dataset object
datasets.IMDB.download('./')
ds_train, ds_test = datasets.IMDB.splits(text, label, path='./imdb/aclImdb/')
# Training and test set each 25k samples
# 2 fields due to the way we split above
print('train : ', len(ds_train))
print('test : ', len(ds_test))
print('train.fields :', ds_train.fields)
# Get validation set
seed_num = 1337
ds_train, ds_valid = ds_train.split(random_state=random.seed(seed_num))
# Now we've training, validation and test set
print('train : ', len(ds_train))
print('valid : ', len(ds_valid))
print('valid : ', len(ds_test))
# Build vocabulary
# num_words = 25000
num_words = 1000
text.build_vocab(ds_train, max_size=num_words)
label.build_vocab(ds_train)
# Print vocab size
print('Vocabulary size: {}'.format(len(text.vocab)))
print('Label size: {}'.format(len(label.vocab)))
# Print most common vocabulary text
most_common_samples = 10
print(text.vocab.freqs.most_common(most_common_samples))
# Print most common labels
print(label.vocab.freqs.most_common())
# Sample 0 label
ds_train[0].label
# Sample 0 text: broken down into individual portions
ds_train[0].text
# Sample 0 text: human readeable sample
def show_text(sample):
print(' '.join(word for word in sample))
show_text(ds_train[0].text)
# Create and iterable object for our training, validation and testing datasets
# Batches examples of similar lengths together that minimizes amount of padding needed
batch_size = 64 # Change batch size from 1 to bigger number once explanation is done
train_loader, valid_loader, test_loader = data.BucketIterator.splits(
(ds_train, ds_valid, ds_test), batch_size=batch_size, sort_key=lambda x: len(x.text), repeat=False
)
# Check if iterator above is an iterable which should show True
isinstance(train_loader, collections.abc.Iterable)
# What's inside this iteratable object? Our text and label although now everything is in machine format (not "words") but in numbers!
# The text we saw above becomes a matrix of size 1 x 80 represented by the fixed length we defined before that
list(train_loader)[0]
# Alternative to above, this is much faster but the above code is easy to understand and implement
next(train_loader.__iter__())
test_batch = next(train_loader.__iter__())
# What methods can we call on this batch object? Text and label
test_batch.fields
# Let's break this down to check what's in a batch
test_batch.text
# 1 comment per batch, each comment is limited to a size of 80 as we've defined
test_batch.text.size()
test_batch.label
# Extremely weird problem in torchtext where BucketIterator returns a Batch object versus just a simple tuple of tensors containing our text index and labels
# So let's fix this with a new class FixBatchGenerator
class FixBatchGenerator:
def __init__(self, dl, x_field, y_field):
self.dl, self.x_field, self.y_field = dl, x_field, y_field
def __len__(self):
return len(self.dl)
def __iter__(self):
for batch in self.dl:
X = getattr(batch, self.x_field)
y = getattr(batch, self.y_field)
yield (X,y)
train_loader, valid_loader, test_loader = FixBatchGenerator(train_loader, 'text', 'label'), FixBatchGenerator(valid_loader, 'text', 'label'), FixBatchGenerator(test_loader, 'text', 'label')
# Text index
print(next(train_loader.__iter__())[0])
# Text label
print(next(train_loader.__iter__())[1])
class FeedforwardNeuralNetModel(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super(FeedforwardNeuralNetModel, self).__init__()
# Embedding layer
self.embedding = nn.Embedding(input_dim, embedding_dim)
# Linear function
self.fc1 = nn.Linear(embedding_dim*embedding_dim, hidden_dim)
# Linear function (readout)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
# Embedding
embedded = self.embedding(x)
embedded = embedded.view(-1, embedding_dim*embedding_dim)
# Linear function
out = self.fc1(embedded)
# Non-linearity
out = torch.relu(out)
# Toggle 3: Dropout
        # out = F.dropout(out, p=0.8, training=self.training)
# Linear function (readout)
# Take note here use a final sigmoid function so your loss should not go through sigmoid again.
# BCELoss is the right class to use as it doesn't pass your output through a sigmoid function again.
        # In multi-class problems you're used to softmax, which simplifies to a logistic
        # function when you have a two-class problem.
out = self.fc2(out)
out = torch.sigmoid(out)
return out
input_dim = num_words + 2
embedding_dim = max_len
hidden_dim = 32
output_dim = 1
# Instantiate model class and assign to object
model = FeedforwardNeuralNetModel(input_dim, embedding_dim, hidden_dim, output_dim)
# Push model to CUDA device if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Loss function
criterion = nn.BCELoss()
# Optimizer
# Toggle 2: L2 Norm option - this is called weight decay
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.005)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Number of groups of parameters
print('Number of groups of parameters {}'.format(len(list(model.parameters()))))
print('-'*50)
# Print parameters
for i in range(len(list(model.parameters()))):
print(list(model.parameters())[i].size())
print('-'*50)
iter = 0
num_epochs = 10
history_train_acc, history_val_acc, history_train_loss, history_val_loss = [], [], [], []
best_accuracy = 0
for epoch in range(num_epochs):
# print('-'*50)
for i, (samples, labels) in enumerate(train_loader):
# Training mode
model.train()
# Load samples
samples = samples.view(-1, max_len).to(device)
labels = labels.view(-1, 1).to(device)
# Clear gradients w.r.t. parameters
optimizer.zero_grad()
# Forward pass to get output/logits
outputs = model(samples)
# Calculate Loss: softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Toggle 1: L1 norm, add to original loss
# fc1_params = torch.cat([x.view(-1) for x in model.fc1.parameters()])
# loss += 0.001 * torch.norm(fc1_params, 1)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
iter += 1
if iter % 100 == 0:
# Get training statistics
train_loss = loss.data.item()
# Testing mode
model.eval()
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for samples, labels in valid_loader:
# Load samples
samples = samples.view(-1, max_len).to(device)
labels = labels.view(-1).to(device)
# Forward pass only to get logits/output
outputs = model(samples)
# Val loss
val_loss = criterion(outputs.view(-1, 1), labels.view(-1, 1))
# We use a threshold to define.
# There is another way to do this with one-hot label. Feel free to explore and understand what are the pros/cons of each.
# This opens up a whole topic on why it becomes problematic when we expand beyond 2 class to 10 classes.
# Why do we encode? Why can't we do 0, 1, 2, 3, 4 etc. without one-hot encoding?
predicted = outputs.ge(0.5).view(-1)
# Total number of labels
total += labels.size(0)
# Total correct predictions
correct += (predicted.type(torch.FloatTensor).cpu() == labels.type(torch.FloatTensor)).sum().item()
# correct = (predicted == labels.byte()).int().sum().item()
accuracy = 100. * correct / total
# Print Loss
print('Iter: {} | Train Loss: {} | Val Loss: {} | Val Accuracy: {}'.format(iter, train_loss, val_loss.item(), round(accuracy, 2)))
# Append to history
history_val_loss.append(val_loss.data.item())
history_val_acc.append(round(accuracy, 2))
history_train_loss.append(train_loss)
# Save model when accuracy beats best accuracy
if accuracy > best_accuracy:
best_accuracy = accuracy
# We can load this best model on the validation set later
torch.save(model.state_dict(), 'best_model.pth')
# Plotting loss graph
plt.plot(history_train_loss, label='Train')
plt.plot(history_val_loss, label='Validation')
plt.title('Loss Graph')
plt.legend()
plt.show()
# Plotting validation accuracy graph
plt.plot(history_val_acc)
plt.title('Validation Accuracy')
weights = torch.Tensor().to(device)
for param_group in list(model.parameters()):
weights = torch.cat((param_group.view(-1), weights))
print(param_group.size())
# Toggle 0: No regularization
weights_nothing = weights.cpu().detach().numpy()
# Toggle 1: L1 norm on FC1
# weights_L1 = weights.detach().numpy()
# Toggle 2: L2 norm
# weights_L2 = weights.detach().numpy()
# Toggle 3: dropout
# weights_dropout = weights.detach().numpy()
# plt.hist(weights_L1.reshape(-1), range=(-.5, .5), bins=20)
# plt.hist(weights_nothing.reshape(-1), range=(-.5, .5), bins=20)
# Show weight distribution
plt.hist((
weights_nothing.reshape(-1),
weights_L1.reshape(-1),
weights_L2.reshape(-1),
), 49, range=(-.5, .5), label=(
'No-reg',
'L1',
'L2',
))
plt.legend();
###Output
_____no_output_____ |
.ipynb_checkpoints/M3_Reco_Exploration-checkpoint.ipynb | ###Markdown
M3 Reconstruction Training a Neural Network to identify the best M3 combo. By Zach Shelton, 4/21/2021
###Code
#NanoAOD HackSchema
#Solution from Danny Noonan
from __future__ import print_function, division
import uproot
import numpy as np
#Make sure to install both old awkward0 and new awkward1(referred to now as awkward)
import awkward1 as ak
import awkward0 as ak0
from coffea.nanoevents import NanoAODSchema,NanoEventsFactory
from uproot3_methods import TLorentzVectorArray
import uproot3_methods
import numpy as np
import coffea.hist as hist
import matplotlib.pyplot as plt
import awkward
class HackSchema(NanoAODSchema):
def __init__(self, base_form):
base_form["contents"].pop("Muon_fsrPhotonIdx", None)
base_form["contents"].pop("Electron_photonIdx", None)
super().__init__(base_form)
def m3_recon(tree):
comb= ak.combinations(tree,n=3,axis=1,fields=['j1','j2','j3'])
trijets= comb.j1+comb.j2+comb.j3
recon =ak.max(trijets,axis=1)
reconfinal=np.sqrt(recon.t*recon.t-recon.x*recon.x-recon.y*recon.y-recon.z*recon.z)
list1= ak.to_numpy(reconfinal)
return list1
files ="TTbarPowheg_Semilept_Skim_NanoAOD_1of21.root"
import coffea.processor as processor
from pprint import pprint
file=uproot.open(files)
nEvents=file['hEvents'].values[0]+file['hEvents'].values[2]
from pprint import pprint
###Output
C:\Users\zshel\anaconda3\envs\top_tag1\lib\site-packages\awkward0\__init__.py:23: FutureWarning: Consider switching from 'awkward0' to 'awkward', since the new interface became the default in 2020.
pip install -U awkward
In Python:
>>> import awkward as ak
>>> new_style_array = ak.from_awkward0(old_style_array)
>>> old_style_array = ak.to_awkward0(new_style_array)
FutureWarning
###Markdown
Note: It seems the Jet columns are sorted from greatest p_t to smallest p_t. Feel free to test, but it seems to be my observation; choosing the 1st, 2nd or 3rd jet via index should remove the issue of it being a coffea sorting artifact or procedure
###Code
#Now let's redo with the cuts detailed by CMS Draft Analysis
#https://drive.google.com/file/d/1XEOLyZ-Q1HdEQY379RpyyQkOF1Q8KlsL/view
events =NanoEventsFactory.from_root(files,schemaclass=HackSchema).events()
events.GenPart.pdgId
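# (Added sketch) Quick check of the p_t-ordering claim above: if jets are stored
# in descending p_t, argsort in descending order should give [0, 1, 2, ...] per event.
pt_order = ak.argsort(events.Jet.pt, axis=1, ascending=False)
print(pt_order[:5])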
#Condensing All Cuts to a single Cell
tight_jets=events.Jet
print(tight_jets)
jetSel = ak.num(tight_jets[((tight_jets.pt>30)&(tight_jets.eta<2.4)&(tight_jets.eta>-2.4))],axis=1)>=3
jetSelection=(jetSel&(ak.num(tight_jets.btagCSVV2>.4184)>=1))
#Condensing_all Lepton_cuts
tight_muons = events.Muon
muonsel=ak.num(tight_muons[((tight_muons.pt>30)&(abs(tight_muons.eta)<2.4))],axis=1)==1
tight_electrons= events.Electron
electronsel=ak.num(tight_electrons[((tight_electrons.pt>35)&(abs(tight_electrons.eta)<2.4))],axis=1)==1
leptonsel=(muonsel|electronsel)
print(leptonsel)
jetlepselmask = (jetSelection&leptonsel)
print((jetlepselmask))
print(events[jetlepselmask])
final=events[jetlepselmask]
#postcuts_m3=m3_recon(events[jetlepselmask].Jet)
events =NanoEventsFactory.from_root(files,schemaclass=HackSchema).events()
events.Jet.fields
#events.Jet.
jets = ak.zip({"pt":final.Jet.pt[:,0:8],"eta":final.Jet.eta[:,0:8],"phi":final.Jet.phi[:,0:8],"mass":final.Jet.mass[:,0:8],"btag":final.Jet.btagCSVV2[:,0:8]})
jets.fields
#First cut Combos without b-tagged
#This will become my data tensor I pass to a Neural Net, pending additions of more(the combinations):
comb= ak.combinations(jets,n=3,axis=1,highlevel=1)
truthcomb=ak.combinations(final.GenJet.partonFlavour[:,0:8],n=3,axis=1)
truth={'b~':-5,'b':5,'s':3,'s~':-3,'c':4,'c~':-4,'non-jet':0,'d':1,'d~':-1,'t':6,'t~':-6,'g':21,'g~':-21,'u':2,'u~':-2}
test =truthcomb[1]
#Absolute Value of Truth Terms should add up to 11,13,15 corresponding to d,s,b
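# (Added note) Using the |pdgId| values from the truth dict above (b=5, s=3, d=1), the
# intended combinations are two b jets plus one light jet: 5+5+1=11 (d), 5+5+3=13 (s),
# 5+5+5=15 (b). Other |pdgId| sums can also hit these totals (e.g. 5+3+3=11), so this
# selection is approximate.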
sumray=(abs(truthcomb['0'])+abs(truthcomb['1'])+abs(truthcomb['2']))
m1=sumray==11
m2=sumray==13
m3=sumray==15
mask=m1|m2|m3
#mask is a 35xN Awkward array.
TruthValues=mask
pprint(mask[1])
###Output
<Array [True, False, False, ... False, False] type='35 * bool'>
###Markdown
Notes for work
- Jet TightID
- More
  - Particle Values
  - Delta R

Wrap into Coffea Executor. Separate values, weights and triggers; read these into tensors for Keras.

Best ML Algorithm?
- Deep Neural Net
- Iterative Boosted Tree
  - They are fast
  - External ML algorithm modifies BDT parameters
- Combine old processes together

Should I use Keras or PyTorch?
_______________________________________________________________________________
Running Notes and questions
- Standardizing the "size": tensorflow has a ragged tensor, which is tf's variable-size data array. I keep getting the following output
- Awkward doesn't have native access to ndim? That seems not correct; not sure if it's my implementation.
###Code
#Create truth groups 2 bjets and 1 light(gluon?)
###Output
_____no_output_____ |
matplotlib/gallery_jupyter/axisartist/demo_ticklabel_alignment.ipynb | ###Markdown
Demo Ticklabel Alignment
###Code
import matplotlib.pyplot as plt
import mpl_toolkits.axisartist as axisartist
def setup_axes(fig, rect):
ax = axisartist.Subplot(fig, rect)
fig.add_subplot(ax)
ax.set_yticks([0.2, 0.8])
ax.set_yticklabels(["short", "loooong"])
ax.set_xticks([0.2, 0.8])
ax.set_xticklabels([r"$\frac{1}{2}\pi$", r"$\pi$"])
return ax
fig = plt.figure(figsize=(3, 5))
fig.subplots_adjust(left=0.5, hspace=0.7)
ax = setup_axes(fig, 311)
ax.set_ylabel("ha=right")
ax.set_xlabel("va=baseline")
ax = setup_axes(fig, 312)
ax.axis["left"].major_ticklabels.set_ha("center")
ax.axis["bottom"].major_ticklabels.set_va("top")
ax.set_ylabel("ha=center")
ax.set_xlabel("va=top")
ax = setup_axes(fig, 313)
ax.axis["left"].major_ticklabels.set_ha("left")
ax.axis["bottom"].major_ticklabels.set_va("bottom")
ax.set_ylabel("ha=left")
ax.set_xlabel("va=bottom")
plt.show()
###Output
_____no_output_____ |
tf/Cond_prob.ipynb | ###Markdown
Conditional Probability of touch type | trial type and choice | touch type, trial type
###Code
# Import libraries
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import confusion_matrix
# load pro/ret, trial type and choice data
tt = pd.read_csv('~/work/whiskfree/data/tt_36_subset_sorted.csv',header=None)
ch = pd.read_csv('~/work/whiskfree/data/ch_36_subset_sorted.csv',header=None)
proret = pd.read_csv('~/work/whiskfree/data/proret_36_subset_sorted.csv',header=None)
tt = tt.values.reshape(-1,1)
ch = ch.values.reshape(-1,1)
proret = proret.values.reshape(-1,1)
cm_tt = confusion_matrix(tt,proret)
def labelled_image(cm):
with sns.axes_style("white"):
plt.imshow(cm,interpolation='none')
for i in range(0,3):
for j in range(0,3):
plt.text(j, i, "{0:.2f}".format(cm[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5))
xlabels = ['Retraction','Protraction','No Touch']
ylabels = ['Posterior','Anterior','No Go']
plt.title('Touch type | Trial type')
plt.xlabel('Touch type')
plt.ylabel('Trial type')
plt.xticks([0,1,2],xlabels)
plt.yticks([0,1,2],ylabels)
labelled_image(cm_tt)
cm_tt/np.sum(cm_tt)
print(cm_tt)
print(sum(cm_tt.T))
norm_cm_tt = cm_tt.T/sum(cm_tt.T)
norm_cm_tt = norm_cm_tt.T
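# (Added note) dividing each row by its row total turns the raw counts into the
# conditional distribution P(touch type | trial type), which is how the heatmap
# below is labelled.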
198/(198+28+83)
81/(81+110+66)
labelled_image(norm_cm_tt)
plt.title('P(Touch type|Trial type)')
norm_cm_tch = cm_tt/sum(cm_tt)
labelled_image(norm_cm_tch)
plt.title('P(Trial type | Touch type)')
###Output
_____no_output_____
###Markdown
Now compute P (choice | trial type, touch type)
###Code
ch_given_ttpr = np.zeros([3,3,3])
for i in range(len(tt)):
tt_i = tt[i]
ch_i = ch[i]
pr_i = proret[i]
ch_given_ttpr[tt_i-1,pr_i-1,ch_i-1] += 1
x = plt.hist(ch)
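# (Added sketch) normalise the count tensor along the choice axis to obtain
# P(choice | trial type, touch type); (trial, touch) cells with no observations
# are left at zero.
totals = ch_given_ttpr.sum(axis=2, keepdims=True)
p_ch_given_ttpr = np.divide(ch_given_ttpr, totals, out=np.zeros_like(ch_given_ttpr), where=totals > 0)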
labelled_image(ch_given_ttpr[:,:,0])
plt.title('Trial type | Touch type, Choice = 0')
print(np.sum(ch_given_ttpr[:,:,0]))
labelled_image(ch_given_ttpr[:,:,1])
plt.title('Trial type | Touch type, Choice = 1')
print(np.sum(ch_given_ttpr[:,:,1]))
labelled_image(ch_given_ttpr[:,:,2])
plt.title('Trial type | Touch type, Choice = 2')
print(np.sum(ch_given_ttpr[:,:,2]))
for i in range(3):
print(i)
plt.plot(ch[:100])
labelled_image(confusion_matrix(tt,ch))
plt.xticks([0,1,2],ylabels)
###Output
_____no_output_____ |
notebooks/autoencoders/CIFAR10/one_anomaly_detector.ipynb | ###Markdown
Fashion MNIST
###Code
from keras.datasets import fashion_mnist
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_squared_error
_, (fashion_x_test, _) = fashion_mnist.load_data()
fashion_x_test = fashion_x_test.astype('float32') / 255.
fashion_x_test = np.reshape(fashion_x_test, (len(fashion_x_test), 28, 28, 1))
show_10_images(fashion_x_test)
show_10_images(autoencoder.predict(fashion_x_test))
labels = len(x_test) * [0] + len(fashion_x_test) * [1]
test_samples = np.concatenate((x_test, fashion_x_test))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
###Output
AUROC: 0.99937089
###Markdown
EMNIST Letters
###Code
from torchvision.datasets import EMNIST
emnist_letters = EMNIST('./', "letters", train=False, download=True)
emnist_letters = emnist_letters.test_data.numpy()
emnist_letters = emnist_letters.astype('float32') / 255.
emnist_letters = np.swapaxes(emnist_letters, 1, 2)
emnist_letters = np.reshape(emnist_letters, (len(emnist_letters), 28, 28, 1))
show_10_images(emnist_letters)
show_10_images(autoencoder.predict(emnist_letters))
labels = len(x_test) * [0] + len(emnist_letters) * [1]
test_samples = np.concatenate((x_test, emnist_letters))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
###Output
AUROC: 0.9604927475961538
###Markdown
Gaussian Noise
###Code
mnist_mean = np.mean(x_train)
mnist_std = np.std(x_train)
gaussian_data = np.random.normal(mnist_mean, mnist_std, size=(10000, 28, 28, 1))
show_10_images(gaussian_data)
show_10_images(autoencoder.predict(gaussian_data))
labels = len(x_test) * [0] + len(gaussian_data) * [1]
test_samples = np.concatenate((x_test, gaussian_data))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
###Output
AUROC: 1.0
###Markdown
Uniform Noise
###Code
import math
b = math.sqrt(3.) * mnist_std
a = -b + mnist_mean
b += mnist_mean
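# (Added note) a uniform distribution on [a, b] has std (b - a) / sqrt(12), so using
# a half-width of sqrt(3) * std centred on the MNIST mean matches both the mean and
# the std of the training images.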
uniform_data = np.random.uniform(low=a, high=b, size=(10000, 28, 28, 1))
show_10_images(uniform_data)
show_10_images(autoencoder.predict(uniform_data))
labels = len(x_test) * [0] + len(uniform_data) * [1]
test_samples = np.concatenate((x_test, uniform_data))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
###Output
AUROC: 1.0
|
ctypes Tips.ipynb | ###Markdown
`ctypes` Tips [`ctypes`](https://docs.python.org/3/library/ctypes.html) is a very handy tool for building Python wrappers for shared libraries written for C or C++. In most cases, it is probably preferable to use this, rather than write an *extension module* in C or C++ to provide the Python API: it can take a lot of code to implement the necessary C/C++ wrappers to represent Python objects and methods, while this can usually be done directly in Python with a fraction of the effort.While the documentation for `ctypes` is quite comprehensive, there are a few subtle points that might not be clear.A Python wrapper will typically need a lot of things from the `ctypes` module. Its own documentation page uses wildcard imports in the examples, which I prefer to avoid. Instead, I reference its exports by importing the module under a shorter name:
###Code
import ctypes as ct
###Output
_____no_output_____
###Markdown
Load The Runtime Library, Not The Development Library

Consider the following directory entries currently on my Debian system for the [Cairo](https://cairographics.org/) graphics library:

    /usr/lib/x86_64-linux-gnu/libcairo.so -> libcairo.so.2.11600.0
    /usr/lib/x86_64-linux-gnu/libcairo.so.2 -> libcairo.so.2.11600.0
    /usr/lib/x86_64-linux-gnu/libcairo.so.2.11600.0

As you can see, there are 3 separate names for the same file. Which one should you use? The answer is, use the name `libcairo.so.2`. The unversioned name comes from the *development* package:

    > dpkg-query -S /usr/lib/x86_64-linux-gnu/libcairo.so
    libcairo2-dev:amd64: /usr/lib/x86_64-linux-gnu/libcairo.so

while the versioned names come from the *runtime* package:

    > dpkg-query -S /usr/lib/x86_64-linux-gnu/libcairo.so.2
    libcairo2:amd64: /usr/lib/x86_64-linux-gnu/libcairo.so.2

So, in a wrapper for Cairo, you would load the library using something like

    cairo = ct.cdll.LoadLibrary("libcairo.so.2")

You only need to care about the first numeric component of the version, since that is the one incremented for any ABI changes (which might necessitate changes to your wrapper). While having the development package installed is useful while you are developing your wrapper (being able to refer to the include files for information, etc), you should only require your users to have the runtime package in order to be able to run scripts that use your wrapper. Of course, they, too, might find the development package useful when writing such scripts. But let that be their choice. This only applies to distros like Debian which publish their packages in precompiled binary form. In ones like Gentoo, where users install everything from source, there is no distinction between “development” and “runtime” packages.

`c_void_p`

The `ctypes` explanation of `c_void_p` (the untyped pointer) is that the Python type is `int` or `None`. When creating a `c_void_p`, you can pass an integer for the address (including 0 for `NULL`), or you can pass `None` as an alternative for `NULL`. But when getting back one of these, the 0 or `NULL` address is always converted to `None`:
###Code
p1 = ct.c_void_p(3)
p2 = ct.c_void_p(0)
print(p1.value, p2.value)
###Output
_____no_output_____
###Markdown
Note that, while other pointer types have a `contents` attribute you can use to dereference the pointer, `c_void_p` does not. Getting Addresses Of Python Objects Sometimes you want to pass the address of the data inside a Python object directly to a library routine, to save copying data back and forth. This is particularly useful for Python objects of type `bytes` and `bytearray`, as well as arrays created with the [`array`](https://docs.python.org/3/library/array.html) module. This has to be done in slightly different ways for these different objects.To demonstrate this, I will make calls to the low-level `libc` [`memcpy`(3)](https://linux.die.net/man/3/memcpy) routine to copy data between Python objects:
###Code
libc = ct.cdll.LoadLibrary("libc.so.6")
libc.memcpy.restype = ct.c_void_p
libc.memcpy.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_size_t) # dst, src, count
###Output
_____no_output_____
###Markdown
For a `bytes` object, a simple `cast` is sufficient to obtain the address of the data:
###Code
b1 = b"some:text"
b2 = b"other text"
print(b1, b2)
b1adr = ct.cast(b1, ct.c_void_p).value
b2adr = ct.cast(b2, ct.c_void_p).value
libc.memcpy(b2adr, b1adr, 5)
print(b1, b2)
###Output
_____no_output_____
###Markdown
For a `bytearray`, things are slightly more involved.
###Code
b1 = bytearray(b"different text")
b1adr = ct.addressof((ct.c_ubyte * len(b1)).from_buffer(b1))
libc.memcpy(b2adr, b1adr, 6)
print(b1, b2)
###Output
_____no_output_____
###Markdown
By the way, you can’t use this technique on `bytes`; it appears this only works on *mutable* objects.[`array`](https://docs.python.org/3/library/array.html) arrays have a `buffer_info()` method which returns the address and length of the underlying memory buffer. While this still works, it is apparently deprecated. So the same trick works as for `bytearray`s:
###Code
import array
b1 = array.array("B", b"yet other text")
b1adr = ct.addressof((ct.c_ubyte * len(b1)).from_buffer(b1))
libc.memcpy(b2adr, b1adr, 7)
print(b1.tobytes(), b2)
###Output
_____no_output_____
###Markdown
Casting can be used to create a pointer to a `ctypes` array type.
###Code
b = bytearray(b"some text")
b1 = (ct.c_ubyte * 0).from_buffer(b)
###Output
_____no_output_____
###Markdown
In this case, I have set the array length to 0, which prevents me from using `b1` directly to access any of the bytes in `b`, but a pointer constructed from `b1` is not so constrained:
###Code
p = ct.cast(b1, ct.POINTER(ct.c_ubyte))
[chr(c) for c in p[0:3]]
###Output
_____no_output_____
###Markdown
Because the original Python object is mutable, `ctypes` allows me to use the pointer to assign to its components from within Python (this would not be allowed for a pointer into a `bytes` object, for example):
###Code
p[5] = ord("z")
b
###Output
_____no_output_____
###Markdown
Of course, external libraries are not going to respect Python’s access-control mechanisms. `c_char` And `c_char_p` A `c_char_p` is not quite equivalent to `ct.POINTER(c_char)`; it is assumed to point to a *null-terminated* array of `c_char`. Accessing the `value` attribute returns the data up to, but not including, the terminating null:
###Code
b = b"hello\0 there"
ct.cast(b, ct.c_char_p).value
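# (Added sketch) for contrast, a plain POINTER(c_char) has no notion of null
# termination: slicing it simply reads the requested number of bytes, so the
# embedded zero byte and the text after it are still visible.
pc = ct.cast(b, ct.POINTER(ct.c_char))
pc[0:len(b)]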
###Output
_____no_output_____
###Markdown
Note you cannot assign to the `value` or `contents` of a `c_char_p` (this silently reallocates the buffer to hold the new value):
###Code
ct.cast(b, ct.c_char_p).contents = b"text"
b
###Output
_____no_output_____
###Markdown
But you can assign to the `value` of an _array_ of `c_char` (note the extra null inserted after the value):
###Code
ct.cast(b, ct.POINTER(len(b) * ct.c_char))[0][0:4] = (4 * ct.c_char)(*list(b"text"))
b
###Output
_____no_output_____
###Markdown
Here’s a similar thing done to a `bytearray` instead of a `bytes` object:
###Code
b = bytearray(b"hello\0 there")
(len(b) * ct.c_char).from_buffer(b).value = b"tex"
b
###Output
_____no_output_____
###Markdown
Array Conversions Conversion of a ctypes array (at least of simple element types) to a Python sequence is quite straightforward:
###Code
c_arr = (3 * ct.c_int)(5, 4, 3)
list(c_arr)
###Output
_____no_output_____
###Markdown
Conversion the other way is slightly more involved:
###Code
arr = [8, 7, 6]
c_arr = (len(arr) * ct.c_int)(*arr)
c_arr, list(c_arr)
###Output
_____no_output_____
###Markdown
Pointers To Simple Types Dereferencing a pointer to a simple type can be done either via the `contents` attribute or by array indexing. But note that `contents` returns a reference to the `ctypes` object holding the value; this in turn has a `value` attribute that you can use to change the value.
###Code
i1 = ct.c_int(3)
i2 = ct.c_int(3)
p1 = ct.pointer(i1)
p2 = ct.pointer(i2)
print(p1.contents, p2[0])
p1.contents.value = 2 # “p1.contents = 2” won’t work
p2[0] = 5
print(p1.contents, p2.contents)
###Output
_____no_output_____ |
jupyter_notebooks/15_JSON_YAML.ipynb | ###Markdown
Python Cheat Sheet

Basic cheatsheet for Python mostly based on the book written by Al Sweigart, [Automate the Boring Stuff with Python](https://automatetheboringstuff.com/) under the [Creative Commons license](https://creativecommons.org/licenses/by-nc-sa/3.0/) and many other sources.

Read It
- [Website](https://www.pythoncheatsheet.org)
- [Github](https://github.com/wilfredinni/python-cheatsheet)
- [PDF](https://github.com/wilfredinni/Python-cheatsheet/raw/master/python_cheat_sheet.pdf)
- [Jupyter Notebook](https://mybinder.org/v2/gh/wilfredinni/python-cheatsheet/master?filepath=jupyter_notebooks)

JSON, YAML and configuration files

JSON

Open a JSON file with:
###Code
import json
with open("filename.json", "r") as f:
content = json.loads(f.read())
###Output
_____no_output_____
###Markdown
Write a JSON file with:
###Code
import json
content = {"name": "Joe", "age": 20}
with open("filename.json", "w") as f:
f.write(json.dumps(content, indent=2))
###Output
_____no_output_____
###Markdown
YAML

Compared to JSON, YAML allows much easier human maintenance and makes it possible to add comments. It is a convenient choice for configuration files that humans will have to edit. There are two main libraries for accessing YAML files:
- [PyYaml](https://pypi.python.org/pypi/PyYAML)
- [Ruamel.yaml](https://pypi.python.org/pypi/ruamel.yaml)

Install them using `pip install` in your virtual environment. The first one is easier to use, but the second one, Ruamel, implements the YAML specification much better and allows, for example, modifying YAML content without altering comments. Open a YAML file with:
###Code
from ruamel.yaml import YAML

with open("filename.yaml") as f:
    yaml = YAML()
    content = yaml.load(f)
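# (Added sketch) writing the data back out with the same YAML instance; ruamel's
# round-trip mode preserves comments and key order. "out.yaml" is only an example path.
with open("out.yaml", "w") as out:
    yaml.dump(content, out)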
###Output
_____no_output_____
###Markdown
Anyconfig

[Anyconfig](https://pypi.python.org/pypi/anyconfig) is a very handy package that completely abstracts the underlying configuration file format. It can load a Python dictionary from JSON, YAML, TOML, and so on. Install it with:
###Code
%%bash
pip install anyconfig
###Output
_____no_output_____
###Markdown
Usage:
###Code
import anyconfig
conf1 = anyconfig.load("/path/to/foo/conf.d/a.yml")
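# (Added sketch) anyconfig can also write the dictionary back out, inferring the
# format from the file extension; the path below is only an example.
anyconfig.dump(conf1, "/path/to/foo/conf.d/a.json")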
###Output
_____no_output_____ |
Guest Lectures/Image Processing/Pres_4_DL_Workflow.ipynb | ###Markdown
Deep Learning refers to artificial neural networks built from many stacked layers of simple units; below, a small fully-connected network is trained on the MNIST digits.
###Code
from keras.datasets import mnist
(train_images, train_labels),(test_images,test_labels) = mnist.load_data()
# Get the statistics of the taining and testing data
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
from keras import layers
from keras import models
network = models.Sequential()
network.add(layers.Dense(512, activation='relu',input_shape=(28*28,)))
network.add(layers.Dense(10,activation='softmax'))
print(network.summary())
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Reshaping the values
train_images = train_images.reshape((60000,28*28))
test_images = test_images.reshape((10000,28*28))
# Convert the type of float
train_images = train_images.astype('float32')/255
test_images = test_images.astype('float32')/255
from keras.utils import to_categorical
print(train_labels)
train_labels = to_categorical(train_labels)
print(train_labels)
network.fit(train_images, train_labels,epochs=5, batch_size=128)
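# (Added sketch) evaluate generalisation on the held-out test set; the test labels
# need the same one-hot encoding as the training labels.
test_labels_onehot = to_categorical(test_labels)
test_loss, test_acc = network.evaluate(test_images, test_labels_onehot)
print('test accuracy:', test_acc)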
###Output
_____no_output_____ |
notebooks/twitter_data_exploration_400k_tweets.ipynb | ###Markdown
Exploring the Complete Twitter Dataset
* The purpose of this notebook is to explore the full dataset of 400k tweets relating to bcpoli
* Tweet created dates range from August 14, 2020 to November 19, 2020
* Columns not required for analysis will be dropped here.
* The remaining data will be exported for preprocessing in "classify_unlabelled_tweets.ipynb"
###Code
import sys
sys.path.insert(0, '~/data_bootcamp/data-science-final-project/scripts/')
import pandas as pd
import numpy as np
import pdpipe as pdp
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from IPython.display import JSON
import matplotlib.pyplot as plt
import pickle
# Pandas Display Settings, if you wish
#pd.set_option('display.max_colwidth', None)
#pd.set_option("display.max_columns", 30)
# Import custom functions
from functions import *
###Output
[nltk_data] Downloading package vader_lexicon to
[nltk_data] /Users/lclark/nltk_data...
[nltk_data] Package vader_lexicon is already up-to-date!
###Markdown
~398K Tweets from August 14th, 2020 - November 19th, 2020
###Code
%%time
df = pd.read_json('/Volumes/My Passport/Tweets/bcpoli_400k_extended.jsonl', lines=True)
df.info(memory_usage='deep')
# It appears that over ten thousand tweets have been deleted since August
%%time
# Make copy of imported data and set index to unique tweet ID
raw = df.copy()
raw = raw[~raw.index.duplicated(keep='first')]
# Filter out columns
raw = col_filter(raw)
# Extract features from user column dict with .get
raw = extract_username(raw)
# Create is_retweet column
raw['is_retweet'] = raw['full_text'].apply(is_retweet) # This was originally for pdpipe and could be rewritten
# Create new col "rt_full_text" from dict column "retweet_status"
raw = extract_full_text(raw)
# Repalce truncated retweet full_text
raw = replace_retweet_text(raw)
###Output
_____no_output_____
###Markdown
Creating a Data Processing Pipeline
###Code
%%time
# Pandas Processing Pipeline
pipeline = pdp.ColDrop('user')
pipeline+= pdp.ApplyByCols('full_text', lower_case, 'full_lower', drop=False)
pipeline+= pdp.ApplyByCols('full_lower', covid_mention, 'covid_mention', drop=True)
pipeline+= pdp.ApplyByCols('full_text', preprocess, 'full_clean', drop=False)
pipeline+= pdp.ApplyByCols('full_text', (lambda x: preprocess(x, hashtags=True)), 'no_hashtags', drop=False)
pipeline+= pdp.ApplyByCols('full_text', vader_preprocess, 'vader_text', drop=False)
pipeline+= pdp.ColDrop('retweeted_status')
pipeline+= pdp.ColDrop('rt_full_text')
raw = pipeline(raw)
raw.sample(n=5)
raw.user_name.nunique()
###Output
_____no_output_____
###Markdown
Create new DataFrames for separate analysis
###Code
# Using the updated DataFrame of tweets.
# df_filtered_tweets_master has been processed identically as above
# df_filtered_tweets_master will always be the most current DataFrame
# Reproducibility still possible with /data/tweet_ids.txt. It is updated with the tweet_ids from df_filtered_tweets_master
raw = pd.read_pickle('~/data_bootcamp/data-science-final-project/data/df_filtered_tweets_master.pkl')
# Create a new column with word lemma
# This will drastically improve the quality and variance of ngrams
raw['lemma'] = raw.no_hashtags.apply(lambda x: lemmatize_text(x))
raw.head()
# Create new DataFrame of only original tweets
df_no_rt = raw[raw['is_retweet'] == 0]
df_no_rt.info()
# Create a new DataFrame with only original non-covid tweets
# This will be used to gauge covid's impact on sentiment
df_no_rt_no_covid = df_no_rt[df_no_rt['covid_mention'] == 0]
df_no_rt_no_covid.info()
# Create a new DataFrame with only original covid-mentioning tweets
# This will be used to gauge covid's impact on sentiment
df_no_rt_covid_mention = df_no_rt[df_no_rt['covid_mention'] == 1]
df_no_rt_covid_mention.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 33423 entries, 1294232573636304896 to 1333146115932209152
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 created_at 33423 non-null datetime64[ns, UTC]
1 full_text 33423 non-null object
2 vader_text 33423 non-null object
3 no_hashtags 33423 non-null object
4 full_clean 33423 non-null object
5 covid_mention 33423 non-null int64
6 retweet_count 33423 non-null int64
7 user_name 33423 non-null object
8 is_retweet 33423 non-null int64
9 lemma 33423 non-null object
dtypes: datetime64[ns, UTC](1), int64(3), object(6)
memory usage: 2.8+ MB
###Markdown
Examining some metrics
###Code
# Total retweet count of all 384221 tweets
raw.is_retweet.sum()
# Estimated total covid/pandemic mentions
raw.covid_mention.sum()
raw.info()
# Estimated total covid/pandemic mentions among the original (non-retweet) tweets
no_rt_count = df_no_rt.shape[0]
no_rt_covid_count = df_no_rt.covid_mention.sum()
mention_ratio_no_rt = (no_rt_covid_count/no_rt_count) * 100
print('Estimated percentage of tweets related to #bcpoli that mention covid or the pandemic in some way:', '%0.2f'% mention_ratio_no_rt,'%')
print(f'Total of {no_rt_count} original tweets related to #bcpoli that mention covid or the pandemic in some way:', no_rt_covid_count)
###Output
Estimated percentage of tweets related to #bcpoli that mention covid or the pandemic in some way: 29.65 %
Total of 112734 original tweets related to #bcpoli that mention covid or the pandemic in some way: 33423
###Markdown
Bigrams, Trigrams and Topics. A new column for lemmatized words should be created when extracting ngrams, as the ngrams will be diluted with plural and non-plural forms of words
###Code
# Most frequent bigrams, hashtags removed, stop words removed - Includes original tweets and retweets
# This will be more interesting with data grouped by week
top_ngrams(raw, n=2, ngrams=20)
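# (Added sketch) top_ngrams is imported from the project's functions module and is
# not shown in this notebook; this stand-in built on scikit-learn's CountVectorizer
# is an assumption about what it does, not the project's actual implementation.
from sklearn.feature_extraction.text import CountVectorizer

def top_ngrams_sketch(df, n=2, ngrams=20, col='lemma'):
    vec = CountVectorizer(ngram_range=(n, n), stop_words='english')
    counts = vec.fit_transform(df[col].fillna('')).sum(axis=0).A1
    terms = vec.get_feature_names_out()
    return sorted(zip(terms, counts), key=lambda pair: -pair[1])[:ngrams]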
# Top bigrams from original tweets only
top_ngrams(df_no_rt, n=2, ngrams=20)
# Top bigrams from original tweets only, without covid mentioned
# This is a good example of when stemming is beneficial - See pluralized words below
top_ngrams(df_no_rt_no_covid, n=2, ngrams=20)
# Most frequent trigrams, hashtags removed, stop words removed - Includes original tweets and retweets
# This will be more interesting with data grouped by week
top_ngrams(raw, n=3, ngrams=20)
# Top trigrams from original tweets only
top_ngrams(df_no_rt, n=3, ngrams=20)
# Top trigrams from original tweets only, without covid mentioned
# This is a good example of when stemming is beneficial - See pluralized words below
# Also a great example of why trigrams are useful - (old, growth) (growth, forests)
top_ngrams(df_no_rt_no_covid, n=3, ngrams=20)
# Pickle DataFrames for later use
#df_no_rt.to_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets.pkl')
#df_no_rt_covid_mention.to_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets_covid_mention.pkl')
#df_no_rt_no_covid.to_pickle('~/data_bootcamp/data-science-final-project/data/df_original_tweets_no_covid.pkl')
###Output
_____no_output_____ |
v1.52.2/Functions/4. User comparison.ipynb | ###Markdown
4. User comparison

Table of Contents
1. [Preparation](preparation)
2. [Functions](functions)
3. [Tests](tests)

Preparation
###Code
%run "../Functions/3. Per session and per user analysis.ipynb"
print("4. User comparison")
###Output
_____no_output_____
###Markdown
Functions
###Code
def getAllUsers( dataframe ):
allUserIds = np.array(dataframe['userId'].unique())
allUserIds = [i for i in allUserIds if not i in ['nan', np.nan, 'null']]
return allUserIds
# _source is used as correction source, if we want to include answers to these questions
def getAllUserVectorData( userIds, _rmDF, _gfDF, _source = correctAnswers, _printDebug = True, _binary=True):
# result
isInitialized = False
allData = []
f = FloatProgress(min=0, max=len(userIds))
display(f)
for userId in userIds:
#print(str(userId))
f.value += 1
dataVector = getUserDataVector(userId, _rmDF = _rmDF, _gfDF = _gfDF, _source = _source, _printDebug = _printDebug, _binary=_binary)
if not isInitialized:
isInitialized = True
allData = dataVector
else:
allData = pd.concat([allData, dataVector], axis=1)
f.close()
del f
#print('done')
return allData
def getAllUserVectorDataCustom(_rmDF, _gfDF, before, after, gfMode = False, rmMode = True, sessionCount = 1):
userIds = []
if (before and after):
userIds = getSurveysOfUsersWhoAnsweredBoth(_gfDF, gfMode = gfMode, rmMode = rmMode)
elif before:
if rmMode:
userIds = getRMBefores(_gfDF)
else:
userIds = getGFBefores(_gfDF)
elif after:
if rmMode:
userIds = getRMAfters(_gfDF)
else:
userIds = getGFormAfters(_gfDF)
if(len(userIds) > 0):
userIds = userIds[localplayerguidkey]
allUserVectorData = getAllUserVectorData(userIds, _rmDF = _rmDF, _gfDF = _gfDF)
allUserVectorData = allUserVectorData.T
result = allUserVectorData[allUserVectorData['sessionsCount'] == sessionCount].T
return result
else:
print("no matching user")
return []
methods = ['pearson', 'kendall', 'spearman']
def plotAllUserVectorDataCorrelationMatrix(
_allUserVectorData,
_method = methods[0],
_title='RedMetrics Correlations',
_abs=False,
_clustered=False,
_figsize = (20,20),
columnSubset=[]
):
_progress = FloatProgress(min=0, max=4)
display(_progress)
# computation of subset
if len(columnSubset) > 0 and pd.Series(columnSubset).isin(_allUserVectorData.columns).all():
_allUserVectorData = _allUserVectorData.loc[:,columnSubset]
# computation of correlation matrix
_m = _method
if(not (_method in methods)):
_m = methods[0]
_correlation = _allUserVectorData.astype(float).corr(_m)
_progress.value += 1
if(_abs):
_correlation = _correlation.abs()
_progress.value += 1
vmin=-1
if _abs:
vmin=0
vmax=1
# plot
if(_clustered):
# removing NaNs
# can't cluster NaN lines in _correlation
# copied/pasted from '2. Google form analysis.ipynb' plotCorrelationMatrix
_notNaNsIndices = []
_notNaNsColumns = []
for index in _correlation.index:
if(~pd.isnull(_correlation.loc[index,:]).all()):
_notNaNsIndices.append(index)
_correlation = _correlation.loc[_notNaNsIndices,_notNaNsIndices]
_progress.value += 1
sns.clustermap(
_correlation,
cmap=plt.cm.jet,
square=True,
figsize=_figsize,
vmin=vmin,
vmax=vmax,
)
else:
_fig = plt.figure(figsize=_figsize)
_ax = plt.subplot(111)
_ax.set_title(_title)
_progress.value += 1
sns.heatmap(
_correlation,
ax=_ax,
cmap=plt.cm.jet,
square=True,
vmin=vmin,
vmax=vmax,
)
_progress.value += 1
def getPercentageCrossCorrect(binarized, figsize=(40,100)):
cbar_kws = dict(orientation= "horizontal")
#cbar_kws = dict(orientation= "horizontal",location="top")
#cbar_kws = dict(orientation= "horizontal", position="top")
intermediaryNumerator = getCrossCorrectAnswers(binarized).round().astype(int)*100
percentagesCrossCorrect = (intermediaryNumerator / binarized.shape[0]).round().astype(int)
_fig = plt.figure(figsize=figsize)
_ax = plt.subplot(121)
_ax.set_title('percentage correct')
sns.heatmap(
percentagesCrossCorrect,
ax=_ax,
cmap=plt.cm.jet,
square=True,
annot=True,
fmt='d',
cbar_kws=cbar_kws,
vmin=0,
vmax=100,
)
totalPerQuestion = np.dot(np.ones(binarized.shape[0]), binarized)
totalPerQuestion[totalPerQuestion == 0] = 1
percentagesConditionalCrossCorrect = (intermediaryNumerator / totalPerQuestion).round().astype(int).fillna(0)
_ax = plt.subplot(122)
_ax.set_title('percentage correct, conditionnally: p(y | x)')
sns.heatmap(
percentagesConditionalCrossCorrect,
ax=_ax,
cmap=plt.cm.jet,
square=True,
annot=True,
fmt='d',
cbar_kws=cbar_kws,
vmin=0,
vmax=100,
)
plt.tight_layout()
def getCompletedRate(_rmdf):
players = _rmdf[QUserId].nunique()
completers = _rmdf[_rmdf['type'] == 'complete'][QUserId].nunique()
return float(completers)/float(players)
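# Example usage (added note):
#   getCompletedRate(rmdfPlaytestPhase1PretestPosttestUniqueProfiles)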
allBinaryUserVectorDataPath = dataFolderPath + "allBinaryUserVectorData/"
allNumericUserVectorDataPath = dataFolderPath + "allNumericUserVectorData/"
def getAllDataCSVPath(filePathStem, binary=True):
if binary:
return allBinaryUserVectorDataPath + filePathStem + csvSuffix
return allNumericUserVectorDataPath + filePathStem + csvSuffix
def loadAllDataCSV(filePathStem, binary=True):
currentDF = pd.read_csv(getAllDataCSVPath(filePathStem, binary=binary), dtype=str)
if currentDF.columns[0] == 'Unnamed: 0':
currentDF.index = currentDF.loc[:,'Unnamed: 0']
del currentDF.index.name
currentDF = currentDF.drop('Unnamed: 0', axis='columns')
currentDF = currentDF.apply(np.float64)
return currentDF
def saveAllDataCSV(allData, filePathStem, binary=True):
allData.to_csv(getAllDataCSVPath(filePathStem, binary=binary), encoding=csvEncoding)
regenerateData = False
if regenerateData:
allBinaryDataPlaytestPhase1PretestPosttestUniqueProfiles = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase1PretestPosttestUniqueProfiles),
_rmDF = rmdfPlaytestPhase1PretestPosttestUniqueProfiles,
_gfDF = gfdfPlaytestPhase1PretestPosttestUniqueProfiles,
_source = correctAnswers + demographicAnswers,
_binary=True )
allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers),
_rmDF = rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers,
_gfDF = gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_binary=True )
allBinaryDataPlaytestPhase2PretestPosttestUniqueProfiles = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase2PretestPosttestUniqueProfiles),
_rmDF = rmdfPlaytestPhase2PretestPosttestUniqueProfiles,
_gfDF = gfdfPlaytestPhase2PretestPosttestUniqueProfiles,
_source = correctAnswers + demographicAnswers,
_binary=True )
allBinaryDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers),
_rmDF = rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
_gfDF = gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_binary=True )
saveAllDataCSV(allBinaryDataPlaytestPhase1PretestPosttestUniqueProfiles, "PlaytestPhase1PretestPosttestUniqueProfiles", binary=True)
saveAllDataCSV(allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers, "PlaytestPhase1PretestPosttestUniqueProfilesVolunteers", binary=True)
saveAllDataCSV(allBinaryDataPlaytestPhase2PretestPosttestUniqueProfiles, "PlaytestPhase2PretestPosttestUniqueProfiles", binary=True)
saveAllDataCSV(allBinaryDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers, "PlaytestPhase2PretestPosttestUniqueProfilesVolunteers", binary=True)
else:
allBinaryDataPlaytestPhase1PretestPosttestUniqueProfiles = loadAllDataCSV("PlaytestPhase1PretestPosttestUniqueProfiles", binary=True)
allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers = loadAllDataCSV("PlaytestPhase1PretestPosttestUniqueProfilesVolunteers", binary=True)
allBinaryDataPlaytestPhase2PretestPosttestUniqueProfiles = loadAllDataCSV("PlaytestPhase2PretestPosttestUniqueProfiles", binary=True)
allBinaryDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers = loadAllDataCSV("PlaytestPhase2PretestPosttestUniqueProfilesVolunteers", binary=True)
if regenerateData:
allNumericDataPlaytestPhase1PretestPosttestUniqueProfiles = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase1PretestPosttestUniqueProfiles),
_rmDF = rmdfPlaytestPhase1PretestPosttestUniqueProfiles,
_gfDF = gfdfPlaytestPhase1PretestPosttestUniqueProfiles,
_source = correctAnswers + demographicAnswers,
_binary=False )
allNumericDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers),
_rmDF = rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers,
_gfDF = gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_binary=False )
allNumericDataPlaytestPhase2PretestPosttestUniqueProfiles = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase2PretestPosttestUniqueProfiles),
_rmDF = rmdfPlaytestPhase2PretestPosttestUniqueProfiles,
_gfDF = gfdfPlaytestPhase2PretestPosttestUniqueProfiles,
_source = correctAnswers + demographicAnswers,
_binary=False )
allNumericDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers = getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers),
_rmDF = rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
_gfDF = gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_binary=False )
saveAllDataCSV(allNumericDataPlaytestPhase1PretestPosttestUniqueProfiles, "PlaytestPhase1PretestPosttestUniqueProfiles", binary=False)
saveAllDataCSV(allNumericDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers, "PlaytestPhase1PretestPosttestUniqueProfilesVolunteers", binary=False)
saveAllDataCSV(allNumericDataPlaytestPhase2PretestPosttestUniqueProfiles, "PlaytestPhase2PretestPosttestUniqueProfiles", binary=False)
saveAllDataCSV(allNumericDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers, "PlaytestPhase2PretestPosttestUniqueProfilesVolunteers", binary=False)
else:
allNumericDataPlaytestPhase1PretestPosttestUniqueProfiles = loadAllDataCSV("PlaytestPhase1PretestPosttestUniqueProfiles", binary=False)
allNumericDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers = loadAllDataCSV("PlaytestPhase1PretestPosttestUniqueProfilesVolunteers", binary=False)
allNumericDataPlaytestPhase2PretestPosttestUniqueProfiles = loadAllDataCSV("PlaytestPhase2PretestPosttestUniqueProfiles", binary=False)
allNumericDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers = loadAllDataCSV("PlaytestPhase2PretestPosttestUniqueProfilesVolunteers", binary=False)
allDataPlaytestPhase1PretestPosttestUniqueProfiles = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfiles
allDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers
allDataPlaytestPhase2PretestPosttestUniqueProfiles = allBinaryDataPlaytestPhase2PretestPosttestUniqueProfiles
allDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers = allBinaryDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers
###Output
_____no_output_____ |
notebooks/EX-2-ClickHouse-SQL-Alchemy.ipynb | ###Markdown
ClickHouse SQLAlchemy. This notebook provides simple examples from the clickhouse-sqlalchemy driver, including a demonstration of integration with pandas and matplotlib. Import SQLAlchemy + clickhouse-sqlalchemy entities.
###Code
from sqlalchemy import create_engine, Column, MetaData, literal
from clickhouse_sqlalchemy import Table, make_session, get_declarative_base, types, engines
###Output
_____no_output_____
###Markdown
Initialize SQLAlchemy to use local server with native connectivity. If you leave off '+native' the driver will use http[s].
###Code
uri = 'clickhouse+native://default:@localhost/default'
engine = create_engine(uri)
session = make_session(engine)
metadata = MetaData(bind=engine)
Base = get_declarative_base(metadata=metadata)
###Output
_____no_output_____
###Markdown
Define a class to represent sensor data from devices.
###Code
class SensorData(Base):
dev_id = Column(types.Int32, primary_key=True)
type = Column(types.String)
mdate = Column(types.Date)
mdatetime = Column(types.DateTime, primary_key=True)
value = Column(types.Float64)
__table_args__ = (
engines.MergeTree('mdate', ('dev_id', 'mdate')),
)
###Output
_____no_output_____
###Markdown
Drop and then recreate the SQL table. Ignore errors if the table does not exist previously.
###Code
table = SensorData.__table__
try:
table.drop()
except:
# Exceptions are ignored
pass
table.create()
###Output
_____no_output_____
###Markdown
Create sensor data for 5 mythical devices. Readings increase linearly from a base that is randomly selected for each device.
###Code
from datetime import date, datetime, timedelta
from random import random
today = date.today()
this_instant = datetime.today()
data = []
for i in range(5):
base = random()
for j in range(10):
data.append({'dev_id': i,
'type': 'widget-a',
'mdate': today,
'mdatetime': this_instant + timedelta(minutes=j),
'value': base + j * 0.1})
session.execute(table.insert(), data)
###Output
_____no_output_____
###Markdown
Enable %sql magic function.
###Code
%load_ext sql
%sql clickhouse://default:@localhost/default
###Output
_____no_output_____
###Markdown
Prove that the magic function works by showing tables. %sql can handle any query.
###Code
%sql show tables
###Output
* clickhouse://default:***@localhost/default
Done.
###Markdown
Select all rows back and convert to a data frame.
###Code
result = %sql select * from sensor_data
df = result.DataFrame()
df
df.describe()
###Output
_____no_output_____
###Markdown
Data frames integrate nicely with graphics. Use selection on the data frame to pull out rows for each device in succession and plot them as separate lines.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Break up the data frame and graph each device separately.
markers = ['o', 'x', '^', '+', '*']
for i in range(5):
df_segment = df[df['dev_id'] == i]
plt.plot('mdatetime', 'value', data=df_segment, linestyle='--', marker=markers[i])
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
It's more common to use ClickHouse to compute aggregates. Find the min, average, and max values for each device and likewise convert them to a data frame.
###Code
result = %sql select dev_id, min(value), avg(value), max(value) from sensor_data group by dev_id order by dev_id
df2 = result.DataFrame()
df2
###Output
* clickhouse://default:***@localhost/default
Done.
###Markdown
Let's put the average values per device into a nice bar chart. It's easy to add additional sets of bars or create subplots but this will do for today.
###Code
plt.bar('dev_id', 'avg(value)', data=df2, align='center', alpha=0.5)
plt.title('Average device measurements')
plt.xlabel('Device ID')
plt.ylabel('Value')
plt.show()
###Output
_____no_output_____ |
4-assets/BOOKS/Jupyter-Notebooks/03-Function_Practice_Exercises.ipynb | ###Markdown
______Content Copyright by Pierian Data

Function Practice Exercises

Problems are arranged in increasing difficulty:
* Warmup - these can be solved using basic comparisons and methods
* Level 1 - these may involve if/then conditional statements and simple methods
* Level 2 - these may require iterating over sequences, usually with some kind of loop
* Challenging - these will take some creativity to solve

WARMUP SECTION: LESSER OF TWO EVENS: Write a function that returns the lesser of two given numbers *if* both numbers are even, but returns the greater if one or both numbers are odd

    lesser_of_two_evens(2,4) --> 2
    lesser_of_two_evens(2,5) --> 5
###Code
def lesser_of_two_evens(a,b):
pass
# Check
lesser_of_two_evens(2,4)
# Check
lesser_of_two_evens(2,5)
###Output
_____no_output_____
###Markdown
ANIMAL CRACKERS: Write a function that takes a two-word string and returns True if both words begin with the same letter animal_crackers('Levelheaded Llama') --> True animal_crackers('Crazy Kangaroo') --> False
###Code
def animal_crackers(text):
pass
# Check
animal_crackers('Levelheaded Llama')
# Check
animal_crackers('Crazy Kangaroo')
###Output
_____no_output_____
###Markdown
MAKES TWENTY: Given two integers, return True if the sum of the integers is 20 *or* if one of the integers is 20. If not, return False makes_twenty(20,10) --> True makes_twenty(12,8) --> True makes_twenty(2,3) --> False
###Code
def makes_twenty(n1,n2):
pass
# Check
makes_twenty(20,10)
# Check
makes_twenty(2,3)
###Output
_____no_output_____
###Markdown
LEVEL 1 PROBLEMS OLD MACDONALD: Write a function that capitalizes the first and fourth letters of a name old_macdonald('macdonald') --> MacDonald Note: `'macdonald'.capitalize()` returns `'Macdonald'`
###Code
def old_macdonald(name):
pass
# Check
old_macdonald('macdonald')
###Output
_____no_output_____
###Markdown
MASTER YODA: Given a sentence, return a sentence with the words reversed master_yoda('I am home') --> 'home am I' master_yoda('We are ready') --> 'ready are We' Note: The .join() method may be useful here. The .join() method allows you to join together strings in a list with some connector string. For example, some uses of the .join() method: >>> "--".join(['a','b','c']) >>> 'a--b--c'This means if you had a list of words you wanted to turn back into a sentence, you could just join them with a single space string: >>> " ".join(['Hello','world']) >>> "Hello world"
###Code
def master_yoda(text):
pass
# Check
master_yoda('I am home')
# Check
master_yoda('We are ready')
###Output
_____no_output_____
###Markdown
ALMOST THERE: Given an integer n, return True if n is within 10 of either 100 or 200 almost_there(90) --> True almost_there(104) --> True almost_there(150) --> False almost_there(209) --> True NOTE: `abs(num)` returns the absolute value of a number
###Code
def almost_there(n):
pass
# Check
almost_there(104)
# Check
almost_there(150)
# Check
almost_there(209)
###Output
_____no_output_____
###Markdown
LEVEL 2 PROBLEMS FIND 33: Given a list of ints, return True if the array contains a 3 next to a 3 somewhere. has_33([1, 3, 3]) → True has_33([1, 3, 1, 3]) → False has_33([3, 1, 3]) → False
###Code
def has_33(nums):
pass
# Check
has_33([1, 3, 3])
# Check
has_33([1, 3, 1, 3])
# Check
has_33([3, 1, 3])
###Output
_____no_output_____
###Markdown
PAPER DOLL: Given a string, return a string where for every character in the original there are three characters paper_doll('Hello') --> 'HHHeeellllllooo' paper_doll('Mississippi') --> 'MMMiiissssssiiippppppiii'
###Code
def paper_doll(text):
pass
# Check
paper_doll('Hello')
# Check
paper_doll('Mississippi')
###Output
_____no_output_____
###Markdown
BLACKJACK: Given three integers between 1 and 11, if their sum is less than or equal to 21, return their sum. If their sum exceeds 21 *and* there's an eleven, reduce the total sum by 10. Finally, if the sum (even after adjustment) exceeds 21, return 'BUST' blackjack(5,6,7) --> 18 blackjack(9,9,9) --> 'BUST' blackjack(9,9,11) --> 19
###Code
def blackjack(a,b,c):
pass
# Check
blackjack(5,6,7)
# Check
blackjack(9,9,9)
# Check
blackjack(9,9,11)
###Output
_____no_output_____
###Markdown
SUMMER OF '69: Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 9 (every 6 will be followed by at least one 9). Return 0 for no numbers. summer_69([1, 3, 5]) --> 9 summer_69([4, 5, 6, 7, 8, 9]) --> 9 summer_69([2, 1, 6, 9, 11]) --> 14
###Code
def summer_69(arr):
pass
# Check
summer_69([1, 3, 5])
# Check
summer_69([4, 5, 6, 7, 8, 9])
# Check
summer_69([2, 1, 6, 9, 11])
###Output
_____no_output_____
###Markdown
CHALLENGING PROBLEMS SPY GAME: Write a function that takes in a list of integers and returns True if it contains 007 in order spy_game([1,2,4,0,0,7,5]) --> True spy_game([1,0,2,4,0,5,7]) --> True spy_game([1,7,2,0,4,5,0]) --> False
###Code
def spy_game(nums):
pass
# Check
spy_game([1,2,4,0,0,7,5])
# Check
spy_game([1,0,2,4,0,5,7])
# Check
spy_game([1,7,2,0,4,5,0])
###Output
_____no_output_____
###Markdown
COUNT PRIMES: Write a function that returns the *number* of prime numbers that exist up to and including a given number count_primes(100) --> 25By convention, 0 and 1 are not prime.
###Code
def count_primes(num):
pass
# Check
count_primes(100)
###Output
_____no_output_____
###Markdown
Just for fun: PRINT BIG: Write a function that takes in a single letter, and returns a 5x5 representation of that letter print_big('a') out: * * * ***** * * * *HINT: Consider making a dictionary of possible patterns, and mapping the alphabet to specific 5-line combinations of patterns. For purposes of this exercise, it's ok if your dictionary stops at "E".
###Code
def print_big(letter):
pass
print_big('a')
###Output
_____no_output_____ |
01_Plain_Text_Extractor.ipynb | ###Markdown
Connect to GDrive and set working directory
1. Add a shortcut for the working directory ('IDPCode') to your drive as depicted below:
2. Run the command below to connect the GDrive:
###Code
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/My Drive/IDPCode/
# Library install examples: https://colab.research.google.com/notebooks/snippets/importing_libraries.ipynb
!pip install pdfplumber
!pip install PyPDF2
!pip install tika
!pip install xlsxwriter
!pip install pikepdf
!pip install pdfminer.six
!pip install folderstats
from os import path
from glob import glob
import pandas as pd
import os
import re
import time
import sys
import string
# Show all of columns in dataframe: https://stackoverflow.com/questions/49188960/how-to-show-all-of-columns-name-on-pandas-dataframe
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
DATA_DIRECTORY='data'
# Utility functions
import folderstats
def find_data_files(directory):
df = folderstats.folderstats(directory, ignore_hidden=True)
df_files = df[df['folder']==False]
df_pdf_files = df_files[df_files['extension']=='pdf']
df_pdf_files_in_depth_1 = df_pdf_files[df_pdf_files['depth']==1]
return df_pdf_files_in_depth_1['path']
millis = lambda: int(round(time.time() * 1000))
def word_count(text):
return sum([i.strip(string.punctuation).isalpha() for i in text.split()])
def reorder_columns(dataframe):
    cols = list(dataframe.columns.values)
    page_cols = [k for k in cols if k.startswith('page_')]
    cols.remove('file_path')
    cols.remove('total_page_count')
    meta_cols = list(set(cols) - set(page_cols))
    # Put the file info first, then the per-page text/word-count columns, then metadata
    return dataframe[['file_path', 'total_page_count'] + page_cols + meta_cols]
papers = find_data_files(DATA_DIRECTORY)
###Output
_____no_output_____
###Markdown
Plain Text Extraction from PDF

There are 4 good Python packages available for extracting plain text from PDF files.
- Tika: https://tika.apache.org/
- PyPDF2: https://pypi.org/project/PyPDF2/ (besides converting PDF to plain text, it is able to extract metadata)
- Pdfplumber: https://github.com/jsvine/pdfplumber
- PDFminer3: https://pypi.org/project/pdfminer3/

Tika

Example usage:
```python
data = parser.from_file(file_path)
```
Methods:
* `data.keys()` : ['content', 'metadata', 'status']
* `data.items()` : ['content':"...", 'metadata':{'author':"...", ...} 'status': 200]
* `data['content']` : "..."
* `data['metadata']` : ['Author', 'Content-Type', 'Creation-Date', 'Keywords', 'Last-Modified', 'Last-Save-Date', 'X-Parsed-By', 'X-TIKA:content_handler', 'X-TIKA:embedded_depth', 'X-TIKA:parse_time_millis', 'access_permission:assemble_document', 'access_permission:can_modify', 'access_permission:can_print', 'access_permission:can_print_degraded', 'access_permission:extract_content', 'access_permission:extract_for_accessibility', 'access_permission:fill_in_form', 'access_permission:modify_annotations', 'cp:subject', 'created', 'creator', 'date', 'dc:creator', 'dc:description', 'dc:format', 'dc:subject', 'dc:title', 'dcterms:created', 'dcterms:modified', 'description', 'meta:author', 'meta:creation-date', 'meta:keyword', 'meta:save-date', 'modified', 'pdf:PDFVersion', 'pdf:charsPerPage', 'pdf:docinfo:created', 'pdf:docinfo:creator', 'pdf:docinfo:creator_tool', 'pdf:docinfo:keywords', 'pdf:docinfo:modified', 'pdf:docinfo:producer', 'pdf:docinfo:subject', 'pdf:docinfo:title', 'pdf:encrypted', 'pdf:hasMarkedContent', 'pdf:hasXFA', 'pdf:hasXMP', 'pdf:unmappedUnicodeCharsPerPage', 'producer', 'resourceName', 'subject', 'title', 'xmp:CreatorTool', 'xmpMM:DocumentID', 'xmpTPg:NPages', 'Content-Encoding', 'Content-Length', 'X-TIKA:embedded_resource_path', 'X-TIKA:origResourceName', 'embeddedResourceType']
###Code
from tika import parser
from io import StringIO
from bs4 import BeautifulSoup
# Extracting plain text page by page: https://github.com/chrismattmann/tika-python/issues/191
# Tika example usage and Metadata extraction: https://cbrownley.wordpress.com/2016/06/26/parsing-pdfs-in-python-with-tika/
def tika_extract_pages(pages_txt, data, max_page_count):
xhtml_data = BeautifulSoup(data['content'])
all_data = xhtml_data.find_all('div', attrs={'class': 'page'})
pages_txt['total_page_count'] = len(all_data)
for i, content in enumerate(all_data):
page = i+1
# Parse PDF data using TIKA (xml/html)
# It's faster and safer to create a new buffer than truncating it
# https://stackoverflow.com/questions/4330812/how-do-i-clear-a-stringio-object
_buffer = StringIO()
_buffer.write(str(content))
parsed_content = parser.from_buffer(_buffer.getvalue())
# Add pages
text = parsed_content['content'].strip() if parsed_content['content'] else ''
pages_txt['page_'+str(page)] = text
pages_txt['page_'+str(page)+'_wc'] = word_count(text)
# Stop if a limit is defined!
if max_page_count is not None and page is max_page_count:
break
def tika_parser(file_path, max_page_count=None):
current_time = millis()
print("Start to process {} at {}...".format(file_path, current_time), end = '')
pages_txt = {}
pages_txt['file_path'] = file_path
# Read PDF file
data = parser.from_file(file_path, xmlContent=True)
# Extract pages
tika_extract_pages(pages_txt, data, max_page_count)
# Extract Metadata
pages_txt.update(data['metadata'])
print("then it is processed in {} milliseconds".format(millis()-current_time))
return pages_txt
# Convert all PDFs to plain text
current_time = millis()
data = []
for paper in papers:
data.append(tika_parser(paper, 3)) #take first 3 pages of each paper
df_all_papers = pd.DataFrame.from_dict(data)
df_all_papers_ordered_tika = reorder_columns(df_all_papers)
# Write result to an excel file
df_all_papers_ordered_tika.to_excel("All_Papers_In_Plain_Text_TIKA.xlsx", engine="xlsxwriter", encoding='utf-8')
print('Total duration with Tika: {} millis'.format(millis()-current_time))
df_all_papers_ordered_tika
###Output
_____no_output_____
###Markdown
PyPDF2

Example usage:
`pdf = PdfFileReader(file_path)`

Methods:
- `pdf.getNumPages()` : 30
- `pdf.documentInfo()` : ['/Author', '/CreationDate', '/Creator', '/Keywords', '/ModDate', '/Producer', '/Subject', '/Title']
- `pdf.getPage(i).extractText()`: ""
###Code
from PyPDF2 import PdfFileReader
# Example: https://www.blog.pythonlibrary.org/2018/06/07/an-intro-to-pypdf2/
def pypdf2_parser(file_path, max_page_count=None):
current_time = millis()
print("Start to process {} at {}...".format(file_path, current_time), end = '')
pages_txt = {}
pages_txt['file_path'] = file_path
with open(file_path, 'rb') as file:
pdf = PdfFileReader(file)
#metadata = pdf.getDocumentInfo()
pages_txt['total_page_count'] = pdf.getNumPages()
for i in range(0, pages_txt['total_page_count']):
page = i + 1
# Add pages
text = pdf.getPage(i).extractText()
pages_txt['page_'+str(page)] = text
pages_txt['page_'+str(page)+'_wc'] = word_count(text)
# Stop if a limit is defined!
if max_page_count is not None and page == max_page_count:
break
print("then it is processed in {} milliseconds".format(millis()-current_time))
return pages_txt
# PyPDF2 - Convert all PDFs to plain text
current_time = millis()
data = []
for paper in papers:
data.append(pypdf2_parser(paper, 3))
df_all_papers = pd.DataFrame.from_dict(data)
df_all_papers_ordered_pypdf2 = reorder_columns(df_all_papers)
# Write result to an excel file
df_all_papers_ordered_pypdf2.to_excel("All_Papers_In_Plain_Text_pypdf2.xlsx", engine="xlsxwriter", encoding='utf-8')
print('Total duration with pdfplumber: {} millis'.format(millis()-current_time))
df_all_papers_ordered_pypdf2
###Output
_____no_output_____
###Markdown
Pdfplumber
###Code
import pdfplumber
def pdfplumber_parser(file_path, max_page_count=None):
current_time = millis()
print("Start to process {} at {}...".format(file_path, current_time), end = '')
pages_txt = {}
pages_txt['file_path'] = file_path
data = pdfplumber.open(file_path)
pages_txt['total_page_count'] = len(data.pages)
for i in range(0, pages_txt['total_page_count']):
page = i + 1
# Add pages
text = data.pages[i].extract_text()
pages_txt['page_'+str(page)] = text
pages_txt['page_'+str(page)+'_wc'] = word_count(text)
# Stop if a limit is defined!
if max_page_count is not None and page == max_page_count:
break
print("then it is processed in {} milliseconds".format(millis()-current_time))
return pages_txt
# pdfplumber - Convert all PDFs to plain text
current_time = millis()
data = []
for paper in papers:
data.append(pdfplumber_parser(paper, 3))
df_all_papers = pd.DataFrame.from_dict(data)
df_all_papers_ordered_pdfplumber = reorder_columns(df_all_papers)
# Write result to an excel file
df_all_papers_ordered_pdfplumber.to_excel("All_Papers_In_Plain_Text_pdfplumber.xlsx", engine="xlsxwriter", encoding='utf-8')
print('Total duration with pdfplumber: {} millis'.format(millis()-current_time))
df_all_papers_ordered_pdfplumber
###Output
_____no_output_____
###Markdown
PDFminer
###Code
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import resolve1
import more_itertools
def pdfminer_parser(file_path, max_page_count=None):
current_time = millis()
print("Start to process {} at {}...".format(file_path, current_time), end = '')
pages_txt = {}
pages_txt['file_path'] = file_path
#pages_txt['total_page_count'] = len(data.pages)
output_string = StringIO()
with open(file_path, 'rb') as in_file:
parser = PDFParser(in_file)
doc = PDFDocument(parser)
rsrcmgr = PDFResourceManager()
device = TextConverter(rsrcmgr, output_string, laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
pdf_pages = PDFPage.create_pages(doc)
pages_txt['total_page_count'] = resolve1(doc.catalog['Pages'])['Count']
for i, data in enumerate(pdf_pages):
page = i + 1
interpreter.process_page(data)
text = output_string.getvalue()
# Add pages
pages_txt['page_'+str(page)] = text
pages_txt['page_'+str(page)+'_wc'] = word_count(text)
text = ''
output_string.truncate(0)
output_string.seek(0)
# Stop if a limit is defined!
            if max_page_count is not None and page == max_page_count:
break
print("then it is processed in {} milliseconds".format(millis()-current_time))
return pages_txt
# PDFMiner - Convert all PDFs to plain text
current_time = millis()
data = []
for paper in papers:
data.append(pdfminer_parser(paper, 3))
df_all_papers = pd.DataFrame.from_dict(data)
df_all_papers_ordered_pdfminer = reorder_columns(df_all_papers)
# Write result to an excel file
df_all_papers_ordered_pdfminer.to_excel("All_Papers_In_Plain_Text_pdfminer.xlsx", engine="xlsxwriter", encoding='utf-8')
print('Total duration with pdfplumber: {} millis'.format(millis()-current_time))
df_all_papers_ordered_pdfminer
###Output
_____no_output_____
###Markdown
Troubleshooting- Unparsable charactersSome PDF files may contain unparsable characters. For example, the word `effects` in the title `The design and effects of control systems: tests of direct- and indirect-effects models` cannot be parsed properly in the file 11_AOS.pdf. Even with normal copy/paste on the computer (osx/ubuntu), this word cannot be copied properly from the PDF: in the operating system clipboard, the `ff` in `effects` disappears. - In TIKA, the title is parsed as `The design and e�ects of control systems: tests of direct- and indirect-e�ects models`. - In pdfplumber, the title is parsed as `The design and e(cid:128)ects of control systems: tests of direct- and indirect-e(cid:128)ects models` - In PyPDF2, the title is parsed as `Thedesignande•ectsofcontrolsystems:testsofdirect-andindirect-e•ectsmodels`. One workaround is to rewrite the file with pikepdf, as shown below.
###Code
import pikepdf
unparseble_character_file = 'data/11_AOS.pdf'
paper = DATA_DIRECTORY + '/11_AOS_parsed.pdf'
pdf = pikepdf.open(unparseble_character_file)
pdf.save(paper)
data = []
data.append(tika_parser(paper, 3))
df_all_papers = pd.DataFrame.from_dict(data)
df_all_papers_ordered_test = reorder_columns(df_all_papers)
df_all_papers_ordered_test
###Output
Start to process data/11_AOS_parsed.pdf at 1603403458981...then it is processed in 199 milliseconds
|
AI for Medical Diagnosis/Week 1/AI4M_C1_W1_lecture_ex_02.ipynb | ###Markdown
AI for Medicine Course 1 Week 1 lecture exercises Counting labelsAs you saw in the lecture videos, one way to avoid having class imbalance impact the loss function is to weight the losses differently. To choose the weights, you first need to calculate the class frequencies.For this exercise, you'll just get the count of each label. Later on, you'll use the concepts practiced here to calculate frequencies in the assignment!
###Code
# Import the necessary packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# Read csv file containing training data
train_df = pd.read_csv("nih/train-small.csv")
# Count up the number of instances of each class (drop non-class columns from the counts)
class_counts = train_df.sum().drop(['Image','PatientId'])
for column in class_counts.keys():
print(f"The class {column} has {train_df[column].sum()} samples")
# Plot up the distribution of counts
sns.barplot(class_counts.values, class_counts.index, color='b')
plt.title('Distribution of Classes for Training Dataset', fontsize=15)
plt.xlabel('Number of Patients', fontsize=15)
plt.ylabel('Diseases', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
Weighted Loss function Below is an example of calculating weighted loss. In the assignment, you will calculate a weighted loss function. This sample code will give you some intuition for what the weighted loss function is doing, and also help you practice some syntax you will use in the graded assignment.For this example, you'll first define a hypothetical set of true labels and then a set of predictions.Run the next cell to create the 'ground truth' labels.
###Code
# Generate an array of 4 binary label values, 3 positive and 1 negative
y_true = np.array(
[[1],
[1],
[1],
[0]])
print(f"y_true: \n{y_true}")
###Output
y_true:
[[1]
[1]
[1]
[0]]
###Markdown
Two modelsTo better understand the loss function, you will pretend that you have two models.- Model 1 always outputs a 0.9 for any example that it's given. - Model 2 always outputs a 0.1 for any example that it's given.
###Code
# Make model predictions that are always 0.9 for all examples
y_pred_1 = 0.9 * np.ones(y_true.shape)
print(f"y_pred_1: \n{y_pred_1}")
print()
y_pred_2 = 0.1 * np.ones(y_true.shape)
print(f"y_pred_2: \n{y_pred_2}")
###Output
y_pred_1:
[[0.9]
[0.9]
[0.9]
[0.9]]
y_pred_2:
[[0.1]
[0.1]
[0.1]
[0.1]]
###Markdown
Problems with the regular loss functionThe learning goal here is to notice that with a regular loss function (not a weighted loss), the model that always outputs 0.9 has a smaller loss (performs better) than model 2.- This is because there is a class imbalance, where 3 out of the 4 labels are 1.- If the data were perfectly balanced, (two labels were 1, and two labels were 0), model 1 and model 2 would have the same loss. Each would get two examples correct and two examples incorrect.- However, since the data is not balanced, the regular loss function implies that model 1 is better than model 2. Notice the shortcomings of a regular non-weighted lossSee what loss you get from these two models (model 1 always predicts 0.9, and model 2 always predicts 0.1), see what the regular (unweighted) loss function is for each model.
###Code
loss_reg_1 = -1 * np.sum(y_true * np.log(y_pred_1)) + \
-1 * np.sum((1 - y_true) * np.log(1 - y_pred_1))
print(f"loss_reg_1: {loss_reg_1:.4f}")
loss_reg_2 = -1 * np.sum(y_true * np.log(y_pred_2)) + \
-1 * np.sum((1 - y_true) * np.log(1 - y_pred_2))
print(f"loss_reg_2: {loss_reg_2:.4f}")
print(f"When the model 1 always predicts 0.9, the regular loss is {loss_reg_1:.4f}")
print(f"When the model 2 always predicts 0.1, the regular loss is {loss_reg_2:.4f}")
###Output
When the model 1 always predicts 0.9, the regular loss is 2.6187
When the model 2 always predicts 0.1, the regular loss is 7.0131
###Markdown
Notice that the loss function gives a greater loss when the predictions are always 0.1, because the data is imbalanced, and has three labels of `1` but only one label for `0`.Given a class imbalance with more positive labels, the regular loss function implies that the model with the higher prediction of 0.9 performs better than the model with the lower prediction of 0.1. How a weighted loss treats both models the sameWith a weighted loss function, you will get the same weighted loss when the predictions are all 0.9 versus when the predictions are all 0.1. - Notice how a prediction of 0.9 is 0.1 away from the positive label of 1.- Also notice how a prediction of 0.1 is 0.1 away from the negative label of 0- So model 1 and 2 are "symmetric" along the midpoint of 0.5, if you plot them on a number line between 0 and 1. Weighted Loss EquationCalculate the loss for the zero-th label (column at index 0)- The loss is made up of two terms. To make it easier to read the code, you will calculate each of these terms separately. We are giving each of these two terms a name for explanatory purposes, but these are not officially called $loss_{pos}$ or $loss_{neg}$ - $loss_{pos}$: we'll use this to refer to the loss where the actual label is positive (the positive examples). - $loss_{neg}$: we'll use this to refer to the loss where the actual label is negative (the negative examples). $$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$$$loss_{pos}^{(i)} = -1 \times weight_{pos}^{(i)} \times y^{(i)} \times log(\hat{y}^{(i)})$$$$loss_{neg}^{(i)} = -1 \times weight_{neg}^{(i)} \times (1- y^{(i)}) \times log(1 - \hat{y}^{(i)})$$ Since this sample dataset is small enough, you can calculate the positive weight to be used in the weighted loss function. To get the positive weight, count how many NEGATIVE labels are present, divided by the total number of examples.In this case, there is one negative label, and four total examples.Similarly, the negative weight is the fraction of positive labels.Run the next cell to define positive and negative weights.
###Code
# calculate the positive weight as the fraction of negative labels
w_p = 1/4
# calculate the negative weight as the fraction of positive labels
w_n = 3/4
print(f"positive weight w_p: {w_p}")
print(f"negative weight w_n {w_n}")
###Output
positive weight w_p: 0.25
negative weight w_n 0.75
###Markdown
Model 1 weighted lossRun the next two cells to calculate the two loss terms separately.Here, `loss_1_pos` and `loss_1_neg` are calculated using the `y_pred_1` predictions.
###Code
# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'
loss_1_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_1 ))
print(f"loss_1_pos: {loss_1_pos:.4f}")
# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'
loss_1_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_1 ))
print(f"loss_1_neg: {loss_1_neg:.4f}")
# Sum positive and negative losses to calculate total loss
loss_1 = loss_1_pos + loss_1_neg
print(f"loss_1: {loss_1:.4f}")
###Output
loss_1: 1.8060
###Markdown
Model 2 weighted lossNow do the same calculations for when the predictions are from `y_pred_2`. Calculate the two terms of the weighted loss function and add them together.
###Code
# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'
loss_2_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_2))
print(f"loss_2_pos: {loss_2_pos:.4f}")
# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'
loss_2_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_2))
print(f"loss_2_neg: {loss_2_neg:.4f}")
# Sum positive and negative losses to calculate total loss when the prediction is y_pred_2
loss_2 = loss_2_pos + loss_2_neg
print(f"loss_2: {loss_2:.4f}")
###Output
loss_2: 1.8060
###Markdown
Compare model 1 and model 2 weighted loss
###Code
print(f"When the model always predicts 0.9, the total loss is {loss_1:.4f}")
print(f"When the model always predicts 0.1, the total loss is {loss_2:.4f}")
###Output
When the model always predicts 0.9, the total loss is 1.8060
When the model always predicts 0.1, the total loss is 1.8060
###Markdown
What do you notice?Since you used a weighted loss, the calculated loss is the same whether the model always predicts 0.9 or always predicts 0.1. You may have also noticed that when you calculate each term of the weighted loss separately, there is a bit of symmetry when comparing between the two sets of predictions.
###Code
print(f"loss_1_pos: {loss_1_pos:.4f} \t loss_1_neg: {loss_1_neg:.4f}")
print()
print(f"loss_2_pos: {loss_2_pos:.4f} \t loss_2_neg: {loss_2_neg:.4f}")
###Output
loss_1_pos: 0.0790 loss_1_neg: 1.7269
loss_2_pos: 1.7269 loss_2_neg: 0.0790
###Markdown
Even though there is a class imbalance, where there are 3 positive labels but only one negative label, the weighted loss accounts for this by giving more weight to the negative label than to the positive label. Weighted Loss for more than one classIn this week's assignment, you will calculate the multi-class weighted loss (when there is more than one disease class that your model is learning to predict). Here, you can practice working with 2D numpy arrays, which will help you implement the multi-class weighted loss in the graded assignment.You will work with a dataset that has two disease classes (two columns)
###Code
# View the labels (true values) that you will practice with
y_true = np.array(
[[1,0],
[1,0],
[1,0],
[1,0],
[0,1]
])
y_true
###Output
_____no_output_____
###Markdown
Choosing axis=0 or axis=1You will use `numpy.sum` to count the number of times column `0` has the value 0. First, notice the difference when you set axis=0 versus axis=1
###Code
# See what happens when you set axis=0
print(f"using axis = 0 {np.sum(y_true,axis=0)}")
# Compare this to what happens when you set axis=1
print(f"using axis = 1 {np.sum(y_true,axis=1)}")
###Output
using axis = 0 [4 1]
using axis = 1 [1 1 1 1 1]
###Markdown
Notice that if you choose `axis=0`, the sum is taken for each of the two columns. This is what you want to do in this case. If you set `axis=1`, the sum is taken for each row. Calculate the weightsPreviously, you visually inspected the data to calculate the fraction of negative and positive labels. Here, you can do this programmatically.
###Code
# set the positive weights as the fraction of negative labels (0) for each class (each column)
w_p = np.sum(y_true == 0,axis=0) / y_true.shape[0]
w_p
# set the negative weights as the fraction of positive labels (1) for each class
w_n = np.sum(y_true == 1, axis=0) / y_true.shape[0]
w_n
###Output
_____no_output_____
###Markdown
In the assignment, you will train a model to try and make useful predictions. In order to make this example easier to follow, you will pretend that your model always predicts the same value for every example.
###Code
# Set model predictions where all predictions are the same
y_pred = np.ones(y_true.shape)
y_pred[:,0] = 0.3 * y_pred[:,0]
y_pred[:,1] = 0.7 * y_pred[:,1]
y_pred
###Output
_____no_output_____
###Markdown
As before, calculate the two terms that make up the loss function. Notice that you are working with more than one class (represented by columns). In this case, there are two classes.Start by calculating the loss for class `0`.$$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$$$loss_{pos}^{(i)} = -1 \times weight_{pos}^{(i)} \times y^{(i)} \times log(\hat{y}^{(i)})$$$$loss_{neg}^{(i)} = -1 \times weight_{neg}^{(i)} \times (1- y^{(i)}) \times log(1 - \hat{y}^{(i)})$$ View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the positive predictions.
###Code
# Print and view column zero of the weight
print(f"w_p[0]: {w_p[0]}")
print(f"y_true[:,0]: {y_true[:,0]}")
print(f"y_pred[:,0]: {y_pred[:,0]}")
# calculate the loss from the positive predictions, for class 0
loss_0_pos = -1 * np.sum(w_p[0] *
y_true[:, 0] *
np.log(y_pred[:, 0])
)
print(f"loss_0_pos: {loss_0_pos:.4f}")
###Output
loss_0_pos: 0.9632
###Markdown
View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the negative predictions.
###Code
# Print and view column zero of the weight
print(f"w_n[0]: {w_n[0]}")
print(f"y_true[:,0]: {y_true[:,0]}")
print(f"y_pred[:,0]: {y_pred[:,0]}")
# Calculate the loss from the negative predictions, for class 0
loss_0_neg = -1 * np.sum(
w_n[0] *
(1 - y_true[:, 0]) *
np.log(1 - y_pred[:, 0])
)
print(f"loss_0_neg: {loss_0_neg:.4f}")
# add the two loss terms to get the total loss for class 0
loss_0 = loss_0_neg + loss_0_pos
print(f"loss_0: {loss_0:.4f}")
###Output
loss_0: 1.2485
###Markdown
Now you are familiar with the array slicing that you would use when there are multiple disease classes stored in a two-dimensional array. Now it's your turn!* Can you calculate the loss for class (column) `1`?
###Code
# calculate the loss from the positive predictions, for class 1
loss_1_pos = -1 * np.sum(w_p[1] *
y_true[:, 1] *
np.log(y_pred[:, 1])
)
print('The output of loss for positive prediction, class(column) 1 :{:.4f}'.format(loss_1_pos))
###Output
The output of loss for positive prediction, class(column) 1 :0.2853
###Markdown
Expected output```CPPloss_1_pos: 0.2853```
###Code
# Calculate the loss from the negative predictions, for class 1
loss_1_neg = -1 * np.sum(
w_n[1] *
(1 - y_true[:, 1]) *
np.log(1 - y_pred[:, 1])
)
print('The output of loss for negative predictions, class(column) 1 :{:.4f}'.format(loss_1_neg))
###Output
The output of loss for negative predictions, class(column) 1 :0.9632
###Markdown
Expected output```CPPloss_1_neg: 0.9632```
###Code
# add the two loss terms to get the total loss for class 1
loss_1 = loss_1_pos + loss_1_neg
print('The total loss for class 1: {:.4f}'.format(loss_1))
###Output
The total loss for class 1: 1.2485
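###Markdown
The two per-class calculations above generalize directly. Below is a small reference sketch (not part of the graded assignment) of a function that loops over all classes and sums the weighted positive and negative terms; the small `epsilon` added inside the logarithms is our own addition to avoid `log(0)`.
###Code
def weighted_loss(y_true, y_pred, w_p, w_n, epsilon=1e-7):
    """Sum the weighted loss over all classes (columns) of y_true / y_pred."""
    total = 0.0
    for c in range(y_true.shape[1]):
        # weighted loss from the positive examples of class c
        loss_pos = -1 * np.sum(w_p[c] * y_true[:, c] * np.log(y_pred[:, c] + epsilon))
        # weighted loss from the negative examples of class c
        loss_neg = -1 * np.sum(w_n[c] * (1 - y_true[:, c]) * np.log(1 - y_pred[:, c] + epsilon))
        total += loss_pos + loss_neg
    return total
print(f"total weighted loss over both classes: {weighted_loss(y_true, y_pred, w_p, w_n):.4f}")
###Output
_____no_output_____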
|
Course_4-Big_Data_Processing_using_Apache_Spark/Module_2-Spark_Structured_APIs/1-Introduction_to_Structured_APIs/Graded_Question/graded_question.ipynb | ###Markdown
Using the data frame abstraction, calculate the number of ‘Iris_setosa’ species.
###Code
df.filter(df["species"] == 'Iris-setosa').count()
###Output
_____no_output_____
###Markdown
Is there any ‘Iris-setosa' species with sepal_width greater than 4.0 and sepal_width less than 5.0? If yes, find out how many.
###Code
df.filter((df['species']=="Iris-setosa") & (df['sepal_width']>4) & (df['sepal_width']<5)).count()
###Output
_____no_output_____
###Markdown
Analyse the 'Iris-versicolor' species of the flower and calculate the sum of all ‘sepal_width’ and ‘petal_length’ for this species.
###Code
df.filter(df['species'] == 'Iris-versicolor').groupBy('species').sum('sepal_width','petal_length').show()
###Output
###Markdown
Calculate the minimum petal_width for ‘Iris-virginica’ species.
###Code
df.filter(df['species'] == 'Iris-virginica' ).groupBy('species').min('petal_width').show()
###Output
|
Lesson04/Binary_representation_answers.ipynb | ###Markdown
Binary Representation_answers Question 1:What is the binary representation of 12?
###Code
bin(12)
###Output
_____no_output_____
###Markdown
Question 2:What is the binary representation of 4?
###Code
bin(4)
###Output
_____no_output_____
###Markdown
Question 3:Using bitwise OR, find the number which combines the bits of 12 and 4 and its binary representation.
###Code
12 | 4
bin(12 | 4)
###Output
_____no_output_____ |
lession3_homework_question.ipynb | ###Markdown
1. What's the model? why all the models are wrong, but some are useful?
###Code
# A model is a file that has been trained to recognize certain types of patterns.
# All models are wrong, but some are useful: science is not necessarily "correct"; it is only relatively correct
# within a certain period of time and under certain conditions, and some day it may be refined or even overturned.
# Scientific progress consists of testing earlier ideas with new methods, rejecting outdated notions and proposing
# new ones that fit the current era, which is why science must be falsifiable.
###Output
_____no_output_____
###Markdown
2. What's the underfitting and overfitting? List the reasons that could make model overfitting or underfitting.
###Code
# Underfitting: the model cannot fit the data well, i.e. it shows relatively high bias.
# Causes of underfitting: an unsuitable (too simple) model was chosen; the data has too few features.
# Overfitting: the model has high variance; it fits the training set very well (even perfectly) but does not
# generalize, so it performs poorly when new data is introduced.
# Causes of overfitting: too little data; noise in the training samples; too many parameters / an overly complex model.
###Output
_____no_output_____
###Markdown
3. What's the precision, recall, AUC, F1, F2score. What are they mainly target on?
###Code
# precision: of the samples predicted as positive, the fraction that are truly positive; precision = TP / (TP + FP)
# recall: of the samples that are actually positive, the fraction predicted as positive; recall = TP / (TP + FN)
# AUC: the area under the ROC curve; a threshold is chosen (commonly 0.5), and scores above it are judged
# positive while scores below it are judged negative
# ROC curve: the x-axis is the FPR and the y-axis is the TPR
# F1: the harmonic mean of precision and recall, 2/F1 = 1/precision + 1/recall
# F-beta: F_beta = (1 + beta**2) * precision * recall / (beta**2 * precision + recall); beta == 1 gives the
# F1 score and beta == 2 gives the F2 score
# precision, recall, AUC, F1 and F2 score are mainly used to evaluate classification problems
###Output
_____no_output_____
###Markdown
4. Based on our course and yourself mind, what's the machine learning?
###Code
# The main difference between machine learning and traditional analytical programming lies in who does the analysis.
# In traditional programming the programmer designs the algorithm and writes if-else style logic to produce the
# expected output, whereas machine learning lets the machine learn the analysis by itself instead of executing
# hand-written rules step by step, and the learned model can make predictions for unseen situations. Traditional
# programming is essentially a manual process: a human analyses the problem, converts it into code logic, and must
# adjust parameters to make predictions. As technology advances, human activity generates more and more data, and
# it is no longer feasible to analyse such massive data manually; the volume will only keep growing, so we need to
# exploit the power of fast computers and let them acquire the ability to learn, handling ever more analysis needs.
###Output
_____no_output_____
###Markdown
5. "正确定义了机器学习模型的评价标准(evaluation), 问题基本上就已经解决一半". 这句话是否正确?你是怎么看待的?
###Code
# I think this statement makes a lot of sense. The evaluation metric of a machine learning model should match the
# business requirement, and different requirements call for different metrics. Not only machine learning problems
# but any problem at work should start from clarifying the business need; only then can a valuable model be
# designed. So before building a model we should first decide which metric will be used to evaluate it: for
# classification we would choose AUC or F-score, for regression MAE or MSE, for clustering the Rand index or
# mutual information. Without the right evaluation metric it is hard to obtain a good model. Models serve the
# business, and requirements differ across scenarios; for example, when classifying good vs. defective products
# we can set different thresholds, and only a threshold chosen according to the business need produces the best
# result. As society keeps evolving, a model cannot stay unchanged after it is built; it must be tuned continuously
# to keep up, and during tuning the evaluation metric is a crucial choice that helps us keep improving the model.
###Output
_____no_output_____ |
Notebooks/03_siamese/03_siamese_triplet_mnist.ipynb | ###Markdown
Siamese networks Colab preparation
###Code
%load_ext autoreload
%autoreload 2
from os import path
import numpy as np
import random
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
###Output
_____no_output_____
###Markdown
1. Setup and initializationsWe'll go through learning feature embeddings using different loss functions on MNIST dataset. This is just for visualization purposes, thus we'll be using 2-dimensional embeddings which isn't the best choice in practice.For every experiment the same embedding network is used (`32 conv 5x5 -> ReLU -> MaxPool 2x2 -> 64 conv 5x5 -> ReLU -> MaxPool 2x2 -> Fully Connected 256 -> ReLU -> Fully Connected 256 -> ReLU -> Fully Connected 2`) with the same hyperparameters.
###Code
class ExperimentParams():
def __init__(self):
self.num_classes = 10
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.batch_size = 256
self.lr = 1e-2
self.num_epochs = 10
self.num_workers = 4
self.data_dir = '/home/docker_user/'
args = ExperimentParams()
###Output
_____no_output_____
###Markdown
1.1 Prepare datasetWe'll be working on MNIST dataset
###Code
mean, std = 0.1307, 0.3081
train_dataset = MNIST(f'{args.data_dir}/data/MNIST', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((mean,), (std,))
]))
test_dataset = MNIST(f'{args.data_dir}/data/MNIST', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((mean,), (std,))
]))
###Output
_____no_output_____
###Markdown
1.2 Common setup
###Code
mnist_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
def plot_embeddings(embeddings, targets, title='',xlim=None, ylim=None):
plt.figure(figsize=(10,10))
for i in range(10):
inds = np.where(targets==i)[0]
plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=colors[i])
if xlim:
plt.xlim(xlim[0], xlim[1])
if ylim:
plt.ylim(ylim[0], ylim[1])
plt.legend(mnist_classes)
plt.title(title)
def extract_embeddings(dataloader, model, args):
with torch.no_grad():
model.eval()
embeddings = np.zeros((len(dataloader.dataset), 2))
labels = np.zeros(len(dataloader.dataset))
k = 0
for images, target in dataloader:
images = images.to(args.device)
embeddings[k:k+len(images)] = model.get_embedding(images).data.cpu().numpy()
labels[k:k+len(images)] = target.numpy()
k += len(images)
return embeddings, labels
def get_raw_images(dataloader,mean=0.1307, std=0.3081):
raw_images = np.zeros((len(dataloader.dataset), 1, 28, 28))
k = 0
for input, target in dataloader:
raw_images[k:k+len(input)] = (input*std + mean).data.cpu().numpy()
k += len(input)
return raw_images
def show(img, title=None):
# img is a torch.Tensor
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
plt.axis('off')
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
###Output
_____no_output_____
###Markdown
2. Baseline: Classification with softmaxWe'll train the model for classification and use outputs of penultimate layer as embeddings. We will define our base embedding architecture which will serve as common backbone for our experiments 2.1 Architecture ExerciseComplete the missing blocks in the definition of the following `EmbeddingNet` architecture: (`32 conv 5x5 -> ReLU -> MaxPool 2x2 -> 64 conv 5x5 -> ReLU -> MaxPool 2x2 -> Fully Connected 256 -> ReLU -> Fully Connected 256 -> ReLU -> Fully Connected 2`)
###Code
class EmbeddingNet(nn.Module):
def __init__(self):
super(EmbeddingNet, self).__init__()
# self.conv1 = nn.Conv2d(1, ...)
# self.conv2 = ...
# self.fc1 = ...
# self.fc2 = ...
# self.fc3 = ...
def forward(self, x, debug=False):
x1 = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=2, stride=2)
# output = ...
if debug == True:
print(f'input: {x.size()}')
print(f'x1: {x1.size()}')
return output
def get_embedding(self, x):
return self.forward(x)
###Output
_____no_output_____
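###Markdown
One possible completion of the exercise above, shown only as a reference sketch (layer sizes follow the architecture listed earlier: with 28x28 inputs and 5x5 convolutions without padding, the feature map is 64 x 4 x 4 before the fully connected layers). The class gets a `Solution` suffix so it does not overwrite your own `EmbeddingNet`.
###Code
class EmbeddingNetSolution(nn.Module):
    def __init__(self):
        super(EmbeddingNetSolution, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.fc1 = nn.Linear(64 * 4 * 4, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 2)
    def forward(self, x, debug=False):
        x1 = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=2, stride=2)   # 32 x 12 x 12
        x2 = F.max_pool2d(F.relu(self.conv2(x1)), kernel_size=2, stride=2)  # 64 x 4 x 4
        x3 = x2.view(x2.size(0), -1)                                        # flatten to 1024
        output = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x3)))))           # 2-d embedding
        if debug:
            print(f'input: {x.size()} | x1: {x1.size()} | x2: {x2.size()} | output: {output.size()}')
        return output
    def get_embedding(self, x):
        return self.forward(x)
###Output
_____no_output_____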
###Markdown
If you want to better check the sizes of the hidden states and do debugging, you can add a `debug` variable in the `forward` function just like above
###Code
input = torch.zeros(1, 1, 28, 28)
net = EmbeddingNet()
net(input,debug=True)
###Output
_____no_output_____
###Markdown
Now let's define a classification net that will add a fully connected layer on top of `EmbeddingNet` ExerciseFill in the missing spots in the `forward` pass:
###Code
class ClassificationNet(nn.Module):
def __init__(self, embedding_net, num_classes):
super(ClassificationNet, self).__init__()
self.embedding_net = embedding_net
self.prelu = nn.PReLU()
self.fc = nn.Linear(2, num_classes)
def forward(self, x, debug=False):
embedding = None
output = self.fc(self.prelu(embedding))
# if debug == True:
# print(f'input: {x.size()}')
# print(f'embedding: {embedding.size()}')
# print(f'output: {output.size()}')
return output
def get_embedding(self, x):
return self.prelu(None)
###Output
_____no_output_____
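###Markdown
A possible completion of the classification exercise, again only a reference sketch with a `Solution` suffix so it does not replace your own class.
###Code
class ClassificationNetSolution(nn.Module):
    def __init__(self, embedding_net, num_classes):
        super(ClassificationNetSolution, self).__init__()
        self.embedding_net = embedding_net
        self.prelu = nn.PReLU()
        self.fc = nn.Linear(2, num_classes)
    def forward(self, x, debug=False):
        embedding = self.embedding_net(x)        # 2-d embedding from the backbone
        output = self.fc(self.prelu(embedding))  # class scores
        if debug:
            print(f'input: {x.size()} | embedding: {embedding.size()} | output: {output.size()}')
        return output
    def get_embedding(self, x):
        return self.prelu(self.embedding_net(x))
###Output
_____no_output_____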
###Markdown
2.2 Training
###Code
# Set up data loaders
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
embedding_net = EmbeddingNet()
model = ClassificationNet(embedding_net, num_classes=args.num_classes)
loss_fn = torch.nn.CrossEntropyLoss()
model.to(args.device)
loss_fn.to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
train_embeddings_baseline, train_labels_baseline = extract_embeddings(train_loader, model, args)
plot_embeddings(train_embeddings_baseline, train_labels_baseline, 'Train embeddings before training')
def train_classif_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=50):
model.train()
losses = []
total_loss, total_corrects, num_samples = 0, 0, 0
corrects = 0
for batch_idx, (data, target) in enumerate(train_loader):
num_samples += data.size(0)
data, target = data.to(args.device), target.to(args.device)
optimizer.zero_grad()
outputs = model(data)
loss = loss_fn(outputs, target)
losses.append(loss.data.item())
_,preds = torch.max(outputs.data,1)
corrects += torch.sum(preds == target.data).cpu()
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f} \tAccuracy: {}'.format(
                batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), np.mean(losses), float(total_corrects)/num_samples))
total_loss += np.sum(losses)
total_corrects += corrects
losses, corrects = [], 0
return total_loss/(batch_idx + 1), total_corrects/num_samples
def test_classif_epoch(test_loader, model, loss_fn, args, log_interval=50):
with torch.no_grad():
model.eval()
losses, corrects = [], 0
num_samples = 0
for batch_idx, (data, target) in enumerate(test_loader):
num_samples += data.size(0)
data, target = data.to(args.device), target.to(args.device)
outputs = model(data)
loss = loss_fn(outputs, target)
losses.append(loss.data.item())
_,preds = torch.max(outputs.data,1)
corrects += torch.sum(preds == target.data).cpu()
return np.sum(losses)/(batch_idx + 1), corrects/num_samples
start_epoch = 0
for epoch in range(0, start_epoch):
scheduler.step()
for epoch in range(start_epoch, args.num_epochs):
scheduler.step()
train_loss, train_accuracy = train_classif_epoch(train_loader, model, loss_fn, optimizer, args)
message = 'Epoch: {}/{}. Train set: Average loss: {:.4f} Average accuracy: {:.4f}'.format(
epoch + 1, args.num_epochs, train_loss, train_accuracy)
val_loss, val_accuracy = test_classif_epoch(test_loader, model, loss_fn, args)
message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f} Average accuracy: {:.4f}'.format(epoch + 1, args.num_epochs,
val_loss, val_accuracy)
print(message)
###Output
_____no_output_____
###Markdown
2.3 Visualizations
###Code
train_embeddings_baseline, train_labels_baseline = extract_embeddings(train_loader, model, args)
plot_embeddings(train_embeddings_baseline, train_labels_baseline, 'Train embeddings classification')
test_embeddings_baseline, test_labels_baseline = extract_embeddings(test_loader, model, args)
plot_embeddings(test_embeddings_baseline, test_labels_baseline, 'Test embeddings classification')
###Output
_____no_output_____
###Markdown
While the embeddings look separable (which is what we trained them for), they don't have good metric properties. They might not be the best choice as a descriptor for new classes. 3. Siamese networkNow we'll train a siamese network that takes a pair of images and trains the embeddings so that the distance between them is minimized if their from the same class or greater than some margin value if they represent different classes.We'll minimize a contrastive loss function*:$$L_{contrastive}(x_0, x_1, y) = \frac{1}{2} y \lVert f(x_0)-f(x_1)\rVert_2^2 + \frac{1}{2}(1-y)\{max(0, m-\lVert f(x_0)-f(x_1)\rVert_2)\}^2$$*Raia Hadsell, Sumit Chopra, Yann LeCun, [Dimensionality reduction by learning an invariant mapping](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf), CVPR 2006* 3.1 ArchitectureWe will first define the siamese architecture on top of our `EmbeddingNet` ExerciseFill in the forward part of `SiameseNet`
###Code
class SiameseNet(nn.Module):
def __init__(self, embedding_net):
super(SiameseNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2):
# fill in the missing 2 lines :)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
###Output
_____no_output_____
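###Markdown
A possible completion of the `SiameseNet` exercise (reference sketch): the same embedding network is simply applied to both inputs.
###Code
class SiameseNetSolution(nn.Module):
    def __init__(self, embedding_net):
        super(SiameseNetSolution, self).__init__()
        self.embedding_net = embedding_net
    def forward(self, x1, x2):
        output1 = self.embedding_net(x1)  # shared weights for both branches
        output2 = self.embedding_net(x2)
        return output1, output2
    def get_embedding(self, x):
        return self.embedding_net(x)
###Output
_____no_output_____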
###Markdown
3.2 Data loaderWe will also need to adapt our data loader to fetch pairs of images
###Code
from torch.utils.data import Dataset
from torch.utils.data.sampler import BatchSampler
from PIL import Image
class SiameseMNIST(Dataset):
"""
train mode: For each sample creates randomly a positive or a negative pair
test mode: Creates fixed pairs for testing
"""
def __init__(self, mnist_dataset):
self.mnist_dataset = mnist_dataset
self.train = self.mnist_dataset.train
self.transform = self.mnist_dataset.transform
if self.train:
self.train_labels = self.mnist_dataset.train_labels
self.train_data = self.mnist_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
self.test_labels = self.mnist_dataset.test_labels
self.test_data = self.mnist_dataset.test_data
self.labels_set = set(self.test_labels.numpy())
'''
create a dictionary with an entry key for each label and the value an array storing
the indices of the images having the respective label
'''
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(42)
# itereate through test_data and randomly select samples with the same label
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_data), 2)]
# itereate through test_data, create a list of all labels different from current one and then
# randomly select samples with having one of these labels
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_data), 2)]
# format: [index1, index2, label(0/1)]
self.test_pairs = positive_pairs + negative_pairs
def __getitem__(self, index):
# at train time pairs of samples are fetched randomly on the fly
if self.train:
# select random label,i.e. similar (1) or non-similar (0) images
target = np.random.randint(0, 2)
img1, label1 = self.train_data[index], self.train_labels[index].item()
if target == 1:
# select an image with the same label as img1
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
# eliminate label1 from the set of possible labels to select
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
# randomly select an image having a label from this subset
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.train_data[siamese_index]
else:
img1 = self.test_data[self.test_pairs[index][0]]
img2 = self.test_data[self.test_pairs[index][1]]
target = self.test_pairs[index][2]
img1 = Image.fromarray(img1.numpy(), mode='L')
img2 = Image.fromarray(img2.numpy(), mode='L')
if self.transform is not None:
img1 = self.transform(img1)
img2 = self.transform(img2)
return (img1, img2), target
def __len__(self):
return len(self.mnist_dataset)
###Output
_____no_output_____
###Markdown
3.3 Loss function $$L_{contrastive}(x_0, x_1, y) = \frac{1}{2} y \lVert f(x_0)-f(x_1)\rVert_2^2 + \frac{1}{2}(1-y)\{max(0, m-\lVert f(x_0)-f(x_1)\rVert_2)\}^2$$ ExerciseFill in the missing parts of the `contrastive loss`
###Code
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output1, output2, target, size_average=True):
# compute squared distances between output2 and output1
squared_distances = None
# add the second term from them loss. You can use ReLU for compressing the max formula
losses = 0.5 * (target.float() * squared_distances +
None )
return losses.mean() if size_average else losses.sum()
###Output
_____no_output_____
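###Markdown
A possible completion of the contrastive loss exercise (reference sketch, following the equation above): the positive term uses the squared distance directly and the negative term uses the hinge `max(0, m - d)` squared, written here with `F.relu`.
###Code
class ContrastiveLossSolution(nn.Module):
    def __init__(self, margin):
        super(ContrastiveLossSolution, self).__init__()
        self.margin = margin
        self.eps = 1e-9
    def forward(self, output1, output2, target, size_average=True):
        squared_distances = (output2 - output1).pow(2).sum(1)  # ||f(x0) - f(x1)||^2
        losses = 0.5 * (target.float() * squared_distances +
                        (1 - target).float() * F.relu(self.margin - (squared_distances + self.eps).sqrt()).pow(2))
        return losses.mean() if size_average else losses.sum()
###Output
_____no_output_____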
###Markdown
3.4 Training
###Code
# Set up data loaders
siamese_train_dataset = SiameseMNIST(train_dataset) # Returns pairs of images and target same/different
siamese_test_dataset = SiameseMNIST(test_dataset)
args.batch_size = 128
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
margin = 1.
embedding_net = EmbeddingNet()
model = SiameseNet(embedding_net)
loss_fn = ContrastiveLoss(margin)
model.to(args.device)
loss_fn.to(args.device)
args.lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
def train_siamese_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=100):
model.train()
losses = []
total_loss, num_samples = 0, 0
for batch_idx, (data, target) in enumerate(train_loader):
num_samples += data[0].size(0)
data = tuple(d.to(args.device) for d in data)
target = target.to(args.device)
optimizer.zero_grad()
outputs = model(data[0], data[1])
# alternatively: outputs = model(*data)
loss = loss_fn(outputs[0], outputs[1], target)
# alternatively: loss = loss_fn(*outputs, target)
losses.append(loss.data.item())
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f} '.format(
batch_idx * len(data[0]), len(train_loader.dataset),
100. * batch_idx / len(train_loader), np.mean(losses)))
total_loss += np.sum(losses)
losses = []
return total_loss/(batch_idx + 1)
def test_siamese_epoch(test_loader, model, loss_fn, args, log_interval=50):
with torch.no_grad():
model.eval()
losses = []
num_samples = 0
for batch_idx, (data, target) in enumerate(test_loader):
num_samples += data[0].size(0)
data = tuple(d.to(args.device) for d in data)
target = target.to(args.device)
outputs = model(data[0], data[1])
loss = loss_fn(outputs[0], outputs[1], target)
losses.append(loss.data.item())
return np.sum(losses)/(batch_idx + 1)
start_epoch = 0
# needed for annealing learning rate in case of resuming of training
for epoch in range(0, start_epoch):
scheduler.step()
# main training loop
for epoch in range(start_epoch, args.num_epochs):
scheduler.step()
# train stage
train_loss = train_siamese_epoch(siamese_train_loader, model, loss_fn, optimizer, args)
message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(
epoch + 1, args.num_epochs, train_loss)
# testing/validation stage
test_loss = test_siamese_epoch(siamese_test_loader, model, loss_fn, args)
message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, args.num_epochs,
test_loss)
print(message)
###Output
_____no_output_____
###Markdown
3.5 Visualizations
###Code
train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, model, args)
plot_embeddings(train_embeddings_cl, train_labels_cl, title='Train embeddings (constrastive loss)')
test_embeddings_cl, test_labels_cl = extract_embeddings(test_loader, model, args)
plot_embeddings(test_embeddings_cl, test_labels_cl, title='Test embeddings (contrastive loss)')
###Output
_____no_output_____
###Markdown
In order to compare two vectors $x_1$ and $x_2$ we can use the `cosine similarity` $$\text{similarity}=\frac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert_2, \epsilon)}$$An alternative is the Euclidean distance. In order to save computation at query time we can pre-process our vectors and L2-normalize them. Then we can simply perform the comparison with a dot product. ExercisePerform L2-normalization on the embeddings using `numpy`
###Code
# L2-normalize embeddings
test_embeddings_norm = ....
###Output
_____no_output_____
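###Markdown
One way to do the normalization with `numpy` (the same pattern is used later for the triplet embeddings): divide each row by its L2 norm.
###Code
# L2-normalize the test embeddings row-wise (reference sketch)
test_embeddings_norm = test_embeddings_cl / np.linalg.norm(test_embeddings_cl, axis=-1, keepdims=True)
###Output
_____no_output_____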
###Markdown
ExerciseNow write a function `most_sim` that computes all dot products between a query vector and the dataset, extracts the indices of the `topk` most similar vectors and puts them in a list of `(index, similarity)` tuples.
###Code
def most_sim(x, emb, topk=6):
return None
test_images_raw = get_raw_images(test_loader)
def launch_query(test_embeddings_norm, test_images_raw, query_id=None):
query_id = random.randint(0, test_embeddings_norm.shape[0]) if query_id is None else query_id
query_vector = test_embeddings_norm[query_id,:]
print(f'query_id: {query_id} | query_embedding: {query_vector}')
knns = most_sim(query_vector, test_embeddings_norm)
knn_images = np.array([test_images_raw[x[0]] for x in knns ])
title=['q: 1.0', f'1nn: {knns[1][1]:.3}', f'2nn: {knns[2][1]:.3}',
f'3nn: {knns[3][1]:.3}', f'4nn: {knns[4][1]:.3}', f'5nn: {knns[5][1]:.3}']
show(torchvision.utils.make_grid(torch.from_numpy(knn_images)), title=title)
# print(knns)
for i in range(5):
launch_query(test_embeddings_norm, test_images_raw)
###Output
_____no_output_____
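###Markdown
A possible implementation of `most_sim` (reference sketch): since the embeddings are L2-normalized, the dot product equals the cosine similarity, so we sort the dot products in descending order and keep the `topk` entries as `(index, similarity)` tuples. Rebinding `most_sim = most_sim_solution` before calling `launch_query` would use this version.
###Code
def most_sim_solution(x, emb, topk=6):
    sims = emb @ x                      # dot product against every embedding
    top_idx = np.argsort(-sims)[:topk]  # indices of the topk largest similarities
    return [(int(i), float(sims[i])) for i in top_idx]
###Output
_____no_output_____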
###Markdown
Triplet networkWe'll train a triplet network that takes anchor, positive (same class as anchor) and negative (different class than anchor) examples. The objective is to learn embeddings such that the anchor is closer to the positive example than it is to the negative example by some margin value.Source: [2] *Schroff, Florian, Dmitry Kalenichenko, and James Philbin. [Facenet: A unified embedding for face recognition and clustering.](https://arxiv.org/abs/1503.03832) CVPR 2015.***Triplet loss**: $L_{triplet}(x_a, x_p, x_n) = \max(0, m + \lVert f(x_a)-f(x_p)\rVert_2^2 - \lVert f(x_a)-f(x_n)\rVert_2^2)$ 4.1 ArchitectureWe will first define the triplet architecture on top of our `EmbeddingNet` ExerciseFill in the forward part of `TripletNet`
###Code
class TripletNet(nn.Module):
def __init__(self, embedding_net):
super(TripletNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2, x3):
# missing 3 lines here
return output1, output2, output3
def get_embedding(self, x):
return self.embedding_net(x)
###Output
_____no_output_____
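###Markdown
A possible completion of the `TripletNet` exercise (reference sketch): the shared embedding network is applied to the anchor, positive and negative inputs.
###Code
class TripletNetSolution(nn.Module):
    def __init__(self, embedding_net):
        super(TripletNetSolution, self).__init__()
        self.embedding_net = embedding_net
    def forward(self, x1, x2, x3):
        output1 = self.embedding_net(x1)  # anchor
        output2 = self.embedding_net(x2)  # positive
        output3 = self.embedding_net(x3)  # negative
        return output1, output2, output3
    def get_embedding(self, x):
        return self.embedding_net(x)
###Output
_____no_output_____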
###Markdown
4.2 Data loaderWe will also need to adapt our data loader to fetch triplets of images
###Code
from torch.utils.data import Dataset
from torch.utils.data.sampler import BatchSampler
from PIL import Image
class TripletMNIST(Dataset):
"""
Train: For each sample (anchor) randomly chooses a positive and negative samples
Test: Creates fixed triplets for testing
"""
def __init__(self, mnist_dataset):
self.mnist_dataset = mnist_dataset
self.train = self.mnist_dataset.train
self.transform = self.mnist_dataset.transform
if self.train:
self.train_labels = self.mnist_dataset.train_labels
self.train_data = self.mnist_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
self.test_labels = self.mnist_dataset.test_labels
self.test_data = self.mnist_dataset.test_data
# generate fixed triplets for testing
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
triplets = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
])
]
for i in range(len(self.test_data))]
self.test_triplets = triplets
def __getitem__(self, index):
if self.train:
img1, label1 = self.train_data[index], self.train_labels[index].item()
positive_index = index
while positive_index == index:
positive_index = np.random.choice(self.label_to_indices[label1])
negative_label = np.random.choice(list(self.labels_set - set([label1])))
negative_index = np.random.choice(self.label_to_indices[negative_label])
img2 = self.train_data[positive_index]
img3 = self.train_data[negative_index]
else:
img1 = self.test_data[self.test_triplets[index][0]]
img2 = self.test_data[self.test_triplets[index][1]]
img3 = self.test_data[self.test_triplets[index][2]]
img1 = Image.fromarray(img1.numpy(), mode='L')
img2 = Image.fromarray(img2.numpy(), mode='L')
img3 = Image.fromarray(img3.numpy(), mode='L')
if self.transform is not None:
img1 = self.transform(img1)
img2 = self.transform(img2)
img3 = self.transform(img3)
return (img1, img2, img3), []
def __len__(self):
return len(self.mnist_dataset)
###Output
_____no_output_____
###Markdown
4.3 Loss function ExerciseFill in the missing parts of the `triplet loss`: $L_{triplet}(x_a, x_p, x_n) = \max(0, m + \lVert f(x_a)-f(x_p)\rVert_2^2 - \lVert f(x_a)-f(x_n)\rVert_2^2)$
###Code
class TripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = None # fill in code
distance_negative = None # fill in code
# you can again use ReLU instead of max
losses = None # fill in code
return losses.mean() if size_average else losses.sum()
###Output
_____no_output_____
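###Markdown
A possible completion of the triplet loss exercise (reference sketch, following the equation above), with the hinge written via `F.relu`.
###Code
class TripletLossSolution(nn.Module):
    def __init__(self, margin):
        super(TripletLossSolution, self).__init__()
        self.margin = margin
    def forward(self, anchor, positive, negative, size_average=True):
        distance_positive = (anchor - positive).pow(2).sum(1)  # squared distance anchor-positive
        distance_negative = (anchor - negative).pow(2).sum(1)  # squared distance anchor-negative
        losses = F.relu(distance_positive - distance_negative + self.margin)
        return losses.mean() if size_average else losses.sum()
###Output
_____no_output_____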
###Markdown
4.4 Training
###Code
triplet_train_dataset = TripletMNIST(train_dataset) # Returns triplets of images
triplet_test_dataset = TripletMNIST(test_dataset)
args.batch_size = 128
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
triplet_train_loader = torch.utils.data.DataLoader(triplet_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
triplet_test_loader = torch.utils.data.DataLoader(triplet_test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
margin = 1.
embedding_net = EmbeddingNet()
model = TripletNet(embedding_net)
loss_fn = TripletLoss(margin)
model.to(args.device)
loss_fn.to(args.device)
args.lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 5
log_interval = 100
###Output
_____no_output_____
###Markdown
ExerciseCode your own train/test epoch functions similarly to the previous examples. Watch out for some differences, though: each batch now contains three tensors (anchor, positive, negative) and `TripletMNIST` returns an empty target that can be ignored.
###Code
def train_triplet_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=100):
model.train()
losses = []
total_loss, num_samples = 0, 0
# fill in code here
return total_loss/(batch_idx + 1)
def test_triplet_epoch(test_loader, model, loss_fn, args, log_interval=50):
losses = []
num_samples = 0
# fill in code here
return np.sum(losses)/(batch_idx + 1)
start_epoch = 0
# needed for annealing learning rate in case of resuming of training
for epoch in range(0, start_epoch):
scheduler.step()
# main training loop
for epoch in range(start_epoch, args.num_epochs):
scheduler.step()
# train stage
train_loss = train_triplet_epoch(triplet_train_loader, model, loss_fn, optimizer, args)
message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(
epoch + 1, args.num_epochs, train_loss)
# testing/validation stage
test_loss = test_triplet_epoch(triplet_test_loader, model, loss_fn, args)
message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, args.num_epochs,
test_loss)
print(message)
###Output
_____no_output_____
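###Markdown
A possible way to write the triplet train/test epochs (reference sketch): compared with the siamese version, the model and the loss take three arguments and the empty target returned by `TripletMNIST` is simply ignored.
###Code
def train_triplet_epoch_solution(train_loader, model, loss_fn, optimizer, args, log_interval=100):
    model.train()
    losses = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data = tuple(d.to(args.device) for d in data)  # (anchor, positive, negative)
        optimizer.zero_grad()
        outputs = model(data[0], data[1], data[2])
        loss = loss_fn(outputs[0], outputs[1], outputs[2])
        losses.append(loss.data.item())
        total_loss += loss.data.item()
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f} '.format(
                batch_idx * len(data[0]), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), np.mean(losses)))
            losses = []
    return total_loss / (batch_idx + 1)
def test_triplet_epoch_solution(test_loader, model, loss_fn, args, log_interval=50):
    with torch.no_grad():
        model.eval()
        losses = []
        for batch_idx, (data, target) in enumerate(test_loader):
            data = tuple(d.to(args.device) for d in data)
            outputs = model(data[0], data[1], data[2])
            loss = loss_fn(outputs[0], outputs[1], outputs[2])
            losses.append(loss.data.item())
    return np.sum(losses) / (batch_idx + 1)
###Output
_____no_output_____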
###Markdown
4.5 Visualizations
###Code
train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader, model, args)
plot_embeddings(train_embeddings_tl, train_labels_tl, title='Train triplet embeddings')
test_embeddings_tl, test_labels_tl = extract_embeddings(test_loader, model, args)
plot_embeddings(test_embeddings_tl, test_labels_tl, title='Val triplet embeddings')
# L2-normalize embeddings
test_embeddings_tl_norm = test_embeddings_tl / np.linalg.norm(test_embeddings_tl, axis=-1, keepdims=True)
test_images_raw = get_raw_images(test_loader)
for i in range(5):
launch_query(test_embeddings_tl_norm, test_images_raw)
###Output
_____no_output_____ |
PythonJupyterNotebooks/Week12-Day2-Activity4-voice_blockchain-solved.ipynb | ###Markdown
The Voice of the BlockchainCanada lies at the frontier of the blockchain sector with increasing adoption rates and favorable regulations. In this activity you will retrieve news articles regarding blockchain in Canada for both English and French languages to capture the voice of the blockchain.
###Code
# Initial imports
import os
import pandas as pd
from path import Path
from dotenv import load_dotenv
from newsapi import NewsApiClient
# Load environment variables and retrieve the News API key
load_dotenv()
api_key = os.getenv("NEWSAPI")
# Create the newsapi client
newsapi = NewsApiClient(api_key=api_key)
###Output
_____no_output_____
###Markdown
Getting News Articles in EnglishIn this section you have to fetch all the news articles using the News API with the keywords `blockchain`, `canada`, and `2020` in English.
###Code
# Fetch news about Canada and Blockchain in 2020 in the English language
blockchain_news_en = newsapi.get_everything(
q="blockchain AND canada AND 2020",
language="en"
)
# Show the total number of news
blockchain_news_en["totalResults"]
###Output
_____no_output_____
###Markdown
Getting News Articles in FrenchFetching news in French will require keywords on this language, so retrieve all the news articles using the News API using the keywords `blockchain`, `canada`, and `2020`.
###Code
# Fetch news about Canada and Blockchain in 2020 in the French language
blockchain_news_fr = newsapi.get_everything(
q="blockchain AND canada AND 2020",
language="fr"
)
# Show the total number of news
blockchain_news_fr["totalResults"]
###Output
_____no_output_____
###Markdown
Create a DataFrame with All the ResultsThe first task in this section is to create a function called `create_df(news, language)` that will transform the `articles` list into a DataFrame. This function will receive two parameters: `news` is the articles' list and `language` is a string to specify the language of the news articles.The resulting DataFrame should have the following columns:* Title: The article's title* Description: The article's description* Text: The article's content* Date: The date when the article was published, using the format `YYYY-MM-DD` (e.g. 2019-07-11)* Language: A string specifying the news language (`en` for English, `fr` for French)
###Code
# Function to create a dataframe for english news and french news
def create_df(news, language):
articles = []
for article in news:
try:
title = article["title"]
description = article["description"]
text = article["content"]
date = article["publishedAt"][:10]
articles.append({
"title": title,
"description": description,
"text": text,
"date": date,
"language": language
})
except AttributeError:
pass
return pd.DataFrame(articles)
###Output
_____no_output_____
###Markdown
Use the create_df() function to create a DataFrame for the English news and another for the French news.
###Code
# Create a DataFrame with the news in English
blockchain_en_df = create_df(blockchain_news_en["articles"], "en")
# Create a DataFrame with the news in French
blockchain_fr_df = create_df(blockchain_news_fr["articles"], "fr")
###Output
_____no_output_____
###Markdown
Concatenate both DataFrames having the English news at the top and the French news at the bottom.
###Code
# Concatenate dataframes
blockchain_df = pd.concat([blockchain_en_df, blockchain_fr_df])
# Show the head articles (they are in English)
blockchain_df.head()
# Show the tail articles (they are in French)
blockchain_df.tail()
###Output
_____no_output_____
###Markdown
Save the final DataFrame as a CSV file for further analysis in the forthcoming activities.
###Code
# Save to CSV
file_path = Path("../Resources/blockchain_news_en_fr.csv")
blockchain_df.to_csv(file_path, index=False, encoding='utf-8-sig')
###Output
_____no_output_____ |
Pymaceuticals/pymaceuticals_starter_.ipynb | ###Markdown
Analysis Section: The three observations made in this exhibit/challenge are: 1. This will actually be your last step once you run code below 2. You will insert this cell above the one below 3. How to do markdown in Jupyter Notebook? https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Working%20With%20Markdown%20Cells.html
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
# Display the data table for preview
# Checking the number of mice.
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# Optional: Get all the data for the duplicate mouse ID.
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# Checking the number of mice in the clean DataFrame.
###Output
_____no_output_____
###Markdown
Summary Statistics
###Code
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
###Output
_____no_output_____
###Markdown
Bar and Pie Charts
###Code
# Generate a bar plot showing the total number of timepoints for all mice tested for each drug regimen using Pandas.
# Generate a bar plot showing the total number of timepoints for all mice tested for each drug regimen using pyplot.
# Generate a pie plot showing the distribution of female versus male mice using Pandas
# Generate a pie plot showing the distribution of female versus male mice using pyplot
###Output
_____no_output_____
###Markdown
Quartiles, Outliers and Boxplots
###Code
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# Put treatments into a list for for loop (and later for plot labels)
# Create empty list to fill with tumor vol data (for plotting)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
# add subset
# Determine outliers using upper and lower bounds
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
###Output
_____no_output_____
###Markdown
Line and Scatter Plots
###Code
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
###Output
_____no_output_____
###Markdown
Correlation and Regression
###Code
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
###Output
The correlation between mouse weight and the average tumor volume is 0.84
|
Session_1/3_Exploring_PYNQ-Z1.ipynb | ###Markdown
Exploring PYNQ-Z1 ---- Contents * [ARM A9 Processor Subsystem](ARM-A9-Processor-Subsystem)* [Network Status](Network-Status)* [Operating System](Operating-System)* [Python Details](Python-Details) ---- GoalThe aim of this notebook is to help you familiarize yourself with the Zynq Processing System and the underlying OS. You will see how to run shell commands and Python commands to query the underlying hardware and software and find out which packages are included in the PYNQ image. ARM A9 Processor Subsystem Note:Starting a code cell with a bang character, e.g. `!`, instructs the IPython REPL to treat the code on that line as an OS shell command
###Code
!cat /proc/cpuinfo
###Output
_____no_output_____
###Markdown
Available DRAM ...
###Code
!cat /proc/meminfo | grep 'Mem*'
###Output
_____no_output_____
###Markdown
----[Contents](Contents)---- Network Status Wired Ethernet connection
###Code
!ifconfig eth0
###Output
_____no_output_____
###Markdown
Confirm local hostname
###Code
!hostname
###Output
_____no_output_____
###Markdown
----[Contents](Contents)---- Operating System Verify Linux version ...
###Code
!cat /etc/os-release | grep VERSION
###Output
_____no_output_____
###Markdown
----[Contents](Contents)---- Python Details NoteHere we are executing a Python script rather than shell commands
###Code
import sys
print('\nPython Version:\n {} \n\nPython Platform:\n{}\n'.format(sys.version, sys.platform))
print ('Python path settings:')
for path_entry in sys.path:
print(path_entry)
# List of all Python packages currently installed
!pip3.6 list --format=columns
# On being 'Pythonic'
import this
###Output
_____no_output_____ |
model-layer/knowledge-distillation-module/DE-RRD/logs & learning_curve.ipynb | ###Markdown
In this notebook, we provide the learning curves of DE and RRD. Please note that *Topology Distillation (KDD'21)*, which is a follow-up study of DE, is available in https://github.com/SeongKu-Kang/Topology_Distillation_KDD21. Also, *IR-RRD (Information Sciences'21)*, which is a follow-up study of RRD, is available in https://github.com/SeongKu-Kang/IR-RRD_INS21.
###Code
import matplotlib.pyplot as plt
def read_log(filename, measure='H@5'):
with open(filename, 'r', encoding='utf8') as f:
lines = f.readlines()
result = []
for line in lines:
if ('valid' in line) and (measure in line):
start_idx = line.find(measure)
result.append(float(line[start_idx+len(measure)+2: start_idx+len(measure)+2 + 6]))
return result
# bpr
path = './logs/'
student_log = read_log(path + 'student.log')
DE_log = read_log(path + 'DE.log')
RRD_log = read_log(path + 'URRD.log')
epoch = 100
plt.plot([i for i in range(epoch)], student_log[:epoch], label='Student')
plt.plot([i for i in range(epoch)], DE_log[:epoch], label='DE')
plt.plot([i for i in range(epoch)], RRD_log[:epoch], label='RRD')
plt.legend(loc=4, fontsize=17)
plt.tick_params(axis="x", labelsize=15.9)
plt.tick_params(axis="y", labelsize=18)
plt.xlabel('Epoch', fontsize=20)
plt.ylabel('H@5', fontsize=20)
plt.show()
###Output
_____no_output_____ |
reading_assignments/4_Note-Unsupervised.ipynb | ###Markdown
$\newcommand{\xv}{\mathbf{x}} \newcommand{\wv}{\mathbf{w}} \newcommand{\yv}{\mathbf{y}} \newcommand{\zv}{\mathbf{z}} \newcommand{\uv}{\mathbf{u}} \newcommand{\vv}{\mathbf{v}} \newcommand{\Chi}{\mathcal{X}} \newcommand{\R}{\rm I\!R} \newcommand{\sign}{\text{sign}} \newcommand{\Tm}{\mathbf{T}} \newcommand{\Xm}{\mathbf{X}} \newcommand{\Zm}{\mathbf{Z}} \newcommand{\I}{\mathbf{I}} \newcommand{\Um}{\mathbf{U}} \newcommand{\Vm}{\mathbf{V}} \newcommand{\muv}{\boldsymbol\mu} \newcommand{\Sigmav}{\boldsymbol\Sigma} \newcommand{\Lambdav}{\boldsymbol\Lambda}$In this note, we discuss the two subtopics of unsupervised learing, clustering and dimensionality reduction. ClusteringWhen there is no information available for classification target, often we still want to find different groups of data. We call these as *clusters*, and we call this approach as *clustering*. Let us talk about two clustering techniques, k-means and Gaussian mixture models.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
K-meansThe first and simplest clustering approach is the K-means algorithm, which is non-probabilistic. Without knowing the actual groups, we want to find K partitions of N observations in D dimensions. That is, the observation data $\Xm = \{\xv_1, \cdots, \xv_N\}$ and $\xv_i \in \R^D$.When we divide data into K clusters, an intuitive way is to find groups whose inter-point distances inside a cluster are smaller than the distances to points outside the cluster. We can formulate this by selecting a center for each cluster, named $\muv_k$:$$E = \sum_{i=1}^{N} \sum_{k=1}^{K} \delta_{ik} \Vert \xv_i - \muv_k \Vert^2.$$where $\delta_{ik} \in \{0, 1\}$ is a binary indicator variable that is 1 if $\xv_i$ is assigned to cluster $k$, and 0 otherwise. Thus, our goal is to find the $\delta_{ik}$ and $\muv_k$ that minimize $E$. For this goal, 1. we first choose an initial $\muv_k$ randomly,2. minimize $E$ w.r.t. the $\delta_{ik}$.3. Now, fixing the $\delta_{ik}$, update $\muv_k$ to minimize $E$. 4. Repeating 2 and 3, we obtain the $K$ clusters after convergence.
###Code
from sklearn.metrics import pairwise_distances_argmin
# K-means: Sketch.
def kmeans(X, K=2, maxiter=100):
N = X.shape[0]
# select initial centers
cidx = np.random.choice(N, K, replace=False)
centers = X[cidx, :]
E = []
classes = []
# repeat until convergence or up to maxiter
for step in range(maxiter):
        # Assignment: assign each sample to its nearest current center (this minimizes E for the fixed centers)
classes = pairwise_distances_argmin(X, centers)
new_centers = np.array([X[classes == i].mean(0) for i in range(K)])
# print(new_centers)
# compute E and check convergence
E.append(np.linalg.norm(new_centers - centers))
if np.all(centers == new_centers):
break
# Update: update cluster centers
centers = new_centers
return centers, classes, E
###Output
_____no_output_____
###Markdown
Hint and Sample Data PracticeHere are some hints for filling in the blanks.
###Code
X = np.random.rand(100,2)
X
c = X[[1,4], :]
c
C = c[:, np.newaxis, :]
C
C.shape
X - C
(X - C)**2
np.sum((X - C)**2, axis=2)
np.argmin(np.sum((X - C)**2, axis=2), axis=0)
kmeans(X)
mus, classes, errs = kmeans(X)
plt.plot(errs)
plt.plot(X[classes==0, 0], X[classes==0, 1], 'or')
plt.plot(X[classes==1, 0], X[classes==1, 1], 'ob')
plt.plot(mus[:, 0], mus[:, 1], 'x')
xs, ys = np.meshgrid(np.linspace(0, 1, 500), np.linspace(0, 1, 500))
Xb = np.vstack((xs.flat, ys.flat)).T
# find classes from the mus
edists = np.sum((Xb - mus[:,np.newaxis,:])**2, axis=2)
cs = np.argmin(edists, axis=0)
# plot the boundary
plt.clf()
plt.contour(xs, ys, cs.reshape(xs.shape), cmap=plt.cm.bone)
plt.plot(X[classes==0, 0], X[classes==0, 1], 'or')
plt.plot(X[classes==1, 0], X[classes==1, 1], 'ob')
plt.plot(mus[:, 0], mus[:, 1], 'x')
###Output
_____no_output_____
###Markdown
Gaussian MixturePreviously we learned K-means clustering algorithm with linear border for each classes. We observed that near the border, there are vague assignment. How confident are you on the assignments, especially near the border in K-means? With probabilistic model, we can softly assign clusters with a certain probability. For this, we can assume each cluster is Gaussian distribution:$$p(\xv_k) \sim N(\xv_k \mid \muv_k, \Sigmav_k).$$For the entire data $\xv \in \{\xv_1, \cdots, \xv_K \}$, the Gaussian mixture distribution can be written as $$p(\xv) = \sum_{k=1}^K \pi_k N(\xv \mid \muv_k, \Sigmav_k).$$Here, we assume the latent indicator variable $\zv$, which satisfys $z_k \in \{0, 1\}$ and $\sum_k z_k = 1$. In the above Gaussian mixture model, $\pi_k$ is called as a *mixing coefficient*, which is the marginal distribution over $\zv$, $$p(z_k = 1) = \pi_k,$$such that $$0 \le \pi_k \le 1, $$and $$\sum_{k=1}^{K} \pi_k = 1.$$Since $\zv$ is a indicator variable, we can write$$p(\zv) = \prod_{k=1}^K \pi_k^{z_k}.$$From our first assumption of Gaussian for each class, $$p(\xv_k) = p(\xv \mid z_k = 1) = N(\xv \mid \muv_k, \Sigmav_k).$$Rewriting this with a vector notation,$$p(\xv \mid \zv) = \prod_{k=1}^K N(\xv \mid \muv_k, \Sigmav_k)^{z_k}.$$Marginalizing the joint distribution $p(\xv, \zv)$ over $\zv$, $$p(\xv) = \sum_{\zv} p(\zv) p(\xv \mid \zv) = \sum_{k=1}^K \pi_k N(\xv \mid \muv_k, \Sigmav_k). $$Remember that in logistic regression and discriminant analysis models, we are interested in the posterior probability$p(T=k \mid \xv)$. Similarly, we are interested in the probability of the classification from the observation $\xv$, thus,$$\begin{align*}p(z_k = 1 \mid \xv) &= \frac{p(\xv \mid z_k = 1) p(z_k= 1)}{p(\xv)} \\ \\ &= \frac{ \pi_kN(\xv \mid \muv_k, \Sigmav_k)}{\sum_{l=1}^K \pi_l N(\xv \mid \muv_l, \Sigmav_l)}.\end{align*}$$For concise notation, let us define $\kappa(z_k) \equiv p(z_k = 1 \mid \xv)$. LikelihoodNow, consider the batch data input $\Xm$ with $N$ data samples and $D$ dimensional input for each. Here, the latent variables are now in matrix $\Zm$ for $N$ samples and $K$ classes. From the assumption of i.i.d, we can write the joint distribution:$$\begin{align*}p(\Xm, \zv \mid \pi, \muv, \Sigmav) &= \prod_{n=1}^N p(\xv_n, \zv_n) \\ &= \prod_{n=1}^N p(\xv_n \mid \zv_n) p(\zv_n) \\ &= \prod_{n=1}^N \prod_{k=1}^{K} \Big[ \pi_k N(\xv_n \mid \muv_k, \Sigmav_k)\Big]^{z_k} .\end{align*}$$From the marginal distribution and the assumption of i.i.d, we can write the likelihood function:$$\begin{align*}p(\Xm \mid \pi, \muv, \Sigmav) &= \sum_\zv p(\Xm, \zv \mid \pi, \muv, \Sigmav)\\ &= \prod_{n=1}^N \sum_\zv p(\xv_n \mid \zv_n) p(\zv_n) \\ &= \prod_{n=1}^N \sum_\zv \prod_{k=1}^{K} \Big[ \pi_k N(\xv_n \mid \muv_k, \Sigmav_k)\Big]^{z_k} .\end{align*}$$Applying the logarihtm, the log-likelihood is $$LL = \ln p(\Xm \mid \pi, \muv, \Sigmav) = \sum_{n=1}^N \ln \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big]. $$ The parameters: $\muv, \Sigma, \pi$Now, let us find the maximum of the log-likelihood w.r.t $\muv_k$, $\Sigmav_k$, and $\pi_k$. Before this, let's review the derivation of multivariate Gaussian distribution. Derivation of Gaussian - w.r.t. 
$\muv$,$$\begin{align*}\frac{d}{d\muv} N(\xv; \muv, \Sigmav) &= \frac{d}{d\muv} \Bigg[ \frac{1}{ (2\pi)^{\frac{d}{2}} \vert \Sigmav \vert^{\frac{1}{2}}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \\\\ &= \Bigg[ \frac{1}{ (2\pi)^{\frac{d}{2}} \vert \Sigmav \vert^{\frac{1}{2}}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \frac{d}{d\muv}\Bigg( - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) \Bigg) \\ \\ &= \Bigg[ \frac{1}{ (2\pi)^{\frac{d}{2}} \vert \Sigmav \vert^{\frac{1}{2}}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \frac{d}{d\muv}\Bigg( - \frac{1}{2} [ \xv^\top \Sigmav^{-1} \xv - 2 \muv^\top \Sigmav^{-1} \xv + \muv^\top \Sigmav^{-1} \muv ] \Bigg) \\ \\ &= \Bigg[ \frac{1}{ (2\pi)^{\frac{d}{2}} \vert \Sigmav \vert^{\frac{1}{2}}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \Bigg( \Sigmav^{-1} \xv - \Sigmav^{-1} \muv \Bigg) \\ \\ &= N(\xv; \muv, \Sigmav) \Sigmav^{-1} (\xv - \muv) \end{align*}$$ - w.r.t. $\Sigmav$, $$\begin{align*}\frac{d}{d\Sigmav} N(\xv; \muv, \Sigmav) &= \frac{d}{d\Sigmav} \Bigg[ (2\pi)^{-\frac{d}{2}} \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \\\\&= (2\pi)^{-\frac{d}{2}} \frac{d}{d\Sigmav} \Bigg[ \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Bigg] \\\\&= (2\pi)^{-\frac{d}{2}} \Bigg[ \frac{d \vert \Sigmav \vert^{-\frac{1}{2}}}{d\Sigmav} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } + \vert \Sigmav \vert^{-\frac{1}{2}} \frac{d e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } }{d\Sigmav}\Bigg] \\\\&= (2\pi)^{-\frac{d}{2}} \Bigg[ -\frac{1}{2} \vert \Sigmav \vert^{-\frac{3}{2}} \frac{d \vert \Sigmav \vert }{d\Sigmav} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } + \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \frac{d \big( - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) \big)}{d\Sigmav} \Bigg] \\\\&= (2\pi)^{-\frac{d}{2}} \Bigg[ -\frac{1}{2} \vert \Sigmav \vert^{-\frac{3}{2}} \vert \Sigmav \vert \Sigmav^{-1} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } -\frac{1}{2} \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } (- \Sigmav^{-1} (\xv - \muv) (\xv - \muv)^\top \Sigmav^{-1} ) \Bigg] \\\\&= -\frac{1}{2} (2\pi)^{-\frac{d}{2}} \Bigg[ \vert \Sigmav \vert^{-\frac{1}{2}} \Sigmav^{-1} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } - \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Sigmav^{-1} (\xv - \muv) (\xv - \muv)^\top \Sigmav^{-1} \Bigg] \\\\&= -\frac{1}{2} (2\pi)^{-\frac{d}{2}} \vert \Sigmav \vert^{-\frac{1}{2}} e^{ - \frac{1}{2} (\xv - \muv)^\top \Sigmav^{-1} (\xv - \muv) } \Sigmav^{-1} \Bigg[ \I - (\xv - \muv) (\xv - \muv)^\top \Sigmav^{-1} \Bigg] \\\\&= -\frac{1}{2} N(\xv; \muv, \Sigmav) \Sigmav^{-1} \Bigg[ \I - (\xv - \muv) (\xv - \muv)^\top \Sigmav^{-1} \Bigg]\end{align*}$$ Now, back to the derivation of the log-likelihood: $$\begin{align*}\frac{\partial LL}{\partial \muv_k} &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\muv_k} \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big] \\\\ &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\muv_k} \pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \\ \\ &= \sum_{n=1}^N 
\frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \pi_kN(\xv_n; \muv_k, \Sigmav_k) \Sigmav_k^{-1} (\xv_n - \muv_k) \\ &= \sum_{n=1}^N \frac{\pi_k N(\xv_n; \muv_k, \Sigmav_k)}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \Sigmav_k^{-1} (\xv_n - \muv_k) \\ \\ &= \sum_{n=1}^N \kappa(z_k) \Sigmav_k^{-1} (\xv_n - \muv_k)\end{align*} $$Setting this to zero, $$\sum_{n=1}^N \kappa(z_k) \Sigmav_k^{-1} (\xv_n - \muv_k) = 0 \\ \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k) = 0 \\\sum_{n=1}^N \kappa(z_k) \xv_n = \sum_{n=1}^N \kappa(z_k) \muv_k \\\sum_{n=1}^N \kappa(z_k) \xv_n = N_k \muv_k \\\\\muv_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) \xv_n,$$where the number of samples for class $k$, $N_k = \sum_{n=1}^{N} \kappa(z_k)$.Now, w.r.t $\Sigmav_k$, $$\begin{align*}\frac{\partial LL}{\partial \Sigmav_k} &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\Sigmav_k} \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big] \\\\ &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\Sigmav_k} \pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \\ \\ &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \pi_kN(\xv_n; \muv_k, \Sigmav_k) \Sigmav_k^{-1} (\I - (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \Sigmav_k^{-1} ) \\ \\ &= \sum_{n=1}^N \frac{\pi_k N(\xv_n; \muv_k, \Sigmav_k)}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \Sigmav_k^{-1} (\I - (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \Sigmav_k^{-1} ) \\ \\ &= \sum_{n=1}^N \kappa(z_k) \Sigmav_k^{-1} (\I - (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \Sigmav_k^{-1} )\end{align*} $$Setting the last one to zero, and multiplying $\Sigma$ on both side of equal, $$\sum_{n=1}^N \kappa(z_k) (\I - (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \Sigmav_k^{-1} ) = 0 \\\sum_{n=1}^N \kappa(z_k) \I = \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \Sigmav_k^{-1} \\N_k \Sigmav_k = \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k)(\xv_n - \muv_k)^\top \\\Sigmav_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k)(\xv_n - \muv_k)^\top$$w.r.t $\pi_k$, $$\begin{align*}\frac{\partial LL}{\partial \pi_k} &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\pi_k} \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big] \\\\ &= \sum_{n=1}^N \frac{1}{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \nabla_{\pi_k} \pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \\ \\ &= \frac{1}{\pi_k} \sum_{n=1}^N \frac{\pi_k N(\xv_n; \muv_k, \Sigmav_k) }{\sum_{l=1}^K\pi_l N(\xv_n \mid \muv_l, \Sigmav_l)} \\ \\ &= \frac{1}{\pi_k} \sum_{n=1}^N \kappa(z_k) \end{align*} $$Setting this to zero does not give any info but zero. What did we do wrong? Here, let us consider the constraint $\sum_k \pi_k = 1$. 
Adding this constraint with a Lagrange multiplier, $$LL + \lambda (\sum_k \pi_k - 1) = \sum_{n=1}^N \ln \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big] + \lambda (\sum_k \pi_k - 1)$$Computing the derivative and setting it to zero, $$\frac{1}{\pi_k} \sum_{n=1}^N \kappa(z_k) + \lambda = 0 \\\pi_k = -\frac{N_k}{\lambda} $$From the constraint, $\sum_k \pi_k = 1$, we can see that $$\sum_k \pi_k = - \sum_k \frac{N_k}{\lambda} = -\frac{1}{\lambda} \sum_k N_k = -\frac{N}{\lambda} = 1 \\\\\Rightarrow \lambda = -N$$Thus, $$\pi_k = \frac{N_k}{N} $$Collecting all the updates, - $\muv_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) \xv_n$- $\Sigmav_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k)(\xv_n - \muv_k)^\top$- $\pi_k = \frac{N_k}{N} $ Algorithm- Initialize $\muv_k$, $\Sigmav_k$, and $\pi_k$. - **Expection Step**: evaluate the responsibilities $\kappa(z_{nk})$, $$\kappa(z_{nk}) = \frac{ \pi_kN(\xv \mid \muv_k, \Sigmav_k)}{\sum_{l=1}^K \pi_l N(\xv \mid \muv_l, \Sigmav_l)}. $$- **Maximization Step**: re-estimate $\muv_k$, $\Sigmav_k$, and $\pi_k$, - $\muv_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) \xv_n$, - $\Sigmav_k = \frac{1}{N_k} \sum_{n=1}^N \kappa(z_k) (\xv_n - \muv_k)(\xv_n - \muv_k)^\top$, - $\pi_k = \frac{N_k}{N} $, with - $N_k = \sum_{n=1}^{N} \kappa(z_k)$. - Compute the log-likelihood and check convergence: $$LL = \ln p(\Xm \mid \pi, \muv, \Sigmav) = \sum_{n=1}^N \ln \Big[ \sum_{k=1}^K\pi_k N(\xv_n \mid \muv_k, \Sigmav_k) \Big]. $$- If not converged, repeat EM steps. Normal PDFBefore starting to write the EM algorithm for GMM, let us review our multivariate normal pdf!
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# multivariate normal pdf
def normal_dist(X, mu, Sigma):
    """ multivariate normal probability density function
parameters
-----------
X ndarray (N x D)
input data
mu ndarray (D x 1)
mean vector
Sigma ndarray (D x D)
covariance matrix
return
------
@pdf_evals ndarray (N x 1)
"""
N, D = X.shape
SigmaDet = Sigma if D == 1 else np.linalg.det(Sigma)
try:
SigmaInv = 1. / Sigma if D == 1 else np.linalg.inv(Sigma)
    except np.linalg.LinAlgError:
raise np.linalg.LinAlgError('normalD: failed to compute inverse of Sigma')
scale = 1. / np.sqrt((2 * np.pi)**D * SigmaDet)
shiftedX = X - mu
return scale * np.exp(-0.5 * np.sum(shiftedX @ SigmaInv * shiftedX, axis=1, keepdims=True))
mu = np.array([1, 1])
cov = np.eye(2)
X = np.random.multivariate_normal(mu, cov, 10)
normal_dist(X, mu, cov)
###Output
_____no_output_____
###Markdown
PDF for Mixture ModelNow, let's consider multiple means for a mixture model. To do this, we first review the Python *map* function.
###Code
a = np.arange(5)
b = np.arange(5) + 10
list(map(list, zip(a,b)))
#list(map(lambda a,b: a+b, zip(a,b)))
for i,j in zip(a,b):
print(i, j)
list(map(lambda t: t[0]+t[1], zip(a,b)))
""" Revised multinomial PDF function for mixture models
"""
from numpy.linalg import LinAlgError
# multivariate normal pdf, evaluated per mixture component
def mixnorm_dist(X, mus, Sigmas):
    """ multivariate normal probability density function for mixture models
parameters
-----------
X ndarray (N x D)
input data
mu list of ndarray (D x 1)
mean vectors
Sigma list of ndarray (D x D)
covariance matrices
return
------
@pdf_evals ndarray (N x K) for K = len(mus)
"""
N, D = X.shape
    SigmaDets = np.fromiter(map(lambda S: S if D == 1 else np.linalg.det(S), Sigmas), dtype=float)
try:
SigmaInvs = np.array(list(map(lambda S: 1. / S if D == 1 else np.linalg.inv(S), Sigmas)))
except LinAlgError:
raise LinAlgError('normalD: failed to compute inverse of Sigma')
scale = 1. / np.sqrt((2 * np.pi)**D * SigmaDets)
shiftedX = X - np.asarray(mus)[:, np.newaxis, :]
#quad = np.array(list(map(lambda V: np.sum(V[0] @ V[1] * V[1], axis=1, keepdims=True), zip(shiftedX, SigmaInvs))))
quad = np.array(list(map(lambda V: np.sum(V[0] @ V[1] * V[0], axis=1, keepdims=True), zip(shiftedX, SigmaInvs))))
quad = np.hstack(quad)
return scale * np.exp(-0.5 * quad)
mu1 = np.array([1, 1])
mu2 = np.array([3, 2])
cov = np.eye(2)
X = np.vstack((np.random.multivariate_normal(mu1, cov, 10),
np.random.multivariate_normal(mu2, cov, 10)))
mixnorm_dist(X, [mu1, mu2], [cov, cov])
#X[:, :] - np.asarray([mu1, mu2])[:, np.newaxis, :]
# compare with the normalD
print(normal_dist(X, mu1, cov))
print(normal_dist(X, mu2, cov))
probs = mixnorm_dist(X, [mu1, mu2], [cov, cov])
kappa = probs / np.sum(probs, axis=1, keepdims=True)
kappa
classes = np.argmax(kappa, axis=1)
classes
mean1 = X[classes==0].mean(axis=0)
mean1
cov1 = np.cov(X[classes==0].T)
cov1
np.bincount(classes)
np.bincount([0,1,1,2,4,4,4])
def GMM(X, K=2, maxiter=100):
    N, D = X.shape
    # select initial centers - randomly for now, but YOU CAN USE K-MEANS for initial assignment
    cidx = np.random.choice(N, K, replace=False)
    mus = X[cidx, :].copy()
    Sigmas = [np.eye(D) for k in range(K)]  # unit variance
    pi = np.array([1 / K] * K)
    LL = []  # log-likelihood log - expected to be monotonically increasing
    # repeat until convergence or up to maxiter
    for step in range(maxiter):
        # Expectation: kappa(z_nk) = pi_k N(x_n | mu_k, Sigma_k) / sum_l pi_l N(x_n | mu_l, Sigma_l)
        probs = mixnorm_dist(X, mus, Sigmas)
        weighted = probs * pi
        kappa = weighted / np.sum(weighted, axis=1, keepdims=True)
        # Maximization: re-estimate mus, Sigmas, and pi from the responsibilities
        Nk = np.sum(kappa, axis=0)              # effective number of samples per cluster
        for k in range(K):
            mus[k, :] = np.sum(kappa[:, k:k+1] * X, axis=0) / Nk[k]
            shifted = X - mus[k, :]
            Sigmas[k] = (kappa[:, k:k+1] * shifted).T @ shifted / Nk[k]
        pi = Nk / N
        ll = np.sum(np.log(np.sum(probs * pi, axis=1)))
        # convergence check: let us run w/o this for now.
        # if len(LL) > 0 and np.abs(ll - LL[-1]) < np.finfo(float).eps:
        #     break
        LL.append(ll)
    return kappa, mus, Sigmas, pi, LL
###Output
_____no_output_____
###Markdown
Let us bring back our old example.
###Code
# simulated samples
mu1 = [-1, -1]
cov1 = np.eye(2)
mu2 = [2,3]
cov2 = np.eye(2) * 3
C1 = np.random.multivariate_normal(mu1, cov1, 50)
C2 = np.random.multivariate_normal(mu2, cov2, 50)
plt.plot(C1[:, 0], C1[:, 1], 'or')
plt.plot(C2[:, 0], C2[:, 1], 'xb')
plt.xlim([-3, 6])
plt.ylim([-3, 7])
from matplotlib.colors import BASE_COLORS
COLORS = list(BASE_COLORS.keys())[:-1]
# scatter plots for K classes
def kscatter_plot(X, classes):
K = len(np.unique(classes))
Cs = [X[classes == k] for k in range(K)]
csm = [''.join([c,'.']) for c in COLORS]
mum = [''.join([c,'x']) for c in COLORS[::-1]]
for k in range(K):
plt.plot(Cs[k][:,0], Cs[k][:, 1], csm[k])
plt.plot(mus[k][0], mus[k][1], mum[k], markersize=10)
# applying gmm on this data
X = np.vstack((C1, C2))
K = 2
ks, mus, sigmas, pi, ll = GMM(X, 2, 1000)
classes = np.argmax(ks, axis=1)
print(classes)
print("----means-------------")
print(mus)
for k in range(K):
print("----Class k: Cov-Mat----")
print(sigmas[k])
plt.plot(classes)
plt.ylabel('classes')
plt.xlabel('samples')
plt.figure()
kscatter_plot(X, classes)
# Cs = [X[classes == k] for k in range(K)]
# csm = ['m.', 'c.']
# mum = ['rx', 'bx']
# for k in range(K):
# plt.plot(Cs[k][:,0], Cs[k][:, 1], csm[k])
# plt.plot(mus[k][0], mus[k][1], mum[k])
plt.figure()
plt.plot(ll)
plt.ylabel("log-likilihood")
###Output
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 0 1 0 1 1 1
0 1 0 0 0 1 0 0 1 0 1 1 1 1 0 0 0 0 0 0 0 1 0 0 1 1]
----means-------------
[[ 2.06069238 4.17384165]
[-0.28087196 -0.40858763]]
----Class k: Cov-Mat----
[[ 2.58347819 0.09917755]
[ 0.09917755 1.18408964]]
----Class k: Cov-Mat----
[[ 2.25513196 1.1274902 ]
[ 1.1274902 2.16567898]]
###Markdown
K-means vs GMM with data with different variances
###Code
mu1 = [-2, 2]
cov1 = np.eye(2)
mu2 = [2,2]
cov2 = np.eye(2)
mu3 = [0,-8]
cov3 = np.eye(2) * 5
C1 = np.random.multivariate_normal(mu1, cov1, 50)
C2 = np.random.multivariate_normal(mu2, cov2, 50)
C3 = np.random.multivariate_normal(mu3, cov3, 50)
plt.plot(C1[:, 0], C1[:, 1], 'or')
plt.plot(C2[:, 0], C2[:, 1], 'xy')
plt.plot(C3[:, 0], C3[:, 1], '^b')
Xvs = np.vstack((C1, C2, C3))
mus, classes, errs = kmeans(Xvs, K=3)
kscatter_plot(Xvs, classes)
ks, mus, sigmas, pi, ll = GMM(Xvs, 3, 1000)
classes = np.argmax(ks, axis=1)
kscatter_plot(Xvs, classes)
###Output
_____no_output_____
###Markdown
How to choose the initial centers for GMM?
###Code
def GMM(X, K=2, maxiter=100, init=None):
""" Gaussian Mixture Model
Parameters
----------
X ndarray (N x D)
input data to cluster
K int
the number of clusters
maxiter int
the maximum number of iteration
init string
kmeans init or random
Returns
-------
@kappa ndarray (N x K)
responsibilities for each class
@mus ndarray (K x D)
centers for clusters
@Sigmas list of ndarray (D x D)
list of covariance matrices
@pi vector (K,)
mixing coefficient
@LL list
log-likelihood log
"""
N, D = X.shape
if init is None:
# select initial centers - randomly for now, but YOU CAN USE K-MEANS for initial assignment
cidx = np.random.choice(N, K, replace=False)
mus = X[cidx, :]
Sigmas = [np.eye(D) for k in range(K)] # uni-variance
pi = np.array([1/K] * K)
else:
# init with kmeans
        mus, classes, errs = kmeans(X, K=K)
Nk = np.bincount(classes)
pi = Nk / N
mus = np.asarray(mus)
Sigmas = [np.cov(X[classes==k].T) for k in range(K)]
    LL = []  # log-likelihood log - expected to be monotonically increasing
    # EM iterations, exactly as in the previous GMM implementation
    for step in range(maxiter):
        # Expectation: responsibilities
        probs = mixnorm_dist(X, mus, Sigmas)
        weighted = probs * pi
        kappa = weighted / np.sum(weighted, axis=1, keepdims=True)
        # Maximization: re-estimate mus, Sigmas, and pi
        Nk = np.sum(kappa, axis=0)
        mus = (kappa.T @ X) / Nk[:, np.newaxis]
        for k in range(K):
            shifted = X - mus[k, :]
            Sigmas[k] = (kappa[:, k:k+1] * shifted).T @ shifted / Nk[k]
        pi = Nk / N
        LL.append(np.sum(np.log(np.sum(probs * pi, axis=1))))
    return kappa, mus, Sigmas, pi, LL
ks, mus, sigmas, pi, ll = GMM(Xvs, 3, 1000, init='kmeans')
classes = np.argmax(ks, axis=1)
kscatter_plot(Xvs, classes)
###Output
_____no_output_____
###Markdown
Dimensionality Reduction Principal Component Analysis (PCA)Principal Component Analysis (PCA) is one of the dimensionality reduction tools. PCA is also used for feature extraction, data compression, and data visualization. It is also known as the Karhunen-Loeve (KL) transform.PCA reduces the dimension of the data by finding the directions along which the data varies the most. For this reason, we first look at the variance of the data. $$var(\zv) = \frac{1}{N} \sum_{n=1}^{N} \zv_n^2 $$From the variance, PCA finds the orthogonal projection of the data onto the *principal subspace*, which is a lower dimensional linear space. Now, let us look for a direction $\vv$ that maximizes the variance.Here, $\vv$ is a unit vector, so the dot product represents a projection onto it. The projection of data $\xv_n$ onto $\vv$ is $$\zv = \xv_n^\top \vv.$$The variance of this projected data $\zv$ is $$\begin{align*}var(\zv) &= \frac{1}{N} \sum_{n=1}^{N} \vv^\top \xv_n \xv_n^\top \vv \\ &= \vv^\top \Big( \frac{1}{N} \sum_{n=1}^{N} \xv_n \xv_n^\top \Big) \vv \\ &= \vv^\top \Sigmav \vv\end{align*}$$where $\Sigmav$ is the covariance matrix of $\xv_n$. Optimization Problem$$\begin{equation*}\begin{aligned}& \underset{\vv}{\text{maximize}}& & \vv^\top \Sigmav \vv \\& \text{subject to}& & \vv^\top \vv = 1.\end{aligned}\end{equation*}$$Using a Lagrange multiplier that is denoted by $\lambda$, we can make an unconstrained maximization of$$q(\vv) = \vv^\top \Sigmav \vv + \lambda ( 1 - \vv^\top \vv). $$As usual, setting the derivative w.r.t. $\vv$ to zero, we get$$0 = 2 \Sigmav \vv - 2 \lambda \vv, $$$$\lambda \vv = \Sigmav \vv.$$We can see that the direction vector $\vv$ is an eigenvector!Also, since $\vv$ is a unit vector, we can apply $\vv^\top \vv = 1$ by left-multiplying $\vv^\top$,$$\lambda = \vv^\top \Sigmav \vv.$$So, we can obtain the maximum variance when $\vv$ is the eigenvector having the largest eigenvalue $\lambda$. This eigenvector is called the *first principal component*. Other principal components, or other directions that are orthogonal to the first principal component, are found by the eigendecomposition of $\Sigmav$, or the singular value decomposition of the data sample matrix $\Xm$ **with zero means**. $$\Xm = \Um \Lambdav^{\frac{1}{2}} \Vm^\top,$$where $\Lambdav$ is a diagonal matrix with eigenvalue elements. For implementation, we need to keep track of the shape of each matrix. - $\Xm$: N x D- $\Um$: N x D- $\Lambdav$: D x D- $\Vm$: D x D
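Before implementing this, here is a quick numerical sanity check of that claim: the leading eigenvector of the covariance matrix should match the first right singular vector of the zero-mean data matrix. (This snippet is illustrative only and is not part of the original exercise; the toy data below is made up.)
###Code
# sanity check: eigendecomposition of the covariance vs. SVD of the centered data
import numpy as np

rng = np.random.default_rng(0)
A = rng.multivariate_normal([0, 0], [[3, 1], [1, 2]], size=500)
Ac = A - A.mean(axis=0)                                 # zero-mean data

evals, evecs = np.linalg.eigh(np.cov(Ac.T))             # eigenvectors of the covariance matrix
U_, S_, Vt_ = np.linalg.svd(Ac, full_matrices=False)    # SVD of the centered data matrix

v_eig = evecs[:, np.argmax(evals)]                      # first principal component (eigen route)
v_svd = Vt_[0]                                          # first right singular vector (SVD route)

print(np.isclose(np.abs(v_eig @ v_svd), 1.0))           # same direction, up to sign
print(np.isclose(np.max(evals), S_[0]**2 / (Ac.shape[0] - 1)))  # np.cov uses the N-1 normalization
###Output
_____no_output_____
###Markdown
Now let's implement `pca` using `np.linalg.svd`.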
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# PCA
def pca(X):
""" principal component analysis
parameter
---------
X ndarray (N x D)
input data
return
------
@U ndarray (N x D)
left singular vectors
    @S ndarray (D,)
        eigenvalues of the covariance matrix (variance along each principal direction)
@V ndarray (D x D)
right singular vectors
@mu ndarray (D,)
1d vector of column means
"""
    ### implement PCA using np.linalg.svd on the zero-mean data matrix
    means = np.mean(X, axis=0)
    Xc = X - means                                    # PCA assumes zero-mean data
    U, S, Vt = np.linalg.svd(Xc, full_matrices=False)
    L = S**2 / X.shape[0]                             # eigenvalues of the covariance matrix
    return U, L, Vt.T, means
mu1 = np.array([0, 0])
S1 = np.eye(2)
X1 = np.random.multivariate_normal(mu1, S1, 300)
plt.figure(figsize=(6,6))
plt.plot(X1[:, 0], X1[:, 1], 'ob')
plt.xlim([-8, 8])
plt.ylim([-8, 8])
mu2 = np.array([2, 1])
S2 = np.array([[1,2],[0.5,3]])
X2 = np.random.multivariate_normal(mu2, S2, 300)
plt.figure(figsize=(6,6))
plt.plot(X2[:, 0], X2[:, 1], 'ob')
plt.xlim([-8, 8])
plt.ylim([-8, 8])
U, L, V, m1 = pca(X1)
L
U
V
U.shape
COLORS = ['r', 'c', 'y']
def plot_pca(X, V, mu, L):
plt.plot(X[:, 0], X[:, 1], 'ob')
for d in range(V.shape[1]):
l = np.sqrt(L[d])
print(l)
p1 = mu - l * V[:, d]
p2 = mu + l * V[:, d]
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], color=COLORS[d], linewidth=5)
plt.figure(figsize=(6,6))
plot_pca(X1, V, m1, L)
plt.xlim([-8, 8])
plt.ylim([-8, 8])
L
U, L, V, m2 = pca(X2)
plt.figure(figsize=(6,6))
plot_pca(X2, V, m2, L)
plt.xlim([-8, 8])
plt.ylim([-8, 8])
L
V
newX2 = (X2) @ V
plt.plot(newX2[:, 0], newX2[:, 1], 'ob')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
###Output
_____no_output_____
###Markdown
Review Standardization (Input Transformation)
###Code
newX2 = (X2 - m2)
plt.plot(newX2[:, 0], newX2[:, 1], 'ob')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
newX2 = (X2 - m2) @ V
plt.plot(newX2[:, 0], newX2[:, 1], 'ob')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
newX2 = (X2 - m2) @ V
newX2 /= np.std(newX2, 0)
plt.plot(newX2[:, 0], newX2[:, 1], 'ob')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
###Output
_____no_output_____ |
Roberto_Zerbini's_Blog_Dimensionality_Reduction.ipynb | ###Markdown
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.utils import check_random_state
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Curse of Dimensionality
###Code
distances = []
dimensions = range(1,10000,250)
for i in dimensions:
n_dimensions = i
point1 = np.random.rand(n_dimensions)
point2 = np.random.rand(n_dimensions)
distances.append(np.linalg.norm(point1-point2))
fig, ax = plt.subplots()
ax.plot(dimensions, distances, "b-", label = 'Euclidean Distance')
ax.set_xlabel("Dimensions")
ax.set_ylabel("Distance")
plt.legend(loc="upper left", fontsize=8)
plt.title('Curse of Dimensionality')
plt.show()
###Output
_____no_output_____
###Markdown
PCA
###Code
z = np.linspace(0, 3*np.pi)
x = np.cos(z - np.pi/2)
y = np.sin(z - np.pi/2)
X = np.array(list(zip(x,y,z)))
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ax.scatter3D(x, y, z)
plt.show()
X_centered = X - X.mean(axis=0)
U, s, Vt = np.linalg.svd(X_centered)
c1 = Vt.T[:, 0]
c2 = Vt.T[:, 1]
# Draw plane
xx, yy = np.meshgrid(np.arange(np.min(X_centered[:,0]), np.max(X_centered[:,0]), .01), np.arange(np.min(X_centered[:,1]), np.max(X_centered[:,1]), .01))
z1 = (-c1[0] * xx - c2[1] * yy ) * 1. / c1[2]
z2 = (-c2[0] * xx - c2[1] * yy ) * 1. / c2[2]
# plot the surface
plt3d = plt.figure().gca(projection='3d')
plt3d.plot_surface(xx, yy, z1, alpha=0.09)
plt3d.plot_surface(xx, yy, z2, alpha=0.09)
plt3d.scatter(*(X_centered.T), c = X_centered[:,0], cmap='inferno', label = 'Projection')
plt.show()
W2 = Vt.T[:, :2]
X2D = X_centered.dot(W2)
fig, ax = plt.subplots()
plt.title('2D projection')
ax.scatter(X2D[:,0],X2D[:,1], c = X2D[:,1], cmap='inferno', label = 'Projection')
ax.set_xlabel("X")
ax.set_ylabel("Y")
plt.show()
###Output
_____no_output_____
###Markdown
Compression
###Code
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X = X / 255.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10000, random_state = 123)
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
random_state = check_random_state(0)
permutation = random_state.permutation(X_train.shape[0])
X_train = X_train[permutation]
y_train = y_train[permutation]
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index].reshape(28,28), cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(y_train[index], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()
import timeit
from sklearn.linear_model import Perceptron
clf = Perceptron(tol=1e-3, verbose = 0, random_state=123)
clf.fit(X, y)
%%timeit
clf.fit(X, y)
clf.score(X, y)
from sklearn.decomposition import PCA
pca = PCA(n_components=784)
pca.fit(X)
print(pca.explained_variance_ratio_)
from sklearn.decomposition import PCA
pca = PCA(n_components=.95)
pca.fit(X)
X_compressed = pca.transform(X)
X_compressed.shape
clf = Perceptron(tol=1e-3, verbose = 0, random_state=123)
clf.fit(X_compressed, y)
%%timeit
clf.fit(X_compressed, y)
clf.score(X_compressed, y)
###Output
_____no_output_____
###Markdown
Visualization
###Code
from sklearn.manifold import TSNE
X_compressed = PCA(n_components=30).fit_transform(X[:10000])
X2D = TSNE(n_components=2, perplexity=40, n_iter=1000).fit_transform(X_compressed)
fig, ax = plt.subplots(figsize=(15,10))
scatter = ax.scatter(X2D[:,0],X2D[:,1], c = np.array(y[:10000], dtype = 'int'), marker='o', cmap = 'tab10', label=np.unique(y[:10000]), s=30)
legend1 = ax.legend(*scatter.legend_elements(),
loc="upper left", title="Classes")
ax.add_artist(legend1)
plt.show()
###Output
_____no_output_____ |
Prace_domowe/Praca_domowa7/Grupa1/PrzybylekPaulina/PracaDomowa7.ipynb | ###Markdown
Homework 7Author: Paulina Przybyłek Loading the data and the required packages
###Code
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Let's load the image with the kitten. It is a color image saved as a `.jpg` file. Below we can see what it actually looks like.
###Code
image = io.imread('kotek.jpg') # the url would not load - there was a problem
print(image[0:2])
def plot_image(image):
"""
    Displays the image passed as an argument (given as a matrix).
"""
plt.figure(figsize=(12, 6))
plt.imshow(image)
plt.axis('off')
plt.show()
plot_image(image)
###Output
_____no_output_____
###Markdown
It may not be very interesting, but its merit is the cute and pretty kitten (especially if you like cats ^^). Dimensionality reduction - PCALet's take a look at the dimensions of the loaded image before we move on to dimensionality reduction.
###Code
image.shape
# we need to make the image 2D
image_2D = np.reshape(image, (575, 864*3))
print("Dimensions of our image: ", image_2D.shape)
###Output
Dimensions of our image:  (575, 2592)
###Markdown
Let's try to perform PCA on this image - using the sklearn implementation. But how should we set the parameters? We will try `svd_solver = "randomized"` with the seed set to $0$, and then we will find a suitable number of components for PCA.
###Code
pca = PCA(svd_solver="randomized", random_state=0).fit(image_2D)
plt.figure(figsize=(16,10))
plt.plot(np.cumsum(pca.explained_variance_ratio_)) # cumulative percentage of explained variance
plt.xlim(-2,80) # it does not grow much beyond this
plt.grid(alpha=0.2)
plt.yticks([0.5, 0.6, 0.7, 0.8, 0.9, 0.92, 0.94, 0.96, 0.98, 1.0])
plt.xlabel('Number of components used for PCA')
plt.ylabel('Cumulative % of explained variance')
plt.show()
###Output
_____no_output_____
###Markdown
For $80$ components we obtain almost $99\%$ of the variance. Either way we will reduce the dimensions considerably, so let's see how it looks.
###Code
def image_pca(components):
"""
    Performs PCA on the image for the given number of components.
"""
image_pca = PCA(n_components = components, svd_solver="randomized", random_state=0).fit(image_2D)
image_reduced = image_pca.transform(image_2D)
print("Wymiary po redukcji: ", image_reduced.shape)
print("Procent wyjaśnionej wariancji: ", np.sum(image_pca.explained_variance_ratio_))
image_reduced = image_pca.inverse_transform(image_reduced)
return image_reduced
image_reduced = image_pca(80)
# go back to the image with 3 channels
image_converted = np.reshape(image_reduced, (575,864,3))
plot_image((image_converted).astype(np.uint8))
###Output
_____no_output_____
###Markdown
Despite the large percentage of explained variance, the kitten lost some quality. However, remember that we took $80$ components out of $2592$, so we reduced the size roughly $32.4$ times. Let's try it with a larger number of components and see how it looks. Importantly, even now the image has been reconstructed almost perfectly apart from four spots, so not much is missing.
###Code
image_reduced = image_pca(360)
image_converted = np.reshape(image_reduced, (575,864,3))
plot_image((image_converted).astype(np.uint8))
###Output
Dimensions after reduction:  (575, 360)
Percentage of explained variance:  0.9999130496142241
|
01_Softmax_with_temperature.ipynb | ###Markdown
Softmax with temperatureThis notebook presents how changes in the temperature of the [softmax](https://en.wikipedia.org/wiki/Softmax_function) function are related to changes in the distribution's [entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)).Softmax with temperature is defined as:$$\Large softmax(x_i) = \frac{e^{\frac{x_i}{t}}}{\sum_{j=0}^{K}e^{\frac{x_j}{t}}}$$where $t$ is the temperature. Define functions
###Code
import numpy as np
import matplotlib.pyplot as plt

def softmax_t(x, t=1):
return np.exp(x / t) / np.sum(np.exp(x / t))
def shannon_entropy(x):
return -np.sum(x * np.log2(x))
###Output
_____no_output_____
###Markdown
Create dataLet's assume a 10-class classification problem. We'll randomly pick 10 values and interpret them as pre-softmax logits.
###Code
logits = np.random.randn(10)
# Sanity check
probas = softmax_t(logits)
logits, probas, probas.sum()
###Output
_____no_output_____
###Markdown
Let's plot our probabilities with low (==.01), default (==1), high (==3) and very high (==100) temperatures.
###Code
plt.figure(figsize = (20, 4))
plt.subplot(1, 4, 1)
plt.bar(np.arange(len(probas)), softmax_t(logits, .01), alpha = .8)
plt.title('Distribution of probabilities\n$temp = .01$')
plt.xlabel('Class')
plt.ylabel('$P(class)$')
plt.subplot(1, 4, 2)
plt.bar(np.arange(len(probas)), probas, alpha = .8)
plt.title('Distribution of probabilities\n$temp = 1$')
plt.xlabel('Class')
plt.ylabel('$P(class)$')
plt.subplot(1, 4, 3)
plt.bar(np.arange(len(probas)), softmax_t(logits, 3), alpha = .8)
plt.title('Distribution of probabilities\n$temp = 3$')
plt.xlabel('Class')
plt.ylabel('$P(class)$')
plt.subplot(1, 4, 4)
plt.bar(np.arange(len(probas)), softmax_t(logits, 100), alpha = .8)
plt.title('Distribution of probabilities\n$temp = 100$')
plt.xlabel('Class')
plt.ylabel('$P(class)$')
plt.show()
###Output
_____no_output_____
###Markdown
We can see that the relative differences between probabilities decrease when the temperature $t$ increases. For extremely low values of $t$ we get very close to a "hard" max function. For very high values of $t$ we approach a uniform distribution. Looking at the plots above, we expect the entropy of the distribution to increase with temperature $t$ (which probably sounds very intuitive to those of you with a background in physics).Let's check it! Compute entropy for different values of $t$
###Code
temps = []
probas = []
entropies = []
for t in np.arange(.01, 100.01, .1):
probas_ = softmax_t(logits, t)
probas.append(probas_)
temps.append(t)
    entropies.append(shannon_entropy(probas_))
plt.figure(figsize = (5, 4))
plt.scatter(temps, entropies, alpha = .1)
plt.xlabel('Temperature $t$')
plt.ylabel('Entropy (bits)')
plt.title('How entropy changes with $t$')
plt.show()
###Output
_____no_output_____ |
x-archive-temp/ca08-SKL_Regression/LinearRegression.ipynb | ###Markdown
--- Cookbook 3: Linear Regression**Author list:** Alexander Fred Ojala**References / Sources:** * http://nbviewer.jupyter.org/github/jdwittenauer/ipython-notebooks/blob/master/notebooks/ml/ML-Exercise1.ipynb * http://college.cengage.com/mathematics/brase/understandable_statistics/7e/students/datasets/slr/frames/slr05.html (data)**License Agreement:** Feel free to do whatever you want with this code___ *This notebook highlights the basic ML concepts: Simple linear regression, multiple linear regression, and linear prediction.* Linear Regression & Prediction___The most basic predictive method in Machine Learning. The goal is to minimize the sum of the squared errors to fit a straight line to a set of data points.The linear regression model fits a linear function to a set of data points. For simple linear regression that is: $Y = \beta_0 + \beta_1 X$where,* $\beta_0$ is the intercept* $\beta_1$ is the slope parameter* $Y$ is the dependent variable (sometimes called "target variable")* $X$ is the independent variable (sometimes called "predictor", "regressor", or "feature")
###Code
#import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
#import data Fires per 10000 housing units and Thefts per 10000 population in Chicago
import os
path = os.getcwd() + '/data2.csv'
df = pd.read_csv(path, header=None, names=['Fires', 'Thefts']) #read in to table and add header
df.head()
data=df/10 #normalize data
df=df/10
df.insert(0, 'Ones', 1) # Insert ones for intercept
df.head()
###Output
_____no_output_____
###Markdown
Simple and Multiple Linear Regression___ Matrix multiplication Linear Regression to obtain the weights$ W = (X^T X)^{-1} X^T Y $$ W_1 = (X^T X)^{-1} $$ W_2 = X^T Y $
###Code
#def lm_weights(df):
n=len(df) #number of rows
m=len(df.iloc[0,:])
df_matrix = df.values  # .as_matrix() was removed from pandas; .values returns the same ndarray
nx = df_matrix[:,0:(m-1)]
ny = df_matrix[:,m-1]
ny = ny[:,np.newaxis] # add new axis for matrix multiplication
W1 = np.linalg.inv(np.dot(nx.T,nx)) #Calculate first part of weight
W2 = np.dot(nx.T,ny)
W = np.dot(W1,W2)
weights_df = pd.DataFrame(W,index=['beta0','beta1'])
#print weights_df
#return W
#W = lm_weights(df)
# Plot Results
x = np.linspace(data.Fires.min(), data.Fires.max(), 100)
f = W[0, 0] + (W[1, 0] * x)
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Fires, data.Thefts, label='Training Data')
ax.legend(loc=2)
ax.set_xlabel('Fires')
ax.set_ylabel('Thefts')
ax.set_title('Predicted Fires vs. Thefts')
###Output
_____no_output_____
###Markdown
Linear Regression with Gradient Descent and Cost function___ Now let's implement linear regression using gradient descent to minimize the cost function. The equations implemented in the following code samples are detailed in "ex1.pdf" in the "exercises" folder.First we'll create a function to compute the cost of a given solution (characterized by the parameters beta).
###Code
def computeCost(X, y, beta):
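    # squared-error cost: J(beta) = 1/(2N) * sum((X * beta.T - y)^2)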
inner = np.power(((X * beta.T) - y), 2)
return np.sum(inner) / (2 * len(X))
###Output
_____no_output_____
###Markdown
Now let's do some variable initialization.
###Code
# set X (training data) and y (target variable)
cols = df.shape[1]
X = df.iloc[:,0:cols-1]
y = df.iloc[:,cols-1:cols]
###Output
_____no_output_____
###Markdown
Let's take a look to make sure X (training set) and y (target variable) look correct.
###Code
X.head()
y.head()
###Output
_____no_output_____
###Markdown
The cost function is expecting numpy matrices so we need to convert X and y before we can use them. We also need to initialize beta.
###Code
X = np.matrix(X.values)
y = np.matrix(y.values)
beta = np.matrix(np.array([0,0]))
###Output
_____no_output_____
###Markdown
Here's what beta looks like.
###Code
beta
###Output
_____no_output_____
###Markdown
Let's take a quick look at the shape of our matrices.
###Code
X.shape, beta.shape, y.shape
###Output
_____no_output_____
###Markdown
Now let's compute the cost for our initial solution (0 values for theta).
###Code
computeCost(X, y, beta)
###Output
_____no_output_____
###Markdown
So far so good. Now we need to define a function to perform gradient descent on the parameters beta.
###Code
def gradientDescent(X, y, beta, alpha, iters):
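    # Batch gradient descent: each iteration computes the prediction error (X * beta.T - y),
    # then moves every parameter beta_j a step of size alpha along the negative gradient;
    # the cost after each update is recorded so we can plot the training curve later.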
temp = np.matrix(np.zeros(beta.shape))
parameters = int(beta.ravel().shape[1])
cost = np.zeros(iters)
for i in range(iters):
error = (X * beta.T) - y
for j in range(parameters):
term = np.multiply(error, X[:,j])
temp[0,j] = beta[0,j] - ((alpha / len(X)) * np.sum(term))
beta = temp
cost[i] = computeCost(X, y, beta)
return beta, cost
###Output
_____no_output_____
###Markdown
Initialize some additional variables - the learning rate alpha, and the number of iterations to perform.
###Code
alpha = 0.01
iters = 1000
###Output
_____no_output_____
###Markdown
Now let's run the gradient descent algorithm to fit our parameters theta to the training set.
###Code
g, cost = gradientDescent(X, y, beta, alpha, iters)
g
###Output
_____no_output_____
###Markdown
Finally we can compute the cost (error) of the trained model using our fitted parameters.
###Code
computeCost(X, y, g)
###Output
_____no_output_____
###Markdown
Now let's plot the linear model along with the data to visually see how well it fits.
###Code
x = np.linspace(data.Fires.min(), data.Fires.max(), 100)
f = g[0, 0] + (g[0, 1] * x)
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Fires, data.Thefts, label='Training Data')
ax.legend(loc=2)
ax.set_xlabel('Fires')
ax.set_ylabel('Thefts')
ax.set_title('Predicted Fires vs. Thefts')
###Output
_____no_output_____
###Markdown
Looks pretty good! Since the gradient decent function also outputs a vector with the cost at each training iteration, we can plot that as well. Notice that the cost always decreases - this is an example of a convex optimization problem.
###Code
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(np.arange(iters), cost, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
###Output
_____no_output_____
###Markdown
Multiple Linear Regression Exercise 1 also included a housing price data set with 2 variables (size of the house in square feet and number of bedrooms) and a target (price of the house). Let's use the techniques we already applied to analyze that data set as well.
###Code
path = os.getcwd() + '/ex1data2.txt'
data2 = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
data2.head()
###Output
_____no_output_____
###Markdown
For this task we add another pre-processing step - normalizing the features. This is very easy with pandas.
###Code
data2 = (data2 - data2.mean()) / data2.std()
data2.head()
###Output
_____no_output_____
###Markdown
Now let's repeat our pre-processing steps from part 1 and run the linear regression procedure on the new data set.
###Code
# add ones column
data2.insert(0, 'Ones', 1)
# set X (training data) and y (target variable)
cols = data2.shape[1]
X2 = data2.iloc[:,0:cols-1]
y2 = data2.iloc[:,cols-1:cols]
# convert to matrices and initialize theta
X2 = np.matrix(X2.values)
y2 = np.matrix(y2.values)
theta2 = np.matrix(np.array([0,0,0]))
# perform linear regression on the data set
g2, cost2 = gradientDescent(X2, y2, theta2, alpha, iters)
# get the cost (error) of the model
computeCost(X2, y2, g2)
###Output
_____no_output_____
###Markdown
We can take a quick look at the training progess for this one as well.
###Code
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(np.arange(iters), cost2, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
###Output
_____no_output_____
###Markdown
Instead of implementing these algorithms from scratch, we could also use scikit-learn's linear regression function. Let's apply scikit-learn's linear regressio algorithm to the data from part 1 and see what it comes up with.
###Code
from sklearn import linear_model
model = linear_model.LinearRegression()
model.fit(X, y)
###Output
_____no_output_____
###Markdown
Here's what the scikit-learn model's predictions look like.
###Code
x = np.array(X[:, 1].A1)
f = model.predict(X).flatten()
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Fires, data.Thefts, label='Training Data')
ax.legend(loc=2)
ax.set_xlabel('Fires')
ax.set_ylabel('Thefts')
ax.set_title('Predicted Thefts vs. Numbers of Fires')
###Output
_____no_output_____ |
Notebooks/submission.ipynb | ###Markdown
Submission NotebookWe have worked on optimising the charging and discharging of the battery. Exploratory Data AnalysisLet's look at the 'apx_da_hourly' data.
###Code
import sys
sys.path.append("../")
from Hack import load
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
epex = load.epex().load()
# Show seasonal trends
epex['doy'] = epex.index.day_of_year
epex['year'] = epex.index.year
fig, axs = plt.subplots(1,1, figsize=(15,10), sharex='col')
for i in [2019, 2020, 2021]:
epex_temp = epex.loc[epex.year==i]
epex_group = epex_temp.groupby(['doy']).mean()
axs.plot(epex_group.index, epex_group['apx_da_hourly'], label=str(i))
axs.set_ylabel('apx_da_hourly')
axs.set_xlabel('day of year')
axs.legend()
fig.subplots_adjust(hspace=0.1)
# Show daily variations (e.g. 2019)
doy1 = 300
doy2 = 310
fig, axs = plt.subplots(1,1, figsize=(15,10), sharex='col')
epex_temp = epex.loc[(epex.doy>doy1)&(epex.doy<doy2)&(epex.year==2019)]
axs.plot(epex_temp.index, epex_temp['apx_da_hourly'], label=str(i))
axs.set_ylabel('apx_da_hourly')
axs.set_xlabel('2019 (yyyy-mm-dd)')
axs.legend()
fig.subplots_adjust(hspace=0.1)
###Output
_____no_output_____
###Markdown
Challenge We decided to focus on the optimisation side of the project, feeling more confident with this than the forecasting Model* The problem is how to train a program to allow a battery to decide at each point in time whether to charge or discharge, so that overall it tends to maximise the cumulatibe profit* We settled on solving this using a reinforcement learning approach because the problem wasn't obviously a regression or classification issue, more learning a strategy for when to buy or sell* Reinforcement learning is good for this because it allows an informed improvement to the policy as well as optimising the profit Implementation* We used the stable_baselines3 package, due to its good documentation* We defined a custom Gym Environment class to define our game, which effectively sets out the rules for our algorithm* We need to define an action_space and an observation_space for this environment. The action_space consisted of all the actions our agent (battery could do). This was buy energy (& charge the battery), sell energy (& discharge the battery) or hold. The observation_space is the all of the properties of the environment that can influence the actions the agent might take. * Our observation_space consisted of the current price of electricity, the current energy of the battery and some metric for whether it would be good or bad to sell energy* This last feature was the most difficult feature to decide. This is probably the most crucial feature however because it is basically what determines our reward or punishment scheme for our battery. We wanted to punish our battery if it tries to buy energy when the price is high, or sell when the price is low. And vice versa. We did this by calculating at every 30 minute interval the revenue if the battery tried to buy or sell for the next 30 minutes, and compared this to the theoretical amount that could be made if the price was equal to the average price (our metric for what is high and what is low). Originally, we used the mean for the average but we found that this meant that spikes in the data were really problematic as they strongly offset the mean. Instead, we found the median a much more sensible measure. The figures expected_price_2019.png, expected_price_2020.png, expected_price_2021.png show this* We trained the data on the first two years of data (2019 and 2020), and then tried to make predictions in 2021 based on this trained model Results (See the figure directory for our results)1) We originally trained our data on a week's worth of data at the start of 2019. We then got predictions for the next week of data. Here the model is clearly working as expected. We evaluate the mean reward of our model and find that it's positive (our model is making a profit). And better than the random model.2) We then train our dataset on a month's worth of data at the start of 2019 and then get predictions for the next month. The model still performs well and better than the random model. The model has good behaviours3) Ultimately, we want to model data in 2021. So we treat 2019-2020 as a training dataset and then predict values for 2021. This is where our model fails to work effectively. We think this is because we don't have a good way to handle spikes in the data. Our model can't predict future spikes at all, so sometimes it sells when the price is yet to climb (which we don't penalise).
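Below is a minimal sketch of what a custom Gym environment along these lines could look like. This is purely an illustration: it is not the actual `rl.energy_price_env` class from `Hack.rl`, and the battery capacity, charge rate, and reward scaling used here are assumptions.
###Code
# Hypothetical sketch of a buy/sell/hold battery environment (illustration only, not the Hack.rl code).
import numpy as np
import gym
from gym import spaces

class BatteryEnvSketch(gym.Env):
    """Toy battery-trading environment over a 1-D array of half-hourly prices."""

    def __init__(self, prices, capacity=10.0, rate=1.0):
        super().__init__()
        self.prices = np.asarray(prices, dtype=np.float32)
        self.median = float(np.median(self.prices))      # benchmark for "high" vs "low" prices
        self.capacity = capacity                         # assumed maximum stored energy
        self.rate = rate                                 # assumed energy moved per 30-minute step
        self.action_space = spaces.Discrete(3)           # 0 = sell, 1 = hold, 2 = buy
        # observation: [current price, current charge, current price minus the median price]
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(3,), dtype=np.float32)

    def _obs(self):
        p = self.prices[self.t]
        return np.array([p, self.charge, p - self.median], dtype=np.float32)

    def reset(self):
        self.t = 0
        self.charge = 0.0
        return self._obs()

    def step(self, action):
        rel = self.prices[self.t] - self.median          # positive when the current price is "high"
        reward = 0.0
        if action == 2 and self.charge < self.capacity:      # buy & charge: rewarded when the price is low
            self.charge += self.rate
            reward = -rel * self.rate
        elif action == 0 and self.charge > 0:                 # sell & discharge: rewarded when the price is high
            self.charge -= self.rate
            reward = rel * self.rate
        self.t += 1
        done = self.t >= len(self.prices) - 1
        return self._obs(), reward, done, {}
###Output
_____no_output_____
###Markdown
The code below loads the price data, restores the trained PPO model, and evaluates it on the chosen test period.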
###Code
from Hack import load, rl
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
epex = load.epex().load()
price_array = epex['apx_da_hourly'].values
start_of_2020 = None
start_of_2021 = None
for idx, (i, row) in enumerate(epex.iterrows()):
if i.year > 2019 and start_of_2020 is None:
start_of_2020 = idx
if i.year > 2020 and start_of_2021 is None:
start_of_2021 = idx
break
print(start_of_2020, start_of_2021)
loaded_model = PPO.load("Models/train_first_month.zip")
%matplotlib qt5
period = 'all'
# period = 'sept'
if period == 'sept':
# test on september
test_start_idx = 42434 # end_idx # start_of_2020 # end_idx # start_of_2020 # 2*24*7
test_end_idx = test_start_idx + 4*7*24*2 # -1 # start_of_2021 # 2*end_idx # start_of_2021 # 30770 + 2*24*7
elif period == 'all':
test_start_idx = 4*7*24*2
test_end_idx = -1
elif period == '2020':
test_start_idx = start_of_2020
test_end_idx = start_of_2021
elif period == '2021':
test_start_idx = start_of_2021
test_end_idx = -1
test_price_array = price_array[test_start_idx:test_end_idx]
new_env = DummyVecEnv([lambda: rl.energy_price_env(test_price_array)])
mean_reward_after_train = rl.evaluate(loaded_model, new_env=new_env, num_episodes=1, index=epex.index[test_start_idx:test_end_idx])
###Output
C:\Users\Ronan\Anaconda3\envs\ml\lib\site-packages\gym\logger.py:34: UserWarning: [33mWARN: Box bound precision lowered by casting to float32[0m
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
|
notebooks/07a-lightdlf.ipynb | ###Markdown
Lightdlf test> From Tensors to nonlinear transformations, gradient descent, and the loss function
###Code
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from lightdlf_old.cpu.core import Tensor
from lightdlf_old.cpu.layers import Linear, Tanh, Sigmoid, Relu, Sequential, MSELoss
from lightdlf_old.cpu.optimizers import SGD
import numpy as np
np.random.seed(0)
data = Tensor(np.array([[0,0],[0,1],[1,0],[1,1]]), autograd=True) # (4,2)
target = Tensor(np.array([[0],[1],[0],[1]]), autograd=True) # (4,1)
model = Sequential([Linear(2,3),
Tanh(),
Linear(3,1),
Sigmoid()])
criterion = MSELoss()
# optim = SGD(model.get_parameters(), alpha=0.05) # Linear
optim = SGD(model.get_parameters(), alpha=1) # Tanh, Sigmoid
for i in range(10):
    # Predict
    pred = model.forward(data)
    # Compare
    loss = criterion.forward(pred, target)
    # Learn
loss.backward(Tensor(np.ones_like(loss.data)))
optim.step()
print(loss)
###Output
[1.06372865]
[0.75148144]
[0.57384259]
[0.39574294]
[0.2482279]
[0.15515294]
[0.10423398]
[0.07571169]
[0.05837623]
[0.04700013]
|
Week 4 project CA/Export_LAtimes_duplicates_shp.ipynb | ###Markdown
In this notebook, we export to .shp files the duplicated precincts in the LA times precincts data to view them in QGIS. 2 "versions" are exported: - the simple "subshapefile" of the duplicates - a shapefile with only one entry per precinct, with area computed as the sum of all areas (note that this might be an approximation rather than the exact area as we did not handle overlaps), and geometry as the multipolygon composed of all polygons associated to a precinct.
###Code
import os
import numpy as np
import pandas as pd
import geopandas as gpd
precincts = gpd.read_file('./Data/LAtimes2016_merged/merged_CA2016.shp')
dups = precincts[precincts.duplicated('pct16', keep =False)== True]
#save duplicates shapefile - for QGIS use
dups.to_file("LA_times_duplicates.shp")
###Output
_____no_output_____
###Markdown
Now we will group all the duplicates in one line for each precinct. Changes to make to obtain this file:- turn the polygon pieces into a single multipolygon object for each county ID. - compute the area, being careful about overlaps of precinct pieces (for now, we only care about geometry, and can actually obtain area from the geometry, so the simple approximation of summing all areas of the pieces corresponding to one precinct will be used in the first place)
###Code
dups.crs
areas = dups.groupby('pct16').agg({'area':'sum'})
duplicates = dups[['pct16']].drop_duplicates('pct16', keep = 'first')
len(duplicates), len(areas)
#difference is due to 'None' precinct...
duplicates = duplicates.join(areas, on = 'pct16')
from shapely.ops import cascaded_union
duplicates['geometry'] = np.nan
df = dups[dups['pct16'] == '071-RAN1050']
geoms = df['geometry'].to_list()
geom = gpd.GeoSeries(cascaded_union(geoms))
geom[0]
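# for every duplicated precinct, merge all of its polygon pieces into a single (multi)polygon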
for index, row in duplicates.iterrows():
precinct = row['pct16']
    if precinct is not None:
try :
df = dups[dups['pct16'] == precinct]
geoms = df['geometry'].to_list()
geom = gpd.GeoSeries(cascaded_union(geoms))
duplicates['geometry'][index] = geom[0]
except:
print('you should check precinct')
print(precinct)
dups[dups.pct16 == '085-0']
duplicates.loc[duplicates.pct16 == '085-0','geometry'] = np.nan
duplicates = duplicates[~pd.isnull(duplicates['geometry'])]
duplicates = gpd.GeoDataFrame(duplicates, geometry='geometry')
duplicates.crs = dups.crs
duplicates.to_file("LA_times_duplicates_agg.shp")
###Output
_____no_output_____ |
measuredivergence/modelsofdivergence.ipynb | ###Markdown
Measuring the divergence between models.We have lots of ways of comparing different models of the same data. One model may be more "accurate" than the other--or have better "precision" or "recall."But what if you want to compare models of _different_ data? For instance, say we have one model that separates science fiction from mainstream fiction, and another that separates fantasy from the mainstream. We'd like to be able to ask _how similar_ the two models are. Are SF and fantasy different from mainstream fiction in similar ways?One intuitive way to pose this question might be to ask, "Can a model that separates SF from the mainstream also separate fantasy?" In practice, it can, but it does so somewhat worse than a model originally trained on fantasy.That leads us to the question explored in this notebook: can we explain what we mean when we say "model A performs somewhat worse on dataset B"? What counts as significantly worse? And also, can we compare the divergences between models (A -> B and C -> D) by reference to any shared yardstick? For instance, suppose I also train multiple models of science fiction in different periods. Just as a model of SF loses accuracy on fantasy, a model of 1970s SF will lose some accuracy if asked to make predictions about the 19th century. We might understand that loss of accuracy as a measure of generic change. Can we meaningfully compare this measure of change to the distance between genres? Could we say, for instance, "In the 1970s, SF was about as different from fantasy as it was from its own past in the era of Jules Verne?" The general approachWe don't start out knowing, in principle, which genres are more or less similar to each other, so it's hard to calibrate the space of similarity between models.We do know, however, that a model asked to discriminate between two random samples of the same set will produce very little useful information. So we might reasonably use that to mark "zero" on our thermometer. Whatever boundary (A vs. B) we want to model, a model of an entirely random boundary should count as "not at all meaningfully similar to it."Then we could calibrate the space between A vs. B and sheer randomness by gradually mixing B into A. For instance, we could start diluting A by replacing 5% of the A examples with examples of B. This will weaken our model; the A/B boundary will be less accurately traced. Then we replace 10% of the examples of A. Then 15%, and so on. By the time we're done, we have twenty models defining the space between A/B and a random boundary. Choosing a metricI ran that test, using science fiction (labeled by librarians or OCLC) as A and a collection of fiction selected randomly from HathiTrust for B. (See labnotebook.md for details of the code used.)I'm going to read in the results of the dilution below as **sfvsrand**. Each row in this dataset actually defines a _comparison_ between two models rather than a single model. There are 86 comparisons and only 41 models. But the rows do also reference the accuracy of the original models, so we can use them to plot the original models' accuracy if we don't mind plotting the same dot several times.
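To make the dilution schedule concrete, here is an illustrative sketch of the idea. This is not the code referenced in labnotebook.md; the id pools and sample sizes below are invented purely for illustration.
###Code
import random

def diluted_sample(positive_ids, contrast_ids, dilution, n):
    """Return n "positive" training ids in which a `dilution` fraction has been
    silently replaced with ids drawn from the contrast set."""
    k = int(round(dilution * n))
    return random.sample(contrast_ids, k) + random.sample(positive_ids, n - k)

# toy id pools standing in for the SF volumes (A) and the random-fiction volumes (B)
sf_ids = ['sf_vol_%d' % i for i in range(500)]
random_ids = ['rand_vol_%d' % i for i in range(500)]

# twenty training sets spanning the space from the real boundary (0% dilution)
# toward an essentially random boundary (95% dilution); each set would be used
# to train one model against the undiluted contrast set
diluted_sets = {round(i * 0.05, 2): diluted_sample(sf_ids, random_ids, i * 0.05, n=100)
                for i in range(20)}
###Output
_____no_output_____
###Markdown
With that picture in mind, let's read in the results of the actual dilution experiment.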
###Code
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
from matplotlib import pyplot as plt
%matplotlib inline
sfvsrand = pd.read_csv('sf_divergences.tsv', sep = '\t')
sfvsrand.head()
###Output
_____no_output_____
###Markdown
the accuracies of the models, considered separately

As you can see below, there's a gratifyingly linear relationship between the amount of dilution we apply and the accuracy of the resulting model.

The accuracies don't get *quite* down to a flat .50; that's because our model-tuning method overfits a little through parameter selection, in an attempt to give every dataset the benefit of a doubt. If you were testing algorithms, you might not want to do this, but we're interested in the data, and applied consistently it does no harm.

In any case, the difference between .90 and .52 or whatever is sufficiently clear.
###Code
ax = sfvsrand.plot.scatter('ratiodiff', 'acc2')
ax.set_xlabel('amount of dilution')
ax.set_ylabel('accuracy')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, sfvsrand.acc2)[0]) )
###Output
_____no_output_____
###Markdown
loss of accuracy

But the accuracies of the models, considered separately, don't do much for us. We definitely cannot conclude that every 70% accurate model has some relation to science fiction. It may have no relation at all! We're interested in the accuracies we get when we apply one model to another model's data.

For instance, what if we apply the diluted models back to the clean data, and ask how much accuracy they lose compared to the models that were trained on those original data samples?
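Concretely, the `loss2on1` column plotted below could be computed along these lines (a hypothetical helper; `model_clean`, `model_diluted`, and the clean feature matrix and labels are stand-in names, since the comparison file already stores these quantities):

```python
from sklearn.metrics import accuracy_score

def loss_on_clean(model_clean, model_diluted, X_clean, y_clean):
    """Accuracy the diluted model gives up, relative to the model trained on
    the clean sample, when both make predictions about the clean sample."""
    acc_clean = accuracy_score(y_clean, model_clean.predict(X_clean))
    acc_diluted = accuracy_score(y_clean, model_diluted.predict(X_clean))
    return acc_clean - acc_diluted
```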
###Code
ax = sfvsrand.plot.scatter('ratiodiff', 'loss2on1')
ax.set_xlabel('amount of dilution')
ax.set_ylabel('accuracy lost by diluted model on clean data')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, sfvsrand.loss2on1)[0]) )
###Output
_____no_output_____
###Markdown
It's not exactly a linear relationship, but it's clear that diluted models lose more accuracy. What if we try vice-versa? Do models trained on clean data also lose accuracy relative to models trained on the diluted data?
###Code
ax = sfvsrand.plot.scatter('ratiodiff', 'loss1on2')
ax.set_xlabel('amount of dilution')
ax.set_ylabel('accuracy lost by clean model on diluted data')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, sfvsrand.loss1on2)[0]) )
###Output
_____no_output_____
###Markdown
Not so much. "Loss of accuracy" can be an asymmetric relationship. If category B is not very coherent to start with, a model of category A can sometimes make predictions that are almost as good as a model trained on B's data--even when a model trained on B is uninformative about A! This is admittedly most likely to happen in the artificial situation we have constructed (where B is just a diluted version of A). But it's not impossible to imagine an analogous asymmetry occurring in the real world. E.g., one could imagine that hardboiled detective novels, or hard SF, are "extreme" or "stylized" versions of a genre. More generally, whenever we were comparing models with different base accuracies, we would have to worry that "loss of accuracy" was an asymmetric measure.

So let's look for other metrics. "Accuracy" was to begin with a bit crude, since it depends on a binary division into two classes. What if we ask a more detailed question, about the model's ability to sort instances according to their probability of belonging to the specified genre: P(genre|text)? We could measure this, for instance, through correlation.

correlation coefficients

Let's look at the Spearman correlation coefficient, which compares the way two models rank the texts in a dataset. First let's ask the model trained on diluted data to make predictions on clean data, and then calculate the correlation between those predictions and the predictions made by a model actually trained on clean data.
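The quantity plotted below, `spear2on1`, was presumably computed along these lines (a hypothetical helper; the two probability vectors are stand-ins for two models' P(genre|text) over the same set of texts):

```python
from scipy.stats import spearmanr

def ranking_agreement(probs_model_a, probs_model_b):
    """Spearman correlation between two models' predicted probabilities
    for the same texts -- i.e. how similarly they rank those texts."""
    rho, _ = spearmanr(probs_model_a, probs_model_b)
    return rho
```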
###Code
plt.scatter(sfvsrand.ratiodiff, sfvsrand.spear2on1)
plt.xlabel('ratio of dilution')
plt.ylabel('spearman correlation')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, sfvsrand.spear2on1)[0]) )
###Output
_____no_output_____
###Markdown
It's not very linear, but that's because correlation is capped at 1. We can fix that with a Fisher's z-transform, rendering the underlying metric linear. It's equivalent to arctanh.
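For reference, Fisher's z-transform of a correlation coefficient $r$ is

$$z = \operatorname{arctanh}(r) = \tfrac{1}{2}\ln\frac{1+r}{1-r},$$

which is why `np.arctanh` is applied to the correlations below.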
###Code
plt.scatter(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spear2on1))
plt.xlabel('ratio of dilution')
plt.ylabel('spearman correlation')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spear2on1))[0]) )
###Output
_____no_output_____
###Markdown
Hey, that's about as strong a linear relationship as our original metric, "loss of accuracy." Does it also tend in practice to be symmetric?

Let's now ask a model trained on clean data (A) to make predictions about a diluted dataset (B). We saw that these can be almost as "accurate" as a (not very accurate) model trained on diluted data. But we might hope that ability-to-rank is a sterner test, which will prove that A has no real congruence with B.
###Code
plt.scatter(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spear1on2))
plt.xlabel('ratio of dilution')
plt.ylabel('spearman correlation')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spear1on2))[0]) )
###Output
_____no_output_____
###Markdown
Yes, loss of correlation is symmetric! It's not a mathematical guarantee, but the correlation of A on B's data tracks the correlation of B on A's data pretty closely. In fact, it looks as though the original model's inability to sort a transformed dataset is an even stronger predictor of the dataset's alienation from our original genre. This difference is slight, however, and it might be due to the artificial nature of our test. (There are instances in the diluted dataset that will literally be impossible to predict, so A is sort of guaranteed to fail.)

Since these two measures tend to correlate, we could also average them, to produce a robust measure of divergence between two models. By calling this "robust," I mean that in the real world, we'll never know in practice which model is "A" and which one is "B." We might as well consider both models' perspectives on the other.
###Code
plt.scatter(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman))
plt.xlabel('dilution of data')
plt.ylabel('spearman correlation')
plt.show()
print("r = " + str(pearsonr(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman))[0]) )
###Output
_____no_output_____
###Markdown
our final metric

So we're measuring the divergence of models by averaging:

* spearman(modelA's prediction on Adata, modelB's prediction on Adata), and
* spearman(modelA's prediction on Bdata, modelB's prediction on Bdata).

It doesn't matter greatly whether we use Spearman or Pearson correlation. The results are very close. It *does* matter that we transform the two correlation coefficients with np.arctanh() before averaging them.

I tried a few other things, including KL divergence. They didn't in practice seem to work as well.

Calibration problems

However, if we want to render different sets of models comparable, we need some way to translate a correlation coefficient into a specific "distance." Although real-world models aren't really diluted versions of each other, we might use "percentage of dilution" as a rough and ready yardstick for this. "Pre-World-War-II SF is about as informative about the postwar genre ... as a model of the postwar genre would be if it were diluted by 25%." That's somewhat intelligible, and in any case it permits us to make comparisons between different pairs.

However, to use that yardstick, we'll need to translate our y axis in the graph above into a specific x value. And this is complicated by a messy reality: the different categories we will be modeling max out at different degrees of correlation.

For instance, distinguishing science fiction from *fantasy* is significantly harder than distinguishing it from random (mainstream) fiction. Models attempting to trace this blurrier boundary top out at roughly 77% accuracy, and their predictions about specific books don't correlate with each other as strongly as models of sf-vs-random. See the red line below:
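As a minimal sketch, the combined score for one pair of models might be computed like this (the column names `spear1on2` and `spear2on1` come from the comparison table above; the function name is just illustrative):

```python
import numpy as np

def combined_similarity(spear_1on2, spear_2on1):
    """Average the Fisher z-transformed cross-model Spearman correlations.
    Higher values mean the two models agree more closely about the data."""
    return np.mean([np.arctanh(spear_1on2), np.arctanh(spear_2on1)])

# e.g. for the first comparison in the table:
# combined_similarity(sfvsrand.spear1on2[0], sfvsrand.spear2on1[0])
```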
###Code
fanvsf = pd.read_csv('fsf_divergences.tsv', sep = '\t')
fig, ax = plt.subplots(figsize = (8, 6))
plt.scatter(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman), c ='b', alpha = 0.6)
plt.scatter(fanvsf.ratiodiff, np.arctanh(fanvsf.spearman), c = 'r', alpha = 0.6)
plt.xlabel('dilution of data', fontsize = 14)
plt.ylabel('spearman correlation', fontsize = 14)
z = np.polyfit(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman), 1)
p = np.poly1d(z)
ax.plot(sfvsrand.ratiodiff, p(sfvsrand.ratiodiff), linestyle = (0, (5, 5)), c = 'b')
z = np.polyfit(fanvsf.ratiodiff, np.arctanh(fanvsf.spearman), 1)
p = np.poly1d(z)
ax.plot(fanvsf.ratiodiff, p(fanvsf.ratiodiff), linestyle = (0, (5, 5)), c = 'r')
plt.ylim((-0.6, 1.8))
plt.show()
fant = pd.read_csv('fantasy_divergences.tsv', sep = '\t')
fig, ax = plt.subplots(figsize = (8, 6))
plt.scatter(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman), c ='b', alpha = 0.6)
plt.scatter(fant.ratiodiff, np.arctanh(fant.spearman), c = 'r', alpha = 0.6)
plt.xlabel('dilution of data', fontsize = 14)
plt.ylabel('spearman correlation', fontsize = 14)
z = np.polyfit(sfvsrand.ratiodiff, np.arctanh(sfvsrand.spearman), 1)
p = np.poly1d(z)
ax.plot(sfvsrand.ratiodiff, p(sfvsrand.ratiodiff), linestyle = (0, (5, 5)), c = 'b')
z = np.polyfit(fant.ratiodiff, np.arctanh(fant.spearman), 1)
p = np.poly1d(z)
ax.plot(fant.ratiodiff, p(fant.ratiodiff), linestyle = (0, (5, 5)), c = 'r')
plt.ylim((-0.6, 1.8))
plt.show()
fant = pd.read_csv('fantasy_divergences.tsv', sep = '\t')
fig, ax = plt.subplots(figsize = (8, 6))
plt.scatter(sfvsrand.ratiodiff, np.arctanh((sfvsrand.loss+0.1)/sfvsrand.acc1), c ='b', alpha = 0.6)
plt.scatter(fant.ratiodiff, np.arctanh((fant.loss+0.1)/fant.acc1), c = 'r', alpha = 0.6)
plt.xlabel('dilution of data', fontsize = 14)
plt.ylabel('accuracy loss', fontsize = 14)
z = np.polyfit(sfvsrand.ratiodiff, (sfvsrand.loss + 0.1)/sfvsrand.acc1, 1)
p = np.poly1d(z)
ax.plot(sfvsrand.ratiodiff, p(sfvsrand.ratiodiff), linestyle = (0, (5, 5)), c = 'b')
z = np.polyfit(fant.ratiodiff, (fant.loss + 0.1)/fant.acc1, 1)
p = np.poly1d(z)
ax.plot(fant.ratiodiff, p(fant.ratiodiff), linestyle = (0, (5, 5)), c = 'r')
plt.show()
pearsonr(fant.ratiodiff, np.arctanh(fant.spearman))
allframes = pd.concat([fant, fanvsf, sfvsrand])
allframes = allframes[allframes.ratiodiff < .1]
pearsonr(allframes.acc2 + allframes.acc1, np.arctanh(allframes.spearman))
import statsmodels.formula.api as smf
lm = smf.ols(formula='ratiodiff ~ spearman + spearman:acc1 + loss', data=allframes).fit()
lm.summary()
import math
math.sqrt(.737)
###Output
_____no_output_____ |
notes/.ipynb_checkpoints/Kahraman_1994-checkpoint.ipynb | ###Markdown
Some manipulations on (Kahraman, 1994)

[1] A. Kahraman, "Natural Modes of Planetary Gear Trains", Journal of Sound and Vibration, vol. 173, no. 1, pp. 125-130, 1994. https://doi.org/10.1006/jsvi.1994.1222.
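The notebook below only assembles and manipulates the inertia and stiffness matrices, but the natural modes themselves follow from the generalized eigenvalue problem $K\varphi = \omega^2 M\varphi$. A minimal numeric sketch of that last step (every matrix entry here is a made-up placeholder, not a value from the paper) could look like:

```python
import numpy as np
from scipy.linalg import eigh

# placeholder numeric inertia and stiffness matrices for the 5 torsional DOFs
# (carrier, three planets, sun) -- in practice these would come from substituting
# numbers into the symbolic M and K assembled below
M_num = np.diag([4.0, 0.3, 0.3, 0.3, 1.2])
K_num = 1e8 * np.array([
    [ 6.0, -1.0, -1.0, -1.0, -3.0],
    [-1.0,  2.0,  0.0,  0.0,  1.0],
    [-1.0,  0.0,  2.0,  0.0,  1.0],
    [-1.0,  0.0,  0.0,  2.0,  1.0],
    [-3.0,  1.0,  1.0,  1.0,  3.0],
])

# natural modes solve K v = w^2 M v
w_squared, mode_shapes = eigh(K_num, M_num)
natural_frequencies_hz = np.sqrt(np.abs(w_squared)) / (2 * np.pi)
```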
###Code
from sympy import *
init_printing()
def symb(x,y):
return symbols('{0}_{1}'.format(x,y), type = float)
###Output
_____no_output_____
###Markdown
Displacement vector:
###Code
n = 3 # number of planets
N = n + 3 # number of degrees of freedom
crs = ['c', 'r', 's'] # carrier, ring, sun
pla = ['p{}'.format(idx + 1) for idx in range(n)] # planet
crs = crs + pla # put them together
coeff_list = symbols(crs)
c = coeff_list[0]
r = coeff_list[1]
s = coeff_list[2]
X = Matrix([symb('u', v) for v in coeff_list])
coeff_list[3:] = symbols(['p']*n)
p = coeff_list[3]
X.transpose() # Eq. (1a)
###Output
_____no_output_____
###Markdown
Stiffness matrix, where:

* $k_1$: mesh stiffness for the ring-planet gear pair
* $k_2$: mesh stiffness for the sun-planet gear pair
* $k_c$: carrier housing stiffness
* $k_r$: ring housing stiffness
* $k_s$: sun housing stiffness
* Diagonal 1, in red
* Diagonal 2, in grey
* Off-diagonal, in blue
###Code
k_1, k_2, k_c, k_r, k_s = symbols('k_1 k_2 k_c k_r k_s', type = float)
# Diagonal 1:
K_d1 = zeros(3, 3)
K_d1[0, 0] = n*(k_1 + k_2) + k_c
K_d1[1, 1] = n* k_1 + k_r
K_d1[2, 2] = n* k_2 + k_s
K_d1[0, 1] = K_d1[1, 0] = -n*k_1
K_d1[0, 2] = K_d1[2, 0] = -n*k_2
# Diagonal 2:
K_d2 = eye(n)*(k_1 + k_2)
# Off diagonal:
K_od = zeros(n, n)
K_od[:, 0] = (k_1 - k_2)*ones(n, 1)
K_od[:, 1] = -k_1 *ones(n, 1)
K_od[:, 2] = k_2 *ones(n, 1)
K = BlockMatrix([[K_d1, K_od.transpose()],
[K_od, K_d2]])
K = Matrix(K)
if(not K.is_symmetric()):
print('error.')
K
###Output
_____no_output_____
###Markdown
Inertia matrix:
###Code
M = diag(*[symb('m', v) for v in coeff_list])
M
###Output
_____no_output_____
###Markdown
Remove ring degree of freedom
###Code
X.row_del(1)
K.row_del(1)
K.col_del(1)
M.row_del(1)
M.col_del(1)
coeff_list.remove(r)
N = N - 1
###Output
_____no_output_____
###Markdown
Coordinate transformation:

First from translational to torsional coordinates, then making the sun DOF the last one, which makes it easier to assemble a multi-stage gearbox.
###Code
R_1 = diag(*[symb('r', v) for v in coeff_list])
R_1
###Output
_____no_output_____
###Markdown
making the sun DOF to be the last one:
###Code
N1 = N - 1
R_2 = zeros(N, N)
R_2[0, 0] = 1
R_2[1, N1] = 1
R_2[2:N, 1:N1] = eye(n)
R_2
R = R_1*R_2
RMR = lambda m: transpose(R)*m*R
###Output
_____no_output_____
###Markdown
Inertia matrix
###Code
M = RMR(M)
if(not M.is_symmetric()):
print('error in M matrix')
M
###Output
_____no_output_____
###Markdown
Stiffness matrix
###Code
K = RMR(K)
if(not K.is_symmetric()):
print('error in K matrix')
###Output
_____no_output_____
###Markdown
The housing stiffnesses for both the carrier and the sun are zero:
###Code
K = K.subs([(k_c, 0), (k_s, 0)])
K
###Output
_____no_output_____
###Markdown
From that, one can write the matrices for a planetary system with $n$-planets using the following code:
###Code
m_c, m_s, m_p, r_c, r_s, r_p = symbols('m_c m_s m_p r_c r_s r_p', type = float)
M_p = zeros(N, N)
M_p[0, 0] = m_c*r_c**2
M_p[N1, N1] = m_s*r_s**2
M_p[1:N1, 1:N1] = m_p*r_p**2 * eye(n)
K_p = zeros(N, N)
K_p[0, 0] = n*(k_1 + k_2)*r_c**2
K_p[N1, 0] = -n*k_2*r_s*r_c
K_p[0, N1] = -n*k_2*r_s*r_c
K_p[N1, N1] = n*k_2*r_s**2
K_p[0, 1:N1] = (k_1 - k_2)*r_c*r_p*ones(1, n)
K_p[1:N1, 0] = (k_1 - k_2)*r_c*r_p*ones(n, 1)
K_p[N1, 1:N1] = k_2*r_p*r_s*ones(1, n)
K_p[1:N1, N1] = k_2*r_p*r_s*ones(n, 1)
K_p[1:N1, 1:N1] = (k_1 + k_2)*r_p**2 * eye(n)
m_diff = abs(matrix2numpy(simplify(M_p - M))).sum()
k_diff = abs(matrix2numpy(simplify(K_p - K))).sum()
if(m_diff != 0.0):
print('Error in M matrix.')
if(k_diff != 0.0):
print('Error in K matrix.')
###Output
_____no_output_____
###Markdown
Combining planet DOFs:
###Code
C = zeros(N, 3)
C[ 0, 0] = 1
C[ N1, 2] = 1
C[1:N1, 1] = ones(n, 1)
CMC = lambda m: transpose(C)*m*C
###Output
_____no_output_____
###Markdown
Inertia matrix
###Code
M_C = CMC(M)
if(not M_C.is_symmetric()):
print('error in M_C matrix')
M_C
###Output
_____no_output_____
###Markdown
Stiffness matrix
###Code
K_C = CMC(K)
if(not K_C.is_symmetric()):
print('error in M_C matrix')
K_C
###Output
_____no_output_____
###Markdown
Adapting it to a parallel gear set

Considering only one of the sun-planet pairs, one should change the sub-indexes in the following way:

* [p]lanet => [w]heel
* [s]un => [p]inion

It is also necessary to remove the mesh stiffness of the ring-planet pair.

Inertia matrix
###Code
k, w, p = symbols('k w p', type = float)
m_w, m_p, r_w, r_p = symbols('m_w m_p r_w r_p', type = float)
N2 = N - 2
M_par = M[N2:, N2:]
M_par = M_par.subs([(m_p, m_w), (m_s, m_p), (r_p, r_w), (r_s, r_p)]) #
M_par
###Output
_____no_output_____
###Markdown
Stiffness matrix
###Code
K_par = K[N2:, N2:]
K_par = K_par.subs(k_1, 0) # ring-planet mesh stiffness
K_par = K_par.subs(k_s, 0) # sun's bearing stiffness
K_par = K_par.subs(n*k_2, k_2) # only one pair, not n
K_par = K_par.subs(k_2, k) # mesh-stiffness of the pair
K_par = K_par.subs([(r_p, r_w), (r_s, r_p)])
K_par
###Output
_____no_output_____
###Markdown
From that, one can write the matrices for a parallel system using the following code:
###Code
M_p = diag(m_w*r_w**2, m_p*r_p**2)
mat_diff = abs(matrix2numpy(simplify(M_p - M_par))).sum()
if(mat_diff != 0.0):
print('Error in M_p matrix.')
K_p = diag(r_w**2, r_p**2)
K_p[0, 1] = r_p*r_w
K_p[1, 0] = r_p*r_w
K_p = k*K_p
mat_diff = abs(matrix2numpy(simplify(K_p - K_par))).sum()
if(mat_diff != 0.0):
print('Error in K_p matrix.')
###Output
_____no_output_____ |
Statistics/Lesson_2/Project_lesson_2.ipynb | ###Markdown
Load the data and check the number of observations and columns, the data types, whether there are any missing values, and which unique values occur. How many unique advertising campaigns were run?
###Code
# pandas/numpy/seaborn/scipy.stats (ss) and the tyk dataframe are assumed to be
# imported/loaded in earlier cells that are not shown here
tyk.shape
tyk.dtypes
tyk.xyz_campaign_id.value_counts()
###Output
_____no_output_____
###Markdown
Plot the distribution of the number of impressions (Impressions -- how many times users saw a given ad) for each Facebook advertising campaign, taking the logarithm of the values. Select the correct statements:
###Code
tyk_impres = tyk.groupby('fb_campaign_id') \
.agg({'Impressions': 'sum'})
np.log(tyk_impres)
sns.distplot(np.log(tyk_impres), kde=True)
###Output
/opt/tljh/user/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Now let's compute a few more useful metrics. The first is CTR (click-through rate), which measures clickability, i.e. the ratio of the number of clicks to the number of impressions. Create a new column, then look at the descriptive statistics. As the answer, give the ad_id of the ad with the highest CTR.
###Code
tyk['CTR'] = tyk.Clicks / tyk.Impressions
tyk
tyk.CTR.idxmax()
tyk.iloc[150]
###Output
_____no_output_____
###Markdown
Visualize CTR broken down by advertising campaign id (xyz_campaign_id). Which plot corresponds to the CTR distribution of campaign 916?
###Code
tyk_916 = tyk.query('xyz_campaign_id == "916"')
sns.distplot(tyk_916.CTR, kde = False, bins=20)
###Output
/opt/tljh/user/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
CPC (cost-per-click) is the cost of a user's click on an ad. It is calculated by dividing the amount of money spent by the total number of clicks. Print the descriptive statistics for the new variable and look at the shape of the distribution. As the answer, give the interquartile range, rounded to two decimal places.
###Code
tyk['CPC'] = tyk.Spent / tyk.Clicks
tyk
sns.distplot(tyk.CPC, kde=True)
round(ss.iqr(tyk.CPC, nan_policy='omit'), 2)
###Output
_____no_output_____
###Markdown
Visualize CPC broken down by the gender of the users who were shown the ads. Which plot do you get?
###Code
tyk
tyk_m = tyk.query('gender == "M"').CPC.dropna()
tyk_f = tyk.query('gender == "F"').CPC.dropna()
sns.distplot(tyk_m, kde=True)
sns.distplot(tyk_f, kde=True)
###Output
/opt/tljh/user/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Conversion rate (CR) is the ratio of the number of users who completed the target action at a given stage to the total number of users who reached that stage. Compute the click-to-purchase conversion. As the answer, give the conversion for ad 1121814 as a percentage, rounded to 2 decimal places. For example, if there are 10 clicks and 2 purchases, then the CR at this stage is $\frac{2}{10} = 0.2 = 20\%$.
###Code
tyk['CR'] = tyk.Approved_Conversion / tyk.Clicks
tyk.query('ad_id == "1121814"')
###Output
_____no_output_____ |
week_3/qlearning.ipynb | ###Markdown
Q-learning

This notebook will guide you through the implementation of the vanilla Q-learning algorithm. You need to implement `QLearningAgent` (follow the instructions for each method) and use it on a number of tests below.
###Code
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week3_model_free/submit.py
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from collections import defaultdict
import random
import math
import numpy as np
class QLearningAgent:
def __init__(self, alpha, epsilon, discount, get_legal_actions):
"""
Q-Learning Agent
based on https://inst.eecs.berkeley.edu/~cs188/sp19/projects.html
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate aka gamma)
Functions you should use
- self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable}
which returns legal actions for a state
- self.get_qvalue(state,action)
which returns Q(state,action)
- self.set_qvalue(state,action,value)
which sets Q(state,action) := value
!!!Important!!!
Note: please avoid using self._qValues directly.
There's a special self.get_qvalue/set_qvalue for that.
"""
self.get_legal_actions = get_legal_actions
self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def get_qvalue(self, state, action):
""" Returns Q(state,action) """
return self._qvalues[state][action]
def set_qvalue(self, state, action, value):
""" Sets the Qvalue for [state,action] to the given value """
self._qvalues[state][action] = value
#---------------------START OF YOUR CODE---------------------#
def get_value(self, state):
"""
Compute your agent's estimate of V(s) using current q-values
V(s) = max_over_action Q(state,action) over possible actions.
Note: please take into account that q-values can be negative.
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return 0.0
if len(possible_actions) == 0:
return 0.0
# <YOUR CODE>
value = max([self.get_qvalue(state,a) for a in possible_actions])
return value
def update(self, state, action, reward, next_state):
"""
You should do your Q-Value update here:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
"""
# agent parameters
gamma = self.discount
learning_rate = self.alpha
#<YOUR CODE>
qvalue = (1-learning_rate)*self.get_qvalue(state,action) + learning_rate*(reward+gamma*self.get_value(next_state))
#self.set_qvalue(state, action, <YOUR CODE: Q-value> )
self.set_qvalue(state, action, qvalue)
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
# <YOUR CODE>
q_dict = {a: self.get_qvalue(state,a) for a in possible_actions}
max_q = max(q_dict.values())
best_actions = [action for action, q in q_dict.items() if q == max_q]
best_action = random.choice(best_actions)
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we should take a random action.
otherwise - the best policy action (self.get_best_action).
Note: To pick randomly from a list, use random.choice(list).
To pick True or False with a given probablity, generate uniform number in [0, 1]
and compare it with your probability
"""
# Pick Action
possible_actions = self.get_legal_actions(state)
action = None
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
# agent parameters:
epsilon = self.epsilon
# <YOUR CODE>
if random.random() < self.epsilon:
chosen_action = random.choice(possible_actions)
else:
chosen_action = self.get_best_action(state)
return chosen_action
###Output
_____no_output_____
###Markdown
Try it on taxi

Here we use the Q-learning agent on the Taxi environment from OpenAI Gym. You will need to insert a few agent functions here.
###Code
import gym
env = gym.make("Taxi-v3")
n_actions = env.action_space.n
agent = QLearningAgent(
alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
def play_and_train(env, agent, t_max=10**4):
"""
This function should
- run a full game, actions given by agent's e-greedy policy
- train agent using agent.update(...) whenever it is possible
- return total reward
"""
total_reward = 0.0
s = env.reset()
for t in range(t_max):
# get agent to pick action given state s.
a = agent.get_action(s) # <YOUR CODE>
next_s, r, done, _ = env.step(a)
# train (update) agent for state s
agent.update(s,a,r,next_s) # <YOUR CODE>
s = next_s
total_reward += r
if done:
break
return total_reward
from IPython.display import clear_output
rewards = []
for i in range(1000):
rewards.append(play_and_train(env, agent))
agent.epsilon *= 0.99
if i % 100 == 0:
clear_output(True)
plt.title('eps = {:e}, mean reward = {:.1f}'.format(agent.epsilon, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.show()
###Output
_____no_output_____
###Markdown
Submit to Coursera I: Preparation
###Code
submit_rewards1 = rewards.copy()
###Output
_____no_output_____
###Markdown
Binarized state spaces

Use the agent to train efficiently on `CartPole-v0`. This environment has a continuous set of possible states, so you will have to group them into bins somehow.

The simplest way is to use `round(x, n_digits)` (or `np.round`) to round a real number to a given number of digits. The tricky part is to get the `n_digits` right for each state dimension to train effectively.

Note that you don't need to convert the state to integers, but to __tuples__ of any kind of values.
###Code
def make_env():
return gym.make('CartPole-v0').env # .env unwraps the TimeLimit wrapper
env = make_env()
n_actions = env.action_space.n
print("first state: %s" % (env.reset()))
plt.imshow(env.render('rgb_array'))
###Output
first state: [ 0.0265191 0.03594735 -0.04879002 -0.02842462]
###Markdown
Play a few games

We need to estimate observation distributions. To do so, we'll play a few games and record all states.
###Code
def visualize_cartpole_observation_distribution(seen_observations):
seen_observations = np.array(seen_observations)
# The meaning of the observations is documented in
# https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
f, axarr = plt.subplots(2, 2, figsize=(16, 9), sharey=True)
for i, title in enumerate(['Cart Position', 'Cart Velocity', 'Pole Angle', 'Pole Velocity At Tip']):
ax = axarr[i // 2, i % 2]
ax.hist(seen_observations[:, i], bins=20)
ax.set_title(title)
xmin, xmax = ax.get_xlim()
ax.set_xlim(min(xmin, -xmax), max(-xmin, xmax))
ax.grid()
f.tight_layout()
seen_observations = []
for _ in range(1000):
seen_observations.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
seen_observations.append(s)
visualize_cartpole_observation_distribution(seen_observations)
###Output
_____no_output_____
###Markdown
Binarize environment
###Code
from gym.core import ObservationWrapper
class Binarizer(ObservationWrapper):
def observation(self, state):
# Hint: you can do that with round(x, n_digits).
# You may pick a different n_digits for each dimension.
# state = <YOUR CODE: round state to some amount digits>
state[0] = np.round(state[0],0)
state[1] = np.round(state[1],1)
state[2] = np.round(state[2],2)
state[3] = np.round(state[3],1)
return tuple(state)
env = Binarizer(make_env())
seen_observations = []
for _ in range(1000):
seen_observations.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
seen_observations.append(s)
if done:
break
visualize_cartpole_observation_distribution(seen_observations)
###Output
_____no_output_____
###Markdown
Learn binarized policy

Now let's train a policy that uses the binarized state space.

__Tips:__

* Note that increasing the number of digits for one dimension of the observations increases your state space by a factor of $10$.
* If your binarization is too fine-grained, your agent will take much longer than 10000 steps to converge. You can either increase the number of iterations and reduce epsilon decay, or change the binarization. In practice we found that this kind of mistake is rather frequent.
* If your binarization is too coarse, your agent may fail to find the optimal policy. In practice we found that on this particular environment this kind of mistake is rare.
* **Start with a coarse binarization** and make it more fine-grained if that seems necessary.
* Having $10^3$–$10^4$ distinct states is recommended (`len(agent._qvalues)`), but not required.
* If things don't work without annealing $\varepsilon$, consider adding that (one simple schedule is sketched right after this list), but make sure that it doesn't go to zero too quickly.

A reasonable agent should attain an average reward of at least 50.
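One possible annealing schedule, purely as an illustration (the decay rate and floor are assumptions, not values required by the assignment):

```python
def anneal_epsilon(eps, decay=0.995, eps_min=0.01):
    """Geometric epsilon decay with a small exploration floor."""
    return max(eps * decay, eps_min)

# called once per episode, e.g.: agent.epsilon = anneal_epsilon(agent.epsilon)
```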
###Code
import pandas as pd
def moving_average(x, span=100):
return pd.DataFrame({'x': np.asarray(x)}).x.ewm(span=span).mean().values
agent = QLearningAgent(
alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
rewards = []
epsilons = []
for i in range(10000):
reward = play_and_train(env, agent)
rewards.append(reward)
epsilons.append(agent.epsilon)
# OPTIONAL: <YOUR CODE: adjust epsilon>
if i > 10000/2:
agent.epsilon *= 0.99
if i % 100 == 0:
rewards_ewma = moving_average(rewards)
clear_output(True)
plt.plot(rewards, label='rewards')
plt.plot(rewards_ewma, label='rewards ewma@100')
plt.legend()
plt.grid()
plt.title('eps = {:e}, rewards ewma@100 = {:.1f}'.format(agent.epsilon, rewards_ewma[-1]))
plt.show()
###Output
_____no_output_____
###Markdown
Submit to Coursera II: Submission
###Code
submit_rewards2 = rewards.copy()
from submit import submit_qlearning
submit_qlearning(submit_rewards1, submit_rewards2, '[email protected]', '8o2bR1ONQP8ltdu5')
###Output
Submitted to Coursera platform. See results on assignment page!
|
.ipynb_checkpoints/Article scraping and topic classification-checkpoint.ipynb | ###Markdown
Article extraction, topic classification and database
###Code
# How to run virtualenv with jupyter notebook for the purposes of gcloud api
# first install virtualenv, create and switch to env
# pip install ipykernel
# python -m ipykernel install --user --name=my-virtualenv-name
# when inside ipynb, change kernel to desired env
# classification on test paragraph
#text = "Late last month, a new bistro bar and restaurant concept by the name of Escobar opened its doors at China Square Central. In case you don’t know by its name already, it’s a bar with a theme that revolves around everything Pablo Escobar, the real-life Colombian drug lord who enjoyed a recent resurgence in popularity thanks to beloved Netflix series Narcos. But with murals dedicated to The King of Cocaine and a themed menu with offerings such as “Stab in Your Heart burger” and “Don Corleone” pizza, one would wonder if it’s problematic to celebrate a narcoterrorist who was responsible for thousands of deaths and turned Colombia into the murder capital of the world during the height of his power."
#classify(text)
# newspaper api for article scraping
from newspaper import Article
#url = input()
#article = Article(url)
#article.download()
#article.parse()
#article.authors
#article.publish_date
#article.text
#article.text
import snippets
#catlist = snippets.classify_text(article.text)
#entlist = snippets.entities_text(article.text)
# SQL Initialisation
import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
cursor.execute('DROP TABLE data;') # DELETES TABLE
cursor.execute('CREATE TABLE data (url VARCHAR PRIMARY KEY, category VARCHAR, author VARCHAR);')
def write(url, category,author):
cursor.execute('''INSERT INTO data (url,category,author) VALUES ('{}','{}','{}')'''.format(url,category,author))
connection.commit()
def fetch():
cursor.execute('SELECT * FROM DATA;')
result = cursor.fetchall()
for r in result:
print(r)
url_list = ['https://mothership.sg/2018/02/changi-airport-crash-flight-delay/',
'https://mothership.sg/2018/02/16-years-on-super-brainy-ex-beauty-queen-nuraliza-osman-strives-to-harness-her-enduring-star-power-for-good/',
'https://mothership.sg/2018/02/lee-kuan-yew-told-judges-to-ignore-mp-letters/',
'https://mothership.sg/2018/02/public-apology-scandalise-court-chc-meme/',
'https://mothership.sg/2018/02/singapore-china-chang-wanquan-bromance-ng-eng-hen/',
'https://mothership.sg/2018/02/wrestle-shirtless-mma/',
'https://mothership.sg/2018/02/sias-first-boeing-787-10-will-be-flying-to-osaka-japan-in-may-2018/'
]
def main():
for url in url_list:
article = Article(url)
article.download()
article.parse()
catlist = snippets.classify_text(article.text)
author = article.authors[0]
write(url, catlist[1:], author)
fetch()
main()
###Output
('https://mothership.sg/2018/02/changi-airport-crash-flight-delay/', 'Travel/Air Travel', 'Belmont Lay')
('https://mothership.sg/2018/02/16-years-on-super-brainy-ex-beauty-queen-nuraliza-osman-strives-to-harness-her-enduring-star-power-for-good/', 'Beauty & Fitness/Beauty Pageants', 'Tanya Ong')
('https://mothership.sg/2018/02/lee-kuan-yew-told-judges-to-ignore-mp-letters/', 'News/Politics', 'Jeanette Tan')
('https://mothership.sg/2018/02/public-apology-scandalise-court-chc-meme/', 'News/Politics', 'Belmont Lay')
('https://mothership.sg/2018/02/singapore-china-chang-wanquan-bromance-ng-eng-hen/', 'Law & Government/Government', 'Chan Cheow Pong')
('https://mothership.sg/2018/02/wrestle-shirtless-mma/', 'Arts & Entertainment/Humor', 'Mandy How')
('https://mothership.sg/2018/02/sias-first-boeing-787-10-will-be-flying-to-osaka-japan-in-may-2018/', 'Travel/Air Travel', 'Kayla Wong')
|
Example_Mouse_Allen_to_Fluoro.ipynb | ###Markdown
Mouse Allen to fluoro example

This example maps between the Allen CCF mouse atlas and a fluorescence mouse image.

Here we will use affine alignment in addition to deformable registration. Affine will be performed first, then both will be performed simultaneously. Also, we will estimate artifact locations using the EM algorithm and compensate for them in our matching.

Library imports

We start by importing necessary libraries. That includes numpy, matplotlib, and tensorflow for numerical work, nibabel for loading neuroimages, and lddmm and vis which are part of this library.
###Code
import numpy as np # for arrays
%matplotlib notebook
import matplotlib as mpl # for graphics
import matplotlib.pyplot as plt
import nibabel as nib # for loading neuroimages
import lddmm # algorithm
import vis # visualization
import tensorflow as tf
import imp # use imp.reload to update modules during development
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
#Make sure GPU is not recognised
tf.test.gpu_device_name()
###Output
_____no_output_____
###Markdown
The TensorFlow backend uses all available GPU memory by default, hence it can be useful to limit it (see the `limit_gpu_memory` helper defined further below).
###Code
# get filenames
atlas_image_fname = 'average_template_50.img'
target_image_fname = '180517_Downsample.img'
# load them with nibabel
fnames = [atlas_image_fname,target_image_fname]
img = [nib.load(fname) for fname in fnames]
# get info about image space
if '.img' == atlas_image_fname[-4:]:
nxI = img[0].header['dim'][1:4]
dxI = img[0].header['pixdim'][1:4]
nxJ = img[1].header['dim'][1:4]
dxJ = img[1].header['pixdim'][1:4]
else:
# I'm only working with analyze for now
raise ValueError('Only Analyze images supported for now')
xI = [np.arange(nxi)*dxi - np.mean(np.arange(nxi)*dxi) for nxi,dxi in zip(nxI,dxI)]
xJ = [np.arange(nxi)*dxi - np.mean(np.arange(nxi)*dxi) for nxi,dxi in zip(nxJ,dxJ)]
# get the images, note they also include a fourth axis for time that I don't want
I = img[0].get_data()[:,:,:,0]
J = img[1].get_data()[:,:,:,0]
# I would like to pad one slice of the allen atlas so that it has zero boundary conditions
zeroslice = np.zeros((nxI[0],1,nxI[2]))
I = np.concatenate((I,zeroslice),axis=1)
nxI = img[0].header['dim'][1:4]
nxI[1] += 1
xI = [np.arange(nxi)*dxi - np.mean(np.arange(nxi)*dxi) for nxi,dxi in zip(nxI,dxI)]
# display the data
f = plt.figure()
vis.imshow_slices(I, x=xI, fig=f)
f.suptitle('Atlas I')
f.canvas.draw()
f = plt.figure()
vis.imshow_slices(J,x=xJ,fig=f)
f.suptitle('Target J')
f.canvas.draw()
###Output
_____no_output_____
###Markdown
Notice that this image has a giant bright spot. This is an artifact we will need to compensate for in order to do accurate registration.

Reorientation

The Allen atlas is not stored in the same orientation as our data, so we will specify an initial affine transformation to put it in the correct orientation.
###Code
# the line below is a good initial orientation
A = np.array([[0,0,1,0],
[-1,0,0,0],
[0,1,0,0],
[0,0,0,1]])
# Taken and adapted from https://github.com/CSBDeep/CSBDeep/blob/master/csbdeep/utils/tf.py and utils.py
import keras
from keras import backend as K
from keras.callbacks import Callback
from keras.layers import Lambda
def is_tf_backend():
    import keras.backend as K
    return K.backend() == 'tensorflow'

def _raise(e):
    # small helper used by limit_gpu_memory below (as in the original csbdeep utils)
    raise e
def limit_gpu_memory(fraction, allow_growth=False):
"""Limit GPU memory allocation for TensorFlow (TF) backend.
Parameters
----------
fraction : float
Limit TF to use only a fraction (value between 0 and 1) of the available GPU memory.
Reduced memory allocation can be disabled if fraction is set to ``None``.
allow_growth : bool, optional
If ``False`` (default), TF will allocate all designated (see `fraction`) memory all at once.
If ``True``, TF will allocate memory as needed up to the limit imposed by `fraction`; this may
incur a performance penalty due to memory fragmentation.
Raises
------
ValueError
If `fraction` is not ``None`` or a float value between 0 and 1.
NotImplementedError
If TensorFlow is not used as the backend.
"""
is_tf_backend() or _raise(NotImplementedError('Not using tensorflow backend.'))
fraction is None or (np.isscalar(fraction) and 0<=fraction<=1) or _raise(ValueError('fraction must be between 0 and 1.'))
if K.tensorflow_backend._SESSION is None:
config = tf.ConfigProto()
if fraction is not None:
config.gpu_options.per_process_gpu_memory_fraction = fraction
config.gpu_options.allow_growth = bool(allow_growth)
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
# print("[tf_limit]\t setting config.gpu_options.per_process_gpu_memory_fraction to ",config.gpu_options.per_process_gpu_memory_fraction)
else:
warnings.warn('Too late too limit GPU memory, can only be done once and before any computation.')
limit_gpu_memory(fraction=0.5,allow_growth=True)
# test the initial affine
X0,X1,X2 = np.meshgrid(xJ[0],xJ[1],xJ[2],indexing='ij')
X0tf = tf.constant(X0,dtype=lddmm.dtype)
X1tf = tf.constant(X1,dtype=lddmm.dtype)
X2tf = tf.constant(X2,dtype=lddmm.dtype)
Itf = tf.constant(I,dtype=lddmm.dtype)
B = np.linalg.inv(A)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
Xs = B[0,0]*X0tf + B[0,1]*X1tf + B[0,2]*X2tf + B[0,3]
Ys = B[1,0]*X0tf + B[1,1]*X1tf + B[1,2]*X2tf + B[1,3]
Zs = B[2,0]*X0tf + B[2,1]*X1tf + B[2,2]*X2tf + B[2,3]
Id = lddmm.interp3(xI[0], xI[1], xI[2], Itf, Xs, Ys, Zs)
Idnp = Id.eval()
f = plt.figure()
vis.imshow_slices(Idnp,x=xJ,fig=f)
f.suptitle('Initial affine transformation')
f.canvas.draw()
###Output
_____no_output_____
###Markdown
Run DR IT MD matching

Because of the artifact we will run the missing data version of the algorithm. This can be specified by setting the `nMstep` argument to an integer greater than 0. This parameter says how many iterations of gradient descent are used in the maximization step of the EM algorithm.
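Schematically, the E-step weights each voxel by the posterior probability that it is explained by the matching term rather than the artifact term. A rough numpy sketch under a two-component Gaussian assumption (the deformed atlas `I_deformed` and artifact mean `mu_artifact` are stand-in names; the actual lddmm implementation may differ in detail):

```python
import numpy as np

def matching_weights(J, I_deformed, mu_artifact, sigmaM, sigmaA):
    """Per-voxel posterior probability of the 'matching' component."""
    lik_match = np.exp(-(J - I_deformed)**2 / (2 * sigmaM**2)) / np.sqrt(2 * np.pi * sigmaM**2)
    lik_artifact = np.exp(-(J - mu_artifact)**2 / (2 * sigmaA**2)) / np.sqrt(2 * np.pi * sigmaA**2)
    return lik_match / (lik_match + lik_artifact)
```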
###Code
# parameters
# cost function weights 1 / sigma^2
sigmaM = np.std(J) # matching
sigmaA = sigmaM*10.0 # artifact
sigmaR = 1e0 # regularization
# enery operator, power of laplacian p, characteristic length a
p = 2
a = (xI[0][1]-xI[0][0])*5
# other optimization parameters
niter = 200 # how many iteraitons of gradient descent
naffine = 50 # first naffine iterations are affine only (no deformation)
nt = 5 # this many timesteps to numerically integrate flow
# the linear part is a bit too big still (since I fixed voxel size issue)
# initial guess for affine (check picture above)
A0 = A
# When working with weights in EM algorithm, how many M steps per E step
# first test with 0 (it is working)
nMstep = 5
nMstep_affine = 1
# gradient descent step size
eL = 2e-4
eT = 1e-3
eV = 5e-3
# I think maybe eV has to be bigger
eV = 1e-2
# there is some oscilation in the translation and the linear part
out = lddmm.lddmm(I, J,
xI=xI, # location of pixels in domain
xJ=xJ,
niter=niter, # iterations of gradient descent
naffine=naffine, # iterations of affine only
eV = eV, # step size for deformation parameters
eT = eT, # step size for translation parameters
eL = eL, # step size for linear parameters
nt=nt, # timesteps for integtating flow
sigmaM=sigmaM, # matching cost weight 1/2sigmaM^2
sigmaR=sigmaR, # reg cost weight 1/2sigmaM^2
sigmaA=sigmaA, # artifact cost weight 1/2sigmaA^2
a=a, # kernel width
p=p, # power of laplacian in kernel (should be at least 2 for 3D)
A0=A0, # initial guess for affine matrix (should get orientation right)
nMstep=nMstep, # number of m steps for each e step
nMstep_affine=nMstep_affine # number of m steps during affine only phase
)
###Output
_____no_output_____ |
paper/Advection_diffusion/AD_artificial/sampling/sampling_test.ipynb | ###Markdown
2D Advection-Diffusion equation

In this notebook we provide a simple example of the DeepMoD algorithm and apply it to the 2D advection-diffusion equation.
###Code
# General imports
import numpy as np
import torch
# DeepMoD functions
import matplotlib.pylab as plt
from deepymod import DeepMoD
from deepymod.model.func_approx import NN
from deepymod.model.library import Library2D_third
from deepymod.model.constraint import LeastSquares
from deepymod.model.sparse_estimators import Threshold,PDEFIND
from deepymod.training import train
from deepymod.training.sparsity_scheduler import TrainTestPeriodic
from scipy.io import loadmat
# Settings for reproducibility
np.random.seed(1)
torch.manual_seed(1)
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
###Output
_____no_output_____
###Markdown
Prepare the data

Next, we prepare the dataset.
###Code
data = loadmat('../Diffusion_2D_space81.mat')
data = np.real(data['Expression1']).reshape((81,81,81,4))[:,:,:,3]
width, width_2, steps = data.shape
x_arr = np.linspace(0,1,width)
y_arr = np.linspace(0,1,width_2)
t_arr = np.linspace(0,1,steps)
x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij')
x_grid.shape
number_of_samples = 6
tot_samples = number_of_samples*number_of_samples
Utrain = np.empty([tot_samples,data.shape[2]])
Xtrain = np.empty([tot_samples,data.shape[2],3])
plt.imshow(data[:,:,40])
for i in np.arange(x_grid.shape[2]):
idx = np.random.permutation(number_of_samples)
idy = np.random.permutation(number_of_samples)
#idx = np.arange(0,number_of_samples)
#idy = np.arange(0,number_of_samples)
Utrain[:,i] = np.array([data[idx,k,i] for k in idy]).flatten()
Xtrain[:,i,1] = np.array([x_grid[idx,k,i] for k in idy]).flatten()
Xtrain[:,i,2] = np.array([y_grid[idx,k,i] for k in idy]).flatten()
Xtrain[:,i,0] = np.array([t_grid[idx,k,i] for k in idy]).flatten()
Xtrain.shape
y = Utrain.flatten()
X = np.vstack((Xtrain[:,:,0].flatten(), Xtrain[:,:,1].flatten(), Xtrain[:,:,2].flatten())).T
# Add noise
noise_level = 0.0
y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size)
# Randomize data
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :], dtype=torch.float32, requires_grad=True).to(device)
y_train = torch.tensor(y_noisy[idx], dtype=torch.float32).to(device)
y_train.shape
# Configure DeepMoD
network = NN(3, [40, 40, 40, 40], 1)
library = Library2D_third(poly_order=0)
estimator = Threshold(0.05)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5)
constraint = LeastSquares()
model = DeepMoD(network, library, estimator, constraint).to(device)
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3)
logdir='runs/testje_2/'
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir=logdir, split=0.8, max_iterations=50000, delta=1e-6, patience=200)
# assumed setup (defined in a cell that is not shown here): full grid dimensions
# and the temporal subsampling factors to sweep over
x_dim, y_dim, t_dim = data.shape
time_range = [2, 4, 8, 12, 14, 17, 21, 28]  # assumption: chosen to match the printed shapes below
for i in time_range:
# Downsample data and prepare data without noise:
down_data= np.take(np.take(np.take(data,np.arange(0,x_dim,5),axis=0),np.arange(0,y_dim,5),axis=1),np.arange(0,t_dim,i),axis=2)
print("Dowmsampled shape:",down_data.shape, "Total number of data points:", np.product(down_data.shape))
index = len(np.arange(0,t_dim,i))
width, width_2, steps = down_data.shape
x_arr, y_arr, t_arr = np.linspace(0,1,width), np.linspace(0,1,width_2), np.linspace(0,1,steps)
x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij')
X, y = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten())), np.float32(down_data.reshape((down_data.size, 1)))
# Add noise
noise_level = 0.0
y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size, 1)
# Randomize data
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :], dtype=torch.float32, requires_grad=True).to(device)
y_train = torch.tensor(y_noisy[idx, :], dtype=torch.float32).to(device)
# Configure DeepMoD
network = NN(3, [40, 40, 40, 40], 1)
library = Library2D_third(poly_order=0)
estimator = Threshold(0.05)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5)
constraint = LeastSquares()
model = DeepMoD(network, library, estimator, constraint).to(device)
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3)
logdir='final_runs/no_noise_x17/'+str(index)+'/'
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir=logdir, split=0.8, max_iterations=50000, delta=1e-6, patience=200)
###Output
Dowmsampled shape: (17, 17, 41) Total number of data points: 11849
49975 MSE: 8.70e-06 Reg: 8.07e-06 L1: 1.64e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 21) Total number of data points: 6069
49975 MSE: 4.26e-06 Reg: 5.59e-06 L1: 1.43e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 11) Total number of data points: 3179
49975 MSE: 2.80e-06 Reg: 3.69e-06 L1: 1.47e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 7) Total number of data points: 2023
49975 MSE: 3.36e-06 Reg: 2.87e-06 L1: 1.41e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 6) Total number of data points: 1734
4700 MSE: 2.09e-04 Reg: 7.78e-06 L1: 1.00e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 5) Total number of data points: 1445
49975 MSE: 4.50e-05 Reg: 1.17e-05 L1: 1.71e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 4) Total number of data points: 1156
49975 MSE: 2.98e-05 Reg: 8.04e-06 L1: 1.42e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (17, 17, 3) Total number of data points: 867
49975 MSE: 5.90e-06 Reg: 2.42e-06 L1: 1.26e+00 Algorithm converged. Writing model to disk.
|
notebooks/explore/variation_of_information.ipynb | ###Markdown
Exploring: Variation of Information

Background

My projects involve trying to compare the outputs of different climate models. There are currently more than 20 climate models from different companies, and each of them tries to produce the most accurate prediction of some physical phenomenon, e.g. Sea Surface Temperature, Mean Sea Level Pressure, etc. However, it's a difficult task to provide accurate comparison techniques for each of the models. There exist some methods such as the mean and standard deviation, and there is also a very common framework for visually summarizing this information in the form of Taylor Diagrams. The drawback of these methods is that they typically capture only linear relationships and cannot handle multidimensional, multivariate data. Another way to measure similarity would be the family of Information Theory Measures (ITMs). Instead of directly measuring first-order output statistics, these methods summarize the information via a probability distribution function (PDF) of the dataset. They can measure non-linear relationships and are naturally multivariate, which offers solutions to the shortcomings of the standard methods. I would like to explore this and see if this is a useful way of summarizing information.

Code
###Code
import numpy as np
import seaborn as sns
import pandas as pd
import statsmodels.api as smi
import sys
sys.path.insert(0, '/home/emmanuel/code/kernel_model_zoo/')
import matplotlib.pyplot as plt
plt.style.use('seaborn-talk')
%matplotlib inline
%load_ext autoreload
%autoreload 2
from kernellib.dependence import HSIC
SAVE_PATH = "/home/emmanuel/projects/2020_rbig_rs/reports/figures/explore/vi/"
###Output
_____no_output_____
###Markdown
Data

We will use the classic Anscombe's quartet dataset. This is a staple dataset which shows how we need to take care when comparing two datasets. In the example, we will show how two datasets can look similar visually, while a correlation measure like Pearson's coefficient fails because it is not able to capture the non-linear relationship between the two distributions.
###Code
# load dataset
df_anscombe = sns.load_dataset('anscombe')
df_anscombe.dataset.unique()
def get_case(df: pd.DataFrame, case: str='I'):
return df[df['dataset'] == case]
def plot_cases(df: pd.DataFrame, case: str='I', save=True, plot_type='reg'):
df = get_case(df, case)
plt.figure(figsize=(4,4))
if plot_type == 'reg':
pts = sns.regplot(
x="x",
y="y",
data=df,
)
elif plot_type == 'joint':
pts = sns.jointplot(
x="x",
y="y",
data=df,
kind="regplot",
)
elif plot_type == 'density':
pts = sns.jointplot(
x="x",
y="y",
data=df,
kind="kde",
)
else:
raise ValueError('')
plt.xlabel("")
plt.ylabel("")
plt.xticks([])
plt.yticks([])
# plt.axis('off')
plt.tight_layout()
if save is not None:
plt.savefig(SAVE_PATH + f'demo_case{case}_{plot_type}.png', dpi=200, transparent=True)
return None
plot_cases(df_anscombe, 'III', plot_type='reg')
###Output
_____no_output_____
###Markdown
This is a very simple case where we have a linear relationship between the datasets. The regression plot above shows the line that was fit between the two distributions. We can also see the marginal distributions (the histograms) for X and Y. As you can see, they are definitely similar. But now, we are going to look at a way to summarize this information.

Mathematics

There are a few important quantities to consider when we need to represent the statistics and compare two datasets:

* Variance
* Covariance
* Correlation
* Root Mean Squared

Covariance

The covariance is a measure of how much two variables vary together. The covariance between X and Y is given by:

$$C(X,Y)=\frac{1}{N}\sum_{i=1}^N (x_i - \mu_x)(y_i - \mu_y)$$

where $N$ is the number of elements in both datasets. Notice how this formula assumes that the number of samples for X and Y is the same. This measure is unbounded, as it can take any value between $-\infty$ and $\infty$. Let's look at an example of how to calculate this below.
###Code
# covariance formula
def cov(X, Y):
n_samples = X.shape[0]
# get mean
X_mu = X.mean()
Y_mu = Y.mean()
cov_xy = 0
# loop through the data points
for ix in range(n_samples):
cov_xy += (X.values[ix] - X_mu) * (Y.values[ix] - Y_mu)
return cov_xy / n_samples
# extract the data
X = get_case(df_anscombe, 'I')['x']
Y = get_case(df_anscombe, 'I')['y']
# get covariance
cov_xy = cov(X,Y)
print(cov_xy)
X.values[:, None].reshape(-1, 1).shape
###Output
_____no_output_____
###Markdown
That number is fairly meaningless on its own, but we can compare this covariance value against the other cases.

Refactor

We can remove the loop by doing a matrix multiplication:

$$C(X,Y)=\frac{1}{N} (X-X_\mu)^\top (Y-Y_\mu)$$

where $X,Y \in \mathbb{R}^{N\times 1}$.
###Code
np.dot(X[:, None].T-X.mean(), Y[:, None]-Y.mean())/X.shape[0]
# covariance formula
def cov(X, Y):
n_samples = X.shape[0]
# get mean
X_mu = X.mean()
Y_mu = Y.mean()
# remove mean from data
X -= X_mu
Y -= Y_mu
# Ensure 2d
X = np.atleast_2d(X).reshape(-1, 1)
Y = np.atleast_2d(Y).reshape(-1, 1)
# calculate the covariance
cov_xy = X.T @ Y
return (cov_xy / n_samples).item()
def test_anscombe(func, save_name=None):
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(7,5))
for iax, icase in zip(axs.ravel(), ['I', 'II', 'III', 'IV']):
# data
X = get_case(df_anscombe, icase)['x']
Y = get_case(df_anscombe, icase)['y']
output = func(X.values,Y.values)
iax.scatter(X.values, Y.values, label=f"Case {icase}: $C$={output:.2f}")
iax.legend()
# iax.legend(f"Case: {icase}")
# get covariance
# print(f"Case {icase}: {cov_xy.item()}")
plt.tight_layout()
if save_name is not None:
plt.savefig(SAVE_PATH + f"demo_{save_name}.png")
plt.show()
test_anscombe(cov, 'cov')
###Output
_____no_output_____
###Markdown
Multi-Variate (Multi-Dimensional)
###Code
np.random.seed(123)
X = np.random.randn(20, 2)
Y = 0.5 * X
# calculate covariance matrix
cov = np.cov(X.squeeze(), Y.squeeze())
print(X.shape, Y.shape, cov.shape)
cov.shape
def cov_hs_features(X, Y):
# calculate covariance matrix
cov_xy = np.cov(X, Y)
# summarize information
    cov_sum = np.linalg.norm(cov_xy, ord='fro')
return cov_sum
# ||X.T @ Y||_F - feature space
lhs = np.linalg.norm(X.T @ Y, ord='fro')**2
print(lhs)
# ||XX.T @ YY.T||_F - sample space
mhs = np.trace(X @ X.T @ Y @ Y.T)
print(mhs)
# ||X.T @ Y||_F - feature space
lhs = np.linalg.norm(np.cov(X,Y), ord='fro')**2
print(lhs)
# ||XX.T @ YY.T||_F - sample space
mhs = np.trace(X @ X.T @ Y @ Y.T)
print(mhs)
# RHS
raw = np.trace(X @ Y.T) / np.sqrt(np.trace(X @ X.T) * np.trace(Y @ Y.T))
print(raw)
# MHS
###Output
1.0
###Markdown
Formula 1

$$\frac{tr(XY^\top)}{\sqrt{tr(XX^\top)\,tr(YY^\top)}}$$
###Code
# raw formula
raw = np.trace(X @ Y.T) / np.sqrt(np.trace(X @ X.T) * np.trace(Y @ Y.T))
print(raw)
# numerator
numer1 = np.trace(X @ Y.T)**2
numer2 = np.linalg.norm(X @ Y.T)
print(numer1, numer2)
###Output
1.0
229.5889532845504 15.152193018984098
###Markdown
Formula II

$$\frac{tr(XX^\top YY^\top)}{\sqrt{tr(XX^\top XX^\top)\,tr(YY^\top YY^\top)}}$$
###Code
# formula 2
S = X @ X.T
T = Y @ Y.T
raw = np.trace(S.T @ T) / np.sqrt(np.trace(S.T @ S) * np.trace(T.T @ T))
print(raw)
# numerator
numer1 = np.trace(S.T @ T)
numer2 = np.linalg.norm(S.T @ T)
print(numer1, numer2)
# denominator
denom1 = np.sqrt(np.trace(S.T @ S) * np.trace(T.T @ T))
denom2 = np.sqrt(np.linalg.norm(S.T @ S) * np.linalg.norm(T.T @ T))
print(denom1, denom2)
###Output
0.9999999999999999
229.5889532845504 229.58895328455043
229.58895328455043 229.58895328455043
###Markdown
Proposed$$\frac{tr(X^\top Y)}{\sqrt{tr(X^\top X)tr(Y^\top Y)}}$$
###Code
# proposed
raw = np.trace(X.T @ Y) / np.sqrt(np.trace(X.T @ X) * np.trace(Y.T @ Y))
print(raw)
# numerator
numer1 = np.trace(X.T @ Y)
numer2 = np.linalg.norm(X.T @ Y)
print(numer1, numer2)
cov_feat_norm = cov_hs_features(X, Y)
print(cov_feat_norm)
X_ft_norm = cov_hs_features(X,X)
Y_ft_norm = cov_hs_features(Y,Y)
corr_feat_norm = cov_feat_norm / (X_ft_norm * Y_ft_norm)
print(corr_feat_norm)
np.inner(np.cov(X,X.T), np.cov(Y,Y.T))
def cov_hs_samples(X, Y):
# calculate samples covariance matrix
K_x = np.cov(X.T)
K_y = np.cov(Y.T)
# summarize
return np.sum(K_x * K_y)
cov_samp_norm = cov_hs_samples(X, Y)
print(cov_samp_norm)
X_samp_norm = cov_hs_samples(X,X)
Y_samp_norm = cov_hs_samples(Y,Y)
corr_samp_norm = cov_samp_norm / np.sqrt(X_samp_norm * Y_samp_norm)
print(corr_samp_norm)
cov_norm = cov_hs_features(X, Y)
print(cov_norm)
def get_linear_hsic(X, Y):
hsic_model = HSIC(kernel='linear', scorer='hsic', bias=True)
hsic_model.fit(X,Y);
hsic_score = hsic_model.score(X)
return hsic_score
def get_linear_cka(X, Y):
hsic_model = HSIC(kernel='linear', scorer='tka')
hsic_model.fit(X,Y);
hsic_score = hsic_model.score(X)
return hsic_score
cka_score = get_linear_cka(X, Y)
print(cka_score)
hsic_score = get_linear_hsic(X, Y)
print(hsic_score)
# Samples Covariance Trace
np.trace(np.cov(X.T) @ np.cov(Y.T))
# Feature Covariance Trace
np.linalg.norm(np.cov(X,Y), ord='fro')
np.linalg.norm(X.T @ Y, ord='fro')
def corr_hs(X, Y):
# calculate summarize covariance matrix
cov_sum = cov_hs(X, Y)
# summarize
X_sum = cov_hs(X, X)
Y_sum = cov_hs(Y, Y)
# calculate correlation
return cov_sum / np.sqrt(X_sum * Y_sum)
corr_sum = corr_hs(X,Y)
print(corr_sum)
# calculate empirical covariance
cov = X.T @ Y
assert cov.shape == (X.shape[1], Y.shape[1])
cov
###Output
_____no_output_____
###Markdown
So, we see that the covariance doesn't seem to change very much between datasets. Correlation: This is the normalized version of the covariance measure mentioned above. It is computed by dividing the covariance by the product of the standard deviations of the two samples X and Y, so the formulation is:$$\rho(X, Y) = \frac{C(X,Y)}{\sigma_x \sigma_y}$$With this normalization, we now have a measure that is bounded between -1 and 1. This makes it much more interpretable and also invariant to isotropic scaling, $\rho(X,Y)=\rho(\alpha X, \beta Y)$ where $\alpha, \beta \in \mathbb{R}^{+}$
###Code
def corr(X, Y):
# get standard deviation
X_std, Y_std = X.std(), Y.std()
    # calculate the covariance
cov_xy = cov(X, Y)
# calculate the correlation
return (cov_xy / (X_std * Y_std)).item()
corr_xy = corr(X, Y)
print(corr_xy)
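# quick check (assuming X and Y are the 1-D series used above): the correlation is
# invariant to positive rescaling of either variable, e.g. corr(2X, 3Y) == corr(X, Y)
print(np.corrcoef(X, Y)[0, 1], np.corrcoef(2.0 * X, 3.0 * Y)[0, 1])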
###Output
0.7422004694043999
###Markdown
Now that the measure is bounded between -1 and 1, the value is easy to interpret: it is fairly close to 1, so the two samples are strongly (positively) related.
###Code
test_anscombe(corr, 'corr')
###Output
_____no_output_____
###Markdown
So at this point, this is a bit of a red flag. All of the $\rho$ values are the same, but we can see very clearly that there are some key differences between the distributions. Neither the covariance nor the correlation measure gave us useful information. Root Mean Squared: This is a popular measure of the error between two datasets. More or less, it is a covariance-like measure that penalizes larger deviations between the datasets more heavily.
###Code
# root mean squared error (RMSE) formula
def rmse(X, Y):
n_samples = X.shape[0]
# get mean
X_mu = X.mean()
Y_mu = Y.mean()
    # remove mean from data (note: this modifies X and Y in place)
X -= X_mu
Y -= Y_mu
    # mean squared difference between the (centered) samples
cov_xy = np.average((X - Y) ** 2, axis=0)
return np.sqrt((cov_xy))
rmse_xy = rmse(X, Y)
print(rmse_xy)
###Output
1.936554834777258
###Markdown
Refactor: The scikit-learn library has a built-in `mean_squared_error` function, which you can call and then apply `np.sqrt` to the output.
###Code
from sklearn.metrics import mean_squared_error
def rmse(X, Y):
    # mean squared error between X and Y
rmse_xy = mean_squared_error(X, Y)
return np.sqrt(rmse_xy)
rmse_xy = rmse(X,Y)
print(rmse_xy)
test_anscombe(rmse, 'rmse')
###Output
_____no_output_____
###Markdown
HSIC: The Hilbert-Schmidt Independence Criterion (HSIC) generalizes the covariance to kernel feature spaces; with a linear kernel it essentially reduces to the squared Frobenius norm of the cross-covariance between X and Y.
###Code
def get_linear_hsic(X, Y):
hsic_model = HSIC(kernel='linear', scorer='hsic', bias=True)
hsic_model.fit(X[:, None],Y[:, None]);
hsic_score = hsic_model.score(X[:, None])
return hsic_score
hsic_score = get_linear_hsic(X,Y)
print(hsic_score)
test_anscombe(get_linear_hsic, 'hsic_lin')
###Output
_____no_output_____
###Markdown
RBF Kernel
###Code
def get_rbf_hsic(X, Y):
hsic_model = HSIC(kernel='rbf', scorer='hsic')
hsic_model.fit(X[:, None],Y[:, None]);
hsic_score = hsic_model.score(X[:, None])
return hsic_score
test_anscombe(get_rbf_hsic, 'hsic_rbf')
###Output
_____no_output_____
###Markdown
Kernel Alignment Linear
###Code
def get_linear_ka(X, Y):
hsic_model = HSIC(kernel='linear', scorer='tka')
hsic_model.fit(X[:, None],Y[:, None]);
hsic_score = hsic_model.score(X[:, None])
return hsic_score
test_anscombe(get_linear_ka, 'cka_lin')
###Output
_____no_output_____
###Markdown
RBF Kernel
###Code
def get_rbf_ka(X, Y):
hsic_model = HSIC(kernel='rbf', scorer='tka')
hsic_model.fit(X[:, None],Y[:, None]);
hsic_score = hsic_model.score(X[:, None])
return hsic_score
test_anscombe(get_rbf_ka, 'ka_rbf')
###Output
_____no_output_____
###Markdown
Mutual Information: In this section, I will do the same thing as before, except this time I will use the equivalent Information Theory Measures. In principle, they should be better at capturing non-linear relationships and I will be able to add different representations using spatial-temporal information. Entropy: This is the simplest and it is analogous to the standard deviation $\sigma$. Entropy is defined by$$H(X) = - \int_{X} f(x) \log f(x) dx$$This is the expected amount of uncertainty present in a given distribution function $f(X)$. It captures the amount of surprise within a distribution. So if there are a large number of low-probability events, then the expected uncertainty will be higher; a distribution concentrated on a few very likely outcomes has low entropy, whereas one whose outcomes are all roughly equally likely (e.g. the Uniform, which maximizes entropy over a fixed support) has high entropy.
###Code
kde = smi.nonparametric.KDEUnivariate(Y)
kde.fit()
print(kde.entropy)
plt.plot(kde.support, kde.density)
import scipy.stats
def entropy(data, method='counts'):
if method == 'counts':
_, pdata = np.unique(data, return_counts=True)
entropy = scipy.stats.entropy(pdata)
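        # note: the 'counts' estimator treats every distinct value as its own category,
        # so for continuous data where all values are unique it simply returns log(n)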
elif method == 'kde':
kde = smi.nonparametric.KDEUnivariate(data)
kde.fit()
entropy = kde.entropy
else:
raise ValueError('Unrecognized method.')
return entropy
Hx = entropy(X, 'counts')
Hy = entropy(Y, 'counts')
print(Hx, Hy)
###Output
2.3978952727983707 2.3978952727983707
###Markdown
Mutual Information Given two distributions X and Y, we can calculate the mutual information as$$I(X,Y) = \int_X\int_Y p(x,y) \log \frac{p(x,y)}{p_x(x)p_y(y)}dxdy$$where $p(x,y)$ is the joint probability and $p_x(x), p_y(y)$ are the marginal probabilities of $X$ and $Y$ respectively. We can also express the mutual information as a function of the Entropy $H(X)$$$I(X,Y)=H(X) + H(Y) - H(X,Y)$$
###Code
def mutual_info(X,Y, method='kde'):
Hx = entropy(X, method)
Hy = entropy(Y, method)
Hxy = entropy(np.concatenate((X,Y)), method)
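    # caveat: the entropy of the pooled (concatenated) sample is used here as a stand-in
    # for the joint entropy H(X,Y); a proper joint entropy would need a 2-D density estimate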
return Hx + Hy - Hxy
Hxy = entropy(pd.concat((X,Y)))
mi_xy = mutual_info(X.values, Y.values)
print(mi_xy)
test_anscombe(mutual_info, 'kde')
def norm_mutual_info(X,Y, method='kde'):
Hx = entropy(X, method)
Hy = entropy(Y, method)
Hxy = entropy(np.concatenate((X,Y)), method)
# mutual information
mi_xy = Hx + Hy - Hxy
return (mi_xy / (np.sqrt(Hx * Hy)))
test_anscombe(norm_mutual_info, 'nkde')
def red_mutual_info(X,Y, method='kde'):
Hx = entropy(X, method)
Hy = entropy(Y, method)
Hxy = entropy(np.concatenate((X,Y)), method)
# mutual information
mi_xy = Hx + Hy - Hxy
return (2 * mi_xy / (Hx + Hy))
test_anscombe(red_mutual_info, 'rkde')
###Output
_____no_output_____
###Markdown
Variation of Information$$\begin{aligned}VI(X,Y) &= H(X) + H(Y) - 2I(X,Y) \\&= I(X,X) + I(Y,Y) - 2I(X,Y)\end{aligned}$$
###Code
def variation_info(X,Y, method='kde'):
Hx = entropy(X, method)
Hy = entropy(Y, method)
Hxy = entropy(np.concatenate((X,Y)), method)
# mutual information
mi_xy = Hx + Hy - Hxy
# variation of information
vi_xy = Hx + Hy - 2 * mi_xy
return vi_xy
test_anscombe(variation_info, 'vikde')
###Output
_____no_output_____
###Markdown
RVI-Based Diagram Analogous to the Taylor Diagram, we can summarize the ITMs in a way that is easy to interpret. It uses the relationship between the entropy, the mutual information and the normalized mutual information via the triangle inequality. Assuming we can draw a diagram using the law of cosines;$$c^2 = a^2 + b^2 - 2ab \cos \phi$$ we can write this in terms of $\sigma$, $\rho$ and $RMSE$ as we have expressed above.$$\begin{aligned}\text{RVI}^2 &= H(X) + H(Y) - 2 \sqrt{H(X)H(Y)} \frac{I(X,Y)}{\sqrt{H(X)H(Y)}} \\&= H(X) + H(Y) - 2 \sqrt{H(X)H(Y)} \rho\end{aligned}$$where the sides are as follows:* $a = \sigma_{\text{obs}}$ - the entropy of the observed data* $b = \sigma_{\text{sim}}$ - the entropy of the simulated data* $\rho = \frac{I(X,Y)}{\sqrt{H(X)H(Y)}}$ - the normalized mutual information* $RMSE$ - the variation of information between the two datasets
###Code
h_a = entropy(X, 'counts')
h_b = entropy(Y, 'kde')
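# note: two different estimators are mixed here ('counts' for X, 'kde' for Y), which makes
# the two entropies hard to compare directly; vi_coeffs below applies a single method to both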
print('H(X),H(Y):',h_a, h_b)
# joint entropy
h_ab = entropy(pd.concat((X,Y)), 'kde')
print('H(X,Y):',h_ab)
# mutual information
mi_ab = h_a + h_b - h_ab
print('MI(X,Y):', mi_ab)
# normalized mutual information
nmi_ab = mi_ab / np.sqrt(h_a * h_b)
print('NMI(X,Y):', nmi_ab)
# scaled mutual info
smi_ab = mi_ab * (h_ab / (h_a * h_b))
print('SMI(X,Y):', smi_ab)
# cos rho term
c_ab = 2 * smi_ab - 1
print('C_XY:', c_ab)
# vi
vi = h_a + h_b - 2 * np.sqrt(h_a * h_b) * nmi_ab
print('VI(X,Y):',vi)
def vi_coeffs(X, Y, method='counts'):
# entropy observations
h_a = entropy(X, method)
# entropy simulated
h_b = entropy(Y, method)
# joint entropy
h_ab = entropy(pd.concat((X,Y)), method)
# mutual information
mi_ab = h_a + h_b - h_ab
# normalized mutual information
nmi_ab = mi_ab / np.sqrt(h_a * h_b)
# scaled mutual information
smi_ab = 2 * mi_ab * (h_ab / (h_a * h_b)) - 1
# vi
vi_ab = h_a + h_b - 2 * np.sqrt((h_a * h_b)) * nmi_ab
# save coefficients
data = {
'h_a': h_a,
'h_b': h_b,
'nmi': nmi_ab,
'smi': smi_ab,
'theta': np.arccos(nmi_ab),
'vi': vi_ab
}
return data
# Model I
X = get_case(df_anscombe, 'I')['x']
Y = get_case(df_anscombe, 'I')['y']
data1 = vi_coeffs(X, Y, 'kde')
print(data1)
# Model II
X = get_case(df_anscombe, 'II')['x']
Y = get_case(df_anscombe, 'II')['y']
data2 = vi_coeffs(X, Y, 'kde')
print(data2)
# Model III
X = get_case(df_anscombe, 'III')['x']
Y = get_case(df_anscombe, 'III')['y']
data3 = vi_coeffs(X, Y, 'kde')
print(data3)
# # Model IV
# X = get_case(df_anscombe, 'IV')['x']
# Y = get_case(df_anscombe, 'IV')['y']
# data4 = vi_coeffs(X, Y)
# print(data4)
import matplotlib.pyplot as plt
import numpy as np
theta = np.linspace(0,np.pi)
r = np.sin(theta)
fig = plt.figure(figsize=(7,5))
ax = fig.add_subplot(111, polar=True)
m = ax.scatter(0, data1['h_a'], s=200, alpha=0.75, label='Data', zorder=0)
m1 = ax.scatter(data1['theta'], data1['h_b'], s=150, alpha=0.75, marker='x', label='Model I')
m1 = ax.scatter(data2['theta'], data2['h_b'], s=150, alpha=0.75, marker='o', label='Model II')
m1 = ax.scatter(data3['theta'], data3['h_b'], s=150, alpha=0.75, marker='.', label='Model III')
# m1 = ax.scatter(theta4, b4, s=100, alpha=0.75, marker='o', label='Model II')
# ax.plot(0)
ax.set_ylim([0, 3])
# ax.set_xticks([0.1, 0.2, 0.3, 0.9])
# ax.set_xticklabels([1.0, 0.9, 0.8, 0.6, 0.3, 0.2, 0.1])
# m1 = ax.scatter(theta1, a, s=50, alpha=0.75)
# m1 = ax.scatter(theta1, a, s=50, alpha=0.75)
c = ax.plot(theta, data1['h_a'] * np.ones(theta.shape), color='black', linestyle='dashed', alpha=0.75)
ax.set_xlabel('Entropy', labelpad=20)
ax.set_ylabel('Entropy', labelpad=20)
plt.legend()
ax.set_thetamin(0)
ax.set_thetamax(90)
plt.tight_layout()
plt.savefig(SAVE_PATH + 'demo_vi.png')
plt.show()
###Output
_____no_output_____ |
Chapter18/detecting_lanes_in_the_image_of_a_road.ipynb | ###Markdown
###Code
!wget https://www.dropbox.com/s/vgd22go8a6k721t/road_image.png
!pip install torch_snippets
from torch_snippets import show, read, subplots, cv2, np
IMG = read('road_image.png')
img = np.uint8(IMG.copy())
edges = cv2.Canny(img,50,150)
show(edges)
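# HoughLines parameters: 1-pixel distance resolution, 1-degree angular resolution,
# and an accumulator threshold of 150 votes for a line to be kept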
lines = cv2.HoughLines(edges,1,np.pi/180,150)
lines = lines[:,0,:]
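# each detected line comes back in polar form (rho, theta); convert it to two far-apart
# endpoints so cv2.line can draw the full line across the image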
for rho,theta in lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 10000*(-b))
y1 = int(y0 + 10000*(a))
x2 = int(x0 - 10000*(-b))
y2 = int(y0 - 10000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
show(img)
###Output
_____no_output_____ |
4. Deep Neural Networks with PyTorch/5. Deep Networks/4. initializationsame.ipynb | ###Markdown
Initialization with Same Weights Objective for this Notebook 1. Learn how to define the Neural Network with Same Weights Initialization, define Criterion Function, Optimizer, and Train the Model 2. Define the Neural Network with Default Weights Initialization, define Criterion Function, Optimizer 3. Train the Model Table of ContentsIn this lab, we will see the problem of initializing the weights with the same value. We will see that even for a simple network, our model will not train properly. Neural Network Module and Training Function Make Some Data Define the Neural Network with Same Weights Initialization, define Criterion Function, Optimizer, and Train the Model Define the Neural Network with Default Weights Initialization, define Criterion Function, Optimizer, and Train the ModelEstimated Time Needed: 25 min Preparation We'll need the following libraries
###Code
# Import the libraries we need for this lab
import torch
import torch.nn as nn
from torch import sigmoid
import matplotlib.pylab as plt
import numpy as np
torch.manual_seed(0)
###Output
_____no_output_____
###Markdown
Used for plotting the model
###Code
# The function for plotting the model
def PlotStuff(X, Y, model, epoch, leg=True):
plt.plot(X.numpy(), model(X).detach().numpy(), label=('epoch ' + str(epoch)))
plt.plot(X.numpy(), Y.numpy(), 'r')
plt.xlabel('x')
if leg == True:
plt.legend()
else:
pass
###Output
_____no_output_____
###Markdown
Neural Network Module and Training Function Define the activations and the output of the first linear layer as an attribute. Note that this is not good practice.
###Code
# Define the class Net
class Net(nn.Module):
# Constructor
def __init__(self, D_in, H, D_out):
super(Net, self).__init__()
# hidden layer
self.linear1 = nn.Linear(D_in, H)
self.linear2 = nn.Linear(H, D_out)
# Define the first linear layer as an attribute, this is not good practice
self.a1 = None
self.l1 = None
self.l2=None
# Prediction
def forward(self, x):
self.l1 = self.linear1(x)
self.a1 = sigmoid(self.l1)
self.l2=self.linear2(self.a1)
yhat = sigmoid(self.linear2(self.a1))
return yhat
###Output
_____no_output_____
###Markdown
Define the training function:
###Code
# Define the training function
def train(Y, X, model, optimizer, criterion, epochs=1000):
cost = []
total=0
for epoch in range(epochs):
total=0
for y, x in zip(Y, X):
yhat = model(x)
loss = criterion(yhat, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
#cumulative loss
total+=loss.item()
cost.append(total)
if epoch % 300 == 0:
PlotStuff(X, Y, model, epoch, leg=True)
plt.show()
model(X)
plt.scatter(model.a1.detach().numpy()[:, 0], model.a1.detach().numpy()[:, 1], c=Y.numpy().reshape(-1))
plt.title('activations')
plt.show()
return cost
###Output
_____no_output_____
###Markdown
Make Some Data
###Code
# Make some data
X = torch.arange(-20, 20, 1).view(-1, 1).type(torch.FloatTensor)
Y = torch.zeros(X.shape[0])
Y[(X[:, 0] > -4) & (X[:, 0] < 4)] = 1.0
###Output
_____no_output_____
###Markdown
Define the Neural Network with Same Weights Initialization, define Criterion Function, Optimizer, and Train the Model Create the Cross-Entropy loss function:
###Code
# The loss function
def criterion_cross(outputs, labels):
out = -1 * torch.mean(labels * torch.log(outputs) + (1 - labels) * torch.log(1 - outputs))
return out
###Output
_____no_output_____
###Markdown
Define the Neural Network
###Code
# Train the model
# size of input
D_in = 1
# size of hidden layer
H = 2
# number of outputs
D_out = 1
# learning rate
learning_rate = 0.1
# create the model
model = Net(D_in, H, D_out)
###Output
_____no_output_____
###Markdown
This shows the PyTorch default (random) weight initialization
###Code
model.state_dict()
###Output
_____no_output_____
###Markdown
Same Weights Initialization with all ones for weights and zeros for the bias.
###Code
model.state_dict()['linear1.weight'][0]=1.0
model.state_dict()['linear1.weight'][1]=1.0
model.state_dict()['linear1.bias'][0]=0.0
model.state_dict()['linear1.bias'][1]=0.0
model.state_dict()['linear2.weight'][0]=1.0
model.state_dict()['linear2.bias'][0]=0.0
model.state_dict()
###Output
_____no_output_____
###Markdown
Optimizer, and Train the Model:
###Code
#optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
#train the model usein
cost_cross = train(Y, X, model, optimizer, criterion_cross, epochs=1000)
#plot the loss
plt.plot(cost_cross)
plt.xlabel('epoch')
plt.title('cross entropy loss')
###Output
_____no_output_____
###Markdown
By examining the parameters, we see that although they have changed, the two hidden units remain identical to each other. This is the symmetry problem: because both hidden units start from the same weights (and feed an output unit whose weights are also identical), they receive exactly the same gradient at every step, so they can never learn different features.
###Code
model.state_dict()
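# quick check: after training, the two hidden units still carry identical weights,
# i.e. the symmetry introduced by the same-weights initialization is never broken
print(torch.allclose(model.state_dict()['linear1.weight'][0], model.state_dict()['linear1.weight'][1]))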
yhat=model(torch.tensor([[-2.0],[0.0],[2.0]]))
yhat
###Output
_____no_output_____
###Markdown
Define the Neural Network, Criterion Function, Optimizer and Train the Model
###Code
# Train the model
# size of input
D_in = 1
# size of hidden layer
H = 2
# number of outputs
D_out = 1
# learning rate
learning_rate = 0.1
# create the model
model = Net(D_in, H, D_out)
###Output
_____no_output_____
###Markdown
Repeat the previous steps above, this time keeping the default (randomly initialized) weights and the same cross-entropy loss:
###Code
#optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
#train the model usein
cost_cross = train(Y, X, model, optimizer, criterion_cross, epochs=1000)
#plot the loss
plt.plot(cost_cross)
plt.xlabel('epoch')
plt.title('cross entropy loss')
###Output
_____no_output_____ |
Labs/06-Mask-R-CNN/Mask R-CNN.ipynb | ###Markdown
Mask R-CNN with torchvisionIn Lab 06, you can use the Mask R-CNN implementation from [the multimodallearning Github repository](https://github.com/multimodallearning/pytorch-mask-rcnn)or [the Pytorch torchvision R-CNN implementation](https://pytorch.org/vision/stable/models.htmlmask-r-cnn).This is a quickstart on the torchvision version of Mask R-CNN.For help with fine tuning, see [the PyTorch instance segmentation fine tuning tutorial](https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html). Running a pre-trained Mask R-CNN model on test imagesFirst, let's copy some utility code from the torchvision library, load a pre-trained Mask R-CNN model,and create a dataloader for the COCO validation images.
###Code
!cp /opt/pytorch/vision/references/detection/utils.py /home/jovyan/work/RTML/Mask\ R-CNN/
!cp /opt/pytorch/vision/references/detection/coco_utils.py /home/jovyan/work/RTML/Mask\ R-CNN/
!cp /opt/pytorch/vision/references/detection/transforms.py /home/jovyan/work/RTML/Mask\ R-CNN/
!cp /opt/pytorch/vision/references/detection/engine.py /home/jovyan/work/RTML/Mask\ R-CNN/
!cp /opt/pytorch/vision/references/detection/coco_eval.py /home/jovyan/work/RTML/Mask\ R-CNN/
import torch
import torchvision
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.datasets import CocoDetection
import utils
from coco_utils import get_coco
import transforms
# Load a model pre-trained on COCO and put it in inference mode
print('Loading pretrained model...')
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).cuda()
model.eval()
# Load the COCO 2017 train and val sets. We use the CocoDetection class definition
# from ./coco_utils.py, not the original torchvision.CocoDetection class. Also, we
# use transforms from ./transforms, not torchvision.transforms, because they need
# to transform the bboxes and masks along with the image.
coco_path = "/home/jovyan/work/COCO"
transform = transforms.Compose([
transforms.ToTensor()
])
print('Loading COCO train, val datasets...')
coco_train_dataset = get_coco(coco_path, 'train', transform)
coco_val_dataset = get_coco(coco_path, 'val', transform)
def collate_fn(batch):
return tuple(zip(*batch))
val_dataloader = torch.utils.data.DataLoader(coco_val_dataset, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn)
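# For fine-tuning on your own classes (see the tutorial linked above), the usual recipe is to
# swap out the pretrained box and mask heads. This is only a sketch and is not used in this
# notebook, since we evaluate the pretrained COCO heads as-is; num_classes and hidden_layer
# are placeholders you would choose for your dataset.
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

def build_finetuning_model(num_classes, hidden_layer=256):
    m = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # replace the box classification/regression head
    in_features = m.roi_heads.box_predictor.cls_score.in_features
    m.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    # replace the mask prediction head
    in_features_mask = m.roi_heads.mask_predictor.conv5_mask.in_channels
    m.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
    return m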
###Output
Loading pretrained model...
Loading COCO train, val datasets...
loading annotations into memory...
Done (t=10.38s)
creating index...
index created!
loading annotations into memory...
Done (t=0.33s)
creating index...
index created!
###Markdown
Next, we run the model on a batch from the validation set:
###Code
images, targets = next(iter(val_dataloader))
images = [ img.cuda() for img in images ]
predictions = model(images)
print('Prediction keys:', list(dict(predictions[0])))
print('Boxes shape:', predictions[0]['boxes'].shape)
print('Labels shape:', predictions[0]['labels'].shape)
print('Scores shape:', predictions[0]['scores'].shape)
print('Masks shape:', predictions[0]['masks'].shape)
###Output
Prediction keys: ['boxes', 'labels', 'scores', 'masks']
Boxes shape: torch.Size([100, 4])
Labels shape: torch.Size([100])
Scores shape: torch.Size([100])
Masks shape: torch.Size([100, 1, 426, 640])
###Markdown
The `predictions` list has one entry for each element of the batch. Each entry has the following keys:1. `boxes`: A tensor containing $[x1,y1,x2,y2]$ coordinates for the 100 top-scoring bounding boxes.2. `labels`: A tensor containing integer IDs of the labels corresponding to the 100 top bounding boxes.3. `scores`: A tensor containing the scores of the top 100 bounding boxes, sorted from highest score to lowest.4. `masks`: The mask corresponding to the most likely class for each of the top 100 bounding boxes. Each mask is the same size as the input image.With that information, let's write some code to visualize a result. The `draw_segmentation_map()` function isadapted from [Debugger Cafe's tutorial on Mask R-CNN](https://debuggercafe.com/instance-segmentation-with-pytorch-and-mask-r-cnn).
###Code
import numpy as np
import cv2
import random
# Array of labels for COCO dataset (91 elements)
coco_names = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Random colors to use for labeling objects
COLORS = np.random.uniform(0, 255, size=(len(coco_names), 3)).astype(np.uint8)
# Overlay masks, bounding boxes, and labels on input numpy image
def draw_segmentation_map(image, masks, boxes, labels):
alpha = 1
beta = 0.5 # transparency for the segmentation map
gamma = 0 # scalar added to each sum
# convert from RGB to OpenCV BGR format
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
for i in range(len(masks)):
mask = masks[i,:,:]
red_map = np.zeros_like(mask).astype(np.uint8)
green_map = np.zeros_like(mask).astype(np.uint8)
blue_map = np.zeros_like(mask).astype(np.uint8)
        # apply a random color mask to each object
color = COLORS[random.randrange(0, len(COLORS))]
red_map[mask > 0.5] = color[0]
green_map[mask > 0.5] = color[1]
blue_map[mask > 0.5] = color[2]
# combine all the masks into a single image
segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)
# apply colored mask to the image
image = cv2.addWeighted(image, alpha, segmentation_map, beta, gamma)
# draw the bounding box around each object
p1 = (int(boxes[i][0]), int(boxes[i][1]))
p2 = (int(boxes[i][2]), int(boxes[i][3]))
color = (int(color[0]), int(color[1]), int(color[2]))
cv2.rectangle(image, p1, p2, color, 2)
# put the label text above the objects
p = (int(boxes[i][0]), int(boxes[i][1]-10))
cv2.putText(image, labels[i], p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2, cv2.LINE_AA)
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Overlay masks, bounding boxes, and labels of objects with scores greater than
# threshold on one of the images in the input tensor using the predictions output by Mask R-CNN.
def prediction_to_mask_image(images, predictions, img_index, threshold):
scores = predictions[img_index]['scores']
boxes_to_use = scores >= threshold
img = (images[img_index].cpu().permute(1, 2, 0).numpy() * 255).astype(np.uint8)
masks = predictions[img_index]['masks'][boxes_to_use, :, :].cpu().detach().squeeze(1).numpy()
boxes = predictions[img_index]['boxes'][boxes_to_use, :].cpu().detach().numpy()
labels = predictions[img_index]['labels'][boxes_to_use].cpu().numpy()
labels = [ coco_names[l] for l in labels ]
return draw_segmentation_map(img, masks, boxes, labels)
###Output
_____no_output_____
###Markdown
Let's use the code above to visualize the predictions for the first image in the validation set (index 0), using a threshold of 0.5:
###Code
from matplotlib import pyplot as plt
masked_img = prediction_to_mask_image(images, predictions, 0, 0.5)
plt.figure(1, figsize=(12, 9), dpi=100)
plt.imshow(masked_img)
plt.title('Validation image result')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluate on the COCO validation setLet's get predictions in a loop for the full COCO 2017 validation set:
###Code
from engine import evaluate
results = evaluate(model, val_dataloader, 'cuda:0')
###Output
Test: [ 0/625] eta: 0:11:49 model_time: 0.4978 (0.4978) evaluator_time: 0.2335 (0.2335) time: 1.1358 data: 0.4002 max mem: 12871
Test: [100/625] eta: 0:06:12 model_time: 0.4845 (0.4848) evaluator_time: 0.1976 (0.1987) time: 0.7205 data: 0.0134 max mem: 13892
Test: [200/625] eta: 0:05:07 model_time: 0.5174 (0.4874) evaluator_time: 0.2214 (0.2095) time: 0.7613 data: 0.0138 max mem: 13892
Test: [300/625] eta: 0:03:52 model_time: 0.4706 (0.4826) evaluator_time: 0.1945 (0.2080) time: 0.7470 data: 0.0138 max mem: 13892
Test: [400/625] eta: 0:02:40 model_time: 0.4712 (0.4833) evaluator_time: 0.1781 (0.2052) time: 0.7040 data: 0.0138 max mem: 13892
Test: [500/625] eta: 0:01:29 model_time: 0.4539 (0.4823) evaluator_time: 0.1674 (0.2075) time: 0.6716 data: 0.0139 max mem: 13892
Test: [600/625] eta: 0:00:17 model_time: 0.4502 (0.4820) evaluator_time: 0.1961 (0.2059) time: 0.6920 data: 0.0141 max mem: 13893
Test: [624/625] eta: 0:00:00 model_time: 0.4857 (0.4818) evaluator_time: 0.1823 (0.2053) time: 0.7112 data: 0.0142 max mem: 13893
Test: Total time: 0:07:24 (0.7115 s / it)
Averaged stats: model_time: 0.4857 (0.4818) evaluator_time: 0.1823 (0.2053)
Accumulating evaluation results...
DONE (t=4.17s).
Accumulating evaluation results...
DONE (t=4.08s).
IoU metric: bbox
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.37881
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.59169
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.41207
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.21443
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.41474
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.49329
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.31226
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.49422
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.51876
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.32195
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.55889
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.66009
IoU metric: segm
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.34600
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.56047
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.36803
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.15587
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.37372
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.50636
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.29432
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.45392
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.47373
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.26890
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.51531
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.62955
|
Statistics/Descriptive Statistics.ipynb | ###Markdown
Table of ContentsDescriptive StatisticsRequirementsUseful Python functionsRandomlen() & sum()max() & min()sorted()The Mean1. Arithmetic mean2. Geometric mean3. Harmonic meanThe MedianThe ModePercentilesThe BoxplotHistogramVariability1. Range2. Inter-Quartile Range3. VarianceNext timeUseful Resources Descriptive StatisticsWelcome to the notebook on descriptive statistics. Statistics is a very large field. People even go to grad school for it. For our site here, we will focus on some of the big hitters in statistics that make you a good data scientist. This is a cursory, whirlwind overview by somebody who has no letters after his name. So any mistakes or corrections, blame someone who does. And then send me an email or submit a pull request on Github and we'll square it away in no time.Descriptive statistics are measurements that describe a population (or sample of that population) of values. They tell you where the center tends to be, how spread out the values are, the shape of the distribution, and a bunch of other things that graduate students have put in their theses. But here we focus on some of the simpler values that you have to know to consider yourself a functional human, let alone a data scientist. So follow along as we take a \\$5 hop-on hop-off tour of some of our basic statistics. RequirementsWe'll use two 3rd party Python libraries for displaying graphs. Run from terminal or shell:```shell> pip3 install seaborn> pip3 install pandas```
###Code
import seaborn as sns
import random as random
%matplotlib inline
###Output
_____no_output_____
###Markdown
Useful Python functionsMany statistics require knowing the length or sum of your data. Let's chug through some useful [built-in functions](https://docs.python.org/3/library/functions.html). RandomWe'll use the [`random` module](https://docs.python.org/3/library/random.html) a lot to generate random numbers to make fake datasets. The plain vanilla random generator will pull from a uniform distribution. There are also options to pull from other distributions. As we add more tools to our data science toolbox, we'll find that [NumPy's](https://docs.scipy.org/doc/numpy-1.13.0/index.html) random number generators are more full-featured and play really nicely with [Pandas](https://pandas.pydata.org/), another key data science library. For now, we're going to avoid the overhead of learning another library and just use Python's standard library.
###Code
random.seed(42)
values = [random.randrange(1,1001,1) for _ in range(10000)]
values[0:10]
###Output
_____no_output_____
###Markdown
`len()` & `sum()`The building blocks of average. Self explanatory here.
###Code
len(values)
sum(values)
###Output
_____no_output_____
###Markdown
Below we'll use Seaborn to plot and visualize some of our data. Don't worry about this too much. Visualization, while important, is not the focus of this notebook.
###Code
sns.stripplot(x=values, jitter=True, alpha=0.2)
###Output
_____no_output_____
###Markdown
This graph is pretty cluttered. That makes sense because it's 10,000 values between 1 and 1,000. That tells us there should be an average of 10 entries for each value. I'll leave counting this average as an exercise for the reader.Let's make a sparse number line with just 200 values between 1 and 1000. There should be a lot more white space.
###Code
sparse_values = [random.randrange(1,1001) for _ in range (200)]
sns.stripplot(x=sparse_values, jitter=True)
###Output
_____no_output_____
###Markdown
`max()` & `min()`These built-in functions are useful for getting the range of our data, and just general inspection:
###Code
print("Max value: {}\nMin value: {}".format(
max(values), min(values)))
###Output
Max value: 1000
Min value: 1
###Markdown
`sorted()`Another very important technique in wrangling data is sorting it. If we have a dataset of salaries, for example, and we want to see the 10 top earners, this is how we'd do it. Let's look now at the first 20 items in our sorted data set. Probably won't be too exciting though:
###Code
sorted_vals = sorted(values)
sorted_vals[0:20]
###Output
_____no_output_____
###Markdown
If we wanted to sort values in-place (as in, perform an action like `values = sorted(values)`), we would use the `list` class' own `sort()` method: ```pythonvalues.sort()``` The MeanThe mean is a fancy statistical way to say "average." You're all familiar with what average means. But mathemeticians like to be special and specific. There's not just one type of mean. In fact, we'll talk about 3 kinds of "means" that are all useful for different types of numbers.1. **Arithmetic mean** for common numbers2. **Geometric mean** for returns or growth3. **Harmonic mean** for ratios and ratesBuckle up... 1. Arithmetic meanThis is your typical average. You've used it all your life. It's simply the sum of the elements divided by the length. Intuitively, think of it as if you made every element the exact same value so that the sum of all values remains the same as before. What would that value be?Mathematically the mean, denoted $\mu$, looks like: $$\mu = \frac{x_1 + x_2 + \cdots + x_n}{n}$$where $\bar{x}$ is our mean, $x_i$ is a value at the $i$th index of our list, and $n$ is the length of that list.In Python, it's a simple operation combining two builtins we saw above: `sum()` and `len()`
###Code
def arithmetic_mean(vals):
return sum(vals) / len(vals)
arithmetic_mean(values)
###Output
_____no_output_____
###Markdown
From this we see our average value is 502.1696. Let's double check that with our intuitive definition using the sum:
###Code
avg_sum = len(values) * arithmetic_mean(values) #10,000 * 502.1696
print("{} =? {}".format(sum(values), avg_sum))
###Output
5021696 =? 5021696.0
###Markdown
2. Geometric meanThe geometric mean is a similar idea but instead uses the product. It says if I multiply each value in our list together, what one value could I use instead to get the same result? The geometric mean is very useful in things like growth or returns (e.g. stocks) because adding returns doesn't give us the ability to get returns over a longer length of time. In other words, if I have a stock growing at 5% per year, what will be the total returns after 5 years?If you said 25%, you are wrong. It would be $1.05^5 - 1 \approx 27.63\%$Mathematically, our geometric mean is:$$ GM(x) = \sqrt[n]{x_1 \times x_2 \times \cdots \times x_n }$$
###Code
returns = [1.05, 1.06, .98, 1.08]
def product(vals):
'''
This is a function that will multiply every item in the list
together reducing it to a single number. The Pythonic way to
do this would be to use the 'reduce' function like so:
> reduce(lambda x, y: x * y, vals)
We are explicit here for clairty.
'''
prod = 1
for x in vals:
prod = prod * x
return prod
def geometric_mean(vals):
geo_mean = product(vals) ** (1/len(vals)) # raising to 1/n is the same as nth root
return geo_mean
geom = geometric_mean(returns)
geom
###Output
_____no_output_____
###Markdown
Using our `product` function above, we can easily multiply all the values together to get what your return after 4 years is:
###Code
product(returns)
###Output
_____no_output_____
###Markdown
or roughly $17.8\%$. Using our geometric mean should give us the same result:
###Code
geom**4
###Output
_____no_output_____
###Markdown
Now look at what happens with the arithmetic mean:
###Code
arm = arithmetic_mean(returns)
arm
arm**4
###Output
_____no_output_____
###Markdown
The arithmetic mean would tell us that after 4 years, we should have an $18.1\%$ return. But we know it should actually be a $17.8\%$ return. It can be tricky to know when to use the arithmetic and geometric means. You also must remember to add the $1$ to your returns or it will not mathematically play nice. 3. Harmonic meanThis one is also a bit tricky to get intuitively. Here we want an average of _rates_. Not to be confused with an average of _returns_. Recall a rate is simply a ratio between two quantities, like the price-to-earnings ratio of a stock or miles-per-hour of a car.Let's take a look at the mph example. If I have a car who goes 60mph for 50 miles, 50mph for another 50, and 40mph for yet another 50, then the car has traveled 150 miles in $\frac{50mi}{60\frac{mi}{h}} + \frac{50mi}{50\frac{mi}{h}} + \frac{50mi}{40\frac{mi}{h}} = 3.08\bar{3}h$. This corresponds to a geometric mean of $150mi \div 3.08\bar{3}h \approx 48.648mph$. Much different from our arithmetic mean of 50mph. (_Note: if in our example the car did not travel a clean 50 miles for every segment, we have to use a_ [weighted harmonic mean](https://en.wikipedia.org/wiki/Harmonic_meanWeighted_harmonic_mean).)Mathematically, the harmonic mean looks like this:$$ \frac{n}{\frac{1}{x_1}+\frac{1}{x_2}+\cdots+\frac{1}{x_n}} $$So let's code that up:
###Code
speeds = [60, 50, 40]
def harmonic_mean(vals):
sum_recip = sum(1/x for x in vals)
return len(vals) / sum_recip
harmonic_mean(speeds)
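# The note above mentions the weighted harmonic mean for unequal segment lengths.
# A small sketch (the 50/100/25-mile split below is a made-up example):
def weighted_harmonic_mean(rates, weights):
    # each rate is weighted by the amount (e.g. distance) it applies to
    return sum(weights) / sum(w / r for w, r in zip(weights, rates))

print(weighted_harmonic_mean(speeds, [50, 100, 25]))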
###Output
_____no_output_____
###Markdown
Now you know about the three [Pythagorean means](https://en.wikipedia.org/wiki/Pythagorean_means). Thank me after you brag at your next party. Let's now move on to something very important in descriptive statistics: The MedianThe median should be another familiar statistic, but often misquoted. When somebody is describing a set of numbers with just a mean, they might not be telling the whole story. For example, many sets of values are _skewed_ (a concept we will cover in the histogram section) in that most values are clustered around a certain area but have a long tail. Prices are usually good examples of this. Most wine is around \\$15-20, but we've all seen those super expensive bottles from a hermit's chateau in France. Salaries are also skewed (and politicians like to remind us how far skewed just 1\% of these people are).A useful statistic in these cases is the "median." The median gives us the middle value, as opposed to the average value. Here's a simple, but illustrative example:Suppose we take the salaries of 5 people at a bar[12000, 48000, 72000, 160000, 3360000]If I told you the average salary in this bar right now is \\$730,400, I'd be telling you the truth. But you can tell that our rich friend pulling in over 3 million is throwing off the curve. When he goes home early to do a business, the average drops to just \\$73,000. _A full 10 times less_.The median instead in this case is much more consistent, or in other words, not as prone to _outliers._ To find the median, we simply take the middle value. Or if there are an even number of entries, we take the average of the two middle values. Here it is in Python:
###Code
salaries = [12000, 48000, 72000, 160000, 3360000]
def median(vals):
n = len(vals)
sorted_vals = sorted(vals)
midpoint = n // 2
if n % 2 == 1:
return sorted_vals[midpoint]
else:
return arithmetic_mean([sorted_vals[midpoint-1], sorted_vals[midpoint]])
median(salaries)
###Output
_____no_output_____
###Markdown
A much more reasonable \$72,000! Now let's see what happens when Moneybags goes home:
###Code
median(salaries[:-1])
###Output
_____no_output_____
###Markdown
The median drops down to \\$60,000 (which is the average of \\$48,000 and \\$72,000).Let's take a look at our original `values` list of 10,000 numbers.
###Code
median(values)
# Recall our values list is even, meaning 506.0 was both item 5000 and 5001
len(values)
# Lopping off the end returns the same value
median(values[:-1])
# Why? There are 9 506s in the list
from collections import Counter
c = Counter(values)
c[506]
###Output
_____no_output_____
###Markdown
Above we used the [`Counter`](https://docs.python.org/3.6/library/collections.htmlcollections.Counter) class in the standard library. This class is a subclass of the `dict` that holds a dictionary of keys to their counts. We can build our own version of it like so:
###Code
# Here we use the defaultdict that will initialize our first value if it doesn't yet exist
from collections import defaultdict
def make_counter(values):
counts = defaultdict(int)
for v in values:
counts[v] += 1
return counts
counts = make_counter([1, 2, 2, 3, 5, 6, 6, 6])
counts
###Output
_____no_output_____
###Markdown
Remember this part because it will show up very soon when we talk about histograms, the chef's knife of a data scientist's data exploration kitchen.But first, there's one more descriptive statistic that we should cover. And that is The ModeThe mode is simply the most common element. If there are multiple elements with the same count, then there are multiple modes. If all elements have the same count, there are no modes. If the distribution is _continuous_ (meaning it can take uncountably infinite values, which we will discuss in the Distributions chapter), then we use ranges of values to determine the mode. Honestly, I don't really find the mode too useful. A good time to use it is if there's a lot of _categorical data_ (meaning values like "blue", "red", "green" instead of _numerical data_ like 1,2,3). You might want to know what color car your dealership has the most of.Let's take a look at that example now. I've built a set of cars with up to 20 cars of any of four colors.
###Code
car_colors = ["red"] * random.randint(1,20) + \
["green"] * random.randint(1,20) + \
["blue"] * random.randint(1,20) + \
["black"] * random.randint(1,20)
car_colors
#Using our familiar counter
color_count = Counter(car_colors)
color_count
# We can see the mode above is 'blue' because we have 18. Let's verify:
def mode(counter):
# store a list of name:count tuples in case multiple modes
modes = [('',0)]
for k,v in counter.items():
highest_count = modes[0][1]
if v > highest_count:
modes = [(k,v)]
elif v == highest_count:
modes.append((k,v))
return modes
mode(color_count)
# If we have multiple modes?
mode(Counter(['blue']*3 + ['green']*3 + ['black']*2))
###Output
_____no_output_____
###Markdown
But that's enough about modes. Check out Wikipedia if you want more, because there's no point spending more time on them than they're worth. Hang in there, because we're getting close. Still to come are Percentiles, Boxplots, and Histograms. Three very important things.Let's get to it. PercentilesA percentile is familiar to anyone who has taken the SAT. It answers the question: what percentage of students are dumber than I am? Well the College Board would love to tell you: congratulations, you're in the 92nd percentile!Let's take a look at our old friend Mr. `values` with 10,000 numbers from 1-1000. Since this list is _uniformly distributed_, meaning every value is as likely to occur as any other, we expect 25% of the numbers to be below 250, 50% to be below 500, and 75% to be below 750. Let's verify:
###Code
def percentile(vals, elem):
    '''Returns the fraction of numbers
    in vals that fall below elem.
    '''
    count = 0
    sorted_vals = sorted(vals)
    for val in sorted_vals:
        if val > elem:
            return count / len(vals)
        count += 1
    # elem is greater than or equal to every value
    return count / len(vals)
for num in [250, 500, 750]:
print("Percentile for {}: {}%".format(num, percentile(values, num)*100))
###Output
Percentile for 250: 24.37%
Percentile for 500: 49.45%
Percentile for 750: 75.48%
###Markdown
Just like we'd expect. Now if the data set is not so nice and uniform, we expect these values to be quite different. Let's write a function to give us an element at a particular percentile:
###Code
from math import ceil
def pct_value(vals, pct):
sorted_vals = sorted(vals)
n = len(vals)
return sorted_vals[ceil(n*pct)]
for pct in [.25, .5, .75]:
print("Element at percentile {}%: {}".format(pct*100, pct_value(values, pct)))
###Output
Element at percentile 25.0%: 256
Element at percentile 50.0%: 506
Element at percentile 75.0%: 745
###Markdown
Notice how the element at the 50th percentile is also our median! Now we have a second definition of the median.Let's take a look now at a highly skewed set. It will range from 0-100 but we'll cluster it around 10
###Code
skewed = []
for i in range(1,100):
skewed += [i]*random.randint(0,int(4+i//abs(10.1-i)))
def print_statistics(vals, calc_mode=True):
print("Count: {}".format(len(vals)))
print("Mean: {:.2f}".format(arithmetic_mean(vals)))
print("Median: {}".format(median(vals)))
if calc_mode: print("Mode: {}".format(mode(Counter(vals))))
print("Max: {}".format(max(vals)))
print("Min: {}".format(min(vals)))
print("Range: {}".format(max(vals)-min(vals)))
for pct in [.25, .5, .75]:
print("{:.0f}th Percentile: {}".format(pct*100, pct_value(vals, pct)))
print("IQR: {}".format(pct_value(vals, 0.75) - pct_value(vals, 0.25)))
print_statistics(skewed)
###Output
Count: 353
Mean: 39.37
Median: 30
Mode: [(10, 73)]
Max: 99
Min: 1
Range: 98
25th Percentile: 10
50th Percentile: 32
75th Percentile: 66
IQR: 56
###Markdown
A few clues that this distribution is skewed:* The mean is significantly different from the median* The percentiles are bunched toward the low end. For a uniform distribution we'd expect 25, 50, and 75 for our percentiles.* The max is much higher than the mean, median, or even 75th percentile.Let's take a look at a simple plot to describe all of these statistics to us: The BoxplotAlso sometimes the Box-and-Whisker plot, this is a great way to visualize a lot of the information our `print_statistics` function displayed. In particular, we can see in one graph* Median* 75th percentile (called the third quartile)* 25th percentile (called the first quartile)* The reach ($\pm1.5*IQR$), which shows outliersIt does not show the mean, but it can be intuited by looking at the plot. Let's take a look at plots for values and skewed:
###Code
sns.boxplot(values)
###Output
_____no_output_____
###Markdown
A classic uniform distribution: centered about 500, the median goes right down the middle, and the whiskers are evenly spaced. Now let's take a look at the `skewed` list:
###Code
sns.boxplot(skewed)
###Output
_____no_output_____
###Markdown
Looks pretty different? Instead of being centered around 50, it looks like the box is centered around 40. The median is at 27 and much of the box is to the right of it. This shows us that the distribution is skewed to the right. There's another important way to visualize a distribution and that is HistogramAh, the moment we've all been waiting for. I keep teaching you ways to describe a dataset, but sometimes a picture is worth a thousand words. That picture is the Histogram.A histogram is a bar chart in which the values of the dataset are plotted horizontally on the X axis and the _frequencies_ (i.e. how many times that value was seen) are plotted on the Y axis. If you remember our functions to make a counter, a histogram is essentially a chart of those counts.Think for a minute on what the histogram for our uniform dataset of 10,000 randomly generated numbers would look like? Pretty boring right?
###Code
# Seaborn gives an easy way to plot histograms. Plotting from scratch is beyond the
# scope of the programming we will do
sns.distplot(values, kde=False, bins=100)
###Output
_____no_output_____
###Markdown
Seaborn's helpful `sns.distplot()` method simply turns a dataset into a histogram for us. The `bins` parameter allows us to make bins where instead of the frequency count of each value, we plot the frequency count of a range of values. This is very useful when we have a continuous distribution (i.e. one that can take an infinite number of values over its range), as plotting every individual value is infeasible and would make for an ugly graph.Let's take a look at our skewed data:
###Code
sns.distplot(skewed, kde=False, bins=100)
###Output
_____no_output_____
###Markdown
The data has tons of values around 10, and everything else hovers around 4. This type of distribution is called "unimodal" in that it has one peak, or one "contender" for the mode. Practically, unimodal and bimodal are incredibly common.I'm going to jump ahead to the next notebooks where we generate and describe different types of these distributions, how they're used, and how to describe them. One of the most basic and fundamental distribution in statistics is the unimodal Normal (or Gaussian) distribution. It's the familiar bell-curve. Let's use a simple, albeit slow, function to generate numbers according to the normal distribution (taken from https://www.taygeta.com/random/gaussian.html)
###Code
from math import sqrt, log, cos, sin, pi
def generateGauss():
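    # this is the Box-Muller transform: it maps two independent Uniform(0,1) draws
    # into two independent standard normal draws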
x1 = random.random() # generate a random float in [0,1.0)
x2 = random.random()
y1 = sqrt(-2 *log(x1)) * cos(2*pi*x2)
y2 = sqrt(-2*log(x1)) * sin(2*pi*x2)
return y1, y2
gaussValues = []
for _ in range(10000):
gaussValues += list(generateGauss())
#Let's take a peek:
gaussValues
# and print our statistics
print_statistics(gaussValues, calc_mode=False)
###Output
Count: 20000
Mean: -0.01
Median: 0.0035974390206229565
Max: 4.291088338639748
Min: -4.52593184929381
Range: 8.817020187933558
25th Percentile: -0.6838808785947806
50th Percentile: 0.003600379797408655
75th Percentile: 0.665650740246061
IQR: 1.3495316188408415
###Markdown
The nature of the function is such that the mean should fall around 0. It looks like we accomplished that. Also note how the 25th and 75th percentiles are roughly symmetric about the median. This is an indication that the distribution is not significantly skewed. Let's take a look at its histogram:
###Code
sns.distplot(gaussValues, kde=False)
###Output
_____no_output_____
###Markdown
Get used to this image because you will see it _everywhere_ in statistics, data science, and life. Even though there are many many distributions out there (and even more variations on each of those), most people will be happy to apply the "bell curve" to almost anything. Chances are, though, they're right. VariabilityLet's talk about one more descriptive statistic that describes to us how much the values vary. In other words, if we're looking at test scores, did everyone do about the same? Or were there some big winners and losers?Mathematicians call this the _variability_. There are three major measures of variability: range, inter-quartile range (IQR), and variance/standard deviation. 1. RangeRange is a very simple measure: how far apart could my values possibly be? In our generated datasets above, the answer was pretty obvious. We generated random numbers from 1 to 1000, so the range was $1000-1 = 999$. The values could never go outside of this range.But what about our gaussian values? The standard normal distribution has asymptotic end points instead of absolute end points, so it's not so clean. We can see from the graph above that it doesn't look like there are any values above 4 or below -4, so we'd expect a range of something around 8:
###Code
print("Max: {}\nMin: {}\nRange: {}".format(
max(gaussValues),
min(gaussValues),
max(gaussValues) - min(gaussValues)))
###Output
Max: 4.291088338639748
Min: -4.52593184929381
Range: 8.817020187933558
###Markdown
Exactly what we expected. In practice, the range is a good descriptive statistic, but you can't do many other interesting things with it. It basically lets you say "our results were between X and Y", but nothing too much more profound. Another good way to describe the range is called 2. Inter-Quartile Range, or IQR for short. This is a similar technique where instead of taking the difference between the max and min values, we take the difference between the 75th and 25th percentile. It gives a good sense of the range because it excludes outliers and tells you where the middle 50% of the values are grouped. Of course, this is most useful when we're looking at a unimodal distribution like our normal distribution, because for a distribution that's bimodal (i.e. has many values at either end of the range), it will be misleading.Here's how we calculate it:
###Code
print("75th: {}\n25th: {}\nIQR: {}".format(
pct_value(gaussValues, .75),
pct_value(gaussValues, .25),
pct_value(gaussValues, .75) - pct_value(gaussValues, .25)
))
###Output
75th: 0.665650740246061
25th: -0.6838808785947806
IQR: 1.3495316188408415
###Markdown
So again, this tells us that 50% of our values are between -0.68 and 0.68, all within a span of about 1.35. Comparing this to the range (which is not at all influenced by percentages of values), this gives you the sense that they're bunched around the mean.If we want a little more predictive power, though, it's time to talk about 3. VarianceVariance is a measure of how much values typically deviate (i.e. how far away they are) from the mean.If we want to calculate the variance, then we first see how far a value is from the mean ($\mu$), square it (which gets rid of the negative), sum them up, and divide by $n$. Essentially, it's an average where the value is the deviation squared instead of the value itself. Here's the formula for variance, denoted by $\sigma^2$:$$ \sigma^2 = \frac{(x_1 - \mu)^2+(x_2 - \mu)^2+ \cdots + (x_n - \mu)^2}{n} $$Let's code that up:
###Code
def variance(vals):
n = len(vals)
m = arithmetic_mean(vals)
variance = 0
for x in vals:
variance += (x - m)**2
return variance/n
variance(gaussValues)
###Output
_____no_output_____
###Markdown
The variance for our generated Gaussian numbers is roughly 1, which is another condition of a standard normal distribution (mean is 0, variance is 1). So no surprise. But the variance is a bit of a tricky number to intuit because it is the average of the _squared_ differences from the mean. Take our skewed data, for example:
###Code
variance(skewed)
###Output
_____no_output_____
###Markdown
How do you interpret this? The max value is 100, so is a variance of 934.8 a lot or a little? Also, let's say the skewed distribution was a measure of price in dollars. Therefore, the units of variance would be 934.8 dollars squared. Doesn't make a whole lot of sense.For this reason, most people will take the square root of the variance to give them a value called the **standard deviation**: $\sigma = \sqrt{\sigma^2}$.
###Code
def stddev(vals):
return sqrt(variance(vals))
stddev(skewed)
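# side note: this is the *population* standard deviation (divide by n); for a sample, many
# tools divide by n-1 instead (Bessel's correction). The standard library offers both:
import statistics
print(statistics.pstdev(skewed), statistics.stdev(skewed))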
###Output
_____no_output_____ |
PA_Summary.ipynb | ###Markdown
SummaryI chose the *Thousand German News Articles Dataset* for my project assignment. There was not much cleaning needed, as this was already done by the creator of the dataset, and the steps shown in the 'class notebook' were sufficient.The dataset poses a multiclass (9) classification problem.
###Code
data_all['label'].value_counts().plot.barh(figsize=(5,5))
###Output
_____no_output_____ |
notebooks/Methods/.ipynb_checkpoints/Welch hyperparemeters-checkpoint.ipynb | ###Markdown
Determining Welch hyperparametersThe goal is to write a wrapper for the Welch function as implemented in scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.welch.html#r34b375daf612-1) for Pyleoclim. Let's generate a periodic signal (evenly-spaced) with periodicities of 20 and 80. Default behavior and testing
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
freqs=[1/20,1/80]
time=np.arange(2001)
signals=[]
for freq in freqs:
signals.append(np.cos(2*np.pi*freq*time))
s=sum(signals)
#plot
plt.plot(time,s)
plt.xlabel('Time')
plt.ylabel('Value')
###Output
_____no_output_____
###Markdown
Use the original scipy function (default parameters) to calculate the spectrum
###Code
#params
fs=1.0
window='hann'
nperseg=None
noverlap=None
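# nperseg is the length of each Welch segment (scipy's default is 256 samples) and
# noverlap is the overlap between consecutive segments (scipy's default is nperseg // 2)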
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,200)
###Output
<ipython-input-22-3f216d932a6a>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
Setting the segment length (nperseg) to the full length of the timeseries, i.e. using a single segment -> Current default in pyleoclim
###Code
#params
fs=1.0
window='hann'
nperseg=len(s)
noverlap=None
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,200)
###Output
<ipython-input-18-3f216d932a6a>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
Timeseries with only 3 cycles represented
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
time=np.arange(2001)
high_per=len(time)/3
freqs=[1/20,1/80,1/high_per]
signals=[]
for freq in freqs:
signals.append(np.cos(2*np.pi*freq*time))
s=sum(signals)
#plot
plt.plot(time,s)
plt.xlabel('Time')
plt.ylabel('Value')
###Output
_____no_output_____
###Markdown
Using the pyleoclim defaults
###Code
#params
fs=1.0
window='hann'
nperseg=len(s)
noverlap=None
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,1500)
###Output
<ipython-input-38-ce12ed394372>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
Using the scipy defaults
###Code
#params
fs=1.0
window='hann'
nperseg=None
noverlap=None
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,1500)
###Output
<ipython-input-30-ce12ed394372>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
3 segments with 50% overlap (REDFIT defaults)
###Code
#params
fs=1.0
window='hann'
nperseg=len(s)/2
noverlap=None
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,1500)
###Output
<ipython-input-32-ce12ed394372>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
Adding more overlap (75%)
###Code
#params
fs=1.0
window='hann'
nperseg=len(s)
noverlap=0.75*len(s)
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,1500)
###Output
<ipython-input-34-ce12ed394372>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
###Markdown
Considering less overlap
###Code
#params
fs=1.0
window='hann'
nperseg=len(s)
noverlap=0.3*len(s)
nfft=None
detrend='constant'
return_onesided=True
scaling='density'
axis=-1
average='mean'
#function
from scipy import signal
f,Pxx=signal.welch(s,fs=fs,window=window,nperseg=nperseg,noverlap=noverlap,nfft=nfft,detrend=detrend,return_onesided=return_onesided,scaling=scaling,axis=axis,average=average)
#and plot the resulting spectrum
plt.plot(1/f,Pxx)
plt.xlabel('Period')
plt.ylabel('Amplitude')
plt.xlim(0,1500)
###Output
<ipython-input-36-ce12ed394372>:6: RuntimeWarning: divide by zero encountered in true_divide
plt.plot(1/f,Pxx)
|
FractalDim/Ndim_Correlation_dimension.ipynb | ###Markdown
Correlation dimension's code Here I'll develop code to calculate the N-dimensional attractor's correlation dimension. Some useful notes:$$ C(r) = \frac{2}{N(N-1)} \sum_{j=1}^N \sum_{i=j+1}^N \Theta (r - r_{ij} ) $$where $\Theta (r - r_{ij})$ is the *Heaviside function* defined as:$$ \Theta (x) = \left\{\begin{array}{l}0 \ \ \ \text{for} \ x < 0\\1 \ \ \ \text{for} \ x \ge 0 \end{array}\right. $$and $r_{ij}$ is the distance between two points. The correlation dimension will be the slope of the plot of $\log C(r)$ vs $\log r$:$$ D_2 = \lim_{r\to 0} \lim_{N \to \infty} \frac{d \log(C(r))}{d \log(r)} $$
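As a quick, purely illustrative cross-check of the definition above, here is a minimal vectorized sketch of the correlation sum using `scipy.spatial.distance.pdist`. Note the assumption that the points are passed with shape `(N, dim)` (points in rows), which is the transpose of the `(dim, N)` layout used by the notebook's own implementation below.

```python
import numpy as np
from scipy.spatial.distance import pdist

def correlation_sum(points, radii):
    """Vectorized C(r). points: array of shape (N, dim); radii: 1D array of r values."""
    d = pdist(points)            # all N*(N-1)/2 pairwise distances r_ij
    n_pairs = d.size
    # Theta(r - r_ij) counts pairs with r_ij <= r; the 2/(N(N-1)) factor is 1/n_pairs
    return np.array([(d <= r).sum() / n_pairs for r in radii])

# D2 is then estimated as the slope of log C(r) vs log r, e.g.:
# slope, intercept = np.polyfit(np.log(radii), np.log(correlation_sum(pts, radii)), 1)
```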
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import odeint
%matplotlib notebook
###Output
_____no_output_____
###Markdown
First I define the basic functions I will use
###Code
#--- Heaviside Function
def Heaviside(x):
if x<0:
return 0
elif x>=0:
return 1
#--- N dimensional distance
def distn(x1, x2, dim):
dist = 0
for i in range(0, dim):
diff = (x1[i] - x2[i])**2
dist += diff
return np.sqrt(dist)
#--- Ode solutions after trascient
def sol_tras(f, y0, params, t, dt):
tras_state = odeint(f, y0, t, args=params)
state0 = tras_state[-1,:]
tt = np.arange( t[-1], 2*t[-1], dt)
state = odeint(f, state0, tt, args=params)
return state
###Output
_____no_output_____
###Markdown
The correlation function.- N is the number of points you want to use in the calculation- n is the attractor's topological dimension- r is the set of distances chosen according to the system- Data is the solution itself
###Code
#--- each C_r
def C_r(r, Data, n, N):
tot_sum = 0
for i in range(0, N):
for j in range(i+1, N):
r_ij = distn(Data[:,i], Data[:,j], n)
xev = r - r_ij
tot_sum += Heaviside(xev)
#print(tot_sum, i)
Cr = 2*tot_sum / (N*(N-1))
return Cr
#---- all C_r depending on r
def Total_C_r(dat, R, N, dim):
tot_C_r = []
for r in R:
curr = C_r(r, dat, dim, N)
tot_C_r.append(curr)
return tot_C_r
###Output
_____no_output_____
###Markdown
Applying this method to some known dynamical systems
###Code
#---- Henon's Map
def Henon_map(n, a, b):
x=[0.5*(1-b)]; y=[0.5*(1-b)]
for i in range(1, n):
xi = 1 - a*x[-1]**2 + y[-1]
yi = b*x[-1]
x.append(xi)
y.append(yi)
return x, y
#---- Lorentz' system
def Lorentz_eq2(r, t, rho, sigma, beta): #Atractor de Lorentz
x, y, z = r
dx = sigma*(y-x)
dy = x*(rho-z) -y
dz = x*y - beta*z
return np.array([dx, dy, dz])
###Output
_____no_output_____
###Markdown
Henon's Map correlation dimension using the new algorithm. Note that this is a discrete map, so the numerical solver will not be used
###Code
#---- 1 million points Henon's Map
x_H, y_H = Henon_map(int(1e6), 1.4, 0.3)
plt.plot(x_H, y_H, '.')
Henon = np.array((x_H, y_H))
dim_H = 2
#--- first set r manually
r_H = np.arange(0.01, 1.0, 0.01)
#--- set number of points N
N_H = 100
#--- Calculate C_r
Henon_Cr = Total_C_r(Henon, r_H, N_H, dim_H)
#--- Plot the resulting curve and fit
coef_H = np.polyfit(np.log(r_H), np.log(Henon_Cr), 1)
print("The correlation dimension is: ", coef_H[0])
plt.loglog(r_H, Henon_Cr)
plt.ylabel(r'log $C_r$')
plt.xlabel("log r")
plt.title("Henon's correlation dimension")
plt.grid()
plt.show()
###Output
The correlation dimension is:  1.2350871395373977
###Markdown
To verify the n-dimensional code, I am going to calculate the Lorentz system's correlation dimension
###Code
#--- Solving the differential equations in the transcient
dt_L = 0.01
p_L = (28, 10, 8./3.)
t_L = np.arange(0, 100, dt_L)
y0_L = np.array([1, 0, 0])
sol_L = sol_tras(Lorentz_eq2, y0_L, p_L, t_L, dt_L)
#---- Plotting Lorentz atractor
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d')
ax.plot(sol_L[:,0], sol_L[:,1], sol_L[:,2])
Lorentz = np.array((sol_L[:,0], sol_L[:,1], sol_L[:,2]))
dim_L = 3
#--- first set r manually
r_L = np.arange(25, 30, 0.1)
#--- set number of points N
N_L = 100
#--- Calculate C_r
Lorentz_Cr = Total_C_r(Lorentz, r_L, N_L, dim_L)
#--- Plot the resulting curve and fit
coef_L = np.polyfit(np.log(r_L), np.log(Lorentz_Cr), 1)
print("The correlation dimension is: ", coef_L[0])
plt.loglog(r_L, Lorentz_Cr)
plt.ylabel(r'log $C_r$')
plt.xlabel("log r")
plt.title("Lorentz' correlation dimension")
plt.grid()
plt.show()
###Output
The correlation dimension is:  0.8426833441354541
|
seq2seq/exploration.ipynb | ###Markdown
**Overview** The Enron email dataset contains approximately 500,000 emails generated by employees of the Enron Corporation. It was obtained by the Federal Energy Regulatory Commission during its investigation of Enron's collapse.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import plotly.graph_objects as go
from wordcloud import WordCloud
emails = pd.read_csv('./enron.csv', skiprows=lambda x:x%2)
cols = emails.columns
emails.head(3)
import email
message = emails.loc[0]["message"]
e = email.message_from_string(message)
e.items()
e.get_payload()
def get_field(field, messages):
column = []
for message in messages:
e = email.message_from_string(message)
column.append(e.get(field))
return column
emails["date"] = get_field("Date", emails["message"])
emails["subject"] = get_field("Subject", emails["message"])
emails["X-Folder"] = get_field("X-Folder", emails["message"])
emails["X-From"] = get_field("X-From", emails["message"])
emails["X-To"] = get_field("X-To", emails["message"])
emails.head(3)
def body(messages):
column = []
for message in messages:
e = email.message_from_string(message)
column.append(e.get_payload())
return column
emails["body"] = body(emails["message"])
emails.head(3)
###Output
_____no_output_____
###Markdown
EDA Distribution of message length
###Code
emails['Message Length'] = emails['body'].apply(lambda x: len(x))
sns.distplot(emails['Message Length'],bins=None,hist=False)
###Output
_____no_output_____
###Markdown
**Observations*** The message length distribution is right skewed **Observation*** The maximum message length is more than 2 million characters CDF of message length
###Code
fig, ax = plt.subplots(figsize=(10, 10))
counts, bin_edges = np.histogram(emails['Message Length'], bins=500,
density = True)
pdf = counts/(sum(counts))
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:], cdf)
plt.title('CDF of message length')
###Output
_____no_output_____
###Markdown
**Observations*** 99% of messages have a length below 500,000 characters Top 10 employees with the most emails sent
###Code
def employee(file):
column = []
for string in file:
column.append(string.split("/")[0])
return column
emails["employee"] = employee(emails["file"])
emails.head(3)
top_10 = pd.DataFrame(emails["employee"].value_counts()[:10])
top_10.reset_index(inplace=True)
top_10.columns = ["employee_name", "count"]
top_10
sns.barplot(y="employee_name", x="count", data=top_10)
plt.xlabel("Number of emails send")
plt.ylabel("Employee Name")
plt.title('TOP 10 Employee to send mails')
plt.show();
###Output
_____no_output_____
###Markdown
**Observation*** The top 10 email senders sent 6,000 to 14,000 emails each WordCloud for Message body
###Code
txt=emails['body'][:5000].values.astype(str)
txt_string=''
for i in txt:
txt_string+= str(i)
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(txt_string)
plt.figure(figsize=(20, 20))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.title('WordCloud for Message Body')
plt.show()
###Output
_____no_output_____ |
Project#5/project_2_hiv.ipynb | ###Markdown
**Project2. Molecular property prediction using GCN**Dataset: HIV **Library + hyperparameter setup**
###Code
# install rdkit and deepchem on colab environment
!wget -c https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
!chmod +x Miniconda3-latest-Linux-x86_64.sh
!time bash ./Miniconda3-latest-Linux-x86_64.sh -b -f -p /usr/local
!conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem=2.1.0 python=3.6
import sys
sys.path.append('/usr/local/lib/python3.6/site-packages/')
# import libraries
import deepchem as dc
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
import time
import math
import numpy as np
import scipy.sparse as sp
import networkx as nx
from rdkit import Chem
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_curve, auc #for ECFP
from sklearn.ensemble import RandomForestClassifier #for ECFP
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
# set hyperparameters
# HIV setting
hp = {}
hp['learning_rate'] = 1e-5
hp['epochs'] = 5
hp['batch_size'] = 128
hp['hidden1'] = 64
hp['hidden2'] = 128
hp['hidden3'] = 256
hp['hidden4'] = 512
hp['dropout'] = 0.2
print(hp)
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/externals/joblib/__init__.py:15: DeprecationWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.
warnings.warn(msg, category=DeprecationWarning)
###Markdown
**Helper functions for GCN data handling**
###Code
def load_lists(dataset):
# load data
adj_list = []
feat_list = []
label_list = []
for x, y, w, smiles in dataset.itersamples():
smiles = Chem.MolFromSmiles(smiles)
featurizer = dc.feat.ConvMolFeaturizer()
features = featurizer.featurize([smiles])[0]
edge_list = features.get_adjacency_list()
atom_feature = features.get_atom_features()
# convert adjacency list into adjacency matrix "A"
adj = np.zeros((len(edge_list), len(edge_list)))
for i in range(len(edge_list)):
for j in edge_list[i]:
adj[i][j]=1
adj_list.append(adj) # (num_atoms, num_atoms)
feat_list.append(atom_feature) # (num_atoms, num_features=75)
label_list.append(y) # 0/1
return adj_list, feat_list, label_list
# f(H(l), A) = nonlinear(Dhat^-1/2 * Ahat * Dhat^-1/2 * H_l * W_l)
# preprocessing for recurrent interaction during convolution
def normalize_adj(adj):
# add identity matrix
adj_hat = adj + np.eye(adj.shape[0])
# compute diagonal node degree matrix of Ahat
deg = np.sum(adj_hat,axis=0)
# sqrt inv
deg_inv = np.divide(1, deg)
deg_inv_sqrt = np.sqrt(deg_inv)
deg_inv_diag = np.diag(np.squeeze(np.asarray(deg_inv_sqrt)))
# normalize
adj_norm = np.matmul(np.matmul(deg_inv_diag, adj_hat), deg_inv_diag)
return adj_norm
# custom dataset class
class BaseDataset(Dataset):
def __init__(self, adj_list, feat_list, label_list, train_mode=False):
self.adj_list = adj_list
self.feat_list = feat_list
self.label_list = label_list
self.train_mode = train_mode
def __len__(self):
return len(self.adj_list)
def __getitem__(self, idx):
# return graph, feature and label
        return self.adj_list[idx], self.feat_list[idx], self.label_list[idx]
def get_tensors(self):
# return batch tensors of normalized adjacency matrix, input and target
# return tensor shape (batch_size, *)
adj_list = self.adj_list
feat_list = self.feat_list
label_list = self.label_list
# get maximum size for padding
max_num_atom = -1
for adj in adj_list:
if adj.shape[0] > max_num_atom:
max_num_atom = adj.shape[0]
# make padded batch matrix of normalized adjacency matrix
padded_adj_list = []
for i, adj in enumerate(adj_list): # (num_atoms, num_atoms)
adj_norm = normalize_adj(adj) # normalize
# padding
pad_num = max_num_atom - adj.shape[0]
npad = ((0, pad_num), (0, pad_num))
padded_adj = np.pad(adj_norm, npad, mode='constant', constant_values=0)
# append
padded_adj_list.append(padded_adj)
# construct numpy array
adj_np = np.stack(padded_adj_list, axis=0) # (batch_size, num_atoms, num_atoms)
# make padded batch matrix of feature matrix
padded_feat_list = []
for i, feat in enumerate(feat_list): # (num_atoms, num_features=75)
# padding
pad_num = max_num_atom - feat.shape[0]
npad = ((0, pad_num), (0, 0))
padded_feat = np.pad(feat, npad, mode='constant', constant_values=0)
# append
padded_feat_list.append(padded_feat)
# construct numpy array
feat_np = np.stack(padded_feat_list, axis=0) # (batch_size, num_atoms, num_features=75)
feat_np = np.transpose(feat_np, [0, 2, 1]) # (batch_size, num_features=75, num_atoms)
# convert label list to numpy array
label_np = np.asarray(label_list)
adjs = torch.from_numpy(adj_np)
inputs = torch.from_numpy(feat_np)
targets = torch.from_numpy(label_np)
del adj_list, feat_list, label_list, padded_adj_list, padded_feat_list
return adjs, inputs, targets
# dataset constructor
def make_dataset(batch_size, adj_list, feat_list, label_list):
# construct BaseDataset objects for each batch
data_len = len(adj_list)
begin = 0
finished = 0
dataset_list = []
while(1):
if begin + batch_size < data_len:
end = begin + batch_size
dataset_list.append(BaseDataset(adj_list[begin:end],
feat_list[begin:end],
label_list[begin:end],
train_mode=True))
begin += batch_size
else:
end = data_len
dataset_list.append(BaseDataset(adj_list[begin:end],
feat_list[begin:end],
label_list[begin:end],
train_mode=True))
break
return math.ceil(data_len/batch_size), dataset_list
###Output
_____no_output_____
###Markdown
**GCN model code**
###Code
# graph convolution layer
class GraphConvolution(nn.Module):
def __init__(self, input_dim, output_dim, dropout=0):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.p = dropout
# feature weight matrix
self.w = nn.Parameter(torch.empty(output_dim, input_dim))
nn.init.xavier_uniform_(self.w)
self.relu = nn.ReLU() # non-linearity
self.dropout = nn.Dropout(p=self.p) # dropout
def forward(self, x, adjs):
# input, output tensor shapes: (batch_size, num_features, num_nodes)
# adjs tensor shape: (batch_size, num_nodes, num_nodes)
num_nodes = x.shape[2]
if x.shape[1] != self.input_dim:
print((x.shape[1], self.input_dim))
raise RuntimeError("input feature dimension not matched to input_dim argument")
x = x.type(torch.FloatTensor)
adjs = adjs.type(torch.FloatTensor)
# forward
x = self.dropout(x)
x = torch.transpose(x, 1, 2) # (batch_size, num_nodes, input_dim)
w = torch.transpose(self.w, 0, 1) # (input_dim, output_dim)
x = torch.einsum("abc,cd->abd", (x, w)) # (batch_size, num_nodes, output_dim)
x = torch.transpose(x, 1, 2) # (batch_size, output_dim, num_nodes)
o = torch.bmm(x, adjs) # (batch_size, output_dim, num_nodes)
return o
class GCNModel(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.input_dim = input_dim
self.hp = hp # hyperparameters
hidden1 = self.hp['hidden1']
hidden2 = self.hp['hidden2']
hidden3 = self.hp['hidden3']
hidden4 = self.hp['hidden4']
p = self.hp['dropout']
self.gc1 = GraphConvolution(input_dim=self.input_dim, output_dim=hidden1, dropout=p)
self.gc2 = GraphConvolution(input_dim=hidden1, output_dim=hidden2, dropout=p)
self.gc3 = GraphConvolution(input_dim=hidden2, output_dim=hidden3, dropout=p)
self.gc4 = GraphConvolution(input_dim=hidden3, output_dim=hidden4, dropout=p)
self.bn1 = nn.BatchNorm1d(num_features=hidden1)
self.bn2 = nn.BatchNorm1d(num_features=hidden2)
self.bn3 = nn.BatchNorm1d(num_features=hidden3)
self.bn4 = nn.BatchNorm1d(num_features=hidden4)
self.bn_fc = nn.BatchNorm1d(num_features=1)
self.fc = nn.Linear(in_features=hidden4, out_features=1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=p)
self.dropout_fc = nn.Dropout(p=0.5)
def forward(self, x, adjs):
# input tensor shape: (batch_size, input_dim, num_nodes)
x = self.relu(self.dropout(self.bn1(self.gc1(x, adjs))))
x = self.relu(self.dropout(self.bn2(self.gc2(x, adjs))))
x = self.relu(self.dropout(self.bn3(self.gc3(x, adjs))))
x = self.relu(self.dropout(self.bn4(self.gc4(x, adjs))))
x = self.fc(self.dropout_fc(torch.mean(x, dim=2)))
o = self.bn_fc(x)
# output tensor shape: (batch_size, output_dim=1)
return o
def train(model, train_batch_num, train_dataset_list, criterion):
train_roc_score = 0.
train_ap_score = 0.
cnt = 0
epoch_loss = 0.
for idx in range(train_batch_num):
train_dataset = train_dataset_list[idx]
adjs, inputs, targets = train_dataset.get_tensors()
output = model(inputs, adjs)
loss = criterion(output, targets)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
targets = targets.clone().detach().numpy()
output = output.clone().detach().numpy()
output = 1 / (1 + np.exp(-output)) # sigmoid
output = output > 0.5
try:
train_roc_score += roc_auc_score(targets, output)
train_ap_score += average_precision_score(targets, output)
cnt += 1
except ValueError:
pass
train_roc_score /= cnt
train_ap_score /= cnt
epoch_loss /= train_batch_num
return train_roc_score, train_ap_score, epoch_loss
def evaluate(valid_batch_num, model, valid_dataset_list):
roc_score = 0.
ap_score = 0.
cnt = 0
for idx in range(valid_batch_num):
valid_dataset = valid_dataset_list[idx]
adjs, inputs, targets = valid_dataset.get_tensors()
output = model(inputs, adjs)
targets = targets.clone().detach().numpy()
output = output.clone().detach().numpy()
output = 1 / (1 + np.exp(-output)) # sigmoid
output = output > 0.5
try:
roc_score += roc_auc_score(targets, output)
ap_score += average_precision_score(targets, output)
cnt += 1
except ValueError:
pass
roc_score /= cnt
ap_score /= cnt
return roc_score, ap_score
###Output
_____no_output_____
###Markdown
**ECFP - random forest model**
###Code
### make helper functions for ECFP featurization ###
# function for getting ECFP feature
# input: train, validation, or test dataset
def loadECFP(dataset):
# load data
ECFP_feat = []
ECFP_label = []
for x, y, w, smiles in dataset.itersamples():
ECFP_feat.append(x)
ECFP_label.append(y)
# return ECFP feature and its label (X___, Y___)
return np.array(ECFP_feat), np.array(ECFP_label)
# ECFP model building funcion
# Randomly splitted data are used
def getAUCfromRF_model(X_train, Y_train, X_test, Y_test):
# make randomforest classifier
clf=RandomForestClassifier(n_estimators=100, criterion='entropy')
# train with train set
clf.fit(X_train, Y_train)
fpr, tpr, thresholds = roc_curve(Y_test, clf.predict_proba(X_test)[:, 1])
# get auc score with test sets
AUC = auc(fpr, tpr)
return(AUC)
###Output
_____no_output_____
###Markdown
**Main routine**ours / HIV benchmark result<img src=https://drive.google.com/uc?export=download&id=18lhC9JhPrxVkbsFx6xAk1sydA6mjN4AW height="200"/>
###Code
# loop routine
seed_list = [33,84,35,31,48,42,59,53,54,51]
GCN_AUC_list = []
ECFP_AUC_list = []
for i in range(10):
# set random seed
seed = seed_list[i]
np.random.seed(seed)
torch.manual_seed(seed)
# load dataset
tasks, datasets, transformers = dc.molnet.load_bace_classification(featurizer='ECFP',
split='scaffold')
train_dataset, valid_dataset, test_dataset = datasets
###########################################################
# GCN section
###########################################################
# load lists
train_adj_list, train_feat_list, train_label_list = load_lists(train_dataset)
valid_adj_list, valid_feat_list, valid_label_list = load_lists(valid_dataset)
test_adj_list, test_feat_list, test_label_list = load_lists(test_dataset)
# construct datasets
train_batch_num, train_dataset_list = make_dataset(batch_size = hp['batch_size'],
adj_list=train_adj_list,
feat_list=train_feat_list,
label_list=train_label_list)
valid_batch_num, valid_dataset_list = make_dataset(batch_size = hp['batch_size'],
adj_list=valid_adj_list,
feat_list=valid_feat_list,
label_list=valid_label_list)
test_batch_num, test_dataset_list = make_dataset(batch_size = hp['batch_size'],
adj_list=test_adj_list,
feat_list=test_feat_list,
label_list=test_label_list)
# main training routine
# define model
num_features = train_feat_list[0].shape[1]
model = GCNModel(input_dim = num_features)
# set optimizer and loss
optimizer = optim.Adam(model.parameters(), lr=hp['learning_rate'])
criterion = nn.BCEWithLogitsLoss(reduction='mean')
# optimization
print("\ntrain start: %d" %(i+1))
for epoch in range(hp['epochs']):
t = time.time()
model.train()
train_roc_score, train_ap_score, epoch_loss = train(model,
train_batch_num,
train_dataset_list,
criterion)
model.eval()
roc_score, ap_score = evaluate(valid_batch_num, model, valid_dataset_list)
print("epoch:", '%02d' % (epoch + 1),
"train: [loss=", "{:.4f}".format(epoch_loss),
"roc=", "{:.4f}".format(train_roc_score),
"ap=", "{:.4f}".format(train_ap_score),
"] val: [roc=", "{:.4f}".format(roc_score),
"ap=", "{:.4f}".format(ap_score),
"] t=", "{:.4f}".format(time.time() - t))
print('optimization finished!')
model.eval()
roc_score, ap_score = evaluate(test_batch_num, model, test_dataset_list)
print('Test ROC score: {:.5f}'.format(roc_score))
print('Test AP score: {:.5f}'.format(ap_score))
GCN_AUC_list.append(roc_score)
del model, train_dataset_list, valid_dataset_list, test_dataset_list
del tasks, datasets, transformers
del train_adj_list, train_feat_list, train_label_list
del valid_adj_list, valid_feat_list, valid_label_list
del test_adj_list, test_feat_list, test_label_list
###########################################################
# ECFP - random forest section
###########################################################
# train, test data for ECFP
X_train_E, Y_train_E = loadECFP(train_dataset)
X_test_E, Y_test_E = loadECFP(test_dataset)
# AUC list for ECFP
ECFP_AUC = getAUCfromRF_model(X_train_E, Y_train_E, X_test_E, Y_test_E)
ECFP_AUC_list.append(ECFP_AUC)
del train_dataset, valid_dataset, test_dataset
del X_train_E, Y_train_E, X_test_E, Y_test_E
GCN_mean = np.mean(np.array(GCN_AUC_list))
GCN_std = np.std(np.array(GCN_AUC_list))
ECFP_mean = np.mean(np.array(ECFP_AUC_list))
ECFP_std = np.std(np.array(ECFP_AUC_list))
fig = plt.figure(figsize=(2, 4))
plt.title("test AUC score")
y = [GCN_mean, ECFP_mean]
x = ['GCN', 'ECFP - RF']
plt.bar(x, y, width=0.3, yerr = [GCN_std, ECFP_std])
plt.ylim((0, 1))
plt.ylabel("ROC-AUC")
plt.show()
###Output
_____no_output_____ |
Currency Converter/.ipynb_checkpoints/main-checkpoint.ipynb | ###Markdown
Python Project on Currency Converter
###Code
import requests
from tkinter import *
class CurrencyConverter():
def __init__(self,url):
self.data= requests.get(url).json()
self.currencies = self.data['rates']
def convert(self, from_currency, to_currency, amount):
initial_amount = amount
if from_currency != 'USD' :
amount = amount / self.currencies[from_currency]
# limiting the precision to 2 decimal places
amount = round(amount * self.currencies[to_currency], 2)
return amount
class CurrencyConverterApp():
def __init__(self,root,converter):
self.root =root
        root.title('Currency Converter')
self.converter = converter
def create_converter(self):
self.variable1 = StringVar(root)
self.variable2 = StringVar(root)
# initialise the variables
self.variable1.set("INR")
self.variable2.set("USD")
# Set the background colour of GUI window
root.configure(background = 'purple')
# Set the configuration of GUI window (WidthxHeight)
root.geometry("400x320")
# Create welcome to Real Time Currency Convertor label
headlabel = Label(root, text = 'Welcome to Real Time Currency Convertor', fg = 'white', bg = "black")
headlabel.config(font = ('Courier',10,'bold'))
# Create a 'DESC' label
label_desc = Label(root, text = f"1 Indian Rupee equals = {self.converter.convert('INR','USD',1)} USD \n Date : {self.converter.data['date']}")
# Create Entry box
self.Amount = Entry(root,bd = 3)
self.converted_amount_field = Entry(root,bd=3)
self.Amount.insert(0, 1)
self.converted_amount_field.insert(0,0.013)
# Create a dropdown
from_curr_options = OptionMenu(root, self.variable1, *converter.currencies.keys())
from_curr_options.config(width=10, font=('Courier', 10,'bold'), bg = 'yellow', fg = 'black')
from_curr_options.pack()
to_curr_options = OptionMenu(root, self.variable2, *converter.currencies.keys())
to_curr_options.config(width=10, font=('Courier', 10,'bold'), bg = 'red', fg = 'black')
to_curr_options.pack()
# Placing on screen
headlabel.place(x=50, y=10)
label_desc.place(x = 110, y= 50)
self.Amount.place(x = 60, y= 120)
self.converted_amount_field.place(x = 60,y = 160)
from_curr_options.place(x = 220 , y = 115)
to_curr_options.place(x = 220 , y = 155)
self.button1 = Button(root, text = "Convert", fg = "black", command = self.perform)
self.button1.config(font=('Courier', 15, 'bold'))
self.button1.place(x = 150, y = 200)
def perform(self,):
amount = float(self.Amount.get())
from_curr = self.variable1.get()
to_curr = self.variable2.get()
converted_amount = self.converter.convert(from_curr,to_curr,amount)
converted_amount = round(converted_amount, 2)
self.converted_amount_field.delete(0,END)
self.converted_amount_field.insert(0,converted_amount)
if __name__ == '__main__':
url = 'https://api.exchangerate-api.com/v4/latest/USD'
converter = CurrencyConverter(url)
root = Tk()
Converter = CurrencyConverterApp(root,converter)
Converter.create_converter()
root.mainloop()
###Output
_____no_output_____ |
c4_convolutional_neural_networks/week_13/neural_style_transfer/Art_Generation_with_Neural_Style_Transfer_v3a.ipynb | ###Markdown
Deep Learning & Art: Neural Style TransferIn this assignment, you will learn about Neural Style Transfer. This algorithm was created by [Gatys et al. (2015).](https://arxiv.org/abs/1508.06576)**In this assignment, you will:**- Implement the neural style transfer algorithm - Generate novel artistic images using your algorithm Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values! Updates If you were working on the notebook before this update...* The current notebook is version "3a".* You can find your original work saved in the notebook with the previous version name ("v2") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* Use `pprint.PrettyPrinter` to format printing of the vgg model.* computing content cost: clarified and reformatted instructions, fixed broken links, added additional hints for unrolling.* style matrix: clarify two uses of variable "G" by using different notation for gram matrix.* style cost: use distinct notation for gram matrix, added additional hints.* Grammar and wording updates for clarity.* `model_nn`: added hints.
###Code
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
import pprint
import imageio
%matplotlib inline
###Output
_____no_output_____
###Markdown
1 - Problem StatementNeural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely: a **"content" image (C) and a "style" image (S), to create a "generated" image (G**). The generated image G combines the "content" of the image C with the "style" of image S. In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by Claude Monet, a leader of the impressionist movement (style image S).Let's see how you can do this. 2 - Transfer LearningNeural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning. Following the [original NST paper](https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the shallower layers) and high level features (at the deeper layers). Run the following code to load parameters from the VGG model. This may take a few seconds.
###Code
pp = pprint.PrettyPrinter(indent=4)
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
pp.pprint(model)
###Output
WARNING:tensorflow:From /Users/jmadrid/Documents/machine_learning/deep_learning_specialization/c4_convolutional_neural_networks/week_13/neural_style_transfer/nst_utils.py:124: The name tf.nn.avg_pool is deprecated. Please use tf.nn.avg_pool2d instead.
{ 'avgpool1': <tf.Tensor 'AvgPool:0' shape=(1, 150, 200, 64) dtype=float32>,
'avgpool2': <tf.Tensor 'AvgPool_1:0' shape=(1, 75, 100, 128) dtype=float32>,
'avgpool3': <tf.Tensor 'AvgPool_2:0' shape=(1, 38, 50, 256) dtype=float32>,
'avgpool4': <tf.Tensor 'AvgPool_3:0' shape=(1, 19, 25, 512) dtype=float32>,
'avgpool5': <tf.Tensor 'AvgPool_4:0' shape=(1, 10, 13, 512) dtype=float32>,
'conv1_1': <tf.Tensor 'Relu:0' shape=(1, 300, 400, 64) dtype=float32>,
'conv1_2': <tf.Tensor 'Relu_1:0' shape=(1, 300, 400, 64) dtype=float32>,
'conv2_1': <tf.Tensor 'Relu_2:0' shape=(1, 150, 200, 128) dtype=float32>,
'conv2_2': <tf.Tensor 'Relu_3:0' shape=(1, 150, 200, 128) dtype=float32>,
'conv3_1': <tf.Tensor 'Relu_4:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_2': <tf.Tensor 'Relu_5:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_3': <tf.Tensor 'Relu_6:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_4': <tf.Tensor 'Relu_7:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv4_1': <tf.Tensor 'Relu_8:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_2': <tf.Tensor 'Relu_9:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_3': <tf.Tensor 'Relu_10:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_4': <tf.Tensor 'Relu_11:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv5_1': <tf.Tensor 'Relu_12:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_2': <tf.Tensor 'Relu_13:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_3': <tf.Tensor 'Relu_14:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_4': <tf.Tensor 'Relu_15:0' shape=(1, 19, 25, 512) dtype=float32>,
'input': <tf.Variable 'Variable:0' shape=(1, 300, 400, 3) dtype=float32_ref>}
###Markdown
* The model is stored in a python dictionary. * The python dictionary contains key-value pairs for each layer. * The 'key' is the variable name and the 'value' is a tensor for that layer. Assign input image to the model's input layerTo run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this: ```pythonmodel["input"].assign(image)```This assigns the image as an input to the model. Activate a layerAfter this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows: ```pythonsess.run(model["conv4_2"])``` 3 - Neural Style Transfer (NST)We will build the Neural Style Transfer (NST) algorithm in three steps:- Build the content cost function $J_{content}(C,G)$- Build the style cost function $J_{style}(S,G)$- Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$. 3.1 - Computing the content costIn our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre.
###Code
content_image = imageio.imread("images/louvre.jpg")
imshow(content_image);
###Output
_____no_output_____
###Markdown
The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.** 3.1.1 - Make generated image G match the content of image C** Shallower versus deeper layers* The shallower layers of a ConvNet tend to detect lower-level features such as edges and simple textures.* The deeper layers tend to detect higher-level features such as more complex textures as well as object classes. Choose a "middle" activation layer $a^{[l]}$We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. * In practice, you'll get the most visually pleasing results if you choose a layer in the **middle** of the network--neither too shallow nor too deep. * (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.) Forward propagate image "C"* Set the image C as the input to the pretrained VGG network, and run forward propagation. * Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be an $n_H \times n_W \times n_C$ tensor. Forward propagate image "G"* Repeat this process with the image G: Set G as the input, and run forward progation. * Let $a^{(G)}$ be the corresponding hidden layer activation. Content Cost Function $J_{content}(C,G)$We will define the content cost function as:$$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$* Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. * For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the 3D volumes corresponding to a hidden layer's activations. * In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below.* Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style cost $J_{style}$. **Exercise:** Compute the "content cost" using TensorFlow. **Instructions**: The 3 steps to implement this function are:1. Retrieve dimensions from `a_G`: - To retrieve dimensions from a tensor `X`, use: `X.get_shape().as_list()`2. Unroll `a_C` and `a_G` as explained in the picture above - You'll likey want to use these functions: [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).3. Compute the content cost: - You'll likely want to use these functions: [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract). 
Additional Hints for "Unrolling"* To unroll the tensor, we want the shape to change from $(m,n_H,n_W,n_C)$ to $(m, n_H \times n_W, n_C)$.* `tf.reshape(tensor, shape)` takes a list of integers that represent the desired output shape.* For the `shape` parameter, a `-1` tells the function to choose the correct dimension size so that the output tensor still contains all the values of the original tensor.* So tf.reshape(a_C, shape=[m, n_H * n_W, n_C]) gives the same result as tf.reshape(a_C, shape=[m, -1, n_C]).* If you prefer to re-order the dimensions, you can use `tf.transpose(tensor, perm)`, where `perm` is a list of integers containing the original index of the dimensions. * For example, `tf.transpose(a_C, perm=[0,3,1,2])` changes the dimensions from $(m, n_H, n_W, n_C)$ to $(m, n_C, n_H, n_W)$.* There is more than one way to unroll the tensors.* Notice that it's not necessary to use tf.transpose to 'unroll' the tensors in this case but this is a useful function to practice and understand for other situations that you'll encounter.
###Code
# GRADED FUNCTION: compute_content_cost
def compute_content_cost(a_C, a_G):
"""
Computes the content cost
Arguments:
a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G
Returns:
J_content -- scalar that you compute using equation 1 above.
"""
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G (≈2 lines)
a_C_unrolled = tf.reshape(a_C, shape=[m, -1, n_C])
a_G_unrolled = tf.reshape(a_G, shape=[m, -1, n_C])
# compute the cost with tensorflow (≈1 line)
J_content = 1 / (4 * n_H * n_W * n_C) * tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled)))
### END CODE HERE ###
return J_content
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
###Output
J_content = 6.7655926
###Markdown
**Expected Output**: **J_content** 6.76559 What you should remember- The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are. - When we minimize the content cost later, this will help make sure $G$ has similar content as $C$. 3.2 - Computing the style costFor our running example, we will use the following style image:
###Code
style_image = imageio.imread("images/monet_800600.jpg")
imshow(style_image);
###Output
_____no_output_____
###Markdown
This was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.Lets see how you can now define a "style" cost function $J_{style}(S,G)$. 3.2.1 - Style matrix Gram matrix* The style matrix is also called a "Gram matrix." * In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. * In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large. Two meanings of the variable $G$* Note that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature. * $G$ is used to denote the Style matrix (or Gram matrix) * $G$ also denotes the generated image. * For this assignment, we will use $G_{gram}$ to refer to the Gram matrix, and $G$ to denote the generated image. Compute $G_{gram}$In Neural Style Transfer (NST), you can compute the Style matrix by multiplying the "unrolled" filter matrix with its transpose:$$\mathbf{G}_{gram} = \mathbf{A}_{unrolled} \mathbf{A}_{unrolled}^T$$ $G_{(gram)i,j}$: correlationThe result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters (channels). The value $G_{(gram)i,j}$ measures how similar the activations of filter $i$ are to the activations of filter $j$. $G_{(gram),i,i}$: prevalence of patterns or textures* The diagonal elements $G_{(gram)ii}$ measure how "active" a filter $i$ is. * For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{(gram)ii}$ measures how common vertical textures are in the image as a whole.* If $G_{(gram)ii}$ is large, this means that the image has a lot of vertical texture. By capturing the prevalence of different types of features ($G_{(gram)ii}$), as well as how much different features occur together ($G_{(gram)ij}$), the Style matrix $G_{gram}$ measures the style of an image. **Exercise**:* Using TensorFlow, implement a function that computes the Gram matrix of a matrix A. * The formula is: The gram matrix of A is $G_A = AA^T$. * You may use these functions: [matmul](https://www.tensorflow.org/api_docs/python/tf/matmul) and [transpose](https://www.tensorflow.org/api_docs/python/tf/transpose).
###Code
# GRADED FUNCTION: gram_matrix
def gram_matrix(A):
"""
Argument:
A -- matrix of shape (n_C, n_H*n_W)
Returns:
GA -- Gram matrix of A, of shape (n_C, n_C)
"""
### START CODE HERE ### (≈1 line)
GA = tf.matmul(A, A, transpose_b=True)
### END CODE HERE ###
return GA
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2*1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = \n" + str(GA.eval()))
###Output
GA =
[[ 6.422305 -4.429122 -2.096682]
[-4.429122 19.465837 19.563871]
[-2.096682 19.563871 20.686462]]
###Markdown
**Expected Output**: **GA** [[ 6.42230511 -4.42912197 -2.09668207] [ -4.42912197 19.46583748 19.56387138] [ -2.09668207 19.56387138 20.6864624 ]] 3.2.2 - Style cost Your goal will be to minimize the distance between the Gram matrix of the "style" image S and the gram matrix of the "generated" image G. * For now, we are using only a single hidden layer $a^{[l]}$. * The corresponding style cost for this layer is defined as: $$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{(gram)i,j} - G^{(G)}_{(gram)i,j})^2\tag{2} $$* $G_{gram}^{(S)}$ Gram matrix of the "style" image.* $G_{gram}^{(G)}$ Gram matrix of the "generated" image.* Remember, this cost is computed using the hidden layer activations for a particular hidden layer in the network $a^{[l]}$ **Exercise**: Compute the style cost for a single layer. **Instructions**: The 3 steps to implement this function are:1. Retrieve dimensions from the hidden layer activations a_G: - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above (see the images in the sections "computing the content cost" and "style matrix"). - You may use [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).3. Compute the Style matrix of the images S and G. (Use the function you had previously written.) 4. Compute the Style cost: - You may find [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract) useful. Additional Hints* Since the activation dimensions are $(m, n_H, n_W, n_C)$ whereas the desired unrolled matrix shape is $(n_C, n_H*n_W)$, the order of the filter dimension $n_C$ is changed. So `tf.transpose` can be used to change the order of the filter dimension.* for the product $\mathbf{G}_{gram} = \mathbf{A}_{} \mathbf{A}_{}^T$, you will also need to specify the `perm` parameter for the `tf.transpose` function.
###Code
# GRADED FUNCTION: compute_layer_style_cost
def compute_layer_style_cost(a_S, a_G):
"""
Arguments:
a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G
Returns:
J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
"""
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
a_S = tf.transpose(tf.reshape(a_S, shape=[-1, n_C]))
a_G = tf.transpose(tf.reshape(a_G, shape=[-1, n_C]))
# Computing gram_matrices for both images S and G (≈2 lines)
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss (≈1 line)
J_style_layer = 1 / (4 * np.square(n_C) * np.square(n_H * n_W)) * tf.reduce_sum(tf.square(tf.subtract(GS, GG)))
### END CODE HERE ###
return J_style_layer
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
###Output
J_style_layer = 9.190278
###Markdown
**Expected Output**: **J_style_layer** 9.19028 3.2.3 Style Weights* So far you have captured the style from only one layer. * We'll get better results if we "merge" style costs from several different layers. * Each layer will be given weights ($\lambda^{[l]}$) that reflect how much each layer will contribute to the style.* After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$.* By default, we'll give each layer equal weight, and the weights add up to 1. ($\sum_{l}^L\lambda^{[l]} = 1$)
###Code
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
###Output
_____no_output_____
###Markdown
You can combine the style costs for different layers as follows:$$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`. Exercise: compute style cost* We've implemented a compute_style_cost(...) function. * It calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`. * Please read over it to make sure you understand what it's doing. Description of `compute_style_cost`For each layer:* Select the activation (the output tensor) of the current layer.* Get the style of the style image "S" from the current layer.* Get the style of the generated image "G" from the current layer.* Compute the "style cost" for the current layer* Add the weighted style cost to the overall style cost (J_style)Once you're done with the loop: * Return the overall style cost.
###Code
def compute_style_cost(model, STYLE_LAYERS):
"""
Computes the overall style cost from several chosen layers
Arguments:
model -- our tensorflow model
STYLE_LAYERS -- A python list containing:
- the names of the layers we would like to extract style from
- a coefficient for each of them
Returns:
J_style -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = compute_layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
###Output
_____no_output_____
###Markdown
**Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.<!-- How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers!--> What you should remember- The style of an image can be represented using the Gram matrix of a hidden layer's activations. - We get even better results by combining this representation from multiple different layers. - This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.- Minimizing the style cost will cause the image $G$ to follow the style of the image $S$. 3.3 - Defining the total cost to optimize Finally, let's create a cost function that minimizes both the style and the content cost. The formula is: $$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$**Exercise**: Implement the total cost function which includes both the content cost and the style cost.
###Code
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
"""
Computes the total cost function
Arguments:
J_content -- content cost coded above
J_style -- style cost coded above
alpha -- hyperparameter weighting the importance of the content cost
beta -- hyperparameter weighting the importance of the style cost
Returns:
J -- total cost as defined by the formula above.
"""
### START CODE HERE ### (≈1 line)
J = alpha * J_content + beta * J_style
### END CODE HERE ###
return J
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
###Output
J = 35.34667875478276
###Markdown
**Expected Output**: **J** 35.34667875478276 What you should remember- The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$.- $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style. 4 - Solving the optimization problem Finally, let's put everything together to implement Neural Style Transfer!Here's what the program will have to do:1. Create an Interactive Session2. Load the content image 3. Load the style image4. Randomly initialize the image to be generated 5. Load the VGG19 model7. Build the TensorFlow graph: - Run the content image through the VGG19 model and compute the content cost - Run the style image through the VGG19 model and compute the style cost - Compute the total cost - Define the optimizer and the learning rate8. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.Lets go through the individual steps in detail. Interactive SessionsYou've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$. * To do so, your program has to reset the graph and use an "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)". * Unlike a regular session, the "Interactive Session" installs itself as the default session to build a graph. * This allows you to run variables without constantly needing to refer to the session object (calling "sess.run()"), which simplifies the code. Start the interactive session.
###Code
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
###Output
_____no_output_____
###Markdown
Content imageLet's load, reshape, and normalize our "content" image (the Louvre museum picture):
###Code
content_image = imageio.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
###Output
_____no_output_____
###Markdown
Style imageLet's load, reshape and normalize our "style" image (Claude Monet's painting):
###Code
style_image = imageio.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
###Output
_____no_output_____
###Markdown
Generated image correlated with content imageNow, we initialize the "generated" image as a noisy image created from the content_image.* The generated image is slightly correlated with the content image.* By initializing the pixels of the generated image to be mostly noise but slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. * Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.
###Code
generated_image = generate_noise_image(content_image)
imshow(generated_image[0]);
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
###Markdown
Load pre-trained VGG19 modelNext, as explained in part (2), let's load the VGG19 model.
###Code
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
###Output
_____no_output_____
###Markdown
Content CostTo get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:1. Assign the content image to be the input to the VGG model.2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2".3. Set a_G to be the tensor giving the hidden layer activation for the same layer. 4. Compute the content cost using a_C and a_G.**Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.
###Code
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
###Output
_____no_output_____
###Markdown
Style cost
###Code
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
###Output
_____no_output_____
###Markdown
Exercise: total cost* Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. * Use `alpha = 10` and `beta = 40`.
###Code
### START CODE HERE ### (1 line)
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
### END CODE HERE ###
###Output
_____no_output_____
###Markdown
Optimizer* Use the Adam optimizer to minimize the total cost `J`.* Use a learning rate of 2.0. * [Adam Optimizer documentation](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
###Code
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
###Output
_____no_output_____
###Markdown
Exercise: implement the model
* Implement the model_nn() function.
* The function **initializes** the variables of the tensorflow graph,
* **assigns** the input image (initial generated image) as the input of the VGG19 model
* and **runs** the `train_step` tensor (it was created in the code above this function) for a large number of steps.

Hints
* To initialize global variables, use this:
```Python
sess.run(tf.global_variables_initializer())
```
* Run `sess.run()` to evaluate a variable.
* [assign](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/assign) can be used like this:
```python
model["input"].assign(image)
```
###Code
def model_nn(sess, input_image, num_iterations = 200):
# Initialize global variables (you need to run the session on the initializer)
### START CODE HERE ### (1 line)
sess.run(tf.global_variables_initializer())
### END CODE HERE ###
# Run the noisy input image (initial generated image) through the model. Use assign().
### START CODE HERE ### (1 line)
sess.run(model["input"].assign(input_image))
### END CODE HERE ###
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
### START CODE HERE ### (1 line)
sess.run(train_step)
### END CODE HERE ###
# Compute the generated image by running the session on the current model['input']
### START CODE HERE ### (1 line)
generated_image = sess.run(model["input"])
### END CODE HERE ###
# Print every 20 iteration.
if i%20 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
# save current generated image in the "/output" directory
save_image("output/" + str(i) + ".png", generated_image)
# save last generated image
save_image('output/generated_image.jpg', generated_image)
return generated_image
###Output
_____no_output_____
###Markdown
Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
###Code
model_nn(sess, generated_image)
###Output
Iteration 0 :
total cost = 5050359300.0
content cost = 7877.628
style cost = 126257010.0
Iteration 20 :
total cost = 943289200.0
content cost = 15187.985
style cost = 23578434.0
Iteration 40 :
total cost = 484994880.0
content cost = 16783.08
style cost = 12120676.0
Iteration 60 :
total cost = 312619300.0
content cost = 17464.705
style cost = 7811116.0
Iteration 80 :
total cost = 228180690.0
content cost = 17715.86
style cost = 5700088.5
Iteration 100 :
total cost = 180770450.0
content cost = 17898.697
style cost = 4514786.5
Iteration 120 :
total cost = 150001810.0
content cost = 18034.877
style cost = 3745536.2
Iteration 140 :
total cost = 127735680.0
content cost = 18191.066
style cost = 3188844.2
Iteration 160 :
total cost = 110733200.0
content cost = 18345.844
style cost = 2763743.5
Iteration 180 :
total cost = 97375270.0
content cost = 18495.053
style cost = 2429758.0
|
examples/How-To.ipynb | ###Markdown
df-and-order how-to! What is df-and-order anyway? Using `df-and-order` your interactions with dataframes become very clean and predictable.Say you've been working on some project for one month already and you had a bunch of experiments. Your working directory ended up like this: data/ ├── raw_df_proj1.csv ├── raw_df_new_prj1.csv ├── cleaned_df_v1.csv ├── cleaned_df_the_best.csv ├── cleaned_df.csv └── cleaned_df_improved.csvLooks familiar? :) Except the namings it would be challenging to find how exactly those files were generated. How to reproduce the result? It'd be feasible to find the roots ( at least if you use some VCS ) yet very time-consuming.`df-and-order` was made to tackle these problems. In every task it always starts with some intial, commonly raw dataframe. It could be some logs, backend table etc. Then we come to play with it, transform it somehow to finally get a nice&clean dataframe. `df-and-order` assigns a config file to every raw dataframe. The config will contain all the useful metadata and more importantly: declaration of every transformation performed on the dataframe. Just by looking at the config file we would be able to say how some transformation was done. `df-and-order` assumes that you already have a dataframe to work with. ( unfortunately it can't provide it for you... )The only thing the lib wants you to do is to organize your dataframes in separate folders. The lib is config-based so it's nice to have a folder that contains all at once:- the initial dataframe - a config for it - all transformed variations of the initial dataframe.You should pick a unique identifier for each dataframe, it will serve as the folder name and the filename for the initial dataframe.Example of such structure: data/ ├── unique_df_id_1/ - folder with all artifacts for a df with id unique_df_id_1 │ ├── unique_df_id_1.csv - initial dataframe │ ├── df_config.yaml - contains metadata and declared transformations │ ├── transform_1_unique_df_id_1.csv - first transformed df │ └── transform_2_unique_df_id_1.csv - second transformed df ├── unique_df_id_2/ - same goes with other dataframes │ ├── ... │ └── ... └── unique_df_id_3/ ├── ... ├── ... ├── ... └── ... --- 0. We need a dataframe! We are going to create it by hand!
###Code
import pandas as pd
example_df = pd.DataFrame({
'num_col': [1,2,3,4,5],
'str_col': ['one', 'two', 'three', 'four', 'five'],
'date_col': ['2020-05-17', '2020-05-18', '2020-05-19', '2020-05-20', '2020-05-21'],
'redundant_col': [0, 0, 0, 0, 0]
})
example_df
###Output
_____no_output_____
###Markdown
What an amazing dataframe we have! Let's choose an id for our dataframe. It can be anything, but unique in your data folder.
###Code
example_df_id = 'super_demo_df_2020'
###Output
_____no_output_____
###Markdown
Now let's create a folder for it.
###Code
import os
df_folder_path = os.path.join('data', example_df_id)
if not os.path.exists(df_folder_path):
os.makedirs(df_folder_path)
###Output
_____no_output_____
###Markdown
The only thing left is to save our dataframe there.
###Code
filename = example_df_id + '.csv'
example_df.to_csv(os.path.join(df_folder_path, filename), index=False)
!ls -l data/$example_df_id
###Output
total 8
-rw-r--r-- 1 ilya.tyutin staff 138 Jul 9 20:40 super_demo_df_2020.csv
###Markdown
Hooray! Next step is to create a config file. 1. Config file Config file contains all metadata we find useful and all transformations needed as well.`DfReader` operates in your data folder and knows where to locate all dataframes and configs for them. We will create new config using `DfReader` instance.
###Code
import pandas as pd
# in case you've cloned the repo without installing the lib via pip
import sys
sys.path.append('../')
from df_and_order.df_reader import DfReader
from df_and_order.df_cache import DfCache
###Output
_____no_output_____
###Markdown
DfReader is able to work with any format you want by using `DfCache` subclasses. Each subclass provides the logic for saving/loading a dataframe. See the example below, where we create a simple pandas wrapper for saving/loading csv files:
###Code
class CsvDfCache(DfCache):
# just a basic wrapper around pandas csv built-in methods.
def _save(self, df: pd.DataFrame, path: str, *args, **kwargs):
df.to_csv(path, index=False, *args, **kwargs)
def _load(self, path: str, *args, **kwargs) -> pd.DataFrame:
return pd.read_csv(path, *args, **kwargs)
###Output
_____no_output_____
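The same pattern extends to other formats; for instance, a hypothetical Parquet wrapper (not part of this notebook, shown only to illustrate the `DfCache` interface) might look like:

```python
class ParquetDfCache(DfCache):
    # hypothetical example: same _save/_load contract as CsvDfCache, but for parquet files
    def _save(self, df: pd.DataFrame, path: str, *args, **kwargs):
        df.to_parquet(path, *args, **kwargs)

    def _load(self, path: str, *args, **kwargs) -> pd.DataFrame:
        return pd.read_parquet(path, *args, **kwargs)
```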
###Markdown
Just as I mentioned earlier, we first need an instance of `DfReader`.
###Code
# we must declare which format our dataframes saved in
df_format = 'csv'
# can be any path you want, in our case it's 'data' folder
dir_path = 'data/'
reader = DfReader(dir_path=dir_path, format_to_cache_map={
# DfReader now knows how to work with csv files.
df_format: CsvDfCache()
})
###Output
_____no_output_____
###Markdown
We are all set for now and ready to create a config!
###Code
# you may want to provide any additional information for describing a dataset
# here, as an example, we save the info about the dataset's author
metadata = {'author': 'Data Man'}
# the unique id we came up with above.
df_id = example_df_id
# other information is already available for us
reader.create_df_config(df_id=df_id, # config will store dataframe id as well
initial_df_format=df_format, # in which format initial dataframe is saved
metadata=metadata)
###Output
_____no_output_____
###Markdown
Done! let's take a look at the config file.
###Code
!cat data/$example_df_id/df_config.yaml
###Output
df_id: super_demo_df_2020
initial_df_format: csv
metadata:
author: Data Man
###Markdown
Simple as that. 2. Reading a dataframe
###Code
reader.read(df_id=df_id)
###Output
_____no_output_____
###Markdown
I started the section with the code right away because it's so simple and intuitive, no need for comments! :)You just tell `DfReader` a dataframe id and you get the dataframe right back. No more hardcoded paths and mixed up formats. Once you set up `DfReader` - everything just works. Close your eyes and imagine how beneficial it is when working in the same repository with many fellow colleagues. No more shared notebooks with hardcoded paths leading to who-knows-how generated dataframes. Still not convinced df-and-order is useful? Just watch! It's a good idea to hide all the logic behind your own subclass:
###Code
class AmazingDfReader(DfReader):
def __init__(self):
# while working in some repo, our data is usually stored in some specific
# place we can provide a path for. Ideally you should write some path generator
# to be able to run the code from any place in your repository.
dir_path = 'data'
reader = super().__init__(dir_path=dir_path, format_to_cache_map={
# here we list all the formats we want to work with
'csv': CsvDfCache()
})
###Output
_____no_output_____
###Markdown
Enjoy the next cell:
###Code
amazing_reader = AmazingDfReader()
amazing_reader.read(df_id=df_id)
###Output
_____no_output_____
###Markdown
Now you see how cool it is? Anybody can use AmazingDfReader across the codebase in a super clean way without bothering how it's configured! 3. Transforms Very often our initial dataframe is the raw one and needs to be transformed in some way. e.g. we want still need the initial dataframe since it contains some important information, nonetheless we can't use it to fit our model. No doubt, it requires some changes.`df-and-order` supports `in-memory` transformations as well as `permanent` ones. The only difference is that in the permanent case we store the resulting dataframe on disk next to the initial df. You can see a transformation as a combination of one or many steps.e.g. we may want to: - first drop column 'redundant_col' - then convert column 'date_col' from str to date Do it all in memory onlyBehind the scenes each step represents a class with the only one method called `transform`. It takes a df and returns a df. Here's the intuitive example: class DropColsTransformStep(DfTransformStep): """ Simply drops some undesired columns from a dataframe. """ def __init__(self, cols: List[str]): self._cols_to_drop = cols def transform(self, df: pd.DataFrame) -> pd.DataFrame: return df.drop(self._cols_to_drop, axis=1)Then we wrap it in the `DfTransformStepConfig` class that doesn't perform the transformation but rather just describes the step:The easiest way to initialize `DfTransformStepConfig` is by passing `DfTransformStep` subclass type along with the init parameters: DfTransformStepConfig.from_step_type(step_type=DropColsTransformStep, params={'cols': ['redundant_col']}), Important note here:`DfTransformStep` suclass should be stored in the separate python file, not in some notebook etc. Otherwise, `df-and-order` will not be able to locate it. Another way is to provide the full module path for your `DfTransformStep` suclass, including the class name. Choose whatever suits you. DfTransformStepConfig(module_path='df_and_order.steps.DropColsTransformStep', params={'cols': ['redundant_col']}),In both cases `params` will be passed to init method of the specified `DfTransformStep` suclass.All the transforms declarations will be translated to the config file. If it feels overwhelming, just follow the following example and everything will become clear: We want to remove `redundant_col` since it doesn't provide any useful information and we also need to convert `date_col` to datetime. Since our dataframe is quite small, we will do all the transformations in memory, without any intermediates.
###Code
from df_and_order.df_transform import DfTransformConfig
from df_and_order.df_transform_step import DfTransformStepConfig
from df_and_order.steps.pd import DropColsTransformStep, DatesTransformStep
# we describe all the steps required
in_memory_steps = [
DfTransformStepConfig.from_step_type(step_type=DropColsTransformStep,
params={'cols': ['redundant_col']}),
DfTransformStepConfig.from_step_type(step_type=DatesTransformStep,
params={'cols': ['date_col']})
]
# arbitrary unique id for our transformation
example_transform_id = 'model_input'
# here's the instance of our entire transform
example_transform = DfTransformConfig(transform_id=example_transform_id,
df_format=df_format,
in_memory_steps=in_memory_steps)
transformed_df = amazing_reader.read(df_id=df_id,
transform=example_transform)
transformed_df
transformed_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 num_col 5 non-null int64
1 str_col 5 non-null object
2 date_col 5 non-null datetime64[ns]
dtypes: datetime64[ns](1), int64(1), object(1)
memory usage: 248.0+ bytes
###Markdown
**Pretty rad, isn't it?** Our transform is now visible in the config:
###Code
!cat data/$example_df_id/df_config.yaml
###Output
df_id: super_demo_df_2020
initial_df_format: csv
metadata:
author: Data Man
transforms:
model_input:
df_format: csv
in_memory:
- module_path: df_and_order.steps.pd.DropColsTransformStep
params:
cols:
- redundant_col
- module_path: df_and_order.steps.pd.DatesTransformStep
params:
cols:
- date_col
###Markdown
Note: you are free to edit the config file manually as well! Once a transform is declared in the config file you can just pass `transform_id` to the `DfReader.read` method. See:
###Code
amazing_reader.read(df_id=df_id, transform_id=example_transform_id)
###Output
_____no_output_____
###Markdown
Maybe you want to switch to your initial dataframe? No problem! Just don't pass `transform_id`.
###Code
initial_df = amazing_reader.read(df_id=df_id)
initial_df
initial_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 num_col 5 non-null int64
1 str_col 5 non-null object
2 date_col 5 non-null object
3 redundant_col 5 non-null int64
dtypes: int64(2), object(2)
memory usage: 288.0+ bytes
###Markdown
Finally, let's cover the case when we want to persist a transform's result. It's a good idea to remove `redundant_col` once and for all.
###Code
# we describe all the steps required
in_memory_steps = [
DfTransformStepConfig.from_step_type(step_type=DatesTransformStep,
params={'cols': ['date_col']})
]
# let's just move DropColsTransformStep from in_memory to permanent steps
permanent_steps = [
DfTransformStepConfig.from_step_type(step_type=DropColsTransformStep,
params={'cols': ['redundant_col']}),
]
# arbitrary unique id for our transformation
permanent_transform_id = 'model_input_permanent'
# here's the instance of our entire transform
permanent_transform = DfTransformConfig(transform_id=permanent_transform_id,
df_format=df_format,
in_memory_steps=in_memory_steps,
permanent_steps=permanent_steps)
final_df = amazing_reader.read(df_id=df_id,
transform=permanent_transform)
final_df
!cat data/$example_df_id/df_config.yaml
!ls -l data/$example_df_id/
###Output
total 24
-rw-r--r-- 1 ilya.tyutin staff 657 Jul 9 20:41 df_config.yaml
-rw-r--r-- 1 ilya.tyutin staff 114 Jul 9 20:41 model_input_permanent_super_demo_df_2020.csv
-rw-r--r-- 1 ilya.tyutin staff 138 Jul 9 20:40 super_demo_df_2020.csv
###Markdown
Notice that we now have `model_input_permanent_super_demo_df_2020.csv` file stored to the disk.Every time after calling `read` with the transform_id - it recovers from the file.
###Code
amazing_reader.read(df_id=df_id,
transform=permanent_transform)
###Output
_____no_output_____
###Markdown
Important note: `in-memory` transforms run every time you read a dataframe, no matter whether it was stored on the disk or not. That's it, now you are ready to try df-and-order's power in your own projects. Some advanced stuff: reacting to changes in the transformations codebase. Obviously, even having all the transformation steps declared in the config file doesn't prevent us from changing the code of those step subclasses. Once a step is changed, we have an outdated transformed dataframe on the disk. `df-and-order` has a built-in safety mechanism for avoiding such cases. It compares the creation date of the persisted dataframe with the last modification date of any of the permanent steps, meaning that if a permanent step we used to transform the dataframe was changed afterwards, we can no longer use it. This is crucial while working in the same repo with others: all your team members must read the same dataframe using the same config. Example:
###Code
from example_steps.steps import DummyTransformStep
!cat example_steps/steps.py
###Output
from df_and_order.df_transform_step import DfTransformStep
class DummyTransformStep(DfTransformStep):
def transform(self, df):
return df
###Markdown
The transform above does literally nothing, but bear with me.
###Code
permanent_steps = [
DfTransformStepConfig.from_step_type(step_type=DummyTransformStep, params={})
]
dummy_transform_id = 'dummy'
dummy_transform = DfTransformConfig(transform_id=dummy_transform_id,
df_format=df_format,
permanent_steps=permanent_steps)
amazing_reader.read(df_id=df_id,
transform=dummy_transform)
!cat data/super_demo_df_2020/df_config.yaml
!ls -l data/super_demo_df_2020/
###Output
total 32
-rw-r--r-- 1 ilya.tyutin staff 758 Jul 9 20:41 df_config.yaml
-rw-r--r-- 1 ilya.tyutin staff 138 Jul 9 20:41 dummy_super_demo_df_2020.csv
-rw-r--r-- 1 ilya.tyutin staff 114 Jul 9 20:41 model_input_permanent_super_demo_df_2020.csv
-rw-r--r-- 1 ilya.tyutin staff 138 Jul 9 20:40 super_demo_df_2020.csv
###Markdown
Nothing new so far. But now let's change the transform step file.
###Code
with open('example_steps/steps.py', "a") as file:
file.write('\n')
###Output
_____no_output_____
###Markdown
If we then try to read the transformed dataframe - it crashes since the code of our dummy step was modified after the dataframe was persisted.
###Code
amazing_reader.read(df_id=df_id, transform_id=dummy_transform_id)
###Output
_____no_output_____
###Markdown
There are two ways to deal with it. First one is to force the read operation by passing `forced=True`:
###Code
amazing_reader.read(df_id=df_id, transform_id=dummy_transform_id, forced=True)
###Output
Warning: ['example_steps.steps.DummyTransformStep'] steps of dummy transform were changed since the df was generated, reading it anyway because the operation was forced
Warning: ['example_steps.steps.DummyTransformStep'] steps of dummy transform were changed since the df was generated, reading it anyway because the operation was forced
###Markdown
It can save you time when you are sure that your data will be consistent with your expectations, yet this way is certainly not recommended. Yes, it can be annoying to get such an error after some minor changes, e.g. something was renamed or blank lines were removed. But it's better to get an error than an outdated, wrong dataframe. If we remove the file and try again, everything works just fine.
###Code
!rm data/$example_df_id/dummy_super_demo_df_2020.csv
amazing_reader.read(df_id=df_id, transform_id=dummy_transform_id)
###Output
_____no_output_____
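As an aside, the staleness check described above essentially boils down to comparing file timestamps. A minimal sketch of the idea (an assumption about the mechanism, not the library's actual code) could look like this:

```python
import importlib, os

def step_changed_since(df_path: str, step_module_path: str) -> bool:
    # Compare when the transformed dataframe was written against when the
    # module that defines the permanent step was last modified.
    module_name = '.'.join(step_module_path.split('.')[:-1])
    module_file = importlib.import_module(module_name).__file__
    return os.path.getmtime(module_file) > os.path.getmtime(df_path)
```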
###Markdown
Note on in-memory transforms: if your transform consists of both in-memory and permanent steps, your in-memory steps are not allowed to change the shape of the df. Remember, in-memory steps are applied every time you read a dataframe.
###Code
# made up example when we remove some cols in memory and then perform
# some permanent transform step that will cause our dataframe to be persisted
in_memory_steps = [
DfTransformStepConfig.from_step_type(step_type=DropColsTransformStep,
params={'cols': ['redundant_col']}),
]
permanent_steps = [
DfTransformStepConfig.from_step_type(step_type=DatesTransformStep,
params={'cols': ['date_col']})
]
# arbitrary unique id for our transformation
bad_in_memory_transform_id = 'bad_in_memory'
# here's the instance of our entire transform
bad_in_memory_transform = DfTransformConfig(transform_id=bad_in_memory_transform_id,
df_format='csv',
in_memory_steps=in_memory_steps,
permanent_steps=permanent_steps)
amazing_reader.read(df_id=df_id, transform=bad_in_memory_transform)
###Output
_____no_output_____ |
Hive/12-1 Helpdesk - Easy.ipynb | ###Markdown
Help Desk - Easy

Scenario: A software company has been successful in selling its products to a number of customer organisations, and there is now a high demand for technical support. There is already a system in place for logging support calls taken over the telephone and assigning them to engineers, but it is based on a series of spreadsheets. With the growing volume of data, using the spreadsheet system is becoming slow, and there is a significant risk that errors will be made.
###Code
# Prerequesites
from pyhive import hive
%load_ext sql
%sql hive://[email protected]:10000/sqlzoo
%config SqlMagic.displaylimit = 20
###Output
_____no_output_____
###Markdown
1.There are three issues that include the words "index" and "Oracle". Find the call_date for each of them```+---------------------+----------+| call_date | call_ref |+---------------------+----------+| 2017-08-12 16:00:00 | 1308 || 2017-08-16 14:54:00 | 1697 || 2017-08-16 19:12:00 | 1731 |+---------------------+----------+```
###Code
%%sql
SELECT Call_date, Call_ref
FROM Issue
WHERE Detail LIKE '%index%' AND Detail LIKE '%Oracle%'
###Output
* hive://[email protected]:10000/sqlzoo
Done.
###Markdown
2.Samantha Hall made three calls on 2017-08-14. Show the date and time for each```+---------------------+------------+-----------+| call_date | first_name | last_name |+---------------------+------------+-----------+| 2017-08-14 10:10:00 | Samantha | Hall || 2017-08-14 10:49:00 | Samantha | Hall || 2017-08-14 18:18:00 | Samantha | Hall |+---------------------+------------+-----------+```
###Code
%%sql
SELECT Call_date, First_name, Last_name
FROM Issue JOIN Caller ON (Issue.Caller_id=Caller.Caller_id)
WHERE First_name='Samantha' AND Last_name='Hall' AND DATE_FORMAT(Call_date, 'yyyy-MM-dd')='2017-08-14'
###Output
* hive://[email protected]:10000/sqlzoo
Done.
###Markdown
3.There are 500 calls in the system (roughly). Write a query that shows the number that have each status.```+--------+--------+| status | Volume |+--------+--------+| Closed | 486 || Open | 10 |+--------+--------+```
###Code
%%sql
SELECT Status, COUNT(1) Volume FROM Issue
GROUP BY Status
###Output
* hive://[email protected]:10000/sqlzoo
Done.
###Markdown
4.Calls are not normally assigned to a manager but it does happen. How many calls have been assigned to staff who are at Manager Level?```+------+| mlcc |+------+| 51 |+------+```
###Code
%%sql
SELECT COUNT(1) mlcc
FROM Issue JOIN Staff ON (Issue.Assigned_to=Staff.Staff_code)
JOIN Level ON (Staff.Level_code=Level.Level_code)
WHERE Manager='Y'
###Output
* hive://[email protected]:10000/sqlzoo
Done.
###Markdown
5.Show the manager for each shift. Your output should include the shift date and type; also the first and last name of the manager.```+------------+------------+------------+-----------+| Shift_date | Shift_type | first_name | last_name |+------------+------------+------------+-----------+| 2017-08-12 | Early | Logan | Butler || 2017-08-12 | Late | Ava | Ellis || 2017-08-13 | Early | Ava | Ellis || 2017-08-13 | Late | Ava | Ellis || 2017-08-14 | Early | Logan | Butler || 2017-08-14 | Late | Logan | Butler || 2017-08-15 | Early | Logan | Butler || 2017-08-15 | Late | Logan | Butler || 2017-08-16 | Early | Logan | Butler || 2017-08-16 | Late | Logan | Butler |+------------+------------+------------+-----------+```
###Code
%%sql
SELECT DISTINCT DATE_FORMAT(Shift_date, 'yyyy-MM-dd') Shift_date, Shift_type,
First_name, Last_name
FROM Shift JOIN Staff ON (Shift.Manager=Staff.Staff_code)
ORDER BY Shift_date, Shift_type
###Output
* hive://[email protected]:10000/sqlzoo
Done.
|
examples/PAINS_filter-Copy1.ipynb | ###Markdown
PAINS (pan-assay interference substances): during actual activity screening, there are some structural patterns that hit frequently and might seem promising at first glance. They do, however, have other issues that render them useless for pharma application. Sadly, this is often found only after some time and money has been invested. PAINS is a list of common time-waster patterns, and it is advisable to remove them from your virtual library. Better formulations at: https://www.nature.com/news/chemistry-chemical-con-artists-foil-drug-discovery-1.15991
Let's see how many DrugBank compounds would have gone down the drain if the PAINS filters were applied:
###Code
from rdkit import Chem
from rdkit.Chem import Draw
drugbank_input = Chem.SDMolSupplier('../data/drugbank.sdf')
drugbank = [m for m in drugbank_input if m]
with open('../data/PAINS/p_l15.txt') as p:
pains_l15 = [(Chem.MolFromSmarts(smarts), comment.lstrip('<regId="').split('(')[0]) for smarts, comment in (line.split() for line in p)]
with open('../data/PAINS/p_l150.txt') as p:
pains_l150 = [(Chem.MolFromSmarts(smarts), comment.lstrip('<regId="').split('(')[0]) for smarts, comment in (line.split() for line in p)]
with open('../data/PAINS/p_m150.txt') as p:
pains_m150 = [(Chem.MolFromSmarts(smarts), comment.lstrip('<regId="').split('(')[0]) for smarts, comment in (line.split() for line in p)]
len(pains_l15), len(pains_l150), len(pains_m150)
pains_all = pains_l15 + pains_l150 + pains_m150
drugbank_pains_all = [tuple((m.HasSubstructMatch(pain) for pain, comment in pains_all)) for m in drugbank]
len(drugbank_pains_all), drugbank_pains_all[0]
drugbank_pains_l150 = [tuple((m.HasSubstructMatch(pain) for pain, comment in pains_l150)) for m in drugbank]
drugbank_pains_m150 = [tuple((m.HasSubstructMatch(pain) for pain, comment in pains_m150)) for m in drugbank]
###Output
_____no_output_____
###Markdown
What DrugBank entries match some PAINS?
###Code
painsfails_all = [i for i, patterns in enumerate(drugbank_pains_all) if any(patterns)]
len(drugbank_pains_all), len(painsfails_all)
###Output
_____no_output_____
###Markdown
So, 16 compounds from DrugBank match at least one PAINS pattern from the combined PAINS sets. Which ones?
###Code
painsfails_mols = [drugbank[i] for i in painsfails_all]
painsfails_hits = [[pains_all[j][1] for j, match in enumerate(drugbank_pains_all[i]) if match] for i in painsfails_all]
painsfails_hits
Draw.MolsToGridImage(painsfails_mols,
legends=[", ".join(fails) for fails in painsfails_hits])
###Output
_____no_output_____ |
Model backlog/Train/89-melanoma-5fold-EfficientNetB6 step decay basic aug.ipynb | ###Markdown
Dependencies
###Code
#@title
!pip install --quiet efficientnet
# !pip install --quiet image-classifiers
#@title
import warnings, json, re, glob, math
from scripts_step_lr_schedulers import *
from melanoma_utility_scripts import *
# from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import KFold
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
from tensorflow.keras import optimizers, layers, metrics, losses, Model
import tensorflow_addons as tfa
import efficientnet.tfkeras as efn
# from classification_models.tfkeras import Classifiers
SEED = 42
seed_everything(SEED)
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
TPU configuration
###Code
#@title
strategy, tpu = set_up_strategy()
REPLICAS = strategy.num_replicas_in_sync
print("REPLICAS: ", REPLICAS)
AUTO = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Model parameters
###Code
#@title
config = {
"HEIGHT": 384,
"WIDTH": 384,
"CHANNELS": 3,
"BATCH_SIZE": 32,
"EPOCHS": 12,
"LEARNING_RATE": 3e-4,
"ES_PATIENCE": 5,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"TTA_STEPS": 11,
"BASE_MODEL": 'EfficientNetB6',
"BASE_MODEL_WEIGHTS": 'imagenet',
"DATASET_PATH": 'melanoma-384x384'
}
with open(MODEL_BASE_PATH + 'config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
config
###Output
_____no_output_____
###Markdown
Load data
###Code
#@title
database_base_path = COLAB_BASE_PATH + 'Data/'
train = pd.read_csv(database_base_path + 'train.csv')
test = pd.read_csv(database_base_path + 'test.csv')
print('Train samples: %d' % len(train))
display(train.head())
print(f'Test samples: {len(test)}')
display(test.head())
GCS_PATH = 'gs://kds-e73569ee9d44308363027e79908294593e80b1e12e18e57ef065397c'
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')
###Output
Train samples: 33126
###Markdown
Augmentations
###Code
#@title
def data_augment(image):
p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
if p_crop > .5:
image = tf.image.random_crop(image, size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])
else:
image = tf.image.central_crop(image, central_fraction=.9)
image = tf.image.resize(image, size=[config['HEIGHT'], config['WIDTH']])
image = transform_rotation(image, config['HEIGHT'], rotation=180.)
image = transform_shift(image, config['HEIGHT'], h_shift=8., w_shift=8.)
image = transform_shear(image, config['HEIGHT'], shear=2.)
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, 0.7, 1.3)
image = tf.image.random_contrast(image, 0.8, 1.2)
image = tf.image.random_brightness(image, 0.1)
return image
def data_augment_tta(image):
p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
if p_crop > .5:
image = tf.image.random_crop(image, size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])
else:
image = tf.image.central_crop(image, central_fraction=.9)
image = tf.image.resize(image, size=[config['HEIGHT'], config['WIDTH']])
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, 0.7, 1.3)
image = tf.image.random_contrast(image, 0.8, 1.2)
image = tf.image.random_brightness(image, 0.1)
return image
def data_augment_spatial(image):
p_spatial = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
return image
def data_augment_rotate(image):
p_rotate = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
if p_rotate > .66:
image = tf.image.rot90(image, k=3) # rotate 270º
elif p_rotate > .33:
image = tf.image.rot90(image, k=2) # rotate 180º
else:
image = tf.image.rot90(image, k=1) # rotate 90º
return image
def data_augment_crop(image):
p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
if p_crop > .5:
if p_crop > .9:
image = tf.image.random_crop(image, size=[int(config['HEIGHT']*.7), int(config['WIDTH']*.7), config['CHANNELS']])
elif p_crop > .7:
image = tf.image.random_crop(image, size=[int(config['HEIGHT']*.8), int(config['WIDTH']*.8), config['CHANNELS']])
else:
image = tf.image.random_crop(image, size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])
else:
if p_crop > .4:
image = tf.image.central_crop(image, central_fraction=.5)
elif p_crop > .2:
image = tf.image.central_crop(image, central_fraction=.7)
else:
image = tf.image.central_crop(image, central_fraction=.8)
image = tf.image.resize(image, size=[config['HEIGHT'], config['WIDTH']])
return image
def data_augment_cutout(image, min_mask_size=(int(config['HEIGHT'] * .05), int(config['HEIGHT'] * .05)),
max_mask_size=(int(config['HEIGHT'] * .25), int(config['HEIGHT'] * .25))):
p_cutout = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
if p_cutout > .9: # 3 cut outs
image = random_cutout(image, config['HEIGHT'], config['WIDTH'],
min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=3)
elif p_cutout > .75: # 2 cut outs
image = random_cutout(image, config['HEIGHT'], config['WIDTH'],
min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=2)
else: # 1 cut out
image = random_cutout(image, config['HEIGHT'], config['WIDTH'],
min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=1)
return image
###Output
_____no_output_____
###Markdown
Auxiliary functions
###Code
#@title
def read_labeled_tfrecord(example):
tfrec_format = {
'image' : tf.io.FixedLenFeature([], tf.string),
'image_name' : tf.io.FixedLenFeature([], tf.string),
'patient_id' : tf.io.FixedLenFeature([], tf.int64),
'sex' : tf.io.FixedLenFeature([], tf.int64),
'age_approx' : tf.io.FixedLenFeature([], tf.int64),
'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),
'diagnosis' : tf.io.FixedLenFeature([], tf.int64),
'target' : tf.io.FixedLenFeature([], tf.int64)
}
example = tf.io.parse_single_example(example, tfrec_format)
return example['image'], example['target']
def read_unlabeled_tfrecord(example, return_image_name):
tfrec_format = {
'image' : tf.io.FixedLenFeature([], tf.string),
'image_name' : tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, tfrec_format)
return example['image'], example['image_name'] if return_image_name else 0
def prepare_image(img, augment=None, dim=256):
img = tf.image.decode_jpeg(img, channels=3)
img = tf.cast(img, tf.float32) / 255.0
if augment:
img = augment(img)
img = tf.reshape(img, [dim,dim, 3])
return img
def get_dataset(files, augment=None, shuffle=False, repeat=False,
labeled=True, return_image_names=True, batch_size=16, dim=256):
ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO)
ds = ds.cache()
if repeat:
ds = ds.repeat()
if shuffle:
ds = ds.shuffle(1024*8)
opt = tf.data.Options()
opt.experimental_deterministic = False
ds = ds.with_options(opt)
if labeled:
ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO)
else:
ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names),
num_parallel_calls=AUTO)
ds = ds.map(lambda img, imgname_or_label: (prepare_image(img, augment=augment, dim=dim),
imgname_or_label),
num_parallel_calls=AUTO)
ds = ds.batch(batch_size * REPLICAS)
ds = ds.prefetch(AUTO)
return ds
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1))
for filename in filenames]
return np.sum(n)
###Output
_____no_output_____
###Markdown
Learning rate scheduler
###Code
#@title
lr_min = 1e-6
lr_start = 5e-6
lr_max = config['LEARNING_RATE']
steps_per_epoch = 24519 // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * steps_per_epoch
warmup_steps = steps_per_epoch * 5
hold_max_steps = 0
step_decay = .8
step_size = steps_per_epoch * 1
rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]
y = [step_schedule_with_warmup(tf.cast(x, tf.float32), step_size=step_size,
warmup_steps=warmup_steps, hold_max_steps=hold_max_steps,
lr_start=lr_start, lr_max=lr_max, step_decay=step_decay) for x in rng]
sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
###Output
Learning rate schedule: 5e-06 to 0.0003 to 7.86e-05
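For reference, a plain-Python sketch of what a step-decay schedule with linear warm-up like the one plotted above typically computes (the real `step_schedule_with_warmup` lives in `scripts_step_lr_schedulers.py` and works on TensorFlow tensors; this is only an approximation of its behaviour, reusing the parameter names from the call above):

```python
def step_schedule_with_warmup_sketch(step, step_size, warmup_steps, hold_max_steps,
                                     lr_start, lr_max, step_decay):
    if step < warmup_steps:
        # linear warm-up from lr_start up to lr_max
        return lr_start + (lr_max - lr_start) * step / warmup_steps
    if step < warmup_steps + hold_max_steps:
        # optionally hold the peak learning rate for a while
        return lr_max
    # afterwards, decay by a constant factor every `step_size` steps
    n_decays = (step - warmup_steps - hold_max_steps) // step_size
    return lr_max * (step_decay ** n_decays)
```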
###Markdown
Model
###Code
#@title
# Initial bias
pos = len(train[train['target'] == 1])
neg = len(train[train['target'] == 0])
initial_bias = np.log([pos/neg])
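# setting the output layer's bias to log(pos/neg) makes the model start out predicting the base rate of positives (a common trick for imbalanced data)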
print('Bias')
print(pos)
print(neg)
print(initial_bias)
# class weights
total = len(train)
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Class weight')
print(class_weight)
def model_fn(input_shape):
input_image = L.Input(shape=input_shape, name='input_image')
base_model = efn.EfficientNetB6(weights=config['BASE_MODEL_WEIGHTS'],
include_top=False)
x = base_model(input_image)
x = L.GlobalAveragePooling2D()(x)
output = L.Dense(1, activation='sigmoid', name='output',
bias_initializer=tf.keras.initializers.Constant(initial_bias))(x)
model = Model(inputs=input_image, outputs=output)
return model
###Output
_____no_output_____
###Markdown
Training
###Code
skf = KFold(n_splits=config['N_FOLDS'], shuffle=True, random_state=SEED)
oof_pred = []; oof_tar = []; oof_val = []; oof_names = []; oof_folds = []; history_list = []; oof_pred_light = []
preds = np.zeros((count_data_items(TEST_FILENAMES), 1))
preds_light = np.zeros((count_data_items(TEST_FILENAMES), 1))
for fold,(idxT,idxV) in enumerate(skf.split(np.arange(15))):
print('\nFOLD: %d' % (fold+1))
if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)
# CREATE TRAIN AND VALIDATION SUBSETS
TRAINING_FILENAMES = tf.io.gfile.glob([GCS_PATH + '/train%.2i*.tfrec'%x for x in idxT])
np.random.shuffle(TRAINING_FILENAMES)
files_valid = tf.io.gfile.glob([GCS_PATH + '/train%.2i*.tfrec'%x for x in idxV])
TEST_FILENAMES = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')))
ct_train = count_data_items(TRAINING_FILENAMES)
ct_valid = count_data_items(files_valid)
ct_test = count_data_items(TEST_FILENAMES)
STEPS_VALID = config['TTA_STEPS'] * ct_valid/config['BATCH_SIZE']/4/REPLICAS
STEPS_TEST = config['TTA_STEPS'] * ct_test/config['BATCH_SIZE']/4/REPLICAS
steps_per_epoch = ct_train // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * steps_per_epoch
warmup_steps = steps_per_epoch * 5
# BUILD MODEL
K.clear_session()
model_path = f'model_fold_{fold}.h5'
with strategy.scope():
model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS']))
lr = lambda: step_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32),
step_size=step_size, warmup_steps=warmup_steps,
hold_max_steps=hold_max_steps, lr_start=lr_start,
lr_max=lr_max, step_decay=step_decay)
optimizer = optimizers.Adam(learning_rate=lr)
model.compile(optimizer=optimizer, metrics=['AUC'],
loss=losses.BinaryCrossentropy(label_smoothing=0.05))
checkpoint = ModelCheckpoint((MODEL_BASE_PATH + model_path), monitor='val_loss', mode='min',
save_best_only=True, save_weights_only=True)
# TRAIN
print('Training...')
history = model.fit(get_dataset(TRAINING_FILENAMES, augment=data_augment, shuffle=True, repeat=True,
dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']),
validation_data=get_dataset(files_valid, augment=None, shuffle=False,
repeat=False, dim=config['HEIGHT']),
steps_per_epoch=steps_per_epoch//REPLICAS,
epochs=config['EPOCHS'],
callbacks=[checkpoint],
verbose=2).history
history_list.append(history)
# save last epoch weights
model.save_weights(MODEL_BASE_PATH + 'last_' + model_path)
print('Loading best model...')
model.load_weights(MODEL_BASE_PATH + model_path)
# PREDICT OOF USING TTA
print('Predicting OOF with TTA...')
ds_valid = get_dataset(files_valid, labeled=False, return_image_names=False, augment=data_augment, repeat=True,
shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4)
pred = model.predict(ds_valid, steps=STEPS_VALID, verbose=2)[:config['TTA_STEPS']*ct_valid,]
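# pred holds TTA_STEPS repeated passes over the validation set back to back; reshaping with order='F' puts each image's repeats in one row so the mean below averages over the TTA runs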
oof_pred.append(np.mean(pred.reshape((ct_valid, config['TTA_STEPS']), order='F'),axis=1) )
# PREDICT OOF USING TTA (light)
print('Predicting OOF with TTA (light)...')
ds_valid = get_dataset(files_valid, labeled=False, return_image_names=False, augment=data_augment_tta, repeat=True,
shuffle=False, dim=config['HEIGHT'], batch_size=config['BATCH_SIZE']*4)
pred = model.predict(ds_valid, steps=STEPS_VALID, verbose=2)[:config['TTA_STEPS']*ct_valid,]
oof_pred_light.append(np.mean(pred.reshape((ct_valid, config['TTA_STEPS']), order='F'),axis=1) )
# GET OOF TARGETS AND NAMES
ds_valid = get_dataset(files_valid, augment=None, repeat=False, dim=config['HEIGHT'],
labeled=True, return_image_names=True)
oof_tar.append(np.array([target.numpy() for img, target in iter(ds_valid.unbatch())]) )
oof_folds.append( np.ones_like(oof_tar[-1],dtype='int8')*fold )
ds = get_dataset(files_valid, augment=None, repeat=False, dim=config['HEIGHT'],
labeled=False, return_image_names=True)
oof_names.append(np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())]))
# PREDICT TEST USING TTA
print('Predicting Test with TTA...')
ds_test = get_dataset(TEST_FILENAMES,labeled=False,return_image_names=False,augment=data_augment,
repeat=True,shuffle=False,dim=config['HEIGHT'],batch_size=config['BATCH_SIZE']*4)
pred = model.predict(ds_test,steps=STEPS_TEST,verbose=2)[:config['TTA_STEPS']*ct_test,]
preds[:,0] += np.mean(pred.reshape((ct_test, config['TTA_STEPS']), order='F'),axis=1) * (1/config['N_USED_FOLDS'])
# PREDICT TEST USING TTA (light)
print('Predicting Test with TTA...')
ds_test = get_dataset(TEST_FILENAMES,labeled=False,return_image_names=False,augment=data_augment_tta,
repeat=True,shuffle=False,dim=config['HEIGHT'],batch_size=config['BATCH_SIZE']*4)
pred = model.predict(ds_test,steps=STEPS_TEST,verbose=2)[:config['TTA_STEPS']*ct_test,]
preds_light[:,0] += np.mean(pred.reshape((ct_test, config['TTA_STEPS']), order='F'),axis=1) * (1/config['N_USED_FOLDS'])
# REPORT RESULTS
auc = roc_auc_score(oof_tar[-1], oof_pred[-1])
auc_light = roc_auc_score(oof_tar[-1], oof_pred_light[-1])
oof_val.append(np.max(history['val_auc'] ))
print('#### FOLD %i OOF AUC without TTA = %.3f, with TTA = %.3f with TTA (light) = %.3f' %
(fold+1, oof_val[-1], auc, auc_light))
###Output
FOLD: 1
WARNING:tensorflow:TPU system grpc://10.97.97.234:8470 has already been initialized. Reinitializing the TPU can cause previously created variables on TPU to be lost.
###Markdown
Model loss graph
###Code
#@title
for n_fold, history in enumerate(history_list):
print(f'Fold: {n_fold + 1}')
##### Plot metrics #####
plt.figure(figsize=(15,5))
plt.plot(np.arange(config['EPOCHS']), history['auc'],'-o',label='Train AUC',color='#ff7f0e')
plt.plot(np.arange(config['EPOCHS']), history['val_auc'],'-o',label='Val AUC',color='#1f77b4')
x = np.argmax(history['val_auc'])
y = np.max(history['val_auc'])
xdist = plt.xlim()[1] - plt.xlim()[0]
ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#1f77b4')
plt.text(x-0.03*xdist,y-0.13*ydist,'max auc\n%.2f'%y,size=14)
plt.ylabel('AUC',size=14)
plt.xlabel('Epoch',size=14)
plt.legend(loc=2)
plt2 = plt.gca().twinx()
plt2.plot(np.arange(config['EPOCHS']), history['loss'],'-o',label='Train Loss',color='#2ca02c')
plt2.plot(np.arange(config['EPOCHS']), history['val_loss'],'-o',label='Val Loss',color='#d62728')
x = np.argmin(history['val_loss'])
y = np.min(history['val_loss'])
ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#d62728')
plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14)
plt.ylabel('Loss',size=14)
plt.title('FOLD %i - Image Size %i' % (n_fold+1, config['HEIGHT']), size=18)
plt.legend(loc=3)
plt.show()
###Output
Fold: 1
###Markdown
Model loss graph aggregated
###Code
#@title
plot_metrics_agg(history_list, config['N_USED_FOLDS'])
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
#@title
# COMPUTE OVERALL OOF AUC (light)
oof = np.concatenate(oof_pred_light)
true = np.concatenate(oof_tar)
names = np.concatenate(oof_names)
folds = np.concatenate(oof_folds)
auc = roc_auc_score(true, oof)
print('Overall OOF AUC with TTA (light) = %.3f' % auc)
# COMPUTE OVERALL OOF AUC
oof = np.concatenate(oof_pred)
true = np.concatenate(oof_tar)
names = np.concatenate(oof_names)
folds = np.concatenate(oof_folds)
auc = roc_auc_score(true, oof)
print('Overall OOF AUC with TTA = %.3f' % auc)
# SAVE OOF TO DISK
df_oof = pd.DataFrame(dict(image_name=names, target=true, pred=oof, fold=folds))
df_oof.to_csv('oof.csv', index=False)
df_oof.head()
###Output
Overall OOF AUC with TTA (light) = 0.899
Overall OOF AUC with TTA = 0.908
###Markdown
Visualize test predictions
###Code
#@title
ds = get_dataset(TEST_FILENAMES, augment=False, repeat=False, dim=config['HEIGHT'],
labeled=False, return_image_names=True)
image_names = np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())])
submission = pd.DataFrame(dict(image_name=image_names, target=preds[:,0], target_light=preds_light[:,0]))
submission = submission.sort_values('image_name')
print(f"Test predictions {len(submission[submission['target'] > .5])}|{len(submission[submission['target'] <= .5])}")
print('Top 10 samples')
display(submission.head(10))
print('Top 10 positive samples')
display(submission.query('target > .5').head(10))
fig = plt.subplots(figsize=(20, 5))
plt.hist(submission['target'], bins=100)
plt.show()
###Output
Test predictions 26|10956
Top 10 samples
###Markdown
Test set predictions
###Code
#@title
submission['target_blend'] = (submission['target'] * .5) + (submission['target_light'] * .5)
display(submission.head(10))
display(submission.describe().T)
submission[['image_name', 'target']].to_csv(SUBMISSION_PATH, index=False)
### LAST ###
submission_light = submission[['image_name', 'target_light']]
submission_light.columns = ['image_name', 'target']
submission_light.to_csv(SUBMISSION_LIGHT_PATH, index=False)
### BLEND ###
submission_blend = submission[['image_name', 'target_blend']]
submission_blend.columns = ['image_name', 'target']
submission_blend.to_csv(SUBMISSION_BLEND_PATH, index=False)
###Output
_____no_output_____ |
notebooks/numpy_basics.ipynb | ###Markdown
Numpy - multidimensional data arrays Original by J.R. Johansson (jrjohansson at gmail.com), modified for this courseThe latest version of this [IPython notebook](http://ipython.org/notebook.html) lecture is available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures).The other notebooks in this lecture series are indexed at [http://jrjohansson.github.io](http://jrjohansson.github.io).
###Code
# what is this line all about?!? Answer when we talk about plotting
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Introduction The `numpy` package (module) is used in almost all numerical computation using Python. It is a package that provides high-performance vector, matrix and higher-dimensional data structures for Python. It is implemented in C and Fortran, so when calculations are vectorized (formulated with vectors and matrices), performance is very good. To use `numpy` you need to import the module, for example:
###Code
import numpy as np # save 3 characters!!
###Output
_____no_output_____
###Markdown
In the `numpy` package the terminology used for vectors, matrices and higher-dimensional data sets is *array*. Creating `numpy` arrays There are a number of ways to initialize new numpy arrays, for example from* a Python list or tuples* using functions that are dedicated to generating numpy arrays, such as `arange`, `linspace`, etc.* reading data from files From lists For example, to create new vector and matrix arrays from Python lists we can use the `numpy.array` function.
###Code
# a vector: the argument to the array function is a Python list
v = np.array([1,2,3,4])
v
# a matrix: the argument to the array function is a nested Python list
M = np.array([[1, 2], [3, 4]])
M
###Output
_____no_output_____
###Markdown
The `v` and `M` objects are both of the type `ndarray` that the `numpy` module provides.
###Code
type(v), type(M)
###Output
_____no_output_____
###Markdown
The difference between the `v` and `M` arrays is only their shapes. We can get information about the shape of an array by using the `ndarray.shape` property.
###Code
v.shape
M.shape
###Output
_____no_output_____
###Markdown
The number of elements in the array is available through the `ndarray.size` property:
###Code
M.size
###Output
_____no_output_____
###Markdown
Equivalently, we could use the function `numpy.shape` and `numpy.size`
###Code
np.shape(M)
np.size(M)
###Output
_____no_output_____
###Markdown
So far the `numpy.ndarray` looks awfully much like a Python list (or nested list). Why not simply use Python lists for computations instead of creating a new array type? There are several reasons:
* Python lists are very general. They can contain any kind of object. They are dynamically typed. They do not support mathematical functions such as matrix and dot multiplications, etc. Implementing such functions for Python lists would not be very efficient because of the dynamic typing.
* Numpy arrays are **statically typed** and **homogeneous**. The type of the elements is determined when the array is created.
* Numpy arrays are memory efficient.
* Because of the static typing, mathematical functions such as multiplication and addition of `numpy` arrays can be implemented in a compiled language (C and Fortran are used).

Using the `dtype` (data type) property of an `ndarray`, we can see what type the data of an array has:
###Code
M.dtype
###Output
_____no_output_____
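To get a feel for why this matters in practice, a quick timing comparison between a pure Python loop and a vectorized numpy operation could look like this (an extra illustration, not part of the original lecture):

```python
import timeit
import numpy as np

py_list = list(range(1_000_000))
np_arr = np.arange(1_000_000)

t_list = timeit.timeit(lambda: [x * 2 for x in py_list], number=10)
t_arr = timeit.timeit(lambda: np_arr * 2, number=10)
print(f"list comprehension: {t_list:.3f}s, numpy: {t_arr:.3f}s")
```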
###Markdown
We get an error if we try to assign a value of the wrong type to an element in a numpy array:
###Code
M[0,0] = "hello"
###Output
_____no_output_____
###Markdown
If we want, we can explicitly define the type of the array data when we create it, using the `dtype` keyword argument:
###Code
M = np.array([[1, 2], [3, 4]], dtype=complex)
M
###Output
_____no_output_____
###Markdown
Common data types that can be used with `dtype` are: `int`, `float`, `complex`, `bool`, `object`, etc.We can also explicitly define the bit size of the data types, for example: `np.int64'`, `np.int16`, `np.float128`, `complex128`. Using array-generating functions For larger arrays it is inpractical to initialize the data manually, using explicit python lists. Instead we can use one of the many functions in `numpy` that generate arrays of different forms. Some of the more common are: arange
###Code
# create a range
x = np.arange(0, 10, 1) # arguments: start, stop, step
x
x = np.arange(-1, 1, 0.1)
x
###Output
_____no_output_____
###Markdown
linspace and logspace
###Code
# using linspace, both end points ARE included
np.linspace(0, 10, 25)
np.logspace(0, 10, 10, base=np.e)
###Output
_____no_output_____
###Markdown
mgrid
###Code
x, y = np.mgrid[0:5, 0:5] # similar to meshgrid in MATLAB
x
y
###Output
_____no_output_____
###Markdown
random data
###Code
from numpy import random
# uniform random numbers in [0,1]
random.rand(5,5)
# standard normal distributed random numbers
random.randn(5,5)
###Output
_____no_output_____
###Markdown
diag
###Code
# a diagonal matrix
np.diag([1,2,3])
# diagonal with offset from the main diagonal
np.diag([1,2,3], k=1)
###Output
_____no_output_____
###Markdown
zeros and ones
###Code
np.zeros((3,3))
np.ones((3,3))
###Output
_____no_output_____
###Markdown
File I/O Comma-separated values (CSV) A very common file format for data files is comma-separated values (CSV), or related formats such as TSV (tab-separated values). To read data from such files into Numpy arrays we can use the `numpy.genfromtxt` function. For example,
###Code
with open('stockholm_td_adj.dat', 'r') as fid:
for i, line in enumerate(fid):
print(line.strip())
if i > 5:
break
data = np.genfromtxt('stockholm_td_adj.dat')
data.shape
fig, ax = plt.subplots(figsize=(14,4))
ax.plot(data[:,0]+data[:,1]/12.0+data[:,2]/365, data[:,5])
ax.axis('tight')
ax.set_title('tempeatures in Stockholm')
ax.set_xlabel('year')
ax.set_ylabel('temperature (C)');
###Output
_____no_output_____
###Markdown
Using `numpy.savetxt` we can store a Numpy array to a file in CSV format:
###Code
M = random.rand(3,3)
M
np.savetxt("random-matrix.csv", M)
with open('random-matrix.csv', 'r') as fid:
for i, line in enumerate(fid):
print(line.strip())
if i > 5:
break
np.savetxt("random-matrix.csv", M, fmt='%.5f') # fmt specifies the format
!cat random-matrix.csv
###Output
0.31348 0.16179 0.72986
0.51429 0.92244 0.08114
0.69069 0.07507 0.93375
###Markdown
Numpy's native file format Useful when storing and reading back numpy array data. Use the functions `numpy.save` and `numpy.load`:
###Code
np.save("random-matrix.npy", M)
!dir random-matrix.npy
np.load("random-matrix.npy")
###Output
_____no_output_____
###Markdown
More properties of the numpy arrays
###Code
M.itemsize # bytes per element
M.nbytes # number of bytes
M.ndim # number of dimensions
###Output
_____no_output_____
###Markdown
Manipulating arrays Indexing We can index elements in an array using square brackets and indices:
###Code
# v is a vector, and has only one dimension, taking one index
v[0]
# M is a matrix, or a 2 dimensional array, taking two indices
M[1,1]
###Output
_____no_output_____
###Markdown
If we omit an index of a multidimensional array it returns the whole row (or, in general, a N-1 dimensional array)
###Code
M
M[1]
###Output
_____no_output_____
###Markdown
The same thing can be achieved with using `:` instead of an index:
###Code
M[1,:] # row 1
M[:,1] # column 1
###Output
_____no_output_____
###Markdown
We can assign new values to elements in an array using indexing:
###Code
M[0,0] = 1
M
# also works for rows and columns
M[1,:] = 0
M[:,2] = -1
M
###Output
_____no_output_____
###Markdown
Index slicing Index slicing is the technical name for the syntax `M[lower:upper:step]` to extract part of an array:
###Code
A = np.array([1,2,3,4,5])
A
A[1:3]
###Output
_____no_output_____
###Markdown
Array slices are *mutable*: if they are assigned a new value the original array from which the slice was extracted is modified:
###Code
A[1:3] = [-2,-3]
A
###Output
_____no_output_____
###Markdown
We can omit any of the three parameters in `M[lower:upper:step]`:
###Code
A[::] # lower, upper, step all take the default values
A[::2] # step is 2, lower and upper defaults to the beginning and end of the array
A[:3] # first three elements
A[3:] # elements from index 3
###Output
_____no_output_____
###Markdown
Negative indices count from the end of the array (positive indices from the beginning):
###Code
A = np.array([1,2,3,4,5])
A[-1]
A[-3:] # the last three elements
###Output
_____no_output_____
###Markdown
Index slicing works exactly the same way for multidimensional arrays:
###Code
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
A
# a block from the original array
A[1:4, 1:4]
A[4:6, 4:6]
# strides
A[::2, ::2]
###Output
_____no_output_____
###Markdown
Fancy indexing Fancy indexing is the name for when an array or list is used in-place of an index:
###Code
row_indices = [1, 2, 3]
A[row_indices]
col_indices = [1, 2, -1] # remember, index -1 means the last element
A[row_indices, col_indices]
###Output
_____no_output_____
###Markdown
We can also use index masks: If the index mask is an Numpy array of data type `bool`, then an element is selected (True) or not (False) depending on the value of the index mask at the position of each element:
###Code
B = np.array([n for n in range(5)])
B
row_mask = np.array([True, False, True, False, False])
B[row_mask]
# same thing
row_mask = np.array([1,0,1,0,0], dtype=bool)
B[row_mask]
###Output
_____no_output_____
###Markdown
This feature is very useful to conditionally select elements from an array, using for example comparison operators:
###Code
x = np.arange(0, 10, 0.5)
x
mask = (5 < x) * (x < 7.5)
print((5<x))
print((x<7.5))
mask
x[mask]
###Output
_____no_output_____
###Markdown
Functions for extracting data from arrays and creating arrays nonzero The index mask can be converted to position index using the `nonzero` function. Note that the output is a tuple
###Code
indices = np.nonzero(mask)
indices
x[indices] # this indexing is equivalent to the fancy indexing x[mask]
###Output
_____no_output_____
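A closely related helper is `np.where`, which with a single argument behaves just like `nonzero` (an extra note, not from the original lecture):

```python
np.where(mask)        # same tuple of index arrays as np.nonzero(mask)
x[np.where(mask)]     # same elements as x[mask]
```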
###Markdown
diag With the diag function we can also extract the diagonal and subdiagonals of an array:
###Code
print(A)
np.diag(A)
np.diag(A, -1)
###Output
_____no_output_____
###Markdown
take The `take` function is similar to fancy indexing described above:
###Code
v2 = np.arange(-3,3)
v2
row_indices = [1, 3, 5]
v2[row_indices] # fancy indexing
v2[np.array([True, False, True, False, True, False])] # a boolean mask must have the same length as the array
v2.take(row_indices)
###Output
_____no_output_____
###Markdown
But `take` also works on lists and other objects:
###Code
np.take([-3, -2, -1, 0, 1, 2], row_indices)
###Output
_____no_output_____
###Markdown
choose Constructs an array by picking elements from several arrays:
###Code
which = [1, 0, 1, 0]
choices = [[-2,-2,-2,-2], [5,5,5,5]]
np.choose(which, choices)
###Output
_____no_output_____
###Markdown
Linear algebra Vectorizing code is the key to writing efficient numerical calculations with Python/Numpy. That means that as much as possible of a program should be formulated in terms of matrix and vector operations, like matrix-matrix multiplication.
###Code
v1 = np.arange(0, 5)
v1 * 2
v1 + 2
A * 2, A + 2
###Output
_____no_output_____
###Markdown
Element-wise array-array operations When we add, subtract, multiply and divide arrays with each other, the default behaviour is **element-wise** operations:
###Code
A * A # element-wise multiplication
v1 * v1
###Output
_____no_output_____
###Markdown
If we multiply arrays with compatible shapes, the smaller array is replicated along the missing dimension and multiplied element-wise with each row of the larger one; this is called broadcasting:
###Code
print('shapes', A.shape, v1.shape)
print('A', A)
print ('v1',v1)
A * v1
###Output
_____no_output_____
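###Markdown
Broadcasting works along either axis. A short sketch (reusing `A` and `v1` from the cells above) of how adding an axis with `np.newaxis` controls which dimension gets scaled:
###Code
A * v1[np.newaxis, :]  # identical to A * v1: column j of A is scaled by v1[j]
A * v1[:, np.newaxis]  # v1 as a (5, 1) column: row i of A is scaled by v1[i]
###Output
_____no_output_____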
###Markdown
Matrix algebra What about matrix multiplication? There are two ways. We can either use the `dot` function, which applies a matrix-matrix, matrix-vector, or inner vector multiplication to its two arguments:
###Code
np.dot(A, A)
np.dot(A, v1)
np.dot(v1, v1)
###Output
_____no_output_____
###Markdown
Alternatively, we can use the `@` operator:
###Code
A @ A
A @ v1
# inner product
v2 = v1.reshape(1,5)
print('shape', v2.shape, v2.T.shape)
v3 = v2.T @ v2
print(v3, v3.shape)
#Matrix multiplication has higher precedence than addition
print(v1 + A @ v1)
print(v1 + (A @ v1))
print ((v1 + A) @ v1)
###Output
[ 30 131 232 333 434]
[ 30 131 232 333 434]
[ 60 160 260 360 460]
###Markdown
If we try to add, subtract or multiply objects with incompatible shapes we get an error:
###Code
v = np.array([[1,2,3,4,5,6]]).T
np.shape(A), np.shape(v)
A * v  # shapes (5, 5) and (6, 1) do not broadcast, so this raises a ValueError
###Output
_____no_output_____
###Markdown
See also the related functions: `np.inner`, `np.outer`, `np.cross`, `np.kron`, `np.tensordot`. Try for example `help(np.kron)`. Array/Matrix transformations Above we have used the `.T` to transpose the matrix object `v`. We could also have used the `transpose` function to accomplish the same thing. Other mathematical functions that transform matrix objects are:
###Code
C = np.array([[1j, 2j], [3j, 4j]])
C
np.conjugate(C)
###Output
_____no_output_____
###Markdown
Hermitian conjugate: transpose + conjugate
###Code
np.conjugate(np.transpose(C))
###Output
_____no_output_____
###Markdown
We can extract the real and imaginary parts of complex-valued arrays using `real` and `imag`:
###Code
np.real(C) # same as: C.real
np.imag(C) # same as: C.imag
###Output
_____no_output_____
###Markdown
Or the complex argument and absolute value
###Code
np.angle(C+1) # heads up MATLAB Users, angle is used instead of arg
np.abs(C)
###Output
_____no_output_____
###Markdown
Matrix computations Inverse
###Code
np.linalg.inv(C)  # inverse of C (for np.matrix objects this is also available as C.I)
np.linalg.inv(C) @ C
###Output
_____no_output_____
###Markdown
Determinant
###Code
np.linalg.det(C)
np.linalg.det(np.linalg.inv(C))
###Output
_____no_output_____
###Markdown
Data processing Often it is useful to store datasets in Numpy arrays. Numpy provides a number of functions to calculate statistics of datasets in arrays. For example, let's calculate some properties from the Stockholm temperature dataset used above.
###Code
# reminder, the temperature dataset is stored in the data variable:
np.shape(data)
###Output
_____no_output_____
###Markdown
mean
###Code
# the temperature data is in column 3
np.mean(data[:,3])
###Output
_____no_output_____
###Markdown
The daily mean temperature in Stockholm over the last 200 years has been about 6.2 C. standard deviations and variance
###Code
np.std(data[:,3]), np.var(data[:,3])
###Output
_____no_output_____
###Markdown
min and max
###Code
# lowest daily average temperature
data[:,3].min()
# highest daily average temperature
data[:,3].max()
###Output
_____no_output_____
###Markdown
sum, prod, and trace
###Code
d = np.arange(0, 10)
d
# sum up all elements
np.sum(d)
# product of all elements
np.prod(d+1)
# cumulative sum
np.cumsum(d)
# cumulative product
np.cumprod(d+1)
# same as: diag(A).sum()
np.trace(A)
###Output
_____no_output_____
###Markdown
Computations on subsets of arrays We can compute with subsets of the data in an array using indexing, fancy indexing, and the other methods of extracting data from an array (described above). For example, let's go back to the temperature dataset:
###Code
options = np.get_printoptions()
np.set_printoptions(precision=1,suppress=True)
print(data[:3, :])
np.set_printoptions(options)
###Output
[[1800. 1. 1. -6.1 -6.1 -6.1 1. ]
[1800. 1. 2. -15.4 -15.4 -15.4 1. ]
[1800. 1. 3. -15. -15. -15. 1. ]]
###Markdown
The data format is: year, month, day, daily average temperature, low, high, location. If we are interested in the average temperature only in a particular month, say February, then we can create an index mask and use it to select only the data for that month using:
###Code
np.unique(data[:,1]) # the month column takes values from 1 to 12
mask_feb = data[:,1] == 2
# the temperature data is in column 3
np.mean(data[mask_feb,3])
###Output
_____no_output_____
###Markdown
With these tools we have very powerful data processing capabilities at our disposal. For example, extracting the monthly average temperature for each month of the year only takes a few lines of code:
###Code
months = np.arange(1,13)
monthly_mean = [np.mean(data[data[:,1] == month, 3]) for month in months]
fig, ax = plt.subplots()
ax.bar(months, monthly_mean)
ax.set_xlabel("Month")
ax.set_ylabel("Monthly avg. temp.");
###Output
_____no_output_____
###Markdown
Calculations with higher-dimensional data When functions such as `np.min`, `np.max`, etc. are applied to multidimensional arrays, it is sometimes useful to apply the calculation to the entire array, and sometimes only on a row or column basis. Using the `axis` argument we can specify how these functions should behave:
###Code
import numpy as np
m = np.random.random((3,3))
# global max
m.max()
# max in each column
m.max(axis=0)
# max in each row
m.max(axis=1)
###Output
_____no_output_____
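###Markdown
The same `axis` pattern applies to other reductions; a short sketch using the same random matrix `m`:
###Code
m.sum(axis=0)   # column sums
m.mean(axis=1)  # row means
###Output
_____no_output_____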
###Markdown
Many other functions and methods accept the same (optional) `axis` keyword argument. Reshaping, resizing and stacking arrays The shape of a NumPy array can be modified without copying the underlying data, which makes it a fast operation even for large arrays.
###Code
A = np.arange(24).reshape(6,4)
n, m = A.shape
B = A.reshape((1,n*m))
print(B)
print(B.base is A)
B[0,0:5] = 5 # modify the array
B
A # and the original variable is also changed. B is only a different view of the same data
###Output
_____no_output_____
###Markdown
We can also use the function `flatten` to make a higher-dimensional array into a vector. But this function creates a copy of the data.
###Code
B = A.flatten()
B
B[0:5] = 10
print(B)
print('B.base', B.base)
A # now A has not changed, because B's data is a copy of A's, not referring to the same data
###Output
_____no_output_____
###Markdown
Adding a new dimension: newaxis With `newaxis`, we can insert new dimensions in an array, for example converting a vector to a column or row matrix:
###Code
v = np.array([1,2,3])
np.shape(v)
# make a column matrix of the vector v
new_v = v[:, np.newaxis]
print(new_v.base is v)
# column matrix
v[:,np.newaxis].shape
# row matrix
v[np.newaxis,:].shape
###Output
_____no_output_____
###Markdown
Stacking and repeating arrays Using the functions `np.repeat`, `np.tile`, `np.vstack`, `np.hstack`, and `np.concatenate` we can create larger vectors and matrices from smaller ones: tile and repeat
###Code
a = np.array([[1, 2], [3, 4]])
# repeat each element 3 times
np.repeat(a, 3)
# tile the matrix 3 times
np.tile(a, 3)
###Output
_____no_output_____
###Markdown
concatenate
###Code
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
np.concatenate((a, b.T), axis=1)
###Output
_____no_output_____
###Markdown
hstack and vstack
###Code
np.vstack((a,b))
np.hstack((a,b.T))
###Output
_____no_output_____
###Markdown
Copy and "deep copy" To achieve high performance, assignments in Python usually do not copy the underlying objects. This is important for example when objects are passed between functions, to avoid an excessive amount of memory copying when it is not necessary (technical term: pass by reference).
###Code
A = np.array([[1, 2], [3, 4]])
A
# now B is referring to the same array data as A
B = A
# changing B affects A
B[0,0] = 10
B
A
###Output
_____no_output_____
###Markdown
If we want to avoid this behavior, so that we get a new, completely independent object `B` copied from `A`, then we need to do a so-called "deep copy" using the function `np.copy`:
###Code
B = np.copy(A)
# now, if we modify B, A is not affected
B[0,0] = -5
B
A
###Output
_____no_output_____
###Markdown
Iterating over array elements Generally, we want to avoid iterating over the elements of arrays whenever we can (at all costs). The reason is that in an interpreted language like Python (or MATLAB), iterations are really slow compared to vectorized operations. However, sometimes iterations are unavoidable. For such cases, the Python `for` loop is the most convenient way to iterate over an array:
###Code
v = np.array([1,2,3,4])
for element in v:
print(element)
M = np.array([[1,2], [3,4]])
for row in M:
print("row", row)
for element in row:
print(element)
###Output
row [1 2]
1
2
row [3 4]
3
4
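###Markdown
To make the speed difference concrete, here is a rough comparison (timings vary by machine) between an element-by-element Python loop and the vectorized `np.sum`, using an example array:
###Code
import time

big = np.arange(1_000_000)

t0 = time.perf_counter()
total = 0
for element in big:  # pure-Python loop over every element
    total += element
loop_time = time.perf_counter() - t0

t0 = time.perf_counter()
total_vec = np.sum(big)  # single vectorized call
vec_time = time.perf_counter() - t0

print("loop: {:.4f} s, vectorized: {:.4f} s".format(loop_time, vec_time))
###Output
_____no_output_____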
###Markdown
When we need to iterate over each element of an array and modify its elements, it is convenient to use the `enumerate` function to obtain both the element and its index in the `for` loop:
###Code
for row_idx, row in enumerate(M):
print("row_idx", row_idx, "row", row)
for col_idx, element in enumerate(row):
print("col_idx", col_idx, "element", element)
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
# each element in M is now squared
M
###Output
_____no_output_____
###Markdown
Vectorizing functions As mentioned several times by now, to get good performance we should try to avoid looping over elements in our vectors and matrices, and instead use vectorized algorithms. The first step in converting a scalar algorithm to a vectorized algorithm is to make sure that the functions we write work with vector inputs.
###Code
def Theta(x):
"""
    Scalar implementation of the Heaviside step function.
"""
if x >= 0:
return 1
else:
return 0
Theta(np.array([-3,-2,-1,0,1,2,3]))
###Output
_____no_output_____
###Markdown
OK, that didn't work because we didn't write the `Theta` function so that it can handle a vector input... To get a vectorized version of Theta we can use the Numpy function `vectorize`. In many cases it can automatically vectorize a function:
###Code
Theta_vec = np.vectorize(Theta)
Theta_vec(np.array([-3,-2,-1,0,1,2,3]))
###Output
_____no_output_____
###Markdown
We can also implement the function to accept a vector input from the beginning (requires more effort but might give better performance):
###Code
def Theta(x):
"""
    Vector-aware implementation of the Heaviside step function.
"""
return 1 * (x >= 0)
Theta(np.array([-3,-2,-1,0,1,2,3]))
# still works for scalars as well
Theta(-1.2), Theta(2.6)
###Output
_____no_output_____
###Markdown
Using arrays in conditions When using arrays in conditions, for example `if` statements and other boolean expressions, one needs to use `any` or `all`, which requires that any or all elements in the array evaluate to `True`:
###Code
M
if (M > 5).any():
print("at least one element in M is larger than 5")
else:
print("no element in M is larger than 5")
if (M > 5).all():
print("all elements in M are larger than 5")
else:
print("all elements in M are not larger than 5")
###Output
all elements in M are not larger than 5
###Markdown
Type casting Since NumPy arrays are *statically typed*, the type of an array does not change once created. But we can explicitly cast an array of some type to another using the `astype` method (see also the similar `np.asarray` function). This always creates a new array of the new type:
###Code
M.dtype
M2 = M.astype(float)
M2
M2.dtype
M3 = M.astype(bool)
M3
###Output
_____no_output_____
###Markdown
Further reading * http://numpy.scipy.org * http://www.scipy-lectures.org/ * https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html - A Numpy guide for MATLAB users * [MATLAB to Python migration white paper](https://www.enthought.com/white-paper-matlab-to-python/) Exercise Write a function ``f(a, b, c)`` that returns $a^b - c$. Form a `24x12x6` array containing values in the parameter ranges ``[0,1] x [0,1] x [0,1]`` (i.e., 24 equispaced values for a, 12 for b, and 6 for c). Hints: You can make ``np.ogrid`` give a number of points in a given range with ``np.ogrid[0:1:1/20]`` or ``np.ogrid[0:1:1/20, 0:1:1/30]``. (Alternative: `np.linspace`.) **Reminder** Python functions: ```python def f(a, b, c): return some_result ``` Approximate the 3-d integral $\int_0^1\int_0^1\int_0^1(a^b-c)\,da\,db\,dc$ over this volume with the mean. The exact result is: $\ln 2 -\frac{1}{2}\approx0.1931\ldots$ --- what is your relative error? (Hint: use elementwise operations and broadcasting.)
###Code
def f(a, b, c):
return a**b - c
# np.ogrid[0:1:24j, 0:1:12j, 0:1:6j] would build the same broadcast-ready grids directly; below we use np.linspace with np.newaxis instead
a = np.linspace(0, 1, 24)
b = np.linspace(0, 1, 12)
c = np.linspace(0, 1, 6)
samples = f(a[:,np.newaxis,np.newaxis],
b[np.newaxis,:,np.newaxis],
c[np.newaxis,np.newaxis,:])
print(a[:,np.newaxis,np.newaxis].shape)
integral = samples.mean()
print(integral)
print("Approximation:", integral)
print("Exact:", np.log(2) - 0.5)
###Output
Approximation: 0.1888423460296792
Exact: 0.1931471805599453
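###Markdown
The exercise also asks for the relative error; a quick check, assuming the `integral` variable computed in the cell above:
###Code
exact = np.log(2) - 0.5
relative_error = abs(integral - exact) / exact
print("Relative error:", relative_error)
###Output
_____no_output_____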
|
LDA topic modeling-BIGDATA (aggragated).ipynb | ###Markdown
Now we apply all of these steps to the whole available text (`data_list2` here):
###Code
texts = []
# loop through document list
for i in data_list2:
# clean and tokenize document string
raw = i.lower()
tokens = tokenizer.tokenize(raw)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem tokens
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
# add tokens to list
texts.append(stemmed_tokens)
# turn our tokenized documents into a id <-> term dictionary
dictionary = corpora.Dictionary(texts)
# convert tokenized documents into a document-term matrix
corpus = [dictionary.doc2bow(text) for text in texts]
# generate LDA model
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10, id2word = dictionary, passes=20)
print(ldamodel.print_topics(num_topics=10, num_words=10))
print(ldamodel.print_topics(num_topics=5, num_words=10))
print(ldamodel)
type(ldamodel)
print(dictionary)
type(dictionary)
type(corpus)
len (corpus)
print(corpus)
###Output
[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 3), (5, 1), (6, 1), (7, 2), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 1), (27, 1), (28, 1), (29, 1)], [(1, 1), (4, 7), (5, 2), (7, 5), (12, 1), (17, 2), (30, 3), (31, 1), (32, 2), (33, 1), (34, 2), (35, 1), (36, 3), (37, 1), (38, 1), (39, 1), (40, 1), (41, 1), (42, 1), (43, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1), (50, 1), (51, 1), (52, 1), (53, 1), (54, 1), (55, 1), (56, 2), (57, 1), (58, 1), (59, 1), (60, 1), (61, 1), (62, 1), (63, 2), (64, 1), (65, 1), (66, 1), (67, 1), (68, 1), (69, 1), (70, 1), (71, 2), (72, 1), (73, 1), (74, 1), (75, 1), (76, 1), (77, 1), (78, 1), (79, 1), (80, 1), (81, 1), (82, 1), (83, 1), (84, 1), (85, 1), (86, 1), (87, 1), (88, 1), (89, 1)], [(4, 1), (5, 1), (6, 2), (12, 1), (18, 1), (56, 1), (67, 1), (69, 1), (83, 1), (90, 2), (91, 2), (92, 1), (93, 1), (94, 1), (95, 1), (96, 1), (97, 1), (98, 1), (99, 1), (100, 1), (101, 1), (102, 1), (103, 1), (104, 2), (105, 1), (106, 2), (107, 1), (108, 1), (109, 1), (110, 1), (111, 1), (112, 1), (113, 1), (114, 1), (115, 1), (116, 1), (117, 1), (118, 1), (119, 1), (120, 1), (121, 1), (122, 1), (123, 1), (124, 1), (125, 1), (126, 1), (127, 1), (128, 1), (129, 1), (130, 1), (131, 1)], [(71, 1), (91, 1), (92, 1), (124, 1), (132, 1), (133, 1), (134, 1), (135, 1), (136, 1), (137, 1)], [(1, 1), (3, 1), (4, 1), (13, 1), (57, 1), (91, 2), (92, 1), (108, 1), (138, 1), (139, 1), (140, 1), (141, 1), (142, 1), (143, 1), (144, 1), (145, 1)], [(4, 2), (5, 1), (17, 2), (45, 1), (62, 1), (66, 1), (100, 1), (110, 1), (129, 1), (146, 1), (147, 1), (148, 1), (149, 1), (150, 1), (151, 1), (152, 1), (153, 1), (154, 1), (155, 1), (156, 1)], [(38, 1), (91, 2), (92, 2), (104, 1), (110, 2), (131, 1), (138, 1), (157, 1), (158, 1), (159, 1), (160, 1), (161, 1), (162, 1), (163, 1), (164, 1), (165, 1)], [(4, 4), (5, 7), (7, 3), (9, 2), (17, 1), (34, 1), (35, 1), (43, 1), (50, 1), (56, 1), (63, 1), (71, 1), (72, 1), (82, 1), (91, 2), (92, 1), (97, 1), (100, 3), (104, 2), (108, 1), (110, 1), (112, 1), (114, 1), (130, 1), (131, 1), (147, 1), (148, 1), (153, 1), (154, 1), (158, 1), (159, 1), (166, 1), (167, 2), (168, 2), (169, 1), (170, 1), (171, 1), (172, 1), (173, 1), (174, 1), (175, 1), (176, 1), (177, 1), (178, 2), (179, 1), (180, 1), (181, 1), (182, 2), (183, 1), (184, 1), (185, 1), (186, 1), (187, 2), (188, 1), (189, 1), (190, 1), (191, 1), (192, 1), (193, 1), (194, 1), (195, 1), (196, 1), (197, 1), (198, 1), (199, 1), (200, 1)], [(4, 1), (19, 1), (201, 1), (202, 1), (203, 1), (204, 1), (205, 1), (206, 1), (207, 1), (208, 1), (209, 1)], [(1, 1), (4, 1), (5, 2), (66, 1), (90, 1), (91, 3), (92, 2), (100, 3), (104, 2), (108, 1), (112, 1), (138, 1), (141, 1), (170, 1), (208, 1), (210, 1), (211, 1), (212, 1), (213, 1), (214, 1), (215, 1), (216, 1), (217, 1), (218, 1)], [(4, 1), (5, 1), (80, 1), (91, 2), (100, 1), (108, 2), (121, 1), (145, 1), (169, 1), (219, 1), (220, 1), (221, 1), (222, 1), (223, 1), (224, 1), (225, 1), (226, 1), (227, 1), (228, 1), (229, 1), (230, 1)], [(1, 1), (16, 1), (130, 1), (141, 1), (169, 1), (197, 1), (214, 1), (231, 1), (232, 1), (233, 1), (234, 1), (235, 1), (236, 1), (237, 1)], [(4, 1), (63, 1), (73, 1), (106, 1), (108, 1), (238, 1), (239, 1), (240, 1), (241, 1)], [(1, 1), (4, 2), (7, 2), (17, 1), (30, 1), (32, 1), (35, 1), (40, 1), (47, 1), (50, 1), (51, 1), (90, 1), (91, 1), (92, 1), (100, 2), (104, 1), (110, 1), (123, 2), (129, 1), (141, 2), (154, 
1), (159, 1), (177, 1), (200, 1), (201, 1), (212, 1), (214, 1), (231, 4), (232, 2), (233, 1), (242, 1), (243, 2), (244, 1), (245, 1), (246, 1), (247, 1), (248, 1), (249, 1), (250, 1), (251, 1), (252, 2), (253, 1), (254, 4), (255, 1), (256, 1), (257, 1), (258, 1), (259, 2), (260, 1), (261, 2), (262, 1), (263, 1), (264, 1), (265, 1), (266, 1), (267, 1), (268, 1), (269, 1), (270, 1), (271, 2), (272, 2), (273, 1), (274, 1), (275, 2), (276, 1), (277, 1), (278, 1), (279, 1), (280, 1), (281, 1), (282, 1), (283, 1), (284, 1), (285, 1), (286, 1), (287, 1), (288, 1), (289, 1), (290, 1), (291, 1), (292, 1), (293, 1), (294, 1), (295, 1), (296, 1), (297, 1), (298, 1), (299, 1), (300, 1), (301, 1), (302, 1), (303, 1), (304, 1), (305, 1), (306, 1), (307, 1), (308, 1), (309, 1)], [(2, 1), (5, 1), (43, 1), (52, 1), (102, 1), (124, 1), (149, 1), (157, 1), (159, 1), (167, 1), (200, 2), (225, 1), (277, 1), (310, 1), (311, 1), (312, 1), (313, 1), (314, 1), (315, 1), (316, 1), (317, 1), (318, 1), (319, 1), (320, 1)], [(90, 1), (131, 1), (155, 1), (158, 1), (167, 1), (182, 1), (224, 1), (261, 1), (321, 1), (322, 1), (323, 1), (324, 1), (325, 1), (326, 1), (327, 1), (328, 1)], [(1, 1), (4, 1), (34, 1), (62, 1), (72, 1), (84, 1), (91, 1), (92, 1), (97, 1), (100, 2), (104, 1), (121, 1), (130, 1), (139, 1), (154, 1), (157, 1), (214, 1), (225, 1), (254, 1), (300, 1), (310, 1), (329, 1), (330, 1), (331, 1), (332, 1), (333, 1), (334, 1)], [(4, 1), (69, 1), (90, 2), (121, 1), (156, 1), (216, 1), (238, 2), (254, 1), (280, 1), (325, 1), (326, 1), (335, 1), (336, 1), (337, 1), (338, 1), (339, 1), (340, 1), (341, 1), (342, 1), (343, 2), (344, 1), (345, 1), (346, 1), (347, 1), (348, 1)], [(4, 1), (17, 2), (26, 1), (35, 1), (37, 1), (40, 1), (75, 1), (84, 2), (86, 1), (101, 1), (102, 2), (106, 1), (111, 1), (117, 1), (139, 1), (141, 1), (164, 1), (167, 1), (169, 1), (171, 2), (200, 1), (225, 1), (231, 1), (239, 1), (254, 1), (257, 1), (278, 1), (279, 2), (292, 1), (321, 1), (335, 1), (341, 1), (349, 1), (350, 1), (351, 1), (352, 1), (353, 1), (354, 1), (355, 1), (356, 2), (357, 1), (358, 2), (359, 1), (360, 1), (361, 1), (362, 1), (363, 1), (364, 1), (365, 1), (366, 1), (367, 1), (368, 1), (369, 1), (370, 1), (371, 1), (372, 1), (373, 1), (374, 1), (375, 1), (376, 1), (377, 1)], [(1, 4), (2, 2), (4, 6), (5, 2), (7, 2), (17, 4), (18, 1), (25, 1), (26, 1), (35, 3), (36, 1), (49, 1), (50, 2), (51, 1), (52, 1), (63, 1), (74, 1), (82, 1), (84, 1), (91, 4), (92, 2), (100, 1), (101, 2), (102, 2), (104, 2), (108, 1), (109, 1), (111, 1), (123, 1), (124, 1), (129, 1), (133, 1), (146, 2), (147, 3), (149, 1), (159, 2), (162, 1), (169, 1), (171, 1), (178, 1), (181, 1), (185, 1), (187, 1), (193, 1), (196, 1), (200, 1), (207, 1), (214, 2), (223, 1), (230, 1), (231, 1), (232, 1), (235, 1), (252, 2), (254, 3), (261, 1), (271, 1), (272, 1), (295, 1), (312, 1), (322, 1), (346, 3), (353, 1), (356, 1), (370, 1), (374, 2), (378, 2), (379, 1), (380, 1), (381, 1), (382, 1), (383, 1), (384, 5), (385, 3), (386, 1), (387, 1), (388, 1), (389, 1), (390, 2), (391, 1), (392, 1), (393, 2), (394, 1), (395, 1), (396, 1), (397, 2), (398, 1), (399, 1), (400, 1), (401, 1), (402, 1), (403, 2), (404, 1), (405, 2), (406, 1), (407, 1), (408, 1), (409, 1), (410, 2), (411, 1), (412, 1), (413, 1), (414, 1), (415, 1), (416, 2), (417, 1), (418, 1), (419, 1), (420, 1), (421, 1), (422, 1), (423, 1), (424, 1), (425, 1), (426, 1), (427, 1), (428, 1), (429, 1), (430, 1), (431, 1), (432, 1), (433, 1), (434, 1), (435, 1), (436, 2), (437, 1), (438, 1), (439, 1), (440, 2), (441, 
1), (442, 2), (443, 1), (444, 1), (445, 1), (446, 1), (447, 1), (448, 4), (449, 1), (450, 1), (451, 1), (452, 1), (453, 1), (454, 1), (455, 1), (456, 2), (457, 1), (458, 1), (459, 1), (460, 2), (461, 1), (462, 1), (463, 1), (464, 1), (465, 1), (466, 1)], [(1, 2), (2, 1), (3, 1), (4, 4), (5, 1), (35, 2), (53, 1), (83, 1), (91, 1), (104, 1), (108, 1), (130, 1), (142, 1), (147, 1), (153, 1), (211, 1), (214, 1), (216, 1), (226, 1), (235, 1), (243, 1), (272, 1), (325, 1), (387, 1), (453, 1), (467, 1), (468, 1), (469, 1), (470, 1), (471, 1), (472, 1), (473, 1), (474, 1), (475, 1), (476, 1), (477, 1)], [(2, 1), (3, 2), (4, 2), (17, 1), (57, 1), (159, 1), (225, 1), (235, 1), (386, 1), (478, 1), (479, 1), (480, 1)], [(2, 1), (4, 2), (16, 2), (17, 2), (52, 1), (57, 1), (72, 1), (91, 3), (92, 1), (100, 2), (146, 1), (149, 1), (164, 1), (185, 1), (206, 1), (213, 1), (235, 1), (262, 1), (264, 1), (273, 1), (334, 2), (387, 2), (409, 1), (416, 2), (434, 1), (469, 1), (481, 1), (482, 1), (483, 1), (484, 1), (485, 1), (486, 1), (487, 1), (488, 1), (489, 1), (490, 1), (491, 1), (492, 1)], [(4, 2), (5, 1), (12, 1), (16, 1), (35, 1), (49, 2), (50, 1), (51, 1), (63, 1), (65, 1), (66, 1), (81, 1), (90, 1), (104, 5), (106, 1), (111, 1), (130, 1), (131, 2), (139, 1), (169, 1), (170, 1), (182, 1), (185, 2), (217, 3), (219, 1), (254, 5), (270, 1), (312, 2), (321, 1), (349, 1), (356, 1), (434, 1), (489, 1), (493, 1), (494, 1), (495, 1), (496, 1), (497, 1), (498, 1), (499, 1), (500, 1), (501, 1), (502, 1), (503, 1), (504, 1), (505, 1), (506, 1), (507, 1), (508, 1), (509, 1), (510, 1), (511, 1), (512, 1), (513, 1), (514, 1), (515, 1)], [(5, 2), (9, 1), (14, 2), (17, 3), (19, 1), (34, 1), (51, 1), (57, 2), (59, 1), (69, 1), (77, 1), (90, 1), (91, 5), (92, 5), (104, 1), (110, 1), (111, 1), (121, 2), (139, 1), (146, 2), (169, 2), (170, 1), (178, 1), (213, 1), (217, 1), (220, 1), (231, 3), (238, 3), (254, 1), (269, 1), (273, 1), (275, 1), (283, 1), (334, 1), (335, 1), (356, 1), (388, 1), (405, 1), (474, 1), (498, 1), (502, 3), (503, 5), (516, 1), (517, 1), (518, 3), (519, 3), (520, 1), (521, 1), (522, 2), (523, 1), (524, 1), (525, 1), (526, 1), (527, 1), (528, 1), (529, 1), (530, 1), (531, 1), (532, 1), (533, 1), (534, 1), (535, 1), (536, 1), (537, 1), (538, 1), (539, 1), (540, 1), (541, 1), (542, 1), (543, 1), (544, 1), (545, 1), (546, 1), (547, 2), (548, 1), (549, 1), (550, 1), (551, 1), (552, 1), (553, 1), (554, 1), (555, 1), (556, 1), (557, 1), (558, 1)], [(2, 2), (5, 1), (16, 1), (90, 1), (91, 2), (100, 1), (107, 1), (108, 1), (139, 1), (147, 1), (158, 1), (171, 1), (182, 1), (213, 1), (300, 1), (335, 1), (356, 1), (386, 1), (559, 1), (560, 2), (561, 1), (562, 1), (563, 1), (564, 1), (565, 1), (566, 1), (567, 1), (568, 1), (569, 1), (570, 1), (571, 1), (572, 1), (573, 1), (574, 1), (575, 1), (576, 1)], [(4, 1), (14, 1), (35, 1), (36, 1), (75, 1), (90, 1), (100, 1), (170, 1), (210, 1), (282, 1), (356, 1), (548, 1), (577, 1), (578, 1), (579, 1), (580, 1), (581, 1), (582, 1)], [(4, 3), (92, 1), (104, 1), (108, 1), (117, 1), (217, 1), (225, 2), (238, 1), (285, 1), (556, 1), (583, 1), (584, 1), (585, 1), (586, 1)], [(1, 1), (2, 1), (4, 5), (5, 3), (7, 1), (17, 2), (31, 1), (35, 1), (50, 1), (52, 1), (56, 1), (74, 1), (91, 1), (92, 1), (110, 1), (124, 1), (139, 1), (141, 1), (154, 1), (157, 1), (168, 1), (196, 1), (229, 1), (272, 1), (279, 1), (300, 1), (338, 1), (416, 1), (472, 1), (473, 1), (587, 2), (588, 1), (589, 1), (590, 2), (591, 1), (592, 1), (593, 1), (594, 1), (595, 1), (596, 1), (597, 1), (598, 1), (599, 1), 
(600, 1), (601, 1), (602, 1), (603, 1), (604, 1), (605, 1), (606, 1), (607, 1)], [(4, 2), (7, 2), (11, 1), (25, 1), (26, 1), (34, 1), (35, 1), (36, 1), (52, 2), (91, 1), (104, 1), (108, 1), (112, 1), (147, 2), (154, 1), (187, 1), (199, 1), (228, 1), (231, 1), (238, 1), (252, 1), (264, 1), (292, 1), (335, 1), (405, 1), (560, 1), (571, 2), (572, 2), (608, 1), (609, 1), (610, 1), (611, 1), (612, 1), (613, 1), (614, 1), (615, 1), (616, 1), (617, 1), (618, 1), (619, 1), (620, 1), (621, 1), (622, 1), (623, 1), (624, 1), (625, 1), (626, 1), (627, 1), (628, 1)]]
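###Markdown
With the dictionary and trained model in hand, we can inspect the topic mixture of any document. A minimal sketch, reusing the objects defined above (the example string is made up):
###Code
# topic distribution of the first document in the corpus
print(ldamodel.get_document_topics(corpus[0]))

# the same preprocessing applied to an unseen text before scoring it
new_doc = "some new text to score against the trained topics"
new_tokens = [p_stemmer.stem(t) for t in tokenizer.tokenize(new_doc.lower()) if t not in en_stop]
new_bow = dictionary.doc2bow(new_tokens)
print(ldamodel[new_bow])
###Output
_____no_output_____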
|
examples/examples-gpu/nyc-taxi-snowflake/rf-scikit.ipynb | ###Markdown
Random forest classification Single-node scikit-learn
###Code
import os
MODEL_PATH = 'models'
if not os.path.exists(MODEL_PATH):
os.makedirs(MODEL_PATH)
numeric_feat = [
'pickup_weekday',
'pickup_hour',
'pickup_week_hour',
'pickup_minute',
'passenger_count',
]
categorical_feat = [
'pickup_taxizone_id',
'dropoff_taxizone_id',
]
features = numeric_feat + categorical_feat
y_col = 'high_tip'
###Output
_____no_output_____
###Markdown
Load data and feature engineering Load a full month for this exercise.
###Code
import os
import pandas as pd
import snowflake.connector
SNOWFLAKE_ACCOUNT = os.environ['SNOWFLAKE_ACCOUNT']
SNOWFLAKE_USER = os.environ['SNOWFLAKE_USER']
SNOWFLAKE_PASSWORD = os.environ['SNOWFLAKE_PASSWORD']
SNOWFLAKE_WAREHOUSE = os.environ['SNOWFLAKE_WAREHOUSE']
TAXI_DATABASE = os.environ['TAXI_DATABASE']
TAXI_SCHEMA = os.environ['TAXI_SCHEMA']
conn_info = {
'account': SNOWFLAKE_ACCOUNT,
'user': SNOWFLAKE_USER,
'password': SNOWFLAKE_PASSWORD,
'warehouse': SNOWFLAKE_WAREHOUSE,
'database': TAXI_DATABASE,
'schema': TAXI_SCHEMA,
}
conn = snowflake.connector.connect(**conn_info)
query = """
SELECT
pickup_taxizone_id,
dropoff_taxizone_id,
passenger_count,
DIV0(tip_amount, fare_amount) > 0.2 AS high_tip,
DAYOFWEEKISO(pickup_datetime) - 1 AS pickup_weekday,
WEEKOFYEAR(pickup_datetime) AS pickup_weekofyear,
HOUR(pickup_datetime) AS pickup_hour,
(pickup_weekday * 24) + pickup_hour AS pickup_week_hour,
MINUTE(pickup_datetime) AS pickup_minute
FROM taxi_yellow
WHERE
DATE_TRUNC('MONTH', pickup_datetime) = %s
"""
taxi = conn.cursor().execute(query, '2019-01-01')
columns = [x[0] for x in taxi.description]
taxi = pd.DataFrame(taxi.fetchall(), columns=columns)
taxi.columns = taxi.columns.str.lower()
print(f'Num rows: {len(taxi)}, Size: {taxi.memory_usage(deep=True).sum() / 1e6} MB')
taxi_train = taxi[features + [y_col]]
taxi_train.high_tip.value_counts()
taxi_train.head()
###Output
_____no_output_____
###Markdown
Train model Setting `n_jobs=-1` tells scikit-learn to use all available cores on this machine to train models. Note that scikit-learn does NOT use the GPU; it's using CPU cores here.
###Code
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=100, max_depth=10, random_state=42, n_jobs=-1)
%%time
_ = rfc.fit(taxi_train[features], taxi_train[y_col])
###Output
_____no_output_____
###Markdown
Save model
###Code
import cloudpickle
with open(f'{MODEL_PATH}/random_forest_scikit.pkl', 'wb') as f:
cloudpickle.dump(rfc, f)
###Output
_____no_output_____
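###Markdown
To reuse the saved model later, it can be loaded back with `cloudpickle`; a minimal sketch:
###Code
with open(f'{MODEL_PATH}/random_forest_scikit.pkl', 'rb') as f:
    rfc_loaded = cloudpickle.load(f)

rfc_loaded.n_estimators  # quick sanity check that the model round-tripped
###Output
_____no_output_____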
###Markdown
Calculate metrics on test set Use a different month for the test set.
###Code
taxi = conn.cursor().execute(query, '2019-02-01')
columns = [x[0] for x in taxi.description]
taxi = pd.DataFrame(taxi.fetchall(), columns=columns)
taxi.columns = taxi.columns.str.lower()
taxi_test = taxi
from sklearn.metrics import roc_auc_score
preds = rfc.predict_proba(taxi_test[features])[:, 1]
roc_auc_score(taxi_test[y_col], preds)
###Output
_____no_output_____ |
InceptionResnet_V2_no_real_time_augmentation.ipynb | ###Markdown
*Data Science Unit 4 Sprint 3 Assignment 2* Convolutional Neural Networks (CNNs) Pre-Trained Model Load a pretrained network from Keras, [ResNet50](https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1) - a 50-layer deep network trained to recognize [1000 objects](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt). Starting usage:
```python
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model  # This is the functional API

resnet = ResNet50(weights='imagenet', include_top=False)
```
The `include_top` parameter in `ResNet50` will remove the fully connected layers from the ResNet model. The next step is to turn off the training of the ResNet layers. We want to use the learned parameters without updating them in future training passes.
```python
for layer in resnet.layers:
    layer.trainable = False
```
Using the Keras functional API, we will need to add additional fully connected layers to our model. When we removed the top layers, we removed all previous fully connected layers. In other words, we kept only the feature-processing portions of our network. You can experiment with additional layers beyond what's listed here. The `GlobalAveragePooling2D` layer functions as a really fancy flatten function by taking the average of each of the last convolutional layer outputs (which is still two dimensional).
```python
x = resnet.output
x = GlobalAveragePooling2D()(x)  # This layer is a really fancy flatten
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(resnet.input, predictions)
```
Your assignment is to apply the transfer learning above to classify images of mountains (`./data/mountain/*`) and images of forests (`./data/forest/*`). Treat mountains as the positive class (1) and the forest images as the negative (zero). Steps to complete the assignment: 1. Load the image data into numpy arrays (`X`) 2. Create a `y` for the labels 3. Train your model with pretrained layers from ResNet 4. Report your model's accuracy Load in Data Check out [`skimage`](https://scikit-image.org/) for useful functions related to processing the images. In particular check out the documentation for `skimage.io.imread_collection` and `skimage.transform.resize`. IMPORT LIBRARIES
###Code
import os
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
import tensorflow
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import numpy as np
import tensorflow
import pandas as pd
import itertools
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense, Input, GlobalAveragePooling2D, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Model, Sequential # This is the functional API
from tensorflow.keras.optimizers import RMSprop, Adam, Nadam
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing import image
from tensorflow.keras import applications
from keras.utils.np_utils import to_categorical
from sklearn import metrics
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
import math
import sys
import cv2
###Output
Using TensorFlow backend.
###Markdown
Loading data and creating x and y vectors
###Code
from skimage.io import imread_collection
from skimage.transform import resize
import numpy as np
from sklearn.model_selection import train_test_split
#Data path
forests = '/content/drive/My Drive/Lambda DS_Unit 4 Deep Learning/CNNs/data_mixed/forest/*.jpg'
mountains = '/content/drive/My Drive/Lambda DS_Unit 4 Deep Learning/CNNs/data_mixed/mountain/*.jpg'
#creating a collection with the available images
forests = imread_collection(forests).concatenate()
mountains = imread_collection(mountains).concatenate()
y_0 = np.zeros(forests.shape[0])
y_1 = np.ones(mountains.shape[0])
X = np.concatenate([forests, mountains])
X = resize(X, (702,255,255,3))
y = np.concatenate([y_0, y_1])
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=.25)
#y_train = np_utils.to_categorical(y_train, num_classes)
#y_test = np_utils.to_categorical(y_test, num_classes)
#datagen = ImageDataGenerator(preprocessing_function=preprocess_input) #shuffle=True
###Output
_____no_output_____
###Markdown
CREATING THE MODEL
###Code
FREEZE_LAYERS=2
net = InceptionResNetV2(include_top=False,
weights='imagenet',
input_tensor=None,
input_shape=(255, 255, 3))
x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid')(x)
net_final = Model(inputs=net.input, outputs=output_layer)
for layer in net_final.layers[:FREEZE_LAYERS]:
layer.trainable = False
for layer in net_final.layers[FREEZE_LAYERS:]:
layer.trainable = True
net_final.compile(optimizer=Adam(lr=0.001),
loss='binary_crossentropy', metrics=['accuracy'])
print(net_final.summary())
###Output
_____no_output_____
###Markdown
TRAIN AND VALIDATE ON DATA
###Code
# training hyper-parameters and callbacks used by fit() below
rlrop = ReduceLROnPlateau(monitor='val_accuracy', mode='max', min_delta=0.01, factor=0.2, patience=1)
stop = EarlyStopping(monitor='val_accuracy', mode='max', min_delta=0.01, patience=3, verbose=1)
filepath = "/content/drive/My Drive/Lambda DS_Unit 4 Deep Learning/CNNs/data/best_inceptionresnetV2_model_scenes_assignment_imagegenerator_sigmoid_binary3.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max', save_freq='epoch')
epochs = 10
BATCH_SIZE = 8
net_final.fit(x_train, y_train, batch_size=8,
steps_per_epoch=len(x_train)/BATCH_SIZE, epochs=epochs,
validation_data=(x_test, y_test),
callbacks=[stop, checkpoint, rlrop],
validation_steps=len(x_test)/BATCH_SIZE)
#TEST
# predicting images
import numpy as np
from PIL import Image
img = image.load_img('/content/drive/My Drive/Lambda DS_Unit 4 Deep Learning/CNNs/data/validation/mountain/n199031.jpg', target_size=(255, 255))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = net_final.predict(images, batch_size=1)
if classes[0][0] >= 0.5:  # sigmoid output, threshold at 0.5
print('Image is a mountain')
else:
print('Image is a forest')
print(classes)
###Output
Image is a mountain
[[1.]]
|
toy_models/chi2_and_t_student.ipynb | ###Markdown
- chi2: sum of X^2 with known variance & mean --> CI for the estimated variance of a known distribution
- Student: mean estimator distribution with unknown variance (i.e. estimated too) --> confidence interval for the true mean
###Code
def sample_student(N):
# mu = 0
X = np.random.randn(N)
x_mean = np.mean(X)
S = np.sqrt( np.sum( (X - x_mean)**2 )/( N-1 ) )
t = x_mean / (S / np.sqrt(N))
return t
N = 100
samples = [sample_student(N) for _ in range(2000)]
x = np.linspace(-np.max(samples), np.max(samples), 123)
pdf = stats.t.pdf(x, N-1, loc=0, scale=1)
plt.hist(samples, density=True, bins=20)
plt.plot(x, pdf, linewidth=4)
plt.xlabel('t');
plt.ylabel('PDF t (student)');
###Output
_____no_output_____
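###Markdown
The first bullet above can be checked the same way; a minimal sketch, assuming known mean 0 and variance 1:
###Code
def sample_chi2(N):
    # with known mean 0 and variance 1, the sum of X_i^2 follows a chi2 distribution with N degrees of freedom
    X = np.random.randn(N)
    return np.sum(X**2)

N = 100
samples_chi2 = [sample_chi2(N) for _ in range(2000)]

x = np.linspace(np.min(samples_chi2), np.max(samples_chi2), 123)
plt.hist(samples_chi2, density=True, bins=20)
plt.plot(x, stats.chi2.pdf(x, N), linewidth=4)
plt.xlabel('sum of X^2');
plt.ylabel('PDF chi2 (N dof)');
###Output
_____no_output_____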
###Markdown
Distribution of the max https://math.stackexchange.com/a/89037
###Code
gamma = 0.5772156649015328  # Euler-Mascheroni constant (the mean of the standard Gumbel distribution)
def sample_max(N):
# mu = 0
X = np.random.randn(N)
return np.max(X)
N = 400
samples = [sample_max(N) for _ in range(2000)]
plt.hist(samples, density=True, bins=20)
plt.xlabel('max');
plt.ylabel('PDF (max))');
# overlay the classical Gumbel (extreme-value) approximation for the maximum of N standard normals
a_N = np.sqrt(2 * np.log(N))
b_N = a_N - (np.log(np.log(N)) + np.log(4 * np.pi)) / (2 * a_N)
x = np.linspace(np.min(samples), np.max(samples), 123)
pdf = stats.gumbel_r.pdf(x, loc=b_N, scale=1 / a_N)
plt.plot(x, pdf, linewidth=4)
###Output
_____no_output_____ |