path (string, lengths 7–265)
concatenated_notebook (string, lengths 46–17M)
notebooks/.ipynb_checkpoints/TensorBoard-checkpoint.ipynb
###Markdown Visualizing with TensorBoard Connect (click link)http://localhost:8082 ###Code import subprocess as subp import time import os from IPython.display import HTML # Start Tensor Board logdir = '/tmp/test' !rm -rf $logdir !mkdir -p $logdir port = 8082 tb = subp.Popen(["tensorboard", "--logdir=" + logdir,"--host=0.0.0.0", "--port=" + str(port)]) time.sleep(15) !ls $logdir # Load TensorBoard also in this notebook: HTML('<iframe src=http://localhost:8082 width=1024 height=900></iframe>') from IPython.display import YouTubeVideo print ("code: https://github.com/MorvanZhou/tutorials/tree/master/tensorflowTUT") print ("playlist: https://www.youtube.com/playlist?list=PLXO45tsB95cJHXaDKpbwr5fC_CCYylw1f") print ("Video for this notebook") YouTubeVideo('FtxpjxFi2vk') YouTubeVideo('zp5EtBvwQbw') # Run a linear fit and add stuff to the tensorboard! # Let's do something, and see what things look like on the TensorBoard! import tensorflow as tf import pandas as pd import sys import tqdm if 'seaborn' not in sys.modules: %pylab inline import seaborn sess = tf.InteractiveSession() # generate data n_data = 500 x_data = np.random.rand(n_data).astype(np.float32) y_data = x_data*0.1 + 0.3 + np.random.normal(loc=0, scale=0.013, size=n_data) plt.scatter(x_data, y_data, marker='x') # Define a few variables and processes within some layer with tf.name_scope('fit_parameters'): m = tf.Variable(tf.random_uniform([1],-1.,1.), name="var_m") tf.histogram_summary("m", m) c = tf.Variable(tf.random_uniform([1],-0.5,0.5), name="var_c") tf.histogram_summary("c", c) with tf.name_scope('inputs'): x_holder = tf.placeholder(tf.float32, name='x_generated') y_holder = tf.placeholder(tf.float32, name='y_generated') with tf.name_scope('fitter'): y = m*x_holder + c penalty = tf.reduce_mean(tf.square(y - y_holder)) tf.scalar_summary("lsq", penalty) optimizer = tf.train.GradientDescentOptimizer(0.2) train = optimizer.minimize(penalty) init = tf.initialize_all_variables() sess.run(init) merged = tf.merge_all_summaries() writer = tf.train.SummaryWriter('/tmp/test', sess.graph) fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(x_data, y_data, marker='x') run = lambda f: sess.run(f, feed_dict={x_holder: x_data, y_holder: y_data}) n_step = 200 for step in tqdm.tqdm(range(n_step)): run(train) if (10*step)%n_step==0: print(run(penalty),run(m),run(c)) result = sess.run(merged, feed_dict={x_holder: x_data, y_holder: y_data}) writer.add_summary(result, step) ax.scatter(run(x_holder), run(y), marker='.', color='purple', alpha=(0.01 + step*step*step/float(n_step*n_step*n_step))) # Kill if necessary tb.kill() ###Output _____no_output_____
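###Markdown The summary calls above (`tf.histogram_summary`, `tf.scalar_summary`, `tf.merge_all_summaries`, `tf.train.SummaryWriter`) are TensorFlow 1.x-era APIs. As a hedged sketch only (it assumes a TensorFlow 2.x installation, which this notebook does not otherwise use), the same linear fit could log to TensorBoard with the `tf.summary` API like this: ###Code
# Sketch: TF 2.x equivalent of the summary logging above (assumes TensorFlow 2.x).
import numpy as np
import tensorflow as tf

logdir = '/tmp/test'
writer = tf.summary.create_file_writer(logdir)

m = tf.Variable(tf.random.uniform([1], -1., 1.), name="var_m")
c = tf.Variable(tf.random.uniform([1], -0.5, 0.5), name="var_c")
x = np.random.rand(500).astype(np.float32)
y_true = x * 0.1 + 0.3

optimizer = tf.keras.optimizers.SGD(learning_rate=0.2)
for step in range(200):
    with tf.GradientTape() as tape:
        penalty = tf.reduce_mean(tf.square(m * x + c - y_true))
    grads = tape.gradient(penalty, [m, c])
    optimizer.apply_gradients(zip(grads, [m, c]))
    # Write the same scalar and histogram summaries that TensorBoard plots above
    with writer.as_default():
        tf.summary.scalar("lsq", penalty, step=step)
        tf.summary.histogram("m", m, step=step)
        tf.summary.histogram("c", c, step=step)
###Output _____no_output_____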
OHW_development/developmental_materials/xtractomaticpy-Copy1.ipynb
###Markdown XtractomaticpyA Notebook for loading in environmental data, transforming it into points and into a `pandas` df, and combining it with the tag data. ###Code import datetime as dt import xarray as xr import numpy as np import pandas as pd from typing import Dict, Union import fsspec import matplotlib.pyplot as plt from datetime import datetime shark_dir = "track_shark144020.csv" track_ex = pd.read_csv(shark_dir, parse_dates=['datetime']) # in pandas, read_csv # track_ex["lon"] = np.where( # track_ex["lon"] < 180, # track_ex["lon"] + 360, # track_ex["lon"]) lat_min = track_ex["lat"].min() - 2.0 lat_max = track_ex["lat"].max() + 2.0 lon_min = track_ex["lon"].min() - 2.0 lon_max = track_ex["lon"].max() + 2.0 xy_bbox = dict(latitude=slice(lat_min,lat_max), longitude=slice(lon_min,lon_max)) plt.plot(track_ex.lon,track_ex.lat) xy_bbox track_ex # grab track data for a few tag datapoints track_2014 = track_ex.iloc[0:100] track_2014 ###Output _____no_output_____ ###Markdown load in environmental datahere is the SST from MUR ###Code # bring in data for SST file_location = 's3://mur-sst/zarr' ikey = fsspec.get_mapper(file_location, anon=True) ds_sst = xr.open_zarr(ikey,consolidated=True) ds_sst ###Output _____no_output_____ ###Markdown here is the SSH and velocity from CMEMS ###Code ds_ssh = xr.open_dataset("data/ssh_data.nc") # Converting the lat lon to -180 180 with xr.set_options(keep_attrs=True): ds_ssh = ds_ssh.assign({'longitude':(((ds_ssh.longitude + 180) % 360) - 180)}) ds_ssh_renamed = ds_ssh.rename({'latitude':'lat', 'longitude':'lon', 'time':'time'}) ds_ssh_renamed ###Output _____no_output_____ ###Markdown here is the Ocean Color data from MODISA, accessed via OpenDap ###Code # calculate year day for time range of tagged data day_list = [] year_list = [] for index, row in track_2014.iterrows(): row_time = pd.to_datetime(row["datetime"]) day_of_year = datetime(row_time.year, row_time.month, row_time.day).timetuple().tm_yday year_list.append(row_time.year) day_list.append(day_of_year) day_string = [str(x) for x in day_list] year_string = [str(x) for x in year_list] # create access url for ocean color on opendap, merge datafiles to xarray url = [] base_dir = 'https://oceandata.sci.gsfc.nasa.gov/opendap/hyrax/MODISA/L3SMI/' suffix = '.L3m_DAY_CHL_chlor_a_4km.nc' k = 0 for day in day_string: url.append('https://oceandata.sci.gsfc.nasa.gov:443/opendap/MODISA/L3SMI/' + year_string[k] +'/' + day + '/A'+year_string[k] + day + '.L3m_DAY_CHL_chlor_a_4km.nc') k = k+1 def add_id(ds): ds.coords['time_coverage_start'] = pd.to_datetime(ds.attrs['time_coverage_start']) return ds chl = xr.open_mfdataset(url, combine = 'nested', concat_dim='time_coverage_start', preprocess=add_id) chl = chl.sel( lat=slice(lat_max, lat_min), lon=slice(lon_min,lon_max)) # these data have the lat indexed backwards... 
no idea why, but it works this way chl # extraction and combining here # for each environmental data set, change the object name in the envdata_point() function def function_dataset_point(**kwargs) -> Dict[str, Union[float, int]]: pass def extract(function_dataset_point, df: pd.DataFrame, map_coordinates: Dict[str, str], rename_variables: Dict[str, str]) -> pd.DataFrame: """ function_dataset_point: environmental data in a point format, to be transformed df tag data in a pandas format map_coordinates: key is name of column in dataframe, value is the name of the coordinate in dataset rename_variables: TBD """ def get_row(row) -> Dict[str, Union[float, int]]: extract_coordinates = {} for key, val in map_coordinates.items(): extract_coordinates[val] = row[key] result = function_dataset_point(**extract_coordinates) # rename variables here and transform result return result return df.apply( lambda row: get_row(row), axis=1, result_type="expand" ) # CHANGE YOUR ENVIRONMENTAL DATA SOURCE HERE def ssh_point(lat, lon, time) -> Dict[str, Union[float, int]]: # change variable here ds = ds_ssh_renamed.sel(lat=lat, lon=lon, time=time, method="nearest") # change variable here results = {} for var in ds.variables: if var not in ds.coords: results[var] = ds[var].values return results result = pd.concat([track_2014, extract(ssh_point, # change variable here track_2014, {"lat": "lat", "lon": "lon", "datetime": "time"}, # change variable here, df : ds {} ) ], axis=1) result # sst in Kelvin? result.to_csv('ssh_matched.csv') result['mag'] = (result['ugos']**2 + result['vgos']**2) ** 0.5 result ###Output /tmp/ipykernel_1613/354212438.py:1: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access result.mag = (result.ugos**2 + result.vgos**2) ** 0.5
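###Markdown A hedged alternative to the row-by-row `df.apply` extraction above: xarray supports vectorized ("pointwise") indexing, so all tag positions can be matched in a single `.sel` call. This is only a sketch under the assumption that `ds_ssh_renamed` and `track_2014` exist as defined above; the variable names introduced here are illustrative. ###Code
# Sketch: vectorized nearest-neighbour matching of every tag position at once.
import xarray as xr

points_lat = xr.DataArray(track_2014["lat"].values, dims="obs")
points_lon = xr.DataArray(track_2014["lon"].values, dims="obs")
points_time = xr.DataArray(track_2014["datetime"].values, dims="obs")

matched = ds_ssh_renamed.sel(
    lat=points_lat, lon=points_lon, time=points_time, method="nearest"
)
# One row per tag observation, with the SSH/velocity variables as columns
matched_df = matched.to_dataframe().reset_index()
###Output _____no_output_____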
SRC/.ipynb_checkpoints/Practica2final-checkpoint.ipynb
###Markdown Actividad II Ejercicio 1 ###Code path = '/Users/Arroy javier/Desktop/MCIB/MCIB-19-P/data/Señales adquirid/' fname = 'Med4.txt' data = np.loadtxt(path+fname,delimiter='\t',dtype='str') t=data[ : , 0] SO2=data[:,1] RESP=data[:,2] EMG=data[:,3] PUL=data[:,4] ECG=data[:,5] t = t.astype('float')*60 SO2 = SO2.astype('float') RESP = RESP.astype('float') EMG = EMG.astype('float') PUL = PUL.astype('float') ECG = ECG.astype('float') ###Output _____no_output_____ ###Markdown IMPLEMENTACIÓN Eleccion de la mejor ventana ###Code W1=windows(SO2) W1=windows(ECG) W1=windows(EMG) W1=windows(PUL) W1=windows(RESP) path = '/Users/Arroy javier/Desktop/MCIB/MCIB-19-P/data/Señales adquirid/' fname = 'Lect1.txt' data1 = np.loadtxt(path+fname,delimiter='\t',dtype='str') t1=data1[:,0] SO21=data1[:,1] RESP1=data1[:,2] EMG1=data1[:,3] PUL1=data1[:,4] ECG1=data1[:,5] t1 = t1.astype('float')*60 SO21 = SO2.astype('float') RESP1 = RESP.astype('float') EMG1 = EMG.astype('float') PUL1 = PUL.astype('float') ECG1 = ECG.astype('float') plt.figure(figsize = (15, 3)) plt.subplot(1,2,1) plt.title("SO2, Ejercicio") PS1= PSD (SO2) plt.subplot(1,2,2) PS11= PSD (SO21) plt.title("SO2, Lectura") plt.figure(figsize = (15, 3)) plt.subplot(1,2,1) PS2= PSD (EMG) plt.title("EMG, Ejercicio") plt.subplot(1,2,2) PS21= PSD (EMG1) plt.title("EMG, Lectura") plt.figure(figsize = (15, 3)) plt.subplot(1,2,1) PS3= PSD (PUL) plt.title("PUL, Ejercicio") plt.subplot(1,2,2) PS31= PSD (PUL1) plt.title("PUL, Lectura") plt.figure(figsize = (15, 3)) plt.subplot(1,2,1) PS4= PSD (RESP) plt.title("RESP, Ejercicio") plt.subplot(1,2,2) PS41= PSD (RESP1) plt.title("RESP, Lectura") plt.figure(figsize = (15, 3)) plt.subplot(1,2,1) PS5= PSD (ECG) plt.title("ECG, Ejercicio") plt.subplot(1,2,2) PS51= PSD (ECG1) plt.title("ECG, Lectura") plt.show() ###Output _____no_output_____ ###Markdown Ejercicio 2 ###Code def filt_but(s, w, ord = 5): w = w/500 [b,a] =butter(ord, w, 'low') x = filtfilt(b, a, s) return (x,b,a) # Filtro notch para quitar el ruido de 60 Hz en algunas señales [b_n, a_n] = signal.iirnotch(60.0, 70.0, 1000) plt.figure(figsize = (25, 4)) Respuesta(b_n,a_n) #SO2 [SO2_f, b, a]= filt_but(SO2, 10) plt.figure(figsize = (25, 4)) plt.subplot(1,2,1) plt.plot(t[40000:55000], SO2[40000:55000],color = 'red', label="Señal sin filtrar") plt.plot(t[40000:55000], SO2_f[40000:55000],color = 'green', label="Señal filtrada") plt.legend(loc='best') plt.title("Señal de saturación de O2, Ejercicio") plt.grid(True) plt.subplot(1,4,3) Respuesta(b,a) #RESP [RESP_f, b, a]= filt_but(RESP, 10) plt.figure(figsize = (25, 4)) plt.subplot(1,2,1) plt.plot(t[40000:55000], RESP[40000:55000],color = 'red', label="Señal sin filtrar") plt.plot(t[40000:55000], RESP_f[40000:55000],color = 'green', label="Señal filtrada") plt.legend(loc='best') plt.title("Señal de respiración, Ejercicio") plt.grid(True) plt.subplot(1,4,3) Respuesta(b,a) #EMG EMG_n= filtfilt(b_n, a_n, EMG) [EMG_f, b, a]= filt_but(EMG_n, 150) plt.figure(figsize = (25, 4)) plt.subplot(1,2,1) plt.plot(t[40000:55000], EMG[40000:55000],color = 'red', label="Señal sin filtrar") plt.plot(t[40000:55000], EMG_f[40000:55000],color = 'green', label="Señal filtrada") plt.legend(loc='best') plt.title("Señal de EMG, Ejercicio") plt.grid(True) plt.subplot(1,4,3) Respuesta(b,a) #PUL [PUL_f, b, a]= filt_but(PUL, 5) plt.figure(figsize = (25, 4)) plt.subplot(1,2,1) plt.plot(t[40000:55000], PUL[40000:55000],color = 'red', label="Señal sin filtrar") plt.plot(t[40000:55000], PUL_f[40000:55000],color = 'green', label="Señal filtrada") 
plt.legend(loc='best') plt.title("Señal de pulso, Ejercicio") plt.grid(True) plt.subplot(1,4,3) Respuesta(b,a) #ECG ECG_n= filtfilt(b_n, a_n, ECG) [ECG_f, b, a]= filt_but(ECG_n, 100) plt.figure(figsize = (25, 4)) plt.subplot(1,2,1) plt.plot(t[40000:55000], ECG[40000:55000],color = 'red', label="Señal sin filtrar") plt.plot(t[40000:55000], ECG_f[40000:55000],color = 'green', label="Señal filtrada") plt.legend(loc='best') plt.title("Señal de ECG, Ejercicio") plt.grid(True) plt.subplot(1,4,3) Respuesta(b,a) plt.show() ###Output _____no_output_____ ###Markdown Ejercicio 3 ###Code #Tendencia ---- Filtrado from scipy import signal ECG_ten = ten_lin (ECG, t,1000) ECG_ten_n= filtfilt(b_n, a_n, ECG_ten) [ECG_ten_fil, b, a]= filt_but(ECG_ten_n, 100) # Filtrado ---- Tendencia ECG_f_n= filtfilt(b_n, a_n, ECG) [ECG_fil, b1, a1]= filt_but(ECG_f_n, 100) ECG_fil_ten = ten_lin (ECG_fil, t,1000) plt.figure(figsize = (15, 4)) plt.plot(t[45000:60000], ECG[45000:60000]+30,color = 'red', label="Señal sin procesar") plt.plot(t[45000:60000], ECG_ten_fil[45000:60000],color = 'red', label="Tendencia -> Filtrado") plt.plot(t[45000:60000], ECG_fil_ten[45000:60000],color = 'green', label="Filtrado -> Tendencia") plt.legend(loc='best') plt.title("Señal de EEG, Ejercicio") plt.ylim(-5,60) plt.xlim(45,60) plt.grid(True) ###Output _____no_output_____
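###Markdown The helper functions `windows`, `PSD`, `Respuesta` and `ten_lin` used above are defined elsewhere in the original notebook and are not shown here. As an assumption-labelled sketch, a frequency-response helper similar to `Respuesta(b, a)` could be built on `scipy.signal.freqz`; the 1000 Hz sampling rate is inferred from the `w/500` normalization inside `filt_but`. ###Code
# Sketch of a possible frequency-response plot (the real Respuesta() may differ).
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz

def respuesta_sketch(b, a, fs=1000):
    w, h = freqz(b, a, worN=2048, fs=fs)           # frequency response of the filter
    plt.plot(w, 20 * np.log10(np.abs(h) + 1e-12))  # magnitude in dB
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Magnitude (dB)')
    plt.grid(True)
###Output _____no_output_____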
snail-and-well/poad_solution_snail.ipynb
###Markdown Snail and well A snail falls to the bottom of a 125 cm well. Each day the snail rises 30 cm. But at night, while sleeping, it slides 20 cm because the walls are wet. How many days does it take to escape from the well? TIP: http://puzzles.nigelcoldwell.co.uk/sixtytwo.htm Solution ###Code # Assign problem data to variables with representative names # well height, daily advance, night retreat, accumulated distance well_height = 125 daily_advance = 30 nightly_retreat = 20 accumulated_distance = 0 # Assign 0 to the variable that represents the solution days = 0 # Write the code that solves the problem while accumulated_distance < well_height: accumulated_distance += daily_advance days += 0.5 if accumulated_distance >= well_height: break else: accumulated_distance -= nightly_retreat days += 0.5 # Print the result with print('Days =', days) print('Days =', days) ###Output Days = 10.5 ###Markdown Goals1. Treatment of variables2. Use of loop **while**3. Use of conditional **if-else**4. Print in console BonusThe distance traveled by the snail is now defined by a list.```advance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]```How long does it take to escape from the well?What is its maximum displacement in one day? And its minimum?What is its average speed during the day?What is the standard deviation of its displacement during the day? ###Code # Assign problem data to variables with representative names # well height, daily advance, night retreat, accumulated distance advance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55] well_height = 125 daily_advance = 30 nightly_retreat = 20 accumulated_distance = 0 # Assign 0 to the variable that represents the solution days = 0 # Write the code that solves the problem while accumulated_distance < well_height: for i in advance_cm: accumulated_distance += i days += 0.5 if accumulated_distance >= well_height: break else: accumulated_distance -= nightly_retreat days += 0.5 # Print the result with print('Days =', days) print('Days =', days) # What is its maximum displacement in a day? And its minimum? daily_displacement = [] for i in advance_cm: daily_displacement.append(i - 20) #print(daily_displacement) print("The snail's maximum displacement in a single day is " + str(max(daily_displacement)) + " cm.") print("The snail's minimum displacement in a single day is " + str(min(daily_displacement)) + " cm.") # What is its average progress? print("The snail's average daily progress is " + str(sum(daily_displacement)/len(daily_displacement)) + " cm.") # What is the standard deviation of the snail's displacement during the day? mean_displacement = (sum(daily_displacement)/len(daily_displacement)) summation = 0 for i in daily_displacement: summation += (i - mean_displacement) ** 2 std_dev = ((summation / len(daily_displacement)) ** 0.5) print("The standard deviation of the snail's daily displacement, in centimeters, is ", std_dev) ###Output Days = 4.5 The snail's maximum displacement in a single day is 57 cm. The snail's minimum displacement in a single day is -8 cm. The snail's average daily progress is 18.09090909090909 cm. The standard deviation of the snail's daily displacement, in centimeters, is 17.159437082600803
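###Markdown As an optional cross-check (not part of the original exercise), the hand-rolled mean and standard deviation above can be verified with numpy; `ddof=0` matches the population formula used in the loop. ###Code
# Sketch: verify the manual statistics with numpy.
import numpy as np

daily = np.array(advance_cm) - nightly_retreat
print("mean:", daily.mean())
print("std (population):", daily.std(ddof=0))
###Output _____no_output_____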
test/.ipynb_checkpoints/3_create_map_consorcio-checkpoint.ipynb
###Markdown Introdução ###Code import io import os import re import time import json import folium import random import requests import numpy as np import pandas as pd import seaborn as sns import geopandas as gpd from folium import plugins from osgeo import gdal, osr from folium.plugins import DualMap from PIL import Image from bs4 import BeautifulSoup from tqdm.notebook import trange, tqdm from paths import * ###Output _____no_output_____ ###Markdown *Function* Legend ###Code def modify_header_legend(map_folium): """ """ import folium import branca as bc # Header to Add head = """ {% macro header(this, kwargs) %} <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script> <script> $( function() { $( ".maplegend" ).draggable({ start: function (event, ui) { $(this).css({ right: "auto", top: "auto", bottom: "auto" }); } }); }); </script> {% endmacro %} """ # Add Header macro = bc.element.MacroElement() macro._template = bc.element.Template(head) map_folium.get_root().add_child(macro) # CSS to Add (on Header) css = """ {% macro header(this, kwargs) %} <style type='text/css'> .maplegend { position: absolute; z-index:9999; background-color: rgba(255, 255, 255, 1); border-radius: 5px; border: 2px solid #bbb; padding: 10px; font-size:12px; right: 10px; bottom: 20px; } .maplegend .legend-title { text-align: left; margin-bottom: 5px; font-weight: bold; font-size: 90%; } .maplegend .legend-scale ul { margin: 0; margin-bottom: 0px; padding: 0; float: left; list-style: none; } .maplegend .legend-scale ul li { font-size: 80%; list-style: none; margin-left: 0; line-height: 18px; margin-bottom: 2px; } .maplegend ul.legend-labels li span { display: block; float: left; height: 16px; width: 30px; margin-right: 5px; margin-left: 0; border: 0px solid #ccc; opacity: 0.3; } .maplegend .legend-source { font-size: 80%; color: #777; clear: both; } .maplegend a { color: #777; } </style> {% endmacro %} """ # Add CSS (on Header) macro = bc.element.MacroElement() macro._template = bc.element.Template(css) map_folium.get_root().add_child(macro) return map_folium def add_categorical_legend(map_folium, title, color_by_label): """ """ import folium import branca as bc body = f""" <div id='maplegend {title}' class='maplegend'> <div class='legend-title'>{title}</div> <div class='legend-scale'> <ul class='legend-labels'>""" # Loop Categories for label, color in color_by_label.items(): body += f""" <li><span style='background:{color}'></span>{label}</li>""" body += """ </ul> </div> </div> """ # Add Body body = bc.element.Element(body) map_folium.get_root().html.add_child(body) return map_folium ###Output _____no_output_____ ###Markdown Layer: UGRHIs ###Code # Lê o arquivo csv com o nome dos municípios df = pd.read_csv( 'https://raw.githubusercontent.com/open-geodata/sp/main/data/tabs/tab_municipio_ugrhi.csv', ) # Lê o arquivo csv com o nome dos municípios gdf = gpd.read_file( 'https://raw.githubusercontent.com/open-geodata/sp/main/data/shps/sp_250k_wgs84.geojson', ) gdf.drop(['municipio_nome'], axis=1, inplace=True) gdf['id_municipio'] = gdf['id_municipio'].astype(int) gdf['geometry'] = gdf.simplify(0.0015) # Merge gdf = gdf.merge( df, on='id_municipio', how='left' ) # Save geojson gdf.to_file( os.path.join(shps_path, 'sp_ugrhi.geojson'), driver='GeoJSON', encoding='utf-8' ) # Results gdf.head() # Seleciona colunas df_ugrhi = gdf[['id_municipio', 'nome_ugrhi']].copy() # Salva Tabela df_ugrhi.to_csv( os.path.join(tabs_path, 'tab_municipio_ugrhi.csv'), index=False, ) def add_lyr_ugrhi(): # Input gdf = 
gpd.read_file(os.path.join(shps_path, 'sp_ugrhi.geojson')) gdf = gdf.to_crs(epsg=4326) # Column with category col_categories = 'nome_ugrhi' # Set palette palette_polygon = 'Paired' # Get list of unique values categories = set(gdf[col_categories]) categories = list(categories) categories.sort() # See the palette chosed pal = sns.color_palette(palette_polygon, n_colors=len(categories)) # Set dictionary color_polygon = dict(zip(categories, pal.as_hex())) # lyr = folium.GeoJson( gdf, name='UGRHIs', smooth_factor=1.0, zoom_on_click=False, show=False, embed=False, style_function=lambda x: { 'fillColor': color_polygon[x['properties'][col_categories]], 'color': color_polygon[x['properties'][col_categories]], 'weight': 0.5, 'fillOpacity': 0.2, }, highlight_function=lambda x: { 'weight': 2, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'nome_ugrhi'], aliases=['Munícipio', 'UGRHI'], sticky=True, opacity=0.9, direction='right', ), # popup=folium.GeoJsonPopup( # ['popup'], # parse_html=False, # max_width='400', # show=False, # labels=False, # sticky=True, # ) ) return lyr ###Output _____no_output_____ ###Markdown Layer: RMs ###Code # Lê o arquivo csv com o nome dos municípios df = pd.read_csv( 'https://raw.githubusercontent.com/michelmetran/sp/main/data/tabs/tab_rms.csv', ) # Lê o arquivo csv com o nome dos municípios gdf = gpd.read_file( 'https://raw.githubusercontent.com/michelmetran/sp/main/data/shps/sp_250k_wgs84.geojson', ) gdf.drop(['municipio_nome'], axis=1, inplace=True) gdf['id_municipio'] = gdf['id_municipio'].astype(int) gdf['geometry'] = gdf.simplify(0.0015) # Merge gdf = gdf.merge( df, on='id_municipio', how='right' ) # Save geojson gdf.to_file( os.path.join(shps_path, 'sp_rms.geojson'), driver='GeoJSON', encoding='utf-8' ) # Results gdf.head() df_rm = gdf[['id_municipio', 'nome_rm']].copy() df_rm.to_csv( os.path.join(tabs_path, 'tab_municipio_rm.csv'), index=False, ) def add_lyr_rms(m): # Input gdf = gpd.read_file(os.path.join(shps_path, 'sp_rms.geojson')) gdf = gdf.to_crs(epsg=4326) # Column with category col_categories = 'nome_rm' # Set palette palette_polygon = 'Paired' # Get list of unique values categories = set(gdf[col_categories]) categories = list(categories) categories.sort() # See the palette chosed pal = sns.dark_palette('#808080', reverse=True, as_cmap=False, n_colors=len(categories)) # Set dictionary color_polygon = dict(zip(categories, pal.as_hex())) stripes = plugins.pattern.StripePattern( angle=-45 ) stripes.add_to(m) # Layer lyr = folium.GeoJson( gdf, name='RMs e AUs', smooth_factor=1.0, zoom_on_click=False, show=False, embed=False, style_function=lambda x: { 'fillColor': color_polygon[x['properties'][col_categories]], 'color': color_polygon[x['properties'][col_categories]], 'weight': 2, 'fillOpacity': 0.3, 'fillPattern': stripes, }, highlight_function=lambda x: { 'weight': 3, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'nome_rm'], aliases=['Munícipio', 'RM|AU'], sticky=True, opacity=0.9, direction='right', ), # popup=folium.GeoJsonPopup( # ['popup'], # parse_html=False, # max_width='400', # show=False, # labels=False, # sticky=True, # ) ) return lyr ###Output _____no_output_____ ###Markdown Layer: Consócio ###Code # Lê o arquivo csv com o nome dos municípios df = pd.read_csv( os.path.join(tabs_path, 'tab_municipio_consorcio.csv'), ) df = df[df['consorcio'] == 1].copy() #df.drop(['consorcio'], axis=1, inplace=True) # Lê o arquivo csv com o nome dos municípios gdf = gpd.read_file( 
'https://raw.githubusercontent.com/open-geodata/sp/main/data/shps/sp_250k_wgs84.geojson', ) gdf.drop(['municipio_nome'], axis=1, inplace=True) gdf['id_municipio'] = gdf['id_municipio'].astype(int) gdf['geometry'] = gdf.simplify(0.0015) # Merge gdf = gdf.merge( df, on='id_municipio', how='right' ) # Save geojson gdf.to_file( os.path.join(shps_path, 'sp_consorcio.geojson'), driver='GeoJSON', encoding='utf-8' ) # Results gdf.head() def add_lyr_consorcio(m): # Input gdf = gpd.read_file(os.path.join(shps_path, 'sp_consorcio.geojson')) gdf = gdf.to_crs(epsg=4326) # Padrão stripes = plugins.pattern.StripePattern(angle=-45) stripes.add_to(m) # Layers lyr = folium.GeoJson( gdf, name='Consórcio', smooth_factor=1.0, zoom_on_click=False, show=True, embed=False, style_function=lambda x: { 'fillColor': '#808080', 'color': '#808080', 'weight': 2, 'fillOpacity': 0.3, 'fillPattern': stripes, }, highlight_function=lambda x: { 'weight': 3, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome'], aliases=['Munícipio'], sticky=True, opacity=0.9, direction='right', ), ) return lyr ###Output _____no_output_____ ###Markdown Layer: URAEs ###Code # Lê o arquivo csv com o nome dos municípios df = pd.read_csv( 'https://raw.githubusercontent.com/michelmetran/pl251/main/data/tabs/tab_municipio_pl251.csv', #os.path.join('data', 'tabs', 'tab_municipio_pl251.csv'), ) # Lê o arquivo csv com o nome dos municípios gdf = gpd.read_file( 'https://raw.githubusercontent.com/michelmetran/sp/main/data/shps/sp_250k_wgs84.geojson', ) gdf.drop(['municipio_nome'], axis=1, inplace=True) gdf['id_municipio'] = gdf['id_municipio'].astype(int) gdf['geometry'] = gdf.simplify(0.0015) # Merge gdf = gdf.merge( df, on='id_municipio', how='left' ) # Delete Columns gdf.drop(['id'], axis=1, inplace=True) # Save geojson gdf.to_file( os.path.join(shps_path, 'sp_urae.geojson'), driver='GeoJSON', encoding='utf-8' ) # Results gdf.head() def add_lyr_urae(): # Input gdf = gpd.read_file(os.path.join(shps_path, 'sp_urae.geojson')) gdf = gdf.to_crs(epsg=4326) # Column with category col_categories = 'unidade' # Set dictionary color_polygon = dict() color_polygon['URAE 1 - Sudeste'] = '#0505B4' color_polygon['URAE 2 - Centro'] = '#FF2E2F' color_polygon['URAE 3 - Leste'] = '#FEFF01' color_polygon['URAE 4 - Norte'] = '#31B505' # Calculate PopUps gdf['popup'] = gdf.apply(popup_html, axis=1) # Layer lyr = folium.GeoJson( gdf, name='URAEs', smooth_factor=1.0, zoom_on_click=False, embed=False, show=True, style_function=lambda x: { 'fillColor': color_polygon[x['properties'][col_categories]], 'color': color_polygon[x['properties'][col_categories]], 'weight': 0.3, 'fillOpacity': 0.3, }, highlight_function=lambda x: { 'weight': 2, 'fillOpacity': 0.6, }, tooltip=folium.features.GeoJsonTooltip( fields=['municipio_nome', 'unidade'], aliases=['Munícipio', 'Unidade'], sticky=True, opacity=0.9, direction='right', ), popup=folium.GeoJsonPopup( fields=['popup'], parse_html=False, max_width='400', show=False, labels=False, sticky=True, ) ) return lyr, color_polygon # Add Field def popup_html(row): html = """ <div> <p><b>{}</b> pertence à: <h4><b>{}</b></h4></p> </div> """.format( '' if pd.isnull(row['municipio_nome']) else '{}'.format(row['municipio_nome']), '' if pd.isnull(row['unidade']) else '{}'.format(row['unidade']), ) html = html.replace('\n','') html = re.sub('\s\s+' , ' ', html) # Remove Espaços no meio html = html.strip() return html ###Output _____no_output_____ ###Markdown Folium Map ###Code def get_map(input_geojson): # Input gdf = 
gpd.read_file(input_geojson) gdf = gdf.to_crs(epsg=4326) sw = gdf.bounds[['miny', 'minx']].min().values.tolist() ne = gdf.bounds[['maxy', 'maxx']].max().values.tolist() bounds = [sw, ne] # Zoom min_zoom = 10 max_zoom = 18 # Create Map m = folium.Map( #zoom_start=10, min_zoom=min_zoom, max_zoom=max_zoom, max_bounds=True, #zoom_delta=0.1, min_lat=bounds[0][0]*(101/100), min_lon=bounds[0][1]*(101/100), max_lat=bounds[1][0]*(99/100), max_lon=bounds[1][1]*(99/100), tiles=None, ) # Add Base Map folium.TileLayer( 'cartodbpositron', name='BaseMap', control=False, ).add_to(m) # Add Layers lyr_urae, color_polygon = add_lyr_urae() m.add_child(lyr_urae) m.add_child(add_lyr_ugrhi()) m.add_child(add_lyr_rms(m)) m.add_child(add_lyr_consorcio(m)) # Add Map Legend m = modify_header_legend(m) m = add_categorical_legend( m , 'URAEs', color_by_label=color_polygon ) # Plugins m.fit_bounds(bounds) plugins.Fullscreen( position='topleft', title='Clique para Maximizar', title_cancel='Mininizar', ).add_to(m) folium.LayerControl( 'topright', collapsed=True ).add_to(m) return m # Mapa m = get_map( os.path.join(shps_path, 'sp_consorcio.geojson') ) m.save(os.path.join(maps_path, 'consorcio_map.html')) # Figura img_data = m._to_png(5) img = Image.open(io.BytesIO(img_data)) img.save(os.path.join(imgs_path, 'zoom_consorcio.png')) # Results m # Mapa m = get_map( os.path.join(shps_path, 'sp_urae.geojson') ) # Figura img_data = m._to_png(5) img = Image.open(io.BytesIO(img_data)) img.save(os.path.join(imgs_path, 'zoom_sp.png')) # Results m ###Output _____no_output_____
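###Markdown Each layer function above rebuilds the same category-to-colour mapping by hand. As a hedged sketch (the helper name is illustrative and not part of the original notebook), that logic could be factored into one reusable function: ###Code
# Sketch: reusable mapping from a categorical column to hex colours.
import seaborn as sns

def build_color_map(gdf, column, palette='Paired'):
    categories = sorted(set(gdf[column].dropna()))
    colors = sns.color_palette(palette, n_colors=len(categories)).as_hex()
    return dict(zip(categories, colors))

# Example (assuming the UGRHI GeoDataFrame loaded above):
# color_polygon = build_color_map(gdf, 'nome_ugrhi')
###Output _____no_output_____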
demos/Day 4 - image classification.ipynb
###Markdown 1 st column is the label - 0, 1, 2 ... 9 Rest are pixel values ###Code y_train = df_training.iloc[:, 0].values X_train = df_training.iloc[:, 1:].values np.min(X_train), np.max(X_train) plt.hist(X_train.flatten()); X_train.shape, X_train.flatten().shape 60000 * 784 28 * 28 pd.Series(y_train).unique() pd.Series(y_train).value_counts().sort_index().plot.bar() plt.imshow(X_train[0].reshape(28, 28), cmap = "gray") X_train[0] fig, _ = plt.subplots(5, 5, figsize=(10, 8)) for i, ax in enumerate(fig.axes): ax.imshow(X_train[i].reshape(28, 28), cmap = "gray") ax.set_title(y_train[i]) plt.tight_layout() X_train_std = (X_train - np.mean(X_train, axis = 0)) / (np.std(X_train, axis = 0) + 1e-6) X_train_std[0] fig, _ = plt.subplots(5, 5, figsize=(10, 8)) for i, ax in enumerate(fig.axes): ax.imshow(X_train_std[i].reshape(28, 28), cmap = "gray") ax.set_title(y_train[i]) plt.tight_layout() pd.DataFrame(X_train_std).describe() def load_data(path): df = pd.read_csv(path, header = None) y = df.iloc[:, 0].values X = df.iloc[:, 1:].values/255 return X, y X_train, y_train = load_data("/data/MNIST/mnist_train.csv") X_test, y_test = load_data("/data/MNIST/mnist_test.csv") from sklearn import linear_model %%time est = linear_model.SGDClassifier(n_jobs=4, tol=1e-4, eta0 = 0.15, learning_rate = "invscaling", alpha = 0.01, max_iter= 100) est.fit(X_train, y_train) print("training accuracy: ", est.score(X_train, y_train), "\ntest accuracy: ", est.score(X_test, y_test)) import tensorflow as tf ###Output /Users/abulbasar/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters ###Markdown tensor = multi dimensional array- constant - constant used for computation- variable - intermediate variables to store data ... these are mutable. 
typically used to store weights- placeholders like function argument - typically used to supply training features and labels to tensorflow graph ###Code a = tf.constant(3.4) b = tf.constant(4.5) x = tf.constant(np.array([[1, 2, 3], [4, 5, 9]])) c = a + b print("a", a) print("b", b) print("c", c) print("x", x) with tf.Session() as sess: c_ = sess.run(c) print("c value", c_) a = tf.placeholder("float32") b = tf.placeholder("float32") c = a + b with tf.Session() as sess: values = {a: 2.3, b: 7.8} print(sess.run(c, feed_dict=values)) X = tf.placeholder("float32", (None, 784)) y = tf.placeholder("int32", (None,)) num_classes = 10 learning_rate = 0.1 max_iters = 50 print("X", X) print("y", y) y_oh = tf.one_hot(y, num_classes) print("y_oh", y_oh) W = tf.Variable(tf.truncated_normal((784, 10), stddev=1/num_classes)) b = tf.Variable(0.0) print("W",W) print("b", b) Z = tf.matmul(X, W) + b print("Z", Z) logits = Z print("logits", logits) y_probs = tf.nn.softmax(logits) y_pred = tf.argmax(logits, axis=1, output_type=tf.int32) print("y_probs", y_probs) loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_oh, logits = logits) # record level amount of "error" cost = tf.reduce_mean(loss) # aggregated "error" match = tf.equal(y, y_pred) print("match", match) accuracy = tf.reduce_mean(tf.cast(match, "float32")) print("accuracy", accuracy) opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) #W_ = sess.run(W) #print("Initial value of W", W_) #plt.hist(W_.flatten(), bins = 100); values = {X: X_train, y: y_train} costs = [] for i in range(max_iters): _, cost_, accuracy_ = sess.run([opt, cost, accuracy], feed_dict=values) costs.append(cost_) print("i: %d, cost: %f, accuracy: %f" % (i, cost_, accuracy_)) pd.Series(costs).plot() import keras from keras.layers import Dense, InputLayer from time import time tensorboard = keras.callbacks.TensorBoard(log_dir="/tmp/tf/tensorboard/%d/" % time()) model = keras.Sequential() model.add(InputLayer((784,))) model.add(Dense(400, activation="relu")) model.add(Dense(100, activation="relu")) model.add(Dense(10, activation="softmax")) model.compile(loss=keras.losses.categorical_crossentropy , optimizer="sgd" , metrics=["accuracy"]) model.fit(X_train, keras.utils.to_categorical(y_train), epochs=10, callbacks=[tensorboard]) y_train.shape keras.utils.to_categorical(y_train).shape keras.utils.to_categorical(y_train)[:5] np.argmax(keras.utils.to_categorical(y_train)[:5], axis = 1)[:5] y_train[:5] ###Output _____no_output_____ ###Markdown TensorboardOpen command prompt and go to the directory of tensorboard```$ tensorboard --logdir=.``` ###Code Y_probs = model.predict_proba(X_test) Y_probs.shape Y_probs[1] print(np.argmax(Y_probs, axis = 1)[1]) plt.imshow(X_test[1].reshape(28, 28), cmap="gray") from skimage.io import imread img = imread("https://yourshot.nationalgeographic.com/u/fQYSUbVfts-T7odkrFJckdiFeHvab0GWOfzhj7tYdC0uglagsDNfPyOWLSTLFY4MU96_S349qkXL9l9Mgn-gPMRAD0h5UkQaN8K32bvT0TXvGmuPnhPdkOfNh7Ff-vdRes_SyPhkPSxSyt3hw6fLkhJC05gvEuSj5_EHHGcmhXRQjKPBnfETUI6DFh9livSBrj_75XZpJzjPsiUxm-PDGS5xImHZ3OE/") img.shape plt.imshow(img) type(img) from skimage.color import rgb2gray plt.imshow(rgb2gray(img), cmap= "gray") from sklearn.decomposition import PCA from sklearn import preprocessing X_train.shape scaler = preprocessing.StandardScaler() X_train_std = scaler.fit_transform(X_train) pca = PCA() pca.fit(X_train_std) plt.bar(range(784), pca.explained_variance_ratio_) explained = 
np.cumsum(pca.explained_variance_ratio_) pd.Series(explained)[explained>0.99] np.sum(pca.explained_variance_ratio_[:543]) pca = PCA(543) X_train_pca = pca.fit_transform(X_train) X_train_pca.shape X_train_recon = pca.inverse_transform(X_train_pca) X_train_recon.shape fig, axes = plt.subplots(2, 10, figsize = (15, 4)) for i in range(10): axes[0][i].imshow(X_train[i].reshape(28, 28), cmap = "gray") axes[1][i].imshow(X_train_recon[i].reshape(28, 28), cmap = "gray") X_train_sub = X_train[:10] X_train_noisy = X_train_sub + 0.3 * np.random.randn(* X_train_sub.shape) X_train_noisy.shape X_train_noisy_recon = pca.inverse_transform(pca.transform(X_train_noisy)) fig, axes = plt.subplots(2, 10, figsize = (15, 4)) for i in range(10): axes[0][i].imshow(X_train_noisy[i].reshape(28, 28), cmap = "gray") axes[1][i].imshow(X_train_noisy_recon[i].reshape(28, 28), cmap = "gray") X_train_pca.shape tensorboard = keras.callbacks.TensorBoard(log_dir="/tmp/tf/tensorboard/%d/" % time()) model = keras.Sequential() model.add(InputLayer((543,))) model.add(Dense(400, activation="relu")) model.add(Dense(100, activation="relu")) model.add(Dense(10, activation="softmax")) model.compile(loss=keras.losses.categorical_crossentropy , optimizer="sgd" , metrics=["accuracy"]) model.fit(X_train_pca, keras.utils.to_categorical(y_train), epochs=10, callbacks=[tensorboard]) X_test_pca = pca.transform(X_test) model.evaluate(X_test_pca, keras.utils.to_categorical(y_test)) from keras.layers import Conv2D, MaxPooling2D, Flatten tensorboard = keras.callbacks.TensorBoard(log_dir="/tmp/tf/tensorboard/%d/" % time()) model = keras.Sequential() model.add(InputLayer((28,28,1))) model.add(Conv2D(32, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(400, activation="relu")) model.add(Dense(100, activation="relu")) model.add(Dense(10, activation="softmax")) model.compile(loss=keras.losses.categorical_crossentropy , optimizer="sgd" , metrics=["accuracy"]) model.fit(X_train.reshape((-1, 28, 28, 1)) , keras.utils.to_categorical(y_train) , epochs=10 , callbacks=[tensorboard]) model.evaluate(X_test.reshape((-1, 28, 28, 1)) , keras.utils.to_categorical(y_test)) X_train[0].reshape((28, 28, 1)).shape ###Output _____no_output_____
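###Markdown As a hedged follow-up (not in the original notebook), per-class behaviour of the CNN can be inspected with a confusion matrix on the test set, assuming `model`, `X_test` and `y_test` are still in memory from the cells above: ###Code
# Sketch: confusion matrix for the CNN predictions on the test set.
import numpy as np
from sklearn.metrics import confusion_matrix

y_pred = np.argmax(model.predict(X_test.reshape((-1, 28, 28, 1))), axis=1)
print(confusion_matrix(y_test, y_pred))
###Output _____no_output_____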
MyNotebooks/R/FirstMoverAdvantage.ipynb
###Markdown First-mover advantage Author: Snigdhayan MahantaI will formulate a problem in terms of a hypothetical match-making game. Suppose there is a blind dating game, where a woman has to select a partner among various players based on conversation only. The rules are as follows:1. There are 10 players and the game ends only if she selects one of them.2. There is a predetermined order in which she gets to speak with the players.3. In each round she selects one conversation topic and she is allowed to discuss only that topic with all the 10 players.3. At the end of each conversation she must either select the player (thus ending the game) or proceed to the next one.4. If she has spoken with all 10 players and not selected anybody, then she can start another round of conversations based on a new topic.Among all players there is one (let us say the 8th player), who is her true "soul mate". In other words, with this person the expected enjoyment over all possible conversation topics is maximum. For simplicity, let us assume that all others have a low probability of being selected (let us say capped at 0.4) and the 8th player has a high probability of being selected at any round of conversation. Suppose we play this game with 1000 different women. How many of them will actually select the 8th player? ###Code # Context information n_outcomes <- 2 # select partner or proceed to the next one outcomes <- c(1:n_outcomes) winningOutcome <- 1 # 1 signifies partner selection players <- c(1:10) # in ascending order of priority - 7 candidates before the 8th one # Probabilistic winner - the player with the highest probability of winning (8th one) probabilistic_winner <- 8 gen_winner_prob <- function() { vector <- sample(c(1:100), size=length(outcomes), replace=TRUE) vector[winningOutcome] <- 1.5*max(vector) # increase the probability of winning winner_prob <- vector/sum(vector) # scale it to make it a probability vector return(winner_prob) } gen_winner_prob()[1] # Others - all others have a low probability of winning probability_cap <- 0.4 gen_others_prob <- function() { p <- runif(1, min=0.01, max=probability_cap) # probability of winning capped at probability_cap vector <- sample(c(1:100), size=length(outcomes)-1, replace=TRUE) vector <- ((1-p)*vector)/sum(vector) others_prob <- c(p, vector) # set p as the probability of winning for others return(others_prob) } gen_others_prob()[1] # Play - one round so that each player gets a chance according to the order of priority play <- function() { for (i in players) { if (i == probabilistic_winner) { winner_prob <- gen_winner_prob() x <- sample(outcomes, size=1, replace=TRUE, prob = winner_prob) # use high probability of winning } if (i != probabilistic_winner) { others_prob <- gen_others_prob() x <- sample(outcomes, size=1, replace=TRUE, prob = others_prob) # no enhanced probability of winning } if (x==winningOutcome) return(i) } } # Game - keep playing until the game ends with a winner game <- function() { counter <- 1 outcome <- integer(2) while (TRUE) { result <- play() outcome[2] <- counter if (is.null(result)==FALSE) { outcome[1] <- result return(outcome) } else { counter <- counter + 1 } } } # Define simulation parameters and run it n_trials <- 1000 # play the same game with 1000 different women gameLength <- integer(n_trials) # keep track of the number of rounds needed to complete the game victoryTable <- integer(n_trials) # keep track of the winners for (i in c(1:n_trials)) { winner <- game() gameLength[i] <- winner[2] victoryTable[i] <- winner[1] } 
# Show the frequency table of the victories of the players frequency_victory <- table(victoryTable) print(frequency_victory) # Show the victory table as a barplot - look at the 8th column!! barplot(frequency_victory, xlab = "Players", ylab = "No. of victories", main = "Victory table") # Show the frequency table of the lengths of the games frequency_gameLength <- table(gameLength) print(frequency_gameLength) # Show the histogram of the lengths of the games hist(gameLength, col = "gray", xlab = "No. of rounds", ylab = "No. of games", main = "Game length distribution") ###Output _____no_output_____
02_own_module_and_files_solutions.ipynb
###Markdown Info Sections- [Writing-own-modules](Writing-own-modules) - [Develop a test case](Develop-a-test-case) - [Save the function to a file](Save-the-function-to-a-file) - [Create our own module](Create-our-own-module) Writing own modules Develop a test case At first we need a function that do some work. ###Code ages = {'Andi': 88, 'Andrew': 78, 'Andy': 85, 'Emily': 6, 'Karren': 45, 'Lewis': 19, 'Peter': 19, 'Petra': 24, 'Sue': 23} locations = { 'Berlin' : ['Andi', 'Karren', 'Peter', 'Petra'], 'Stuttgart' : ['Andrew', 'Emily'], 'Munich' : ['Sue'], 'Hamburg' : ['Andy', 'Lewis'], } ###Output _____no_output_____ ###Markdown Let's assume we got some big dataset customer, a brief representative test set is presented above. We got the task to format the data nicely with the following requirements: Format of the output: ` is years old and lives in .` The output should be ordered by the `age` of the persons.Find a way to represent the data in the given way ###Code #@solution # Version 1 # using OrderedDict from collections import OrderedDict sorted_list = sorted(ages.items(), key=lambda x: x[1]) ages_ordered = OrderedDict(sorted_list) for name, age in ages_ordered.items(): city = [city for city, members in locations.items() if name in members][0] print('{} is {} years old and lives in {}.'.format(name, age, city)) #@solution # Version 2 # iterate over a list of tuples sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = [city for city, members in locations.items() if name in members][0] print('{} is {} years old and lives in {}.'.format(name, age, city)) #@solution # Version 3 # iterate over a list of tuples with inner loop sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: found = None for city, members in locations.items(): if name in members: found=city break assert found is not None, "No City found" city = found print('{} is {} years old and lives in {}.'.format(name, age, city)) ###Output Emily is 6 years old and lives in Stuttgart. Lewis is 19 years old and lives in Hamburg. Peter is 19 years old and lives in Berlin. Sue is 23 years old and lives in Munich. Petra is 24 years old and lives in Berlin. Karren is 45 years old and lives in Berlin. Andrew is 78 years old and lives in Stuttgart. Andy is 85 years old and lives in Hamburg. Andi is 88 years old and lives in Berlin. ###Markdown Put the routine to find the city to a `name` into a function. So we got an easy loop with only: ```python city = get_city(name) print(" is years old and lives in ")``` ###Code #@solution def get_city(name): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = get_city(name) print('{} is {} years old and lives in {}.'.format(name, age, city)) ###Output Emily is 6 years old and lives in Stuttgart. Lewis is 19 years old and lives in Hamburg. Peter is 19 years old and lives in Berlin. Sue is 23 years old and lives in Munich. Petra is 24 years old and lives in Berlin. Karren is 45 years old and lives in Berlin. Andrew is 78 years old and lives in Stuttgart. Andy is 85 years old and lives in Hamburg. Andi is 88 years old and lives in Berlin. ###Markdown Save the function to a file Task: store every thing into a function. 
one function `get_city(name) -> city` one function `main(ages, locations) -> None` (but prints in the format ` is years old and lives in) ` test `main()` ###Code #@solution def get_city(name): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations): sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = get_city(name) print('{} is {} years old and lives in {}.'.format(name, age, city)) main(ages, locations) ###Output Emily is 6 years old and lives in Stuttgart. Lewis is 19 years old and lives in Hamburg. Peter is 19 years old and lives in Berlin. Sue is 23 years old and lives in Munich. Petra is 24 years old and lives in Berlin. Karren is 45 years old and lives in Berlin. Andrew is 78 years old and lives in Stuttgart. Andy is 85 years old and lives in Hamburg. Andi is 88 years old and lives in Berlin. ###Markdown You can save the content of a cell into a `.py` file using so call [Built-in magic commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html)* Line magics start with `%command` are commands only run in that line* cell magics start with `%%comannd` and are executed for the whole cell- use `%%writefile myscript.py` to write the content of a cell to a file ###Code #@solution %%writefile myroutine.py # Note: %%writefile myroutine.py has to be the first command of a line. (remove #@solution) def get_city(name): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = get_city(name) print('{} is {} years old and lives in {}.'.format(name, age, city)) ###Output _____no_output_____ ###Markdown The content is now written to `myroutine.py`.But not executed for us. We can check it by using `help(main)` and see if our `docstring` is here. ###Code #@solution help(main) ###Output Help on function main in module __main__: main(ages, locations) ###Markdown Let's `import` `myroutine` and check it out ###Code #@solution import myroutine #@solution myroutine.main(ages, locations) ###Output _____no_output_____ ###Markdown This should fail since we defined to use `location` from the global variable space.Which is **not** defined in the variable space of the module `myroutine`. ###Code #@solution 'locations' in vars() #@solution 'locations' in vars(myroutine) ###Output _____no_output_____ ###Markdown To make it work we have to rewrite the `py` file and provide `locations` as `argument` so we can pass it to the function. ###Code #@solution %%writefile myroutine.py # Note: %%writefile myroutine.py has to be the first command of a line. 
(remove #@solution) def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = get_city(name, locations) print('{} is {} years old and lives in {}.'.format(name, age, city)) #@solution import myroutine myroutine.main(ages, locations) ###Output _____no_output_____ ###Markdown Since we allready imported the `myroutine` is is not changed again.To fix this we can:* **restart** the kernel (kernel restart), **redfine the variables**, and **import** it again* use the funtction `reload` from `importlib` (`python3` only!)```pythonfrom importlib import reloadreload(myroutine)myroutine.main(ages, locations)```* use **line magic** `%run myroutine` to execute the code, then the functions are also registered in our `__main__` space! and things work nicely ###Code #@solution from importlib import reload reload(myroutine) myroutine.main(ages, locations) #@solution %run myroutine main(ages, locations) # check how get_city looks like help(get_city) ages = {'Andi': 88, 'Andrew': 78, 'Andy': 85, 'Emily': 6, 'Karren': 45, 'Lewis': 19, 'Peter': 19, 'Petra': 24, 'Sue': 23} locations = { 'Berlin' : ['Andi', 'Karren', 'Peter', 'Petra'], 'Stuttgart' : ['Andrew', 'Emily'], 'Munich' : ['Sue'], 'Hamburg' : ['Andy', 'Lewis'], } import myroutine myroutine.main(ages, locations) ###Output _____no_output_____ ###Markdown We can also import individual functions from it ###Code #@solution from myroutine import get_city help(get_city) ###Output _____no_output_____ ###Markdown Create an output The native way to handle context of a file is the `open()` function.It's most important arguments are `file` and `mode`.- `file` : filename- `mode` : - `r` : read - `w` : write - `a` : append - `rb` : read in binary format - `wb` : write in binary format - `ab` : append in binary format Create a file pointer (`fp`) to the file using `r` (read) mode. ###Code #@solution fp = open('myroutine.py', 'r') fp ###Output _____no_output_____ ###Markdown Read the content. ###Code #@solution content = fp.read() print(content) ###Output _____no_output_____ ###Markdown As you can see here is the content of your file.As in other languages we have to close our file pointer (`fp`) again. ###Code #@solution fp.close() ###Output _____no_output_____ ###Markdown We can also save something to a file the same way. ###Code #@solution fp = open("my_output.txt", 'w') fp.write("A new file") fp.write("A second line") fp.close() ###Output _____no_output_____ ###Markdown Let's read the context. ###Code #@solution print(open("my_output.txt").read()) ###Output A new fileA second line ###Markdown Seems as we don't have a second line here. Let's fix it. ###Code #@solution fp = open("my_output.txt", 'w') fp.write("A new file\n") # note the \n char here, meaning \newline fp.write("A second line\n") fp.close() #@solution print(open("my_output.txt").read()) ###Output A new file A second line ###Markdown If we want to `print` the lines one by one. We can get them as seperatly. ###Code #@solution fp = open("my_output.txt", 'r') lines = fp.readlines() fp.close() #@solution lines ###Output _____no_output_____ ###Markdown Now you know the very basics how to `read` and `write` a file. More is provided on the last day in the lecture about parsing a config file. 
Our customer requires us to store the output into a file which he can use. Task: Extend the main function so we can print the output into a variable file. check the code with: ```pythonprint(open('results.txt').read())``` here are our functions again and the variables ###Code ages = {'Andi': 88, 'Andrew': 78, 'Andy': 85, 'Emily': 6, 'Karren': 45, 'Lewis': 19, 'Peter': 19, 'Petra': 24, 'Sue': 23} locations = { 'Berlin' : ['Andi', 'Karren', 'Peter', 'Petra'], 'Stuttgart' : ['Andrew', 'Emily'], 'Munich' : ['Sue'], 'Hamburg' : ['Andy', 'Lewis'], } #%%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) for name, age in sorted_list: city = get_city(name, locations) print('{} is {} years old and lives in {}.'.format(name, age, city)) #@solution #%%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() ###Output _____no_output_____ ###Markdown Update our file `myroutine.py` again ###Code #@solution %%writefile myroutine.py # Note: %%writefile myroutine.py has to be the first command of a line. (remove #@solution) def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() ###Output _____no_output_____ ###Markdown Create our own real module Structure of a module```text.└── module_name ├── __init__.py ├── somefunc.py └── submodule ├── __init__.py └── someotherfunc.py``` Let's create this structure by hand. ###Code #@solution # create folders structure with python import os os.mkdir("test_module") os.mkdir("test_module/submodule") open(os.path.join('test_module', '__init__.py'), 'w').close() open(os.path.join('test_module', 'submodule', '__init__.py'), 'w').close() from shutil import copy2 copy2('myroutine.py', os.path.join('test_module', 'myroutine.py')) ###Output _____no_output_____ ###Markdown Let's test our module ###Code #@solution import test_module #@solution test_module.myroutine #@solution %%writefile test_module/__init__.py from . import myroutine ###Output _____no_output_____ ###Markdown You could also use `from .myroutine import *` to get all functions in `test_modules`. ###Code #@solution reload(test_module) #@solution help(test_module.myroutine.get_city) ###Output _____no_output_____ ###Markdown If you want to add `docstrings`, place them at the top of the file. Then you get an help for your module. 
###Code #@solution %%writefile test_module/submodule/__init__.py """ Some DocString """ ###Output _____no_output_____ ###Markdown We also have to add the package to the main `__init__.py`. ###Code #@solution %%writefile test_module/__init__.py from . import (myroutine, submodule) #@solution reload(test_module) #@solution help(test_module.submodule) #@solution # cleanup os.remove(os.path.join('test_module', 'submodule', '__init__.py')) os.rmdir("test_module/submodule") os.remove(os.path.join('test_module', 'myroutine.py')) os.remove(os.path.join('test_module', '__init__.py')) os.rmdir("test_module") ###Output _____no_output_____ ###Markdown Run a own file Let's make our file `myroutine.py` importable but also executable as python script. ###Code %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() ###Output Writing myroutine.py ###Markdown As first test we add a print statement.```pythonprint("My awesome script")``` ###Code #@solution %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() print("My awesome script") ###Output _____no_output_____ ###Markdown Let's test it ###Code #@solution %run myroutine.py ###Output _____no_output_____ ###Markdown Let's reload it ###Code #@solution reload(myroutine) ###Output _____no_output_____ ###Markdown It seems that everything in the script is executed when imported, but maybe we can avoid it. If we want to have a file which we can `import` as module to get its `functions` but alsois able to be used as script, we can use a construct as:```pythonif __name__ == '__main__': command()```To make sure some code is only run during execution and not when imported. ###Code #@solution __name__ ###Output _____no_output_____ ###Markdown Let's test it in the script by adding.```pythonprint('My name is {}'.format(__name__))``` ###Code #@solution %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() print("My awesome script") print('My name is {}'.format(__name__)) #@solution %run myroutine.py #@solution reload(myroutine) ###Output _____no_output_____ ###Markdown So it seems `__name__` depends on the location where its execute, let's use it. 
###Code #@solution %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() if __name__ == '__main__': print("My awesome script") print('My name is {}'.format(__name__)) #@solution %run myroutine.py #@solution reload(myroutine) ###Output _____no_output_____ ###Markdown Everything seems to work now. sIf we want to make that script usable we need some data.Checkout `customer_data.json`. We can import the data using the package `json`.```pythonimport jsonfp = open("customer_data.json")json.load(fp) fp : is a file pointer or directlyjson.load(open("customer_data.json"))``` ###Code #@solution import json #@solution data = json.load(open("customer_data.json")) ###Output _____no_output_____ ###Markdown let's test it ###Code #@solution len(data) #@solution data[0] #@solution ages, locations = data ###Output _____no_output_____ ###Markdown Now we extent our script so we can give it an arbitrary file. ###Code #@solution %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() if __name__ == '__main__': import json ages, locations = json.load(open("customer_data.json")) main(ages, locations, fname='results.txt') ###Output _____no_output_____ ###Markdown If we want to provide the names via command line we can use the `sys` package. ###Code #@solution import sys ###Output _____no_output_____ ###Markdown We can access the command line arguments with `sys.argv`. ###Code #@solution sys.argv ###Output _____no_output_____ ###Markdown Let's put it in our function and make sure it will exit if the input is not provided. ###Code #@solution %%writefile myroutine.py def get_city(name, locations): "Function to get the city for a person" found = None for city, members in locations.items(): if name in members: found=city return found assert found is not None, "No City found" def main(ages, locations, fname='results.txt'): "our main function" sorted_list = sorted(ages.items(), key=lambda x: x[1]) fp = open(fname, 'w') for name, age in sorted_list: city = get_city(name, locations) fp.write('{} is {} years old and lives in {}.\n'.format(name, age, city)) fp.close() if __name__ == '__main__': import sys import json if len(sys.argv) != 3: print('run: python myroutine.py input.json output.txt') sys.exit(1) ages, locations = json.load(open(sys.argv[1])) main(ages, locations, fname=sys.argv[2]) #@solution %run myroutine.py #@solution %run myroutine.py customer_data.json results1.txt ###Output _____no_output_____
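###Markdown As a quick check that the `__name__` guard does what we want, we can import `myroutine` as a module and call `main` directly — the command-line block should not run on import. This is only a sketch; the output file name `results_from_import.txt` is just an example, and you may want to reload `myroutine` first if an older version is still cached in the session. ###Code
import json
import myroutine  # the `if __name__ == '__main__':` block does not run on import

ages, locations = json.load(open("customer_data.json"))
myroutine.main(ages, locations, fname='results_from_import.txt')
print(open('results_from_import.txt').read())
###Output _____no_output_____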
tensorflow-in-sagemaker-workshop/4_Deploying_your_TensorFlow_model.ipynb
###Markdown Deploying a trained Keras model with Amazon SageMaker This notebook shows how to run real-time inference with the [SageMaker TensorFlow Serving container](https://github.com/aws/sagemaker-tensorflow-serving-container). The TensorFlow Serving container is the default inference method for script mode. For more detailed documentation, see [here](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/deploying_tensorflow_serving.rst). To deploy a model with the Amazon SageMaker Python SDK, you create a SageMaker model using the deploy() method of the Estimator class. The model is deployed to an endpoint that serves prediction requests in real time. Since training was done in script mode, the endpoint uses the TensorFlow Serving container. This serving container runs a web server implementation compatible with the SageMaker hosting protocol. Using your own inference code The documentation describes how SageMaker runs inference containers. To deploy a Keras/TensorFlow model, the model must be saved in the TensorFlow SavedModel format. In this hands-on it is already implemented in the training script as `def save_model(model, output):`. The input and output data formats correspond directly to the request and response formats of the Predict method of the [TensorFlow Serving REST API](https://www.tensorflow.org/tfx/serving/api_rest). SageMaker TensorFlow Serving endpoints can also accept additional input formats that are not part of the TensorFlow REST API, such as a simplified JSON format, line-delimited JSON objects ("jsons" or "jsonlines"), and CSV data. Deploying the trained model Instead of training a new model, let's reuse a model that has already been trained. ###Code import os import sagemaker from sagemaker import get_execution_role sagemaker_session = sagemaker.Session() role = get_execution_role() from sagemaker.tensorflow import TensorFlow estimator = TensorFlow(base_job_name='cifar10', entry_point='cifar10_keras_sm.py', source_dir='training_script', role=role, framework_version='1.12.0', py_version='py3', hyperparameters={'epochs' : 5}, train_instance_count=1, train_instance_type='ml.p2.xlarge') ###Output _____no_output_____ ###Markdown This time we use a model that was trained earlier. Replace `training_job_name` in the cell below with the name of your previous training job. ###Code estimator = estimator.attach(training_job_name='cifar10-2019-10-01-05-56-25-075') ## use the name of a training job you ran previously predictor = estimator.deploy(initial_instance_count=1,instance_type='ml.m4.xlarge') ###Output _____no_output_____ ###Markdown Running inference Let's check that the endpoint behaves as expected by running inference on random data. ###Code # Creating fake prediction data import numpy as np data = np.random.randn(1, 32, 32, 3) print("Predicted class is {}".format(np.argmax(predictor.predict(data)['predictions']))) ###Output _____no_output_____ ###Markdown Measuring model accuracy with the test data Let's build a confusion matrix from the test data set and measure the model's accuracy. ###Code from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from sklearn.metrics import confusion_matrix datagen = ImageDataGenerator() (x_train, y_train), (x_test, y_test) = cifar10.load_data() def predict(data): predictions = predictor.predict(data)['predictions'] return predictions batch_size = 128 predicted = [] actual = [] batches = 0 for data in datagen.flow(x_test,y_test,batch_size=batch_size): for i,prediction in enumerate(predict(data[0])): predicted.append(np.argmax(prediction)) actual.append(data[1][i][0]) batches += 1 if batches >= len(x_test) / batch_size: break from sklearn.metrics import accuracy_score, confusion_matrix accuracy = accuracy_score(y_pred=predicted,y_true=actual) display('Average accuracy: {}%'.format(round(accuracy*100,2))) %matplotlib inline import seaborn as sn import pandas as pd import matplotlib.pyplot as plt cm = confusion_matrix(y_pred=predicted,y_true=actual) cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] sn.set(rc={'figure.figsize':(11.7,8.27)}) sn.set(font_scale=1.4) sn.heatmap(cm, annot=True,annot_kws={"size": 10}) ###Output _____no_output_____ ###Markdown Using the heatmap, we can see the accuracy for each individual label. 
Delete the endpoint To avoid incurring unnecessary costs, delete the endpoint created above once you have finished your checks. ###Code sagemaker_session.delete_endpoint(predictor.endpoint) ###Output _____no_output_____
train_test_save_basic.ipynb
###Markdown Data Analysis ###Code data = pd.read_csv("train.csv",engine = 'python') data.tail() colum = data.columns for i in colum: print(f'{len(set(data[i]))} different values in the {i} column') print(f"\ntotal number of examples {len(data)}") #Host, link, Time(ET), Time(GMT),is of no use for trainig the function data = data.drop(["Host", "Link", "Date(ET)", "Time(ET)", "time(GMT)"], axis=1) colum = data.columns for i in colum: print(f'{len(set(data[i]))} different values in the {i} column') print(f"\ntotal number of examples {len(data)}") list(set(data["Source"])) # differnet values in "Source" column # repalcing FACEBOOK to Facebook data.replace(to_replace='FACEBOOK', value='Facebook',inplace=True) # Now there are only 4 different values in "Source" column Counter(data.loc[:,"Source"]) # # distribution of differnet values in "Source" column Counter(data.iloc[:,[-1]]['Patient_Tag']) # distribution of labels in the "Patien_Tag" column # It's an unbalanced data dummy = {} for i in list(set(data["Source"])): print(i,"---", Counter(data.iloc[:,[0,-1]][data['Source'] == i]['Patient_Tag'])) # distribution of labels with reference to each values in "Source" column replace_ = {} for index, i in enumerate(list(set(data["Source"])),start=1): replace_[index] = i data.replace(to_replace=i, value=index,inplace=True) data.fillna('UNK',inplace=True) list(set(data["Source"])) data.fillna('UNK',inplace=True) ###Output _____no_output_____ ###Markdown Vocab creation ###Code import re rep_with = ['.', '?', '/', '\n', '(', ')','[', ']', '{', '}', '-','"','!', '|' ] def rep_(sent): for i in rep_with: sent = sent.replace(i,' ').replace('$', ' ').replace(',','').replace("'",'') return sent import re import num2words def n2w(text): return re.sub(r"(\d+)", lambda x: num2words.num2words(int(x.group(0))), text) def preprocess(data,pos): sent = [] for i in range(len(data)): try:sent.append(n2w(rep_(data.iloc[i,pos]))) except:print(data.iloc[i,pos]) return sent sent = preprocess(data, 2) sent[0] from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer vectorizer = TfidfVectorizer() vectorizer.fit(sent) # summarize print("length of vocabulry --",len(vectorizer.vocabulary_)) # print(vectorizer_1.vocabulary_) # print(vectorizer_2.idf_) vector = vectorizer.transform(sent) ###Output _____no_output_____ ###Markdown Training ###Code x = np.array(vector.toarray()) # text m = np.array(data['Source']).reshape(1157,1) # source x = np.concatenate((m,x),axis=1) # source + text y = np.array(data.loc[:,'Patient_Tag']) # label x.shape, y.shape from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.2) from sklearn.linear_model import LogisticRegression model = LogisticRegression(class_weight='balanced', C=1.5) model.fit(x_train, y_train) predictions = model.predict(x_test) score = model.score(x_test,y_test) score from sklearn import svm model = svm.SVC(class_weight='balanced', C=1.5) model.fit(x_train, y_train,) predictions = model.predict(x_test) score = model.score(x_test,y_test) score import xgboost as xgb data_dmatrix = xgb.DMatrix(data=x_train,label=y_train) xg_reg = xgb.XGBClassifier(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.05, max_depth = 100, alpha = 0.75, n_estimators = 100, n_jobs = 32) xg_reg.fit(x_train,y_train) predictions = xg_reg.predict(x_test) score = xg_reg.score(x_test,y_test) score params = {"objective":"reg:linear",'colsample_bytree': 0.3,'learning_rate': 0.1, 'max_depth': 5, 'alpha': 10} cv_results 
= xgb.cv(dtrain=data_dmatrix, params=params, nfold=3,num_boost_round=50, early_stopping_rounds=10,metrics="merror", as_pandas=True, seed=123) cv_results ###Output _____no_output_____ ###Markdown Results analysis ###Code import seaborn as sns from sklearn import metrics import matplotlib.pyplot as plt cm = metrics.confusion_matrix(y_test, predictions) plt.figure(figsize=(9,9)) sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r'); plt.ylabel('Actual label'); plt.xlabel('Predicted label'); all_sample_title = 'Accuracy Score: {0}'.format(score) plt.title(all_sample_title, size = 15); ###Output _____no_output_____ ###Markdown Testing ###Code data_ = pd.read_csv("test.csv",engine='python') data_ = data_.drop(["Host", "Link", "Date(ET)", "Time(ET)", "time(GMT)", "Unnamed: 9", "Index"], axis=1) data_.replace(to_replace='FACEBOOK', value='Facebook',inplace=True) replace_ = {} for index, i in enumerate(list(set(data_["Source"])),start=1): replace_[index] = i data_.replace(to_replace=i, value=index,inplace=True) data_.fillna('UNK',inplace=True) sent = preprocess(data_, 2) data_.head() vector = vectorizer.transform(sent) x = np.array(vector.toarray()) m = np.array(data_['Source']).reshape(571,1) # source x = np.concatenate((m,x),axis=1) # source + text # predictions = model.predict(x) predictions = xg_reg.predict(x) predictions.shape ###Output _____no_output_____ ###Markdown Saving ###Code format = 'Index,Patient_Tag\n' for index, i in enumerate(predictions,start=1): format+=str(index)+','+str(i) + '\n' with open("submission_7.csv", "w") as f: f.write(format.strip()) ###Output _____no_output_____
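###Markdown If the fitted TF-IDF vectorizer and the XGBoost model are to be reused later without retraining, they can be persisted alongside the submission. A minimal sketch using joblib (assumed to be installed; the file names below are arbitrary choices, not part of the original workflow): ###Code
import joblib

# Persist the fitted vectorizer and model so the pipeline can be reloaded without retraining
joblib.dump(vectorizer, 'tfidf_vectorizer.joblib')
joblib.dump(xg_reg, 'xgb_model.joblib')

# Later: reload both objects and prepare new text the same way as above before predicting
vectorizer_loaded = joblib.load('tfidf_vectorizer.joblib')
model_loaded = joblib.load('xgb_model.joblib')
###Output _____no_output_____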
notebooks/2.0-rs-engineer_features.ipynb
###Markdown Build Features 1. Features on Center_id ###Code # Create a copy of the datasets to build features on train_df = train.copy() val_df = val.copy() test_df = test.copy() full_original_train_data_df = full_original_train_data.copy() full_original_test_data_df = full_original_test_data.copy() train_df.head(2) # Generate 1st derived feature --> Discount train_df['discount'] = train_df['base_price'] - train_df['checkout_price'] val_df['discount'] = val_df['base_price'] - val_df['checkout_price'] test_df['discount'] = test_df['base_price'] - test_df['checkout_price'] full_original_train_data_df['discount'] = full_original_train_data_df['base_price'] - full_original_train_data_df['checkout_price'] full_original_test_data_df['discount'] = full_original_test_data_df['base_price'] - full_original_test_data_df['checkout_price'] # Compute the mean values of Checkout Price (CP) and discount (D) for all meal_ids within a week--center_id combination train_df_1 = build_features.features_by_center(train_df) val_df_1 = build_features.features_by_center(val_df) test_df_1 = build_features.features_by_center(test_df) full_original_train_data_df_1 = build_features.features_by_center(full_original_train_data_df) full_original_test_data_df_1 = build_features.features_by_center(full_original_test_data_df) print(train_df_1.shape) print(val_df_1.shape) print(test_df_1.shape) print(full_original_train_data_df_1.shape) print(full_original_test_data_df_1.shape) # Compute the total meals for each week--center_id combination train_df_2 = build_features.total_meals_by_center(train_df_1) val_df_2 = build_features.total_meals_by_center(val_df_1) test_df_2 = build_features.total_meals_by_center(test_df_1) full_original_train_data_df_2 = build_features.total_meals_by_center(full_original_train_data_df_1) full_original_test_data_df_2 = build_features.total_meals_by_center(full_original_test_data_df_1) print(train_df_2.shape) print(val_df_2.shape) print(test_df_2.shape) print(full_original_train_data_df_2.shape) print(full_original_test_data_df_2.shape) ###Output (407243, 53) (32929, 53) (16376, 53) (456548, 53) (32573, 52) ###Markdown Features on EP and HF ###Code # Find the number of meal_ids by category and cuisine that were featured on Homepage and number of meal_ids # that were promoted by emailers train_df_3 = build_features.features_by_ep_or_hf(train_df_2) val_df_3 = build_features.features_by_ep_or_hf(val_df_2) test_df_3 = build_features.features_by_ep_or_hf(test_df_2) full_original_train_data_df_3 = build_features.features_by_ep_or_hf(full_original_train_data_df_2) full_original_test_data_df_3 = build_features.features_by_ep_or_hf(full_original_test_data_df_2) print(train_df_3.shape) print(val_df_3.shape) print(test_df_3.shape) print(full_original_train_data_df_3.shape) print(full_original_test_data_df_3.shape) ###Output (407243, 57) (32929, 57) (16376, 57) (456548, 57) (32573, 56) ###Markdown Features on City and Region ###Code # Compute total and mean operating area for each region and city and # ratio of center op area to total region op area and city op area train_df_4 = build_features.features_by_city_or_region(train_df_3) val_df_4 = build_features.features_by_city_or_region(val_df_3) test_df_4 = build_features.features_by_city_or_region(test_df_3) full_original_train_data_df_4 = build_features.features_by_city_or_region(full_original_train_data_df_3) full_original_test_data_df_4 = build_features.features_by_city_or_region(full_original_test_data_df_3) print(train_df_4.shape) print(val_df_4.shape) 
print(test_df_4.shape) print(full_original_train_data_df_4.shape) print(full_original_test_data_df_4.shape) ###Output (407243, 61) (32929, 61) (16376, 61) (456548, 61) (32573, 60) ###Markdown Temporal Features ###Code # Check if a meal--center combination was promoted by email or featured on homepage last week # or the week before and the cumulative sum of all previous promotions and features train_df_5 = build_features.temporal_features_set_1(train_df_4) val_df_5 = build_features.temporal_features_set_1(val_df_4) test_df_5 = build_features.temporal_features_set_1(test_df_4) full_original_train_data_df_5 = build_features.temporal_features_set_1(full_original_train_data_df_4) full_original_test_data_df_5 = build_features.temporal_features_set_1(full_original_test_data_df_4) print(train_df_5.shape) print(val_df_5.shape) print(test_df_5.shape) print(full_original_train_data_df_5.shape) print(full_original_test_data_df_5.shape) # Compute last week checkout price and last week discount of each meal--center combination # and check if current set of checkout price and discount is greater than last week's train_df_6 = build_features.temporal_features_set_2(train_df_5) val_df_6 = build_features.temporal_features_set_2(val_df_5) test_df_6 = build_features.temporal_features_set_2(test_df_5) full_original_train_data_df_6 = build_features.temporal_features_set_2(full_original_train_data_df_5) full_original_test_data_df_6 = build_features.temporal_features_set_2(full_original_test_data_df_5) print(train_df_6.shape) print(val_df_6.shape) print(test_df_6.shape) print(full_original_train_data_df_6.shape) print(full_original_test_data_df_6.shape) # Create Label Encoder features for different set of cuisine and categories train_df_7 = build_features.features_by_cui_or_cat(train_df_6) val_df_7 = build_features.features_by_cui_or_cat(val_df_6) test_df_7 = build_features.features_by_cui_or_cat(test_df_6) full_original_train_data_df_7 = build_features.features_by_cui_or_cat(full_original_train_data_df_6) full_original_test_data_df_7 = build_features.features_by_cui_or_cat(full_original_test_data_df_6) print(train_df_7.shape) print(val_df_7.shape) print(test_df_7.shape) print(full_original_train_data_df_7.shape) print(full_original_test_data_df_7.shape) # Create a separate dataframe for features train_features = train_df_7.drop(['num_orders'], axis=1).copy() val_features = val_df_7.drop(['num_orders'], axis=1).copy() test_features = test_df_7.drop(['num_orders'], axis=1).copy() full_original_train_features = full_original_train_data_df_7.drop(['num_orders'], axis=1).copy() full_original_test_features = full_original_test_data_df_7.copy() # Create a separate dataframe for targets train_target = train_df_7['num_orders'].copy() val_target = val_df_7['num_orders'].copy() test_target = test_df_7['num_orders'].copy() full_original_train_target = full_original_train_data_df_7['num_orders'].copy() # Save the features dataframe to disk train_features.to_csv(r'../data/processed/built_features/train_features.csv', index=False) val_features.to_csv(r'../data/processed/built_features/val_features.csv', index=False) test_features.to_csv(r'../data/processed/built_features/test_features.csv', index=False) full_original_train_features.to_csv(r'../data/processed/built_features/full_original_train_features.csv', index=False) full_original_test_features.to_csv(r'../data/processed/built_features/full_original_test_features.csv', index=False) # Save the target dataframe to disk 
train_target.to_csv(r'../data/processed/target/train_target.csv', index=False) val_target.to_csv(r'../data/processed/target/val_target.csv', index=False) test_target.to_csv(r'../data/processed/target/test_target.csv', index=False) full_original_train_target.to_csv(r'../data/processed/target/full_original_train_target.csv', index=False) ###Output _____no_output_____
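###Markdown As a quick sanity check on the exported files, the saved feature sets can be read back and their shapes compared with the dataframes built above. A small sketch (the paths match the `to_csv` calls above): ###Code
import pandas as pd

# Round-trip check: the saved CSVs should come back with the same shapes as the in-memory frames
train_features_check = pd.read_csv(r'../data/processed/built_features/train_features.csv')
val_features_check = pd.read_csv(r'../data/processed/built_features/val_features.csv')
print(train_features_check.shape, train_features.shape)
print(val_features_check.shape, val_features.shape)
###Output _____no_output_____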
test/02_adjust_data.ipynb
###Markdown DAEE ###Code #!pip3 install traquitanas --upgrade import os import shutil import glob import numpy as np import pandas as pd import geopandas as gpd from datetime import datetime from shapely.geometry import Point from traquitanas import utils from paths import * ###Output _____no_output_____ ###Markdown Dados Tabulares Read Data ###Code latest_file = max(glob.glob(f'{bruto_path}/*', recursive=True), key=os.path.getmtime) latest_file newestFileCreationDate = datetime.utcfromtimestamp(os.path.getctime(latest_file)) newestFileCreationDate # my_file = os.path.join(latest_file) # ddd enc = utils.predict_encoding(my_file, n_lines=1000) enc df = pd.read_csv( os.path.join(latest_file), sep=';', skiprows=1, #encoding=enc, encoding='latin1', dtype={ # Identificação #'QTD': 'quantidade', #'COD BACIA': 'cod_bacia', 'NOME RIO/AQUIFERO': 'object', 'COD RIO/POCO(DAEE)': 'object', 'DIST_FOZ(Km)': 'object', # Administrativo #'USUARIO': 'usuario', #'AUTOS(DAEE)': 'autos_daee', #'USO': 'uso', #'SEQ': 'seq', #'FINALID_USO': 'finalidade_uso', #'SITUAC_ADMIN(DAEE)': 'situacao_administrativa', # Vazão #'VAZAO(M3/H)': 'vazao_m3h', #'HORA/DIA': 'hora_dia', #'DIA/MES': 'dia_mes', #'MES/ANO': 'mes_ano', # Coordenada 'UTM_NORTE(Km)': 'object', 'UTM_LESTE(Km)': 'object', 'UTM_MC': 'object', } ) df.info() df.head() ###Output _____no_output_____ ###Markdown Rename Columns ###Code # Colunas Originiais #display(list(df.columns)) # Renomear Colunas dict_columns = { # Identificação 'QTD': 'quantidade', 'COD BACIA': 'cod_bacia', 'NOME RIO/AQUIFERO': 'nome_rio_aquifero', 'COD RIO/POCO(DAEE)': 'cod_rio_poco', 'DIST_FOZ(Km)': 'dist_foz_km', # Administrativo 'USUARIO': 'usuario', 'AUTOS(DAEE)': 'autos_daee', 'USO': 'uso', 'SEQ': 'seq', 'FINALID_USO': 'finalidade_uso', 'SITUAC_ADMIN(DAEE)': 'situacao_administrativa', # Vazão 'VAZAO(M3/H)': 'vazao_m3h', 'HORA/DIA': 'hora_dia', 'DIA/MES': 'dia_mes', 'MES/ANO': 'mes_ano', # Coordenada 'UTM_NORTE(Km)': 'utm_norte_km', 'UTM_LESTE(Km)': 'utm_leste_km', 'UTM_MC': 'utm_mc', } # Rename Columns df.rename(columns=dict_columns, inplace=True, errors='ignore') df ###Output _____no_output_____ ###Markdown Fix Columns ###Code # Drop Columns df.drop('quantidade', axis=1, inplace=True, errors='ignore') # Distância da Foz df['dist_foz_km'] = df['dist_foz_km'].str.replace(',', '').astype(float) df['dist_foz_km'] = pd.to_numeric(df['dist_foz_km']) # Vazão df['vazao_m3h'] = df['vazao_m3h'].str.replace(',', '').astype(float) df['vazao_m3h'] = pd.to_numeric(df['vazao_m3h']) # UTM Norte df['utm_norte_km'] = df['utm_norte_km'].str.replace(',', '').astype(float) df['utm_norte_km'] = pd.to_numeric(df['utm_norte_km']) df['utm_norte_m'] = df['utm_norte_km'] * 1000 df['utm_norte_m'] = df['utm_norte_m'].astype(int) # UTM Leste df['utm_leste_km'] = df['utm_leste_km'].str.replace(',', '').astype(float) df['utm_leste_km'] = pd.to_numeric(df['utm_leste_km']) df['utm_leste_m'] = df['utm_leste_km'] * 1000 df['utm_leste_m'] = df['utm_leste_m'].astype(int) # UTM MC df['utm_mc'] = df['utm_mc'].str.replace(',', '').astype(float) df['utm_mc'] = pd.to_numeric(df['utm_mc']) df['utm_mc'] = df['utm_mc'].astype(int) # Results df # Se o dtype for object, strip for col in df.columns: if df[col].dtype == 'O': df[col] = df[col].str.strip() else: pass print('>>> {}'.format(col)) print(list(df[col][0:10])) print(' ') df.info() df.head() # Lista Colunas list_columns = list(df.columns) display(list_columns) # Lista Colunas # list_columns = [ # 'cod_bacia', #'nome_rio_aquifero', #'cod_rio_poco', #'dist_foz_km', #'usuario', 
#'autos_daee', #'uso', #'seq', #'finalidade_uso', #'situacao_administrativa', #'vazao_m3h', #'hora_dia', #'dia_mes', #'mes_ano', #'utm_norte_km', #'utm_leste_km', #'utm_mc', #'utm_norte_m', #'utm_leste_m' # ] # List of columns #for col in df.columns: display(set(df[col])) ###Output _____no_output_____ ###Markdown Errors ###Code # Error: more than 24 hours per day mask = df['hora_dia'] <= 24 mask =~ mask df.loc[mask, 'erro_data'] = 'Erro na hora/dia/mês/ano' # Error: more than 31 days per month mask = df['dia_mes'] <= 31 mask =~ mask df.loc[mask, 'erro_data'] = 'Erro na hora/dia/mês/ano' # Error: more than 12 months per year mask = df['mes_ano'] <= 12 mask =~ mask df.loc[mask, 'erro_data'] = 'Erro na hora/dia/mês/ano' # Error: coordinates not defined mask = df['utm_leste_km'] == 0 df.loc[mask, 'erro_coordenada'] = 'Erro na coordenada' # Error: coordinates not defined mask = df['utm_norte_km'] == 0 df.loc[mask, 'erro_coordenada'] = 'Erro na coordenada' # Error: central meridian of the coordinates not defined mask = (df['utm_mc'] == 45) | (df['utm_mc'] == 51) mask =~ mask df.loc[mask, 'erro_coordenada'] = 'Erro na coordenada' # Results df ###Output _____no_output_____ ###Markdown Export ###Code df.to_csv( os.path.join(output_path_tab, 'tab_daee.csv'), index=False, ) ###Output _____no_output_____
PythonJupyterNotebooks/Week10-Day3-Activity3.ipynb
###Markdown Linear Regression Error and Accuracy Metrics ###Code # Import libraries and dependencies import pandas as pd from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt from pathlib import Path %matplotlib inline # Read CSV, parse dates, and set index csvpath = Path('Resources/Week10-Day3-Activity3-weather_data_austin_2010.csv') df = pd.read_csv(csvpath, parse_dates=True, index_col='Date') df.head() # Slice data df = df.loc['2010 May':'2010 Jul'] df.head() # Plot temperature df['Temperature'].plot() ###Output _____no_output_____ ###Markdown Set Features (X) and Target (y)In this example, we want to use the week of the year to predict the temperature ###Code # Create a DataFrame of the input features (X) # Export the Temperature column as a DataFrame X = df['Temperature'].to_frame() print(f"X has {X.shape[0]} rows and {X.shape[1]} column of data.") X.head() # Generate a column with the week of the year X['Week_of_Year'] = X.index.weekofyear X.head() # Binary encode the weekly column to use as new input features for the model X_binary_encoded = pd.get_dummies(X, columns=['Week_of_Year']) X_binary_encoded.head() # The target Temperature column must be deleted from the input features X_binary_encoded = X_binary_encoded.drop('Temperature', axis=1) X_binary_encoded.head() # The target for our model is to predict the Temperature y = df['Temperature'].copy() ###Output _____no_output_____ ###Markdown Linear Regression ###Code # Create the Linear Regression model model = LinearRegression() # Fit the model to the data (Train the model) model.fit(X_binary_encoded, y) # Predict the temperature using the week of the year as inputs predictions = model.predict(X_binary_encoded) ###Output _____no_output_____ ###Markdown Evaluating the Model ###Code import numpy as np from sklearn.metrics import mean_squared_error, r2_score # The default scoring method is the R2 score score = model.score(X_binary_encoded, y) r2 = r2_score(y, predictions) print(f"Score: {score}, r2: {r2}") # Mean Squared Error mse = mean_squared_error(y, predictions) mse # Root Mean Squared Error rmse = np.sqrt(mse) rmse # Standard deviation of the temperature np.std(y) ###Output _____no_output_____ ###Markdown Plotting the Regression Line ###Code # Plot the regression line plt.scatter(X['Week_of_Year'], y) plt.plot(X['Week_of_Year'], predictions, color='red') ###Output /Users/satishsurath/opt/anaconda3/envs/pyvizenv/lib/python3.7/site-packages/matplotlib/cbook/__init__.py:1377: FutureWarning: Support for multi-dimensional indexing (e.g. `obj[:, None]`) is deprecated and will be removed in a future version. Convert to a numpy array before indexing instead. x[:, None] /Users/satishsurath/opt/anaconda3/envs/pyvizenv/lib/python3.7/site-packages/matplotlib/axes/_base.py:237: FutureWarning: Support for multi-dimensional indexing (e.g. `obj[:, None]`) is deprecated and will be removed in a future version. Convert to a numpy array before indexing instead. x = x[:, np.newaxis]
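###Markdown One informal way to read the two numbers above together: a model no better than predicting the mean temperature would have an RMSE close to the standard deviation of the target, so the ratio of RMSE to that standard deviation gives a rough sense of how much of the spread the weekly dummies explain. A small sketch using the values already computed above: ###Code
# Compare the model's RMSE with the spread of the target itself
relative_rmse = rmse / np.std(y)
print(f"RMSE is {relative_rmse:.2%} of the target's standard deviation")
###Output _____no_output_____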
Solving QC/Vector Operations.ipynb
###Markdown **Vector Operations** ###Code from numpy import array vector1 = array([1, 2, 3]) vector2 = array([2, 3, 4]) print("dot product of", vector1,", ",vector2,"is") product = vector1.dot(vector2) print(product) scalarval = 0.3 scalarprod = scalarval*vector1 print("scalar multiplied",scalarval,",",vector1,"is") print(scalarprod) print("sum of", vector1,", ",vector2,"is") sum = vector1 + vector2 print(sum) print("difference of", vector1,", ",vector2,"is") diff = vector1 - vector2 print(diff) print("product of", vector1,", ",vector2,"is") prod = vector1 * vector2 print(prod) print("division of", vector1,", ",vector2,"is") dividedby = vector1 / vector2 print(dividedby) ###Output division of [1 2 3] , [2 3 4] is [0.5 0.66666667 0.75 ]
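###Markdown To tie two of the operations above together: the dot product is simply the sum of the element-wise products, which we can verify directly with the same vectors: ###Code
# The dot product equals the sum of the element-wise products
manual_dot = (vector1 * vector2).sum()
print("sum of element-wise products:", manual_dot)
print("same as vector1.dot(vector2):", manual_dot == vector1.dot(vector2))
###Output _____no_output_____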
notebooks/05-00 Statistics/UCSL_Optimization.ipynb
###Markdown Optimization problem:$$F(x)\to \min\,(\max)$$$$x^*=\arg\min(\max)_X F(x)$$A common family of iterative optimization methods is known as "gradient descent":$$x^{j+1}=x^j-(+)\lambda_j \nabla F(x^j),$$where $\nabla F(x)$ denotes the gradient vector of $F$ at the point $x$, while $\lambda_j$ are positive real numbers chosen so that $F(x^{j+1})<(>)F(x^j)$. Optimization example. Consider several points on a map and look for a centroid location such that the sum of distances from it to the given points is minimal:$$\sum\limits_i \sqrt{(x_i-x^*)^2+(y_i-y^*)^2}\to \min$$ ###Code #import all the required libraries import numpy as np from numpy import random from scipy import stats import matplotlib.pyplot as plt %pylab inline random.seed(2015) #fix an initial point of the random generator #create 20 random locations with uniformly distributed coordinates between 0 and 10 x=stats.uniform.rvs(0,10,20) y=stats.uniform.rvs(0,10,20) #plot the points plt.figure() plt.plot(x,y,'go') plt.xlim(0,10) plt.ylim(0,10) from scipy.optimize import minimize def f(xy): #function computing cumulative distance from a considered centroid point to all the given ones return sum(np.sqrt((np.square(x-xy[0])+np.square(y-xy[1])))) def f_der(xy): #gradient of the function above with respect to the coordinates x,y of the considered point f_=np.sqrt((np.square(x-xy[0])+np.square(y-xy[1]))) return -np.array([sum((x-xy[0])/f_),sum((y-xy[1])/f_)]) #plot the given points plt.figure() plt.plot(x,y,'go') plt.xlim(0,10) plt.ylim(0,10) #set up initial centroid location prevpoint=[0, 1]; def visiter(params): #visualize current iteration results connecting previous centroid location to the new one global prevpoint plt.plot([prevpoint[0],params[0]],[prevpoint[1],params[1]],'b:o') plt.plot(params[0],params[1],'ro') prevpoint=params print(params) #find an optimal centroid location minimizing the cumulative distance to the given points (f) using the gradient (f_der) #with respect to given accuracy gtol, visualizing results of each iteration minimize(f, prevpoint, method='BFGS', jac=f_der, options={'gtol': 1e-4, 'disp': True}, callback=visiter) ###Output [ 3.80114465 5.36184751] [ 3.76705774 5.93304754] [ 4.03379298 5.92249482] [ 4.03495211 5.92641014] [ 4.034886 5.92639866] Optimization terminated successfully. Current function value: 74.264646 Iterations: 5 Function evaluations: 9 Gradient evaluations: 9
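###Markdown To connect the update formula above with the BFGS result, here is a bare-bones fixed-step gradient descent on the same problem, reusing `f` and `f_der`; the step size and iteration count below are arbitrary choices for illustration only: ###Code
# Plain gradient descent: x_{j+1} = x_j - lambda_j * grad F(x_j), with a fixed step size
xj = np.array([0.0, 1.0])   # start from the same initial guess [0, 1] used above
lam = 0.05                  # fixed lambda_j, chosen only for illustration
for j in range(200):
    xj = xj - lam * f_der(xj)
print("gradient descent estimate:", xj)
print("cumulative distance at estimate:", f(xj))
###Output _____no_output_____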
caption/notebooks/Concept Detection - Logistic Regression.ipynb
###Markdown ImageCLEF 2018 concept detector - logistic regressionThis notebook performs multi-label classification of biomedical concepts with logistic regression. The feature sets, built separately, are loaded from HDF5 files.You may read more about this approach in our working notes:> Eduardo Pinho and Carlos Costa. _Feature Learning with Adversarial Networks for Concept Detection in Medical Images: UA.PT Bioinformatics at ImageCLEF 2018_, CLEF working notes, CEUR, 2018. Instructions of use1. Run preamble cells below.2. Pick an existing representation kind, run the respective data set loading and training bundle harness creation cells.3. Choose the number of epochs to train, run respective cell.4. View the results with the following cell, go to step 3 at will to keep on training.5. When done, print the test set predictions in the following cell. HDF5 data formatAll feature files must contain these two datasets:- `/data`: (N, D), 32-bit float containing the feature vectors- `/id`: (N,), variably-lengthed UTF-8 string containing the image ID (the file name without the extension) ###Code import json import random import time import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from util import * from lin import * %matplotlib inline tf.logging.set_verbosity(tf.logging.INFO) ###Output _____no_output_____ ###Markdown Read concept list (in frequency order)The following cell creates a list of concepts and their counts in descending order of frequency. This allows us to focus on classifying more balanced labels (they are generally very sparse). ###Code with open("./vocabulary.csv", encoding="utf-8") as file: CONCEPT_LIST = [] CONCEPT_COUNT = [] for x in file: parts = x.strip().split('\t') CONCEPT_LIST.append(parts[0]) CONCEPT_COUNT.append(int(parts[1])) CONCEPT_COUNT = np.array(CONCEPT_COUNT) CONCEPT_MAP = {cname: v for (v, cname) in enumerate(CONCEPT_LIST)} print("Number of concepts:", len(CONCEPT_MAP)) ###Output _____no_output_____ ###Markdown Read ground truthPlease **add the concept list file** to this directory, or modify the file path below. ###Code labels_all = build_labels('./ConceptDetectionTraining2018-Concepts.csv', CONCEPT_MAP) ###Output _____no_output_____ ###Markdown Label statisticsThe constants below are specific to the ImageCLEF 2018 caption task. ###Code N_SAMPLES = 223859 N_TESTING_SAMPLES = 9938 N_LABELED_SAMPLES = N_SAMPLES - len(labels_all) print("{} items in full data set without labels ({:.4}% of set)".format( N_LABELED_SAMPLES, N_LABELED_SAMPLES * 100.0 / N_SAMPLES)) N_AVERAGE_LABELS = np.mean([len(c) for c in labels_all.values()]) print("Each labeled item contains {} labels on average".format(N_AVERAGE_LABELS)) ###Output _____no_output_____ ###Markdown Make train-val split partitionIn order to obtain some feedback on the training process, the training set is split into two parts. In this code, 10% of the data set was separated for tuning the classifiers. 
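###Markdown Before building the splits, it can be worth confirming that a feature file actually follows the HDF5 layout described at the top of this notebook. A minimal sketch using h5py (not imported elsewhere in this notebook), pointed at one of the training feature files used below: ###Code
import h5py

# Inspect one feature file: /data should be (N, D) float32, /id should be (N,) strings
with h5py.File('./bocs-256-train.h5', 'r') as f:
    print('data:', f['data'].shape, f['data'].dtype)
    print('id:  ', f['id'].shape, f['id'].dtype)
###Output _____no_output_____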
###Code N_VALIDATION = N_SAMPLES // 10 N_TRAINING_SAMPLES = N_SAMPLES - N_VALIDATION RANDOM_SEED = 63359405 print("Using {} validation samples (out of {})".format(N_VALIDATION, N_SAMPLES)) random.seed(RANDOM_SEED) all_indices = list(range(N_SAMPLES)) val_indices = random.sample(all_indices, k=N_VALIDATION) train_indices = np.delete(all_indices, val_indices) assert len(train_indices) + len(val_indices) == N_SAMPLES ###Output _____no_output_____ ###Markdown Evaluation with Logistic RegressionThe following constants may be adjusted to select which concepts to classify, starting from the most frequent ones. ###Code N_TRAIN = 500 # just these most frequent features N_TRAIN_OFFSET = 0 # skip these most frequent features first # -------------- AUTOMATICALLY CALCULATED, DO NOT MODIFY -------------- CONCEPTS_TO_TRAIN = CONCEPT_LIST[N_TRAIN_OFFSET:N_TRAIN_OFFSET + N_TRAIN] # calculate the probability of each concept (based on its frequency in the training set) CONCEPTS_PROB = CONCEPT_COUNT[N_TRAIN_OFFSET:N_TRAIN_OFFSET + N_TRAIN] / N_SAMPLES ###Output _____no_output_____ ###Markdown Operating point thresholdsChoose a list of operating point thresholds to consider in the fine-tuning process. A threshold of 0.5 maximizes accuracy, but is not very useful in this context, since the concepts are very sparse and infrequent. On the other hand, excessively low thresholds will yield too many concepts, decreasing precision. By defining multiple thresholds, we are searching for the one that will maximize the $F_1$ score. ###Code thresholds = [0.06, 0.0625, 0.07, 0.075, 0.08, 0.1, 0.125, 0.15, 0.175] ###Output _____no_output_____ ###Markdown Bags of Colors The following code uses features based on an implementation of bags of colors. Please see [this repository](https://github.com/Enet4/bag-of-colors-nb) for the implementation. It was only written after the 2018 challenge.The following cell loads the training set, splits it, and loads the testing set. Please make sure that you have both the train and testing feature files. If they have a different name, feel free to change them below. ###Code boc_dset = Datasets.from_h5_files_partition( './bocs-256-train.h5', train_indices, './bocs-256-test.h5', labels_all, CONCEPTS_TO_TRAIN, N_TRAIN_OFFSET, normalizer_fn=max_normalize) ###Output _____no_output_____ ###Markdown The following code creates a model for logistic regression and respective estimator. ###Code model_fn = build_model_fn( n_classes=N_TRAIN, x_shape=[boc_dset.train_x.shape[1]], learning_rate=0.05, thresholds=thresholds ) boc_estimator = tf.estimator.Estimator(model_fn=model_fn, config=get_config('boc')) boc_bundle = TrainBundle() train_and_eval_boc = build_train_and_eval_function( boc_estimator, boc_bundle, boc_dset, thresholds, CONCEPTS_TO_TRAIN) ###Output _____no_output_____ ###Markdown The next cell performs the actual training, evaluation, and test predictions. It can be run multiple times. Consider trying a small number of epochs as the argument and running the cell multiple times to see the outcomes earlier. ###Code boc_f1, boc_test_predictions = train_and_eval_boc(10) ###Output _____no_output_____ ###Markdown The following cell shows the progression of $F_1$ scores with training. ###Code show_eval(boc_bundle, thresholds, name="boc") print("Best F1:", boc_f1) ###Output _____no_output_____ ###Markdown Finally, the submission file can be built with the following cell. 
###Code # write predictions to file print_predictions(boc_test_predictions, boc_bundle.all_metrics, key="lin-boc-{}-o{}".format(N_TRAIN, N_TRAIN_OFFSET)) ###Output _____no_output_____ ###Markdown This pipeline replicates itself below for other kinds of visual features. Adversarial Auto-EncoderPlease see [imageclef-aae](https://github.com/bioinformatics-ua/imageclef-toolkit/tree/master/caption/imageclef-aae) to train an adversarial auto-encoder. ###Code aae_dset = Datasets.from_pair_files_partition( './aae-features-train.h5', './aae-list-train.txt', train_indices, './aae-features-test.h5', './aae-list-test.txt', labels_all, CONCEPTS_TO_TRAIN, offset=N_TRAIN_OFFSET ) model_fn = build_model_fn( n_classes=N_TRAIN, x_shape=[aae_val_x.shape[1]], learning_rate=0.05, thresholds=thresholds ) aae_estimator = tf.estimator.Estimator(model_fn=model_fn, config=get_config('aae')) aae_bundle = TrainBundle() train_and_eval_aae = build_train_and_eval_function( aae_estimator, aae_bundle, aae_dset, thresholds, CONCEPTS_TO_TRAIN) aae_f1, aae_test_predictions = train_and_eval_aae(5) show_eval(aae_bundle, thresholds, name="aae") print("Best F1:", aae_f1) # write predictions to file print_predictions(aae_test_predictions, aae_bundle.all_metrics, key="aae-{}-o{}".format(N_TRAIN, N_TRAIN_OFFSET)) ###Output _____no_output_____ ###Markdown Flipped-Adversarial Auto-EncoderPlease see [imageclef-aae](https://github.com/bioinformatics-ua/imageclef-toolkit/tree/master/caption/imageclef-aae) to train a flipped-adversarial auto-encoder. ###Code faae_dset = Datasets.from_h5_files_partition( './faae-features-train.h5', train_indices, './aae-features-test.h5', labels_all, CONCEPTS_TO_TRAIN, offset=N_TRAIN_OFFSET ) model_fn = build_model_fn( n_classes=N_TRAIN, x_shape=[faae_dset.train_x.shape[1]], learning_rate=0.05, thresholds=thresholds ) faae_estimator = tf.estimator.Estimator(model_fn=model_fn, config=get_config('faae')) faae_bundle = TrainBundle() train_and_eval_faae = build_train_and_eval_function( faae_estimator, faae_bundle, faae_dset, thresholds, CONCEPTS_TO_TRAIN) faae_f1, faae_test_predictions = train_and_eval_faae(5) show_eval(aae_bundle, thresholds, name="faae") print("Best F1:", faae_f1) # write predictions to file print_predictions(faae_test_predictions, faae_bundle.all_metrics, key="faae-{}-o{}".format(N_TRAIN, N_TRAIN_OFFSET)) ###Output _____no_output_____
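###Markdown The threshold list defined earlier is searched for the value that maximizes F1. As a purely illustrative sketch of that idea, decoupled from the training harness: the `y_true_concept` and `y_prob_concept` arrays below are made-up stand-ins, not data from this task. ###Code
import numpy as np
from sklearn.metrics import f1_score

# Made-up stand-ins for one concept's ground truth and predicted probabilities
rng = np.random.RandomState(0)
y_true_concept = rng.binomial(1, 0.05, size=1000)
y_prob_concept = np.clip(0.3 * y_true_concept + 0.3 * rng.rand(1000), 0.0, 1.0)

# Pick the operating point from the candidate thresholds that maximizes F1
scores = {t: f1_score(y_true_concept, (y_prob_concept >= t).astype(int)) for t in thresholds}
best_threshold = max(scores, key=scores.get)
print('best threshold by F1:', best_threshold, 'F1 =', scores[best_threshold])
###Output _____no_output_____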
SVM/svm_demo_iris_AIML_TA_session_Oct30.ipynb
###Markdown ###Code import numpy as np #for the following statement to compile successfully, you need the scikit-learn package. from sklearn.datasets import load_iris #for loading iris dataset from sklearn.svm import LinearSVC #linear svm from scikit learn np.random.seed(1000) #for reproducibility iris = load_iris() #check the shape of iris data features print('iris data shape:', iris.data.shape) A = iris.data #check the shape of iris target labels print('iris target shape:', iris.target.shape) #We can print first 5 samples of iris data and check print('Features of first five samples of iris data:') print(A[0:5]) #How many labels does iris data have? #print the unique classes print('unique classes: ',np.unique(iris.target)) n = iris.data.shape[0] #Number of data points or samples d = iris.data.shape[1] #Dimension of data points #In the following code, we create a nx1 vector of target labels y = 1.0*np.ones([A.shape[0],1]) for i in range(iris.target.shape[0]): if iris.target[i] == 1: y[i] = 1 else: y[i] = -1 #Create an index array indexarr = np.arange(n) #index array np.random.shuffle(indexarr) #shuffle the indices #print('shuffled indices of samples:') #print(indexarr) #Use the samples corresponding to first 80% of indexarr for training num_train = int(0.8*n) #Use the remaining 20% samples for testing num_test = n-num_train print('num_train: ',num_train, 'num_test: ', num_test) #Use the first 80% of indexarr to create the train data features and train labels train_features = A[indexarr[0:num_train]] train_label = y[indexarr[0:num_train]] print('shape of train data features:') print(train_features.shape) print('shape of train data labels') print(train_label.shape) #Use remaining 20% of indexarr to create the test data and test labels test_features = A[indexarr[num_train:n]] test_label = y[indexarr[num_train:n]] print('shape of test data features:') print(test_features.shape) print('shape of test data labels') print(test_label.shape) clf = LinearSVC(random_state=0, tol=1e-5) #max_iter=10000 #Get the trained model in clf_model train_label = np.reshape(train_label,(train_label.shape[0],)) clf_model = clf.fit(train_features,train_label) #We will now use the trained model for predition on the test data set predicted_labels = clf_model.predict(test_features) #reshaping predicted_labels so that it is of the same shape as test_label predicted_labels = np.reshape(predicted_labels.data,(num_test,1)) #If you wish to see how predicted_labels and test_label look, you can print them and check #print(predicted_labels) #print(test_label) #compute test set error and test set accuracy test_error = np.sum(0.5*np.abs(predicted_labels-test_label))/len(test_label)*100.0#Try to understand this equation test_accuracy = 100.0-test_error #print the test set accuracy print('test set accuracy:', test_accuracy) #We can also use the trained model clf_model to predict the train set predicted_train_labels = clf_model.predict(train_features) #reshaping predicted_train_labels so that it is of the same shape as train_label predicted_train_labels = np.reshape(predicted_train_labels.data,(num_train,1)) #compute train set error and train set accuracy train_label = np.reshape(train_label,(num_train,1)) train_error = np.sum(0.5*np.abs(predicted_train_labels-train_label))/len(train_label)*100.0 train_accuracy = 100.0-train_error #print the train set accuracy print('train set accuracy:', train_accuracy) from sklearn.metrics import confusion_matrix cm_train = confusion_matrix(train_label, predicted_train_labels) print("Confusion 
Matrix (train set):") print(cm_train) cm_test = confusion_matrix(test_label, predicted_labels) print("Confusion Matrix (test set):") print(cm_test) #Plotting the confusion matrix for better interpretation import seaborn as sns import matplotlib.pyplot as plt fig, axes = plt.subplots(ncols=2, figsize=(6, 3)) ax1, ax2 = axes sns.heatmap(cm_train, annot=True, fmt='g', ax=ax1) sns.heatmap(cm_test, annot=True, fmt='g', ax=ax2) ax1.set_title('Train Data') ax1.xaxis.set_ticklabels(['-1', '1']) ax1.yaxis.set_ticklabels(['-1', '1']) ax2.set_title('Test Data') ax2.xaxis.set_ticklabels(['-1', '1']) ax2.yaxis.set_ticklabels(['-1', '1']) fig.suptitle('Confusion Matrix') from sklearn.metrics import classification_report train_cr = classification_report(train_label, predicted_train_labels) print("Train Report:") print(train_cr) test_cr = classification_report(test_label, predicted_labels) print("Test Report:") print(test_cr) #Note that Iris data has three labels 0, 1 and 2. In the code above, #we have considered label 1 as class 1 and other two labels as class -1. #Exercise 1: #Now, modify the code so that label 0 is considered as class 1 and #other labels are considered as class -1. #Consider the same setup as used in the code above, #where we take first 80% as train data, and remaining 20% as test data #Train the svm using only the train features and labels. #Find the test set accuracy and train set accuracy and report them. #Exercise 2: #Modify the code so that label 2 is considered as class 1 and #other labels are considered as class -1. #Consider the same setup as used in the code above, #where we take first 80% as train data, and remaining 20% as test data #Train the decision tree using only the train features and labels. #Find the test accuracy and train accuracy and report them. ###Output _____no_output_____
module4-gradient-boosting/module4-model_iterpretation.ipynb
###Markdown "age";"job";"marital";"education";"default";"housing";"loan";"contact";"month";"day_of_week";"duration";"campaign";"pdays";"previous";"poutcome";"emp.var.rate";"cons.price.idx";"cons.conf.idx";"euribor3m";"nr.employed";"y![image.png](attachment:image.png) ###Code import pandas as pd pd.set_option('display.max_rows', 5000) pd.set_option('display.max_columns', 5000) from datetime import datetime import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from sklearn.tree import DecisionTreeClassifier from sklearn.impute import SimpleImputer imputer = SimpleImputer() import numpy as np import category_encoders as ce from sklearn.pipeline import Pipeline from sklearn.pipeline import make_pipeline from ipywidgets import interact from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from mlxtend.plotting import plot_decision_regions from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from mlxtend.plotting import plot_decision_regions from sklearn.model_selection import cross_val_score, cross_val_predict from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import GridSearchCV from sklearn.decomposition import PCA from xgboost import XGBClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score data_dir = '~/lambda/DS-Unit-4-Sprint-1-Tree-Ensembles/module1-decision-trees/' emps = ['nurse', 'doctor', 'lead', 'plumber', 'coal', 'accountant', 'attorney'] manager = ['manager','vp', 'president', 'vice president''director', 'executive', 'superintendent', 'captain'] it = ['tech','computers', 'it', 'data', 'progammer', 'analyst'] df = pd.read_csv(data_dir + 'train_features.csv') df.head() earliest_cr_line = df.earliest_cr_line.apply(lambda d: datetime.strptime(d,'%b-%Y')) df_earliest_cr_line_min = earliest_cr_line.min() means = {} c = 0.5 subgrade_means = {'A': c, 'B': c, 'C': c, 'D': c, 'E': c, 'F': c, 'G': c} count_null_subgrade_and_grade = 0 def wrangle(X, make_means=False): X = X.copy() # Drop some columns X = X.drop(columns='id') # id is random X = X.drop(columns=['member_id', 'url', 'desc']) # All null X = X.drop(columns='title') # Duplicative of purpose if make_means: for i in range(65,72): c = chr(i) subgrade_means[c] = X[X.grade == c].sub_grade.apply(lambda s: float(s[1])).mean() / 10 print(subgrade_means) # Transform sub_grade from "A1" - "G5" to 1.1 - 7.5 def wrangle_sub_grade(o): x = o[1] grade = o[0] if isinstance(x, float): if isinstance(grade, float): count_null_subgrade_and_grade += 1 return float(3) return float(ord(grade[0]) - 64) + subgrade_means[grade[0]] first_digit = ord(x[0]) - 64 second_digit = int(x[1]) return first_digit + second_digit/10 # X['sub_grade'] = X['sub_grade'].apply(wrangle_sub_grade) X['revol_util'] = X['revol_util'].str.strip('%').astype(float) X.sec_app_earliest_cr_line.fillna(False, inplace=True) X.earliest_cr_line = X.earliest_cr_line.apply( lambda d: datetime.strptime(d, '%b-%Y')) X.earliest_cr_line = (X.earliest_cr_line - df_earliest_cr_line_min) / np.timedelta64(1, 'D') X.term = X.term.apply(lambda t: int(t[1:3])) X['int_rates'] = X.int_rate.apply(lambda r: float(r[:-1])) X.drop(['int_rate'], axis=1, inplace=True) X['ngrade'] = X[['grade', 'sub_grade']].apply(wrangle_sub_grade, axis=1) X.drop(['grade', 'sub_grade'], axis=1, inplace=True) def el(e): if isinstance(e, float) or e[0] == '<': return 0 
return int(e[0:(2 if e[1] == '0' else 1)]) X.emp_length = X.emp_length.apply(el) def wrangle_emp(x): if isinstance(x, float): return 'No' for s in emps: if x.find(s) >= 0: return s return 'Yes' def wrangle_list(x, l): if isinstance(x, float): return False for s in l: if x.find(s) >= 0: return True return False # Create features for three employee titles: teacher, manager, owner X['emp_title'] = X['emp_title'].str.lower() X['emp_title_emp'] = X['emp_title'].apply(lambda x: wrangle_emp(x)) X['emp_title_manager'] = X['emp_title'].apply( lambda x: wrangle_list(x, manager)) X['emp_title_it'] = X['emp_title'].apply(lambda x: wrangle_list(x, it)) # Drop categoricals with high cardinality X = X.drop(columns=['emp_title']) # Transform features with many nulls to binary flags many_nulls = ['sec_app_mths_since_last_major_derog', 'sec_app_revol_util', 'sec_app_mort_acc', 'dti_joint', 'sec_app_collections_12_mths_ex_med', 'sec_app_chargeoff_within_12_mths', 'sec_app_num_rev_accts', 'sec_app_open_act_il', 'sec_app_open_acc', 'revol_bal_joint', 'annual_inc_joint', 'sec_app_inq_last_6mths', 'mths_since_last_record', 'mths_since_recent_bc_dlq', 'mths_since_last_major_derog', 'mths_since_recent_revol_delinq', 'mths_since_last_delinq', 'il_util', 'mths_since_recent_inq', 'mo_sin_old_il_acct', 'mths_since_rcnt_il', 'num_tl_120dpd_2m', 'bc_util', 'percent_bc_gt_75', 'bc_open_to_buy', 'mths_since_recent_bc'] for col in many_nulls: try: X[col] = X[col].apply(lambda x: 0 if isinstance(x, float) else x) except: print(col) if make_means: clist = X.select_dtypes(include=[np.number]).columns.tolist() for col in clist: means[col] = X[col].mean() # For features with few nulls, do mean imputation for col in X: if X[col].isnull().sum() > 0: X[col] = X[col].fillna(means[col]) print('count_null_subgrade_and_grade', count_null_subgrade_and_grade) # Return the wrangled dataframe return X df_train_y = pd.read_csv(data_dir + 'train_labels.csv') df_train_y.dtypes df_ = wrangle(df, True) df_.head() clist = df_.select_dtypes(exclude=[np.number]).columns.tolist() one_hot_columns = [] binary_columns = [] max_one = 15 for c in clist: if len(df_[c].unique()) > max_one: binary_columns.append(c) else: one_hot_columns.append(c) testdf = pd.read_csv(data_dir + 'test_features.csv') testdf.head() testdf_ = wrangle(testdf) df_train_y = pd.read_csv(data_dir + 'train_labels.csv') y = df_train_y.charged_off.values X_train = df_ y_train = y # pca = PCA() pipe = Pipeline(steps = [ ('be', ce.BinaryEncoder(cols=binary_columns)), ('one', ce.OneHotEncoder(use_cat_names=True,cols=one_hot_columns)), # ('pca', pca), ('gb', GradientBoostingClassifier())] ) cross_val_score(pipe, X_train, y_train, cv=5, scoring='accuracy', verbose=10, n_jobs=-1) ###Output _____no_output_____ ###Markdown array([0.85403974, 0.85388793, 0.853623 , 0.85468274, 0.85426603]) ###Code param_grid = { # 'pca__n_components': [28], "gb__loss" : ['exponential'], "gb__learning_rate" : [0.1], "gb__n_estimators": [180], "gb__min_samples_leaf": [3], "gb__min_impurity_decrease": [1.2], "gb__max_depth": [3] } # Fit on the train set, with grid search cross-validation gs = GridSearchCV(pipe, param_grid=param_grid, cv=5, n_jobs=8, scoring='roc_auc', verbose=1) gsf = gs.fit(X_train, y_train) print('Best Parameter (roc_auc score=%0.3f):' % gsf.best_score_) print(gsf.best_params_) ###Output _____no_output_____ ###Markdown Best Parameter (roc_auc score=0.734):{'gb__learning_rate': 0.1, 'gb__loss': 'exponential', 'gb__min_samples_leaf': 3, 'gb__n_estimators': 180}Best Parameter (CV 
score=0.734):{'gb__learning_rate': 0.1, 'gb__loss': 'exponential', 'gb__n_estimators': 150} ###Code print(count_null_subgrade_and_grade) ###Output _____no_output_____ ###Markdown Best Parameter (roc_auc score=0.734):{'gb__learning_rate': 0.1, 'gb__loss': 'exponential', 'gb__min_samples_leaf': 3, 'gb__n_estimators': 180}Best Parameter (CV score=0.734):{'gb__learning_rate': 0.1, 'gb__loss': 'exponential', 'gb__n_estimators': 150} ###Code #Plot the PCA spectrum pca.fit(X) fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6)) ax0.plot(pca.explained_variance_ratio_, linewidth=2) ax0.set_ylabel('PCA explained variance') print(pca.explained_variance_ratio_) ax0.axvline(search.best_estimator_.named_steps['pca'].n_components, linestyle=':', label='n_components chosen') ax0.legend(prop=dict(size=12)) # For each number of components, find the best classifier results results = pd.DataFrame(search.cv_results_) components_col = 'param_pca__n_components' best_clfs = results.groupby(components_col).apply( lambda g: g.nlargest(1, 'mean_test_score')) xgb = XGBClassifier() xgb ?xgb.gamma # pca = PCA() pipe = Pipeline(steps = [ ('be', ce.BinaryEncoder(cols=binary_columns)), ('one', ce.OneHotEncoder(use_cat_names=True,cols=one_hot_columns)), # ('pca', pca), ('xgb', XGBClassifier())] ) cross_val_score(pipe, X_train, y_train, cv=5, scoring='accuracy', verbose=10, n_jobs=-1) param_grid = { # 'pca__n_components': [28], "xgb__booster": ["dart"], "xgb__gamma": [8], "xgb__learning_rate": [0.1], "xgb__n_estimators": [130], # "gb__min_samples_leaf": [3], # "gb__min_impurity_decrease": [1.2], "xgb__max_depth": [4] } # Fit on the train set, with grid search cross-validation gs = GridSearchCV(pipe, param_grid=param_grid, cv=60, n_jobs=-1, scoring='roc_auc', verbose=1) gsf = gs.fit(df_, y) print('Best Parameter (roc_auc score=%0.3f):' % gsf.best_score_) print(gsf.best_params_) py = gsf.predict_proba(testdf_)[:, 1] with open('submit.csv', 'w') as file: file.write('id,charged_off\n') for id, charged_off in zip(testdf.id,py): # if charged_off > 0: # print('a charge off') file.write(f"{id},{charged_off}") file.write('\n') X = df_ y = y X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y) gsf = gs.fit(X_train, y_train) print('Best Parameter train (roc_auc score=%0.3f):' % gsf.best_score_) print(gsf.best_params_) py = gsf.predict_proba(X_train)[:,1] predicted_y_test = gsf.predict_proba(X_test) roc_auc_score(y_test,predicted_y_test[:,1]) testdf = pd.read_csv(data_dir + 'test_features.csv') testdf_ = wrangle(testdf) py = gsf.predict_proba(testdf_)[:, 1] with open('submit.csv', 'w') as file: file.write('id,charged_off\n') for id, charged_off in zip(testdf.id,py): # if charged_off > 0: # print('a charge off') file.write(f"{id},{charged_off}") file.write('\n') import matplotlib.pyplot as plt from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import cross_val_predict y_pred_proba = cross_val_predict(pipe, X_train, y_train, cv=5, n_jobs=-1, method='predict_proba')[:, 1] fpr, tpr, thresholds = roc_curve(y_train, y_pred_proba) plt.plot(fpr, tpr) plt.title('ROC curve') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') print('Area under the Receiver Operating Characteristic curve:', roc_auc_score(y_train, y_pred_proba)) fprdf = pd.DataFrame({'False Positive Rate': fpr, 'True Positive Rate': tpr, 'Threshold': thresholds}) fprdf.head() # Filenames of your submissions you want to ensemble files = ['dart', 'xgb', 'gbc'] target_name 
= 'charged_off' submissions = (pd.read_csv(f"submit.{file}.csv")[ [target_name]] for file in files) ensemble = pd.concat(submissions, axis='columns') majority_vote = ensemble.mode(axis='columns')[0] sample_submission = pd.read_csv(data_dir + 'sample_submission.csv') submission = sample_submission.copy() submission[target_name] = majority_vote submission.to_csv('my-ultimate-ensemble-submission.csv', index=False) df.columns.tolist() df_ = wrangle(df, True) encoders = Pipeline([ ('binary', ce.BinaryEncoder(cols=binary_columns)), ('onehot', ce.OneHotEncoder(use_cat_names=True,cols=one_hot_columns)) # ('DecisionTree', DecisionTreeClassifier(max_depth=17, class_weight='balanced')) ]) dff = encoders.fit(df_) dft_ = dff.transform(df_) X_train, X_test, y_train, y_test = train_test_split( dft_, y, test_size=0.2, random_state=42, stratify=y) import xgboost as xgb dtrain = xgb.DMatrix(X_train, label=y_train) dtest = xgb.DMatrix(X_test, label=y_test) params = { 'boost': 'dart', 'gamma': 14, "n_estimators": 140, # 'monotone_constraints': '(0)', # no constraint 'max_depth': 4, 'eta': 0.1, 'silent': 1, 'n_jobs': -1, 'seed': 0, 'eval_metric': 'rmse'} # # With early stopping # bst_cv = xgb.cv(params, dtrain, num_boost_round=1000, nfold=5, early_stopping_rounds=10, as_pandas=True) # len(bst_cv) r = 8.5 X_train['y'] = y_train X_train.y.iloc[0:2] def weight_row(_, row): if row.mths_since_last_delinq > 0 and row.mths_since_last_delinq < 12: return r if row.i == 1 else 0 return 1 dtrain.set_weight([weight_row(i, row) for i,row in X_train.iterrows()]) bst = xgb.train(params, dtrain, num_boost_round=326) # print(bst.eval(dtest)) X_train.drop('y', index=1, inplace = True) y_prob = bst.predict(dtrain) # y_prob[0:5] roc_auc_score(y_train, y_prob) y_prob = bst.predict(dtest) roc_auc_score(y_test, y_prob) ###Output _____no_output_____
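###Markdown Given the notebook's focus on model interpretation, one quick next step is to look at which engineered features the trained booster leans on most. A minimal sketch, assuming the `bst` booster from the last cell is still in memory: ###Code
import xgboost as xgb
import matplotlib.pyplot as plt

# Plot the 20 features with the highest gain in the trained booster
xgb.plot_importance(bst, max_num_features=20, importance_type='gain')
plt.tight_layout()
plt.show()
###Output _____no_output_____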
Unit 2 - CWT Power Spectrum and Wavelet Coherence of Water and Energy.ipynb
###Markdown Data Collection and Plotting ###Code # R package which includes Quantitative Financial Modelling Frameworks. #https://www.rdocumentation.org/packages/quantmod #install.packages("quantmod") require(quantmod) # An R package for Wavelet analysis and reconstruction of time series, # cross-wavelets and phase-difference (with filtering options), # significance with simulation algorithms. # https://www.rdocumentation.org/packages/WaveletComp/versions/1.0 #install.packages("WaveletComp") require(WaveletComp) # An R Package of time series tools and utilities; Rmetrics - Financial Time Series Objects #https://www.rdocumentation.org/packages/timeSeries #install.packages("timeSeries") require(timeSeries) # An R package with a collection of econometric functions for performance and risk analysis #https://www.rdocumentation.org/packages/PerformanceAnalytics #install.packages("PerformanceAnalytics") require(PerformanceAnalytics) # biwavelet: Conduct Univariate and Bivariate Wavelet Analyses # https://www.rdocumentation.org/packages/biwavelet #install.packages("biwavelet") require(biwavelet) # Identify the tickers of interest tickers <- c("CGW","XLE") # Download these tickers from Yahoo for the dates in the presentation getSymbols(tickers,src="yahoo", from = "2007-06-01",to = "2018-01-26") # Merge all the Price series into one dataframe AllPrices <- do.call(merge, lapply(tickers, function(x) get(x))) #Some of these series have (NA) missing values for dates when others # do not have missing values in the series so we interpolate for these values AllPrices$CGW.Close <- interpNA(AllPrices$CGW.Close) AllPrices$XLE.Close <- interpNA(AllPrices$XLE.Close) #log prices price.pair <- log(cbind(AllPrices$XLE.Close ,AllPrices$CGW.Close, AllPrices$SPY.Close)) #Identify Names, key dates, and key labels colnames(price.pair) <- c("Energy (XLE)", "Water (CGW)") key.dates <- c("2007-08-09","2011-04-01","2014-07-01", "2016-01-04"); key.labels <- c("Start of Financial Crisis: Seizure in the banking system", "End of Financial Crisis (Peak)", "Start of Oil Glut (Peak)", "End of Oil Glut (Trough)") #Plot the timeseries and the key dates chart.TimeSeries(price.pair, colorset = rainbow10equal[c(1,7,2)], legend.loc = "bottomright", ylab = "Prices (logscale)", main = "Price Series: Exchange Traded Funds (ETFs)", las = 3, lwd = 0.5, cex.main = 1.5, cex.legend = 0.9, pch = " ", cex.labels = 0.8, lty = c(1,1,1),event.lines = key.dates, event.labels = key.labels, event.color = c("darkslategrey", "darkslategrey", "darkslategrey", "darkslategrey")) ###Output _____no_output_____ ###Markdown Wavelet Analysis (CWT Power Spectrum) ###Code #Set up the correct data frame rCGW <- as.data.frame((AllPrices$CGW.Close)) rXLE <- as.data.frame((AllPrices$XLE.Close)) #Retrieve specific dates for this time frame date1 <- index(AllPrices) #save Prices in Matrix form rW <- cbind(1:(length(AllPrices$CGW.Close)), rCGW$CGW.Close[1: length(AllPrices$CGW.Close)]) rE <- cbind(1:(length(AllPrices$XLE.Close)), rXLE$XLE.Close[1: length(AllPrices$XLE.Close)]) #Continuous Wavelet Transform (CWT) Power Spectrum of the Energy Series my.data.E = data.frame(x = rE[,2]) #1/dt = number of intervals per time unit #sampling resolution on time domain #1/dj = number of suboctaves (voices per octave) #sampling resolution on frequency domain my.E = analyze.wavelet(my.data.E, "x", loess.span = 0, dt = 1/250, dj = 1/64, make.pval = F, verbose = F, n.sim = 1000) #Plot the Power Spectrum par(mar=c(6,6,4,2)+0.1) wt.image(my.E, periodlab = " ",timelab = " " , main = " ", 
legend.params = list(lab = "wavelet power levels", mar = 5.1, cex = 6, n.ticks = 10), color.key = "quantile", lwd = 2, plot.ridge = FALSE) #Add lines for Investment Horizons clip(0,0.92,0,1) abline(h = 0.338, col = "brown", lty = 1, lwd = 2) abline(h = 0.678, col = "brown", lty = 1, lwd = 2) title("CWT Power Spectrum: Energy Prices", cex.main = 1.8, xlab = "Days (250 days per year)", ylab = " ", cex.lab = 2) #Continuous Wavelet Transform (CWT) Power Spectrum of the Water Series my.data.W = data.frame(x = rW[,2]) my.W = analyze.wavelet(my.data.W, "x", loess.span = 0, dt = 1/250, dj = 1/64, make.pval = F, verbose = F, n.sim = 1000) #Plot the Power Spectrum par(mar=c(6,6,4,2)+0.1) wt.image(my.W, periodlab = " ",timelab = " ", main = " ", legend.params = list(lab = "wavelet power levels", mar = 4.7, n.ticks = 10), color.key = "quantile", lwd = 2, plot.ridge = FALSE) #Add lines for Investment Horizons clip(0,0.92,0,1) abline(h = 0.338, col = "brown", lty = 1, lwd = 2) abline(h = 0.678, col = "brown", lty = 1, lwd = 2) title("CWT Power Spectrum: Water Prices", cex.main = 1.8, xlab = "Days(250 days per year)", ylab = " ", cex.lab = 1.8) ###Output _____no_output_____ ###Markdown Wavelet Coherence Plot Water and Energy ###Code #Wavelet Coherence Plot Water and Energy # For help on the exact arguments of this function see: ?wtc # d1: Time series 1 in matrix format (n rows x 2 columns). # The first column should contain the time steps and the second column should contain the values. # d2: Time series 2 in matrix format (n rows x 2 columns). # The first column should contain the time steps and the second column should contain the values. # quiet: Do not display progress bar. # nrands: Number of Monte Carlo randomizations. wtc.rWE=wtc(d1 = rW, d2 = rE, quiet = TRUE, nrands = 100) #Add the dates to the axis of the squared coherence plot wtc.rWE$xaxis <- date1 #Plotting the wavelet Squared Coherence par(oma=c(0, 0, 0, 1), mar=c(5, 5, 5, 5) + 0.1) plot(wtc.rWE, plot.cb=TRUE, plot.phase=TRUE, xlab = "Year", ylab = "Scale (Days)", cex = 1.6, lty.coi = 1, col.coi = "grey", lwd.coi = 2, lwd.sig = 2, cex.lab = 1.8) #Add annual lines and lines to distinguish between investment horizons n = length(rW[, 1]) abline(v = seq(250, n, 250), h = 1:16, col = "brown", lty = 1, lwd = 1) title("WSC: Water and Energy Prices", cex.main = 1.8) ###Output _____no_output_____
TeachingDocs/Quick_Reference_Guides/Assignment_QuickRef.ipynb
###Markdown This Notebook - Goals - FOR EDINA**What?:**- Example question and answer styles for notebook assignments and tutorials.**Who?:**- This notebook is for teachers and those creating educational materials.**Why?:**- Provides template from which to copy and paste or get ideas.**Noteable features to exploit:**- Markdown elements incorporated- Demonstrates inbuilt library use**How?:**- Brief set of potential question/answer formats- Demonstrates range of elements required in teaching (for example creating alert boxes) Notes for teachersThis template is intended to give examples of many different question and answer styles, as well as showing some examples of how to format your worksheets for use with nbgrader. You can use this as a base for your own worksheets by downloading the file and uploading it to your noteable account. If you are looking to cut and paste questions into a worksheet, please see the preformatted skeleton [here](https://github.com/edina/Exemplars2020/blob/master/TeachingDocs/Templates/Assignment_Skeleton.ipynb), which may be more efficient than editing this reference sheet.For a list of libraries available on each notebook, follow [this link](https://noteable.edina.ac.uk/notebooks_descriptions/). How to use this sheetclick on any cell and press Enter to view and edit the markdown/code in that cell. Press Shift + Enter to run the cell (this will also show you the formatted written cells).You can delete a cell by clicking the scissor symbol in the toolbar above. Similarly, you can copy and paste cells using the adjacent buttons.The "+" button on the toolbar will produce a new code cell, you can switch to markdown using the dropdown in the toolbar above. Assignment TitleImportant information such as due date of the assignment and % contribution to overall grade. Instructions to studentsBefore you submit, ensure that the notebook can be run from start to finish by pressing the "Restart & Run All" option in the "Kernel" menu above.If the assignment was fetched from the assignments tab, do not change the name of the assignment file(s).Cells which are left blank for your responses will either require a text response or a code response. This will be clear from the question, but you should check that a text response is written in a markdown cell, and a code response is written in a code cell (as indicated in the toolbar). Code answersIn questions that require you to write code, there will usually be a code cell containing: ANSWER HEREraise NotImplementedError() When you are ready to write your answer, delete raise NotImplementedError() and write your code. Text answersFor questions with a text answer, there will be a markdown cell following the question. There will usually be an indication that the cell is intended for your answer such as "YOUR ANSWER HERE". ###Code # this section enables use of specific libraries in the document # this can usually be ignored by students # command style instruction to plot inside notebook %matplotlib inline import numpy as np # import libraries used in the assignment (add more as required): import matplotlib import matplotlib.pyplot as plt # hide unnecessary warnings import warnings warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Assignment contentText outlining the content covered in the assignment, possibly including a reference [(Jones)[1]](References). There may be some [links](https://www.google.com) to useful content. Question 1This question is about some content. 
Please write the equation for the circumference of a circle in latex.Hint: To use latex inline, surround the latex code with \$. Latex code on its own line should be surrounded by \$\$. $$ C = 2\pi r $$ Question 2Given your answer above, write a python function in the cell below that calculates the circumference of a circle, given the radius.Function specification:- Function name - cirm- Input - radius of circle- Output - circumference of circle, correct to 2 decimal placesHint: You will need to import the 'math' module in order to use $\pi$ in your calculation. ###Code import math # function cirm def cirm(n): ### BEGIN SOLUTION return 2*math.pi*n ### END SOLUTION ### BEGIN HIDDEN TESTS assert round(cirm(1),2) == 6.28 assert round(cirm(2),2) == 12.57 assert round(cirm(3),2) == 18.85 assert round(cirm(4),2) == 25.13 assert round(cirm(5),2) == 31.42 ### END HIDDEN TESTS ###Output _____no_output_____ ###Markdown Now use python to calculate the circumference of circles with radii:\[1, 2, 3, 4, 5\]Function specification:- Function name - cirms- Input - list of radii: \[1, 2, 3, 4, 5\]- Output - list of associated circumferences, rounded to 2 decimal places ###Code def cirms(ns): cs = [] for n in ns: ### BEGIN SOLUTION cs.append(round(cirm(n),2)) ### END SOLUTION return cs ### BEGIN HIDDEN TESTS assert cirms([1,2,3,4,5]) == [6.28, 12.57, 18.85, 25.13, 31.42] ### END HIDDEN TESTS ###Output _____no_output_____ ###Markdown Question 3In a maths lesson, Bethany is asked to guess the circumference of the circles. Here are her guesses:| Radius | Guess ||--------|-------|| 1 | 6 || 2 | 14 || 3 | 20 || 4 | 25 || 5 | 35 |In the cell below, create a table similar to that above, also including the actual values (as calculated in question 2). ###Code # this code cell stores Bethany's guesses in a list called gs gs = [6, 14, 20, 25, 35] ###Output _____no_output_____ ###Markdown ANSWER SPACE FOR Q3 Question 4Produce a suitable plot for Bethany's guesses. Again, consider suitable library functions to achieve this. ###Code x = np.linspace(1, 5,250) # numbers 1 to 5, 250 samples - using numpy #plt.plot(x,cirm(x),label="true data", color="red") # matlab style plot plt.scatter([1,2,3,4,5], gs, label="guesses") # Bethany's guesses plt.legend(); ###Output _____no_output_____ ###Markdown Question 5Plot the graph from question 4 again, but this time include a line showing the true circumference values._Hint: you can copy and paste cells by using the toolbar above._ ###Code x = np.linspace(1, 5,250) # numbers 1 to 5, 250 samples - using numpy plt.plot(x,cirm(x),label="true data", color="red") # matlab style plot plt.scatter([1,2,3,4,5], gs, label="guesses") # Bethany's guesses plt.legend(); ###Output _____no_output_____
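###Markdown Extra autograded example (a sketch only, using a hypothetical question about the area of a circle, not part of the original sheet): the same `### BEGIN SOLUTION` / `### BEGIN HIDDEN TESTS` markers used in the questions above can be reused for any numeric answer. ###Code
import math

# Hypothetical extra question: area of a circle of radius n, rounded to 2 decimal places
def area(n):
    ### BEGIN SOLUTION
    return round(math.pi * n**2, 2)
    ### END SOLUTION

### BEGIN HIDDEN TESTS
assert area(1) == 3.14
assert area(2) == 12.57
assert area(3) == 28.27
### END HIDDEN TESTS
###Output _____no_output_____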
Example_1_Quick_Functions.ipynb
###Markdown *Electricity-Data-Pipeline*: Example 1 _Click_ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/desenk/Electricity-Data-Pipeline/blob/master/Example_1_Quick_Functions.ipynb) _to open this notebook in Google Colab_ 1. Clone the Electricity-Data-Pipeline project and download all required packages ###Code #Please comment out if you are NOT using Google Colab or Jupyter Notebook !git clone https://github.com/desenk/Electricity-Data-Pipeline.git %cd ./Electricity-Data-Pipeline !pip install -r requirements.txt ###Output Cloning into 'Electricity-Data-Pipeline'... remote: Enumerating objects: 211, done. remote: Counting objects: 100% (211/211), done. remote: Compressing objects: 100% (108/108), done. remote: Total 211 (delta 107), reused 186 (delta 92), pack-reused 0 Receiving objects: 100% (211/211), 337.31 KiB | 611.00 KiB/s, done. Resolving deltas: 100% (107/107), done. /content/Electricity-Data-Pipeline Requirement already satisfied: lxml in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 4)) (4.2.6) Requirement already satisfied: urllib3 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 5)) (1.24.3) Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 6)) (1.1.5) Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 7)) (1.19.5) Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 8)) (3.2.2) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->-r requirements.txt (line 6)) (2018.9) Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->-r requirements.txt (line 6)) (2.8.1) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 8)) (2.4.7) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 8)) (0.10.0) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 8)) (1.3.1) Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas->-r requirements.txt (line 6)) (1.15.0) ###Markdown 2. Import the `Electricity-Data-Pipeline` modules ###Code from pipeline.BMRS_helpers import * from pipeline.range_import_helpers import * import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown 3. Get your API keyPlease ensure that you obtain an API key from Elexon following the instructions below (adapted from [here](https://www.elexon.co.uk/documents/training-guidance/bsc-guidance-notes/bmrs-api-and-data-push-user-guide-2/)):1. Register [here](https://www.elexonportal.co.uk/registration/newuser?cachebust=3apx5qnzf9) as a new user > Check your inbox for the verification email2. Sign-in & Access your API Key> Log-in -> Click on `my profile` -> Copy the `scripting key`3. Paste the API Key in the `api_key.txt` file (NO quotes or other characters - just the copied key)> Google Colab: `folder icon` (on the left)>Electricity-Data-Pipeline>api_key.txt4. Check that your API Key is saved correctly ###Code get_APIKey() ###Output _____no_output_____ ###Markdown 4. 
Import raw data using quick BMRS helper functions List of functions Below is a list of all the `` functions defined so far:_**Table 1:** List of the quick BMRS helper functions_`Electricity-Data-Pipeline` Quick Functions | Description | Resolution | Inputs ------------ | ------------- | ------------ | ------------**`demand()`** | Rolling System Demand | 5 min | demand(start_date = 'YYYY-MM-DD', end_date = 'YYYY-MM-DD', save_to_csv = False)**`temperature()`** | Average Daily Temperature in Britain | Daily | "**`generation()`** | Half-hourly Generation by Fuel Type | Halfhourly (30 min) | "**`frequency()`** | System Frequency | 15 sec | "**`initial_demand_national()`** | Initial National Demand Out-turn | Halfhourly (30 min) | "**`initial_demand_transmission()`** | Initial Transmission System Demand Out-turn | Halfhourly (30 min) | "**`demand_forecast_national()`** | National Demand Forecast | Halfhourly (30 min) | "**`demand_forecast_transmission()`** | Transmission System Demand Forecast | Halfhourly (30 min) | "**`imbalance_volume()`** | Imbalance Volume | Halfhourly (30 min) | "**`loss_of_load()`** | Loss of Load and De-rated Margin | Halfhourly (30 min) | "**`imbalance_price()`** | Imbalance Price | Halfhourly (30 min) | "**`derived_system_data()`** | Derived System Data | Halfhourly (30 min) | "**`extract_data()`** | Uses BMRS data label and tries different methods | depends on dataset of choice | extract_data(report_name = 'TEMP', start_date = 'YYYY-MM-DD', end_date = 'YYYY-MM-DD', save_to_csv = True)________________________________________________**Table 2:** List of the data extractions functions for a week or longer periods._`Electricity-Data-Pipeline` Function for Weekly/Long-term Imports | Description | Range | Inputs ------------ | ------------- | ------------ | ------------**`extract_data_weekly()`** | Extracts data for a week from the start_date using the function names from the table above| Fixed - Weekly | extract_data_weekly(func_name = demand , start_date = 'YYYY-MM-DD', save_to_csv = True)**`extract_data_range()`** | Extracts data for long timeframes | Variable | extract_data_range(func_name = temperature, start_date = 'YYYY-MM-DD', end_date = 'YYYY-MM-DD', save_to_csv = False)**`extract_data_range_with_BMRS_label()`** | Same as above but using BMRS report names rather than the function names from the table above | Variable | data_extract_range_with_BMRS_label(report_name = 'TEMP', start_date = 'YYYY-MM-DD', end_date = 'YYYY-MM-DD', save_to_csv = False) Here, we demonstrate the quick functions used for short-term data import (See Table 1).Please note that these functions are subject to capping and time-out limits imposed by the data provider. Please look at the range import functions to overcome this (See Table 2). Demand example with default dates ###Code demand = demand() demand %matplotlib inline plt.style.use('seaborn') plt.plot(demand['fuelTypeGeneration']) ###Output _____no_output_____ ###Markdown Generation example with custom dates & save_to_csv option enabled ###Code generation_default = generation() #default dates generation_default.head() #function(start_date = '2020-03-28', end_date = '2020-03-31, save_to_csv = True) generation_custom = generation(start_date = '2020-03-28', end_date = '2020-03-31', save_to_csv = True) generation_custom ###Output saved FUELHH_2020-03-28_2020-03-31.csv
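###Markdown For ranges longer than the quick functions comfortably allow (they are subject to the capping and time-out limits mentioned above), Table 2 lists `extract_data_range`. The cell below is a sketch of such a call using the signature given in Table 2; the dates are arbitrary examples and the cell is not run here. ###Code
# Sketch of a long-range import following the Table 2 signature; dates are arbitrary examples.
# save_to_csv=True is expected to write the result to a CSV, as with the quick functions above.
temperature_h1_2020 = extract_data_range(func_name=temperature,
                                         start_date='2020-01-01',
                                         end_date='2020-06-30',
                                         save_to_csv=True)
###Output _____no_output_____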
notebooks/examples/3_Data_Scientist.ipynb
###Markdown 4. Data Scientist - Create ML models with Spark ###Code from pyspark.ml import Pipeline from pyspark.ml.classification import DecisionTreeClassifier from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler from pyspark.ml.evaluation import MulticlassClassificationEvaluator import numpy as np from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score, f1_score from pyspark.sql import SparkSession spark = SparkSession.builder \ .appName('Spark - Data Scientist Demo') \ .config('spark.jars', '/usr/lib/spark/jars/spark-bigquery-latest.jar') \ .config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2.12:0.18.0") \ .getOrCreate() ###Output _____no_output_____ ###Markdown --properties spark:spark.jars=gs://spark-lib/bigquery/spark-bigquery-with-dependencies_2.12-0.18.0.jar ###Code spark.conf.get("spark.app.id") spark.sparkContext._jvm.scala.util.Properties.versionString() #!ls /usr/lib/spark/jars/ ###Output _____no_output_____ ###Markdown Create a Spark DataFrame from hive table ###Code table = "datalake-vol2:datalake_vol2_raw.banking_marketing_train" df_banking_marketing_train = spark.read \ .format("bigquery") \ .option("table", table) \ .load() df_banking_marketing_train.createOrReplaceTempView("bank_marketing_train_view") data = spark.sql(""" SELECT * FROM bank_marketing_train_view """).cache() ###Output _____no_output_____ ###Markdown Cache the DataFrame in memory ###Code data.cache() data.columns data.groupBy("Deposit").count().show() ###Output +-------+-----+ |Deposit|count| +-------+-----+ | 1|36009| | 2| 4771| +-------+-----+ ###Markdown Split training and test data ###Code (train_data, test_data) = data.randomSplit([0.7, 0.3], seed=42) train_data.groupBy("Deposit").count().show() train_data.count() test_data.count() ###Output _____no_output_____ ###Markdown Create Spark ML Pipeline Train a RandomForestClassifier model ###Code train_data = train_data.drop('call_id') train_data #test_data = test_data.drop('call_id') spark.version #predictions.select("call_id").show(5) from pyspark.ml.feature import OneHotEncoder, StringIndexer from pyspark.ml.classification import RandomForestClassifier categorical_cols = [field for (field, data_type) in train_data.dtypes if ((data_type == "string") & (field != 'Deposit'))] index_output_cols = [x + "_Index" for x in categorical_cols] ohe_output_cols = [x + "_OHE" for x in categorical_cols] categorical_string_indexer = StringIndexer( inputCols=categorical_cols, outputCols=index_output_cols, handleInvalid="skip") ohe_encoder = OneHotEncoder( inputCols=index_output_cols, outputCols=ohe_output_cols) numeric_cols = [field for (field, data_type) in train_data.dtypes if (((data_type == "double") | (data_type == "int") | (data_type == "bigint")) & (field != 'Deposit'))] assembler_inputs = ohe_output_cols + numeric_cols vec_assembler = VectorAssembler( inputCols=assembler_inputs, outputCol="features") label_string_indexer = StringIndexer(). \ setInputCol("Deposit"). \ setOutputCol("label") # Train a RandomForestClassifier model. rf = RandomForestClassifier(labelCol="label", featuresCol="features") pipeline = Pipeline(stages=[ categorical_string_indexer, ohe_encoder, vec_assembler, label_string_indexer, rf ]) # Train model on training data pipeline_model = pipeline.fit(train_data) # Make predictions on test. tests = pipeline_model.transform(test_data) # Select example rows to display. 
tests.select("prediction", "label", "features").show(5) test_data.show(5) ###Output +--------------------+---+----------+-------------+---------+-------+-------+-------+-----+---------+---+-----+--------+--------+-----+--------+--------+-------+ | call_id|Age| Job|MaritalStatus|Education|Default|Balance|Housing| Loan| Contact|Day|Month|Duration|Campaign|PDays|Previous|POutcome|Deposit| +--------------------+---+----------+-------------+---------+-------+-------+-------+-----+---------+---+-----+--------+--------+-----+--------+--------+-------+ |000458ba-5ab1-4f7...| 54|management| married| tertiary| false| 7249| true| true| cellular| 4| feb| 102| 2| 77| 1| failure| 1| |000d9fb1-7eeb-46a...| 58|management| divorced|secondary| false| 3161| false|false|telephone| 30| jul| 542| 2| -1| 0| unknown| 2| |0010dfae-c527-462...| 53| admin.| divorced|secondary| false| 315| true|false| unknown| 5| may| 181| 2| -1| 0| unknown| 1| |001218b4-4a10-446...| 31|management| married|secondary| false| 2019| false|false| unknown| 7| may| 380| 2| -1| 0| unknown| 1| |00132e04-4340-4d2...| 57|technician| married| tertiary| false| 1103| false|false| cellular| 4| may| 399| 2| -1| 0| unknown| 1| +--------------------+---+----------+-------------+---------+-------+-------+-------+-----+---------+---+-----+--------+--------+-----+--------+--------+-------+ only showing top 5 rows ###Markdown As the dataset is imbalanced a good metric is AUC: Area Under the ROC Curve. [Learn more about AUC here.](https://developers.google.com/machine-learning/crash-course/classification/roc-and-aucAUC) ###Code from pyspark.ml.evaluation import BinaryClassificationEvaluator binaryEvaluator = BinaryClassificationEvaluator(labelCol="label") auc = binaryEvaluator.evaluate(tests, {binaryEvaluator.metricName: "areaUnderROC"}) print(auc) tests_np = np.array((tests.select("label","prediction").collect())) tests_np tests_np = np.array((tests.select("label","prediction").collect())) np_acc = accuracy_score(tests_np[:,0], tests_np[:,1]) np_f1 = f1_score(tests_np[:,0], tests_np[:,1]) np_precision = precision_score(tests_np[:,0], tests_np[:,1]) np_recall = recall_score(tests_np[:,0], tests_np[:,1]) np_auc = roc_auc_score(tests_np[:,0], tests_np[:,1]) print("f1:", np_f1) print("precision:", np_precision) print("recall:", np_recall) # import package that will generate the confusion matrix scores from sklearn.metrics import confusion_matrix # import packages that will help display the scores import pandas as pd confusion_matrix_scores = confusion_matrix(tests_np[:,0], tests_np[:,1], labels=[1, 0]) # display scores as a heatmap df = pd.DataFrame(confusion_matrix_scores, columns = ["Predicted True", "Predicted Not True"], index = ["Actually True", "Actually Not True"]) df.head() ###Output _____no_output_____ ###Markdown Save model_pipeline ###Code from pyspark.ml import Pipeline, PipelineModel model_path = 'gs://datalake-vol2-data/' pipeline_model.write().overwrite().save(model_path) loaded_pipeline_model = PipelineModel.load(model_path) # Make predictions using loaded model tests = loaded_pipeline_model.transform(test_data) tests.show(5) bq_table_path = 'datalake_vol2_annotated.bank_test' schema_inline = tests.schema.simpleString().replace('struct<', '').replace('>', '').replace('int', 'int64').replace('bigint64', 'int64').replace('double', 'numeric').replace('vector', 'STRING') !bq mk --table \ {bq_table_path} \ {schema_inline} tests.write \ .format("bigquery") \ .option("table", 'datalake-vol2:datalake_vol2_annotated.bank_test') \ 
.option("temporaryGcsBucket", "datalake-vol2-data") \ .mode('overwrite') \ .save() %%bigquery SELECT * FROM datalake_vol2_annotated.INFORMATION_SCHEMA.TABLES; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 381.40query/s] Downloading: 100%|██████████| 3/3 [00:01<00:00, 2.09rows/s] ###Markdown Predict Results ###Code path_to_predict_csv = "gs://datalake-vol2-data/banking_predict_set.csv" df_bank_predict_from_csv = spark \ .read \ .option("inferSchema" , "true") \ .option("header" , "true") \ .csv(path_to_predict_csv) df_bank_predict_from_csv.printSchema() # Make predictions on test. predictions = loaded_pipeline_model.transform(df_bank_predict_from_csv) # Select example rows to display. predictions.select("prediction", "label", "features").show(5) predictions.show(5) ###Output +--------------------+---+-----------+-------------+---------+-------+-------+-------+-----+--------+---+-----+--------+--------+-----+--------+--------+-------+-----------+---------+-------------------+--------------+-------------+---------------+-------------+--------------+-------------+-------------+-----------------+--------------+--------------------+-----+--------------------+--------------------+----------+ | call_id|Age| Job|MaritalStatus|Education|Default|Balance|Housing| Loan| Contact|Day|Month|Duration|Campaign|PDays|Previous|POutcome|Deposit|Month_Index|Job_Index|MaritalStatus_Index|POutcome_Index|Contact_Index|Education_Index| POutcome_OHE| Job_OHE| Contact_OHE|Education_OHE|MaritalStatus_OHE| Month_OHE| features|label| rawPrediction| probability|prediction| +--------------------+---+-----------+-------------+---------+-------+-------+-------+-----+--------+---+-----+--------+--------+-----+--------+--------+-------+-----------+---------+-------------------+--------------+-------------+---------------+-------------+--------------+-------------+-------------+-----------------+--------------+--------------------+-----+--------------------+--------------------+----------+ |2d33a1aa-fcb2-474...| 32|blue-collar| married| primary| false| -56| true| true|cellular| 17| apr| 405| 2| 305| 20| other| 1| 5.0| 0.0| 0.0| 2.0| 0.0| 2.0|(3,[2],[1.0])|(11,[0],[1.0])|(2,[0],[1.0])|(3,[2],[1.0])| (2,[0],[1.0])|(11,[5],[1.0])|(39,[0,11,15,16,2...| 0.0|[16.2193031425206...|[0.81096515712603...| 0.0| |5b85bc40-b1fb-4ec...| 32| admin.| single|secondary| false| 103| true|false|cellular| 17| apr| 158| 1| 337| 1| other| 1| 5.0| 3.0| 1.0| 2.0| 0.0| 0.0|(3,[2],[1.0])|(11,[3],[1.0])|(2,[0],[1.0])|(3,[0],[1.0])| (2,[1],[1.0])|(11,[5],[1.0])|(39,[3,12,13,16,2...| 0.0|[16.8207028939665...|[0.84103514469832...| 0.0| |8692a0b6-b4bd-470...| 29|blue-collar| single|secondary| false| 314| true|false|cellular| 9| apr| 161| 1| 316| 2| other| 1| 5.0| 0.0| 1.0| 2.0| 0.0| 0.0|(3,[2],[1.0])|(11,[0],[1.0])|(2,[0],[1.0])|(3,[0],[1.0])| (2,[1],[1.0])|(11,[5],[1.0])|(39,[0,12,13,16,2...| 0.0|[17.0002656621162...|[0.85001328310581...| 0.0| |74c1a1a5-eaee-40a...| 60| retired| married| primary| false| 0| true|false|cellular| 17| apr| 68| 3| 343| 1| other| 1| 5.0| 5.0| 0.0| 2.0| 0.0| 2.0|(3,[2],[1.0])|(11,[5],[1.0])|(2,[0],[1.0])|(3,[2],[1.0])| (2,[0],[1.0])|(11,[5],[1.0])|(39,[5,11,15,16,2...| 0.0|[17.1415638782710...|[0.85707819391355...| 0.0| |d74e8230-30eb-4f5...| 33| management| single| tertiary| false| 1423| true|false|cellular| 16| apr| 633| 3| 248| 2| other| 1| 5.0| 1.0| 1.0| 2.0| 0.0| 1.0|(3,[2],[1.0])|(11,[1],[1.0])|(2,[0],[1.0])|(3,[1],[1.0])| (2,[1],[1.0])|(11,[5],[1.0])|(39,[1,12,14,16,2...| 
0.0|[13.7538902877193...|[0.68769451438596...| 0.0| +--------------------+---+-----------+-------------+---------+-------+-------+-------+-----+--------+---+-----+--------+--------+-----+--------+--------+-------+-----------+---------+-------------------+--------------+-------------+---------------+-------------+--------------+-------------+-------------+-----------------+--------------+--------------------+-----+--------------------+--------------------+----------+ only showing top 5 rows ###Markdown Join Data ###Code path_to_join_csv = "gs://datalake-vol2-data/banking_join.csv" df_bank_join_from_csv = spark \ .read \ .option("inferSchema" , "true") \ .option("header" , "true") \ .csv(path_to_join_csv) df_bank_join_from_csv.printSchema() predictions = predictions.toPandas() df_bank_join_from_csv = df_bank_join_from_csv.toPandas() result = pd.merge(predictions, df_bank_join_from_csv, on=['call_id','call_id']) result ###Output _____no_output_____ ###Markdown Store result to Enriched Zone ###Code result = spark.createDataFrame(result) bq_result_table_path = 'datalake-vol2:datalake_vol2_enriched.bank_result' schema_inline = result.schema.simpleString().replace('struct<', '').replace('>', '').replace('int', 'int64').replace('bigint64', 'int64').replace('double', 'numeric').replace('vector', 'STRING') !bq mk --table \ {bq_result_table_path} \ {schema_inline} result.write \ .format("bigquery") \ .option("table", bq_result_table_path) \ .option("temporaryGcsBucket", "datalake-vol2-data") \ .mode('overwrite') \ .save() %%bigquery SELECT * FROM datalake_vol2_enriched.INFORMATION_SCHEMA.TABLES; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 360.61query/s] Downloading: 100%|██████████| 1/1 [00:02<00:00, 2.61s/rows]
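###Markdown A possible next step (a sketch, not executed against the cluster) is to tune the `RandomForestClassifier` stage of the pipeline with `CrossValidator`; the grid values below are arbitrary examples, and the same `pipeline`, `rf` and `train_data` defined above are reused. ###Code
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import BinaryClassificationEvaluator

# Small, arbitrary example grid over the random forest stage defined earlier
param_grid = (ParamGridBuilder()
              .addGrid(rf.numTrees, [20, 50])
              .addGrid(rf.maxDepth, [5, 10])
              .build())

cv = CrossValidator(estimator=pipeline,
                    estimatorParamMaps=param_grid,
                    evaluator=BinaryClassificationEvaluator(labelCol="label"),
                    numFolds=3,
                    seed=42)

# Fit on the training split and inspect the average metric per grid point
cv_model = cv.fit(train_data)
print(cv_model.avgMetrics)
###Output _____no_output_____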
convolutional_filters_and_edge_detection/Finding Edges and Custom Kernels.ipynb
###Markdown Creating a Filter, Edge Detection Import resources and display image ###Code import matplotlib.pyplot as plt import matplotlib.image as mpimg import cv2 import numpy as np %matplotlib inline # Read in the image image = mpimg.imread('images/curved_lane.jpg') plt.imshow(image) ###Output _____no_output_____ ###Markdown Convert the image to grayscale ###Code # Convert to grayscale for filtering gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) plt.imshow(gray, cmap='gray') ###Output _____no_output_____ ###Markdown TODO: Create a custom kernelBelow, you've been given one common type of edge detection filter: a Sobel operator.The Sobel filter is very commonly used in edge detection and in finding patterns in intensity in an image. Applying a Sobel filter to an image is a way of **taking (an approximation) of the derivative of the image** in the x or y direction, separately. The operators look as follows.**It's up to you to create a Sobel x operator and apply it to the given image.**For a challenge, see if you can put the image through a series of filters: first one that blurs the image (takes an average of pixels), and then one that detects the edges. ###Code # Create a custom kernel # 3x3 array for edge detection sobel_y = np.array([[ -1, -2, -1], [ 0, 0, 0], [ 1, 2, 1]]) ## TODO: Create and apply a Sobel x operator sobel_x = np.array([[ -1, 0, 1], [ -2, 0, 2], [ -1, 0, 1]]) # Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel) filtered_image = cv2.filter2D(gray, -1, sobel_y) plt.imshow(filtered_image, cmap='gray') ###Output _____no_output_____ ###Markdown Test out other filters!You're encouraged to create other kinds of filters and apply them to see what happens! As an **optional exercise**, try the following:* Create a filter with decimal value weights.* Create a 5x5 filter* Apply your filters to the other images in the `images` directory. ###Code filtered_image = cv2.filter2D(gray, -1, sobel_x) plt.imshow(filtered_image, cmap='gray') ###Output _____no_output_____
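###Markdown One possible take on the optional exercise above (a sketch, not the only answer): first blur with a 5x5 averaging kernel whose weights are decimals (1/25 each), then apply the Sobel x operator to the blurred image. ###Code
# 5x5 averaging (blur) kernel with decimal weights: each output pixel becomes the local mean
blur_kernel = np.ones((5, 5)) / 25.0

# Blur first, then detect edges on the blurred image, reusing `gray` and `sobel_x` from above
blurred = cv2.filter2D(gray, -1, blur_kernel)
edges_after_blur = cv2.filter2D(blurred, -1, sobel_x)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.set_title('blurred')
ax1.imshow(blurred, cmap='gray')
ax2.set_title('Sobel x after blur')
ax2.imshow(edges_after_blur, cmap='gray')
###Output _____no_output_____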
Qunova Computing/training.ipynb
###Markdown This notebook scripts demonstrates the training process of the model introduced at [arXiv:2001.03622](https://arxiv.org/abs/2001.03622).The model consists of two parts, which are quantum embedding and variational classifier. ###Code import numpy as np from pyquil_circuits import PauliFeatureMap, VariationalCircuit from QVC import PyquilVariationalClassifier from utils import load_data, bf ###Output _____no_output_____ ###Markdown We employ the quantum embedding part with Pauli feature map, represented at [arXiv:1804.11326](https://arxiv.org/abs/1804.11326).You can find our implementation of the pauli feature map [here](https://github.com/QuNovaComputing/Hackathon2021/blob/qunovacomputing/Qunova%20Computing/pyquil_circuits.pyL101).We have the dataset with the dimension of 3, we put 3 as the number of qubits. ###Code qfm = PauliFeatureMap(num_qubits=3, rep=2) print(qfm.circuit) ###Output DECLARE data REAL[3] H 0 RZ(data[0]) 0 H 1 RZ(data[1]) 1 H 2 RZ(data[2]) 2 CNOT 0 1 RZ((pi - data[0])*(pi - data[1])) 1 CNOT 0 1 CNOT 1 2 RZ((pi - data[1])*(pi - data[2])) 2 CNOT 1 2 H 0 RZ(data[0]) 0 H 1 RZ(data[1]) 1 H 2 RZ(data[2]) 2 CNOT 0 1 RZ((pi - data[0])*(pi - data[1])) 1 CNOT 0 1 CNOT 1 2 RZ((pi - data[1])*(pi - data[2])) 2 CNOT 1 2 DECLARE ro BIT[3] MEASURE 0 ro[0] MEASURE 1 ro[1] MEASURE 2 ro[2] ###Markdown For the variational classifier, we used the circuit as below. The specific implementation can be found [here](https://github.com/QuNovaComputing/Hackathon2021/blob/qunovacomputing/Qunova%20Computing/pyquil_circuits.pyL130). ###Code vc = VariationalCircuit(num_qubits=3, rep=2) print(vc.circuit) ###Output DECLARE param REAL[16] RY(param[0]) 0 RY(param[1]) 1 RZ(param[2]) 0 RZ(param[3]) 1 CZ 0 1 RY(param[4]) 1 RY(param[5]) 2 RZ(param[6]) 1 RZ(param[7]) 2 CZ 1 2 RY(param[8]) 0 RY(param[9]) 1 RZ(param[10]) 0 RZ(param[11]) 1 CZ 0 1 RY(param[12]) 1 RY(param[13]) 2 RZ(param[14]) 1 RZ(param[15]) 2 CZ 1 2 DECLARE ro BIT[3] MEASURE 0 ro[0] MEASURE 1 ro[1] MEASURE 2 ro[2] ###Markdown We load the data with the number of principle comonents of 3. Also, we divide the data into the training data(80%) and test data(20%).Specifying the random seed, we can repeat the experiment with the same condition. ###Code seed = 30 train_data, train_labels, test_data, test_labels = load_data(test_size=0.2, num_PCs=3, seed=seed) print(f"# of training data = {len(train_data)}") print(f"# of test data = {len(test_data)}") ###Output # of training data = 81 # of test data = 21 ###Markdown Combining two parts, we construct the variational classifier. ###Code qvc = PyquilVariationalClassifier(qfm, vc, bool_ftn=bf, use_bias=False) ###Output _____no_output_____ ###Markdown Start training. 
We use the optimizer with [SPSA](https://qiskit.org/documentation/stubs/qiskit.algorithms.optimizers.SPSA.html),with initial values with [1.0, ...].To monitor the process, you can use `tensorboard`.`tensorboard --logdir=./runs/zzzpfm_c12v3_zzzpfm_c12v3_pyquil` ###Code print("Start training") point, value, nfev = qvc.train(train_data, (-1) ** train_labels, 'zzzpfm_c12v3_pyquil', test_data=test_data, test_label=(-1) ** test_labels, spsa_maxiter=250) print("Training Done") print(f"optimal params = {point}") print(f"final training loss = {value}") print(f"function evaluations = {nfev}") ###Output Start training Training Done optimal params = [-0.779126 2.18269376 0.04466904 0.15724436 -0.8130034 0.6916049 2.22174957 1.19136551 1.306887 0.42488094 0.93505012 -0.45540283 0.74377079 0.35898721 0.92695085 1.26341594] final training loss = 0.8395061728395061 function evaluations = 750 ###Markdown Save the training result. ###Code np.save('./npy_files/TrainData_zzpfmc12_pyquil.npy', train_data) np.save('./npy_files/TestData_zzpfmc12_pyquil.npy', test_data) np.save('./npy_files/TrainLabels_zzpfmc12_pyquil.npy', train_labels) np.save('./npy_files/TestLabels_zzpfmc12_pyquil.npy', test_labels) np.save('./npy_files/Optimal_param_zzpfmc12_pyquil.npy', point) ###Output _____no_output_____
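###Markdown The saved arrays can be reloaded later (for example, to evaluate the trained circuit without re-running SPSA); below is a minimal sketch using only numpy and the file names written above. ###Code
# Reload the arrays saved by np.save in the previous cell
train_data_loaded = np.load('./npy_files/TrainData_zzpfmc12_pyquil.npy')
test_data_loaded = np.load('./npy_files/TestData_zzpfmc12_pyquil.npy')
test_labels_loaded = np.load('./npy_files/TestLabels_zzpfmc12_pyquil.npy')
opt_params = np.load('./npy_files/Optimal_param_zzpfmc12_pyquil.npy')

print(train_data_loaded.shape, test_data_loaded.shape, test_labels_loaded.shape)
print(opt_params)
###Output _____no_output_____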
Testing_Networks/Notebooks/0_Deterministic.ipynb
###Markdown Deterministic Model ###Code N = 2000 intervals = 100 p=1.0 gamma = 1/6 sigmas = [1/4, 1000] R0 = 1.5 #use 1.5 and 4.0 beta = R0*gamma T_total = 90 #use 65 and 12 intervals = T_total*2 repetitions = 200 lambda1 = ((-sigma-gamma)/(2)) + (1/2)*np.sqrt((sigma-gamma)**2 + 4*sigma*beta) est = 1/(lambda1) print(est, 1/(beta-gamma)) colors_status = ['indigo', 'darkred'] fig, ax = plt.subplots(figsize=(14,10)) for sigma in sigmas: if (sigma == 1/4): mymodel = SEIRSModel(initN = 2000, beta = beta, sigma = sigma, gamma = gamma, initI=1) mymodel.run(T = T_total) lambda1, lambda2, time, E_solution, I_solution, sol_total_approx, I_max_2 = run_deterministic(N, beta, sigma, gamma, p, T_total, folder = Text_files_path +'Deterministic/Single_trajectory') ax.plot(time, I_solution, color = 'darkred', ls = '-', lw = 4, label = 'Infected') ax.plot(time, E_solution, color = 'darkorange', ls = '-', lw = 4, label = 'Exposed') ax.plot(mymodel.tseries[::50], mymodel.numI[::50], color = 'darkred', marker = 'o', ls = '', lw = 4, ms = 8) ax.plot(mymodel.tseries[::50], mymodel.numE[::50], color = 'darkorange', marker = 'o', ls = '', lw = 4, ms = 8) if (sigma == 1000): mymodel = SEIRSModel(initN = 2000, beta = beta, sigma = sigma, gamma = gamma, initI=1) mymodel.run(T = T_total) lambda1, lambda2, time, E_solution, I_solution, sol_total_approx, I_max_2 = run_deterministic(N, beta, sigma, gamma, p, T_total, folder = Text_files_path +'Deterministic/Single_trajectory') ax.plot(time, I_solution, color = 'darkred', ls = '-', lw = 4, alpha = .6) ax.plot(mymodel.tseries[::50], mymodel.numI[::50], color = 'darkred', marker = 'o', ls = '', lw = 4, ms = 8, alpha = .6) ax.text(x = 30, y = 50, s = 'SIR', fontsize = 42, color = 'black') ax.text(x = 45, y = 1.2, s = 'SEIR', fontsize = 42, color = 'black') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.set_ylim(bottom = .5, top = 2e2) ax.set_xlim(-0.1, 70) my_plot_layout(ax = ax, yscale = 'log', xlabel = 'Time', ylabel = 'Individuals', x_fontsize = 30, y_fontsize = 30) ax.legend(fontsize = 28, loc = 2) fig.savefig('../Figures/Deterministic/Figure_deterministic.pdf') sigma = 1/4 gamma = 1/6 beta = np.linspace(0, 4.5)*gamma GT0 = (((beta)/(gamma))) GT = (np.sqrt(1-4*((sigma*gamma-sigma*beta)/(sigma+gamma)**2))) seaborn.set_style('ticks') seaborn.despine() fig, ax = plt.subplots(figsize = (12,8)) ax.plot(beta/gamma, GT0, color='darkblue', label = '$SIR$', linewidth = 4) ax.plot(beta/gamma, GT, color='darkgreen', label = '$SEIR$', linewidth = 4) ax.hlines(1,0,4.5, linestyles='dashed', linewidth = 4, color = 'silver') ax.set_xlabel(r'$\beta/\gamma$', fontsize = 30) ax.set_ylabel(r'$R_0$', fontsize = 30) ax.tick_params('both', labelsize=30) ax.legend(fontsize = 26, frameon=False) fig.savefig('../Figures/Deterministic/growth_rates.pdf') sigma = 1/4 gamma = 1/6 beta = np.linspace(1, 4.5)*gamma GT0 = (((beta)/(gamma))) GT = (np.sqrt(1-4*((sigma*gamma-sigma*beta)/(sigma+gamma)**2))) seaborn.set_style('ticks') seaborn.despine() fig, ax = plt.subplots(figsize = (12,8)) ax.plot(beta/gamma, 1/(GT0-1), color='darkblue', label = '$R_0$', linewidth = 4) ax.plot(beta/gamma, 1/(GT-1), color='darkgreen', label = '$R_0^*$', linewidth = 4) ax.vlines(1,0.1,100, linestyles='dashed', linewidth = 4) ax.set_xlabel(r'$\beta/\gamma$', fontsize = 30) ax.set_ylabel(r'Effective reproductive rate', fontsize = 30) ax.tick_params('both', labelsize=30) ax.set_yscale('log') ax.legend(fontsize = 26, frameon=False) fig.savefig('../Figures/Deterministic/establishment.pdf') #Run SIR 
model N = 8000 n0 = 1 beta = 1/2 gamma = 1/6 T_time = 80 model = EpiModel(N=N, I0=n0, beta=beta, gamma=gamma) t, S, n, R = model.run(runtime=T_time, dt= 0.1) fig, ax = plt.subplots(figsize=(12,8)) ax.stackplot(t, np.array([n,R,S])/N,labels=['S','I', 'R'], colors = ['r', 'b', 'g'], alpha = 0.4) ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()], size = 12) ax.set_xticklabels(ax.get_xticks(), size = 12) ax.set_xlabel('days', fontsize = 16) ax.set_ylabel('percent of population', fontsize = 16) ax.legend(fontsize = 16) fig.savefig('../Figures/Deterministic/example_epidemics.pdf') #Run SIR model N = 8000 n0 = 1 beta = 1/2 gamma = 1/6 T_total = 10 #use 25 for \beta = 1/4 and 10 for \beta = 1/2 model = EpiModel(N=N, I0=n0, beta=beta, gamma=gamma) t, S, n, R = model.run(runtime=T_total, dt= 0.1) I_max = n[-1] print(I_max) infec = np.array([1]) time_infec = np.array([0]) for i in range(int(T_total)): infec = np.append(infec,np.exp(((beta/gamma - 1)/6)*(i+1))) #time_infec = np.append(time_infec,time_infec[-1]+(6*np.log(2))/(2)) time_infec = np.append(time_infec,time_infec[-1]+1) fig, ax = plt.subplots(figsize=(12,8)) #ax.stackplot(t, [S,n,R],labels=['Susceptible','Infected', 'Recovered']) #ax.plot(t, S, label = 'Susceptible') ax.plot(t, n,'-', label='Infected SIR') ax.plot(time_infec, infec,'.',ms = 12, label='Infected $R_0$ approx.') ax.legend(fontsize=14) ax.set_xlabel('Time [days]', fontsize = 16) ax.set_ylabel('Indiv.', fontsize = 16) ax.set_title(r'$R_0 = %.01f$ and $N = %.0f$'%(beta/gamma, N), fontsize = 18) ax.tick_params(labelsize = 17) plt.xlim(0,T_total) plt.ylim(1,I_max) #plt.yscale('log') plt.savefig('../Figures/Deterministic/7_days_no_replacement/dynamics_R0%.1f_N%.0f.pdf'%(beta/gamma, N)) folder = '../Figures/Deterministic/7_days_no_replacement/' sample_sizes = [100, 250, 400] colors = ['b', 'g', 'r'] #plot_prob_time(T_total, sample_sizes, beta/gamma, N, time_infec, infec, colors, folder = folder) #plot_prob_ind(I_max, sample_sizes, beta/gamma, N, time_infec, infec, colors) plot_cum_prob_time(T_total, sample_sizes, beta/gamma, N, time_infec, infec, colors, folder = folder) #plot_cum_prob_ind(I_max, sample_sizes, beta/gamma, N, time_infec, infec, colors) a = 1 ###Output _____no_output_____
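###Markdown As an independent cross-check of the deterministic curves above (a sketch under the same parameters, not part of the original analysis), the SIR equations can also be integrated directly with scipy's `odeint`. ###Code
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt

# SIR right-hand side: dS/dt, dI/dt, dR/dt
def sir_rhs(y, t, beta, gamma, N):
    S, I, R = y
    dS = -beta * S * I / N
    dI = beta * S * I / N - gamma * I
    dR = gamma * I
    return [dS, dI, dR]

# Same parameters as the EpiModel run above: N = 8000, one initial infected, beta = 1/2, gamma = 1/6
N_check, I0 = 8000, 1
beta_check, gamma_check = 1/2, 1/6
t_grid = np.linspace(0, 80, 801)
sol = odeint(sir_rhs, [N_check - I0, I0, 0], t_grid, args=(beta_check, gamma_check, N_check))

plt.plot(t_grid, sol[:, 1], label='Infected (odeint)')
plt.xlabel('days')
plt.ylabel('Indiv.')
plt.legend()
###Output _____no_output_____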
Regression/Linear Models/ARDRegressor_StandardScaler_QuantileTransformer.ipynb
###Markdown ARDRegressor with StandardScaler & Quantile Transformer This Code template is for regression analysis using ARDRegressor algorithm with StandardScaler and feature transformation technique QuantileTransformer in a pipeline. Required Packages ###Code import warnings import numpy as np import pandas as pd import seaborn as se import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, QuantileTransformer from sklearn.pipeline import make_pipeline from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error from sklearn.linear_model import ARDRegression warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown InitializationFilepath of CSV file ###Code #filepath file_path= "" ###Output _____no_output_____ ###Markdown List of features which are required for model training . ###Code #x_values features=[] ###Output _____no_output_____ ###Markdown Target feature for prediction. ###Code #y_value target='' ###Output _____no_output_____ ###Markdown Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ###Code df=pd.read_csv(file_path) df.head() ###Output _____no_output_____ ###Markdown Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y. ###Code X=df[features] Y=df[target] ###Output _____no_output_____ ###Markdown Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. ###Code def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ###Output _____no_output_____ ###Markdown Calling preprocessing functions on the feature and target set. ###Code x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ###Output _____no_output_____ ###Markdown Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ###Code f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ###Output _____no_output_____ ###Markdown Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. 
###Code x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) ###Output _____no_output_____ ###Markdown Data RescalingStandardize features by removing the mean and scaling to unit varianceThe standard score of a sample x is calculated as:z = (x - u) / swhere u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False.Refer [API](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) for the parameters Feature TransformationThis method transforms the features to follow a uniform or a normal distribution. Therefore, for a given feature, this transformation tends to spread out the most frequent values. It also reduces the impact of (marginal) outliers: this is therefore a robust preprocessing scheme.Refer [API](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html) for the parameters ModelBayesian ARD regression.Fit the weights of a regression model, using an ARD prior. The weights of the regression model are assumed to be in Gaussian distributions. Also estimate the parameters lambda (precisions of the distributions of the weights) and alpha (precision of the distribution of the noise). The estimation is done by an iterative procedures (Evidence Maximization) Parameters:> - **n_iter: int, default=300** -> Maximum number of iterations.> - **tol: float, default=1e-3** -> Stop the algorithm if w has converged.> - **alpha_1: float, default=1e-6** -> Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter.> - **alpha_2: float, default=1e-6** -> Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter.> - **lambda_1: float, default=1e-6** -> Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter.> - **lambda_2: float, default=1e-6** -> Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter.> - **compute_score: bool, default=False** -> If True, compute the objective function at each step of the model.> - **threshold_lambda: float, default=10 000** -> threshold for removing (pruning) weights with high precision from the computation.> - **fit_intercept: bool, default=True** -> whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be centered).> - **normalize: bool, default=False** -> This parameter is ignored when fit_intercept is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use StandardScaler before calling fit on an estimator with normalize=False.> - **copy_X: bool, default=True** -> If True, X will be copied; else, it may be overwritten.> - **verbose: bool, default=False** -> Verbose mode when fitting the model. ###Code model=make_pipeline(StandardScaler(),QuantileTransformer(),ARDRegression()) model.fit(x_train,y_train) ###Output _____no_output_____ ###Markdown Model AccuracyWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.score: The score function returns the coefficient of determination R2 of the prediction. 
###Code print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ###Output Accuracy score 72.55 % ###Markdown > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. > **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ###Code y_pred=model.predict(x_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ###Output R2 Score: 72.55 % Mean Absolute Error 307.28 Mean Squared Error 126077.43 ###Markdown Prediction PlotFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis. ###Code plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(x_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ###Output _____no_output_____
tensorflow_for_beginners/3. Linear Regression.ipynb
###Markdown We import `matplotlib` in order to draw graphs. `%matplotlib inline` embeds the plots inside the Jupyter notebook instead of opening a new window. ###Code
import matplotlib.pyplot as plt
%matplotlib inline
###Output _____no_output_____
###Markdown Import TensorFlow under the name `tf`. Create one session object using `tf.Session()`.`sess = tf.Session()` We are going to create some random sample data. Create 1000 samples with mean 0 and standard deviation 0.55.`x_raw = tf.random_normal([...], mean=.., stddev=..)x = sess.run(x_raw)` ###Code
x_raw = ...
x = ...
###Output _____no_output_____
###Markdown Having created the x-axis values above, we now create the corresponding y-axis values. The y values satisfy 0.1*x+0.3, but to make them look like real data we mix in a little random noise. Here we use normally distributed noise with mean 0 and standard deviation 0.03.`y_raw = 0.1 * x + 0.3 + tf.random_normal([...], mean=.., stddev=..)y = sess.run(y_raw)` ###Code
y_raw = ...
y = ...
###Output _____no_output_____
###Markdown Let's display the generated sample data as a scatter plot. We pass the x and y values to the plot command, mark the points with circles `'o'`, and draw the marker edges in black.`plt.plot(x, y, 'o', markeredgecolor='k')` Create the two variables W and b used in the linear regression and build the equation of the straight line.`W = tf.Variable(tf.zeros([.]))b = ...(tf.zeros([.]))y_hat = W * x + b` ###Code
W = ...
b = ...
y_hat = ...
###Output _____no_output_____
###Markdown The loss function for regression is the mean squared error. Create a node for the loss function using TensorFlow's error function `tf.losses.mean_squared_error()`. The parameters passed to this function are the target y and the predicted value y_hat.`loss = tf.losses.mean_squared_error(y, y_hat)`Gradient descent is implemented in TensorFlow's `tf.train.GradientDescentOptimizer()`. Create the optimization operation with a gradient-descent learning rate of 0.5.`optimizer = tf.train.GradientDescentOptimizer(0.5)`Pass the loss function object to `optimizer.minimize()` to create the final object used for training.`train = optimizer.minimize(loss)` ###Code
loss = ...
optimizer = ...
train = ...
###Output _____no_output_____
###Markdown Initialize the variables needed in the computation graph. ###Code
init = ...
sess.run(init)
###Output _____no_output_____
###Markdown The `sess.run()` method lets you run the operations you need. The operation that must be run is `train`; for printing we also evaluate W, b, and loss and get their values back.`_, w_, b_, c = sess.run([train, W, b, loss])`The returned c is appended to the costs list so that we can draw the loss curve later. Using w_ and b_, we draw how the straight line fits the scatter plot above.`plt.plot(x, w_ * x + b_)` ###Code
costs = []
for step in range(10):
    _, w_, b_, c = ...
    costs.append(c)
    print(step, w_, b_, c)

# draw the scatter plot
plt.plot(x, y, 'o', markeredgecolor='k')
# draw the straight line
plt.plot(...)

# label the x and y axes and set the min/max range of each axis
plt.xlabel('x')
plt.xlim(-2,2)
plt.ylim(0.1,0.6)
plt.ylabel('y')

plt.show()
###Output _____no_output_____
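###Markdown For reference, here is one possible way to fill in the blanks, assembled only from the hints given in the markdown cells above (this assumes TensorFlow 1.x, where `tf.Session`, `tf.random_normal` and `tf.train.GradientDescentOptimizer` exist). ###Code
import tensorflow as tf

sess = tf.Session()

# 1000 samples with mean 0 and standard deviation 0.55
x_raw = tf.random_normal([1000], mean=0.0, stddev=0.55)
x = sess.run(x_raw)

# y = 0.1*x + 0.3 plus noise with mean 0 and standard deviation 0.03
y_raw = 0.1 * x + 0.3 + tf.random_normal([1000], mean=0.0, stddev=0.03)
y = sess.run(y_raw)

W = tf.Variable(tf.zeros([1]))
b = tf.Variable(tf.zeros([1]))
y_hat = W * x + b

loss = tf.losses.mean_squared_error(y, y_hat)
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess.run(init)

costs = []
for step in range(10):
    _, w_, b_, c = sess.run([train, W, b, loss])
    costs.append(c)
    print(step, w_, b_, c)
###Output _____no_output_____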
Wavefunctions/CuspCorrection.ipynb
###Markdown Cusp Correction for Gaussian OrbitalsFrom "Scheme for adding electron-nucleus cusps to Gaussian orbitals" A. Ma, D. Towler, N. D. Drummond, R. J. Needs, Journal of Chemical Physics 122, 224322(2005) https://doi.org/10.1063/1.1940588 ###Code phi = Symbol('phi') phi_t = Symbol('phitilde') eta = Symbol('eta') psi = Symbol('psi') psi_t = Symbol('psitilde') phi, phi_t, eta, psi, psi_t ###Output _____no_output_____ ###Markdown Each orbital can be divided into two parts - the s-type functions on the current center, and everything else.* $\phi$ The s-type functions on the current center (original functions, no cusp correction)* $\eta$ All non-s-type functions on the current center, and all functions from other centers (no need for cusp correction)* $\psi$ Total uncorrected orbital ($ = \phi + \eta$)* $\tilde{\phi}$ Cusp-corrected s-type functions on the current center* $\tilde{\psi}$ Total cusp-corrected orbital ($ = \tilde{\phi} + \eta$)Inside some cutoff radius ($r_c$) the s-type part of the orbital is replaced with (Eqn 7 in the paper): ###Code C = Symbol('C') p = Symbol('p') p_sym = p r = Symbol('r',real=True,nonnegative=True) R = Symbol('R') eq_phi1 = Eq(phi_t, C + sign(phi_t(0))*exp(p(r))) eq_R = Eq(R(r), sign(phi_t(0))*exp(p(r))) eq_phi2 = Eq(phi_t, C + R(r)) display(eq_phi1) display(eq_R) display(eq_phi2) alpha = IndexedBase('alpha',shape=(5,)) ###Output _____no_output_____ ###Markdown Where $p$ is a polynomial with the $\alpha$'s as coefficients ###Code p = alpha[0] + alpha[1]*r + alpha[2]*r**2 + alpha[3]*r**3 + alpha[4]*r**4 Eq(p_sym, p) rc = Symbol('r_c') R_def = exp(p) R_def ###Output _____no_output_____ ###Markdown Solve for polynomial coefficientsNow to express the $\alpha$'s in terms of various contraints on the wavefunction (The value of the wavefunction and derivatives at the constraint points are the $X$'s) ###Code X1,X2,X3,X4,X5 = symbols('X_1 X_2 X_3 X_4 X_5') # Constraints # Value of phi tilde matches orbital at r_c eq1 = Eq(p.subs(r,rc), X1) eq1 # derivative of phi tilde matches orbital at r_c eq2 = Eq(diff(p,r).subs(r,rc), X2) eq2 # 2nd derivative of phi tilde matches orbital at r_c eq3 = Eq((diff(p,r,2)+diff(p,r)**2).subs(r,rc),X3) eq3 # Cusp condition - derivative at zero eq4 = Eq(diff(p,r).subs(r,0),X4) eq4 # Value of phi tilde at 0 eq5 = Eq(p.subs(r,0),X5) eq5 ###Output _____no_output_____ ###Markdown Solve for the polynomial coefficients ($\alpha$'s) in terms of the wavefunction and derivative values ($X$'s). These should match Eqn 14 in the paper. ###Code sln = solve([eq1, eq2, eq3, eq4, eq5],[alpha[0], alpha[1], alpha[2], alpha[3], alpha[4]])[0] sln Eq(alpha[2],simplify(sln[2])) Eq(alpha[3],expand(sln[3])) Eq(alpha[4],expand(sln[4])) # Expand in terms of X's p_X = p.subs({alpha[i]:sln[i] for i in range(5)}) display(p_X) c_p_X = expand(p_X) for sym in [X5, X4, X3, X2, X1]: c_p_X = collect(c_p_X, sym) display(c_p_X) ###Output _____no_output_____ ###Markdown Effective local energy Fit this to an 'ideal local energy' to get the final parameter ###Code def del_spherical(e, r): """Compute Laplacian for expression e with respect to symbol r. 
Currently works only with radial dependence""" t1 = r*r*diff(e, r) t2 = diff(t1, r)/(r*r) return simplify(t2) # Effective one-electron local energy p_sym = Symbol('p') phi_tilde = exp(p_sym(r)) Zeff = Symbol('Z_eff') El = -S.Half * del_spherical(phi_tilde, r)/phi_tilde - Zeff/r #print R_def #print del_spherical(R_def, r) display(El) El_sym = El.subs(p_sym(r), p).doit() El_sym def eval_local_energy(gto, alpha_vals, r_val, Zeff_val): slist = {alpha[0]:alpha_vals[0], alpha[1]:alpha_vals[1], alpha[2]: alpha_vals[2], alpha[3]:alpha_vals[3], alpha[4]:alpha_vals[4], Zeff:Zeff_val, r:r_val} return El_sym.subs(slist).evalf() def get_current_local_energy(gto, xs, rc_val, alpha_vals, Zeff_val): EL_curr = [] EL_at_rc = eval_local_energy(gto, alpha_vals, rc_val, Zeff_val) dE = -EL_at_rc #print 'dE = ',dE for x in xs: if x < rc_val: el = eval_local_energy(gto, alpha_vals, x, Zeff_val) EL_curr.append(el + dE) else: val, grad, lap = [g[0] for g in gto.eval_vgl(x, 0.0, 0.0)] real_el = -.5*lap / val - Zeff_val/x EL_curr.append(real_el + dE) return EL_curr ###Output _____no_output_____ ###Markdown Evaluate for He orbital ###Code basis_set, he_MO = read_qmcpack.parse_qmc_wf('he_sto3g.wfj.xml',['He']) he_gto = gaussian_orbitals.GTO(basis_set['He']) rc_val = 0.1 he_Z_val = 2.0 xvals = np.linspace(start=-2.0, stop=2.0, num=40) yvals = np.array([he_gto.eval_v(x, 0.0, 0.0)[0] for x in xvals]) he_gto.eval_v(1.1, 0.0, 0.0) plt.plot(xvals, yvals) def compute_EL(X5_val): xslist = {X1:X1_val, X2:X2_val, X3:X3_val, X4:X4_val, X5:X5_val, rc:rc_val} alpha_vals = [s.subs(xslist) for s in sln] aslist = {alpha0:alpha_vals[0], alpha1:alpha_vals[1], alpha2:alpha_vals[2], alpha3:alpha_vals[3], alpha4:alpha_vals[4], Zeff:Zeff_val} Elof_r = El.subs(aslist) return Elof_r xs = np.linspace(start=0.012, stop=1.2*rc_val, num=10) xs ###Output _____no_output_____ ###Markdown Coefficients from the paper to fit an 'ideal' effective one-electron local energy ###Code beta0 = Symbol('beta_0') beta_vals = [beta0, 3.25819, -15.0126, 33.7308, -42.8705, 31.2276, -12.1316, 1.94692] El_terms = [beta_vals[n]*r**(n+1) for n in range(1,8)] EL_ideal_sym = beta0 + sum(El_terms) EL_ideal_sym # Compute ideal local energy at a point def compute_ideal_EL(r_val, Z_val, beta0_val=0.0): Z = Symbol('Z') slist = {beta0: beta0_val, Z:Z_val, r:r_val} return (Z*Z*EL_ideal_sym).subs(slist).evalf() # Choose beta_0 El_orig_at_rc = compute_ideal_EL(rc_val, he_Z_val) Z_val = he_Z_val print 'EL orig at r_c',El_orig_at_rc beta0_val = -(El_orig_at_rc)/Z_val/Z_val beta0_val EL_ideal = [compute_ideal_EL(rval,he_Z_val, beta0_val) for rval in xs] EL_ideal # Evaluate values of X's def evalX(phi_func, rc_val, C_val, Z_val, phi_at_zero, eta_at_zero=0.0): X = [0.0]*5 phi_at_rc, grad_at_rc, lapl_at_rc = phi_func(rc_val) X[0] = log(abs(phi_at_rc - C_val)) X[1] = grad_at_rc[0] / (phi_at_rc - C_val) X[2] = (lapl_at_rc - 2.0*grad_at_rc[0]/rc_val)/(phi_at_rc - C_val) X[3] = -Z_val * (phi_at_zero + eta_at_zero) / (phi_at_zero - C_val) X[4] = log(abs(phi_at_zero - C_val)) return X def create_phi_func(gto): def phi_func(r_val): val,grad,lap = gto.eval_vgl(r_val, 0.0, 0.0) return val[0], grad[0], lap[0] return phi_func Xvals = [0.0]*5 C_val = 0.0 he_Z_val = 2.0 he_phi = create_phi_func(he_gto) evalX(he_phi, rc_val, C_val, he_Z_val, he_phi(0.0)[0]) def solve_for_alpha(Xvals): xslist = {X1:Xvals[0], X2:Xvals[1], X3:Xvals[2], X4:Xvals[3], X5:Xvals[4], rc:rc_val} alpha_vals = [s.subs(xslist) for s in sln] return alpha_vals he_alpha_vals = solve_for_alpha(Xvals) print rc_val EL_curr = 
get_current_local_energy(he_gto, xs, rc_val, he_alpha_vals, he_Z_val) EL_curr plt.plot(xs, EL_ideal, xs, EL_curr) def compute_chi2(EL_ideal, EL_curr): return sum([(e1-e2)**2 for e1,e2 in zip(EL_ideal, EL_curr)]) compute_chi2(EL_ideal, EL_curr) def compute_one_cycle(phi_func, gto, rc_val, Z_val, phi_at_zero, eta_at_zero=0.0): C_val = 0.0 X = evalX(phi_func, rc_val, C_val, Z_val, phi_at_zero, eta_at_zero) alpha_vals = solve_for_alpha(X) EL_curr = get_current_local_energy(he_gto, xs, rc_val, alpha_vals, Z_val) chi2 = compute_chi2(EL_ideal, EL_curr) return chi2, alpha_vals, EL_curr phi_at_zero = he_phi(0.0)[0] EL_curr = [] for ioffset in range(10): chi2, alpha_vals, EL_curr = compute_one_cycle(he_phi, he_gto, rc_val, he_Z_val, phi_at_zero+.01*ioffset) print chi2 # See the local energy and ideal local energy change as phi(0) changes fig, ax = plt.subplots(1,2) plt.subplots_adjust(wspace = 0.5) chi2, alpha_vals, EL_curr = compute_one_cycle(he_phi, he_gto, rc_val, he_Z_val, phi_at_zero) chi2 = float(chi2) ax[0].plot(xs, EL_ideal,label="Ideal local energy") ax[0].set_ylabel("Energy") ax[0].set_xlabel("r") line, = ax[0].plot(xs, EL_curr, label="Local energy") ax[0].legend() chis = [chi2] offsets = [0.0] ax[1].set_xlim(-0.01, 20*0.01) ax[1].set_ylim(0.0, chi2) ax[1].set_ylabel("$\chi^2$") ax[1].set_xlabel("$\phi(0)$") line_chi, = ax[1].plot(offsets, chis, 'bo') def animate_chi2(ioffset): offset = ioffset*0.01 chi2, alpha_vals, EL_curr = compute_one_cycle(he_phi, he_gto, rc_val, he_Z_val, phi_at_zero + offset) print chi2, offset line.set_ydata(EL_curr) offsets.append(offset) chis.append(chi2) line_chi.set_xdata(offsets) line_chi.set_ydata(chis) #line_chi.plot(offsets, chis) return line, # Uncomment the following to see the animation #ani = animation.FuncAnimation(fig, animate_chi2, np.arange(1,20), interval=100, blit=True, repeat=False) #plt.show() # Interactive plot with r_c and phi(0) adjustable phi_slider = ipywidgets.FloatSlider(value=phi_at_zero, min=phi_at_zero/2.0, max=phi_at_zero*2.0) rc_slider = ipywidgets.FloatSlider(value=rc_val,min=rc_val/1.5,max=rc_val*1.5) print rc_val #plt.plot(xs, EL_curr, xs, EL_ideal) fig2 = plt.figure() ax2 = fig2.add_subplot(1,1,1) ax2.set_xlabel("r") ax2.set_ylabel("Local energy") line2, = ax2.plot(xs,EL_ideal) line3, = ax2.plot(xs, EL_curr) def update(phi0=1.0, rc_new=0.1): chi2, alpha_vals, EL_curr = compute_one_cycle(he_phi, he_gto, rc_new, he_Z_val, phi0) line3.set_ydata(EL_curr) fig2.canvas.draw() # Uncomment to activate the interactive version #interact(update, phi0 = phi_slider, rc_new=rc_slider) def chi2_opt(x): phi_at_zero = x[0] rc_val = x[1] chi2, alpha_vals, EL_curr = compute_one_cycle(he_phi, he_gto, rc_val, he_Z_val, phi_at_zero) return float(chi2) phi_at_zero = float(he_phi(0.0)[0]) print 'starting phi(0) = ',phi_at_zero # Optimize phi_0 and rc simultaneously # This optimization to find the minimum chi2 can take a while. scipy.optimize.minimize(chi2_opt,[phi_at_zero, rc_val]) ###Output starting phi(0) = 0.999603733514 ###Markdown Dividing the wavefunction Into $\phi$ and $\eta$ pieces. This is done in QMCPACK by writing zeros to the coefficient matrix. 
###Code # For Neon with DEF2-SVP ne_basis_set, ne_MO_matrix = read_qmcpack.parse_qmc_wf('ne_def2_svp.wfnoj.xml',['Ne']) #for cg in ne_basis_set: # print cg print ne_MO_matrix.shape #ne_MO_matrix ne_basis_set['Ne'] c_phi = ne_MO_matrix.copy() c_eta = ne_MO_matrix.copy() basis_by_index = gaussian_orbitals.get_ijk_inverse_index(ne_basis_set['Ne']) # Loop over MO for mo_idx in range(ne_MO_matrix.shape[0]): for ao_idx in range(ne_MO_matrix.shape[1]): # Loop over centers (for Ne atom, there is only one) # If s-type basis_set, angular_info = basis_by_index[mo_idx] if basis_set.orbtype == 0: # s-type, part of phi but not eta c_eta[mo_idx, ao_idx] = 0.0 else: # not s-type, part of eta but not phi c_phi[mo_idx, ao_idx] = 0.0 c_phi ne_phi_mo = gaussian_orbitals.MO(gaussian_orbitals.GTO(ne_basis_set['Ne']), c_phi) ne_phi_mo.eval_v(0.0, 0.0, 0.0) ne_eta_mo = gaussian_orbitals.MO(gaussian_orbitals.GTO(ne_basis_set['Ne']), c_eta) ne_eta_mo.eval_v(0.0, 0.0, 0.0) mo_idx = 0 gto1 = gaussian_orbitals.GTO(ne_basis_set['Ne'][mo_idx:mo_idx+1]) ne_phi_mo1 = gaussian_orbitals.MO(gaussian_orbitals.GTO(ne_basis_set['Ne']), c_phi[mo_idx:mo_idx+1,:]) print ne_phi_mo1.eval_v(0.0, 0.0, 0.0) ne_eta_mo1 = gaussian_orbitals.MO(gaussian_orbitals.GTO(ne_basis_set['Ne']), c_eta[mo_idx:mo_idx+1,:]) print ne_eta_mo1.eval_v(0.0, 0.0, 0.0) Xvals = [0.0]*5 C_val = 0.0 ne_Z_val = 10.0 ne_phi = create_phi_func(ne_phi_mo1) ne_eta_at_zero = ne_eta_mo1.eval_v(0.0,0.0,0.0)[0] evalX(ne_phi, rc_val, C_val, ne_Z_val, ne_phi(0.0)[0], ne_eta_at_zero) ###Output _____no_output_____
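###Markdown A small convenience worth noting: the symbolic solution above can be compiled into plain numeric functions with sympy's lambdify, which avoids the repeated (and slow) subs/evalf calls. This is only a sketch and assumes the symbols `sln`, `rc`, and `X1`..`X5` defined earlier are still in scope. ###Code
from sympy import lambdify

# Compile the solved expressions for the five alpha coefficients into numpy-callable functions
alpha_funcs = [lambdify((X1, X2, X3, X4, X5, rc), expr, modules='numpy') for expr in sln]

def alphas_numeric(Xvals, rc_val):
    # Xvals is the list [X1..X5] produced by evalX(); returns the five polynomial coefficients
    return [f(Xvals[0], Xvals[1], Xvals[2], Xvals[3], Xvals[4], rc_val) for f in alpha_funcs]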
GS_CNN_Embeddings.ipynb
###Markdown Setting Workspace ###Code # !pip install ipython-autotime # !pip install transformers %load_ext autotime # from google.colab import drive # drive.mount('/content/drive') # import os # os.chdir('/content/drive/My Drive/HLT/') # !pwd ###Output time: 0 ns (started: 2021-08-15 22:16:38 +02:00) ###Markdown Import Libraries ###Code import json from sklearn.model_selection import GridSearchCV from tensorflow.keras.models import Sequential from tensorflow.keras import layers from tensorflow.keras.layers import Dense from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from tensorflow.keras.regularizers import L2 from tensorflow.keras.losses import MSE from tensorflow.keras.layers import Dropout import pandas as pd from keras.preprocessing.sequence import pad_sequences from tensorflow import keras from keras.preprocessing.text import Tokenizer import numpy as np from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedKFold import matplotlib.pyplot as plt from tensorflow.keras.layers import Input, Embedding, Concatenate, TimeDistributed, Bidirectional, GRU from tensorflow.keras.layers import LSTM, Dense,Flatten,Conv2D,Conv1D,GlobalMaxPooling1D,GlobalMaxPool1D, GlobalAveragePooling1D, MaxPooling1D from sklearn.metrics import classification_report,roc_auc_score,roc_curve,r2_score,recall_score,confusion_matrix,precision_recall_curve import seaborn as sns from tensorflow.keras.optimizers import Adam from tensorflow.keras.optimizers import RMSprop pd.set_option('display.max_colwidth', -1) #show all text in col pd.set_option('display.max_rows', None) #show all rows ###Output time: 0 ns (started: 2021-08-15 22:16:42 +02:00) ###Markdown Support Functions ###Code def make_confusion_matrix(cf, group_names=None, categories='auto', count=True, percent=True, cbar=True, xyticks=True, xyplotlabels=True, sum_stats=True, figsize=None, cmap='Blues', title=None): ''' This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization. Arguments --------- cf: confusion matrix to be passed in group_names: List of strings that represent the labels row by row to be shown in each square. categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto' count: If True, show the raw number in the confusion matrix. Default is True. normalize: If True, show the proportions for each category. Default is True. cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix. Default is True. xyticks: If True, show x and y ticks. Default is True. xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True. sum_stats: If True, display summary statistics below the figure. Default is True. figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value. cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues' See http://matplotlib.org/examples/color/colormaps_reference.html title: Title for the heatmap. Default is None. 
''' # CODE TO GENERATE TEXT INSIDE EACH SQUARE blanks = ['' for i in range(cf.size)] if group_names and len(group_names)==cf.size: group_labels = ["{}\n".format(value) for value in group_names] else: group_labels = blanks if count: group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()] else: group_counts = blanks if percent: group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)] else: group_percentages = blanks box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)] box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1]) # CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS if sum_stats: #Accuracy is sum of diagonal divided by total observations accuracy = np.trace(cf) / float(np.sum(cf)) #if it is a binary confusion matrix, show some more stats if len(cf)==2: #Metrics for Binary Confusion Matrices precision = cf[1,1] / sum(cf[:,1]) recall = cf[1,1] / sum(cf[1,:]) f1_score = 2*precision*recall / (precision + recall) stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format( accuracy,precision,recall,f1_score) else: stats_text = "\n\nAccuracy={:0.3f}".format(accuracy) else: stats_text = "" # SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS if figsize==None: #Get default figure size if not set figsize = plt.rcParams.get('figure.figsize') if xyticks==False: #Do not show categories if xyticks is False categories=False # MAKE THE HEATMAP VISUALIZATION plt.figure(figsize=figsize) sns.heatmap(cf,annot=box_labels,fmt="",cmap=cmap,cbar=cbar,xticklabels=categories,yticklabels=categories) if xyplotlabels: plt.ylabel('True label') plt.xlabel('Predicted label' + stats_text) else: plt.xlabel(stats_text) if title: plt.title(title) ###Output time: 0 ns (started: 2021-08-15 22:16:42 +02:00) ###Markdown Load the Dataset ###Code train_df = pd.read_csv("datasets/train.csv") train_df['headline']=train_df['headline'].apply(str) train_df.head() test_df = pd.read_csv("datasets/test.csv") test_df['headline']=test_df['headline'].apply(str) test_df.head() print("Train size:{}".format(train_df.shape)) print("Test size:{}".format(test_df.shape)) ###Output Train size:(22702, 2) Test size:(4007, 2) time: 0 ns (started: 2021-08-15 22:16:43 +02:00) ###Markdown Splitting the set ###Code train_y = train_df['is_sarcastic'] test_y = test_df['is_sarcastic'] train_x = train_df['headline'] test_x = test_df['headline'] train_x.shape,train_y.shape,test_x.shape,test_y.shape ###Output _____no_output_____ ###Markdown Glove ###Code txt_best_grids = "grid_results/cnn/best_results_cnn_embedding_v1.txt" # name of the txt file with all best results of all runned grid searches grid_results_name = 'grid_results/cnn/grid_results_cnn_embedding_v1.csv' # name of the csv file with all tests of the current grid search # Build Static Embedding on top of a CNN maxlen=26 max_features=200 embed_size=300 #Tokenizing steps- must be remembered tokenizer=Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_x)) train_x=tokenizer.texts_to_sequences(train_x) test_x=tokenizer.texts_to_sequences(test_x) #Pad the sequence- To allow same length for all vectorized words train_x=pad_sequences(train_x,maxlen=maxlen) test_x=pad_sequences(test_x,maxlen=maxlen) print("Padded and Tokenized Training Sequence".format(),train_x.shape) print("Target Values Shape".format(),train_y.shape) print("Padded and Tokenized Testing Sequence".format(),test_y.shape) print("Target Values Shape".format(),test_y.shape) 
# Function to create model, required for KerasClassifier def create_model(neurons_layer_1=60, activation='relu', neurons_layer_2 = 16, dropout_1 = 0.5, dropout_2 = 0.2, neurons_layer_3 = 20, learn_rate = 0.1, kernel_size = 3, neurons_layer_4 = 10, dropout_3 = 0.1): # create model model = Sequential() model.add(Embedding(max_features,embed_size,input_length=maxlen)) model.add(Conv1D(neurons_layer_1,activation='relu', kernel_size= kernel_size)) model.add(GlobalMaxPool1D()) model.add(Dense(neurons_layer_2, activation='relu')) model.add(Dropout(dropout_1)) model.add(Dense(neurons_layer_3, activation="relu")) model.add(Dropout(dropout_2)) model.add(Dense(neurons_layer_4, activation="relu")) model.add(Dropout(dropout_3)) model.add(Dense(1,activation='sigmoid')) # Compile model optimizer = Adam(learning_rate=learn_rate) # Compile model model.compile(optimizer=optimizer, loss='binary_crossentropy',metrics=['accuracy']) return model BATCH_SIZE = train_x.shape[0] # create model nn = KerasClassifier(build_fn=create_model, verbose=2) #verbose = 0 # define the grid search parameters batch_size = [128, 256] epochs = [20] learn_rate = [0.0001, 0.001, 0.01] # momentum = [0.0]#, 0.2, 0.4, 0.9] # nesterov = [False] neurons_layer_1 = [256] neurons_layer_2 = [128] neurons_layer_3 = [64] dropout_1 = [0.5] dropout_2 = [0.5, 0.3] # init_mode = ['glorot_uniform'] activation = ['relu']#, 'tanh', 'sigmoid'] # weight_decay = [0.1]#, 0.01, 0.001, 0.0001] kernel_size = [5, 3, maxlen] neurons_layer_4 = [32] dropout_3 = [0.3, 0.1] param_grid = dict(batch_size=batch_size, epochs=epochs, neurons_layer_1=neurons_layer_1, activation=activation, neurons_layer_2=neurons_layer_2, neurons_layer_3 = neurons_layer_3, dropout_1 = dropout_1, dropout_2 = dropout_2, learn_rate=learn_rate, kernel_size = kernel_size, neurons_layer_4 = neurons_layer_4, dropout_3 = dropout_3) grid = GridSearchCV(estimator=nn, param_grid=param_grid, n_jobs=6, cv=StratifiedKFold(3), return_train_score=True, verbose=True) grid_result = grid.fit(train_x, train_y) opt_df = grid_result.best_estimator_ print("{}".format(grid_result.best_params_)) print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() y_true, y_pred = test_y, grid.predict(test_x) print(classification_report(y_true, y_pred)) print() # txt file with all best results of all runned grid searches grid_results_name_str = f'Results wrt: {grid_results_name} \n' grid_params_str = f'Grid on: {json.dumps(grid_result.param_grid)} \n' grid_results_str = f'Best: {grid_result.best_score_} using {grid_result.best_params_} \n' with open(txt_best_grids, "a") as file_object: file_object.write(grid_results_name_str) file_object.write(grid_params_str) file_object.write(grid_results_str) file_object.write('\n') print(grid_results_str) # grid.cv_results_ # csv file with all tests of the current grid search df = pd.DataFrame(grid.cv_results_)[['rank_test_score','mean_test_score', 'std_test_score','mean_train_score', 'std_train_score','param_activation', 'param_batch_size','param_epochs', 'param_neurons_layer_1', 'param_neurons_layer_2', 'param_neurons_layer_3', 'param_dropout_1', 'param_dropout_2', 'param_learn_rate', 'param_dropout_3', 'param_neurons_layer_4', 'param_kernel_size', 'mean_fit_time']].sort_values(by='rank_test_score') df.rename(columns={'param_activation': 'activation', 'param_batch_size': 'batch_size', 'param_epochs': 'epochs', 'param_neurons_layer_1': 'neurons_layer_1', 
'param_neurons_layer_2': 'neurons_layer_2', 'param_neurons_layer_3': 'neurons_layer_3', 'param_neurons_layer_4': 'neurons_layer_4', 'param_dropout_1': 'dropout_1', 'param_dropout_2': 'dropout_2', 'param_dropout_3': 'dropout_3', 'param_kernel_size': 'kernel_size', 'param_learn_rate':'learning_rate', 'mean_test_score': 'mean_val_score', 'std_test_score': 'std_val_score', 'rank_test_score': 'rank_val_score'}, inplace=True) df.mean_train_score *= -1 df.mean_val_score *= -1 df df.to_csv(grid_results_name) result = opt_df.score(test_x, test_y) print("Accuracy: %.2f%%" % (result*100.0)) y_pred = opt_df.predict(test_x) cf_matrix = confusion_matrix(y_pred, test_y) labels = ['sarcastic','not_sarcastic'] cf_matrix make_confusion_matrix(cf_matrix,categories=labels, figsize=(8,6), cbar=False) # roc curve for classes fpr = {} tpr = {} thresh ={} n_class = 2 probs = opt_df.predict_proba(test_x) for i in range(n_class): fpr[i], tpr[i], thresh[i] = roc_curve(test_y, probs[:,i], pos_label=i) random_probs = [0 for i in range(len(test_y))] p_fpr, p_tpr, _ = roc_curve(test_y, random_probs, pos_label=1) plt.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='Sarcastic') plt.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Not Sarcastic') plt.plot(p_fpr, p_tpr, linestyle='--', color='blue') plt.title('Multiclass ROC curve') plt.xlabel('False Positive Rate') plt.ylabel('True Positive rate') plt.legend(loc='best') # plt.savefig('Multiclass ROC',dpi=300) ###Output _____no_output_____
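###Markdown To close the loop, here is a minimal sketch of scoring unseen headlines with the fitted pipeline. It assumes `tokenizer`, `maxlen`, and the refit best estimator `opt_df` from the cells above are still in memory; the two example headlines are made up. ###Code
new_headlines = ["scientists discover that coffee is just bean soup",
                 "local council approves new budget for road repairs"]

# same preprocessing as the training data: tokenize, then pad to maxlen
new_seq = tokenizer.texts_to_sequences(new_headlines)
new_seq = pad_sequences(new_seq, maxlen=maxlen)

# column 1 of predict_proba is the probability of class 1 (is_sarcastic = 1)
probs = opt_df.predict_proba(new_seq)[:, 1]
for headline, p in zip(new_headlines, probs):
    print("{:.2f}  {}".format(p, headline))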
src/tensorboard/hyperparameter_tuning_with_hparams.ipynb
###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Hyperparameter Tuning with the HParams Dashboard View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook When building machine learning models, you need to choose various [hyperparameters](https://en.wikipedia.org/wiki/Hyperparameter_(machine_learning)), such as the dropout rate in a layer or the learning rate. These decisions impact model metrics, such as accuracy. Therefore, an important step in the machine learning workflow is to identify the best hyperparameters for your problem, which often involves experimentation. This process is known as "Hyperparameter Optimization" or "Hyperparameter Tuning".The HParams dashboard in TensorBoard provides several tools to help with this process of identifying the best experiment or most promising sets of hyperparameters. This tutorial will focus on the following steps:1. Experiment setup and HParams summary2. Adapt TensorFlow runs to log hyperparameters and metrics3. Start runs and log them all under one parent directory4. Visualize the results in TensorBoard's HParams dashboardNote: The HParams summary APIs and dashboard UI are in a preview stage and will change over time. Start by installing TF 2.0 and loading the TensorBoard notebook extension: ###Code # Load the TensorBoard notebook extension %load_ext tensorboard # Clear any logs from previous runs !rm -rf ./logs/ ###Output _____no_output_____ ###Markdown Import TensorFlow and the TensorBoard HParams plugin: ###Code import tensorflow as tf from tensorboard.plugins.hparams import api as hp ###Output 2021-07-29 14:17:25.100583: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory 2021-07-29 14:17:25.100605: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine. ###Markdown Download the [FashionMNIST](https://github.com/zalandoresearch/fashion-mnist) dataset and scale it: ###Code fashion_mnist = tf.keras.datasets.fashion_mnist (x_train, y_train),(x_test, y_test) = fashion_mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 ###Output _____no_output_____ ###Markdown 1. Experiment setup and the HParams experiment summaryExperiment with three hyperparameters in the model:1. Number of units in the first dense layer2. Dropout rate in the dropout layer3. OptimizerList the values to try, and log an experiment configuration to TensorBoard. This step is optional: you can provide domain information to enable more precise filtering of hyperparameters in the UI, and you can specify which metrics should be displayed. 
###Code HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32])) HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2)) HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd'])) METRIC_ACCURACY = 'accuracy' with tf.summary.create_file_writer('logs/hparam_tuning').as_default(): hp.hparams_config( hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER], metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')], ) ###Output 2021-07-29 14:17:26.991066: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory 2021-07-29 14:17:26.991094: W tensorflow/stream_executor/cuda/cuda_driver.cc:326] failed call to cuInit: UNKNOWN ERROR (303) 2021-07-29 14:17:26.991120: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (masternode): /proc/driver/nvidia/version does not exist 2021-07-29 14:17:26.991367: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. ###Markdown If you choose to skip this step, you can use a string literal wherever you would otherwise use an `HParam` value: e.g., `hparams['dropout']` instead of `hparams[HP_DROPOUT]`. 2. Adapt TensorFlow runs to log hyperparameters and metricsThe model will be quite simple: two dense layers with a dropout layer between them. The training code will look familiar, although the hyperparameters are no longer hardcoded. Instead, the hyperparameters are provided in an `hparams` dictionary and used throughout the training function: ###Code def train_test_model(hparams): model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation=tf.nn.relu), tf.keras.layers.Dropout(hparams[HP_DROPOUT]), tf.keras.layers.Dense(10, activation=tf.nn.softmax), ]) model.compile( optimizer=hparams[HP_OPTIMIZER], loss='sparse_categorical_crossentropy', metrics=['accuracy'], ) model.fit(x_train, y_train, epochs=1) # Run with 1 epoch to speed things up for demo purposes _, accuracy = model.evaluate(x_test, y_test) return accuracy ###Output _____no_output_____ ###Markdown For each run, log an hparams summary with the hyperparameters and final accuracy: ###Code def run(run_dir, hparams): with tf.summary.create_file_writer(run_dir).as_default(): hp.hparams(hparams) # record the values used in this trial accuracy = train_test_model(hparams) tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1) ###Output _____no_output_____ ###Markdown When training Keras models, you can use callbacks instead of writing these directly:```pythonmodel.fit( ..., callbacks=[ tf.keras.callbacks.TensorBoard(logdir), log metrics hp.KerasCallback(logdir, hparams), log hparams ],)``` 3. Start runs and log them all under one parent directoryYou can now try multiple experiments, training each one with a different set of hyperparameters. For simplicity, use a grid search: try all combinations of the discrete parameters and just the lower and upper bounds of the real-valued parameter. For more complex scenarios, it might be more effective to choose each hyperparameter value randomly (this is called a random search). 
There are more advanced methods that can be used. Run a few experiments, which will take a few minutes: ###Code session_num = 0 for num_units in HP_NUM_UNITS.domain.values: for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value): for optimizer in HP_OPTIMIZER.domain.values: hparams = { HP_NUM_UNITS: num_units, HP_DROPOUT: dropout_rate, HP_OPTIMIZER: optimizer, } run_name = "run-%d" % session_num print('--- Starting trial: %s' % run_name) print({h.name: hparams[h] for h in hparams}) run('logs/hparam_tuning/' + run_name, hparams) session_num += 1 ###Output --- Starting trial: run-0 {'num_units': 16, 'dropout': 0.1, 'optimizer': 'adam'} ###Markdown 4. Visualize the results in TensorBoard's HParams plugin The HParams dashboard can now be opened. Start TensorBoard and click on "HParams" at the top. ###Code %tensorboard --logdir logs/hparam_tuning ###Output _____no_output_____ ###Markdown The left pane of the dashboard provides filtering capabilities that are active across all the views in the HParams dashboard:- Filter which hyperparameters/metrics are shown in the dashboard- Filter which hyperparameter/metrics values are shown in the dashboard- Filter on run status (running, success, ...)- Sort by hyperparameter/metric in the table view- Number of session groups to show (useful for performance when there are many experiments) The HParams dashboard has three different views, with various useful information:* The **Table View** lists the runs, their hyperparameters, and their metrics.* The **Parallel Coordinates View** shows each run as a line going through an axis for each hyperparameter and metric. Click and drag the mouse on any axis to mark a region which will highlight only the runs that pass through it. This can be useful for identifying which groups of hyperparameters are most important. The axes themselves can be re-ordered by dragging them.* The **Scatter Plot View** shows plots comparing each hyperparameter/metric with each metric. This can help identify correlations. Click and drag to select a region in a specific plot and highlight those sessions across the other plots. A table row, a parallel coordinates line, and a scatter plot marker can be clicked to see a plot of the metrics as a function of training steps for that session (although in this tutorial only one step is used for each run). To further explore the capabilities of the HParams dashboard, download a set of pregenerated logs with more experiments: ###Code %%bash wget -q 'https://storage.googleapis.com/download.tensorflow.org/tensorboard/hparams_demo_logs.zip' unzip -q hparams_demo_logs.zip -d logs/hparam_demo ###Output _____no_output_____ ###Markdown View these logs in TensorBoard: ###Code %tensorboard --logdir logs/hparam_demo ###Output _____no_output_____
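###Markdown For completeness, here is a rough sketch of the random-search variant mentioned above: instead of looping over every combination, sample hyperparameter values at random for a fixed budget of trials. It reuses the `HP_*` objects and the `run` helper defined earlier; the budget of 8 trials is arbitrary. ###Code
import random

num_trials = 8  # arbitrary budget for illustration

for session_num in range(num_trials):
    hparams = {
        HP_NUM_UNITS: random.choice(HP_NUM_UNITS.domain.values),
        HP_DROPOUT: random.uniform(HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value),
        HP_OPTIMIZER: random.choice(HP_OPTIMIZER.domain.values),
    }
    run_name = "random-run-%d" % session_num
    print('--- Starting trial: %s' % run_name)
    print({h.name: hparams[h] for h in hparams})
    run('logs/hparam_tuning/' + run_name, hparams)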
play_arXiv.ipynb
###Markdown Train a word-level GPT on ML arXiv abstractsThe arXiv dataset on [Kaggle](https://www.kaggle.com/Cornell-University/arxiv) provides meta-data on thousands of papers published over the past decades. In this post, we take all the abstracts from papers in the field of Machine Learning (or related fields) and then train GPT on them. We use Andrej Karpathy's [minGPT](https://github.com/karpathy/minGPT) - a PyTorch re-implementation of OpenAI's [GPT](https://github.com/openai/gpt-3) that "tries to be small, clean, interpretable and educational" (it is.) We train our model on a single GPU available on Google Colab, feed it some prompts, and get it to predict an entire Machine Learning abstract! ###Code from google.colab import drive # import drive from google colab ROOT = "/content/drive" # default location for the drive print(ROOT) # print content of ROOT (Optional) drive.mount(ROOT) # we mount the google drive at /content/drive # This is necessary to ensure that paths are correct for importing data from the google drive folder # insert correct root for minGPT code minGPT_DIR = '/minGPT/' %cd $minGPT_DIR # set up logging import logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # make deterministic from mingpt.utils import set_seed set_seed(42) import numpy as np import torch import torch.nn as nn from torch.nn import functional as F import re import numpy as np import pandas as pd import os import json pd.set_option('float_format', '{:f}'.format) ###Output _____no_output_____ ###Markdown Let's load the data, using `yield` below to avoid memory problems with the huge json file. ###Code file_path = 'arxiv-metadata-oai-snapshot.json' def get_metadata(): with open(file_path, 'r') as f: for line in f: yield line ###Output _____no_output_____ ###Markdown We'll just look at papers from the past 10 years and select those belonging to the three categories arXiv tags AI papers in:- 'cs.AI': 'Artificial Intelligence'- 'cs.LG': 'Machine Learning'- 'stat.ML': 'Machine Learning'That gets us 4673 abstracts to work with! ###Code ai_list = ['cs.AI','cs.LG','stat.ML'] abstracts = [] metadata = get_metadata() # loop over all papers for paper in metadata: # extract single paper paper_dict = json.loads(paper) version = paper_dict.get('versions') category = paper_dict.get('categories') try: try: year = int(paper_dict.get('journal-ref')[-4:]) ### Example Format: "Phys.Rev.D76:013009,2007" except: year = int(paper_dict.get('journal-ref')[-5:-1]) ### Example Format: "Phys.Rev.D76:013009,(2007)" if any(ele in category for ele in ai_list) and 2010<year<2021: abstracts.append(paper_dict.get('abstract')) except: pass len(abstracts) ###Output _____no_output_____ ###Markdown Next we need to preprocess the abstracts as follows, after which we get a corpus of 857,479 words. ###Code # strip whitespace at the ends of abstracts, replace new lines by spaces and add an 'end of sentence' token f = lambda x: x.strip().replace("\n"," ") + " #EOS" abstracts = [f(x) for x in abstracts] # separate all words and punctuation abstracts = [re.findall(r"[\w']+|[.,!?;]", x) for x in abstracts] # turn list of lists in to single list abstracts = [j for i in abstracts for j in i] len(abstracts) import math from torch.utils.data import Dataset class WordDataset(Dataset): def __init__(self, data, block_size): words = sorted(list(set(data))) data_size, vocab_size = len(data), len(words) print('data has %d words, %d unique.' 
% (data_size, vocab_size)) self.stoi = { ch:i for i,ch in enumerate(words) } self.itos = { i:ch for i,ch in enumerate(words) } self.block_size = block_size self.vocab_size = vocab_size self.data = data def __len__(self): return len(self.data) - self.block_size def __getitem__(self, idx): # grab a chunk of (block_size + 1) words from the data chunk = self.data[idx:idx + self.block_size + 1] # encode every word to an integer dix = [self.stoi[s] for s in chunk] """ # See https://github.com/karpathy/minGPT/blob/master/play_char.ipynb for # explainer of Dataset construction """ x = torch.tensor(dix[:-1], dtype=torch.long) y = torch.tensor(dix[1:], dtype=torch.long) return x, y ###Output _____no_output_____ ###Markdown With our Dataset object defined we can load our dataset with a block size of 128, appropriate since the average abstract on arXiv is 122 words long (see prev [post](https://kushmadlani.github.io/arxiv-eda/)): ###Code block_size = 128 # sets spatial extent of the model for its context train_dataset = WordDataset(abstracts, block_size) ###Output data has 857479 words, 25921 unique. ###Markdown Let's load a GPT! In the character-level transformer example Karpathy wrote up he built a 'GPT-1' with 8 layers and 8 heads - here we halve that to 4 layers and 4 attention heads so as to be able to train it on a Colab GPU (I guess we call this 'GPT-0.5'). ###Code from mingpt.model import GPT, GPTConfig mconf = GPTConfig(train_dataset.vocab_size, train_dataset.block_size, n_layer=4, n_head=4, n_embd=256) model = GPT(mconf) from mingpt.trainer import Trainer, TrainerConfig # initialize a trainer instance and kick off training tconf = TrainerConfig(max_epochs=2, batch_size=128, learning_rate=6e-4, lr_decay=True, warmup_tokens=256*20, final_tokens=2*len(train_dataset)*block_size, num_workers=4) trainer = Trainer(model, train_dataset, None, tconf) trainer.train() ###Output epoch 1 iter 6698: train loss 1.35257. lr 3.000110e-04: 100%|██████████| 6699/6699 [24:41<00:00, 4.52it/s] epoch 2 iter 6698: train loss 0.94379. lr 6.000000e-05: 100%|██████████| 6699/6699 [24:45<00:00, 4.51it/s] ###Markdown Model trained! Let's generate some Machine Learning abstracts... 
###Code # alright, let's sample some word-level abstracts from mingpt.utils import sample context = ['This', 'paper', 'discusses'] x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device) y = sample(model, x, 150, temperature=1.0, sample=True, top_k=10)[0] completion = ' '.join([train_dataset.itos[int(i)] for i in y]) print(completion) context = ['We', 'introduce', 'the'] x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device) y = sample(model, x, 150, temperature=1.0, sample=True, top_k=10)[0] completion = ' '.join([train_dataset.itos[int(i)] for i in y]) print(completion) context = ['Our', 'work', 'has', 'focused', 'on'] x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device) y = sample(model, x, 200, temperature=1.0, sample=True, top_k=10)[0] completion = ' '.join([train_dataset.itos[int(i)] for i in y]) print(completion) context = ['Our', 'work', 'has', 'focused', 'on'] x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device) y = sample(model, x, 200, temperature=1.0, sample=True, top_k=10)[0] completion = ' '.join([train_dataset.itos[int(i)] for i in y]) print(completion) context = ['This', 'paper', 'considers'] x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device) y = sample(model, x, 150, temperature=1.0, sample=True, top_k=10)[0] completion = ' '.join([train_dataset.itos[int(i)] for i in y]) print(completion) ###Output This paper considers the problem of finding a single optimal clustering that minimizes a specific number of disagreements i . e . , the sum of the number of observed missing edges within clusters . The objective of most promising intelligent algorithms appear to be evaluated on the basis of similarity matrix . However , most of the problems have with high probability , that they are designed for the pair of clusters are distinct from observational data . The optimal clustering must pass through a grid like time varying quality . We develop a new algorithm to learn K coordinate dictionaries , with dimensions m_k times p_k up to estimation error varepsilon_k is shown to be max_ k in K mathcal O m_kp_k 3 varepsilon_k 2 . EOS Understanding the causes of crime is a longstanding issue in researcher's agenda . While it is a hard task to extract causality from data
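###Markdown The three sampling cells above repeat the same boilerplate, so here is a small helper that wraps it. Just a sketch: it assumes every word in the prompt appears in the training vocabulary (otherwise the `stoi` lookup will fail). ###Code
def generate_abstract(prompt_words, n_words=150, temperature=1.0, top_k=10):
    # encode the prompt, sample a continuation, then decode back to words
    x = torch.tensor([train_dataset.stoi[w] for w in prompt_words],
                     dtype=torch.long)[None, ...].to(trainer.device)
    y = sample(model, x, n_words, temperature=temperature, sample=True, top_k=top_k)[0]
    return ' '.join(train_dataset.itos[int(i)] for i in y)

print(generate_abstract(['We', 'propose', 'a']))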
docs/contents/pbc/box_angles_from_box_vectors.ipynb
###Markdown Angles from vectors ###Code molsys = msm.convert(msm.demo['Met-enkephalin']['vacuum.msmpk']) molsys_cub = msm.build.solvate(molsys, box_geometry='cubic', clearance='14.0 angstroms', engine='PDBFixer') molsys_oct = msm.build.solvate(molsys, box_geometry='truncated octahedral', clearance='14.0 angstroms', engine='PDBFixer') molsys_dod = msm.build.solvate(molsys, box_geometry='rhombic dodecahedral', clearance='14.0 angstroms', engine='PDBFixer') box = msm.get(molsys_cub, target='system', box=True) msm.pbc.box_angles_from_box_vectors(box) box = msm.get(molsys_oct, target='system', box=True) msm.pbc.box_angles_from_box_vectors(box) box = msm.get(molsys_dod, target='system', box=True) msm.pbc.box_angles_from_box_vectors(box) ###Output _____no_output_____
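###Markdown For reference, the same geometry can be checked with bare numpy: the box angles are just the pairwise angles between the three box vectors (alpha between b and c, beta between a and c, gamma between a and b). The sketch below assumes `box` has already been reduced to a plain 3x3 array of vectors (units and any frame index stripped). ###Code
import numpy as np

def box_angles_np(box_vectors):
    # box_vectors: 3x3 array with the three box vectors as rows
    a, b, c = np.asarray(box_vectors, dtype=float).reshape(3, 3)
    def angle(u, v):
        cosang = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
        return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))
    return angle(b, c), angle(a, c), angle(a, b)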
DEMO_Result/week11_gan/homework_ready.ipynb
###Markdown ![embedding_mapping.png](https://github.com/yandexdataschool/nlp_course/raw/master/resources/embedding_mapping.png) Homework: Un(semi)-supervised word translation learning This homework is based on the [Conneau et al. 2018](https://arxiv.org/abs/1710.04087) article. In it we ask you to train a mapping between Ukrainian word vectors and Russian word vectors, just like in the first homework of the NLP course. But unlike the first homework, this mapping will be built (almost) unsupervised: without parallel data (pairs of corresponding words in Ukrainian and Russian). ###Code %env KERAS_BACKEND=tensorflow %env CUDA_VISIBLE_DEVICES=1 %load_ext autoreload %autoreload 2 import tensorflow as tf import keras from keras.models import Sequential from keras import layers as L import numpy as np import gensim from IPython import display from tqdm import tnrange import matplotlib.pyplot as plt %matplotlib inline !wget https://www.dropbox.com/s/cnwyfbfa44mqxph/ukr_rus.train.txt?dl=1 -O ./ukr_rus.train.txt !wget https://www.dropbox.com/s/78otz1d4d9b0284/ukr_rus.test.txt?dl=1 -O ./ukr_rus.test.txt !wget https://www.dropbox.com/s/210m7gwqkikpsxd/uk.w2v.bin?dl=1 -O ./uk.w2v.bin !wget https://www.dropbox.com/s/3luwyjdmofsdfjz/ru.w2v.bin?dl=1 -O ./ru.w2v.bin ru_embs = gensim.models.KeyedVectors.load_word2vec_format("ru.w2v.bin", binary=True) uk_embs = gensim.models.KeyedVectors.load_word2vec_format("uk.w2v.bin", binary=True) x = uk_embs.vectors[:50000] y = ru_embs.vectors[:50000] def precision(pairs, uk_vectors, topn=1): """ Fraction of (uk, ru) pairs for which the Russian word is among the topn nearest Russian neighbours of the corresponding (possibly mapped) Ukrainian vector. """ assert len(pairs) == len(uk_vectors) num_matches = 0 for i, (uk, ru) in enumerate(pairs): num_matches += ru in set(w[0] for w in ru_embs.most_similar([uk_vectors[i]], topn=topn)) return num_matches / len(pairs) def load_word_pairs(filename): uk_ru_pairs = [] uk_vectors = [] ru_vectors = [] with open(filename, "r") as inpf: for line in inpf: uk, ru = line.rstrip().split("\t") if uk not in uk_embs or ru not in ru_embs: continue uk_ru_pairs.append((uk, ru)) uk_vectors.append(uk_embs[uk]) ru_vectors.append(ru_embs[ru]) return uk_ru_pairs, np.array(uk_vectors), np.array(ru_vectors) uk_ru_test, x_test, y_test = load_word_pairs("ukr_rus.test.txt") uk_ru_train, x_train, y_train = load_word_pairs("ukr_rus.train.txt") precision(uk_ru_test, x_test, 5) ###Output /usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. if np.issubdtype(vec.dtype, np.int): ###Markdown Reminder Embedding space mapping Let $x_i \in \mathrm{R}^d$ be the distributed representation of word $i$ in the source language, and $y_i \in \mathrm{R}^d$ the vector representation of its translation. Our goal is to learn a linear transform $W$ that minimizes the Euclidean distance between $Wx_i$ and $y_i$ for some subset of word embeddings. Thus we can formulate the so-called Procrustes problem:$$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$$or$$W^*= \arg\min_W ||WX - Y||_F$$where $||*||_F$ is the Frobenius norm. In Greek mythology, Procrustes or "the stretcher" was a rogue smith and bandit from Attica who attacked people by stretching them or cutting off their legs, so as to force them to fit the size of an iron bed. We do the same bad thing to the source embedding space; our Procrustean bed is the target embedding space. 
But wait... $W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$ looks like simple multiple linear regression (without an intercept). So let's code. Orthogonal Procrustes Problem It can be shown (see the original paper) that a self-consistent linear mapping between semantic spaces should be orthogonal. So we can restrict the transform $W$ to be orthogonal and solve the following problem instead:$$W^*= \arg\min_W ||WX - Y||_F \text{, where: } W^TW = I$$$$I \text{ is the identity matrix}$$Instead of setting up yet another regression problem, we can find the optimal orthogonal transformation using singular value decomposition. It turns out that the optimal transformation $W^*$ can be expressed via the SVD components:$$X^TY=U\Sigma V^T\text{, singular value decomposition}$$$$W^*=UV^T$$ Word translation learning using GAN (8 points) Generator If $\mathcal{X}=\{x_1,...,x_n\} \subset \mathrm{R}^d$ is the source embedding set and $\mathcal{Y}=\{y_1,...,y_m\} \subset \mathrm{R}^d$ is the target embedding set, then the generator is simply an orthogonal mapping that can be defined as a square matrix: $W\in O_d(\mathrm{R})$. In neural-network terms, the generator is a network with a single linear layer with an orthogonality constraint and no nonlinearity after it. The generator input is a source embedding $x_i$; the generator output is the mapped source embedding $Wx_i$ ###Code EMB_SIZE = 300 import keras, keras.layers as L def build_generator(emb_size): # TIPS: use keras.Sequential and keras.initializers # YOUR_CODE model = Sequential() model.add(L.InputLayer(input_shape=[emb_size])) model.add(L.Dense(units=emb_size, activation=None, use_bias=False, kernel_initializer=keras.initializers.Identity())) return model generator = build_generator(EMB_SIZE) ###Output _____no_output_____ ###Markdown Discriminator The discriminator is a neural network that should discriminate between objects from $W\mathcal{X}$ (mapped source embeddings) and objects from $\mathcal{Y}$ (target embeddings). Just like in the original article, for the discriminator we will use a multilayer perceptron with two hidden layers of size 2048 and Leaky-ReLU activation functions. The input to the discriminator is corrupted with dropout noise with a rate of 0.1. The discriminator input is either a mapped source embedding $Wx_i$ or a target embedding $y_j$; the discriminator output is the probability that the input comes from the source distribution, $p_D=p_D(source=1)$ ###Code def build_discriminator(emb_size): # YOUR_CODE model = Sequential() model.add(L.InputLayer(input_shape=[emb_size])) model.add(L.Dense(units=2048, activation=None)) model.add(L.LeakyReLU(0.2)) model.add(L.Dense(units=2048, activation=None)) model.add(L.LeakyReLU(0.2)) model.add(L.Dense(1, activation=None)) return model discriminator = build_discriminator(EMB_SIZE) ###Output _____no_output_____ ###Markdown Discriminator loss The purpose of the discriminator is to maximize the output probability for mapped source embeddings, $p_D(source=1|Wx_i)$, and to minimize the probability for target embeddings, $p_D(source=1|y_j)$. The latter is equivalent to maximizing $p_D(source=0|y_j)$. Thus, we can train this classifier with the standard cross-entropy loss: $$\mathcal{L}_D(\theta_D|W)=-\frac{1}{n}\sum_{i=1}^n\log p_D(source=1|Wx_i)-\frac{1}{m}\sum_{i=1}^m\log p_D(source=0|y_i)$$Equivalently:$$\mathcal{L}_D(\theta_D|W)=-\frac{1}{n}\sum_{i=1}^n\log p_D(source=1|Wx_i)-\frac{1}{m}\sum_{i=1}^m\log (1-p_D(source=1|y_i))$$**NB:** We minimize $\mathcal{L}_D(\theta_D|W)$ with respect to the discriminator parameters $\theta_D$. The matrix $W$ is fixed. 
###Code X = tf.placeholder(tf.float32, [None, EMB_SIZE]) Y = tf.placeholder(tf.float32, [None, EMB_SIZE]) W = generator.weights[0] WX = generator(X) logp_wx_is_real = tf.log_sigmoid(discriminator(WX)) logp_wx_is_fake = tf.log_sigmoid(-discriminator(WX)) logp_y_is_real = tf.log_sigmoid(discriminator(Y)) L_d_source = -tf.reduce_mean(logp_wx_is_fake) L_d_target = -tf.reduce_mean(logp_y_is_real) L_d = L_d_source + L_d_target ###Output _____no_output_____ ###Markdown As suggested by Goodfellow (2016), it is useful to use soft targets instead of hard ones. With label smoothing:$$\mathcal{L}_D(\theta_D|W)=\mathcal{L}_{D_1}+\mathcal{L}_{D_2}$$where:$$\mathcal{L}_{D_1}=-\frac{1}{n}\sum_{i=1}^n[(1-\alpha)\log p_D(source=1|Wx_i) + \alpha\log p_D(source=0|Wx_i)]$$$$\mathcal{L}_{D_2}=-\frac{1}{m}\sum_{i=1}^m[(1-\alpha)\log p_D(source=0|y_i) + \alpha\log p_D(source=1|y_i)]$$ ###Code # YOUR CODE HERE IF YOU REALLY WANT TO USE LABEL SMOOTHING ###Output _____no_output_____ ###Markdown Generator loss The purpose of the generator is to fool the discriminator, i.e. to produce a mapping $W\mathcal{X}$ indistinguishable from $\mathcal{Y}$. Therefore we flip the discriminator loss: minimize the output probability for mapped source embeddings, $p_D(source=1|Wx_i)$, and minimize the probability $p_D(source=0|y_j)$ for target embeddings.$$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))-\frac{1}{m}\sum_{i=1}^m\log p_D(source=1|y_i)$$**NB:** We minimize $\mathcal{L}_G(W|\theta_D)$ with respect to the coefficients of the matrix $W$. The discriminator parameters $\theta_D$ are fixed. Because gradients do not flow through the generator for target samples, this reduces to:$$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))$$ In contrast with the original article, for extra stability we allow you to add a supervised component to the loss: an MSE term over a small number of fixed vector pairs from $\mathcal{X}$ and $\mathcal{Y}$.$$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))+\gamma \frac{1}{N}\sum_{k}^N(Wx_k-y_k)^2$$ ###Code X_pair = tf.placeholder('float32', [None, EMB_SIZE]) Y_pair = tf.placeholder('float32', [None, EMB_SIZE]) L_g_source = -tf.reduce_mean(logp_wx_is_real) L_mse = tf.losses.mean_squared_error(Y_pair, generator(X_pair)) L_g = L_mse * 100 + L_g_source ###Output _____no_output_____ ###Markdown Orthogonality constraint Conneau et al. propose to use a simple update step to ensure that the matrix $W$ stays close to an orthogonal matrix during training:$$W \gets (1+\beta)W-\beta(WW^T)W$$ ###Code BETA = tf.constant(0.1) # TIPS: USE tf.assign W_new = (1 + BETA) * W - BETA * tf.matmul(tf.matmul(W, tf.transpose(W)), W) orthogonolize = tf.assign(W, W_new) ###Output _____no_output_____ ###Markdown Training ###Code LEARNING_RATE = 0.1 # GRADIENT DESCENT OPTIMIZER? 
gen_optim = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(L_g, var_list=generator.trainable_weights) dis_optim = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(L_d, var_list=discriminator.trainable_weights) BATCH_SIZE = 32 def sample_batch(bsize): x_batch = x[np.random.choice(np.arange(x.shape[0]), size=bsize)] y_batch = y[np.random.choice(np.arange(y.shape[0]), size=bsize)] return x_batch, y_batch def discriminator_step(): sess.run(dis_optim, {X: x_batch, Y:y_batch}) def generator_step(): sess.run(gen_optim, {X: x_batch, X_pair:x_train[:50], Y_pair:y_train[:50]}) def orthogonolize_step(): sess.run(orthogonolize) def get_metrics(): feed_dict = { X: x_test, Y: y_test, X_pair: x_train[:50], Y_pair: y_train[:50] } loss_g, loss_d, logp_x, logp_y, wx = sess.run([L_g, L_d, logp_wx_is_real, logp_y_is_real, WX], feed_dict) return loss_g, loss_d, np.exp(logp_x), np.exp(logp_y), wx sess = keras.backend.get_session() sess.run(tf.global_variables_initializer()) N_EPOCHS = 10 EPOCH_SIZE = 1000 DIS_STEPS = 5 GEN_STEPS = 1 gen_loss_history = [] dis_loss_history = [] prec_history = [] for epoch_num in range(N_EPOCHS): print("Epoch: {}".format(epoch_num + 1)) for batch_num in range(EPOCH_SIZE): for _ in range(DIS_STEPS): x_batch, y_batch = sample_batch(BATCH_SIZE) discriminator_step() for _ in range(GEN_STEPS): x_batch, y_batch = sample_batch(BATCH_SIZE) generator_step() orthogonolize_step() if batch_num % 10 == 0: display.clear_output(wait=True) loss_g, loss_d, p_x, p_y, wx = get_metrics() gen_loss_history.append(loss_g) dis_loss_history.append(loss_d) if batch_num % 100 == 0: prec_history.append(precision(uk_ru_test, wx, 5)) plt.figure(figsize=(15,15)) plt.subplot(212) plt.plot(gen_loss_history, label="Generator loss") plt.plot(dis_loss_history, label="Discriminator loss") plt.legend(loc='best') plt.subplot(221) plt.title('Mapped vs target data') plt.hist(p_x, label='D(Y)', alpha=0.5,range=[0,1], bins=20) plt.hist(p_y, label='D(WX)',alpha=0.5,range=[0,1], bins=20) plt.legend(loc='best') plt.subplot(222) plt.title('Precision top5') plt.plot(prec_history) plt.show() ###Output _____no_output_____
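###Markdown For comparison with the adversarially learned mapping, here is a sketch of the closed-form orthogonal Procrustes solution described in the reminder section, fitted on the small supervised train set. It reuses `x_train`, `y_train`, `x_test`, `uk_ru_test`, and the `precision` helper from above. ###Code
# SVD of X^T Y gives the optimal orthogonal map for the row-vector convention used here
u, s, vt = np.linalg.svd(x_train.T @ y_train)
w_procrustes = u @ vt

mapped_test = x_test @ w_procrustes
print("Procrustes precision@5:", precision(uk_ru_test, mapped_test, 5))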
1_csharp-first-step/.ipynb_checkpoints/3_Perform basic string formatting in C#-checkpoint.ipynb
###Markdown Perform basic string formatting in CSource: https://docs.microsoft.com/en-us/learn/modules/csharp-basic-formatting/1-introductionAs a software developer, you'll need to write C code to combine and format literal and variable data to create a new value. That value might be displayed, saved to file or sent across the network. Fortunately, C provides many ways to combine and format data.Suppose you want to display the output of a command-line application you're writing. You want to display values including literal text, text in variables, numeric data, and textual data in other languages. How would you format it correctly so that the user can understand what your application is communicating to them?In this module, you'll use character escape sequences to format literal strings of text to include special characters including tabs and line feeds -- even characters from different languages like Kanji or Cyrillic script! You'll learn how to concatenate two strings together, and will use string interpolation to create a literal string template with replaceable parts.By the end of this module, you'll be able to control how your data is displayed to end users of your applications. Character Escape SequencesAn **escape character** sequence is a special instruction to the runtime that you want to insert a special character that will affect the output of your string. In C, the escape character sequences begin with a backslash `\` and then include another character. For example, the `\n` sequence will add a new line, and a `\t` sequence will add a tab.The following code uses escape character sequences to add whitespace. ###Code // New Line Console.WriteLine("Hello\nWorld!"); // Tab Console.WriteLine("Hello\tWorld!"); ###Output Hello World! Hello World! ###Markdown What if you need to insert a double-quotation mark in a literal string? If you don't use the character escape sequence, you'll confuse the compiler because it will think you want to terminate the string prematurely ... and will not understand the purpose of the characters after the second double-quotation mark. ###Code Console.WriteLine("Hello "World"!"); ###Output (1,27): error CS1003: Syntax error, ',' expected (1,32): error CS1003: Syntax error, ',' expected ###Markdown To handle that situation, use the \" escape sequence. ###Code Console.WriteLine("Hello \"World\"!"); ###Output Hello "World"! ###Markdown What if you need to use the backslash for other purposes, like to display a file path? ###Code Console.WriteLine("c:\source\repos"); ###Output (1,22): error CS1009: Unrecognized escape sequence ###Markdown The problem is the sequence `\s`. The `\r` doesn't produce an error because it is a valid escape sequence for a carriage return. However, it's unlikely that you would want to use a carriage return in this context.To solve the problem, you use the `\\` to display a single backslash. ###Code Console.WriteLine("c:\\source\\repos"); ###Output c:\source\repos ###Markdown Format the output of the command-line application using character escape sequencesTo create the mockup of our command line tool, add the following code in the editor. ###Code Console.WriteLine("Generating invoices for customer \"ABC Corp\" ...\n"); Console.WriteLine("Invoice: 1021\t\tComplete!"); Console.WriteLine("Invoice: 1022\t\tComplete!"); Console.WriteLine("\nOutput Directory:\t"); ###Output Generating invoices for customer "ABC Corp" ... Invoice: 1021 Complete! Invoice: 1022 Complete! 
Output Directory: ###Markdown Verbatim String LiteralA verbatim string literal will keep all whitespace and characters without the need to escape the backslash. To create a verbatim string, use the `@` directive before the literal string. ###Code Console.WriteLine(@" c:\source\repos (this is where your code goes)"); ###Output c:\source\repos (this is where your code goes) ###Markdown Format the output of the command-line application using a verbatim literal string ###Code Console.WriteLine("Generating invoices for customer \"ABC Corp\" ...\n"); Console.WriteLine("Invoice: 1021\t\tComplete!"); Console.WriteLine("Invoice: 1022\t\tComplete!"); Console.WriteLine("\nOutput Directory:\t"); Console.Write(@"c:\invoices"); ###Output Generating invoices for customer "ABC Corp" ... Invoice: 1021 Complete! Invoice: 1022 Complete! Output Directory: c:\invoices ###Markdown Unicode Escape CharactersYou can also add encoded characters in literal strings using the `\u` escape sequence, then a four-character code representing some character in Unicode (UTF-16). ###Code // Kon'nichiwa World Console.WriteLine("\u3053\u3093\u306B\u3061\u306F World!"); ###Output こんにちは World! ###Markdown Format the output of the command-line application using unicode escape charactersTo complete the mocked up command-line user interface, we'll add a phrase in Japanese that translates to "To generate Japanese invoices", then provides a verbatim literal string with the application executable with a flag. We'll also add some escape sequences for formatting.Add the following code to your application. ###Code Console.WriteLine("Generating invoices for customer \"ABC Corp\" ...\n"); Console.WriteLine("Invoice: 1021\t\tComplete!"); Console.WriteLine("Invoice: 1022\t\tComplete!"); Console.WriteLine("\nOutput Directory:\t"); Console.Write(@"c:\invoices"); // To generate Japanese invoices: // Nihon no seikyū-sho o seisei suru ni wa: Console.Write("\n\n\u65e5\u672c\u306e\u8acb\u6c42\u66f8\u3092\u751f\u6210\u3059\u308b\u306b\u306f\uff1a\n\t"); Console.WriteLine(@"c:\invoices\app.exe -j"); ###Output Generating invoices for customer "ABC Corp" ... Invoice: 1021 Complete! Invoice: 1022 Complete! Output Directory: c:\invoices 日本の請求書を生成するには: c:\invoices\app.exe -j
Python and Numpy/Numpy Indexing and Selection.ipynb
###Markdown NumPy Indexing and Selection In this lecture we will discuss how to select elements or groups of elements from an array. ###Code import numpy as np from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" #Creating sample array arr = np.arange(0,11) #Show arr ###Output _____no_output_____ ###Markdown Bracket Indexing and Selection The simplest way to pick one or some elements of an array looks very similar to python lists: ###Code #Get a value at an index arr[8] #Get values in a range arr[1:5] #Get values in a range arr[0:5] ###Output _____no_output_____ ###Markdown Broadcasting Numpy arrays differ from a normal Python list because of their ability to broadcast: ###Code #Setting a value with index range (Broadcasting) arr[0:5]=100 #Show arr a=np.random.randn(5,5) b=np.random.randn(5,1) a b c=a+b c # Reset array, we'll see why I had to reset in a moment arr = np.arange(0,11) #Show arr #Important notes on Slices slice_of_arr = arr[0:6] #Show slice slice_of_arr #Change Slice slice_of_arr[:]=99 #Show Slice again slice_of_arr ###Output _____no_output_____ ###Markdown Now note the changes also occur in our original array! ###Code arr ###Output _____no_output_____ ###Markdown Data is not copied, it's a view of the original array! This avoids memory problems! ###Code #To get a copy, need to be explicit arr_copy = arr.copy() arr_copy arr_copy[:]=10 arr ###Output _____no_output_____ ###Markdown Indexing a 2D array (matrices) The general format is **arr_2d[row][col]** or **arr_2d[row,col]**. I recommend usually using the comma notation for clarity. ###Code arr_2d = np.array(([5,10,15],[20,25,30],[35,40,45])) #Show arr_2d #Indexing row arr_2d[1] # Format is arr_2d[row][col] or arr_2d[row,col] # Getting individual element value arr_2d[1][0] # Getting individual element value arr_2d[1,0] # 2D array slicing #Shape (2,2) from top right corner arr_2d[:2,1:] #Shape bottom row arr_2d[2] #Shape bottom row arr_2d[2,:] arr_2d[-1,:] arr_2d[::-1] arr_2d[0:,::-1] ###Output _____no_output_____ ###Markdown Fancy Indexing Fancy indexing allows you to select entire rows or columns out of order. To show this, let's quickly build out a numpy array: ###Code #Set up matrix arr2d = np.zeros((10,10)) #Length of array arr_length = arr2d.shape[1] #Set up array for i in range(arr_length): arr2d[i] = i arr2d ###Output _____no_output_____ ###Markdown Fancy indexing allows the following ###Code arr2d[[2,4,6,8]] #Allows in any order arr2d[[6,4,2,7]] ###Output _____no_output_____ ###Markdown More Indexing Help Indexing a 2d matrix can be a bit confusing at first, especially when you start to add in step size. Try a Google image search for NumPy indexing to find useful diagrams, and see the short step-size example at the end of this notebook. Selection Let's briefly go over how to use brackets for selection based on comparison operators. ###Code arr = np.arange(1,11) arr arr > 4 bool_arr = arr>4 bool_arr arr[bool_arr] arr[arr>2] x = 2 arr[arr>x] ###Output _____no_output_____
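###Markdown Since the note above mentions step size but never shows it, here is a tiny example of slicing with a step on the 10x10 array built earlier (it assumes `arr2d` is still defined). ###Code
#Every other row and every other column
arr2d[::2, ::2]

#Rows 1 through 7 with a step of 3
arr2d[1:8:3]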
pytorch/fastai-pytorch-tutorial-scratch-notes.ipynb
###Markdown 2018/9/15-16 WNixalohttps://github.com/fastai/fastai_v1/blob/master/dev_nb/001a_nn_basics.ipynb ###Code from pathlib import Path import requests data_path = Path('data') path = data_path/'mnist' path.mkdir(parents=True, exist_ok=True) url = 'http://deeplearning.net/data/mnist/' filename = 'mnist.pkl.gz' (path/filename) if not (path/filename).exists(): content = requests.get(url+filename).content (path/filename).open('wb').write(content) import pickle, gzip with gzip.open(path/filename, 'rb') as f: ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1') %matplotlib inline from matplotlib import pyplot import numpy as np pyplot.imshow(x_train[0].reshape((28,28)), cmap="gray") x_train.shape import torch x_train,y_train,x_valid,y_valid = map(torch.tensor, (x_train,y_train,x_valid,y_valid)) n,c = x_train.shape x_train, x_train.shape, y_train.min(), y_train.max() import math weights = torch.rand(784, 10)/math.sqrt(784) weights.requires_grad_() bias = torch.zeros(10, requires_grad=True) def log_softmax(x): return x - x.exp().sum(-1).log().unsqueeze(-1) def model(xb): return log_softmax(xb @ weights + bias) xb.shape, xb.sum(-1).shape ###Output _____no_output_____ ###Markdown the `torch.Tensor.sum(dim)` call takes an integer argument as the axis along which to sum. This applies to NumPy arrays as well.In this case `xb.sum(-1)` will turn a 64x784 tensor into a size 64 tensor. This creates a tensor with each element being the total sum of its corresponding size 784 (28x28 flattened) image from the minibatch. ###Code bs = 64 xb = x_train[0:bs] # a mini-batch from x preds = model(xb) preds[0], preds.shape def nll(input, target): return -input[range(target.shape[0]), target].mean() loss_func = nll yb = y_train[0:bs] loss_func(preds, yb) preds[0] ((x_train[0:bs]@weights+bias) - (x_train[0:bs]@weights+bias).exp().sum(-1).log().unsqueeze(-1))[0] preds[0] nll(preds, yb) -preds[range(yb.shape[0]), yb].mean() type(preds) preds[range(0)] preds[0] preds[range(1)] preds[range(2)] preds[:2] type(preds) np.array([[range(10)]])[range(1)] A = np.array([[range(10)]]) A.shape A[range(2)] A.shape len(A[0]) A.shape[0] A[0] A[range(1)] xb.sum() xb.numpy().sum(-1) xb.sum(-1) ###Output _____no_output_____ ###Markdown [`torch.unsqueeze`](https://pytorch.org/docs/stable/torch.htmltorch.unsqueeze) returns a tensor with a dimension of size **1** inserted at the specified position.>the returned tensor shares the smae underlying data with this tensor. ###Code xb.sum(-1) xb[0].sum() ###Output _____no_output_____ ###Markdown taking a look at what `.unsqueeze` does; what does the tensor look like right before `unsqueeze` is applied to it? ###Code xb.exp().sum(-1).log() xb.exp().sum(-1).log()[0] ###Output _____no_output_____ ###Markdown making sure I didn't need parentheses there ###Code (xb.exp().sum(-1).log())[0] xb.exp().sum(-1).log().unsqueeze(-1)[:10] np.array([i for i in range(10)]).shape torch.Tensor([i for i in range(10)]).shape xb.exp().sum(-1).log().unsqueeze(-1).numpy().shape ###Output _____no_output_____ ###Markdown Okay so `.unsqueeze` turns the size 64 tensor into a 64x1 tensor, so it's nicely packaged up with the first element being the 64-long vector ... or something like that right? ###Code xb.exp().sum(-1).log()[:10] ###Output _____no_output_____ ###Markdown The unsqueezed tensor doesn't look as 'nice'.. I guess. So it's packaged into a single column vector because we'll need that for the linear algebra we'll do to it later yeah? 
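A small added sketch (not part of the original scratch notes) spelling out why the `unsqueeze(-1)` is needed before the subtraction in `log_softmax`: broadcasting aligns trailing dimensions, so a shape `(64,)` vector of per-row sums cannot be subtracted from a `(64, 10)` matrix, while a `(64, 1)` column can. The tensors here are random stand-ins for the notebook's batch.

```python
import torch

logits = torch.randn(64, 10)           # stand-in for xb @ weights + bias
row_lse = logits.exp().sum(-1).log()   # shape (64,): one value per image

# logits - row_lse                     # would raise: trailing dims 10 vs 64 don't broadcast

col = row_lse.unsqueeze(-1)            # shape (64, 1)
print(col.shape)                       # torch.Size([64, 1])
print((logits - col).shape)            # torch.Size([64, 10]): one subtraction per row
```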
###Code preds.unsqueeze(-1).shape ###Output _____no_output_____ ###Markdown Oh this is cool. I was wondering how `.unsqeeze` worked for tensors with multiple items in multiple dimensions (ie: not just a single row vector). Well *this* is what it does: ###Code preds.unsqueeze(-1)[:2] ###Output _____no_output_____ ###Markdown So `.unsqueeze` turns our size 64x10 ... *ohhhhhhhh* I misread:>torch.unsqueeze returns a tensor with a dimension of size 1 inserted at the specified position.doesn't mean it repackages the original tensor into a 1-dimensional tensor. I was wonder how it knew how long to make it (you'd have to just concatenate everything, but then in what order?).No, a size-1 dimension is inserted where you tell it. So if it's an (X,Y) matrix, you go and give it a Z dimension, but that Z only contains the original (X,Y), ie: the only thing added is a dimension.Okay, interesting. Not exactly sure yet why we want 3 dimensions, but I kinda get it. Is it related to our data being 28x28x1? Wait isn't PyTorch's ordering N x [C x H x W] ? So it's unrelated then? Or useful for returning 64x784 to 64x28x28? I think that's not the case? Don't know.---So what's up with the `input[range(..` thing?: ###Code # logsoftmax(xb) ls_xb = log_softmax(xb) log_softmax(xb@weights+bias)[0] (xb@weights).shape xb.shape (xb@weights).shape ###Output _____no_output_____ ###Markdown Oh this is where I was confused. I'm not throwing `xb` into Log Softmax. I'm throwing `xb` • `w` + `bias`. The shape going into the log softmax function is *not* 64x784, it's 64x10. Yeah that makes sense. *well duh it has to*. Each value in the tensor is an activation for a class, for each image in the minibatch. So by the magic of machine learning, each activation encapsulates the effect of the weights and biases on that input element with respect to that class.So that means that the `.unsqueeze` oepration is *not* going to be giving a 64x784 vector. ###Code # for reference: xb = x_train[0:bs] yb = y_train[0:bs] def log_softmax(x): return x - x.exp().sum(-1).log().unsqueeze(-1) def model(xb): return log_softmax(xb @ weights + bias) preds = model(xb) def nll(input, target): return -input[range(target.shape[0]), target].mean() loss = nll(preds, yb) loss ###Output _____no_output_____ ###Markdown **Note** the loss equals that in cell `Out[25]` above as it should.---Back to teasing this apart by hand.The minibatch: ###Code xb, xb.shape ###Output _____no_output_____ ###Markdown The minibatch's activations as they head into the Log Softmax: ###Code (xb @ weights + bias)[:2] (xb @ weights + bias).shape ###Output _____no_output_____ ###Markdown The minibatch activations after the Log Softmax and before heading into Negative Log Likelihood: ###Code log_softmax(xb@weights+bias)[:2] log_softmax(xb@weights+bias).shape ###Output _____no_output_____ ###Markdown The loss value computed via NLL on the Log Softmax activations: ###Code nll(log_softmax(xb@weights+bias), yb) ###Output _____no_output_____ ###Markdown *Okay*. Now questions. What is indexing `input` by `[range(target.shape[0]), target]` supposed to be doing? I established before that `A[range(n)]` is valid if `n ≤ A.shape[0]`. So what's going on is I'm range-indexing the 1st dimension of the LogSoftmax activations with the length of the target tensor, and the rest of the dimension indices being the ..*target tensor itself?*That means the index is this: ###Code [range(yb.shape[0]), yb] ###Output _____no_output_____ ###Markdown Okay. 
What does it look like when I index a tensor – forget range-idx for now – with another tensor? ###Code xb[yb] ###Output _____no_output_____ ###Markdown *Okay..* ###Code xb.shape, yb.shape array_1 = np.array([[str(j)+str(i) for i in range(10)] for j in range(5)]) array_1 array_2 = np.array([i for i in range(len(array_1[0]))]) array_2 ###Output _____no_output_____ ###Markdown Uh, moment of truth: ###Code array_1[range(array_2.shape[0]), array_2] ###Output _____no_output_____ ###Markdown Oof course. What happened. Is it.. yes. I'm indexing the wrong array. Also no value in target is greater than the number of classes ... oh... oh *ffs*. Okay.I range index by the length of `target`'s first dim to get the entire first dim of the LogSoftmax activations, and each vector in that index is itself indexed by the value of the target.Less-shitty English: take the first dimension of the activations; that should be batch_size x num_classes activations; so: `num_classes` values in each of `batch_size` vectors; Now for each of those vectors, pull out the value indexed by the corresponding index-value in the target tensor.Oh I see. So just now I was confused that there was redundant work being done. *yeah kinda*. It's Linear Algebra. See, the weights and biases produce the entire output-activations tensor. Meaning: the dot-product & addition operation creates probabilities for every class for every image in the minibatch. Yeah that can be a lot; linalg exists in a block-like world & it's easy to get carried away (*I think*).And that answers another question: the loss function here only cares about how wrong the correct class was. Looks like the incorrect classes are totally ignored (hence a bit of mental hesitation for me because it looks like 90% of the information is being thrown away (it is)). *Now*, that's not what's going on when the Log Softmax is being computed. Gotta think about that a moment..*could activations for non-target classes affect the target-activations during the Log Softmax step, before they're disgarded in the NLL?*```xb - xb.exp().sum(-1).log().unsqueeze(-1)```is the magic line (`xb` is `x` in the definition). ###Code # for reference (again): xb = x_train[0:bs] yb = y_train[0:bs] def log_softmax(x): return x - x.exp().sum(-1).log().unsqueeze(-1) def model(xb): return log_softmax(xb @ weights + bias) preds = model(xb) def nll(input, target): return -input[range(target.shape[0]), target].mean() loss = nll(preds, yb) ###Output _____no_output_____ ###Markdown When the activations are activating, only the weights and biases are having a say. *Right*? ###Code xb.shape, weights.shape np.array([[1,1,1],[2,2,2],[3,3,3]]) @ np.array([[1],[2],[3]]) np.array([[1,1,1],[2,2,2],[-11,0,3]]) @ np.array([[1],[2],[3]]) ###Output _____no_output_____ ###Markdown Right.Now what about the Log Softmax operation itself? Well okay I can simulate this by hand: ###Code yb.type() # batch size of 3 xb_tmp = np.array([[1,1,1,1,1],[2,2,2,2,2],[3,3,3,3,3]]) yb_tmp = np.array([0,1,2]) # 4 classes c = 4 w_tmp = np.array([[i for i in range(c)] for j in range(xb_tmp.shape[1])]) xb_tmp = torch.Tensor(xb_tmp) yb_tmp = torch.tensor(yb_tmp, dtype=torch.int64) # see: https://pytorch.org/docs/stable/tensors.html#torch-tensor w_tmp = torch.Tensor(w_tmp) ###Output _____no_output_____ ###Markdown *umm....**...*So it's `torch.tensor` not `torch.Tensor`? Got a lot of errors trying to specify a datatype with capital T. Alright then. 
###Code torch.tensor([[1, 2, 3]],dtype=torch.int32) xb_tmp.shape, yb_tmp.shape, w_tmp.shape xb.shape, yb.shape, weights.shape actv_tmp = log_softmax(xb_tmp @ w_tmp) actv_tmp nll(actv_tmp, yb_tmp) ###Output _____no_output_____ ###Markdown Good it works. Now to change things. The question was if any of the dropped values (non-target index) had any effect on the loss - since the loss was only calculated on error from the correct target. Basically: *is there any lateral flow of information?*So I'll check this by editing values in the softmax activation that are *not* of the correct index.Wait that shouldn't have an effect anyway. No the question is if information earlier in the stream had an effect later on. *It is 4:12 am..* *Aha*. My question was if the activations that created the non-target class probabilities had any effect on target classes. Which is asking if there is crossing of information in the ... *oh*.I confused myself with the minibatches. Ignore those, there'd be something *very* wrong if there was cross-talk between them. I want to know if there is cross-talk within an individual tensor as it travels through the model. ###Code # batch size of 3 xb_tmp = np.array([[0,1,1,0,0]]) yb_tmp = np.array([1]) # 4 classes c = 4 w_tmp = np.array([[i for i in range(c)] for j in range(xb_tmp.shape[1])]) xb_tmp = torch.Tensor(xb_tmp) yb_tmp = torch.tensor(yb_tmp, dtype=torch.int64) # see: https://pytorch.org/docs/stable/tensors.html#torch-tensor w_tmp = torch.Tensor(w_tmp) xb_tmp @ w_tmp # LogSoftmax(activations) actv_tmp = log_softmax(xb_tmp @ w_tmp) actv_tmp # NLL Loss loss = nll(actv_tmp, yb_tmp) loss def cross_test(x, y): # batch size of 3 xb_tmp = np.array(x) yb_tmp = np.array(y) # 4 classes c = 4 w_tmp = np.array([[i for i in range(c)] for j in range(xb_tmp.shape[1])]) xb_tmp = torch.Tensor(xb_tmp) yb_tmp = torch.tensor(yb_tmp, dtype=torch.int64) # see: https://pytorch.org/docs/stable/tensors.html#torch-tensor w_tmp = torch.Tensor(w_tmp) print(f'Activation: {xb_tmp @ w_tmp}') # LogSoftmax(activations) actv_tmp = log_softmax(xb_tmp @ w_tmp) print(f'Log Softmax: {actv_tmp}') # NLL Loss loss = nll(actv_tmp, yb_tmp) print(f'NLL Loss: {loss}') w_tmp cross_test([[1,1,1,1,1]], [1]) cross_test([[1,1,1,1,0]], [1]) cross_test([[1,1,1,0,0]], [1]) cross_test([[1,1,1,1,0]], [1]) cross_test([[1,1,0,0,0]], [1]) ###Output Activation: tensor([[0., 2., 4., 6.]]) Log Softmax: tensor([[-6.1451, -4.1451, -2.1451, -0.1451]]) NLL Loss: 4.145078182220459
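To close the loop on these notes (this cell is an addition, not part of the original scratchpad): the hand-rolled `log_softmax` plus `nll` pair should match `torch.nn.functional.cross_entropy`, and the `[range(n), target]` selection is the same operation as a `gather` along dim 1. Random tensors stand in for the MNIST minibatch.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(64, 10)             # stand-in for xb @ weights + bias
target = torch.randint(0, 10, (64,))     # stand-in for yb

log_probs = logits - logits.exp().sum(-1).log().unsqueeze(-1)    # the notebook's log_softmax
manual_nll = -log_probs[range(target.shape[0]), target].mean()   # the notebook's nll

# same "pick column target[i] from row i" selection, written with gather
alt_nll = -log_probs.gather(1, target.unsqueeze(1)).squeeze(1).mean()

# the built-in cross_entropy fuses log_softmax and nll_loss
builtin = F.cross_entropy(logits, target)

print(torch.allclose(manual_nll, alt_nll), torch.allclose(manual_nll, builtin))   # True True
```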
SIADS_505/assignment4/InSon_assignment4.ipynb
###Markdown Assignment 4 DescriptionIn this assignment you must read in a file of metropolitan regions and associated sports teams from [assets/wikipedia_data.html](assets/wikipedia_data.html) and answer some questions about each metropolitan region. Each of these regions may have one or more teams from the "Big 4": NFL (football, in [assets/nfl.csv](assets/nfl.csv)), MLB (baseball, in [assets/mlb.csv](assets/mlb.csv)), NBA (basketball, in [assets/nba.csv](assets/nba.csv) or NHL (hockey, in [assets/nhl.csv](assets/nhl.csv)). Please keep in mind that all questions are from the perspective of the metropolitan region, and that this file is the "source of authority" for the location of a given sports team. Thus teams which are commonly known by a different area (e.g. "Oakland Raiders") need to be mapped into the metropolitan region given (e.g. San Francisco Bay Area). This will require some human data understanding outside of the data you've been given (e.g. you will have to hand-code some names, and might need to google to find out where teams are)!For each sport I would like you to answer the question: **what is the win/loss ratio's correlation with the population of the city it is in?** Remember that to calculate the correlation with [`pearsonr`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html), so you are going to send in two ordered lists of values, the populations from the wikipedia_data.html file and the win/loss ratio for a given sport in the same order. Average the win/loss ratios for those cities which have multiple teams of a single sport. Each sport is worth an equal amount in this assignment (20%\*4=80%) of the grade for this assignment. You should only use sports data **from year 2018** for your analysis but use populations data **from year 2016** -- this is important! Notes1. Do not including data about the MLS or CFL in any of the work you are doing, we're only interested in the Big 4 in this assignment.2. I highly suggest that you first tackle the four correlation questions in order, as they are all similar and worth the majority of grades for this assignment. This is by design!3. It's fair game to talk with peers about high level strategy as well as the relationship between metropolitan areas and sports teams. However, do not post code solving aspects of the assignment (including such as dictionaries mapping areas to teams, or regexes which will clean up names).4. There may be more teams than the assert statements test, remember to collapse multiple teams in one city into a single value! Question 1For this question, calculate the win/loss ratio's correlation with the population of the city it is in for the **NHL** using **2018** data. 
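A toy illustration (numbers invented, not graded code) of the `pearsonr` call the assignment asks for: two lists in the same city order go in, and a `(correlation, p-value)` pair comes out. The actual solutions follow in the cells below.

```python
from scipy.stats import pearsonr

# hypothetical populations and win/loss ratios, ordered by the same five cities
populations    = [20_000_000, 13_000_000, 9_500_000, 6_000_000, 4_800_000]
win_loss_ratio = [0.55, 0.48, 0.60, 0.42, 0.51]

corr, p_value = pearsonr(populations, win_loss_ratio)
print(corr, p_value)
```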
###Code import pandas as pd import numpy as np import scipy.stats as stats import re import pandas as pd import numpy as np import scipy.stats as stats import re def nhl_correlation(): # YOUR CODE HERE nhl_df=pd.read_csv("assets/nhl.csv") cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year nhl_df = nhl_df[nhl_df.year == 2018] #nfl_df # Remove row 0, 9, 18, 26 which are sub-titles nhl_df = nhl_df.drop(nhl_df.index[[0, 9, 18, 26]]) # Convert Win and Lose columns from string to integer nhl_df.W = nhl_df.W.astype(int) nhl_df.L = nhl_df.L.astype(int) # Broadcasting Win-Loss Ratio (Win probability) nhl_df['WL_Ratio'] = nhl_df['W'] / (nhl_df['W'] + nhl_df['L']) team_metro_dict = {'Tampa Bay Lightning*': 'Tampa Bay Area', 'Boston Bruins*': 'Boston', 'Toronto Maple Leafs*': 'Toronto', 'Florida Panthers': 'Miami–Fort Lauderdale', 'Detroit Red Wings': 'Detroit', 'Montreal Canadiens': 'Montreal', 'Ottawa Senators': 'Ottawa', 'Buffalo Sabres': 'Buffalo', 'Washington Capitals*': 'Washington, D.C.', 'Pittsburgh Penguins*': 'Pittsburgh', 'Philadelphia Flyers*': 'Philadelphia', 'Columbus Blue Jackets*': 'Columbus', 'New Jersey Devils*': 'New York City', 'Carolina Hurricanes': 'Raleigh', 'New York Islanders': 'New York City', 'New York Rangers': 'New York City', 'Nashville Predators*': 'Nashville', 'Winnipeg Jets*': 'Winnipeg', 'Minnesota Wild*': 'Minneapolis–Saint Paul', 'Colorado Avalanche*': 'Denver', 'St. Louis Blues': 'St. Louis', 'Dallas Stars': 'Dallas–Fort Worth', 'Chicago Blackhawks': 'Chicago', 'Vegas Golden Knights*': 'Las Vegas', 'Anaheim Ducks*': 'Los Angeles', 'San Jose Sharks*': 'San Francisco Bay Area', 'Los Angeles Kings*': 'Los Angeles', 'Calgary Flames': 'Calgary', 'Edmonton Oilers': 'Edmonton', 'Vancouver Canucks': 'Vancouver', 'Arizona Coyotes': 'Phoenix'} nhl_df['Metropolitan area']= nhl_df['team'].map(team_metro_dict) win_loss_by_region = nhl_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() #cities['NHL'] pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" #re.findall(pattern, cities['NHL']) #cities[cities['NHL'].str.contains(pattern1, regex = True)] cities = cities[(cities['NHL'].str.contains(pattern2, regex = True)) & (~cities['NHL'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') win_loss_pop_by_region = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) win_loss_pop_by_region['Population'] = win_loss_pop_by_region['Population'].astype(int) #raise NotImplementedError() #population_by_region = [] # pass in metropolitan area population from cities #win_loss_by_region = [] # pass in win/loss ratio from nhl_df in the same order as cities["Metropolitan area"] #assert len(population_by_region) == len(win_loss_by_region), "Q1: Your lists must be the same length" #assert len(population_by_region) == 28, "Q1: There should be 28 teams being analysed for NHL" return stats.pearsonr(win_loss_pop_by_region['WL_Ratio'], win_loss_pop_by_region['Population'])[0] nhl_correlation() ###Output _____no_output_____ ###Markdown Question 2For this question, calculate the win/loss ratio's correlation with the population of the city it is in for the **NBA** using **2018** data. 
###Code import pandas as pd import numpy as np import scipy.stats as stats import re def nba_correlation(): # YOUR CODE HERE #raise NotImplementedError() nba_df=pd.read_csv("assets/nba.csv") cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year nba_df = nba_df[nba_df.year == 2018] # Convert Win and Lose columns from string to integer nba_df.W = nba_df.W.astype(int) nba_df.L = nba_df.L.astype(int) nba_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) nba_df['WL_Ratio'] = nba_df['W'] / (nba_df['W'] + nba_df['L']) # Remove ranking nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) ## \xa0 is actually non-breaking space in Latin1 (ISO 8859-1), also chr(160). You should replace it with a space nba_df['team'] = nba_df['team'].str.replace('\xa0', '', regex = True) nba_df['team'] = nba_df['team'].str.replace('*', '', regex = True) nba_metro_dict = {'Toronto Raptors': 'Toronto', 'Boston Celtics': 'Boston', 'Philadelphia 76ers': 'Philadelphia', 'Cleveland Cavaliers': 'Cleveland', 'Indiana Pacers': 'Indianapolis', 'Miami Heat': 'Miami–Fort Lauderdale', 'Milwaukee Bucks': 'Milwaukee', 'Washington Wizards': 'Washington, D.C.', 'Detroit Pistons': 'Detroit', 'Charlotte Hornets': 'Charlotte', 'New York Knicks': 'New York City', 'Brooklyn Nets': 'New York City', 'Chicago Bulls': 'Chicago', 'Orlando Magic': 'Orlando', 'Atlanta Hawks': 'Atlanta', 'Houston Rockets': 'Houston', 'Golden State Warriors': 'San Francisco Bay Area', 'Portland Trail Blazers': 'Portland', 'Oklahoma City Thunder': 'Oklahoma City', 'Utah Jazz': 'Salt Lake City', 'New Orleans Pelicans': 'New Orleans', 'San Antonio Spurs': 'San Antonio', 'Minnesota Timberwolves': 'Minneapolis–Saint Paul', 'Denver Nuggets': 'Denver', 'Los Angeles Clippers': 'Los Angeles', 'Los Angeles Lakers': 'Los Angeles', 'Sacramento Kings': 'Sacramento', 'Dallas Mavericks': 'Dallas–Fort Worth', 'Memphis Grizzlies': 'Memphis', 'Phoenix Suns': 'Phoenix'} nba_df['Metropolitan area'] = nba_df['team'].map(nba_metro_dict) win_loss_by_region = nba_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() cities pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['NBA'].str.contains(pattern2, regex = True)) & (~cities['NBA'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') win_loss_pop_by_region = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) win_loss_pop_by_region['Population'] = win_loss_pop_by_region['Population'].astype(int) #population_by_region = [] # pass in metropolitan area population from cities #win_loss_by_region = [] # pass in win/loss ratio from nba_df in the same order as cities["Metropolitan area"] #assert len(population_by_region) == len(win_loss_by_region), "Q2: Your lists must be the same length" #assert len(population_by_region) == 28, "Q2: There should be 28 teams being analysed for NBA" return stats.pearsonr(win_loss_pop_by_region['WL_Ratio'], win_loss_pop_by_region['Population'])[0] nba_correlation() ###Output _____no_output_____ ###Markdown Question 3For this question, calculate the win/loss ratio's correlation with the population of the city it is in for the **MLB** using **2018** data. 
###Code import pandas as pd import numpy as np import scipy.stats as stats import re def mlb_correlation(): mlb_df=pd.read_csv("assets/mlb.csv") cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year mlb_df = mlb_df[mlb_df.year == 2018] #nfl_df # Convert Win and Lose columns from string to integer mlb_df.W = mlb_df.W.astype(int) mlb_df.L = mlb_df.L.astype(int) mlb_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) mlb_df['WL_Ratio'] = mlb_df['W'] / (mlb_df['W'] + mlb_df['L']) # Remove ranking # nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) # ## \xa0 is actually non-breaking space in Latin1 (ISO 8859-1), also chr(160). You should replace it with a space # nba_df['team'] = nba_df['team'].str.replace('\xa0', '', regex = True) # nba_df['team'] = nba_df['team'].str.replace('*', '', regex = True) mlb_metro_dict = {'Boston Red Sox': 'Boston', 'New York Yankees': 'New York City', 'Tampa Bay Rays': 'Tampa Bay Area', 'Toronto Blue Jays': 'Toronto', 'Baltimore Orioles': 'Baltimore', 'Cleveland Indians': 'Cleveland', 'Minnesota Twins': 'Minneapolis–Saint Paul', 'Detroit Tigers': 'Detroit', 'Chicago White Sox': 'Chicago', 'Kansas City Royals': 'Kansas City', 'Houston Astros': 'Houston', 'Oakland Athletics': 'San Francisco Bay Area', 'Seattle Mariners': 'Seattle', 'Los Angeles Angels': 'Los Angeles', 'Texas Rangers': 'Dallas–Fort Worth', 'Atlanta Braves': 'Atlanta', 'Washington Nationals': 'Washington, D.C.', 'Philadelphia Phillies': 'Philadelphia', 'New York Mets': 'New York City', 'Miami Marlins': 'Miami–Fort Lauderdale', 'Milwaukee Brewers': 'Milwaukee', 'Chicago Cubs': 'Chicago', 'St. Louis Cardinals': 'St. Louis', 'Pittsburgh Pirates': 'Pittsburgh', 'Cincinnati Reds': 'Cincinnati', 'Los Angeles Dodgers': 'Los Angeles', 'Colorado Rockies': 'Denver', 'Arizona Diamondbacks': 'Phoenix', 'San Francisco Giants': 'San Francisco Bay Area', 'San Diego Padres': 'San Diego'} mlb_df['Metropolitan area'] = mlb_df['team'].map(mlb_metro_dict) win_loss_by_region = mlb_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['MLB'].str.contains(pattern2, regex = True)) & (~cities['MLB'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') win_loss_pop_by_region = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) win_loss_pop_by_region['Population'] = win_loss_pop_by_region['Population'].astype(int) # YOUR CODE HERE #raise NotImplementedError() #population_by_region = [] # pass in metropolitan area population from cities #win_loss_by_region = [] # pass in win/loss ratio from mlb_df in the same order as cities["Metropolitan area"] #assert len(population_by_region) == len(win_loss_by_region), "Q3: Your lists must be the same length" #assert len(population_by_region) == 26, "Q3: There should be 26 teams being analysed for MLB" return stats.pearsonr(win_loss_pop_by_region['WL_Ratio'], win_loss_pop_by_region['Population'])[0] mlb_correlation() ###Output _____no_output_____ ###Markdown Question 4For this question, calculate the win/loss ratio's correlation with the population of the city it is in for the **NFL** using **2018** data. 
###Code import pandas as pd import numpy as np import scipy.stats as stats import re def nfl_correlation(): # YOUR CODE HERE #raise NotImplementedError() nfl_df=pd.read_csv("assets/nfl.csv") cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year nfl_df = nfl_df[nfl_df.year == 2018] nfl_df = nfl_df.drop(nfl_df.index[[0, 5, 10, 15, 20, 25, 30, 35]]) # Convert Win and Lose columns from string to integer nfl_df.W = nfl_df.W.astype(int) nfl_df.L = nfl_df.L.astype(int) nfl_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) nfl_df['WL_Ratio'] = nfl_df['W'] / (nfl_df['W'] + nfl_df['L']) # Remove ranking # nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) # Remove special characters nfl_df['team'] = nfl_df['team'].str.replace('*', '', regex = True) nfl_df['team'] = nfl_df['team'].str.replace('+', '', regex = True) nfl_metro_dict = {'New England Patriots': 'Boston', 'Miami Dolphins': 'Miami–Fort Lauderdale', 'Buffalo Bills': 'Buffalo', 'New York Jets': 'New York City', 'Baltimore Ravens': 'Baltimore', 'Pittsburgh Steelers': 'Pittsburgh', 'Cleveland Browns': 'Cleveland', 'Cincinnati Bengals': 'Cincinnati', 'Houston Texans': 'Houston', 'Indianapolis Colts': 'Indianapolis', 'Tennessee Titans': 'Nashville', 'Jacksonville Jaguars': 'Jacksonville', 'Kansas City Chiefs': 'Kansas City', 'Los Angeles Chargers': 'Los Angeles', 'Denver Broncos': 'Denver', 'Oakland Raiders': 'San Francisco Bay Area', 'Dallas Cowboys': 'Dallas–Fort Worth', 'Philadelphia Eagles': 'Philadelphia', 'Washington Redskins': 'Washington, D.C.', 'New York Giants': 'New York City', 'Chicago Bears': 'Chicago', 'Minnesota Vikings': 'Minneapolis–Saint Paul', 'Green Bay Packers': 'Green Bay', 'Detroit Lions': 'Detroit', 'New Orleans Saints': 'New Orleans', 'Carolina Panthers': 'Charlotte', 'Atlanta Falcons': 'Atlanta', 'Tampa Bay Buccaneers': 'Tampa Bay Area', 'Los Angeles Rams': 'Los Angeles', 'Seattle Seahawks': 'Seattle', 'San Francisco 49ers': 'San Francisco Bay Area', 'Arizona Cardinals': 'Phoenix'} nfl_df['Metropolitan area'] = nfl_df['team'].map(nfl_metro_dict) win_loss_by_region = nfl_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['NFL'].str.contains(pattern2, regex = True)) & (~cities['NFL'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') win_loss_pop_by_region = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) win_loss_pop_by_region['Population'] = win_loss_pop_by_region['Population'].astype(int) #population_by_region = [] # pass in metropolitan area population from cities #win_loss_by_region = [] # pass in win/loss ratio from nfl_df in the same order as cities["Metropolitan area"] #assert len(population_by_region) == len(win_loss_by_region), "Q4: Your lists must be the same length" #assert len(population_by_region) == 29, "Q4: There should be 29 teams being analysed for NFL" return stats.pearsonr(win_loss_pop_by_region['WL_Ratio'], win_loss_pop_by_region['Population'])[0] nfl_correlation() ###Output _____no_output_____ ###Markdown Question 5In this question I would like you to explore the hypothesis that **given that an area has two sports 
teams in different sports, those teams will perform the same within their respective sports**. How I would like to see this explored is with a series of paired t-tests (so use [`ttest_rel`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_rel.html)) between all pairs of sports. Are there any sports where we can reject the null hypothesis? Again, average values where a sport has multiple teams in one region. Remember, you will only be including, for each sport, cities which have teams engaged in that sport, drop others as appropriate. This question is worth 20% of the grade for this assignment. ###Code help(stats.ttest_rel) import pandas as pd import numpy as np import scipy.stats as stats import re def sports_team_performance(): # YOUR CODE HERE #raise NotImplementedError() mlb_df=pd.read_csv("assets/mlb.csv") nhl_df=pd.read_csv("assets/nhl.csv") nba_df=pd.read_csv("assets/nba.csv") nfl_df=pd.read_csv("assets/nfl.csv") cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Step 1: NHL # Only retain 2018 year nhl_df = nhl_df[nhl_df.year == 2018] # Remove row 0, 9, 18, 26 which are sub-titles nhl_df = nhl_df.drop(nhl_df.index[[0, 9, 18, 26]]) # Convert Win and Lose columns from string to integer nhl_df.W = nhl_df.W.astype(int) nhl_df.L = nhl_df.L.astype(int) # Broadcasting Win-Loss Ratio (Win probability) nhl_df['WL_Ratio'] = nhl_df['W'] / (nhl_df['W'] + nhl_df['L']) team_metro_dict = {'Tampa Bay Lightning*': 'Tampa Bay Area', 'Boston Bruins*': 'Boston', 'Toronto Maple Leafs*': 'Toronto', 'Florida Panthers': 'Miami–Fort Lauderdale', 'Detroit Red Wings': 'Detroit', 'Montreal Canadiens': 'Montreal', 'Ottawa Senators': 'Ottawa', 'Buffalo Sabres': 'Buffalo', 'Washington Capitals*': 'Washington, D.C.', 'Pittsburgh Penguins*': 'Pittsburgh', 'Philadelphia Flyers*': 'Philadelphia', 'Columbus Blue Jackets*': 'Columbus', 'New Jersey Devils*': 'New York City', 'Carolina Hurricanes': 'Raleigh', 'New York Islanders': 'New York City', 'New York Rangers': 'New York City', 'Nashville Predators*': 'Nashville', 'Winnipeg Jets*': 'Winnipeg', 'Minnesota Wild*': 'Minneapolis–Saint Paul', 'Colorado Avalanche*': 'Denver', 'St. Louis Blues': 'St. 
Louis', 'Dallas Stars': 'Dallas–Fort Worth', 'Chicago Blackhawks': 'Chicago', 'Vegas Golden Knights*': 'Las Vegas', 'Anaheim Ducks*': 'Los Angeles', 'San Jose Sharks*': 'San Francisco Bay Area', 'Los Angeles Kings*': 'Los Angeles', 'Calgary Flames': 'Calgary', 'Edmonton Oilers': 'Edmonton', 'Vancouver Canucks': 'Vancouver', 'Arizona Coyotes': 'Phoenix'} nhl_df['Metropolitan area']= nhl_df['team'].map(team_metro_dict) win_loss_by_region = nhl_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() #cities['NHL'] pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" #re.findall(pattern, cities['NHL']) #cities[cities['NHL'].str.contains(pattern1, regex = True)] cities = cities[(cities['NHL'].str.contains(pattern2, regex = True)) & (~cities['NHL'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') nhl_output = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) nhl_output['Population'] = nhl_output['Population'].astype(int) # Step 2: NBA cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year nba_df = nba_df[nba_df.year == 2018] # Convert Win and Lose columns from string to integer nba_df.W = nba_df.W.astype(int) nba_df.L = nba_df.L.astype(int) nba_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) nba_df['WL_Ratio'] = nba_df['W'] / (nba_df['W'] + nba_df['L']) # Remove ranking nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) ## \xa0 is actually non-breaking space in Latin1 (ISO 8859-1), also chr(160). You should replace it with a space nba_df['team'] = nba_df['team'].str.replace('\xa0', '', regex = True) nba_df['team'] = nba_df['team'].str.replace('*', '', regex = True) nba_metro_dict = {'Toronto Raptors': 'Toronto', 'Boston Celtics': 'Boston', 'Philadelphia 76ers': 'Philadelphia', 'Cleveland Cavaliers': 'Cleveland', 'Indiana Pacers': 'Indianapolis', 'Miami Heat': 'Miami–Fort Lauderdale', 'Milwaukee Bucks': 'Milwaukee', 'Washington Wizards': 'Washington, D.C.', 'Detroit Pistons': 'Detroit', 'Charlotte Hornets': 'Charlotte', 'New York Knicks': 'New York City', 'Brooklyn Nets': 'New York City', 'Chicago Bulls': 'Chicago', 'Orlando Magic': 'Orlando', 'Atlanta Hawks': 'Atlanta', 'Houston Rockets': 'Houston', 'Golden State Warriors': 'San Francisco Bay Area', 'Portland Trail Blazers': 'Portland', 'Oklahoma City Thunder': 'Oklahoma City', 'Utah Jazz': 'Salt Lake City', 'New Orleans Pelicans': 'New Orleans', 'San Antonio Spurs': 'San Antonio', 'Minnesota Timberwolves': 'Minneapolis–Saint Paul', 'Denver Nuggets': 'Denver', 'Los Angeles Clippers': 'Los Angeles', 'Los Angeles Lakers': 'Los Angeles', 'Sacramento Kings': 'Sacramento', 'Dallas Mavericks': 'Dallas–Fort Worth', 'Memphis Grizzlies': 'Memphis', 'Phoenix Suns': 'Phoenix'} nba_df['Metropolitan area'] = nba_df['team'].map(nba_metro_dict) win_loss_by_region = nba_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() cities pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['NBA'].str.contains(pattern2, regex = True)) & (~cities['NBA'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = 
cities.set_index('Metropolitan area') nba_output = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) nba_output['Population'] = nba_output['Population'].astype(int) # Step 3: MLB cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year mlb_df = mlb_df[mlb_df.year == 2018] # Convert Win and Lose columns from string to integer mlb_df.W = mlb_df.W.astype(int) mlb_df.L = mlb_df.L.astype(int) mlb_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) mlb_df['WL_Ratio'] = mlb_df['W'] / (mlb_df['W'] + mlb_df['L']) # Remove ranking # nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) # ## \xa0 is actually non-breaking space in Latin1 (ISO 8859-1), also chr(160). You should replace it with a space # nba_df['team'] = nba_df['team'].str.replace('\xa0', '', regex = True) # nba_df['team'] = nba_df['team'].str.replace('*', '', regex = True) mlb_metro_dict = {'Boston Red Sox': 'Boston', 'New York Yankees': 'New York City', 'Tampa Bay Rays': 'Tampa Bay Area', 'Toronto Blue Jays': 'Toronto', 'Baltimore Orioles': 'Baltimore', 'Cleveland Indians': 'Cleveland', 'Minnesota Twins': 'Minneapolis–Saint Paul', 'Detroit Tigers': 'Detroit', 'Chicago White Sox': 'Chicago', 'Kansas City Royals': 'Kansas City', 'Houston Astros': 'Houston', 'Oakland Athletics': 'San Francisco Bay Area', 'Seattle Mariners': 'Seattle', 'Los Angeles Angels': 'Los Angeles', 'Texas Rangers': 'Dallas–Fort Worth', 'Atlanta Braves': 'Atlanta', 'Washington Nationals': 'Washington, D.C.', 'Philadelphia Phillies': 'Philadelphia', 'New York Mets': 'New York City', 'Miami Marlins': 'Miami–Fort Lauderdale', 'Milwaukee Brewers': 'Milwaukee', 'Chicago Cubs': 'Chicago', 'St. Louis Cardinals': 'St. 
Louis', 'Pittsburgh Pirates': 'Pittsburgh', 'Cincinnati Reds': 'Cincinnati', 'Los Angeles Dodgers': 'Los Angeles', 'Colorado Rockies': 'Denver', 'Arizona Diamondbacks': 'Phoenix', 'San Francisco Giants': 'San Francisco Bay Area', 'San Diego Padres': 'San Diego'} mlb_df['Metropolitan area'] = mlb_df['team'].map(mlb_metro_dict) win_loss_by_region = mlb_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['MLB'].str.contains(pattern2, regex = True)) & (~cities['MLB'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') mlb_output = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) mlb_output['Population'] = mlb_output['Population'].astype(int) # Step 4: NFL cities=pd.read_html("assets/wikipedia_data.html")[1] cities=cities.iloc[:-1,[0,3,5,6,7,8]] # Only retain 2018 year nfl_df = nfl_df[nfl_df.year == 2018] nfl_df = nfl_df.drop(nfl_df.index[[0, 5, 10, 15, 20, 25, 30, 35]]) # Convert Win and Lose columns from string to integer nfl_df.W = nfl_df.W.astype(int) nfl_df.L = nfl_df.L.astype(int) nfl_df.team.astype(str) # Broadcasting Win-Loss Ratio (Win probability) nfl_df['WL_Ratio'] = nfl_df['W'] / (nfl_df['W'] + nfl_df['L']) # Remove ranking # nba_df['team'] = nba_df['team'].str.replace('\(\d+\)', '', regex = True) # Remove special characters nfl_df['team'] = nfl_df['team'].str.replace('*', '', regex = True) nfl_df['team'] = nfl_df['team'].str.replace('+', '', regex = True) nfl_metro_dict = {'New England Patriots': 'Boston', 'Miami Dolphins': 'Miami–Fort Lauderdale', 'Buffalo Bills': 'Buffalo', 'New York Jets': 'New York City', 'Baltimore Ravens': 'Baltimore', 'Pittsburgh Steelers': 'Pittsburgh', 'Cleveland Browns': 'Cleveland', 'Cincinnati Bengals': 'Cincinnati', 'Houston Texans': 'Houston', 'Indianapolis Colts': 'Indianapolis', 'Tennessee Titans': 'Nashville', 'Jacksonville Jaguars': 'Jacksonville', 'Kansas City Chiefs': 'Kansas City', 'Los Angeles Chargers': 'Los Angeles', 'Denver Broncos': 'Denver', 'Oakland Raiders': 'San Francisco Bay Area', 'Dallas Cowboys': 'Dallas–Fort Worth', 'Philadelphia Eagles': 'Philadelphia', 'Washington Redskins': 'Washington, D.C.', 'New York Giants': 'New York City', 'Chicago Bears': 'Chicago', 'Minnesota Vikings': 'Minneapolis–Saint Paul', 'Green Bay Packers': 'Green Bay', 'Detroit Lions': 'Detroit', 'New Orleans Saints': 'New Orleans', 'Carolina Panthers': 'Charlotte', 'Atlanta Falcons': 'Atlanta', 'Tampa Bay Buccaneers': 'Tampa Bay Area', 'Los Angeles Rams': 'Los Angeles', 'Seattle Seahawks': 'Seattle', 'San Francisco 49ers': 'San Francisco Bay Area', 'Arizona Cardinals': 'Phoenix'} nfl_df['Metropolitan area'] = nfl_df['team'].map(nfl_metro_dict) win_loss_by_region = nfl_df[['WL_Ratio', 'Metropolitan area']].groupby('Metropolitan area').mean() pattern1 = "^\[note\s\d+\]" pattern2 = "\w+" # #re.findall(pattern, cities['NHL']) # cities[cities['NBA'].str.contains(pattern1, regex = True)] cities = cities[(cities['NFL'].str.contains(pattern2, regex = True)) & (~cities['NFL'].str.contains(pattern1, regex = True))] cities = cities[['Metropolitan area', 'Population (2016 est.)[8]']].rename(columns={"Population (2016 est.)[8]":'Population'}) cities = cities.set_index('Metropolitan area') 
nfl_output = pd.merge(win_loss_by_region, cities, how = 'left', left_index = True, right_index = True) nfl_output['Population'] = nfl_output['Population'].astype(int) # Step 5: Merging 4 output datasets final_output = nhl_output.rename(columns = {'WL_Ratio': 'WL_Ratio_NHL', 'Population': 'Population_NHL'}) def has_team(pop): if pop > 0: return 1 else: return 0 final_output['NHL'] = final_output['Population_NHL'].apply(has_team) final_output = pd.merge(final_output, nba_output, how = 'outer', left_index = True, right_index = True).rename(columns = {'WL_Ratio': 'WL_Ratio_NBA', 'Population': 'Population_NBA'}) final_output['NBA'] = final_output['Population_NBA'].apply(has_team) final_output = pd.merge(final_output, mlb_output, how = 'outer', left_index = True, right_index = True).rename(columns = {'WL_Ratio': 'WL_Ratio_MLB', 'Population': 'Population_MLB'}) final_output['MLB'] = final_output['Population_MLB'].apply(has_team) final_output = pd.merge(final_output, nfl_output, how = 'outer', left_index = True, right_index = True).rename(columns = {'WL_Ratio': 'WL_Ratio_NFL', 'Population': 'Population_NFL'}) final_output['NFL'] = final_output['Population_NFL'].apply(has_team) # Note: p_values is a full dataframe, so df.loc["NFL","NBA"] should be the same as df.loc["NBA","NFL"] and # df.loc["NFL","NFL"] should return np.nan sports = ['NFL', 'NBA', 'NHL', 'MLB'] p_values = pd.DataFrame({k:np.nan for k in sports}, index=sports) #test_pairs = final_output[(final_output['NFL'] == 1) & (final_output['NBA'] == 1)] for sport1 in sports: for sport2 in sports: test_pairs = final_output[(final_output[sport1] == 1) & (final_output[sport2] == 1)] p_values.loc[sport1, sport2] = stats.ttest_rel(test_pairs['WL_Ratio_{}'.format(sport1)], test_pairs['WL_Ratio_{}'.format(sport2)])[1] assert abs(p_values.loc["NBA", "NHL"] - 0.02) <= 1e-2, "The NBA-NHL p-value should be around 0.02" assert abs(p_values.loc["MLB", "NFL"] - 0.80) <= 1e-2, "The MLB-NFL p-value should be around 0.80" return p_values #return sports_team_performance() ###Output C:\Users\sonso\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py:1932: RuntimeWarning: invalid value encountered in less_equal cond2 = cond0 & (x <= _a)
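A stripped-down illustration (invented numbers, separate from the graded function above) of the paired t-test that fills the `p_values` grid in Question 5: position `i` in both lists must refer to the same metropolitan area.

```python
from scipy.stats import ttest_rel

# hypothetical win/loss ratios for six cities that host both an NFL and an NBA team
nfl_ratios = [0.62, 0.44, 0.50, 0.56, 0.38, 0.70]
nba_ratios = [0.58, 0.49, 0.47, 0.61, 0.35, 0.66]

stat, p = ttest_rel(nfl_ratios, nba_ratios)
print(p)   # a large p-value means we cannot reject "the two sports perform the same"
```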
Section 06 - Methods & Functions/Lec 47 - Interaction between Functions.ipynb
###Markdown Shuffling & storing: ###Code def shuffle_list(mylist): shuffle(mylist) return (mylist) result = shuffle_list(example) result ###Output _____no_output_____ ###Markdown Three Cup Monte game: Function to shuffle & return: ###Code def shuffle_list(mylist): shuffle(mylist) return (mylist) ###Output _____no_output_____ ###Markdown Creating game list: ###Code mylist = [' ','O',' '] shuffle_list(mylist) ###Output _____no_output_____ ###Markdown Function to get user guess: ###Code def player_guess(): guess = '' while guess not in ['0','1','2']: guess = input ("Enter a no.: 0, 1, or 2 = ") return int(guess) player_guess() ###Output Enter a no.: 0, 1, or 2 = 8 Enter a no.: 0, 1, or 2 = 2 ###Markdown Function to combine other 2 func: ###Code def check_guess(mylist,guess): if mylist[guess] == 'O': print ("Correct!") else: print ("Wrong!") print (mylist) ###Output _____no_output_____ ###Markdown Logic to call functions in order: ###Code mylist = [' ','O',' '] mixedup_list = shuffle_list(mylist) guess = player_guess() check_guess(mixedup_list,guess) ###Output Wrong! ['O', ' ', ' '] ###Markdown *** Complete 3 cup monte game: ###Code def shuffle_list(mylist): shuffle(mylist) return (mylist) mylist = [' ','O',' '] def player_guess(): guess = '' while guess not in ['0','1','2']: guess = input ("Enter a no.: 0, 1, or 2 = ") return int(guess) def check_guess(mylist,guess): if mylist[guess] == 'O': print ("Correct!") else: print ("Wrong!") print (mylist) mylist = [' ','O',' '] mixedup_list = shuffle_list(mylist) guess = player_guess() check_guess(mixedup_list,guess) ###Output Enter a no.: 0, 1, or 2 = 5 Enter a no.: 0, 1, or 2 = 2 Wrong! ['O', ' ', ' ']
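One possible wrap-up (not in the lecture itself) that bundles the three functions defined above into a single replayable round, to underline the point about functions calling other functions; it assumes `shuffle_list`, `player_guess` and `check_guess` from the cells above are already defined.

```python
def play_round():
    cups = shuffle_list([' ', 'O', ' '])   # hide the ball in a fresh list
    guess = player_guess()                 # keep asking until we get 0, 1 or 2
    check_guess(cups, guess)               # reveal whether the guess was right

# play_round()   # uncomment to play interactively (requires keyboard input)
```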
fun_coding/codility/6_numberofdiskintersections.ipynb
###Markdown ###Code A = [1, 5, 2, 1, 4, 0] a = [] for i,v in enumerate(A): a.append([j for j in range(i-v,v+i+1)]) a # 1st try. O(N**2), 87% correctness, Performance == 0% lol def solution(A): if len(A) > 100000: return -1 a = [] for i,v in enumerate(A): a.append([j for j in range(i-v,v+i+1)]) count = 0 while_count = 0 d = [] while while_count < len(A): b = a.pop(0) for i in a: if any(j in b for j in i): d.append([b,i]) count += 1 while_count += 1 a.append(b) answer = count // 2 return answer if answer <= 10000000 else -1 A = [1, 5, 2, 1, 4, 0] a = [] for i,v in enumerate(A): a.append((i-v,v+i)) a type(a[0][0]) # 2nd try O(N**2), 100% correctness, 0% performance def solution(A): a = [] for i,v in enumerate(A): a.append((i-v,v+i)) while_count = 0 count = 0 while while_count < len(A): b = a.pop(0) for i in a: if (b[0] >= i[0] and b[0] <= i[1]) or (b[0] <= i[0] and b[1] >= i[0]): count += 1 while_count += 1 a.append(b) answer = count // 2 return answer if answer <= 10000000 else -1 solution([1, 2147483647, 0]) a = [] for i,v in enumerate(A): a.append((i-v,v+i)) a # 3rd def solution(A): a = [(i-v, v+i) for i,v in enumerate(A)] count = 0 for j in range(len(A)): b = a.pop(0) for i in a: if (b[0] >= i[0] and b[0] <= i[1]) or (b[0] <= i[0] and b[1] >= i[0]): count += 1 a.append(b) answer = count // 2 return answer if answer <= 10000000 else -1 solution([1, 5, 2, 1, 4, 0]) a = [(i-v, v+i) for i,v in enumerate(A)] a # 3rd def solution(A): a = [(i-v, v+i) for i,v in enumerate(A)] count = 0 for j in range(len(A)): b = a.pop(0) for i in a: if (b[0] >= i[0] and b[0] <= i[1]) or (b[0] <= i[0] and b[1] >= i[0]): count += 1 a.append(b) answer = count // 2 return answer if answer <= 10000000 else -1 A = [1, 5, 2, 1, 4, 0] a = [] b = [] for j,k in enumerate(A): a.append(j-k) b.append(j+k) a.sort() b.sort() print(a) print(b) while_counter = 0 counter = 0 3 4 4 5 6 # O(N) or O(N*log(N)) # lowers that are smaller than an upper necessarily have a radius larger than the smallest upper. = an intersection point def solution(A): a = [] b = [] for j,k in enumerate(A): a.append(j-k) b.append(j+k) a.sort() b.sort() d = 0 counter = 0 n = len(A) for i in range(n): while (d < n and a[d] <= b[i]): counter += d+1-1 counter -= i d = d + 1 if counter > 10000000: return -1 return counter solution(A) A if 1 => [-4, -1, 0, 0] if 4 => [-1, 0, 0, 2] if 4 => [0, 0, 2] if 5 => [0, 2, 5] if 6 => [2, 5] if 8 => [5] 3 3 2 2 1 0 lowers that are smaller than an upper necessarily have a radius larger than the smallest upper. = an intersection point ###Output _____no_output_____
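A cleaned-up restatement (mine, not the Codility reference solution) of the final O(N log N) idea above, with the counting step made explicit; it returns 11 for the sample input `[1, 5, 2, 1, 4, 0]`.

```python
def disc_intersections(A):
    n = len(A)
    lower = sorted(i - r for i, r in enumerate(A))   # where each disc starts
    upper = sorted(i + r for i, r in enumerate(A))   # where each disc ends

    j = 0
    pairs = 0
    for i in range(n):
        # advance j while the j-th smallest start is not past the i-th smallest end
        while j < n and lower[j] <= upper[i]:
            j += 1
        # disc i overlaps every disc already opened (j of them),
        # minus itself and the i discs whose ends were counted earlier
        pairs += j - i - 1
        if pairs > 10_000_000:
            return -1
    return pairs

print(disc_intersections([1, 5, 2, 1, 4, 0]))   # 11
```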
notebooks/First EDA.ipynb
###Markdown Checking in With Myself So far, I've learned that: I should not delete unknown gender, because it is a significant predictor of NDF; users make their first booking within 24 hours of creating their account, suggesting they browse before account creation; if users complete more actions on the site, they are slightly more likely to book than NDF; English is far and away the most common language and US is far and away the most common destination; removing English and US from the mix, the most common destination is often the home country of the language ###Code monthdf = userdf[userdf['date_first_booking'].notnull()] monthdf['month_booked'] = monthdf['date_first_booking'].dt.month.astype(int) sns.countplot(data=monthdf, x='country_destination', hue='month_booked') # Seems like the bookings are unevenly distributed throughout the year. # This could be due to seasonality, but it also could be that our dataset # itself is incomplete/skewed. sns.countplot(data=monthdf, x='month_booked', hue='country_destination') monthdf2 = monthdf[monthdf['country_destination'] != 'US'] monthdf2 = monthdf2[monthdf2['country_destination'] != 'other'] sns.countplot(data=monthdf2, x='month_booked', hue='country_destination') # With no countries obviously bucking the trend, it seems this # exploration hasn't given us any insights. # Let's try it on the raw data... rawuserdf['timestamp_first_active'] = pd.to_datetime(rawuserdf['timestamp_first_active'], format='%Y%m%d%H%M%S') rawuserdf['date_first_booking'] = pd.to_datetime(rawuserdf['date_first_booking']) rawuserdf['days_thinking'] = rawuserdf['date_first_booking'] - rawuserdf['timestamp_first_active'] rawmonthdf = rawuserdf[rawuserdf['date_first_booking'].notnull()] rawmonthdf['month_booked'] = rawmonthdf['date_first_booking'].dt.month.astype(int) sns.countplot(data=rawmonthdf, x='country_destination', hue='month_booked') # It looks like this 'seasonal effect' (again, could be a fluke in the data) # is actually far more pronounced when we remove incomplete values. This would # suggest that the incomplete/NaN values are actually correlated to time of year. # At this point, I need to change my interim data before doing any more exploration. # It's clear the assumptions I made when deleting data were wrong. ###Output _____no_output_____
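The last comment above speculates that missing `date_first_booking` values are correlated with time of year; a quick way to check that claim directly (a sketch only: `rawuserdf` and its columns are taken from the cells above).

```python
# share of users with no booking date, grouped by the month they first became active
missing_by_month = (
    rawuserdf.assign(month_active=rawuserdf['timestamp_first_active'].dt.month)
             .groupby('month_active')['date_first_booking']
             .apply(lambda s: s.isnull().mean())
)
print(missing_by_month)   # large month-to-month swings would support the hypothesis
```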
part-01-foundation/01-algorithms/03-trees-hashtables.ipynb
###Markdown Setup ###Code import time import math import matplotlib.pyplot as plt import numpy as np import random as rnd np.random.seed(123) rnd.seed(123) ###Output _____no_output_____ ###Markdown Trees and Hash Tables: Theory- Trees- Hash Tables Base Algorithms- Binary Search Tree - search - add - remove - traverse: BFS - traverse: DFS (preorder, inorder, postorder) - predecessor, successor - reconstruct BST - Hash tables - search - add - remove Sample Problems and Solutions Largest BST Description Given a tree, find the largest Binary Search Tree among all possible subtrees Solution ###Code # Assume: # class Node(): # def __init__(self, v=None, l=None, r=None): # self.v = v # self.l = l # self.r = r # complete the function below def find_largest_bst(root): if root is None: return 0 def _helper(node): # base if node.l is None and node.r is None: return 1, True, node.v, node.v # recursive part ## check children if node.l is not None: lval, lbst, llo, lhi = _helper(node.l) else: lval, lbst, llo, lhi = 0, True, node.v, node.v if node.r is not None: rval, rbst, rlo, rhi = _helper(node.r) else: rval, rbst, rlo, rhi = 0, True, node.v, node.v ## combine results if lhi <= node.v and node.v <= rlo and lbst and rbst: return 1 + lval + rval, True, llo, rhi else: return max(lval, rval), False, llo, rhi val, bst, lo, hi = _helper(root) return val ###Output _____no_output_____
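A small harness added to make the sketch above runnable: the `Node` class is only assumed in a comment in the original cell, so this concrete version (and the hand-built example tree) is an illustration, not part of the source notebook.

```python
class Node:
    def __init__(self, v=None, l=None, r=None):
        self.v = v
        self.l = l
        self.r = r

#        10
#       /  \
#      5    15        left subtree (5, 1, 8) is a valid BST of size 3;
#     / \     \       7 sitting to the right of 15 breaks the property higher up
#    1   8     7
root = Node(10,
            Node(5, Node(1), Node(8)),
            Node(15, None, Node(7)))

print(find_largest_bst(root))   # expected: 3 (the subtree rooted at 5)
```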
prediction_tasks.ipynb
###Markdown This notebook shows the prediction result in Sec 3.2 (including Table 3-5) in note that the participant is borderline (0), healthy (1) or bipolar (2). ###Code import os import random import numpy as np import datetime import h5py import time import csv import math import scipy import copy import iisignature from datetime import date import matplotlib.dates as mdates import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.metrics import r2_score from sklearn.metrics import roc_curve, auc from sklearn.model_selection import train_test_split from sklearn.multiclass import OneVsRestClassifier from prediction_functions import * ###Output _____no_output_____ ###Markdown Load cohort dataset ###Code test_path='./all-true-colours-matlab-2017-02-27-18-38-12-nick/' participants_list, participants_data_list, participants_time_list=loadParticipants(test_path) Participants=make_classes(participants_data_list,participants_time_list,\ participants_list) cohort=cleaning_sameweek_data(cleaning_same_data(Participants)) class_dic={0: "BPD", 1: "HC", 2: "BD"} ###Output _____no_output_____ ###Markdown **1.** Run state prediction in Sec 3.2.1, with the following two models in one function 'comprehensive_model': * missing-response-incorporated signature-based predictive model (MRSCM, level2) * naive predictive model ###Code if __name__ == "__main__": sample_size=50 minlen=10 for class_ in [int(0), int(1),int(2)]: print('Class', class_dic[class_], 'with min length',minlen) print("____________________") accuracy=comprehensive_model(Participants,\ class_,\ minlen=10,\ training=0.7,\ sample_size=10,\ cumsum=True) print("ASRM state accuracy for naive prediction model and MRSPM (level 2):") print(accuracy[0]) print("QIDS state accuracy for naive prediction model and MRSPM (level 2):") print(accuracy[1]) print('\n') ###Output Class BPD with min length 10 ____________________ ASRM state accuracy for naive prediction model and MRSPM (level 2): [0.6175, 0.7062] QIDS state accuracy for naive prediction model and MRSPM (level 2): [0.595, 0.6475] Class HC with min length 10 ____________________ ASRM state accuracy for naive prediction model and MRSPM (level 2): [0.70125, 0.79875] QIDS state accuracy for naive prediction model and MRSPM (level 2): [0.71625, 0.78875] Class BD with min length 10 ____________________ ASRM state accuracy for naive prediction model and MRSPM (level 2): [0.586, 0.652] QIDS state accuracy for naive prediction model and MRSPM (level 2): [0.556, 0.602] ###Markdown **2.** Run score prediction in Sec 3.2.2, with the following two models in one function 'comprehensive_nomissing_model': * missing-response-incorporated signature-based predictive model (scoreMRSCM, level2) * naive predictive model ###Code if __name__ == "__main__": sample_size=50 minlen=10 for class_ in [int(0), int(1),int(2)]: print('Class', class_dic[class_], 'with min length',minlen) print("____________________") accuracy,mae=comprehensive_nomissing_model(Participants,\ class_,\ minlen=minlen,\ sample_size=sample_size,\ scaling=False) print("MAE of ASRM score prediction for naive predictive model and scoreMRSPM (level 2):") print(mae[0]) print("MAE of QIDS score prediction for naive predictive model and scoreMRSPM (level 2):") print(mae[1]) print('\n') ###Output Class BPD with min length 10 ____________________ MAE of ASRM score prediction for 
naive predictive model and scoreMRSPM (level 2): [2.57167095, 2.11735167] MAE of QIDS score prediction for naive predictive model and scoreMRSPM (level 2): [4.67122329, 3.74499667] Class HC with min length 10 ____________________ MAE of ASRM score prediction for naive predictive model and scoreMRSPM (level 2): [1.13730578, 0.82641092] MAE of QIDS score prediction for naive predictive model and scoreMRSPM (level 2): [1.89942284, 1.53168044] Class BD with min length 10 ____________________ MAE of ASRM score prediction for naive predictive model and scoreMRSPM (level 2): [3.28666201, 2.38695222] MAE of QIDS score prediction for naive predictive model and scoreMRSPM (level 2): [4.60083124, 3.43730616] ###Markdown **3.** Run severity prediction in Sec 3.2.2, with the following two models in one function 'comprehensive_nomissing_model': * missing-response-incorporated signature-based predictive model (scoreMRSCM, level2) but with parameter "scaling" in 'comprehensive_nomissing_model' to be True to map the raw predicted score to corresponding severity of symptoms ###Code if __name__ == "__main__": sample_size=50 minlen=10 for class_ in [int(0), int(1),int(2)]: print('Class', class_dic[class_], 'with min length',minlen) print("____________________") accuracy,mae=comprehensive_nomissing_model(Participants,\ class_,\ minlen=minlen,\ sample_size=sample_size,\ scaling=True) print("accuracy and MAE of (ASRM) severity prediction from scoreMRSPM (level 2):") print([accuracy[0][-1],mae[0][-1]]) print("accuracy and MAE of (QIDS) severity prediction from scoreMRSPM (level 2):") print([accuracy[1][-1],mae[1][-1]]) print('\n') ###Output Class BPD with min length 10 ____________________ accuracy and MAE of (ASRM) severity prediction from scoreMRSPM (level 2): [0.82433, 0.625] accuracy and MAE of (QIDS) severity prediction from scoreMRSPM (level 2): [0.697524, 0.79427] Class HC with min length 10 ____________________ accuracy and MAE of (ASRM) severity prediction from scoreMRSPM (level 2): [0.95825, 0.19069] accuracy and MAE of (QIDS) severity prediction from scoreMRSPM (level 2): [0.949011, 0.13825] Class BD with min length 10 ____________________ accuracy and MAE of (ASRM) severity prediction from scoreMRSPM (level 2): [0.74327, 1.04623] accuracy and MAE of (QIDS) severity prediction from scoreMRSPM (level 2): [0.76425, 0.684]
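The models above are implemented in `prediction_functions.py`, which is not shown in this notebook; purely as an illustration of the "level 2 signature" features they are described as using, `iisignature` can compute the truncated signature of a short toy path (the numbers below are invented and are not study data).

```python
import numpy as np
import iisignature

# a toy 2-D path of (time, score) pairs
path = np.array([[0.0, 3.0],
                 [1.0, 5.0],
                 [2.0, 4.0],
                 [3.0, 7.0]])

sig_level2 = iisignature.sig(path, 2)   # signature truncated at level 2
print(iisignature.siglength(2, 2))      # 6 terms for a 2-D path at level 2
print(sig_level2)
```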
Notebooks/easy_track/algorithms/decision_trees_and_random_forest.ipynb
###Markdown Decision Tree and Random Forest Classifiers---In this tutorial, we will understand how to implement a Decision Tree classifier and a Random Forest classifier to perform classification tasks. We will use both the classifiers to perform a binary classification task (i.e., 2 possible outcomes for the target label) and then compare their performance. Importing Project Dependencies---Let us begin the project by importing the necessary modules. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.tree import plot_tree # for plotting the decision tree ###Output _____no_output_____ ###Markdown Importing the Dataset---For this tutorial, we will be working on a dataset that contains several parameters which are considered important during the application for Masters Programs. The aim here is to predict whether an applicant will get accepted into the university or not. For this, we are considering the threshold for selection to be 0.85.The parameters included are :* GRE Scores ( out of 340 )* TOEFL Scores ( out of 120 )* University Rating ( out of 5 )* Statement of Purpose ( out of 5)* Letter of Recommendation Strength ( out of 5 )* Undergraduate GPA ( out of 10 )* Research Experience ( either 0 or 1 )* Chance of Admit ( ranging from 0 to 1 ) ###Code df = pd.read_csv('https://raw.githubusercontent.com/OneStep-elecTRON/ContentSection/main/Datasets/university_admissions.csv') # readin the csv file df.columns = ['Serial No.', 'GRE Score', 'TOEFL Score', 'University Rating', 'SOP', 'LOR', 'CGPA', 'Research', 'Chance of Admit'] # modifying column names df.head() ###Output _____no_output_____ ###Markdown Now, let us add the __Accepted__ column to our dataframe based on the 85% Chance of Admit criteria. ###Code df['Accepted'] = df['Chance of Admit'].apply(lambda x: 1 if x >= 0.85 else 0) # creating Accepted column on the basis of 0.85 Chance of admit threshold df.head() ###Output _____no_output_____ ###Markdown Data Wrangling & EDA---For the first step in cleaning our data, first we will drop the __Serial No.__ column, then see if the data needs any further cleaning or preprocessing. ###Code df.drop(columns = ['Serial No.'], axis = 1, inplace = True) # dropping the Serial No. column df.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 500 entries, 0 to 499 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 GRE Score 500 non-null int64 1 TOEFL Score 500 non-null int64 2 University Rating 500 non-null int64 3 SOP 500 non-null float64 4 LOR 500 non-null float64 5 CGPA 500 non-null float64 6 Research 500 non-null int64 7 Chance of Admit 500 non-null float64 8 Accepted 500 non-null int64 dtypes: float64(4), int64(5) memory usage: 35.3 KB ###Markdown As we can see, there are no null values within our dataset. Let us now observe the correlation heatmap of our dataset. ###Code sns.heatmap(df.corr()) ###Output _____no_output_____ ###Markdown As we can see here, all the columns show an average correlation w.r.t. our target variable. Thus, we will be using all the columns as the feature variables for training our classifiers.Let us have a look at the statistical analysis of our data. ###Code df.describe() ###Output _____no_output_____ ###Markdown As we can see here, the values within our dataset vary within the range \[1e+2, 1e0]. This might cause a data imbalance while training the model which can result in one feature affecting the final output more than the other feature variables. 
To tackle this imbalance, we will have to standardize the values within our dataset. ###Code from sklearn.preprocessing import StandardScaler X = df[['GRE Score', 'TOEFL Score', 'University Rating', 'SOP', 'LOR', 'CGPA', 'Research']] # selecting the features scaler = StandardScaler() # creating StandardScaler class object X_scaled = scaler.fit_transform(X) # performing standardization on X X_scaled ###Output _____no_output_____ ###Markdown Now, before we train the model, let us split our dataset into training and test sets. ###Code from sklearn.model_selection import train_test_split # importing train_test_split method y = df['Accepted'] # target variable X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42) X_train.shape[0], X_test.shape[0] # printing the number of values in the test and training set ###Output _____no_output_____ ###Markdown With this, we are done pre-processing and exploring our data. In the next step, we will create and train our classifiers. Modeling- Decision Tree Classifier---In this section, we will see how to implement a simple decision tree, and then how to apply different hyperparameter optimization and regularization techniques to improve the performance of our classifier. ###Code # Step 1- Importing the DecisionTreeClassifier class from sklearn from sklearn.tree import DecisionTreeClassifier # Step 2- Creating the decision tree model object decisionTree = DecisionTreeClassifier() # Step 3- Fitting the model with our data decisionTree.fit(X_train, y_train) # Step 4- Generating labels based on the decision tree model predictions y_predicted = decisionTree.predict(X_test) ###Output _____no_output_____ ###Markdown With this, we have completed training our model and also generated the predictions for our test data. Before we move on to calculating the accuracy of our model, let us first visualize our tree. ###Code # visualizing the decision tree fig = plt.figure(figsize=(15,15)) _ = plot_tree(decisionTree, feature_names=X.columns, class_names=np.unique(y_train.astype('str')), filled=True) ###Output _____no_output_____ ###Markdown As we can see, the depth of our decision tree is 9. You can double-click and zoom on the image to see in detail the criteria the classifier used for splitting at each node within the tree. Now, for the next step, let us have a look at the accuracy of our classifier. For the accuracy measure, we will be preferring the F1-score here.F1 score lies in the range \[0,1], with a F1 score of 1 denoting 0 false negative and false positive values. It gives us a rough idea of how many wrong predictions the model made. ###Code # Step 1- Importing the metrics functions from sklearn.metrics import classification_report, confusion_matrix from sklearn.metrics import f1_score, accuracy_score # Step 2- Printing the F1 score print("F1 Score = ", f1_score(y_test, y_predicted), end="\n") # Step 3- Printing precision, recall, accuracy and other metrics pd.DataFrame(classification_report(y_test, y_predicted, output_dict = True)) ###Output F1 Score = 0.9333333333333332 ###Markdown As we can see, we got an initial accuracy of 97% and an F1 score of 93.33 (for class '1'). Now, let us see how we can improve our model's performance. Here, we are going to use a regularization technique called __Pruning__. What we basically do in pruning is that we restrict the depth of the tree. This prevents the model from overfitting on the training data (the deeper a decision tree, the higher the chances of overfitting). 
We will train different decision trees with varying depths and then select the one with the highest F1 score as our final Decision Tree Classifier. ###Code f1_score_list = [] accuracy_score_list = [] # calculating f1 scores for decision trees with different depths for depth in range(3,10): tree = DecisionTreeClassifier(max_depth = depth) tree.fit(X_train, y_train) y_predicted = tree.predict(X_test) f1 = f1_score(y_test, y_predicted) acc = accuracy_score(y_test, y_predicted) f1_score_list.append(f1) accuracy_score_list.append(acc) depth_values = [depth for depth in range(3,10)] # plotting f1 scores for different tree depths plt.plot(depth_values, f1_score_list) plt.plot(depth_values, accuracy_score_list) plt.legend(["F1 score", "Accuracy"], loc ="lower right") plt.xlabel('Depth') plt.show() ###Output _____no_output_____ ###Markdown As we can see from the above graph, the best accuracy and F1 score that our decision tree classifiers achieved is 0.97 and 0.93 (same as without regularization). However, we can achieve the same result with a depth of 5 (less complex model architecture should be preferred whenever possible over a more complex one). So we will train our final model with a max depth of 5 and visuallize our decision tree classifier. NOTE- This time, since we will be training the final model, we will train it on the entire dataset instead of only the training set. ###Code # training the tree with max_depth = 5 decisionTree = DecisionTreeClassifier(max_depth = 5) decisionTree.fit(X_scaled, y) # visualizing the decision tree fig = plt.figure(figsize=(15,15)) _ = plot_tree(decisionTree, feature_names=X.columns, class_names=np.unique(y_train.astype('str')), filled=True) ###Output _____no_output_____ ###Markdown Modeling- Random Forest Classifier---In this section, we will create and train our Random Forest Classifier model. ###Code # Step 1- Importing the RandomForestClassifier class from sklearn from sklearn.ensemble import RandomForestClassifier # Step 2- Creating the decision tree model object forest = RandomForestClassifier() # Step 3- Fitting the model with our data forest.fit(X_train, y_train) # Step 4- Generating labels based on the decision tree model predictions y_predicted = forest.predict(X_test) ###Output _____no_output_____ ###Markdown Now, let us check the accuracy metrics for our forest classifier model. ###Code # Step 1- Printing the F1 score print("F1 Score = ", f1_score(y_test, y_predicted), end="\n") # Step 2- Printing precision, recall, accuracy and other metrics pd.DataFrame(classification_report(y_test, y_predicted, output_dict = True)) ###Output F1 Score = 0.9302325581395349 ###Markdown As we can see, with an F1 score of 0.85 and an accuracy of 0.94, our Random Forest Classifier is initially performing worse as compared to the decision tree. Now, let us tweak a few hyperparameters and retrain another classifier. For this, we will be using GridSearchCV that you learned about earlier. 
The hyperparameters we will be experimenting with are-* max_depth: Denotes the maximum depths of the decision trees in the forest* n_estimators: Number of trees used within the forest ###Code from sklearn.model_selection import GridSearchCV # setting the parameters to be tested max_depths = range(3,10,2) n_estimators_list = range(10,101,10) parameters = {'max_depth': max_depths, 'n_estimators': n_estimators_list} # getting the best parameters using the grid search CV forest = RandomForestClassifier() clf = GridSearchCV(forest, parameters, scoring = 'f1', cv = 5) clf.fit(X_scaled, y) print('Best hyperparameters are: ', clf.best_params_) ###Output Best hyperparameters are: {'max_depth': 7, 'n_estimators': 10} ###Markdown Now, let us train our final Random Forest Classifier with the derived best parameters then we will compare its performance against the 0.93 F1 score of the Decision Tree Classifier. ###Code # training the final forest model forest = RandomForestClassifier(max_depth = 7, n_estimators = 40) forest.fit(X_train, y_train) y_predicted = forest.predict(X_test) # Step 1- Printing the F1 score print("F1 Score = ", f1_score(y_test, y_predicted), end="\n") ###Output F1 Score = 0.8837209302325582
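###Markdown As a small extension (not part of the original tutorial), it can help to inspect which features drive the tuned forest's decisions. The sketch below assumes the `forest` fitted above and the feature names in `X.columns`; `feature_importances_` is a standard attribute of scikit-learn tree ensembles. ###Code
# Rank the features used by the tuned random forest
importances = pd.Series(forest.feature_importances_, index=X.columns)
importances = importances.sort_values(ascending=False)
print(importances)

# Quick visual comparison of the ranking
importances.plot.barh()
###Output _____no_output_____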
04-debug/seminar.ipynb
###Markdown Debugging models[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/m12sl/dl-hse-2021/blob/master/04-debug/seminar.ipynb)Seminar plan:
- [ ] Get comfortable with LR scheduling
- [ ] Write an LR range test
- [ ] Work out how validation and training metrics should be computed
- [ ] Add logging of gradient norms
- [ ] Take a look at forward hooks
- [ ] A classier Trainer
LR SchedulingTwo kinds of schedules:
- per epoch (StepLR, ReduceLROnPlateau, ...) ``` scheduler = StepLR(optimizer, step_size=30, gamma=0.1) for epoch in range(epochs): train(...) validate(...) scheduler.step() ```
- per batch (Cosine, Cyclic, 1cycle, ...) ``` scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) for epoch in range(epochs): train(...) for batch in data_loader: train_batch(...) scheduler.step() validate(...) ```
Choosing the optimal LRTo pick a good LR it is convenient to use the so-called Learning Rate Range Test, often simply called find_lr. Under the hood it is a pass over one training epoch with the LR changed on every batch according to the formula:$$t = \frac{\mathrm{step}}{\mathrm{total\ steps}},\qquad \mathrm{lr} = \exp\left\{ (1 - t) \log a + t \log b\right\}$$To change the LR for all optimized parameters, you can loop over them:```for param_group in optimizer.param_groups: param_group['lr'] = lr```_figure from the [Jeremy Jordan](https://www.jeremyjordan.me/nn-learning-rate/) blog_
The idea behind the trick is simple: while the LR is below some threshold, the weights simply do not change at each gradient descent step (in particular because of how floating-point arithmetic works). With a very large LR we step too far and move away from the extremum. The optimal LR lies somewhere in between, and the exponential LR sweep finds a good point with adequate resolution.If you are curious: [the paper that proposed this technique and used it extensively](https://arxiv.org/pdf/1506.01186.pdf).**Some math notes**Floating-point data types have arithmetic quirks:$$\mathrm{fp32:}\quad x + \delta == x,\;\mathrm{if}\; \delta < 5.96 \cdot 10^{-8}\, x$$By the way, this is one more reason to keep an eye on activation magnitudes and to normalize the data (and the target, in the case of regression). We could switch to float64, but it is cheaper (both in compute and in memory) to be careful with float32._figure from the [NVIDIA](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) article_ MetricsTL;DR:
- log training metrics from every batch, without smoothing
- collect validation metrics over the whole validation pass and plot them as a single point
**TensorBoard specifics**:
- when rendering, it thins out points by global_step
- to draw curves on the same chart you have to write to different directories (create separate train_ and val_ writers)
Let's update the Trainer ###Code from copy import deepcopy from tqdm import tqdm import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from torch.utils.tensorboard import SummaryWriter class VeryModel(nn.Module): def __init__(self, lr_scheduler=None, lr_scheduler_type=None): super().__init__() self.lr_scheduler = lr_scheduler self.lr_scheduler_type = lr_scheduler_type if lr_scheduler_type not in [None, 'per_batch', 'per_epoch']: raise ValueError("lr_scheduler_type must be one of: None, 'per_batch', 'per_epoch'. 
" f"Not: {lr_scheduler_type}") self.inner = nn.Sequential(nn.Linear(784, 100), nn.ReLU(), nn.Linear(100, 10)) def forward(self, x): return self.inner(x) def compute_all(self, batch): # удобно сделать функцию, в которой вычисляется лосс по пришедшему батчу x = batch['sample'] / 255.0 y = batch['label'] logits = self.inner(x) loss = F.cross_entropy(logits, y) acc = (logits.argmax(axis=1) == y).float().mean().cpu().numpy() metrics = dict(acc=acc) return loss, metrics def post_train_batch(self): # called after every train batch if self.lr_scheduler is not None and self.lr_scheduler_type == 'per_batch': self.lr_scheduler.step() def post_val_batch(self): pass def post_train_stage(self): pass def post_val_stage(self, val_loss): # called after every end of val stage (equals to epoch end) if self.lr_scheduler is not None and self.lr_scheduler_type == 'per_epoch': self.lr_scheduler.step(val_loss) class Trainer: def __init__(self, model: nn.Module, optimizer, train_dataset: Dataset, val_dataset: Dataset, tboard_log_dir: str = './tboard_logs/', batch_size: int = 128): self.model = model self.optimizer = optimizer self.train_dataset = train_dataset self.val_dataset = val_dataset self.batch_size = batch_size self.device = 'cpu' if torch.cuda.is_available(): self.device = torch.cuda.current_device() self.model = self.model.to(self.device) self.global_step = 0 self.log_writer = SummaryWriter(log_dir=tboard_log_dir) self.cache = self.cache_states() def save_checkpoint(self, path): torch.save(self.model.state_dict(), path) def train(self, num_epochs: int): model = self.model optimizer = self.optimizer train_loader = DataLoader(self.train_dataset, shuffle=True, pin_memory=True, batch_size=self.batch_size) val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True, batch_size=self.batch_size) best_loss = float('inf') for epoch in range(num_epochs): model.train() for batch in tqdm(train_loader): batch = {k: v.to(self.device) for k, v in batch.items()} loss, details = model.compute_all(batch) optimizer.zero_grad() loss.backward() optimizer.step() # for name, p in model.named_parameters(): if "weight" in name: v = np.linalg.norm(p.grad.data.cpu().numpy()) self.log_writer.add_scalar(f"grad_{name}", v, global_step=self.global_step) model.post_train_batch() for k, v in details.items(): self.log_writer.add_scalar(k, v, global_step=self.global_step) self.global_step += 1 model.eval() val_losses = [] for batch in tqdm(val_loader): batch = {k: v.to(self.device) for k, v in batch.items()} loss, details = model.compute_all(batch) val_losses.append(loss.item()) val_loss = np.mean(val_losses) model.post_val_stage(val_loss) if val_loss < best_loss: self.save_checkpoint("./best_checkpoint.pth") best_loss = val_loss def find_lr(self, min_lr: float = 1e-6, max_lr: float = 1e-1, num_lrs: int = 20, smooth_beta: float = 0.8) -> dict: lrs = np.geomspace(start=min_lr, stop=max_lr, num=num_lrs) logs = {'lr': [], 'loss': [], 'avg_loss': []} avg_loss = None model, optimizer = self.model, self.optimizer train_loader = DataLoader(self.train_dataset, shuffle=True, batch_size=self.batch_size) model.train() for lr, batch in tqdm(zip(lrs, train_loader), desc='finding LR', total=num_lrs): # apply new lr for param_group in self.optimizer.param_groups: param_group['lr'] = lr # train step batch = {k: v.to(self.device) for k, v in batch.items()} loss, details = model.compute_all(batch) optimizer.zero_grad() loss.backward() optimizer.step() # calculate smoothed loss if avg_loss is None: avg_loss = loss else: avg_loss = smooth_beta * 
avg_loss + (1 - smooth_beta) * loss # store values into logs logs['lr'].append(lr) logs['avg_loss'].append(avg_loss) logs['loss'].append(loss) logs.update({key: np.array(val) for key, val in logs.items()}) self.rollback_states() return logs def cache_states(self): cache_dict = {'model_state': deepcopy(self.model.state_dict()), 'optimizer_state': deepcopy(self.optimizer.state_dict())} return cache_dict def rollback_states(self): self.model.load_state_dict(self.cache['model_state']) self.optimizer.load_state_dict(self.cache['optimizer_state']) ###Output _____no_output_____ ###Markdown HooksWhen interactive debugging is not available and you cannot simply add print statements, it can be very handy to register forward/backward hooks: functions that fire whenever forward is called ###Code
watches = {}

def hook_fn(module, inp, out):
    watches[module] = out.detach()

# Register the hook on every layer of a concrete model instance.
# (The original cell iterated over nn._modules, but nn is the torch.nn package, not a model.)
model = VeryModel()
for name, layer in model.inner.named_children():
    layer.register_forward_hook(hook_fn)
###Output _____no_output_____
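###Markdown A quick sanity check of the hooks (a minimal sketch, not from the original seminar): run one dummy batch through the model and look at what each hook captured. It assumes the `model` instance and the `watches` dict from the cell above. ###Code
# Run a dummy batch through the model; each registered hook stores its module's output
x = torch.randn(4, 784)
_ = model(x)

# Print which module produced which activation shape
for module, out in watches.items():
    print(type(module).__name__, tuple(out.shape))
###Output _____no_output_____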
notebooks/006_evaluation/007_ligands_targeting_multiple_kinase_groups.ipynb
###Markdown Ligands targeting multiple kinase groupsExtract from the pooled Karaman-Davis profiling dataset ligands that target mulitple kinase groups. We can treat those as "unexpected off-targets". ###Code %load_ext autoreload %autoreload 2 from IPython.display import display, HTML, Markdown import pandas as pd from opencadd.databases import klifs from src import data klifs_session = klifs.setup_remote() ###Output _____no_output_____ ###Markdown Load profiling data ###Code profiling_data = data.profiling.load("karaman-davis") profiling_data.index.name = "kinase.name" ###Output No measurements 20611 One measurement 6296 Two identical measurements 1291 Two measurements <= or > cutoff 100; keep lower value 30 One measurement <=, one > cutoff 100 but difference <= 100; keep lower value 2 One measurement <=, one > cutoff 100 but difference > 100; remove values 10 dtype: int64 ###Markdown Add kinase groups to kinases in profiling data ###Code kinases = klifs_session.kinases.by_kinase_name(profiling_data.index.to_list()) kinases = kinases[kinases["species.klifs"] == "Human"] kinases.shape def fetch_kinase_names(klifs_name, gene_name, profiling_names): """ Kinase names used in profiling dataset can be part of KLIFS "kinase.klifs_name" or "kinase.gene_name" columns. Add new column to KLIFS data that maps to profiling dataset names. """ if klifs_name == gene_name: return klifs_name else: if klifs_name in profiling_names and gene_name in profiling_names: raise KeyError( f"One KLIFS kinase ({klifs_name}, {gene_name}) mapped to two profiling kinases." ) elif klifs_name in profiling_names: return klifs_name elif gene_name in profiling_names: return gene_name else: return None # Iterate through KLIFS data and add column with kinase name as used in profiling data! kinases["kinase.name"] = kinases.apply( lambda x: fetch_kinase_names( x["kinase.klifs_name"], x["kinase.gene_name"], profiling_data.index ), axis=1, ) # Add kinase family and group to profiling data (as index!) profiling_data = pd.merge( kinases[["kinase.name", "kinase.family", "kinase.group"]], profiling_data.reset_index() ).set_index(["kinase.name", "kinase.family", "kinase.group"]) profiling_data.head() ###Output _____no_output_____ ###Markdown Report ligands targeting multiple kinase groups ###Code def bioactivities_against_multiple_kinase_groups(bioactivities, activity_cutoff): """ Filter input bioactivities for bioactivities below given cutoff. Return bioactivities only if measured against multiple kinase groups. """ bioactivities = bioactivities.sort_values() bioactivities = bioactivities[bioactivities <= activity_cutoff] kinase_groups = bioactivities.index.get_level_values(2).unique() if len(kinase_groups) > 1: bioactivities.name = "bioactivity" return bioactivities else: return None def get_pkidb_ligand_info(ligand_name): """ Get brand name for ligand from PKIDB if available. 
""" pkidb = data.ligands.pkidb() ligand_pkidb = pkidb[pkidb.apply(lambda x: ligand_name.upper() in x["Synonyms"], axis=1)] if len(ligand_pkidb) == 0: return None elif len(ligand_pkidb) == 1: ligand_name_pkidb = ligand_pkidb["BrandName"].iloc[0] ligand_type = ligand_pkidb["Type"].iloc[0] return f"{ligand_name_pkidb} | Type {ligand_type}" else: raise ValueError(f"Multiple entries in PKIDB for {ligand_name}.") ACTIVITY_CUTOFF = 5 for ligand_name, bioactivities in profiling_data.iteritems(): # Omit highly promiscuous ligands if ligand_name in ["Staurosporine"]: continue bioactivities = bioactivities_against_multiple_kinase_groups(bioactivities, ACTIVITY_CUTOFF) if bioactivities is not None: # Add Markdown section of ligand name (and brand name if applicable) ligand_pkidb_info = get_pkidb_ligand_info(ligand_name) if ligand_pkidb_info is not None: display(Markdown(f"### {ligand_name} (PKIDB: {ligand_pkidb_info})")) else: display(Markdown(f"### {ligand_name}")) # Add bioactivites as DataFrame display(HTML(bioactivities.reset_index().to_html())) ###Output _____no_output_____
Agents/Maze_Solver_Agent4.ipynb
###Markdown Maze Solving Agent 4 with complete branch memoryThis agent have better understanding of its environment and accordingly takes its random move. Here it does not know places it has not gone yet but keeps track of the branches gone so far and accordingly takes the required action in future (i.e. knows which location to go and where from there). This helps avoiding repeated motions in straight line paths. ###Code import random import numpy as np ''' This class is made to either take in a given maze in an array form along with the max limit and goal state or just randomly make a maze (It does not necessarily have to be a functional maze) which might not have a path too but it useful to test if our agent and know of it will react and which action it will take ''' class maze_environment: def __init__(self, limit=None, maze=None, goal=None): if(maze is None and limit is None): self.limit = random.randint(5, 15) self.goal = (self.limit-1, self.limit-1) self.maze = [] for i in range(self.limit): row = [] for j in range(self.limit): row.append(random.choice(['F', 'B'])) self.maze.append(row) self.maze[0][0] = 'F' self.maze[self.limit-1][self.limit-1] = 'F' else: self.maze = maze self.limit = limit self.goal = goal def get_limit(self): return self.limit def show_maze(self): for i in range(self.limit): for j in range(self.limit): print(self.maze[i][j], end=" ") print() def get_goal(self): return self.goal def get_position(self, x, y): if(x > self.limit-1 or y > self.limit-1): return 'B' if(x < 0 or y < 0): return 'B' return self.maze[x][y] class maze_solver_full_memory: def __init__(self): self.x = 0 self.y = 0 self.total_steps = 0 self.prev_move = -1 self.branches = {} def tell_pos(self): print("I am currently at the position ({}, {})".format(self.x, self.y)) def move(self, env): if(self.x == env.get_goal()[0] and self.y == env.get_goal()[1]): print("You have solved the maze and reached the end...") return False elif(self.total_steps >= env.limit*env.limit): print("Did not halt, mostly unsolvable maze...") return False else: self.surrondings = [env.get_position(self.x+1, self.y), env.get_position(self.x, self.y+1), env.get_position(self.x-1, self.y), env.get_position(self.x, self.y-1)] possible_moves = [] for i in range(len(self.surrondings)): if self.surrondings[i] == 'F': possible_moves.append(i) if(self.prev_move == 0): if(2 in possible_moves): possible_moves.remove(2) elif(self.prev_move == 2): if(0 in possible_moves): possible_moves.remove(0) elif(self.prev_move == 1): if(3 in possible_moves): possible_moves.remove(3) elif(self.prev_move == 3): if(1 in possible_moves): possible_moves.remove(1) move_turn = 1 if(len(possible_moves) == 1): current_move = possible_moves[0] elif(len(possible_moves) > 1): if((self.x, self.y) not in self.branches): current_move = random.choice(possible_moves) possible_moves.remove(current_move) self.branches[(self.x, self.y)] = possible_moves else: if(self.branches[(self.x, self.y)]): current_move = random.choice(self.branches[(self.x, self.y)]) self.branches[(self.x, self.y)].remove(current_move) else: current_move = random.choice(possible_moves) possible_moves.remove(current_move) self.branches[(self.x, self.y)] = possible_moves else: if(not self.branches): print("Unsolvable maze, no route to exit...") return False last_choice_position = self.branches.popitem() self.x = last_choice_position[0][0] self.y = last_choice_position[0][1] self.branches[(self.x, self.y)] = last_choice_position[1] self.total_steps = 0 move_turn = 0 if(move_turn): if(current_move == 
0): self.x += 1 self.prev_move = 0 print("Go Down") elif(current_move == 1): self.y += 1 self.prev_move = 1 print("Go Right") elif(current_move == 2): self.x -= 1 self.prev_move = 2 print("Go Up") elif(current_move == 3): self.y -= 1 self.prev_move = 3 print("Go Left") else: print("Maze is blocked for all the places...") return False self.total_steps += 1 return True #Custom maze 1 - Single path to goal cust_maze = [['F', 'B', 'B', 'B', 'F'], ['F', 'B', 'B', 'B', 'B'], ['F', 'F', 'F', 'B', 'B'], ['B', 'B', 'F', 'F', 'F'], ['B', 'B', 'B', 'B', 'F']] my_env = maze_environment(limit=5, maze=cust_maze, goal=(4, 4)) my__agent = maze_solver_full_memory() print("Limit is: ", my_env.get_limit()) print("Goal is: ", my_env.get_goal()) my_env.show_maze() status = True while(status): status = my__agent.move(my_env) my__agent.tell_pos() cust_maze3 = [['F', 'F', 'F', 'B', 'B'], ['F', 'F', 'B', 'B', 'F'], ['F', 'B', 'B', 'B', 'B'], ['F', 'B', 'F', 'F', 'F'], ['F', 'F', 'F', 'B', 'F']] my_env4 = maze_environment(limit=5, maze=cust_maze3, goal=(4, 4)) my_agent4 = maze_solver_full_memory() print("Limit is: ", my_env4.get_limit()) print("Goal is: ", my_env4.get_goal()) my_env4.show_maze() status = True while(status): status = my_agent4.move(my_env4) my_agent4.tell_pos() ###Output Go Right I am currently at the position (0, 1) Go Right I am currently at the position (0, 2) I am currently at the position (0, 1) Go Down I am currently at the position (1, 1) Go Left I am currently at the position (1, 0) Go Down I am currently at the position (2, 0) Go Down I am currently at the position (3, 0) Go Down I am currently at the position (4, 0) Go Right I am currently at the position (4, 1) Go Right I am currently at the position (4, 2) Go Up I am currently at the position (3, 2) Go Right I am currently at the position (3, 3) Go Right I am currently at the position (3, 4) Go Down I am currently at the position (4, 4) You have solved the maze and reached the end... I am currently at the position (4, 4)
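###Markdown A small extra check (not in the original notebook): `maze_environment` can also generate a random maze when no arguments are passed, which the class docstring describes as a way to stress-test the agent. A minimal sketch of that mode, mirroring the runs above: ###Code
# Let the environment build a random (possibly unsolvable) maze and run the agent on it
random_env = maze_environment()
random_agent = maze_solver_full_memory()

print("Limit is: ", random_env.get_limit())
print("Goal is: ", random_env.get_goal())
random_env.show_maze()

status = True
while(status):
    status = random_agent.move(random_env)
random_agent.tell_pos()
###Output _____no_output_____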
clase_8_Taller_2ipynb.ipynb
###Markdown **Functions exercise**--- **Practice exercise**---The analyst is asked to produce a relevant estimate of the population of developing countries. As a first step, she is asked to write a program that lets the user enter 5 countries and their respective populations as a test, and that identifies which country has the largest number of inhabitants. ###Code def poblacion(): pais=[] for x in range(5): nombre=input("Ingresa el nombre del pais:") cant=int(input("Ingrese la cantidad de habitantes")) pais.append((nombre,cant)) return pais def view(pais): print("Habitantes por pais:") for x in range(len(pais)): print(pais[x][0],pais[x][1]) def mayor(pais): p=0 for x in range(1,len(pais)): if pais[x][1]>pais[p][1]: p=x print("El pais con mayor cantidade de habitantes es:", pais[p][0]) pais=poblacion() view(pais) mayor(pais) ###Output Ingresa el nombre del pais:a Ingrese la cantidad de habitantes10 Ingresa el nombre del pais:c Ingrese la cantidad de habitantes45 Ingresa el nombre del pais:chi Ingrese la cantidad de habitantes45 Ingresa el nombre del pais:pe Ingrese la cantidad de habitantes15 Ingresa el nombre del pais:bo Ingrese la cantidad de habitantes4 Habitantes por pais: a 10 c 45 chi 45 pe 15 bo 4 El pais con mayor cantidade de habitantes es: c El pais con mayor cantidade de habitantes es: c El pais con mayor cantidade de habitantes es: c El pais con mayor cantidade de habitantes es: c ###Markdown **Taller 2 (Workshop 2)**---You are asked to write a program that identifies the performance level of the sales department of the company AVA. The main requirements are the following:1. It allows entering the number of salespeople in the sales department.2. It allows entering the score for each of them (scale of 1 to 10).3. The program must identify the salesperson with the best performance and the one with the worst performance.4. It also obtains the overall average performance level of the sales department.Note: if there is a tie in the scores when deciding the first and last place, the tie is broken alphabetically. 
###Code se=[1,10,7,9,3,15] sorted(se) ###Output _____no_output_____ ###Markdown ###Code #Solucion ejercicio: # CANTIDAD DE VENDEDORES DEL AREA DE VENTAS # PUNTUACION POR CADA VENDEDOR ESCALA DE 1 A 10 # VENDEDOR CON MEJOR Y PEOR RENDIMIENTO # PROMEDIO GENERAL DEL NIVEL DE CUMPLIMIENTO DEL AREA DE VENTAS def cumplimientov(): vendedor=[] for x in range(5): nombre=(input("Ingresa el nombre del vendedor:")) puntuacion=(input("Ingresa la puntuacion de 1 a 10;")) vendedor.append((nombre,puntuacion)) return vendedor def view(vendedor): print("Puntuacion por vendedor:") for x in range(len(vendedor)): print(vendedor[x][0],vendedor[x][1]) def mayor(vendedor): v=0 for x in range(1,len(vendedor)): if vendedor[x][1]>vendedor[v][1]: v=x print("El vendedor con la mas alta puntuacion es:",vendedor[v][0]) def menor(vendedor): v=0 for x in range(1,len(vendedor)): if vendedor[x][1]<vendedor[v][1]: v=x print("El vendedor con la menor puntuacion es:",vendedor[v][0]) def ven(camilo,juan,luis,pedro,santiago): suma=camilo+juan+luis+pedro+santiago promedio=suma/5 return suma,promedio suma,promedio=vendedores(1,2,3,4,4) print("La suma de las ventas es:", suma) print("El promedio de ventas es:", promedio) vendedor=cumplimientov() view(vendedor) mayor(vendedor) menor(vendedor) ven(camilo,juan,luis,pedro,santiago) ###Output La suma de las ventas es: 14 El promedio de ventas es: 2.8 Ingresa el nombre del vendedor:LUIS Ingresa la puntuacion de 1 a 10;1 Ingresa el nombre del vendedor:CAMILO Ingresa la puntuacion de 1 a 10;3 Ingresa el nombre del vendedor:JUAN Ingresa la puntuacion de 1 a 10;7 Ingresa el nombre del vendedor:CARLOS Ingresa la puntuacion de 1 a 10;8 Ingresa el nombre del vendedor:MIGUEL Ingresa la puntuacion de 1 a 10;9 Puntuacion por vendedor: LUIS 1 CAMILO 3 JUAN 7 CARLOS 8 MIGUEL 9 El vendedor con la mas alta puntuacion es: MIGUEL El vendedor con la menor puntuacion es: LUIS
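###Markdown The requirement about ties (if two salespeople have the same score, decide by alphabetical order) is not covered by the solution above. Below is a minimal sketch of that tie-break logic, an addition rather than part of the original submission; it assumes the same `(name, score)` tuples with *numeric* scores. ###Code
# Hypothetical tie-break: highest score first, alphabetical name on equal scores
def mejor_vendedor(vendedores):
    # scores must be stored as int (not as strings) for the comparison to be numeric
    ordenados = sorted(vendedores, key=lambda v: (-v[1], v[0]))
    return ordenados[0]

ejemplo = [("CAMILO", 9), ("MIGUEL", 9), ("LUIS", 3)]
print("El vendedor con la mas alta puntuacion es:", mejor_vendedor(ejemplo)[0])
###Output _____no_output_____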
hw1/SVM.ipynb
###Markdown **poly kernel** ###Code clf = svm.SVC(kernel='poly', C=0.1, degree=3) clf.fit(training_samples, training_labels) pred_labels = clf.predict(testing_samples) print(testing_labels, pred_labels) print('ACC score: {}'.format(accuracy_score(testing_labels, pred_labels))) ###Output [ 1. 1. -1.] [ 1. 1. -1.] ACC score: 1.0 ###Markdown **linear kernel** ###Code clf = svm.SVC(kernel='linear', C=0.1) clf.fit(training_samples, training_labels) pred_labels = clf.predict(testing_samples) print(testing_labels, pred_labels) print('ACC score: {}'.format(accuracy_score(testing_labels, pred_labels))) ###Output [ 1. 1. -1.] [ 1. 1. -1.] ACC score: 1.0 ###Markdown **rbf kernel** ###Code clf = svm.SVC(kernel='rbf') clf.fit(training_samples, training_labels) pred_labels = clf.predict(testing_samples) print(testing_labels, pred_labels) print('ACC score: {}'.format(accuracy_score(testing_labels, pred_labels))) ###Output [ 1. 1. -1.] [ 1. 1. -1.] ACC score: 1.0 ###Markdown **sigmoid kernel** ###Code clf = svm.SVC(kernel='sigmoid') clf.fit(training_samples, training_labels) pred_labels = clf.predict(testing_samples) print(testing_labels, pred_labels) print('ACC score: {}'.format(accuracy_score(testing_labels, pred_labels))) ###Output [ 1. 1. -1.] [-1. -1. -1.] ACC score: 0.3333333333333333
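###Markdown **kernel comparison** An added convenience cell (not from the original homework): the four kernels above can be compared in a single loop with default hyperparameters, assuming the same `training_samples`, `training_labels`, `testing_samples` and `testing_labels` defined earlier in the notebook. ###Code
# Compare the kernels side by side on the same split
for kernel in ['poly', 'linear', 'rbf', 'sigmoid']:
    clf = svm.SVC(kernel=kernel)
    clf.fit(training_samples, training_labels)
    pred_labels = clf.predict(testing_samples)
    print('{:>8}: ACC = {:.3f}'.format(kernel, accuracy_score(testing_labels, pred_labels)))
###Output _____no_output_____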
notebook/02-Preprocessing/01_SST2_Huggingface_preprocesing.ipynb
###Markdown The Stanford Sentiment Treebank The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. We use the two-way (positive/negative) class split, and use only sentence-level labels. ###Code from IPython.display import display, Markdown with open('../../doc/env_variables_setup.md', 'r') as fh: content = fh.read() display(Markdown(content)) ###Output _____no_output_____ ###Markdown Import Packages ###Code import tensorflow as tf import tensorflow_datasets from tensorflow.keras.utils import to_categorical from transformers import ( BertConfig, BertTokenizer, XLMRobertaTokenizer, TFBertModel, TFXLMRobertaModel, TFBertForSequenceClassification, glue_convert_examples_to_features, glue_processors ) from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.metrics import classification_report import matplotlib.pyplot as plt from google.cloud import storage import math import numpy as np import os import glob import time from datetime import timedelta import shutil from datetime import datetime import pickle import re ###Output _____no_output_____ ###Markdown Check configuration ###Code print(tf.version.GIT_VERSION, tf.version.VERSION) print(tf.keras.__version__) gpus = tf.config.list_physical_devices('GPU') if len(gpus)>0: for gpu in gpus: print('Name:', gpu.name, ' Type:', gpu.device_type) else: print('No GPU available !!!!') ###Output No GPU available !!!! ###Markdown Define Paths ###Code try: data_dir=os.environ['PATH_DATASETS'] except KeyError: print('missing PATH_DATASETS') try: tensorboard_dir=os.environ['PATH_TENSORBOARD'] except KeyError: print('missing PATH_TENSORBOARD') try: savemodel_dir=os.environ['PATH_SAVE_MODEL'] except KeyError: print('missing PATH_SAVE_MODEL') ###Output _____no_output_____ ###Markdown Import local packages ###Code import preprocessing.preprocessing as pp import utils.model_metrics as mm import utils.model_utils as mu import importlib importlib.reload(pp); importlib.reload(mm); importlib.reload(mu); ###Output _____no_output_____ ###Markdown Loading a data from Tensorflow Datasets ###Code data, info = tensorflow_datasets.load(name='glue/sst2', data_dir=data_dir, with_info=True) ###Output INFO:absl:Load dataset info from /Users/tarrade/tensorflow_datasets/glue/sst2/1.0.0 INFO:absl:Field info.description from disk and from code do not match. Keeping the one from code. INFO:absl:Field info.citation from disk and from code do not match. Keeping the one from code. 
INFO:absl:Reusing dataset glue (/Users/tarrade/tensorflow_datasets/glue/sst2/1.0.0) INFO:absl:Constructing tf.data.Dataset for split None, from /Users/tarrade/tensorflow_datasets/glue/sst2/1.0.0 ###Markdown Checking baics info from the metadata ###Code info pp.print_info_dataset(info) ###Output Labels: ['negative', 'positive'] Number of label: 2 Structure of the data: dict_keys(['sentence', 'label', 'idx']) Number of entries: Train dataset: 67349 Test dataset: 1821 Valid dataset: 872 ###Markdown Checking baics info from the metadata ###Code data data.keys() pp.print_info_data(data['train']) ###Output # Structure of the data: <PrefetchDataset shapes: {idx: (), label: (), sentence: ()}, types: {idx: tf.int32, label: tf.int64, sentence: tf.string}> # Output shape of one entry: {'idx': TensorShape([]), 'label': TensorShape([]), 'sentence': TensorShape([])} # Output types of one entry: {'idx': tf.int32, 'label': tf.int64, 'sentence': tf.string} # Output typesof one entry: {'idx': <class 'tensorflow.python.framework.ops.Tensor'>, 'label': <class 'tensorflow.python.framework.ops.Tensor'>, 'sentence': <class 'tensorflow.python.framework.ops.Tensor'>} # Shape of the data: (67349,) ---> 67349 entries ---> 1 dim dict structure dim: 3 [idx / label / sentence ] [() / () / () ] [int32 / int64 / bytes ] # Examples of data: {'idx': 16399, 'label': 0, 'sentence': b'for the uninitiated plays better on video with the sound '} {'idx': 1680, 'label': 0, 'sentence': b'like a giant commercial for universal studios , where much of th' b'e action takes place '} {'idx': 47917, 'label': 1, 'sentence': b'company once again dazzle and delight us '} {'idx': 17307, 'label': 1, 'sentence': b"'s no surprise that as a director washington demands and receive" b's excellent performances , from himself and from newcomer derek ' b'luke '} ###Markdown Define parameters of the model ###Code # models #MODELS = [(TFBertModel, BertTokenizer, 'bert-base-multilingual-uncased'), # (OpenAIGPTModel, OpenAIGPTTokenizer, 'openai-gpt'), # (GPT2Model, GPT2Tokenizer, 'gpt2'), # (CTRLModel, CTRLTokenizer, 'ctrl'), # (TransfoXLModel, TransfoXLTokenizer, 'transfo-xl-wt103'), # (XLNetModel, XLNetTokenizer, 'xlnet-base-cased'), # (XLMModel, XLMTokenizer, 'xlm-mlm-enfr-1024'), # (DistilBertModel, DistilBertTokenizer, 'distilbert-base-cased'), # (RobertaModel, RobertaTokenizer, 'roberta-base'), # (XLMRobertaModel, XLMRobertaTokenizer, 'xlm-roberta-base'), # ] MODELS = [(TFBertModel, BertTokenizer, 'bert-base-multilingual-uncased'), (TFXLMRobertaModel, XLMRobertaTokenizer, 'jplu/tf-xlm-roberta-base')] model_index = 0 # BERT model_class = MODELS[model_index][0] # i.e TFBertModel tokenizer_class = MODELS[model_index][1] # i.e BertTokenizer pretrained_weights = MODELS[model_index][2] #'i.e bert-base-multilingual-uncased' # Maxium length, becarefull BERT max length is 512! 
MAX_LENGTH = 128 # define parameters BATCH_SIZE_TRAIN = 32 BATCH_SIZE_TEST = 32 BATCH_SIZE_VALID = 64 EPOCH = 2 # extract parameters size_train_dataset = info.splits['train'].num_examples size_test_dataset = info.splits['test'].num_examples size_valid_dataset = info.splits['validation'].num_examples number_label = info.features["label"].num_classes # computer parameter STEP_EPOCH_TRAIN = math.ceil(size_train_dataset/BATCH_SIZE_TRAIN) STEP_EPOCH_TEST = math.ceil(size_test_dataset/BATCH_SIZE_TEST) STEP_EPOCH_VALID = math.ceil(size_test_dataset/BATCH_SIZE_VALID) print('Dataset size: {:6}/{:6}/{:6}'.format(size_train_dataset, size_test_dataset, size_valid_dataset)) print('Batch size: {:6}/{:6}/{:6}'.format(BATCH_SIZE_TRAIN, BATCH_SIZE_TEST, BATCH_SIZE_VALID)) print('Step per epoch: {:6}/{:6}/{:6}'.format(STEP_EPOCH_TRAIN, STEP_EPOCH_TEST, STEP_EPOCH_VALID)) print('Total number of batch: {:6}/{:6}/{:6}'.format(STEP_EPOCH_TRAIN*(EPOCH+1), STEP_EPOCH_TEST*(EPOCH+1), STEP_EPOCH_VALID*(EPOCH+1))) ###Output Dataset size: 67349/ 1821/ 872 Batch size: 32/ 32/ 64 Step per epoch: 2105/ 57/ 29 Total number of batch: 6315/ 171/ 87 ###Markdown Tokenizer and prepare data for BERT ###Code # Define the checkpoint directory to store the checkpoints pretrained_model_dir=savemodel_dir+'/pretrained_model/'+pretrained_weights os.makedirs(pretrained_model_dir, exist_ok=True) # Load tokenizer tokenizer = tokenizer_class.from_pretrained(pretrained_weights, cache_dir=pretrained_model_dir) # recap of input dataset print(data['train']) print(tf.data.experimental.cardinality(data['train'])) print(tf.data.experimental.cardinality(data['test'])) print(tf.data.experimental.cardinality(data['validation'])) # super slow since looping over all data #print(len(list(data['train']))) # Prepare data for BERT train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=MAX_LENGTH, task='sst-2') test_dataset = glue_convert_examples_to_features(data['test'], tokenizer, max_length=MAX_LENGTH, task='sst-2') valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, max_length=MAX_LENGTH, task='sst-2') # adding the number of entries if tf.version.VERSION[0:5]=='2.2.0': train_dataset=train_dataset.apply(tf.data.experimental.assert_cardinality(tf.data.experimental.cardinality(data['train']))) test_dataset=test_dataset.apply(tf.data.experimental.assert_cardinality(tf.data.experimental.cardinality(data['test']))) valid_dataset=valid_dataset.apply(tf.data.experimental.assert_cardinality(tf.data.experimental.cardinality(data['validation']))) # recap of pre processing dataset print(train_dataset) if tf.version.VERSION[0:5]=='2.2.0': print(tf.data.experimental.cardinality(train_dataset)) print(tf.data.experimental.cardinality(test_dataset)) print(tf.data.experimental.cardinality(valid_dataset)) # super slow since looping over all data #print(len(list(train_dataset))) else: print(size_train_dataset) print(size_test_dataset) print(size_valid_dataset) ###Output <FlatMapDataset shapes: ({input_ids: (None,), attention_mask: (None,), token_type_ids: (None,)}, ()), types: ({input_ids: tf.int32, attention_mask: tf.int32, token_type_ids: tf.int32}, tf.int64)> 67349 1821 872 ###Markdown Check the final data ###Code pp.print_info_data(train_dataset,print_example=False) pp.print_detail_tokeniser(train_dataset, tokenizer) ###Output input_ids ----> attention_mask token_type_ids modified text 101 ----> 1 1 [ C L S ] 10139 ----> 1 1 f o r 10103 ----> 1 1 t h e 18768 ----> 1 1 u n i 45611 ----> 1 1 # # n i t i 
21096 ----> 1 1 # # a t e d 17173 ----> 1 1 p l a y s 16197 ----> 1 1 b e t t e r 10125 ----> 1 1 o n 11379 ----> 1 1 v i d e o 10171 ----> 1 1 w i t h 10103 ----> 1 1 t h e 14127 ----> 1 1 s o u n d 102 ----> 1 1 [ S E P ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] 0 ----> 0 0 [ P A D ] ###Markdown Save data as TFRecord files ###Code # Create directory to save TFRecord files tfrecord_data_dir=data_dir+'/tfrecord/sst2' os.makedirs(tfrecord_data_dir, exist_ok=True) pp.write_tf_data_into_tfrecord(train_dataset,tfrecord_data_dir+'/train_dataset') pp.write_tf_data_into_tfrecord(test_dataset,tfrecord_data_dir+'/test_dataset') pp.write_tf_data_into_tfrecord(valid_dataset,tfrecord_data_dir+'/valid_dataset') ###Output _____no_output_____ ###Markdown Read data from TFRecord files (sanity check) ###Code # TFRecords encode and store data train_files = tf.data.TFRecordDataset(tfrecord_data_dir+'/train_dataset.tfrecord') test_files = tf.data.TFRecordDataset(tfrecord_data_dir+'/test_dataset.tfrecord') valid_files = tf.data.TFRecordDataset(tfrecord_data_dir+'/valid_dataset.tfrecord') train_dataset2 = train_files.map(pp.parse_tfrecord_glue_files) test_dataset2 = test_files.map(pp.parse_tfrecord_glue_files) valid_dataset2 = valid_files.map(pp.parse_tfrecord_glue_files) # adding the number of entries if tf.version.VERSION[0:5]=='2.2.0': train_dataset2=train_dataset2.apply(tf.data.experimental.assert_cardinality(train_dataset2.reduce(0, lambda x, _: x + 1).numpy())) test_dataset2=test_dataset2.apply(tf.data.experimental.assert_cardinality(test_dataset2.reduce(0, lambda x, _: x + 1).numpy())) valid_dataset2=valid_dataset2.apply(tf.data.experimental.assert_cardinality(valid_dataset2.reduce(0, lambda x, _: x + 1).numpy())) if tf.version.VERSION[0:5]=='2.2.0': print(tf.data.experimental.cardinality(train_dataset2)) print(tf.data.experimental.cardinality(test_dataset2)) print(tf.data.experimental.cardinality(valid_dataset2)) else: print(train_dataset2.reduce(0, lambda x, _: x + 1).numpy()) print(test_dataset2.reduce(0, lambda x, _: x + 1).numpy()) print(valid_dataset2.reduce(0, lambda x, _: x + 1).numpy()) pp.print_info_data(train_dataset2,print_example=False) pp.print_detail_tokeniser(train_dataset2, tokenizer) train_dataset train_dataset2 for i in train_dataset2: print(i) break for i in train_dataset: print(i) break ###Output ({'input_ids': <tf.Tensor: shape=(128,), dtype=int32, numpy= array([ 101, 10139, 10103, 18768, 45611, 21096, 17173, 16197, 10125, 11379, 10171, 10103, 14127, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(128,), dtype=int32, numpy= array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(128,), dtype=int32, numpy= array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>}, <tf.Tensor: shape=(), dtype=int64, numpy=0>)
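###Markdown As a possible next step (not part of the original preprocessing notebook), the TFRecord-backed datasets can be turned into shuffled, batched pipelines with the batch sizes computed above, ready to feed a Keras `fit()` call: ###Code
# Sketch of input pipelines built on top of the TFRecord round-trip datasets
train_input = (train_dataset2
               .shuffle(10000)
               .batch(BATCH_SIZE_TRAIN)
               .prefetch(tf.data.experimental.AUTOTUNE))
valid_input = valid_dataset2.batch(BATCH_SIZE_VALID).prefetch(tf.data.experimental.AUTOTUNE)
test_input = test_dataset2.batch(BATCH_SIZE_TEST).prefetch(tf.data.experimental.AUTOTUNE)

print(train_input)
###Output _____no_output_____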
DeepLearningPrerequisites/Pandas.ipynb
###Markdown PandasPanas is a library that makes it very easy to read, write and manipulate data. This section is focused purely on the fundamentals. We want to answer questions like: - how to load CSV? - how to write CSV? - what is the dataframe? - how is it different from NumPy array? - how to do basic operations on dataframes? - how to use `apply()` function? - how to plot data with Pandas? Loading in dataPandas is particularly useful for data, which is structured as a table. Tabular data, when it's stored in a file, it is usually in the form of a CSV or a TSV that stands for comma separated values and tabs separated values. You can also use Panas for reading from an Excel spreadsheet, since that has a similar structure.To start we are going to import Pandas. Convention is to import is as `pd`. ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Next, we're going to download a CSV file. ###Code !!wget https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv ###Output _____no_output_____ ###Markdown Then we are going to read in the CSV file. ###Code df = pd.read_csv('sbux.csv') df ###Output _____no_output_____ ###Markdown And note that this command works directly with you or else as well. ###Code df = pd.read_csv('https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv') df ###Output _____no_output_____ ###Markdown For comaprison, we can look at the content of CSV file with a unix command. ###Code !head sbux.csv ###Output date,open,high,low,close,volume,Name 2013-02-08,27.92,28.325,27.92,28.185,7146296,SBUX 2013-02-11,28.26,28.26,27.93,28.07,5457354,SBUX 2013-02-12,28.0,28.275,27.975,28.13,8665592,SBUX 2013-02-13,28.23,28.23,27.75,27.915,7022056,SBUX 2013-02-14,27.765,27.905,27.675,27.775,8899188,SBUX 2013-02-15,27.805,27.85,27.085,27.17,18195730,SBUX 2013-02-19,27.18,27.305,27.01,27.225,11760912,SBUX 2013-02-20,27.3,27.42,26.59,26.655,12472506,SBUX 2013-02-21,26.535,26.82,26.26,26.675,13896450,SBUX ###Markdown As we can see, those are the stock prices for Starbucks starting in February 2013.Pandas has analogus command. ###Code df.head() ###Output _____no_output_____ ###Markdown If you're in a notebook that shows you a nicely formatted preview of the top of your dataframe. You can also set the number of rows you want to see as an argument. ###Code df.head(10) ###Output _____no_output_____ ###Markdown And just like in unix, there's a `tail` command. ###Code df.tail() ###Output _____no_output_____ ###Markdown Finally, there's an `info()` function that tells us some important information about the dataframe, such as what kind of index the data frame uses, how many columns it has, the data types for those columns and how much memory it takes up. ###Code df.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 1259 entries, 0 to 1258 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 date 1259 non-null object 1 open 1259 non-null float64 2 high 1259 non-null float64 3 low 1259 non-null float64 4 close 1259 non-null float64 5 volume 1259 non-null int64 6 Name 1259 non-null object dtypes: float64(4), int64(1), object(2) memory usage: 69.0+ KB ###Markdown Selecting row and columnsThis is analogous to indexing an array. So, for example, with an NumPy array, I can ask to give me the element at row 0, column 0. And in that case, I would use the square bracket notation and pass in `[0, 0]`. So let's see if that works with a data frame. 
###Code df[0, 0] ###Output _____no_output_____ ###Markdown As you can see, this does not work.Before we do anything else, let's check the columns of the data frame by using the attribute called `columns`. ###Code df.columns ###Output _____no_output_____ ###Markdown This returns an index object with the column names.Note that you can also do assignment on this attribute with a list of column names, so let's say I don't like the fact that the name column is the only one that is capitalized, since it offends my sense of uniformity. So let's change that to lowercase. ###Code df.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'name'] df.columns ###Output _____no_output_____ ###Markdown All right, so here's an idea: what if I pass one of these column names into the square brackets? ###Code df['open'] ###Output _____no_output_____ ###Markdown As you can see, this returns the open column of the data frame. We can also select multiple columns by using a list of column names. ###Code df[['open', 'close']] ###Output _____no_output_____ ###Markdown And this returns both columns.Now just out of curiosity, let's check the data type for the open column. ###Code type(df['open']) ###Output _____no_output_____ ###Markdown Interesting, so it's a Series.Now let's check the type of the open and close columns. ###Code type(df[['open', 'close']]) ###Output _____no_output_____ ###Markdown So this is a DataFrame. The lesson here is that when you only have one dimension in pandas, it's typically stored as a `Series`. If it's two dimensional, it's a `DataFrame`.At this point, you might be thinking Pandas is very weird, because square brackets are used to select columns, whereas in NumPy and every other kind of array, the square brackets would usually select the rows.The obvious question now is: how do we select a row in a DataFrame? The answer is that we can accomplish this using `iloc` and `loc`. ###Code df.iloc[0] df.loc[0] ###Output _____no_output_____ ###Markdown You might be wondering what's the difference? The difference is that `iloc` is used for integer indices no matter what, whereas `loc` selects the row by the index label, and it just so happens that in our data frame they are one and the same.To demonstrate this difference, let's load in our data frame again, but this time we'll specify that the date column should be the index. ###Code df2 = pd.read_csv('sbux.csv', index_col='date') df2.head() ###Output _____no_output_____ ###Markdown As you can see, the date column appears to have some kind of special status; in fact, it's the index for this dataframe. ###Code df2.loc['2013-02-12'] ###Output _____no_output_____ ###Markdown And by the way, if we check the type of this row, we can see that it's also a Series, so both individual rows and individual columns are Series objects. ###Code type(df2.loc['2013-02-12']) ###Output _____no_output_____ ###Markdown Now let's talk about how we can select multiple rows of a dataframe. Suppose I want all the rows where the open price was greater than 64. ###Code df[df['open'] > 64] ###Output _____no_output_____ ###Markdown Now suppose I want all the rows where the name is not Starbucks. ###Code df[df['name'] != 'SBUX'] ###Output _____no_output_____ ###Markdown Ok, so we have no rows where the name is not Starbucks. So it seems that using the square bracket notation, I can pass in something like a Boolean. Code like this works from the inside out, so let's check what this Boolean thing actually is. 
###Code df['name'] != 'SBUX' type(df['name'] != 'SBUX') ###Output _____no_output_____ ###Markdown So perhaps not unsurprisingly, it's a series containing boolean values. So the square brackets on a dataframe except a Boolean Series as input.Now, oddly, this behavior does match how NumPy array works. ###Code import numpy as np A = np.arange(10) A ###Output _____no_output_____ ###Markdown Now, let's say I just want to keep the even numbers. ###Code A[A % 2 == 0] type(A % 2 == 0) ###Output _____no_output_____ ###Markdown Now, in building machine learning algorithms, you usually want to work with arrays of numbers and not dataframes, which can contain all kinds of objects.So how can we convert a DataFrame into an NumPy array? We can use the `values` attribute. ###Code df.values ###Output _____no_output_____ ###Markdown Now, unfortunately, this gives us the `dtype=object`, which is not what we want if we're doing machine learning, because now there are strings inside of this array. So let's see what happens if we only select the numerical columns. ###Code A = df[['open', 'close']].values A ###Output _____no_output_____ ###Markdown Ok, so now we have a proper array of numbers. ###Code type(A) ###Output _____no_output_____ ###Markdown And as expected, it is an NumPy array. All right, so suppose now that we've done what we needed to do with our DataFrame, we would like to save it to a file. This is accomplished with the `to_csv()` function. So let's say I want to keep only the open and close columns. ###Code smalldf = df[['open', 'close']] smalldf smalldf.to_csv('output.csv') ###Output _____no_output_____ ###Markdown Now we can use the unix command to see what's in our file. ###Code !head output.csv ###Output ,open,close 0,27.92,28.185 1,28.26,28.07 2,28.0,28.13 3,28.23,27.915 4,27.765,27.775 5,27.805,27.17 6,27.18,27.225 7,27.3,26.655 8,26.535,26.675 ###Markdown Now, unfortunately, there seems to be a pretty useless index column in our file. Luckily, we can get rid of this, ###Code smalldf.to_csv('output.csv', index=False) !head output.csv ###Output open,close 27.92,28.185 28.26,28.07 28.0,28.13 28.23,27.915 27.765,27.775 27.805,27.17 27.18,27.225 27.3,26.655 26.535,26.675 ###Markdown The `apply() `functionThe typical use case for the appli function would be similar to the scenario where we want to do some operation on each element of a list, for example, if we want it to square each item. Of course, in Python, we know that for loops are slow, so we would like to avoid them if possible. The `apply()` function can be used if you want to do the same operation on each row of a DataFrame or each column of a DataFrame. In other words, it does what you want to do with a for loop without having to actually write a for loop.Suppose I want to have a column called `year` or I take the existing date column, parse out the year and convert it to an integer.So let's start by writing a function that accepts as input a single row of a data frame. ###Code def date_to_year(row): return int(row['date'].split('-')[0]) ###Output _____no_output_____ ###Markdown Next we will apply this function on each row ouf our DataFrame. `axis=1` will iterate over rows. ###Code df.apply(date_to_year, axis=1) ###Output _____no_output_____ ###Markdown And now we can assign the series to a new column. 
###Code df['year'] = df.apply(date_to_year, axis=1) df.head() ###Output _____no_output_____ ###Markdown Plotting with Pandas Pandas makes plotting very easy since it provides instance methods on both Series and DataFrame that automatically generate plots. ###Code df['open'].hist() ###Output _____no_output_____ ###Markdown This creates a histogram. ###Code df['open'].plot() ###Output _____no_output_____ ###Markdown This creates a line chart. By the way, these method names correspond to their matplotlib versions, which makes them easy to remember. We can also do more interesting plots like the box plot. ###Code df[['open', 'high', 'low', 'close']].plot.box() ###Output _____no_output_____ ###Markdown Another plot that's very useful for getting a quick summary of your data is the scatter matrix. ###Code from pandas.plotting import scatter_matrix scatter_matrix( df[['open', 'high', 'low', 'close']], alpha=0.2, figsize=(6, 6) ); ###Output _____no_output_____ ###Markdown This is a scatter matrix. Basically, this plot shows the linear correlation between each pair of the data columns. On the diagonal, we get a histogram of each individual column, so it lets us see the distribution of our data. In other words, this is a statistical summary of the data. We see what kind of distribution each column has and how they are related to one another. The parameter `alpha=0.2` makes the dots semi-transparent, and `figsize` makes the plot a little bigger so we can see it better. Exercise Main point: - learn to make use of documentation, don't use blogs and tutorials Main goal: - generate, plot, and save the "Donut dataset" with quadratic features Steps: - generate the donut dataset (or the concentric circles dataset) - $x_1$, $x_2$ and $y$ - create a DataFrame from the generated array, with columns called $x_1$, $x_2$ and $y$ - plot the data - derive three new columns based on $x_1$ and $x_2$ - a quadratic feature expansion: ${x_1}^2$, ${x_2}^2$ and $x_1*x_2$ - you may find that the `apply()` function is useful here - name these new columns appropriately - save your DataFrame to CSV without any headers and without any index columns - thus your CSV should contain only the numbers that would be stored if it were a NumPy array ###Code # adapted from zg1seg - https://forum.lazyprogrammer.me/viewtopic.php?t=94#top import numpy as np import pandas as pd import matplotlib.pyplot as plot def create_donut(radius, size=1000): # arr is in polar coordinates: column 0 is the angle, column 1 the radius (~ N(radius, 1)); # the constant offset added to the angle is harmless since it still spans a full 2*pi arr = np.array([np.linspace(0, 2 * np.pi, size), np.random.randn(size)]).T + radius cartesian_arr = np.array([arr[:, 1] * np.cos(arr[:, 0]), arr[:, 1] * np.sin(arr[:, 0])]).T return cartesian_arr outerCircle = create_donut(10) innerCircle = create_donut(5) dfo = pd.DataFrame(outerCircle, columns=["x1", "x2"]) dfo["y"] = 1 dfi = pd.DataFrame(innerCircle, columns=["x1", "x2"]) dfi["y"] = 0 # generate DataFrame for result csv df_result = pd.concat([dfi, dfo], ignore_index=True) df_result["x1^2"] = df_result["x1"] ** 2 df_result["x2^2"] = df_result["x2"] ** 2 df_result["x1*x2"] = df_result["x1"] * df_result["x2"] # rearrange columns df_result = df_result[["x1", "x2", "x1^2", "x2^2", "x1*x2", "y"]] # shuffle to mix up "y" values df_result = df_result.sample(frac=1.0) df_result.to_csv("result.csv", header=False, index=False) # plot ax = dfo.plot(x=0, y=1, kind="scatter", color="gold") dfi.plot(x=0, y=1, kind="scatter", color="indigo", ax=ax, figsize=(5, 5), legend=False) plot.show() ###Output _____no_output_____
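###Markdown As an aside, the reference solution above builds the quadratic features with vectorized column arithmetic rather than `apply()`. If you want to practice the `apply()` approach the exercise hints at, an equivalent (though slower) sketch is below — it simply recomputes the same three columns on `df_result` using the row-wise pattern from the `date_to_year` example. ###Code # apply()-based variant of the quadratic feature expansion (slower than the vectorized version above)
df_result["x1^2"] = df_result.apply(lambda row: row["x1"] ** 2, axis=1)
df_result["x2^2"] = df_result.apply(lambda row: row["x2"] ** 2, axis=1)
df_result["x1*x2"] = df_result.apply(lambda row: row["x1"] * row["x2"], axis=1)
df_result.head() ###Output _____no_output_____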
pyspark-examples/nb10-sql-dataframes.ipynb
###Markdown Spark SQL and Data Frames [Introduction to Spark with Python, by Jose A. Dianes](http://jadianes.github.io/spark-py-notebooks) This notebook will introduce Spark capabilities to deal with data in a structured way. Basically, everything revolves around the concept of a *Data Frame* and using the *SQL language* to query them. We will see how the data frame abstraction, very popular in other data analytics ecosystems (e.g. R and Python/Pandas), is very powerful when performing exploratory data analysis. In fact, it is very easy to express data queries when used together with the SQL language. Moreover, Spark distributes this column-based data structure transparently, in order to make the querying process as efficient as possible. ###Code data_file = "s3://gwu-bigdata/kdd1999/kddcup.data" raw_data = sc.textFile(data_file).cache() ###Output _____no_output_____ ###Markdown Getting a Data Frame A Spark `DataFrame` is a distributed collection of data organized into named columns. It is conceptually equivalent to a table in a relational database or a data frame in R or Pandas. They can be constructed from a wide array of sources, such as an existing RDD in our case. The entry point into all SQL functionality in Spark is the `SQLContext` class. To create a basic instance, all we need is a `SparkContext` reference. Since we are running Spark in shell mode (using pySpark) we can use the global context object `sc` for this purpose. ###Code from pyspark.sql import SQLContext sqlContext = SQLContext(sc) ###Output _____no_output_____ ###Markdown Inferring the schema With a `SQLContext`, we are ready to create a `DataFrame` from our existing RDD. But first we need to tell Spark SQL the schema of our data. Spark SQL can convert an RDD of `Row` objects to a `DataFrame`. Rows are constructed by passing a list of key/value pairs as *kwargs* to the `Row` class. The keys define the column names, and the types are inferred by looking at the first row. Therefore, it is important that there is no missing data in the first row of the RDD in order to properly infer the schema. In our case, we first need to split the comma-separated data, and then use the information in KDD's 1999 task description to obtain the [column names](http://kdd.ics.uci.edu/databases/kddcup99/kddcup.names). ###Code from pyspark.sql import Row csv_data = raw_data.map(lambda l: l.split(",")) row_data = csv_data.map(lambda p: Row( duration=int(p[0]), protocol_type=p[1], service=p[2], flag=p[3], src_bytes=int(p[4]), dst_bytes=int(p[5]) ) ) ###Output _____no_output_____ ###Markdown Once we have our RDD of `Row` objects, we can infer and register the schema. ###Code interactions_df = sqlContext.createDataFrame(row_data) interactions_df.registerTempTable("interactions") ###Output _____no_output_____ ###Markdown Now we can run SQL queries over our data frame that has been registered as a table.
###Code # Select tcp network interactions with more than 1 second duration and no transfer from destination tcp_interactions = sqlContext.sql(""" SELECT duration, dst_bytes FROM interactions WHERE protocol_type = 'tcp' AND duration > 1000 AND dst_bytes = 0 """) tcp_interactions.show() ###Output +--------+---------+ |duration|dst_bytes| +--------+---------+ | 1028| 0| | 5057| 0| | 5059| 0| | 5051| 0| | 5056| 0| | 5051| 0| | 5039| 0| | 5062| 0| | 5041| 0| | 5056| 0| | 5064| 0| | 5043| 0| | 5061| 0| | 5049| 0| | 5061| 0| | 5048| 0| | 5047| 0| | 5044| 0| | 5063| 0| | 5068| 0| +--------+---------+ only showing top 20 rows ###Markdown The results of SQL queries are RDDs and support all the normal RDD operations. ###Code # Output duration together with dst_bytes tcp_interactions_out = tcp_interactions.map(lambda p: "Duration: {}, Dest. bytes: {}".format(p.duration, p.dst_bytes)) for ti_out in tcp_interactions_out.collect(): print ti_out ###Output Duration: 5057, Dest. bytes: 0 Duration: 5059, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5056, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5039, Dest. bytes: 0 Duration: 5062, Dest. bytes: 0 Duration: 5041, Dest. bytes: 0 Duration: 5056, Dest. bytes: 0 Duration: 5064, Dest. bytes: 0 Duration: 5043, Dest. bytes: 0 Duration: 5061, Dest. bytes: 0 Duration: 5049, Dest. bytes: 0 Duration: 5061, Dest. bytes: 0 Duration: 5048, Dest. bytes: 0 Duration: 5047, Dest. bytes: 0 Duration: 5044, Dest. bytes: 0 Duration: 5063, Dest. bytes: 0 Duration: 5068, Dest. bytes: 0 Duration: 5062, Dest. bytes: 0 Duration: 5046, Dest. bytes: 0 Duration: 5052, Dest. bytes: 0 Duration: 5044, Dest. bytes: 0 Duration: 5054, Dest. bytes: 0 Duration: 5039, Dest. bytes: 0 Duration: 5058, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5032, Dest. bytes: 0 Duration: 5063, Dest. bytes: 0 Duration: 5040, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5066, Dest. bytes: 0 Duration: 5044, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5036, Dest. bytes: 0 Duration: 5055, Dest. bytes: 0 Duration: 2426, Dest. bytes: 0 Duration: 5047, Dest. bytes: 0 Duration: 5057, Dest. bytes: 0 Duration: 5037, Dest. bytes: 0 Duration: 5057, Dest. bytes: 0 Duration: 5062, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5053, Dest. bytes: 0 Duration: 5064, Dest. bytes: 0 Duration: 5044, Dest. bytes: 0 Duration: 5051, Dest. bytes: 0 Duration: 5033, Dest. bytes: 0 Duration: 5066, Dest. bytes: 0 Duration: 5063, Dest. bytes: 0 Duration: 5056, Dest. bytes: 0 Duration: 5042, Dest. bytes: 0 Duration: 5063, Dest. bytes: 0 Duration: 5060, Dest. bytes: 0 Duration: 5056, Dest. bytes: 0 Duration: 5049, Dest. bytes: 0 Duration: 5043, Dest. bytes: 0 Duration: 5039, Dest. bytes: 0 Duration: 5041, Dest. bytes: 0 Duration: 42448, Dest. bytes: 0 Duration: 42088, Dest. bytes: 0 Duration: 41065, Dest. bytes: 0 Duration: 40929, Dest. bytes: 0 Duration: 40806, Dest. bytes: 0 Duration: 40682, Dest. bytes: 0 Duration: 40571, Dest. bytes: 0 Duration: 40448, Dest. bytes: 0 Duration: 40339, Dest. bytes: 0 Duration: 40232, Dest. bytes: 0 Duration: 40121, Dest. bytes: 0 Duration: 36783, Dest. bytes: 0 Duration: 36674, Dest. bytes: 0 Duration: 36570, Dest. bytes: 0 Duration: 36467, Dest. bytes: 0 Duration: 36323, Dest. bytes: 0 Duration: 36204, Dest. bytes: 0 Duration: 32038, Dest. bytes: 0 Duration: 31925, Dest. bytes: 0 Duration: 31809, Dest. bytes: 0 Duration: 31709, Dest. bytes: 0 Duration: 31601, Dest. bytes: 0 Duration: 31501, Dest. 
bytes: 0 Duration: 31401, Dest. bytes: 0 Duration: 31301, Dest. bytes: 0 Duration: 31194, Dest. bytes: 0 Duration: 31061, Dest. bytes: 0 Duration: 30935, Dest. bytes: 0 Duration: 30835, Dest. bytes: 0 Duration: 30735, Dest. bytes: 0 Duration: 30619, Dest. bytes: 0 Duration: 30518, Dest. bytes: 0 Duration: 30418, Dest. bytes: 0 Duration: 30317, Dest. bytes: 0 Duration: 30217, Dest. bytes: 0 Duration: 30077, Dest. bytes: 0 Duration: 25420, Dest. bytes: 0 Duration: 22921, Dest. bytes: 0 Duration: 22821, Dest. bytes: 0 Duration: 22721, Dest. bytes: 0 Duration: 22616, Dest. bytes: 0 Duration: 22516, Dest. bytes: 0 Duration: 22416, Dest. bytes: 0 Duration: 22316, Dest. bytes: 0 Duration: 22216, Dest. bytes: 0 Duration: 21987, Dest. bytes: 0 Duration: 21887, Dest. bytes: 0 Duration: 21767, Dest. bytes: 0 Duration: 21661, Dest. bytes: 0 Duration: 21561, Dest. bytes: 0 Duration: 21455, Dest. bytes: 0 Duration: 21334, Dest. bytes: 0 Duration: 21223, Dest. bytes: 0 Duration: 21123, Dest. bytes: 0 Duration: 20983, Dest. bytes: 0 Duration: 14682, Dest. bytes: 0 Duration: 14420, Dest. bytes: 0 Duration: 14319, Dest. bytes: 0 Duration: 14198, Dest. bytes: 0 Duration: 14098, Dest. bytes: 0 Duration: 13998, Dest. bytes: 0 Duration: 13898, Dest. bytes: 0 Duration: 13796, Dest. bytes: 0 Duration: 13678, Dest. bytes: 0 Duration: 13578, Dest. bytes: 0 Duration: 13448, Dest. bytes: 0 Duration: 13348, Dest. bytes: 0 Duration: 13241, Dest. bytes: 0 Duration: 13141, Dest. bytes: 0 Duration: 13033, Dest. bytes: 0 Duration: 12933, Dest. bytes: 0 Duration: 12833, Dest. bytes: 0 Duration: 12733, Dest. bytes: 0 Duration: 12001, Dest. bytes: 0 Duration: 5678, Dest. bytes: 0 Duration: 5010, Dest. bytes: 0 Duration: 1298, Dest. bytes: 0 Duration: 1031, Dest. bytes: 0 Duration: 36438, Dest. bytes: 0 ###Markdown We can easily have a look at our data frame schema using `printSchema`. ###Code interactions_df.printSchema() ###Output root |-- dst_bytes: long (nullable = true) |-- duration: long (nullable = true) |-- flag: string (nullable = true) |-- protocol_type: string (nullable = true) |-- service: string (nullable = true) |-- src_bytes: long (nullable = true) ###Markdown Queries as `DataFrame` operations Spark `DataFrame` provides a domain-specific language for structured data manipulation. This language includes methods we can chain together in order to do selection, filtering, grouping, etc. For example, let's say we want to count how many interactions there are for each protocol type. We can proceed as follows. ###Code from time import time t0 = time() interactions_df.select("protocol_type", "duration", "dst_bytes").groupBy("protocol_type").count().show() tt = time() - t0 print "Query performed in {} seconds".format(round(tt,3)) ###Output protocol_type count udp 20354 tcp 190065 icmp 283602 Query performed in 20.568 seconds ###Markdown Now imagine that we want to count how many interactions last more than 1 second, with no data transfer from the destination, grouped by protocol type. We can just add two filter calls to the previous query. ###Code t0 = time() interactions_df.select("protocol_type", "duration", "dst_bytes").filter(interactions_df.duration>1000).filter(interactions_df.dst_bytes==0).groupBy("protocol_type").count().show() tt = time() - t0 print "Query performed in {} seconds".format(round(tt,3)) ###Output protocol_type count tcp 139 Query performed in 16.641 seconds ###Markdown We can use this to perform some [exploratory data analysis](http://en.wikipedia.org/wiki/Exploratory_data_analysis).
Let's count how many attack and normal interactions we have. First we need to add the label column to our data. ###Code def get_label_type(label): if label!="normal.": return "attack" else: return "normal" row_labeled_data = csv_data.map(lambda p: Row( duration=int(p[0]), protocol_type=p[1], service=p[2], flag=p[3], src_bytes=int(p[4]), dst_bytes=int(p[5]), label=get_label_type(p[41]) ) ) interactions_labeled_df = sqlContext.createDataFrame(row_labeled_data) ###Output _____no_output_____ ###Markdown This time we don't need to register the schema since we are going to use the OO query interface. Let's check that the previous step actually works by counting attack and normal data in our data frame. ###Code t0 = time() interactions_labeled_df.select("label").groupBy("label").count().show() tt = time() - t0 print "Query performed in {} seconds".format(round(tt,3)) ###Output label count attack 396743 normal 97278 Query performed in 17.325 seconds ###Markdown Now we want to count them by label and protocol type, in order to see how important the protocol type is for detecting whether an interaction is or is not an attack. ###Code t0 = time() interactions_labeled_df.select("label", "protocol_type").groupBy("label", "protocol_type").count().show() tt = time() - t0 print "Query performed in {} seconds".format(round(tt,3)) ###Output label protocol_type count attack udp 1177 attack tcp 113252 attack icmp 282314 normal udp 19177 normal tcp 76813 normal icmp 1288 Query performed in 17.253 seconds ###Markdown At first sight it seems that *udp* interactions make up a lower proportion of the network attacks than the other protocol types. And we can do much more sophisticated groupings. For example, we can add a "split" based on data transfer from the target to the previous grouping. ###Code t0 = time() interactions_labeled_df.select("label", "protocol_type", "dst_bytes").groupBy("label", "protocol_type", interactions_labeled_df.dst_bytes==0).count().show() tt = time() - t0 print "Query performed in {} seconds".format(round(tt,3)) ###Output label protocol_type (dst_bytes = 0) count normal icmp true 1288 attack udp true 1166 attack udp false 11 normal udp true 3594 normal udp false 15583 attack tcp true 110583 attack tcp false 2669 normal tcp true 9313 normal tcp false 67500 attack icmp true 282314 Query performed in 17.284 seconds
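###Markdown As a final cross-check — a sketch only, not part of the original analysis — the very first grouped count can also be written as plain SQL against the `interactions` table we registered earlier; the counts it returns should match the DataFrame query above. ###Code sqlContext.sql("""
    SELECT protocol_type, COUNT(*) AS cnt
    FROM interactions
    GROUP BY protocol_type
""").show() ###Output _____no_output_____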
red10/Red10-Intro.ipynb
###Markdown ![Intro](http://i.imgur.com/65dsKzX.png) Why build Red10?I wanted to use machine learning to accurately predict buy and sell opportunities for stocks.While building the original version, I realized how important having a machine learning data store is to this type of intelligent application stack. Today's talk is about using Redis as a database for 1000s of machine learning models. By using Redis as the data store, it allows my team to focus on making better predictions through iterative improvements in an ever-changing market.Once the API was defined, I realized this approach would work for any dataset...not just playing stocks. (Please see Appendix D for how Red10 works with the IRIS dataset) Objectives1. Enable dataset evolution and feature engineering - create 1000s of machine learning models and find the best performing ones as you refine your data1. Manage **unique** machine learning models - how can I find accurate models and benchmark them?1. Apply **DevOps for machine learning** - treat models like build artifacts1. Simple iterative workflow: upload dataset, run job, evaluate1. Automatic analysis, model training, prediction and forecasting supported out of the box1. Export/Import models across environments1. Running pre-trained machine learning models on IoT, healthcare, drones, other resource constrained environments wanting predictive capabilities What is a Machine Learning Model?An algorithm is the general approach you will take. The model is what you get when you run the algorithm over your training data and what you use to make predictions on new data. You can generate a new model with the same algorithm with different data, or a different model from the same data with a different algorithm.Source: https://www.quora.com/What-is-the-difference-between-machine-learning-model-and-ML-algorithmPlease refer to [Appendix G](https://redten.io:8900/notebooks/notebooks/redten/RedTen-Intro.ipynbAppendix-G---Terminology-and-Definitions) for more defintions. Machine Learning ExampleIterative approach for navigating data to find the best predictions![Iterative approach for navigating data to find the best predictions](http://i.imgur.com/fAZGfFY.jpg) Navigating with a "simple" Model configuration - Test 1![Models learn by trial/error through data - here is a possible outcome](http://i.imgur.com/2QdciOm.jpg) Navigating with a different Model configuration - Test 2![Models learn by trial/error through data - here is a possible outcome](http://i.imgur.com/blyf8LT.jpg) Navigating with a different Model configuration - Test 3![Models learn by trial/error through data - here is a possible outcome](http://i.imgur.com/m8aEYZz.jpg) Navigating with a different Model configuration - Test 4![Models learn by trial/error through data - here is a possible outcome](http://i.imgur.com/nvDohTD.jpg) Navigating with the best case Model configuration![Models learn by trial/error through data - here is a possible outcome](http://i.imgur.com/WfAhHlm.jpg) Redis as a Machine Learning Data StoreRedis is a great scalable, in-memory storage solution for handling CRUD machine learning use cases. OriginAfter using Redis for years to handle: caching, pub/sub and auto-reloading capabilities on restart, it was an obvious first choice as a scalable storage solution for many pre-trained machine learning models. In my humble opinion, pulling gigabytes of pickled objects from a database would take too long and is not an ideal use case for a relational or nosql database (mysql/postgres/oracle/mongo). 
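To make the core pattern concrete, here is a minimal sketch — serialize a trained model, `SET` it into Redis under a descriptive key, then later `GET` and deserialize it to make predictions. This is purely illustrative and is not Red10's actual implementation: the key name is made up, and only the port (6100) is taken from the Redis key analysis shown later in this notebook.```
# illustrative sketch only -- not the Red10 implementation
import pickle
import numpy as np
import redis
from xgboost import XGBRegressor

r = redis.StrictRedis(host="localhost", port=6100)  # 6100 = the ML data store port used in the demo below

# train a toy model and store the pickled bytes under a descriptive key (key name is hypothetical)
model = XGBRegressor(n_estimators=50)
model.fit(np.random.rand(200, 5), np.random.rand(200))
r.set("_MD_SPY_FClose_example", pickle.dumps(model))

# later -- possibly on another box after an rdb restore -- reload the model and predict
restored = pickle.loads(r.get("_MD_SPY_FClose_example"))
print(restored.predict(np.random.rand(3, 5)))
```Because the models live in Redis, the same rdb snapshot that backs the cache also becomes the deployment artifact for every pre-trained model.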
Redis Machine Learning Data Store Responsibilities1. Store pre-trained models (it takes time and compute power to build them)1. Store model accuracies and predictions (this may be broken out into a separate instance in the future)1. Provide an API for making new predictions from stored models1. Provide a naming system for tracking deployed models across environments (focused on reducing model in-memory collisions)1. Provide a model deployment API (import/export) - DevOps for machine learning1. Implement automatic model reloading - using rdb snapshots1. Stability and scaling Over 10,000 Machine Learning ModelsHere's an analysis of the Redis machine learning data store after it broke through 10,000 pre-trained models in-memory.__Anyone that can use a REST API can use Red10 to do the same thing__ ###Code from __future__ import print_function import sys, os, requests, json, datetime # Load the environment and login the user from src.common.load_redten_ipython_env import user_token, user_login, csv_file, run_job, core, api_urls, ppj, rt_url, rt_user, rt_pass, rt_email, lg, good, boom, anmt, mark, ppj, uni_key, rest_login_as_user, rest_full_login, wait_for_job_to_finish, wait_on_job, get_job_analysis, get_job_results, get_analysis_manifest, get_job_cache_manifest, build_prediction_results, build_forecast_results, get_job_cache_manifest, search_ml_jobs, show_logs, show_errors, ipyImage, ipyHTML, ipyDisplay, pd, np # header lg("") good("Starting Redis Key Analysis: " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) lg("") # store the model mappings by dataset name models_iris = {} models_spy = {} models_xle = {} models_xlf = {} models_xli = {} models_xlu = {} total_models_in_redis = 0 # walk the redis dbs for db_idx in range(0, 16): db_file = "/tmp/db-" + str(db_idx) + "-keys" os.system("echo \"select " + str(db_idx) + " \n keys *\" | redis-cli -p 6100 > " + str(db_file)) database_keys = {} if os.path.exists(db_file): # if the file exists with open(db_file) as f: # open it lines_to_parse = f.readlines() # read all the lines for org_line in lines_to_parse: cur_line = org_line.rstrip("\n").strip().lstrip() # remove the newline and any other whitespace if str(cur_line) != "OK" and str(cur_line) != "": database_keys[cur_line] = True # if it's not the redis OK response or an empty string # for all lines # with the db file open else: boom("Failed parsing Redis db_file=" + str(db_file)) # end of parsing db_file if len(database_keys) > 1: for idx,k in enumerate(database_keys): # ignore the predictions and accuracies if "_PredictionsDF" not in k and "_Accuracy" not in k: num_underscores = len(str(k).split("_")) # ignore the analysis/manifest keys if num_underscores > 2: if "_IRIS" in k: models_iris[k] = True total_models_in_redis += 1 elif "_SPY" in k: models_spy[k] = True total_models_in_redis += 1 elif "_XLE" in k: models_xle[k] = True total_models_in_redis += 1 elif "_XLF" in k: models_xlf[k] = True total_models_in_redis += 1 elif "_XLI" in k: models_xli[k] = True total_models_in_redis += 1 elif "_XLU" in k: models_xlu[k] = True total_models_in_redis += 1 # end of checking it's not an analysis/manifest key # end of if it's not a prediction and not an accuarcy key # for the large db keyset lg("IRIS models=" + str(len(models_iris)), 5) lg("SPY models=" + str(len(models_spy)), 5) lg("XLE models=" + str(len(models_xle)), 5) lg("XLF models=" + str(len(models_xlf)), 5) lg("XLI models=" + str(len(models_xli)), 5) lg("XLU models=" + str(len(models_xlu)), 5) lg("") 
lg("---------------------------------------------") anmt("Total Pre-trained Machine Learning Models in Redis:") boom(str(total_models_in_redis)) lg("---------------------------------------------") lg("") # end of if there's database keys in the redis instance # end for all db files ###Output Logged in: https://redten.io Starting Redis Key Analysis: 2017-05-23 18:20:04 IRIS models=18 SPY models=2263 XLE models=2077 XLF models=2170 XLI models=2108 XLU models=2170 --------------------------------------------- Total Pre-trained Machine Learning Models in Redis: 10806 --------------------------------------------- ###Markdown Redis Machine Learning Data Store in ActionHere's the machine learning data store saving all +10,000 pre-trained machine learning models to an rdb file which can then be moved to other environments and entirely different systems to make predictions.![Storing Over 10000 Pre-trained Machine Learning Models in Redis](http://i.imgur.com/5waiLdD.gif) Generalized Data Science Workflow1. Understanding what is important to the product or business - What can we improve?1. Iterating on datasets (collect, build, define, implement, feature engineering, etc.)1. Teaching algorithms to make predictions - What algorithm(s) should we use?1. Evaluating predictive success - How can we benchmark success between models?1. Model tuning - Can we improve the success rate by changing how the model learns?1. Model deployment - How do we take this awesome model to production or deploy it to a mobile app/IoT/drone?1. Regression testing predictive success with new data points - How good is that model from last year (model deprecation)? __Additional reading__- https://www.quora.com/What-is-a-typical-day-like-as-a-data-scientist- http://www.kdnuggets.com/2016/11/ibm-dsx-data-science-experience.html- https://www.teamleada.com/handbook The Machine Learning EcosystemGreat tools with a complex, ever-changing user manual Perception![Space man...space](http://i.imgur.com/Sz7EUCA.jpg) And the RealitySorry kerbals. We'll get to space next time I promise!![Space is super easy](http://i.imgur.com/BnFxfAu.gif) definitely a data problem... The toolchains Easy to use vs granular control![Space Shuttle Endeavour's Control Panels](http://i.imgur.com/gkPWm7I.jpg) What data can we use?1. Pricing1. Sales1. User events1. Accounting1. Real Estate1. Fraud1. Risk1. Does it have numbers? What algorithm should we use?1. Who wins the Kaggle competitions? [eXtreme gradient boosting (XGB) won a bunch in 2016](http://www.kdnuggets.com/2016/03/xgboost-implementing-winningest-kaggle-algorithm-spark-flink.html)1. Many, many more choices![Linear Regression](http://i.imgur.com/ChidNeu.png)(source: http://aiplaybook.a16z.com/docs/guides/dl)1. Start iterating in a notebookhttp://nbviewer.jupyter.org/github/jmsteinw/Notebooks/blob/master/XG_Boost_NB.ipynb How does XGB work? 1. Time is a factor when training models![Unique Models](http://i.imgur.com/rbRvbas.png) 2. Highly parameterized```def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective="reg:linear", nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, seed=0, missing=None):```https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py What's the support story? 1. Updates are a breeze![Don't worry I got this](https://media.giphy.com/media/2ZFuPKWcSw16E/giphy.gif) 2. 
You're gonna need a bigger boat![Eats memory like pacman...chomp](http://i.imgur.com/KTVTwIh.png) 3. What if we just add some new data for this conference? SELL SELL SELL!![awww yea my own flash crash indicator perfect](http://i.imgur.com/qSAtWX2.png) 4. More data does not always lead to better predictions![Just Keep Stirring](http://i.imgur.com/rxGROk8.png) How does Red10 work?1. Two modes: manual and cloud service1. Runs anywhere with docker (virtualbox, on-prem, AWS, OpenShift, Swarm, Kubernetes)Please refer to [Appendix A](https://redten.io:8900/notebooks/notebooks/redten/RedTen-Intro.ipynbAppendix-A---What-is-Red10?) for more details on Red10's architecture. 1. Original, manual version using the GitHub RepoThe github repo: https://github.com/jay-johnson/sci-pype is built for using this workflow:![Sci-pype Machine Learning Workflow](http://i.imgur.com/UsLbBE2.jpg) 2. Red10 - multi-tenant, machine learning REST API built with Jupyter integration![AWS Deployment](http://i.imgur.com/LD1jvKY.png) Where can this be used?![Machine Learning Use Cases](http://i.imgur.com/Ua49NAP.jpg)Using Red10 for price forecasting: What's next? [Pricing Multi-Model Forecast](https://redten.io/forecast/) Please refer to the appendices for architecture slides and developer-centric tooling for reviewing offline. Appendix A - What is Red10?1. A containerized, distributed machine learning platform for streamlining analysis to build highly predictive models1. Multi-tenant REST API wrapping https://github.com/jay-johnson/sci-pype inside of Django REST Framework with JWT authentication (a raw-HTTP sketch follows this list).1. Users build, train and predict using machine learning models (https://github.com/dmlc/xgboost) housed in Redis (Tensorflow coming soon).1. Export and import pre-trained models with archival to S3.1. Use the same API to make new predictions using pre-trained models.1. Streamline analysis - every column in a dataset will be analyzed and compared.1. Ensemble learning - every column gets a distinct, trained model for helping improve predictive accuracy.1. Horizontally scalable machine learning cloud - maximize infrastructure by scaling up the number of Celery workers consuming published jobs out of Redis.1. S3 integration - images, analysis, predictions, export + import pre-trained models, manifest file.1. Security (pre-trained models, analysis, predictions) - the user must present a secret key to access any of the machine learning job's analysis and predictions.1. Swagger API - easy to build new web applications derived from the service layer.1. Centralized logging (Elasticsearch + Logstash + Kibana) - logs from Django REST Framework + Celery are published to logstash over Redis.1. Search historical analysis using Elasticsearch - machine learning jobs are automatically published as json documents to Elasticsearch on successful completion.1. Dockerized - runs anywhere with docker (virtualbox, on-prem, AWS, OpenShift, Swarm, K8).
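For readers who want to skip the notebook helpers, here is a rough sketch of what a raw JWT-authenticated call to the job-search endpoint could look like. The URL and payload fields mirror the `search_ml_jobs` example in Appendix F; the HTTP method, header scheme, and response handling are assumptions — the Swagger docs (Appendix F) are the authoritative reference.```
# sketch only -- endpoint URL and payload fields are taken from the Appendix F example;
# the HTTP method and Authorization header scheme are assumptions, confirm them against the Swagger docs
import requests

search_req = {"title": "", "dsname": "SPY", "desc": "", "features": "", "target_column": ""}
headers = {"Authorization": "JWT <your-token-here>"}

resp = requests.post("https://redten.io/ml/search/", json=search_req, headers=headers)
print(resp.status_code)
print(resp.json())
```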
Appendix B - Red10 Machine Learning Workflow Build, Train, Analyze and Store![Easy as 1-2-3](http://i.imgur.com/HuRpcws.png) Make New Predictions![Once trained and stored, new predictions use the same api](http://i.imgur.com/XWBZiGz.png) Moving Model Artifacts - Export / Import![Treat trained models like build artifacts](http://i.imgur.com/TyDi3xD.png) Appendix C - Red10 Manifest Mapping to Models in Redis Storing Multiple Models in Redis![Forecast storing multiple models in Redis](http://i.imgur.com/oDS9iSE.gif) Analysis Archival in S3![Bucket per user or co-located](http://i.imgur.com/lEET3Fx.png) Appendix D - Additional Presentations 1. [IRIS Multi-Model Predictions](https://redten.io/predictions/) 2. [Screen recorded - IRIS Multi-Model Predictions](https://redten.io/recorded-iris/) 3. [Screen recorded - Pricing Multi-Model Forecast](https://redten.io/recorded-forecast/) Appendix E - Redis Key Overview (Non-ML Data Store)Here is an overview of how Red10 uses Redis databases and keys for handling everything outside of the machine learning use cases. ###Code from __future__ import print_function import sys, os, requests, json, datetime # Load the environment and login the user from src.common.load_redten_ipython_env import user_token, user_login, csv_file, run_job, core, api_urls, ppj, rt_url, rt_user, rt_pass, rt_email, lg, good, boom, anmt, mark, ppj, uni_key, rest_login_as_user, rest_full_login, wait_for_job_to_finish, wait_on_job, get_job_analysis, get_job_results, get_analysis_manifest, get_job_cache_manifest, build_prediction_results, build_forecast_results, get_job_cache_manifest, search_ml_jobs, show_logs, show_errors, ipyImage, ipyHTML, ipyDisplay, pd, np # header lg("") good("Starting Redis Key Analysis: " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) lg("") # walk the redis dbs for db_idx in range(0, 16): db_file = "/tmp/db-" + str(db_idx) + "-keys" os.system("echo \"select " + str(db_idx) + " \n keys *\" | redis-cli > " + str(db_file)) database_keys = {} if os.path.exists(db_file): # if the file exists with open(db_file) as f: # open it lines_to_parse = f.readlines() # read all the lines for org_line in lines_to_parse: cur_line = org_line.rstrip("\n").strip().lstrip() # remove the newline and any other whitespace if str(cur_line) != "OK" and str(cur_line) != "": database_keys[cur_line] = True # if it's not the redis OK response # for all lines # with the db file open else: boom("Failed parsing Redis db_file=" + str(db_file)) # end of parsing db_file anmt("Redis DB=" + str(db_idx) + " keys=" + str(len(database_keys))) # used for manual inspection sample_cache_record = {} if db_idx == 0: equity_ticker = "SPY" num_equities = 0 detailed_keys = [] for idx,k in enumerate(database_keys): if "EQTY_" in str(k) or "EQID_" in str(k): num_equities += 1 else: lg(" - key=" + str(idx) + " name: " + str(k)) detailed_keys.append(k) # Pull this record from the "STOCKS:EQ_DAILY_SPY" # redis location => <cache app name:redis key> if "EQ_DAILY_SPY" == k: sample_cache_record = core.get_cache_from_redis(core.m_rds["STOCKS"], k, False, False) # end of pulling a sample # for the large db keyset if len(sample_cache_record) > 0: lg("") lg("Daily Sticks for Ticker(" + str(equity_ticker) + ") StartDate(" + str(sample_cache_record["Record"]["StartDate"]) + ") Sticks(" + str(len(sample_cache_record["Record"]["Sticks"])) + ")", 6) lg("Date, High, Low, Open, Close, Volume", 5) for idx,record in enumerate(reversed(sample_cache_record["Record"]["Sticks"])): lg(record["Date"] + ", " + 
record["High"] + ", " + record["Low"] + ", " + record["Open"] + ", " + record["Close"] + ", " + record["Volume"]) # stop after a few if idx > 10: break # end for all sticks in the cache lg("") # end of inspecting the sample record lg("DB=" + str(db_idx) + " detailed_keys=" + str(len(detailed_keys)) + " equities=" + str(num_equities)) else: for k in database_keys: if "session:" in k: lg(" - key: session:<redacted>") else: lg(" - key: " + str(k)) # end of for all keys # end of for the post processing keyset in this redis db lg("---------------------------------------------") lg("") # end for all db files ###Output Starting Redis Key Analysis: 2017-05-23 18:20:41 Redis DB=0 keys=16856 - key=279 name: DS_01_day_XLF_STICKS - key=421 name: _STATS_DAILY_SUMMARY_XLF - key=949 name: _OPTS_XLF_2017-05-19 - key=1159 name: _ALLBESTSSPREADS_XLU - key=1166 name: _ALLBESTSSPREADS_XLF - key=1167 name: _ALLBESTSSPREADS_XLE - key=1171 name: _ALLBESTSSPREADS_XLI - key=1368 name: _OPTS_XLU_2017-06-16 - key=1758 name: _390_min_XLU - key=1843 name: _LST_SPY_PRICING - key=3408 name: _390_min_XLI - key=3768 name: _OPTS_XLI_LATEST - key=4979 name: _OPTS_SPY_2017-06-16 - key=5013 name: _OPTS_XLF_2017-06-16 - key=5595 name: _STATS_DAILY_SUMMARY_XLE - key=5599 name: _STATS_DAILY_SUMMARY_XLI - key=5601 name: _STATS_DAILY_SUMMARY_XLU - key=6129 name: _LAST_390_min_XLI - key=6131 name: _LAST_390_min_XLF - key=6132 name: _LAST_390_min_XLE - key=6138 name: _LAST_390_min_XLU - key=6589 name: _OPTS_XLE_2017-05-19 - key=6654 name: _OPTS_XLU_LATEST - key=6713 name: _STATS_DAILY_SUMMARY_SPY - key=8038 name: _OPTS_XLF_LATEST - key=8536 name: ALL_EQUITIES_BY_TICKER - key=8539 name: _LST_XLI_PRICING - key=8860 name: EQ_DAILY_XLU - key=8864 name: EQ_DAILY_XLI - key=8874 name: EQ_DAILY_XLE - key=8877 name: EQ_DAILY_XLF - key=8967 name: DS_01_day_XLE_STICKS - key=9194 name: DS_01_day_XLI_STICKS - key=9510 name: _OPTS_XLI_2017-06-16 - key=10467 name: _OPTS_XLI_2017-05-19 - key=10921 name: _LST_XLE_PRICING - key=11725 name: EQ_DAILY_SPY - key=11951 name: _OPTS_XLE_2017-06-16 - key=12526 name: _OPTS_SPY_2017-05-19 - key=12592 name: _LST_XLU_PRICING - key=12829 name: _OPTS_XLU_2017-05-19 - key=12916 name: ALL_EQUITIES_BY_ID - key=13100 name: DS_01_day_SPY_STICKS - key=13112 name: _ALLBESTSSPREADS_SPY - key=13131 name: DS_01_day_XLU_STICKS - key=13965 name: _OPTS_XLE_LATEST - key=14607 name: _390_min_XLE - key=14610 name: _390_min_XLF - key=14754 name: _390_min_SPY - key=15028 name: _LAST_390_min_SPY - key=15201 name: _LST_XLF_PRICING - key=16781 name: _OPTS_SPY_LATEST Daily Sticks for Ticker(SPY) StartDate(03-05-2009) Sticks(2052) Date, High, Low, Open, Close, Volume 05-19-2017, 237.75, 235.43, 235.73, 235.82, 107120216 05-18-2017, 237.75, 235.43, 235.73, 236.77, 106236399 05-17-2017, 238.64, 235.75, 238.10, 235.82, 170138013 05-16-2017, 240.67, 239.63, 240.64, 240.08, 51241791 05-15-2017, 240.44, 239.45, 239.47, 240.30, 60025663 05-12-2017, 239.43, 238.67, 239.09, 238.98, 51877428 05-11-2017, 239.46, 238.13, 239.35, 239.87, 23215447 05-10-2017, 240.19, 239.04, 239.96, 239.66, 51363200 05-09-2017, 240.19, 239.04, 239.96, 239.44, 51363200 05-08-2017, 239.92, 239.17, 239.75, 239.66, 48385730 05-05-2017, 239.72, 238.68, 239.19, 239.70, 62001269 05-04-2017, 238.92, 237.78, 238.83, 238.76, 61462732 DB=0 detailed_keys=52 equities=16804 --------------------------------------------- Redis DB=1 keys=1 - key: session:<redacted> --------------------------------------------- Redis DB=2 keys=0 --------------------------------------------- Redis DB=3 
keys=10 - key: q:ccd68184a206285b77fab4da306ffbc5 - key: conj:auth_user_user_permissions:user_id=80 - key: schemes:auth_user_user_permissions - key: q:49c845196a7df1562b071f61363cf581 - key: schemes:auth_user_groups - key: conj:auth_user:username=jay - key: schemes:auth_user - key: _USER_2 - key: q:d8a63b9c4ef2116ec214e19be8c3826e - key: conj:auth_user_groups:user_id=80 --------------------------------------------- Redis DB=4 keys=4 - key: _kombu.binding.reply.celery.pidbox - key: _kombu.binding.celery - key: _kombu.binding.celeryev - key: _kombu.binding.celery.pidbox --------------------------------------------- Redis DB=5 keys=0 --------------------------------------------- Redis DB=6 keys=0 --------------------------------------------- Redis DB=7 keys=0 --------------------------------------------- Redis DB=8 keys=0 --------------------------------------------- Redis DB=9 keys=0 --------------------------------------------- Redis DB=10 keys=0 --------------------------------------------- Redis DB=11 keys=0 --------------------------------------------- Redis DB=12 keys=0 --------------------------------------------- Redis DB=13 keys=0 --------------------------------------------- Redis DB=14 keys=0 --------------------------------------------- Redis DB=15 keys=0 --------------------------------------------- ###Markdown Appendix F - Debugging Tools Develop with Swagger![Red10 Swagger](http://i.imgur.com/fYvg1mh.gif) Search Jobs in ElasticsearchRed10 is running an ELK stack for searching by user, dataset identifiers, and jobs.![ELK](http://i.imgur.com/cUCebca.png)Once a job completes it is automatically published to Elasticsearch ###Code from __future__ import print_function import sys, os, requests, json, datetime # Load the environment and login the user from src.common.load_redten_ipython_env import user_token, user_login, csv_file, run_job, core, api_urls, ppj, rt_url, rt_user, rt_pass, rt_email, lg, good, boom, anmt, mark, ppj, uni_key, rest_login_as_user, rest_full_login, wait_for_job_to_finish, wait_on_job, get_job_analysis, get_job_results, get_analysis_manifest, get_job_cache_manifest, build_prediction_results, build_forecast_results, get_job_cache_manifest, search_ml_jobs, show_logs, show_errors, ipyImage, ipyHTML, ipyDisplay, pd, np search_req = { "title" : "", # job title with completion "dsname" : "SPY", # dataset name with completion "desc" : "", # description with completion "features" : "", # feature search "target_column" : "" # name of target column for this analysis } job_search = {} job_res = {} if len(search_req) == 0 : boom("Please create a valid search request") else: job_res = search_ml_jobs(search_req) if job_res["status"] != "SUCCESS": boom("Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"])) else: job_search = job_res["record"] anmt("Job Matches=" + str(len(job_search))) lg(ppj(job_search), 5) # found jobs # end of searching for job ###Output Searching ML Jobs url=https://redten.io/ml/search/ SUCCESS - Job Search Response Status=200 Reason=OK Found Job={'target_column': '', 'desc': '', 'dsname': 'SPY', 'features': '', 'title': ''} results Job Matches=1 { "jobs": [ { "algo_name": "xgb-regressor", "desc": "Forecast simulation - 2017-05-23 22:00:09", "ds_name": "SPY", "feature_column_names": [ "FHigh", "FLow", "FOpen", "FClose", "FVolume" ], "ignore_features": [ "Ticker", "Date", "FDate", "FPrice", "DcsnDate", "Decision" ], "images": [ { "id": 12006, "image": 
"https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12006_75950bb597ae44cf.png", "title": "SPY Close forecast overlay between 2017-02-23 00:00:00 - 2017-05-12 00:00:00" }, { "id": 12007, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12007_981709fca5e94062.png", "title": "SPY-2-555 10-Days - Predictive Accuracy\nPredicted Open 10 Days vs Actual Open 10 Days" }, { "id": 12008, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12008_6e0222bdc5ab4adf.png", "title": "SPY-2-555 5-Days - Predictive Accuracy\nPredicted Low 5 Days vs Actual Low 5 Days" }, { "id": 12009, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12009_c70ec6e3690e466c.png", "title": "SPY-2-555 15-Days - Predictive Accuracy\nPredicted Open 15 Days vs Actual Open 15 Days" }, { "id": 12010, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12010_17b819f13e914bfa.png", "title": "SPY-2-555 25-Days - Predictive Accuracy\nPredicted High 25 Days vs Actual High 25 Days" }, { "id": 12011, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12011_f65ee56325394208.png", "title": "SPY-2-555 30-Days - Predictive Accuracy\nPredicted Open 30 Days vs Actual Open 30 Days" }, { "id": 12012, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12012_1a9331b9b8e24145.png", "title": "SPY-2-555 20-Days - Predictive Accuracy\nPredicted High 20 Days vs Actual High 20 Days" }, { "id": 12013, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12013_3b28fd99ff354edb.png", "title": "SPY-2-555 20-Days - Predictive Accuracy\nPredicted Volume 20 Days vs Actual Volume 20 Days" }, { "id": 12014, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12014_5d8cfd12bc7f49bf.png", "title": "SPY-2-555 25-Days - Predictive Accuracy\nPredicted Volume 25 Days vs Actual Volume 25 Days" }, { "id": 12015, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12015_5ee300ed25504c2b.png", "title": "SPY-2-555 30-Days - Predictive Accuracy\nPredicted Low 30 Days vs Actual Low 30 Days" }, { "id": 12016, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12016_029cb3266c46413d.png", "title": "SPY-2-555 20-Days - Predictive Accuracy\nPredicted Close 20 Days vs Actual Close 20 Days" }, { "id": 12017, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12017_3089d07bad8c4b85.png", "title": "SPY-2-555 5-Days - Predictive Accuracy\nPredicted Volume 5 Days vs Actual Volume 5 Days" }, { "id": 12018, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12018_dd20dc4f941946e4.png", "title": "SPY-2-555 25-Days - Predictive Accuracy\nPredicted Close 25 Days vs Actual Close 25 Days" }, { "id": 12019, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12019_813dbe0a739d4aa4.png", "title": "SPY-2-555 10-Days - Predictive Accuracy\nPredicted Low 10 Days vs Actual Low 10 Days" }, { "id": 12020, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12020_5b859fd5b3ca417e.png", "title": "SPY-2-555 5-Days - Predictive Accuracy\nPredicted Open 5 Days vs Actual Open 5 Days" }, { "id": 12021, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12021_3e6c529854ec47dd.png", "title": "SPY-2-555 15-Days - Predictive Accuracy\nPredicted Low 15 Days vs Actual Low 15 Days" }, { "id": 12022, "image": 
"https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12022_f335bee9141a4d39.png", "title": "SPY-2-555 10-Days - Predictive Accuracy\nPredicted High 10 Days vs Actual High 10 Days" }, { "id": 12023, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12023_916304a8e7184119.png", "title": "SPY-2-555 15-Days - Predictive Accuracy\nPredicted High 15 Days vs Actual High 15 Days" }, { "id": 12024, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12024_3a4a00f45ddb4472.png", "title": "SPY-2-555 25-Days - Predictive Accuracy\nPredicted Open 25 Days vs Actual Open 25 Days" }, { "id": 12025, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12025_beec39f18bb84469.png", "title": "SPY-2-555 30-Days - Predictive Accuracy\nPredicted High 30 Days vs Actual High 30 Days" }, { "id": 12026, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12026_bf5288b75d994cde.png", "title": "SPY-2-555 20-Days - Predictive Accuracy\nPredicted Open 20 Days vs Actual Open 20 Days" }, { "id": 12027, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12027_2620b246cfd54a68.png", "title": "SPY-2-555 10-Days - Predictive Accuracy\nPredicted Volume 10 Days vs Actual Volume 10 Days" }, { "id": 12028, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12028_f5d022f810ba4283.png", "title": "SPY-2-555 30-Days - Predictive Accuracy\nPredicted Volume 30 Days vs Actual Volume 30 Days" }, { "id": 12029, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12029_c7e56bc6a77748d6.png", "title": "SPY-2-555 15-Days - Predictive Accuracy\nPredicted Volume 15 Days vs Actual Volume 15 Days" }, { "id": 12030, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12030_bdcb901323804a6b.png", "title": "SPY-2-555 20-Days - Predictive Accuracy\nPredicted Low 20 Days vs Actual Low 20 Days" }, { "id": 12031, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12031_19276b3843ea467e.png", "title": "SPY-2-555 25-Days - Predictive Accuracy\nPredicted Low 25 Days vs Actual Low 25 Days" }, { "id": 12032, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12032_bf48ad03bdf7426e.png", "title": "SPY-2-555 30-Days - Predictive Accuracy\nPredicted Close 30 Days vs Actual Close 30 Days" }, { "id": 12033, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12033_42cb601864994eb2.png", "title": "SPY-2-555 15-Days - Predictive Accuracy\nPredicted Close 15 Days vs Actual Close 15 Days" }, { "id": 12034, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12034_b74469132a794f8e.png", "title": "SPY-2-555 10-Days - Predictive Accuracy\nPredicted Close 10 Days vs Actual Close 10 Days" }, { "id": 12035, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12035_1cb38442136f4719.png", "title": "SPY-2-555 5-Days - Predictive Accuracy\nPredicted High 5 Days vs Actual High 5 Days" }, { "id": 12036, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_555_12036_b5cb1f3c8a554dd2.png", "title": "SPY-2-555 5-Days - Predictive Accuracy\nPredicted Close 5 Days vs Actual Close 5 Days" } ], "job_id": 555, "ml_type": "Playbook-UnitsAhead", "prediction_type": "forecast", "result_id": "", "target_column_name": "FClose", "target_column_values": [ "GoodBuys", "BadBuys", "Not Finished" ], "title": "SPY Forecast v5 - 6b8e2873aaa84e3a8eadf5f260151c7", "train_id": 446, 
"user_id": 2, "username": "jay" }, { "algo_name": "xgb-regressor", "desc": "Forecast simulation - 2017-05-23 22:24:08", "ds_name": "SPY", "feature_column_names": [ "FHigh", "FLow", "FOpen", "FClose", "FVolume" ], "ignore_features": [ "Ticker", "Date", "FDate", "FPrice", "DcsnDate", "Decision" ], "images": [ { "id": 12037, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12037_0523b881d6a64baa.png", "title": "SPY Close forecast overlay between 2017-02-23 00:00:00 - 2017-05-12 00:00:00" }, { "id": 12038, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12038_c8fc6f8d4e294881.png", "title": "SPY-2-556 10-Days - Predictive Accuracy\nPredicted Open 10 Days vs Actual Open 10 Days" }, { "id": 12039, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12039_dba3efb9b56c4bb5.png", "title": "SPY-2-556 5-Days - Predictive Accuracy\nPredicted Low 5 Days vs Actual Low 5 Days" }, { "id": 12040, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12040_a3e6aafa5e6241c4.png", "title": "SPY-2-556 15-Days - Predictive Accuracy\nPredicted Open 15 Days vs Actual Open 15 Days" }, { "id": 12041, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12041_cf802c6530714ae7.png", "title": "SPY-2-556 25-Days - Predictive Accuracy\nPredicted High 25 Days vs Actual High 25 Days" }, { "id": 12042, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12042_152cb94cf47f4557.png", "title": "SPY-2-556 30-Days - Predictive Accuracy\nPredicted Open 30 Days vs Actual Open 30 Days" }, { "id": 12043, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12043_ddeb69b9254b4595.png", "title": "SPY-2-556 20-Days - Predictive Accuracy\nPredicted High 20 Days vs Actual High 20 Days" }, { "id": 12044, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12044_f64ef76a00cb4bb9.png", "title": "SPY-2-556 20-Days - Predictive Accuracy\nPredicted Volume 20 Days vs Actual Volume 20 Days" }, { "id": 12045, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12045_db89689ffd49445c.png", "title": "SPY-2-556 25-Days - Predictive Accuracy\nPredicted Volume 25 Days vs Actual Volume 25 Days" }, { "id": 12046, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12046_8c585f82cd0948c5.png", "title": "SPY-2-556 30-Days - Predictive Accuracy\nPredicted Low 30 Days vs Actual Low 30 Days" }, { "id": 12047, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12047_eba8de31f57e466a.png", "title": "SPY-2-556 20-Days - Predictive Accuracy\nPredicted Close 20 Days vs Actual Close 20 Days" }, { "id": 12048, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12048_7f90d455f7df4e2f.png", "title": "SPY-2-556 5-Days - Predictive Accuracy\nPredicted Volume 5 Days vs Actual Volume 5 Days" }, { "id": 12049, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12049_49d5e69d8c104eb2.png", "title": "SPY-2-556 25-Days - Predictive Accuracy\nPredicted Close 25 Days vs Actual Close 25 Days" }, { "id": 12050, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12050_430a5186b3d945e4.png", "title": "SPY-2-556 10-Days - Predictive Accuracy\nPredicted Low 10 Days vs Actual Low 10 Days" }, { "id": 12051, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12051_184561aa8af3489a.png", "title": "SPY-2-556 5-Days - Predictive Accuracy\nPredicted Open 5 
Days vs Actual Open 5 Days" }, { "id": 12052, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12052_26fbe6058c114619.png", "title": "SPY-2-556 15-Days - Predictive Accuracy\nPredicted Low 15 Days vs Actual Low 15 Days" }, { "id": 12053, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12053_de8af6d2ab474eb4.png", "title": "SPY-2-556 10-Days - Predictive Accuracy\nPredicted High 10 Days vs Actual High 10 Days" }, { "id": 12054, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12054_72baf6fe1cff49dd.png", "title": "SPY-2-556 15-Days - Predictive Accuracy\nPredicted High 15 Days vs Actual High 15 Days" }, { "id": 12055, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12055_76b743196398499f.png", "title": "SPY-2-556 25-Days - Predictive Accuracy\nPredicted Open 25 Days vs Actual Open 25 Days" }, { "id": 12056, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12056_53ca82cd037c4ebb.png", "title": "SPY-2-556 30-Days - Predictive Accuracy\nPredicted High 30 Days vs Actual High 30 Days" }, { "id": 12057, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12057_ceced8e2215749e5.png", "title": "SPY-2-556 20-Days - Predictive Accuracy\nPredicted Open 20 Days vs Actual Open 20 Days" }, { "id": 12058, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12058_660025f112e04a51.png", "title": "SPY-2-556 10-Days - Predictive Accuracy\nPredicted Volume 10 Days vs Actual Volume 10 Days" }, { "id": 12059, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12059_974874fc3c0c4c4b.png", "title": "SPY-2-556 30-Days - Predictive Accuracy\nPredicted Volume 30 Days vs Actual Volume 30 Days" }, { "id": 12060, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12060_c3c6ab2f04ae4669.png", "title": "SPY-2-556 15-Days - Predictive Accuracy\nPredicted Volume 15 Days vs Actual Volume 15 Days" }, { "id": 12061, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12061_087ef25764154835.png", "title": "SPY-2-556 20-Days - Predictive Accuracy\nPredicted Low 20 Days vs Actual Low 20 Days" }, { "id": 12062, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12062_50476dc42b95485a.png", "title": "SPY-2-556 25-Days - Predictive Accuracy\nPredicted Low 25 Days vs Actual Low 25 Days" }, { "id": 12063, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12063_3f0f9b7a91e6477d.png", "title": "SPY-2-556 30-Days - Predictive Accuracy\nPredicted Close 30 Days vs Actual Close 30 Days" }, { "id": 12064, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12064_4929471da7b848dc.png", "title": "SPY-2-556 15-Days - Predictive Accuracy\nPredicted Close 15 Days vs Actual Close 15 Days" }, { "id": 12065, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12065_c42e1ef3ecda4a50.png", "title": "SPY-2-556 10-Days - Predictive Accuracy\nPredicted Close 10 Days vs Actual Close 10 Days" }, { "id": 12066, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12066_bd242932692b48f1.png", "title": "SPY-2-556 5-Days - Predictive Accuracy\nPredicted High 5 Days vs Actual High 5 Days" }, { "id": 12067, "image": "https://rt-media.s3.amazonaws.com/media/imagesml/20170523/2_556_12067_a1d9483443844be5.png", "title": "SPY-2-556 5-Days - Predictive Accuracy\nPredicted Close 5 Days vs Actual Close 5 Days" } ], "job_id": 
556, "ml_type": "Playbook-UnitsAhead", "prediction_type": "forecast", "result_id": "", "target_column_name": "FClose", "target_column_values": [ "GoodBuys", "BadBuys", "Not Finished" ], "title": "SPY Forecast v5 - 1394ae9d74c24d5886f9608d3dd6328", "train_id": 447, "user_id": 2, "username": "jay" } ] } ###Markdown Pulling the Latest Error Logs from Elasticsearch ###Code boom("Finding latest error logs:") show_errors(limit=50) ###Output Finding latest error logs: 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=ec9440aa1c764cf4a33dcdfe6026f88 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=f650f23f0a664927a96a71875ca3068 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=08ceca09a27b4f78b930a005c274dc9 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=9ebef890badc4c1580465d8b91dd63d 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=f3f20e7324e44627b3f92a7799eaa3a 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=2a9c417e87f64166bd8c2a6a6599f84 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=68362d7935b04f8388923c4ed824818 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=68e882a944904f7bb6d936720b97721 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=b5746788a0044d00babb9f07214c8b2 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=21ab697410204f3fa09d834b196fbd8 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=3ae35c7e805c44838c8b3e2d2d2ce95 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=9b8e9a2dbea14c21aed734e139cd283 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=b0ef1d2dcb514c50a64f64be3437b5a 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=6705994cb85a45e0ba176d6c17d0d80 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=01faad42d3aa472fa72e66db8c1c330 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=c4d361075640431e85064ad8bef3876 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=0c5fc6baa3df481f9fdbd48a7da19ab 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=a63349d3704d40bb9099aaa1c59bcae 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=15a8bab5c07c4c2d84a7fe040e143e3 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=321ead788d814c698e99d9eeabc91e7 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=04b1492f378c49b2acf3676afb0b02c 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=1d128b7d59704452a7a9ed37a4e7063 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=856f6f937d804a54bb68784db8583b9 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=009cc460d514460db81e98b970ee1b1 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=a1bdaff8368e460f9a76d2dafcfb84b 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=a668ae7464184d15befb3e18879c55f 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=e2326e5dfde341d19d9544ad095eab4 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=5f447746158f447a802e68c9d5dc6ee 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=7a600f7bc45e40cfb3f369ff7fcf59f 2017-05-24 06:38:28 - ERROR - Demo error 
logs from celery workers - uuid=f775d4103f384d62bc1c89c84b20b54 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=625b6a5dbb314d5ab640a41599dbac1 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=99b27252cf174cc1a7fc4291d554d36 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=30734ba6aa7e4ba4afd2452d8803d12 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=2e5cf52864914bcfab1ee59206dfcd6 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=37ca18e73883420b9ef6c7aeda4b8ca 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=43c78c5e10854315ab89ee646ceea12 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=e7d6c4494ae44d7c91e8481ff2577cc 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=d3a540fd215847c3a7eb3e8f9e27c78 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=f5daabc7d2174a3482a9e1233424514 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=4817db6ced5b4e9ead9a5ba8d685f54 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=59c5ac1d733849c4b18b095f297d2eb 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=c055451ab361489684cc1d62c8418aa 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=9de37c7a76fa461ba2f21e0c5e92b1b 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=dc4ac1e3783449ee99e684964d9628d 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=277d0ea8fe324c818bc175d3dcc4327 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=e8b4a17841b2499f9016159de9e93f5 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=fc0a3c976ae04f89b0782d5ae2f72b7 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=acba2d491992455d9d0008fce60bb61 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=8e7b2e356a6b48bda2dd73ea055e8dd 2017-05-24 06:38:28 - ERROR - Demo error logs from celery workers - uuid=6bade4fdeb2b442d91001d4d992d88e ###Markdown Pulling the Latest Logs from Elasticsearch ###Code anmt("Finding latest logs:") show_logs(limit=50) ###Output Finding latest logs: 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=75e406e1515d451c8af06136c52e478 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=ffb1ac5fe34f4130a8eeea4fe188ef9 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=1f6954c4cd5e46ed9b0b0584199d725 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=c9a5a8f696a04c4bb7716a559718224 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=2b3850f444cd4e0cad5f1f1ae72168c 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=d056dc9c3c604d9a916a86127d7e31a 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=8d111ebd1eee4c68ac031dcd1f4c530 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=5aa041864fb24c34988b259330d1332 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=fd2647c217304a9ab87d1f31adb6ea9 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=c7b051014cb8461f856e336c4b684ec 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=9451dd6b9bfe4748aef42f2a9ebb551 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=db6ac99b562143e8acfda7206644b59 2017-05-24 
06:38:28 - INFO - Demo info logs from celery workers - uuid=a828e9a85a024c2d812b163c874a09f 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=720b3ce6457140dc816ae094fc902fb 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=79b83fd71b584590ac88498ebd99032 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=51f781ab22634605af5618749e2113f 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=7ba3ade713ee4c8faf98d2a63bdf79a 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=18584ef9b0574ee2a5ff78a28333bd3 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=a51fd7d46b7f42bb8b8c80699e8dad9 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=6c3d5d2c36ed41818dc1cec48178ee5 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=a477245220a546a1b697862505ea53b 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=a7653b5c882840ccb29e980beff1621 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=427d8a2d7eb647bf9cf59ad704213e8 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=93c7a418edd447a89318800392add05 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=d9fa8ae747fc446898be13043797d49 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=a1fc8c434970469489908ebdbd31118 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=db23a9cc9c494536bbaa09f4f8f070a 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=d9dde5fa49f44c04a8ba4e5990677fb 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=20c0577a6cfa4c9d92d2fa798ef1188 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=75b215377d924619b1b3b9c01a9f119 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=afb7256c2de349dd9aa38b1b8b01f2d 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=87cee4e35160499ca49560d7c0c85d5 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=6f841da50bfe4662850a09f6299fbc9 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=eb29413c5dc746d2818982063d6c613 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=2fd636b6e23f4d209f05a83ae8d6ef0 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=e6c5f3f333c040499e9a6a8d530716f 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=061a8f54530546c0827b45fbaa06955 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=d76f25a2cb5c41638a1c741fbb48229 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=f6d0a64a41f5427ba02672db494c911 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=5e7c427eaab946a3871db50790aefbf 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=f17bd62c8a9d4acbb80c6e635d8bf71 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=40c679067cc84134822da83b931b081 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=e194a7fa4acf4b95b43bca93d32883b 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=78932d799e014291b69833aa919a394 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=a6eee95461d14b3aa4cb56fa9f7bb42 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=507bf75f459148268c4224e7aee4c0c 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers 
- uuid=ff3541b5926b4f42a430a2d6fd6adc6 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=2e82480ae1a14529be7444746057acb 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=46cbfb98c761424ba45f99730c81456 2017-05-24 06:38:28 - INFO - Demo info logs from celery workers - uuid=db0e216d8938432b93fb3a7c7c5dc7b
AI/1_Diplomado_IA_y_DL_UNAL/1_Tareas/.ipynb_checkpoints/Desarrollo_Tarea_Regr_Lineal-checkpoint.ipynb
###Markdown DEVELOPMENT OF THE LINEAR REGRESSION ASSIGNMENT. BY: FELIPE GÓMEZ CASTAÑO 1. INTRODUCTION ###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

# Mount Google Drive and load the house prices dataset (kc_house_data.csv)
from google.colab import drive
drive.mount('/content/gdrive')

ruta_DB = 'gdrive/My Drive/FGC/7_AI/3_Data_Bases/1_UN_Intr_AI_and_DL/1_kc_house_data/kc_house_data.csv'
df = pd.read_csv(ruta_DB)

# Scatter plot of sale price against living area
area = df['sqft_living']
precio = df['price']
plt.figure(facecolor='gray', figsize=(10,10))
plt.scatter(area, precio)
plt.xlabel('Area [ft²]', fontsize=11)
plt.ylabel('Price [USD]', fontsize=11)
plt.title('Price vs Area', fontsize=15)
plt.grid()

# Chained matrix products with the @ operator
a = np.array([8,6])
b = np.array([[9,6], [6,3]])
a@b@b@b
###Output _____no_output_____
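###Markdown As an optional side note (not part of the original assignment), the chained product in the last cell can be cross-checked with `np.linalg.matrix_power`, since `a@b@b@b` is just `a` multiplied by `b` cubed. ###Code
# Optional check of the chained matrix product above.
# a @ b @ b @ b should equal a @ (b raised to the 3rd matrix power).
print(a @ np.linalg.matrix_power(b, 3))
print(np.allclose(a@b@b@b, a @ np.linalg.matrix_power(b, 3)))
###Output _____no_output_____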
sm-model-train-compile/5_neo_inference.ipynb
###Markdown Module 5. (Optional) Check DLR inference results in the cloud---**[Caution] In order to run this module successfully, the instance type of the SageMaker notebook instance must be `ml.m4.xlarge`.** ###Code %load_ext autoreload %autoreload 2 %store -r try: model_cloud_cpu_s3_path print("[OK] You can proceed.") except NameError: print("+"*60) print("[ERROR] Please run previous notebooks and before you continue.") print("+"*60) !pip install dlr==1.8.0 ###Output _____no_output_____ ###Markdown Extract Model Artifacts If you continue to do hands-on deploying Greengrass ML component in the Cloud9 environment, please take note of the output of the last part of `4.1_neo_compile.ipynb`. You must run the shell command below on Cloud9. ###Code %%bash -s {model_cloud_cpu_s3_path} cd /home/ec2-user/SageMaker/aiot-e2e-sagemaker-greengrass-v2-nvidia-jetson/sm-model-train-compile rm -rf model_cpu mkdir model_cpu && cd model_cpu aws s3 cp $1 . --recursive tar -xzvf model-ml_m4.tar.gz && rm model-ml_m4.tar.gz ###Output _____no_output_____ ###Markdown Prediction TestThe code below is the same code as `ggv2-deploy-on-device/artifacts/test_dlr.py`. ###Code import logging, sys import cv2 import glob import json import numpy as np import dlr from dlr import DLRModel def load_classes_dict(filename='classes_dict.json'): with open(filename, 'r') as fp: classes_dict = json.load(fp) classes_dict = {int(k):v for k,v in classes_dict.items()} return classes_dict def load_image(image_path): image_data = cv2.imread(image_path) image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB) return image_data def preprocess_image(image, image_shape=(224,224)): cvimage = cv2.resize(image, image_shape) img = np.asarray(cvimage, dtype='float32') img /= 255.0 # scale 0 to 1 mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) img = (img - mean) / std img = np.transpose(img, (2,0,1)) img = np.expand_dims(img, axis=0) # e.g., [1x3x224x224] return img def softmax(x): x_exp = np.exp(x - np.max(x)) f_x = x_exp / np.sum(x_exp) return f_x device = 'cpu' model = DLRModel(f'model_{device}', device) sample_image_dir = 'sample_images' classes_dict = load_classes_dict('classes_dict.json') extensions = (f"{sample_image_dir}/*.jpg", f"{sample_image_dir}/*.jpeg") img_filelist = [f for f_ in [glob.glob(e) for e in extensions] for f in f_] print(img_filelist) for img_filepath in img_filelist[:-1]: ground_truth = img_filepath.split('/')[-1] img = load_image(img_filepath) img_data = preprocess_image(img) output = model.run(img_data) probs = softmax(output[0][0]) sort_classes_by_probs = np.argsort(probs)[::-1] idx = sort_classes_by_probs[0] print("+"*80) print(f'predicted = {classes_dict[idx]}, {probs[idx]*100:.2f}%') print(f'ground_truth = {ground_truth}') ###Output _____no_output_____
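###Markdown As an optional extra check (not part of the original `test_dlr.py`), we can reuse the `model`, `softmax`, `classes_dict` and preprocessing helpers defined above to look at the top-3 classes for a single sample image. This is only a sketch and assumes the cells above have already been run. ###Code
# Optional: top-3 predictions for one sample image, reusing the objects above.
sample_path = img_filelist[0]
img_data = preprocess_image(load_image(sample_path))
probs = softmax(model.run(img_data)[0][0])

top_k = 3
for idx in np.argsort(probs)[::-1][:top_k]:
    print(f'{classes_dict[idx]}: {probs[idx]*100:.2f}%')
###Output _____no_output_____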
notebooks/stop-changes.ipynb
###Markdown OverviewThis Jupyter Notebook takes in data from a Google Sheet that contains stop change details and their associated high level categories and outputs a JSON file for each line to be used in the MyBus tool.The output file is used by the MyBus tool's results page and contains the Stop-level changes that are displayed there.Run all cells to output files in the format `-changes.json`As of 5/28/21, this should output data for 141 lines. ###Code import pandas as pd import numpy as np from zipfile import ZipFile DATA_INPUT_PATH = '../data/input/' DATA_OUTPUT_PATH = '../data/stop-changes/' SCRATCH_PATH = 'scratch/' LINES_PATH = '../data/lines.json' GOOGLE_SHEET_LINK = "https://docs.google.com/spreadsheets/d/e/2PACX-1vRg9oVntzWoLImtA3AFGTQYZoTa7JPMYG1TqjV__zIxpvaBhHG_ZWPyg_jaKt7TSVke2ZH3nEmGdzi3/pub?gid=1103678554&single=true&output=csv" ###Output _____no_output_____ ###Markdown Load GTFS DataUse `lines.json`, which was generated from the `line-dropdown` notebook.Extract `stops.zip` to the scratch folder. ###Code lines = pd.read_json(LINES_PATH) lines_array = lines.loc[:, 'route_number'].values lines.head() ###Output _____no_output_____ ###Markdown Extract Zipped StopsSince the stops.txt file is huge, we must zip it up and put it into the stops.zip so that we avoid checking in a big file into GitHub. ###Code with ZipFile('../data/input/stops.zip', 'r') as zf: zf.extractall(SCRATCH_PATH) STOPS_PATH = SCRATCH_PATH + 'stops.txt' stops = pd.read_csv(STOPS_PATH, usecols=['stop_id','stop_name'], dtype={'stop_id':'int','stop_name':'string'}) stops.head() ###Output _____no_output_____ ###Markdown Load Stop Changes DataData was compiled from spreadsheets and slides provided by Service Planners. Pull the data directly from the Google Sheet. ###Code # stop_changes = pd.read_csv(DATA_INPUT_PATH + 'stop_changes - ALL.csv', # usecols={'line', 'stop_id', 'service_canceled', 'service_changed', 'service_replaced', 'stop_canceled', 'stop_relocated', 'route_changed', 'owl_service_canceled'}) stop_changes = pd.read_csv(GOOGLE_SHEET_LINK, usecols={"line","direction","CHANGE","stop_id","ST_DIR","ON_STREET","AT_BET","AT_STREET","BETWEEN_ST","NEAR_FAR","stop_added","service_canceled","service_changed","service_replaced","stop_canceled","stop_relocated","route_changed","owl_service_added"}) stop_changes.head() # merge with stops.txt to get stop names #stop_changes.stop_id = stop_changes.stop_id.astype('int') stop_changes_with_names = pd.merge(stop_changes, stops, how='inner', on='stop_id') stop_changes_with_names = stop_changes_with_names[["line","direction","CHANGE","stop_id","ST_DIR","ON_STREET","AT_BET","AT_STREET","BETWEEN_ST","NEAR_FAR","stop_added","service_canceled","service_changed","service_replaced","stop_canceled","stop_relocated","route_changed","owl_service_added"]] stop_changes_with_names.to_csv(SCRATCH_PATH + 'stop_changes_with_names.csv') stop_changes_with_names.head() ###Output _____no_output_____ ###Markdown Analyze Data Stops in `stop_changes` not in GTFSOutputs results to file `stops-not-in-gtfs.json` in the `scratch` folder. 
###Code stops_not_in_gtfs = stop_changes[~stop_changes.stop_id.isin(stops.stop_id)] stops_not_in_gtfs = stops_not_in_gtfs[['line', 'stop_id']] stops_not_in_gtfs = stops_not_in_gtfs.sort_values(['line', 'stop_id']) stops_not_in_gtfs.to_json(SCRATCH_PATH + 'stops-not-in-gtfs.json', orient='records') stops_not_in_gtfs.head(20) ###Output _____no_output_____ ###Markdown Duplicate `line` & `stop_id` combosOutputs results to file `stops-changes-duplicates.json` in the `scratch` folder. ###Code # Find duplicate line + stop_id combos # exclude non-existent stop_ids stop_changes_existing_stopids = stop_changes.loc[stop_changes['stop_id'] != 0] stop_changes_existing_stopids = stop_changes_existing_stopids.groupby(['line', 'stop_id', 'direction']).size().reset_index(name="count") stop_changes_existing_stopids = stop_changes_existing_stopids.loc[stop_changes_existing_stopids['count'] > 1] # Output file with the stops that have duplicate rows. filtered_combos = pd.merge(stop_changes_existing_stopids, stop_changes, how='inner', on=['line', 'stop_id', 'direction']).sort_values(by=['line', 'stop_id', 'direction']) filtered_combos.to_csv(SCRATCH_PATH + 'stop-changes-duplicates.csv') filtered_combos.head(20) ###Output _____no_output_____ ###Markdown Add Stops for Restored ServiceLines 110 and 550 have restored service. The stops do not exist in the June 2021 GTFS data. Reference GitHub [issue 62](https://github.com/LACMTA/mybus/issues/62). ###Code restored_stops = pd.read_csv('../data/input/restored_stops.csv', usecols={'line', 'stop_id','direction'}, dtype={'line':'int','stop_id':'string','direction':'string'}) restored_stops['service_canceled'] = False restored_stops['service_changed'] = False restored_stops['service_replaced'] = False restored_stops['stop_canceled'] = False restored_stops['stop_relocated'] = False restored_stops['route_changed'] = False restored_stops['owl_service_canceled'] = False restored_stops['replaced_by_micro'] = False restored_stops['service_restored'] = True stop_changes stop_changes = pd.concat([stop_changes,restored_stops]) stop_changes ###Output _____no_output_____ ###Markdown Output Stop Changes DataIterate through lines.json and output a file for each line. ###Code # Loop through line numbers and output a file for each line with all data for that line line_count = 1 stop_count = 0 for line in lines_array: subset = stop_changes[stop_changes.line == line] filename = DATA_OUTPUT_PATH + str(line) + '-changes.json' stop_count += len(subset) print(subset[['line', 'direction', 'stop_id']]) subset.to_json(filename, orient='records') print('********** ' + 'Line ' + filename + ' created (' + str(len(subset)) + ')') print('********** ' + str(stop_count) + ' total stops') print('********** ' + str(line_count) + ' total lines') line_count += 1 ###Output line direction stop_id 0 2 eastbound 8030 1 2 eastbound 8011 2 2 eastbound 6393 3 2 eastbound 8009 4 2 eastbound 2421 .. ... ... ... 
113 2 westbound 4654 114 2 westbound 3357 115 2 westbound 3361 116 2 westbound 3358 117 2 westbound 3359 [118 rows x 3 columns] ********** Line ../data/stop-changes/2-changes.json created (118) ********** 118 total stops ********** 1 total lines line direction stop_id 118 4 eastbound 680 119 4 eastbound 12411 120 4 westbound 11900 121 4 westbound 9221 122 4 eastbound 3587 123 4 westbound 136 124 4 westbound 8097 ********** Line ../data/stop-changes/4-changes.json created (7) ********** 125 total stops ********** 2 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/10-changes.json created (0) ********** 125 total stops ********** 3 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/14-changes.json created (0) ********** 125 total stops ********** 4 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/16-changes.json created (0) ********** 125 total stops ********** 5 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/18-changes.json created (0) ********** 125 total stops ********** 6 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/20-changes.json created (0) ********** 125 total stops ********** 7 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/28-changes.json created (0) ********** 125 total stops ********** 8 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/30-changes.json created (0) ********** 125 total stops ********** 9 total lines line direction stop_id 125 33 eastbound 6974 126 33 eastbound 140717 127 33 eastbound 6924 128 33 eastbound 6934 129 33 eastbound 6929 130 33 eastbound 6945 131 33 eastbound 6972 132 33 westbound 15322 133 33 westbound 15274 134 33 westbound 15288 135 33 westbound 15264 136 33 westbound 15277 137 33 westbound 15323 ********** Line ../data/stop-changes/33-changes.json created (13) ********** 138 total stops ********** 10 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/35-changes.json created (0) ********** 138 total stops ********** 11 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/37-changes.json created (0) ********** 138 total stops ********** 12 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/38-changes.json created (0) ********** 138 total stops ********** 13 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/40-changes.json created (0) ********** 138 total stops ********** 14 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/45-changes.json created (0) ********** 138 total stops ********** 15 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/48-changes.json created (0) ********** 138 total stops ********** 16 total lines line direction stop_id 138 51 northbound 1854 139 51 northbound 140944 140 51 southbound 140944 141 51 northbound 15737 142 51 northbound 7403 143 51 northbound 15745 144 51 northbound 8644 145 51 northbound 7375 146 51 northbound 15765 147 51 
northbound 15763 148 51 northbound 5702 149 51 northbound 8473 150 51 southbound 8473 151 51 southbound 14089 152 51 southbound 7409 153 51 southbound 7411 154 51 southbound 19129 155 51 southbound 46 156 51 southbound 7386 157 51 southbound 15758 158 51 southbound 7377 ********** Line ../data/stop-changes/51-changes.json created (21) ********** 159 total stops ********** 17 total lines line direction stop_id 159 53 northbound 5417 160 53 northbound 30007 161 53 northbound 4528 162 53 northbound 14171 163 53 northbound 6 164 53 northbound 9674 165 53 northbound 798 166 53 northbound 820 167 53 northbound 30007 168 53 southbound 3703 169 53 southbound 1505 170 53 southbound 9345 171 53 southbound 9669 172 53 southbound 8597 173 53 southbound 140758 174 53 southbound 30007 175 53 northbound 4528 176 53 southbound 140758 177 53 southbound 5423 178 53 southbound 30007 179 53 northbound 1136 180 53 northbound 1142 181 53 northbound 9667 182 53 northbound 8818 183 53 northbound 283 184 53 northbound 10052 185 53 southbound 1738 186 53 southbound 10051 187 53 southbound 8871 188 53 southbound 292 189 53 southbound 1134 190 53 southbound 9675 191 53 southbound 9671 192 53 southbound 1136 193 53 southbound 1142 194 53 southbound 9667 195 53 southbound 8818 ********** Line ../data/stop-changes/53-changes.json created (37) ********** 196 total stops ********** 18 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/55-changes.json created (0) ********** 196 total stops ********** 19 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/60-changes.json created (0) ********** 196 total stops ********** 20 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/62-changes.json created (0) ********** 196 total stops ********** 21 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/66-changes.json created (0) ********** 196 total stops ********** 22 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/70-changes.json created (0) ********** 196 total stops ********** 23 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/76-changes.json created (0) ********** 196 total stops ********** 24 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/78-changes.json created (0) ********** 196 total stops ********** 25 total lines line direction stop_id 196 79 eastbound 6952 197 79 eastbound 5372 198 79 eastbound 5388 199 79 eastbound 5370 200 79 eastbound 99 .. ... ... ... 
338 79 westbound 13553 339 79 westbound 13549 340 79 westbound 7256 341 79 westbound 6535 342 79 westbound 16040 [147 rows x 3 columns] ********** Line ../data/stop-changes/79-changes.json created (147) ********** 343 total stops ********** 26 total lines line direction stop_id 343 81 northbound 5013 344 81 northbound 3527 345 81 southbound 12011 346 81 southbound 13450 ********** Line ../data/stop-changes/81-changes.json created (4) ********** 347 total stops ********** 27 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/90-changes.json created (0) ********** 347 total stops ********** 28 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/92-changes.json created (0) ********** 347 total stops ********** 29 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/94-changes.json created (0) ********** 347 total stops ********** 30 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/96-changes.json created (0) ********** 347 total stops ********** 31 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/102-changes.json created (0) ********** 347 total stops ********** 32 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/105-changes.json created (0) ********** 347 total stops ********** 33 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/106-changes.json created (0) ********** 347 total stops ********** 34 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/108-changes.json created (0) ********** 347 total stops ********** 35 total lines line direction stop_id 347 110 eastbound 5412 348 110 eastbound 5414 349 110 westbound 5416 350 110 eastbound 1447 351 110 eastbound 2101 352 110 eastbound 1446 353 110 eastbound 2068 354 110 eastbound 2069 355 110 eastbound 14511 356 110 eastbound 14510 357 110 eastbound 1999 358 110 westbound 10549 359 110 westbound 6130 360 110 westbound 6131 361 110 westbound 10620 362 110 westbound 10619 363 110 westbound 9977 364 110 westbound 10642 365 110 westbound 9978 0 110 Eastbound 65300037 1 110 Westbound 65300037 2 110 Eastbound 176153 3 110 Eastbound 2701 4 110 Eastbound 11194 5 110 Westbound 2702 6 110 Eastbound 2703 7 110 Westbound 11197 8 110 Eastbound 2704 9 110 Eastbound 140878 10 110 Westbound 20501 11 110 Eastbound 2705 12 110 Westbound 11195 13 110 Eastbound 2706 14 110 Westbound 16943 15 110 Westbound 14660 16 110 Westbound 12124 17 110 Westbound 8683 ********** Line ../data/stop-changes/110-changes.json created (37) ********** 384 total stops ********** 36 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/111-changes.json created (0) ********** 384 total stops ********** 37 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/115-changes.json created (0) ********** 384 total stops ********** 38 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/117-changes.json created (0) ********** 384 total stops ********** 39 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line 
../data/stop-changes/120-changes.json created (0) ********** 384 total stops ********** 40 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/125-changes.json created (0) ********** 384 total stops ********** 41 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/127-changes.json created (0) ********** 384 total stops ********** 42 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/128-changes.json created (0) ********** 384 total stops ********** 43 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/130-changes.json created (0) ********** 384 total stops ********** 44 total lines line direction stop_id 366 150 westbound 5507 367 150 westbound 15444 ********** Line ../data/stop-changes/150-changes.json created (2) ********** 386 total stops ********** 45 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/152-changes.json created (0) ********** 386 total stops ********** 46 total lines line direction stop_id 368 154 eastbound 30001 369 154 eastbound 15548 370 154 eastbound 15544 371 154 eastbound 5373 372 154 eastbound 5294 373 154 eastbound 5369 374 154 westbound 5291 375 154 westbound 5295 376 154 westbound 5374 377 154 westbound 7196 378 154 westbound 7200 379 154 westbound 20002 380 154 eastbound 159 381 154 eastbound 11300 382 154 eastbound 11306 383 154 eastbound 11301 384 154 eastbound 4191 385 154 eastbound 4179 386 154 eastbound 4194 387 154 eastbound 4195 388 154 eastbound 4172 389 154 eastbound 4175 390 154 eastbound 3458 391 154 eastbound 3459 392 154 westbound 11949 393 154 westbound 11948 394 154 westbound 12655 395 154 westbound 12652 396 154 westbound 12670 397 154 westbound 12677 398 154 westbound 12659 399 154 westbound 12674 400 154 westbound 2812 401 154 westbound 2818 402 154 westbound 2811 403 154 westbound 159 ********** Line ../data/stop-changes/154-changes.json created (36) ********** 422 total stops ********** 47 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/155-changes.json created (0) ********** 422 total stops ********** 48 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/158-changes.json created (0) ********** 422 total stops ********** 49 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/161-changes.json created (0) ********** 422 total stops ********** 50 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/162-changes.json created (0) ********** 422 total stops ********** 51 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/164-changes.json created (0) ********** 422 total stops ********** 52 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/165-changes.json created (0) ********** 422 total stops ********** 53 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/166-changes.json created (0) ********** 422 total stops ********** 54 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line 
../data/stop-changes/167-changes.json created (0) ********** 422 total stops ********** 55 total lines line direction stop_id 404 169 eastbound 5505 405 169 westbound 5507 406 169 westbound 5505 407 169 eastbound 15457 408 169 westbound 15457 ********** Line ../data/stop-changes/169-changes.json created (5) ********** 427 total stops ********** 56 total lines line direction stop_id 409 177 northbound 3464 410 177 northbound 3466 411 177 northbound 3470 412 177 northbound 5161 413 177 northbound 5168 414 177 southbound 5173 415 177 southbound 11972 416 177 southbound 11962 417 177 southbound 11958 418 177 southbound 10395 419 177 northbound 1705 420 177 northbound 9824 421 177 southbound 10260 422 177 southbound 10261 ********** Line ../data/stop-changes/177-changes.json created (14) ********** 441 total stops ********** 57 total lines line direction stop_id 507 180 eastbound 2474 508 180 eastbound 7088 509 180 eastbound 3050 510 180 eastbound 762 511 180 eastbound 763 512 180 eastbound 1343 513 180 westbound 9875 514 180 westbound 9317 515 180 westbound 9319 516 180 westbound 15433 517 180 westbound 11007 ********** Line ../data/stop-changes/180-changes.json created (11) ********** 452 total stops ********** 58 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/182-changes.json created (0) ********** 452 total stops ********** 59 total lines line direction stop_id 518 200 northbound 2459 519 200 northbound 11234 520 200 northbound 4997 521 200 northbound 4981 522 200 northbound 4980 .. ... ... ... 594 200 southbound 13437 595 200 southbound 2738 596 200 southbound 2459 597 200 southbound 7760 598 200 southbound 2738 [81 rows x 3 columns] ********** Line ../data/stop-changes/200-changes.json created (81) ********** 533 total stops ********** 60 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/202-changes.json created (0) ********** 533 total stops ********** 61 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/204-changes.json created (0) ********** 533 total stops ********** 62 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/205-changes.json created (0) ********** 533 total stops ********** 63 total lines line direction stop_id 599 206 northbound 2474 600 206 southbound 11007 ********** Line ../data/stop-changes/206-changes.json created (2) ********** 535 total stops ********** 64 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/207-changes.json created (0) ********** 535 total stops ********** 65 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/209-changes.json created (0) ********** 535 total stops ********** 66 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/210-changes.json created (0) ********** 535 total stops ********** 67 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/211-changes.json created (0) ********** 535 total stops ********** 68 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/212-changes.json created (0) ********** 535 total stops ********** 69 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] 
********** Line ../data/stop-changes/215-changes.json created (0) ********** 535 total stops ********** 70 total lines line direction stop_id 602 217 northbound 4968 603 217 northbound 4967 604 217 northbound 2474 605 217 southbound 11007 606 217 southbound 13410 607 217 southbound 13411 ********** Line ../data/stop-changes/217-changes.json created (6) ********** 541 total stops ********** 71 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/218-changes.json created (0) ********** 541 total stops ********** 72 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/222-changes.json created (0) ********** 541 total stops ********** 73 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/224-changes.json created (0) ********** 541 total stops ********** 74 total lines line direction stop_id 608 230 northbound 2525 609 230 northbound 2524 610 230 northbound 11066 611 230 northbound 11065 612 230 northbound 2542 613 230 northbound 2543 614 230 northbound 2550 615 230 northbound 3425 616 230 northbound 3439 617 230 northbound 3454 618 230 northbound 3327 619 230 northbound 4417 620 230 southbound 3327 621 230 southbound 3446 622 230 southbound 3443 623 230 southbound 3433 624 230 southbound 6123 625 230 southbound 20085 626 230 southbound 3468 627 230 southbound 2564 628 230 southbound 2528 629 230 southbound 2529 630 230 southbound 11063 631 230 southbound 11062 ********** Line ../data/stop-changes/230-changes.json created (24) ********** 565 total stops ********** 75 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/232-changes.json created (0) ********** 565 total stops ********** 76 total lines line direction stop_id 632 233 northbound 14439 633 233 southbound 5208 634 233 southbound 5175 635 233 northbound 2955 636 233 northbound 20004 637 233 southbound 8263 638 233 southbound 16556 639 233 southbound 3565 640 233 southbound 16771 641 233 southbound 6042 ********** Line ../data/stop-changes/233-changes.json created (10) ********** 575 total stops ********** 77 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/234-changes.json created (0) ********** 575 total stops ********** 78 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/235-changes.json created (0) ********** 575 total stops ********** 79 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/236-changes.json created (0) ********** 575 total stops ********** 80 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/237-changes.json created (0) ********** 575 total stops ********** 81 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/240-changes.json created (0) ********** 575 total stops ********** 82 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/242-changes.json created (0) ********** 575 total stops ********** 83 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/243-changes.json created (0) ********** 575 total stops ********** 84 total lines Empty DataFrame Columns: [line, direction, 
stop_id] Index: [] ********** Line ../data/stop-changes/244-changes.json created (0) ********** 575 total stops ********** 85 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/246-changes.json created (0) ********** 575 total stops ********** 86 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/251-changes.json created (0) ********** 575 total stops ********** 87 total lines line direction stop_id 642 256 southbound 5160 643 256 northbound 9826 644 256 northbound 9853 645 256 northbound 1594 646 256 northbound 9856 647 256 northbound 9857 648 256 northbound 9858 649 256 northbound 9871 650 256 northbound 9877 651 256 northbound 9887 652 256 northbound 9923 653 256 northbound 9944 654 256 northbound 9972 655 256 northbound 9999 656 256 northbound 10000 657 256 northbound 10053 658 256 northbound 10055 659 256 northbound 10057 660 256 northbound 10059 661 256 northbound 10061 662 256 northbound 10092 663 256 northbound 10099 664 256 northbound 3451 665 256 northbound 3455 666 256 northbound 10105 667 256 southbound 11946 668 256 southbound 3452 669 256 southbound 11292 670 256 southbound 11297 671 256 southbound 11302 672 256 southbound 11311 673 256 southbound 11325 674 256 southbound 11222 675 256 southbound 11327 676 256 southbound 13000087 677 256 southbound 13000088 678 256 southbound 13000089 679 256 southbound 13000091 680 256 southbound 13000092 681 256 southbound 13000093 682 256 southbound 13000094 683 256 southbound 13000095 684 256 southbound 13000096 685 256 southbound 13000097 686 256 southbound 13000098 687 256 southbound 13000099 688 256 southbound 13000100 689 256 southbound 9826 ********** Line ../data/stop-changes/256-changes.json created (48) ********** 623 total stops ********** 88 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/258-changes.json created (0) ********** 623 total stops ********** 89 total lines line direction stop_id 690 260 northbound 1705 691 260 northbound 1866 692 260 southbound 1866 693 260 southbound 10395 694 260 northbound 3467 695 260 northbound 3466 696 260 northbound 3470 697 260 northbound 3482 698 260 northbound 18015 699 260 northbound 1851 700 260 northbound 3475 701 260 northbound 1849 702 260 northbound 3485 703 260 northbound 3481 704 260 northbound 1487 705 260 northbound 3483 706 260 northbound 3474 707 260 northbound 3469 708 260 northbound 7502 709 260 northbound 3476 710 260 northbound 1846 711 260 northbound 3477 712 260 northbound 11968 713 260 northbound 11963 714 260 northbound 3480 715 260 northbound 11973 716 260 northbound 954 717 260 northbound 11957 718 260 southbound 954 719 260 southbound 11979 720 260 southbound 11971 721 260 southbound 11975 722 260 southbound 10402 723 260 southbound 11967 724 260 southbound 3488 725 260 southbound 18016 726 260 southbound 11972 727 260 southbound 11962 728 260 southbound 11958 ********** Line ../data/stop-changes/260-changes.json created (39) ********** 662 total stops ********** 90 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/265-changes.json created (0) ********** 662 total stops ********** 91 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/266-changes.json created (0) ********** 662 total stops ********** 92 total lines line direction stop_id 729 267 northbound 4673 730 267 
northbound 11957 731 267 southbound 11957 732 267 northbound 4959 733 267 northbound 501 ********** Line ../data/stop-changes/267-changes.json created (5) ********** 667 total stops ********** 93 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/268-changes.json created (0) ********** 667 total stops ********** 94 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/287-changes.json created (0) ********** 667 total stops ********** 95 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/294-changes.json created (0) ********** 667 total stops ********** 96 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/344-changes.json created (0) ********** 667 total stops ********** 97 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/460-changes.json created (0) ********** 667 total stops ********** 98 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/487-changes.json created (0) ********** 667 total stops ********** 99 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/489-changes.json created (0) ********** 667 total stops ********** 100 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/501-changes.json created (0) ********** 667 total stops ********** 101 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/534-changes.json created (0) ********** 667 total stops ********** 102 total lines line direction stop_id 18 550 Northbound 30005 19 550 Northbound 2321 20 550 Northbound 10855 21 550 Northbound 2320 22 550 Northbound 2322 23 550 Northbound 7262 24 550 Northbound 1886 25 550 Northbound 10378 26 550 Northbound 10390 27 550 Northbound 5635 28 550 Northbound 5656 29 550 Northbound 7766 30 550 Northbound 7752 31 550 Northbound 2457 32 550 Southbound 2457 33 550 Southbound 2458 34 550 Southbound 140951 35 550 Southbound 14039 36 550 Southbound 1841 37 550 Southbound 12002 38 550 Southbound 1830 39 550 Southbound 10437 40 550 Southbound 13424 41 550 Southbound 15616 42 550 Southbound 10994 43 550 Southbound 10853 44 550 Southbound 2324 45 550 Southbound 10846 ********** Line ../data/stop-changes/550-changes.json created (28) ********** 695 total stops ********** 103 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/577-changes.json created (0) ********** 695 total stops ********** 104 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/601-changes.json created (0) ********** 695 total stops ********** 105 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/602-changes.json created (0) ********** 695 total stops ********** 106 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/603-changes.json created (0) ********** 695 total stops ********** 107 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/605-changes.json created (0) ********** 695 total stops ********** 108 total lines 
Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/611-changes.json created (0) ********** 695 total stops ********** 109 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/617-changes.json created (0) ********** 695 total stops ********** 110 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/662-changes.json created (0) ********** 695 total stops ********** 111 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/665-changes.json created (0) ********** 695 total stops ********** 112 total lines line direction stop_id 803 686 northbound 1343 804 686 southbound 9875 ********** Line ../data/stop-changes/686-changes.json created (2) ********** 697 total stops ********** 113 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/690-changes.json created (0) ********** 697 total stops ********** 114 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/690-changes.json created (0) ********** 697 total stops ********** 115 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/720-changes.json created (0) ********** 697 total stops ********** 116 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/754-changes.json created (0) ********** 697 total stops ********** 117 total lines line direction stop_id 805 761 northbound 14439 806 761 southbound 5208 807 761 southbound 5175 808 761 northbound 2955 809 761 northbound 20004 810 761 southbound 8263 811 761 southbound 16556 812 761 southbound 3565 813 761 southbound 16771 814 761 southbound 6042 ********** Line ../data/stop-changes/761-changes.json created (10) ********** 707 total stops ********** 118 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/854-changes.json created (0) ********** 707 total stops ********** 119 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/901-changes.json created (0) ********** 707 total stops ********** 120 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/910-changes.json created (0) ********** 707 total stops ********** 121 total lines Empty DataFrame Columns: [line, direction, stop_id] Index: [] ********** Line ../data/stop-changes/950-changes.json created (0) ********** 707 total stops ********** 122 total lines
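###Markdown As a quick sanity check (not part of the original notebook), one of the generated per-line files can be read back to confirm it parses as expected. Line 154 is used here only because the output above shows it has stop changes; any other line number would work. ###Code
# Optional sanity check: read back one of the generated per-line files.
check_line = 154  # example line with stop changes in the output above
check_df = pd.read_json(DATA_OUTPUT_PATH + str(check_line) + '-changes.json', orient='records')
print(len(check_df), 'stop change records for line', check_line)
check_df.head()
###Output _____no_output_____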
Machine-Learning/Linear-Regression/Linear Regression with Python (USA_Housing case).ipynb
###Markdown Linear Regression with Python. Use USA_Housing.csv. The data contains the following columns:
* 'Avg. Area Income': Avg. Income of residents of the city the house is located in.
* 'Avg. Area House Age': Avg Age of Houses in same city
* 'Avg. Area Number of Rooms': Avg Number of Rooms for Houses in same city
* 'Avg. Area Number of Bedrooms': Avg Number of Bedrooms for Houses in same city
* 'Area Population': Population of the city the house is located in
* 'Price': Price that the house sold at
* 'Address': Address for the house

**Let's get started!** Check out the data. Let's get our environment ready with the libraries we'll need and then import the data! Import Libraries ###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output _____no_output_____ ###Markdown Check out the Data ###Code
USAhousing = pd.read_csv('USA_Housing.csv')
USAhousing.head()
USAhousing.info()
USAhousing.describe()
USAhousing.columns
###Output _____no_output_____ ###Markdown EDA. Let's create some simple plots to check out the data! ###Code
sns.pairplot(USAhousing)
sns.distplot(USAhousing['Price'])
sns.heatmap(USAhousing.corr())
###Output _____no_output_____ ###Markdown Training a Linear Regression Model. Let's now begin to train our regression model! We will need to first split up our data into an X array that contains the features to train on, and a y array with the target variable, in this case the Price column. We will toss out the Address column because it only has text info that the linear regression model can't use. X and y arrays ###Code
X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
                'Avg. Area Number of Bedrooms', 'Area Population']]
y = USAhousing['Price']
###Output _____no_output_____ ###Markdown Train Test Split. Now let's split the data into a training set and a testing set. We will train our model on the training set and then use the test set to evaluate the model. ###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
###Output _____no_output_____ ###Markdown Creating and Training the Model ###Code
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train,y_train)
###Output _____no_output_____ ###Markdown Model Evaluation. Let's evaluate the model by checking out its coefficients and how we can interpret them. ###Code
# print the intercept
print(lm.intercept_)
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
coeff_df
###Output _____no_output_____ ###Markdown Interpreting the coefficients:
- Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.52 **.
- Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.28 **.
- Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Rooms** is associated with an **increase of \$122368.67 **.
- Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.80 **.
- Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.15 **.

Does this make sense? Probably not, because I made up this data.
If you want real data to repeat this sort of analysis, check out the [boston dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html):

    from sklearn.datasets import load_boston
    boston = load_boston()
    print(boston.DESCR)
    boston_df = boston.data

Predictions from our Model. Let's grab predictions off our test set and see how well it did! ###Code
predictions = lm.predict(X_test)
plt.scatter(y_test,predictions)
###Output _____no_output_____ ###Markdown **Residual Histogram** ###Code
sns.distplot((y_test-predictions),bins=50);
###Output _____no_output_____ ###Markdown Regression Evaluation Metrics. Here are three common evaluation metrics for regression problems:

**Mean Absolute Error** (MAE) is the mean of the absolute value of the errors:
$$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$

**Mean Squared Error** (MSE) is the mean of the squared errors:
$$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$

**Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors:
$$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$

Comparing these metrics:
- **MAE** is the easiest to understand, because it's the average error.
- **MSE** is more popular than MAE, because MSE "punishes" larger errors, which tends to be useful in the real world.
- **RMSE** is even more popular than MSE, because RMSE is interpretable in the "y" units.

All of these are **loss functions**, because we want to minimize them. ###Code
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
###Output _____no_output_____
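###Markdown To connect the formulas above with the sklearn output, here is a small optional check (not part of the original notebook) that computes the same three metrics directly with NumPy. It assumes `y_test` and `predictions` from the previous cells are still in memory. ###Code
# Optional check: compute MAE, MSE and RMSE straight from the formulas above.
errors = y_test - predictions
mae = np.mean(np.abs(errors))
mse = np.mean(errors**2)
rmse = np.sqrt(mse)
print('MAE:', mae)
print('MSE:', mse)
print('RMSE:', rmse)
###Output _____no_output_____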
Week 07/SLU12 - Linear Algebra & Numpy - Part 1/Learning Notebook 1 - Vectors and NumPy.ipynb
###Markdown SLU12 - Linear Algebra & NumPy, Part 1 Learning Notebook 1/2 - vectors *Here we start with the most important concept in Linear Algebra, the vector, and learn about the NumPy library. In Learning Notebook 2/2 we extend the concept of a vector and all its properties to matrices.* --- **What's in this notebook**1. [Vectors](1.-Vectors) 1.1 [Vector definition](1.1-Vector-definition) - vector definition, representations and __the transpose__ of a vector 1.2 [Vector norm](1.2-Vector-norm) 1.3 [Vector operations: multiplication by scalar and addition](1.3-Vector-operations:-multiplication-by-scalar-and-addition) 1.4 [Linear combinations and linear independence](1.4-Linear-combinations-and-linear-independence) 1.5 [Representing all vectors in space](1.5-Representing-all-vectors-in-space) 1.6 [Dot product](1.6-Dot-product) 1.7 [Orthogonal vectors](1.7-Orthogonal-vectors) 1.8 [Vectors recap](1.8-Vectors-recap) 2. [Introduction to NumPy arrays](2.-Introduction-to-NumPy-arrays) 2.1 [The NumPy package](2.1-The-NumPy-package) 2.2 [The `ndarray`](2.2-The-ndarray) - the numpy array, dimensions and shape of an array 2.3 [Vectors and linear algebra using NumPy](2.3-Vectors-and-linear-algebra-using-NumPy) - basic linear algebra operations on vectors --- Imports ###Code # numpy is the package we're going to learn about # it is a widespread convention to import numpy using the alias np # this convention makes your code more readable, so do use it import numpy as np # auxiliary stuff import utils ###Output _____no_output_____ ###Markdown --- Quick disclaimer> By now you probably know a lot about programming in Python, so you deserve to take a few moments to feel proud of yourself. Do it! :)> > ...> > Feeling proud? Awesome.> > > Now, to actually become a real data professional, you need Mathematics. There is no possible scenario in which you become a good, responsible data scientist without a good foundation of Linear Algebra, Calculus and Statistics.> >> This is, of course, if you want to avoid joining the uncool, damaging side of data science. The side that fails to measure bias in the data, propagates wrongful conclusions and misuses black-box algorithms.Disclaimer done. Ready? Let's do this!!--- Linear Algebra in SLU12 and SLU13By the end of SLUs 12 and 13, you'll be familiar with all the linear algebra you need to read the matrix form solution to the *multiple linear regression algorithm*, the most popular starting point for machine learning students: 🙀$$ \mathbf{\beta} = (X^TX)^{-1}(X^T\mathbf{y})$$Advices to navigate these SLUs:**1 - Don't rush through the materials**: linear algebra takes time, so go slowly. It's ok if one week is not enough. It will depend a lot on each person's background and familiarity with the subject**2 - Keep to the order of the sections and don't skip sections**: the sections were carefully put together in a **sequential** manner so if you skip sections you will lose important info that will be necessary later on**3 - Grab a paper and solve the (optional) *pen and paper exercises*** that appear throughout the learning notebooks. They help you stop and exercise what you have just learned as you go through the materials**4 - Reach out on Slack** if you get stuck on a concept or exercise. I definitely recommend you try to solve it on your own for a while before doing so though, as this helps with the learning process. If you can't really get there, **then** reach out. --- 0. 
Intro What is Linear Algebra?The formal definition of Linear algebra as per [Wikipedia](https://en.wikipedia.org/wiki/Linear_algebra) is that it is the branch of Mathematics concerning linear equations and functions, and how we represent them in vector spaces and matrices.A quite simplistic, yet more understandable, way of definining it would be that [linear algebra is about](https://machinelearningmastery.com/gentle-introduction-linear-algebra/) using "arithmetic on columns of numbers called __vectors__ and arrays of numbers called __matrices__, to create new columns and arrays of numbers." --- 1. Vectors Vectors in 1-Dimensional (1D) spaces Consider the kitten below, walking timidly at a speed of $0.1$ m/s in order to reach his food bowl:The __velocity__ of the kitten is a quantity **defined by both a magnitude and a direction**, which we call the velocity **vector** of the kitten, $\mathbf{v} = [0.1]$ m/s:- The value $0.1$ corresponds to a **magnitude** which tells us how fast the kitten is walking;- We know the kitten is walking in a __straight line__ to the food bowl, so we can define his **direction**.We could draw this 1D (1-dimensional) vector in a 1D [coordinate system](https://en.wikipedia.org/wiki/Coordinate_system), as follows:---Our kitten has now eaten all his food, and he's staring at you, asking for a refill:As he stares at you, not making a move, we can represent his zero velocity by the zero vector $\mathbf{v} = [0]$ m/s. --- Vectors in 2-Dimensional (2D) spaces A **2-dimensional vector** belongs to the 2-dimensional real coordinate space, $\mathbb{R}^2$, and we can plot it using a Cartesian coordinate system.Notice that the vector $[-1, 2]$ has its tail located at the origin of the x-y plane, $(0,0)$, and its tip (head) located at the point $(-1, 2)$. To go from tail to tip, we walk 1 step leftwards (x-coordinate = -1) and 2 steps upwards (y-coordinate = 2).> 📌 In linear algebra it is standard to root the vector at the origin of the coordinate system. We can draw the 2D-vector of a cyclist's velocity climbing a hill at 12 km/h, $\mathbf{v}$, on the xy-plane, where y is perpendicular to the centre of the Earth: > 📝 **Pen and paper exercise 1**: Grab a pen (actually pencil+rubber might be better) and a piece of paper and draw the vectors $[-1, 2]$, $[2, -1]$ and $[1, 2]$ on the xy-plane. Notice that they all have the same *magnitude* (length) but different *direction*s. Vectors in 3-Dimensional (3D) spaces--- A **3-dimensional vector** belongs to the 3-dimensional real coordinate space, $\mathbb{R}^3$. We can draw it on the xyz coordinate system, using the same logic as for the xy-plane.For example suppose you have a 3D vector given by $[2,4,3]$. To get from the tail to the tip of this vector, you would: - For the x coordinate, walk 2 steps parallelly to the x axis,- For the 2nd component (y coordinate), walk 4 steps parallelly to the y axis,- For the 3rd element (z coordinate), walk 3 steps parallelly to the z axisYou can play with your own 3-D vectors in this applet: https://www.intmath.com/vectors/3d-space-interactive-applet.php. --- ...So what about a **4-dimensional vector**?The human brain is not able to *visualize* more than 3 dimensions, although it's possible to overcome this limitation with some brilliant [interactive visualizations](https://ciechanow.ski/tesseract/) or [Carl Sagan](https://vimeo.com/199561184)....However, it doesn't really matter whether or not you can imagine vectors in 4D. 
In linear algebra you can extend properties of vectors and matrices, and the operations between them, to **any number of dimensions**. Vectors in $n$-dimensional spaces--- Although it is an abstract concept, we can intuitively imagine all vectors with 2 dimensions as living in the 2D space. If we extend this to sets of all vectors in 3D, 4D, etc... we can define this concept in a more general manner:> **Real coordinate space**> > An $m$-dimensional vector belongs to a [real coordinate space](https://en.wikipedia.org/wiki/Real_coordinate_space) of $m$ dimensions, denoted by $\mathbb{R}^m$, where we have the set of all different $m$-dimensional vectors. 1.1 Vector definition--- What is a vector? An $m$-dimensional vector $\mathbf{x}$ is an ordered list of $m$ scalars represented as $\mathbf{x} = \left[x_1, x_2,..., x_m\right]$, $x_i \in \mathbb{R}$. It has a magnitude and a direction.> **Mathematical Notation explained**> > - $x_i \in \mathbb{R}$ means that each scalar $x_i$ in the vector belongs ($\in$) to the set of all real numbers ($\mathbb{R}$)> - $m$ belongs to the set of all positive integer numbers, $m \in \mathbb{Z}^+$> - when describing vectors we usually use square brackets `[]` and **not** round brackets `()`, although these might be used somewhere else.> - Vectors are usually represented by bold lowercase letters and scalars by a non-bold lowercase letter. However, you might find different notations (e.g. arrow over lowercase letter, $\overrightarrow{v}$, or arrow and uppercase such as the [force vector](https://en.wikipedia.org/wiki/Force) $\overrightarrow{F}$) Vectors can be defined **either** by their **magnitude and direction (geometrically) or** as a **list of numbers (numerically)**: > - **Geometrically** speaking, a vector can be visually represented by an arrow pointing in space, with a given *magnitude* (length), and a *direction*, describing where the arrow points to. > - **Numerically** speaking, you can think of the same vector as an ordered list of scalars (real numbers). In Math everything has a clear definition, even the concept of equality. So the **Equality of vectors** is described as:> * $\mathbf{u}$ and $\mathbf{v}$ are equal if they have the same magnitude and direction, **which implies** that the ordered lists which represent them are equal, **element-wise**. Lastly, there are several ways in which we can represent vectors. We can represent **the same** vector in several ways. For example, we can represent a given *4-dimensional vector* as:* an ordered list, $\left[0,\; -1,\; 2.6,\; \sqrt{3}\right]$, * a **row vector**,$ \begin{bmatrix} 0 & -1 & 2.6 & \sqrt{3}\\ \end{bmatrix}$, * or its **transpose**, a **column vector**,$ \begin{bmatrix} 0 & -1 & 2.6 & \sqrt{3}\\ \end{bmatrix}^T = \begin{bmatrix} 0 \\ -1 \\ 2.6 \\ \sqrt{3} \\ \end{bmatrix}$.The relevance of the type of representation we use will become evident when we introduce matrices. For now, just know that the row representation of a vector is called the **transpose** of its column representation, and vice versa. 1.2 Vector norm--- **Definition** The norm of an $m$-dimensional vector $\mathbf{x} = \left[x_1, x_2, ..., x_m\right]$, $x_i\in \mathbb{R}$, also known as the magnitude or length, is defined as $\|\mathbf{x}\| = \sqrt{x_1^2 + x_2^2 + ... + x_m^2}$. 
**Explanation** The **magnitude** of a 2-dimensional vector, also called the **norm** or the **length**, can be determined by the [Pythagorean theorem](https://en.wikipedia.org/wiki/Pythagorean_theorem), which says that "In a right angled triangle, the square of the hypotenuse is equal to the sum of the squares of the other two sides".On the xy-plane below, the dashed lines represent the two other sides of a right angled triangle, and the hypothenuse corresponds to the length of the vector:We can represent the vector on the image by an ordered list: $\mathbf{a} = [a_1, a_2]$, with components $a_1 = -1$ ($x$ coordinate) and $a_2 = 2$ ($y$ coordinate).Let's use the Pythagorean theorem to **find the norm of $\mathbf{a}$**, $\| a\|$:$$\| a\|^2 = a_1^2 + a_2^2$$$$\| a\| = \sqrt{a_1^2 + a_2^2} = \sqrt{(-1)^2 + (2)^2} = \sqrt{5} $$You can actually use this formula with any $m$-dimensional vector. (*It's a kind of magic...* 🎵) > 📝 **Pen and paper exercise 2**: Calculate the norm of the vectors $[-1, 2]$, $[2, -1]$ and $[1, 2]$, which you've drawn in the last exercise. You should find that they all have the same norm. Find one more vector with the same norm as them. >> **Notice that** there is an infinite number of 2-D vectors with the same norm (length). --- Ever asked yourself...[*bad pun source*](https://math.stackexchange.com/questions/62789/what-does-linear-mean-in-linear-algebracomment146861_62789)Now to the serious answer: linear algebra is "linear" because it's about linear transformations.You might remember learning about $y = mx$, the equation for the line passing through the origin $(0,0)$, in math class: ###Code # run this cell and check the plots below utils.plot_school_functions() ###Output _____no_output_____ ###Markdown All transformations we do on our data are based on linear relations, just like the line on the **first** plot. We don't need to worry about polynomial functions, exponentials, or other evil sorceries. Only simple, beautiful, linear magic. 😍😃Simple, yet powerful. --- 1.3 Vector operations: multiplication by scalar and addition--- 1.3.1 Multiplying a vector by a scalar What happens with a vector if we multiply it by a scalar?Consider the vector $\mathbf{u}=\begin{bmatrix}1\\2\\\end{bmatrix}$. On the image below you can see several vectors that result from multiplying the vector $\mathbf{u}$ by different scalars. **Multiplying $\mathbf{u}$ by -1:** $\hspace{5cm}\mathbf{v}=-1\cdot \mathbf{u}=-1\cdot\begin{bmatrix}1\\2\\\end{bmatrix}$ $=\begin{bmatrix}-1\times 1\\-1\times 2\\\end{bmatrix}=$ $\begin{bmatrix}-1\\-2\\\end{bmatrix}$Multiplying a 2D vector by $-1$ causes it to rotate $180^{\circ}$ ($\pi$ radians) around the origin. Its *magnitude* (norm) remains the same, but the *direction* changes. **Multiplying $\mathbf{u}$ by 0:**$\hspace{5cm} 0 \cdot \mathbf{u}=0\cdot\begin{bmatrix}1\\2\\\end{bmatrix} = $ $\begin{bmatrix}0\\0\\\end{bmatrix}$Multiplying any vector by $0$ results in a vector with the same dimension, where all components are zero (the zero vector). **Multiplying $\mathbf{u}$ by 2**$:\hspace{5cm} \mathbf{w}=2\cdot \mathbf{u}=2\cdot\begin{bmatrix}1\\2\\\end{bmatrix}$ $= \begin{bmatrix}2\\4\\\end{bmatrix}$Multiplying a vector by a positive scalar increases its *magnitude* but does not affect its *direction*. 
**Multiplying $\mathbf{u}$ by 1:**$\hspace{5cm} 1\cdot\mathbf{u}=1\cdot\begin{bmatrix}1\\2\\\end{bmatrix} = $ $\begin{bmatrix}1\times 1\\1\times 2\\\end{bmatrix} = \begin{bmatrix}1\\2\\\end{bmatrix} = \mathbf{u}$Multiplying any vector by the scalar 1 does not change the vector (**identity property**). > Note that the dot symbol $\cdot $ in the expressions above denotes multiplication, however as we'll see in a few sections, when it is written **between two vectors** it means **dot product** instead. > 📝 **Pen and paper exercise 3**: Multiply the vector $\mathbf{u}$ by the scalar -2. What happens to its *magnitude* and *direction*? --- 1.3.2 Addition and subtraction To add two $m$-dimensional vectors, we simply add the corresponding components from each vector.For example, we can add vectors $\mathbf{u} = \begin{bmatrix} 1\\ 2\\\end{bmatrix}$ and $\mathbf{v} = \begin{bmatrix} 3\\ 1\\\end{bmatrix}$ as follows: $\hspace{.2cm} \mathbf{w} = \mathbf{u} + \mathbf{v} = \begin{bmatrix} 1\\ 2\\\end{bmatrix} + $ $\begin{bmatrix} 3\\ 1\\\end{bmatrix} = $ $\begin{bmatrix} 1 + 3\\ 2 + 1\\\end{bmatrix} = $ $\begin{bmatrix} 4\\ 3\\\end{bmatrix}$**Geometrical visualization:**The vectors $\mathbf{u}$, $\mathbf{v}$ and $\mathbf{w}$ are plotted in the image above. Notice how we place the tail (origin) of $\mathbf{b}$, which is equivalent to the vector $\mathbf{v}$ (same magnitude and direction), to the tip of vector $\mathbf{u}$. Hence we walk 3 steps rightwards (x-coordinate of $\mathbf{v}$ = 3) and 1 step upwards (y-coordinate of $\mathbf{v}$ = 1), getting to the tip of $\mathbf{w}$, the result of adding $\mathbf{u}$ and $\mathbf{v}$.**Vector addition is commutative!**Notice that we could also start at vector $\mathbf{v}$, add vector $\mathbf{a}$ (the equivalent of vector $\mathbf{u}$), and we would still get the vector $\mathbf{w}$ as a result. This means that the **addition between vectors is commutative**. The same applies to any two or more $m$-dimensional vectors added together. > ❗ You **cannot** add vectors with different dimensions!!> > For example, if you tried to add $[1, 2]$ with $[1, 2, 3]$, you would have no corresponding component on the 1st vector to add to the 3rd component of the second vector. > 📝 **Pen and paper exercise 4**: Draw the vector $\mathbf{x} = \mathbf{u} - \mathbf{v}$ on the xy-plane:> - Multiply the vector $\mathbf{v}$ by the scalar $-1$ and draw the resulting vector, $\mathbf{-v}$;> - Add $\mathbf{-v}$ to vector $\mathbf{u}$; **Properties of vector addition and scalar multiplication (for any $m$-dimensional vectors):**$\;\;\text{1. }\;\; \mathbf{u} + \mathbf{v} = \mathbf{v} + \mathbf{u}$$\;\;\text{2. }\;\; \mathbf{u} + \mathbf{0} = \mathbf{u}$$\;\;\text{3. }\;\; c\left(\mathbf{u} + \mathbf{v}\right) = c\mathbf{u} + c\mathbf{v},\hspace{.2cm} c\in \mathbb{R}$$\;\;\text{4. }\;\; \left(cd\right)\mathbf{u} = c\left(d\mathbf{u}\right),\hspace{.2cm} c,d \in \mathbb{R}$$\;\;\text{5. }\;\; \mathbf{u} + (\mathbf{v} + \mathbf{w}) = (\mathbf{u} + \mathbf{v}) + \mathbf{w}$$\;\;\text{6. }\;\; \mathbf{u} + (-\mathbf{u}) = \mathbf{0}$$\;\;\text{7. }\;\; (c + d) \mathbf{u} = c \mathbf{u} + d \mathbf{u}$$\;\;\text{8. 
}\;\; 1\mathbf{u} = \mathbf{u}$ > 📝 **Pen and paper exercise 5 (this one is for the skeptical minds)**: Choose 2 of the properties above and check their veracity.>> You can use, for example, the vectors $\mathbf{u} = \begin{bmatrix}1\\ 2\end{bmatrix},\;\;$> $\mathbf{v} = \begin{bmatrix}2\\ 4\end{bmatrix},\;\;$> $\mathbf{w} = \begin{bmatrix}0\\ -1\end{bmatrix}\;\;$> and the scalars (real numbers) $c=-0.2$ and $d=\frac{1}{4}$. If you "find out" that any of the properties is wrong, I'm sorry but you probably made some error on the arithmetics along the way.Trust me, **Linear Algebra will never fail you.** --- --- 1.4 Linear combinations and linear independence--- > 📌 The concept of linear (in)dependence is extremely important in data science! 1.4.1 Linear combination Every time we scale vectors and add them together, we're performing a **linear combination**. This is what it looks like for 2 $m$-dimensional vectors:$$c_1 \cdot \begin{bmatrix} u_1\\ u_2\\ ... \\ u_m\\\end{bmatrix}+c_2\cdot\begin{bmatrix}v_1\\v_2\\\dots\\v_m\\\end{bmatrix}=\begin{bmatrix}c_1\cdot u_1 + c_2\cdot v_1\\ c_1\cdot u_2 + c_2\cdot v_2\\ ... \\ c_1\cdot u_m + c_2\cdot v_m\\\end{bmatrix},\hspace{.2cm} c_i\in \mathbb{R}$$In the *pen and paper* exercise **4**, you basically performed a linear combination between vectors using scalars $1$ and $-1$.---We can have a linear combination of $n$ vectors, as follows:$$c_1\cdot \mathbf{x_1} + c_2\cdot \mathbf{x_2} + ... + c_n\cdot \mathbf{x_n},\hspace{.2cm} c_i\in \mathbb{R},\hspace{.2cm} \mathbf{x_i}\in\mathbb{R}^m$$Note that $\mathbf{x_1}, \mathbf{x_2},..., \mathbf{x_n}$ are **not vector components but actual vectors** (bold lowercase letter). --- ExampleFor $\mathbf{v_1} = \begin{bmatrix}1\\-1\end{bmatrix}$, $\mathbf{v_2} = \begin{bmatrix}2\\2\end{bmatrix}$ and $\mathbf{w} = \begin{bmatrix}4\\0\end{bmatrix}$, we have $w = 2\cdot \mathbf{v_1} + 1\cdot\mathbf{v_2}$. Thus $\mathbf{w}$ can be written as a linear combination of $\mathbf{v_1}$ and $\mathbf{v_2}$. --- 1.4.2 Linear independence **Definition**If we have two vectors $\mathbf{a}$ and $\mathbf{b}$, and $\mathbf{b}$ can be expressed as $c\cdot \mathbf{a}, c\in \mathbb{R}$, we say that $\mathbf{a}$ and $\mathbf{b}$ are **linearly dependent**, or collinear. If either one cannot be expressed as a linear combination of the other, then we say they are **linearly independent**, or non-collinear. **Explanation**For example, $[2, 1]$ and $[4, 2]$ are linearly dependent. You can find a constant that multiplied by one will give you the other. For example, $[2, 1]\times 2 = [4, 2]$ or $[4, 2]\times \frac{1}{2} = [2, 1]$The vectors $[2, 1]$ and $[0, 1]$, on the other hand, are linearly independent. As much as you may try, you can never find a constant that multiplied by one of them will yield the other. --- Generally, a set of vectors is said to be [linearly dependent](https://en.wikipedia.org/wiki/Linear_independence) **if at least one of the vectors in the set** can be defined as a **linear combination** of the others. 
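If you would like to check linear (in)dependence numerically, here is a minimal sketch using NumPy (NumPy is only introduced in section 2 of this notebook, so treat this as a small preview; `np.linalg.matrix_rank`, which counts how many linearly independent vectors a set contains, borrows from the matrix tools of Learning Notebook 2/2):

```python
import numpy as np

u = np.array([2, 1])
v = np.array([4, 2])   # v = 2*u, so {u, v} is a linearly dependent (collinear) set
w = np.array([0, 1])   # no scalar c gives c*u = w, so {u, w} is linearly independent

print(np.allclose(v, 2 * u))                    # True: v is just a scaled copy of u

# the rank of a matrix whose rows are the vectors counts
# how many of them are linearly independent
print(np.linalg.matrix_rank(np.array([u, v])))  # 1 -> dependent set
print(np.linalg.matrix_rank(np.array([u, w])))  # 2 -> independent set

# a linear combination is just scaling and adding:
print(3 * u + (-1) * w)                         # [6 2]
```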
--- 1.5 Representing all vectors in space--- In the image above we can see the resulting vectors of 4 distinct linear combinations of vectors $\mathbf{u}=[1,2]$ and $\mathbf{v}=[3,1]$, namely:- $1\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $2\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $-1\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $1\cdot \mathbf{u} + (-1)\cdot \mathbf{v}$❗ If you had the time (*infinite time*) to plot all possible linear combinations of vectors $\mathbf{u}$ and $\mathbf{v}$, you would fill the entire xy-plane, and get **all 2-dimensional vectors, that is, all vectors in $\mathbb{R}^2$**. **But only because** you used **2** vectors in **2-D** that are **linearly independent**. 💡We could **NOT** create the set of all the 2-dimensional vectors if our vectors were **linearly Dependent**. --- 🤨 Skeptics corner Feeling skeptical about what you just read?If this doesn't yet make sense to you, write down some linear combinations, i.e. ($c\mathbf{u} + d\mathbf{v}$), for the collinear vectors $\mathbf{u}=[1,2]$ and $\mathbf{v} = [2,4]$, varying scalars $c$ and $d$ as much as you want.As you do it, are you getting stuck on the same line?... 😏[See who got stuck on the line too](https://www.theguardian.com/politics/video/2012/aug/01/boris-johnson-stuck-zip-wire-video) --- So, let's wrap it up:> We can define an $n$-dimensional space with linear combinations of $n$ linearly independent vectors:> > - in a **2D** space, we need **2 and only 2 linearly independent vectors** to define all other 2-dimensional vectors as linear combinations of these 2 vectors;> - in a **3D** space, we need **3 and only 3 linearly independent vectors** to define all other 3-dimensional vectors as linear combinations of these 3 vectors;> - and so on and so forth.This also means that, for example, if you define 3 vectors in 2-dimensional space, at least one of them can be written as a linear combination of the other two!You see, linear algebra is a minimalist: if you can use only 2 vectors to represent a 2D space, why waste a 3rd one? 1.6 Dot product--- Definition We already know how to multiply vectors by scalars and add vectors together. But can we multiply one vector by another? Yes we can! Actually, we can do it in [several ways](https://en.wikipedia.org/wiki/Multiplication_of_vectors). However, let's just focus on the simplest one: the **dot product**, a.k.a. the **scalar product** -- because, well, the result is a scalar. The dot product of two $m$-dimensional vectors $\mathbf{u}=[u_1, u_2, ..., u_m]$ and $\mathbf{v}=[v_1, v_2, ..., v_m]$ is a scalar given by $\mathbf{u}\cdot \mathbf{v} = u_1 v_1 + u_2 v_2 + ... + u_m v_m$ Consider the vectors $\mathbf{a} = [1, 2, 0]$ and $\mathbf{b} = [-1, 4, -2]$. 
The dot product between $\mathbf{a}$ and $\mathbf{b}$ is:$$\mathbf{a}\cdot \mathbf{b} = 1\times (-1) + 2\times 4 + 0\times (-2) = -1 + 8 + 0 = 7$$ --- **Another way to determine the dot product**You might also find the dot product of two vectors written as $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta} = \|v\|\|u\|\cos{\theta}$.This means that $\mathbf{u} \cdot \mathbf{v}$ is the magnitude of $\mathbf{v}$ times the magnitude of the component of $\mathbf{u}$ that points along $\mathbf{v}$, namely $\|u\|\cos{\theta}$ (projection of $\mathbf{u}$ onto $\mathbf{v}$):We can therefore determine the angle between any two *non zero* vectors by using the relation: $\;\;\;\;\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta} \iff \cos{\theta} = \frac{\mathbf{u} \cdot \mathbf{v}} {\|u\|\|v\|}$ --- > 📝 **Pen and paper exercise 6 (for the Math nerds)**: Find the angle $\theta$ (in degrees or radians) between the vectors $\mathbf{u} = \begin{bmatrix}1\\ 0\end{bmatrix}$ and $\mathbf{v} = \begin{bmatrix}2\\ 2\sqrt{3}\end{bmatrix}$ using the formula $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta}$.> - Calculate $\mathbf{u}\cdot\mathbf{v}$ using the [dot product formula](1.6-Dot-product);> - Find $\|u\|\|v\|$ using the formula for the [norm of a vector](1.2-Vector-norm).>> You get **3 ⭐️s** if you solve this using a calculator and **5 ⭐️s** if you can solve it without a calculator (you may use the [unit circle](https://en.wikipedia.org/wiki/Unit_circle) below). Once more, all of this applies to any two vectors in any $m$-dimensional real space. --- 1.6.2 Properties of the dot product You don't need to memorize them all, just know they exist.$\;\;\;\;\;\text{1. }\;\; \mathbf{u} \cdot \mathbf{u} = \|\mathbf{u}\|^2$$\;\;\;\;\;\text{2. }\;\; \mathbf{0} \cdot \mathbf{u} = \mathbf{0}$$\;\;\;\;\;\text{3. }\;\; \mathbf{u} \cdot \mathbf{v} = \mathbf{v} \cdot \mathbf{u}$$\;\;\;\;\;\text{4. }\;\; (c \mathbf{u}) \cdot \mathbf{v} = c (\mathbf{u} \cdot \mathbf{v})$$\;\;\;\;\;\text{5. }\;\; \mathbf{u} \cdot \mathbf{v} = \|\mathbf{u}\|\|\mathbf{v}\|\cos{\theta}$$\;\;\;\;\;\text{6. }\;\; \mathbf{u} \cdot (\mathbf{v} + \mathbf{w}) = \mathbf{u} \cdot \mathbf{v} + \mathbf{u} \cdot \mathbf{w}$Remember that $c$ is a scalar (non-bold lowercase letter) and $\mathbf{u}$, $\mathbf{v}$ and $\mathbf{w}$ represent vectors (bold lowercase letters). Also, we have here a special vector, the **zero vector**, where all elements are equal to zero, which we denote by $\mathbf{0}$. If you're still unsure about dot product properties, check this [video](https://www.youtube.com/watch?v=rVQ3G9epCjw). 1.7 Orthogonal vectors--- Two vectors $\mathbf{u}$ and $\mathbf{v}$ are said to be orthogonal if their dot product is equal to zero: $\;\;\mathbf{u}\cdot \mathbf{v} = \mathbf{0}$ If we think about the formula $\mathbf{u} \cdot \mathbf{v} = \|v\|\|u\|\cos{\theta}$, we see that $\|u\|\cos{\theta}$ (projection of vector $\mathbf{u}$ onto $\mathbf{v}$) fits in a point at the tail of $\mathbf{v}$, having magnitude zero: > 📝 **Pen and paper exercise 7**: Determine the dot product between vectors $[1,0]$ and $[0,-2]$. Are they orthogonal? --- 1.8 Vectors recap 1. Vectors can be represented as **ordered lists of scalars**. They have both **magnitude and direction**;2. The **transpose** of the row vector is a column vector, and vice-versa;3. The **norm**/magnitude/length of an $m$-dimensional vector $\mathbf{x}$ is given by $\| \mathbf{x}\| = \sqrt{x_1^2 + x_2^2 + ... + x_m^2}$;4. 
We can represent the set of all $m$-dimensional vectors using linear combinations of $m$ linearly independent vectors (also $m$-dimensional);5. Several properties of addition and multiplication by scalars are generalizable for vectors, such as commutativity, associativity and distributivity;6. The **dot product** between two vectors, $\mathbf{u} \cdot \mathbf{v}$, can be defined in 2 equivalent manners: (i) $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta}$, where $\theta$ refers to the angle between $\mathbf{u}$ and $\mathbf{v}$, or (ii) $\mathbf{u} \cdot \mathbf{v} = u_1 v_1 + u_2 v_2 + ... + u_m v_m.$ --- Break time!Look through your window and watch the world outside. You were blind and now you see: everywhere there are vectors, everywhere there is linear algebra...See you soon! --- 2. Introduction to NumPy arrays--- No old school pen and paper on this section!! Time to put Python and your machine to work. 💻🐍 2.1 The NumPy package--- You might have noticed we have imported a package at the beginning of the notebook, which goes by the name of ``numpy``:```python it is a widespread convention to import numpy using the alias np this convention makes your code more readable, so do use itimport numpy as np````numpy` is commonly imported with the alias `np`. This means that every time we instantiate a new object from NumPy, call a NumPy function or use a module from it, we'll use `np` instead of `numpy`. I know it's fun to be different, and make up your own aliases, but for other's sake, please use the standard alias which is `np`.[NumPy](https://numpy.org/) is the fundamental package for scientific computing with Python. Among many other amazing possibilities, it allows us to work efficiently with vectors and matrices, performing lots of linear algebra operations.In the `requirements.txt` file, you can see that we are using version 1.20 of NumPy. A detailed reference documentation of the functions and classes contained in this package is available in the [NumPy reference](https://numpy.org/doc/1.20/reference/index.html) webpage. You can also download the [pdf version](https://numpy.org/doc/1.20/numpy-ref.pdf) here. 2.1.1 I have lists in Python. Why do I even need NumPy? You might remember lists from SLU04. You already know that you can represent a vector by an ordered list, as follows```pythonu = [2, -1, 0, 2, 0.4, 3, 6, 0, 1] 9-dimensional vector``` Using Python lists and for loops, we could implement some basic operations. At first, you might think this is a reasonable approach for small tasks such as linear combinations of vectors (recall [section 1.4](1.4-Linear-combinations-and-linear-independence)). But is it? Let's run a simulation to compare using Python lists versus using NumPy (do not worry about the code behind this). We'll see how long it would take to compute a simple linear combination of the form $2\mathbf{u} + 2\mathbf{v}$ between two vectors of length $10^{6}$, using NumPy *versus* Python lists. **Creating a linear combination using NumPy** ###Code # perform linear combination using NumPy magic numpy_duration = utils.lincomb_numpy() print("Using NumPy arrays we took {:.2} seconds to perform a linear combination.".format(numpy_duration)) ###Output Using NumPy arrays we took 0.018 seconds to perform a linear combination. 
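###Markdown The helper `utils.lincomb_numpy()` used above is not shown in this notebook. Purely as an illustration (and an assumption about what such a helper might look like, not the actual implementation), a minimal sketch could be:

```python
import time
import numpy as np

def lincomb_numpy_sketch(n=10**6):
    """Hypothetical sketch of a timing helper like utils.lincomb_numpy (the real one may differ)."""
    u = np.random.rand(n)
    v = np.random.rand(n)
    start = time.time()
    w = 2 * u + 2 * v   # the whole linear combination in one vectorized expression, no loop
    return time.time() - start
```

The key point is that the linear combination $2\mathbf{u} + 2\mathbf{v}$ is written as a single vectorized expression; NumPy does the element-wise work in optimized compiled code instead of a Python loop.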
###Markdown **Creating a linear combination using Python lists and `for` loops** ###Code # perform linear combination using Python lists and for loops python_duration = utils.lincomb_lists() print("Using Python lists we took {:.2} seconds to perform a linear combination.".format(python_duration)) ###Output Using Python lists we took 0.14 seconds to perform a linear combination. ###Markdown How much faster was NumPy?... ###Code print("Python lists approach was {} times SLOWER than NumPy!!".format(int(python_duration/numpy_duration))) ###Output Python lists approach was 7 times SLOWER than NumPy!! ###Markdown This might look like a small difference at first sight, but as you start to scale your analysis it will become a difference of minutes and hours...Ain't nobody got time for that!!Other than being much faster, NumPy is also awesome because: - It uses less memory to store the same amount of data - It interfaces with libraries you'll probaby use in Data Science, such as Pandas, Scikit-learn, Matplotlib and many others; - It supports a great variety of numerical types; - It has a comprehensive list of [functions, modules and objects](https://numpy.org/doc/1.20/reference/index.html) useful for linear algebra. By the way, now is the time to start getting comfortable reading documentation. You can either: - refer to documentation pages (for example, you could consult the [documentation webpage for version 1.20.0 of NumPy](https://numpy.org/doc/1.20/) to check its functionalities); - access the docstring using `?` inside a jupyter cell. ###Code # write the name of the function followed by a quotation mark # The docstring with basic information on the function # should appear at the lower part of your browser when you run this cell print? ###Output _____no_output_____ ###Markdown You can close the pager with the docstring when you're done. 2.2 The `ndarray`--- The main object in NumPy is the [NumPy array](https://numpy.org/doc/1.20/reference/arrays.ndarray.html), or simply `ndarray`. An ndarray is a collection of items, all of the same size and type. You can think of arrays as tables (2 dimensions), but you can actually have arrays of 3, 4 or 5 dimensions, and so on. Here we'll deal mainly with ndarrays of 1 or 2 dimensions, where the items stored are numbers. 2.2.1 Create an `ndarray` Let's create an array to represent a vector, using [np.array()](https://numpy.org/doc/1.20/reference/generated/numpy.array.html): ###Code u = [0, 1, 2] # this is a Python list representing a vector a = np.array(u) # create a NumPy array object using the Python list above a ###Output _____no_output_____ ###Markdown You can also create a table of numbers (a matrix) from a list of lists, using `np.array()`: ###Code # notice we input 1 list with 3 lists in it b = np.array([[0, 1, 2], # 1st row [3, 4, 5], # 2nd row [6, 7, 8]]) # 3rd row print(b) # you can use Python's built-in function print() to print an array ###Output [[0 1 2] [3 4 5] [6 7 8]] ###Markdown 2.2.2 Dimensions and shape of an array If you are to manipulate arrays correctly, you definitely need to know what **shape** and **dimension** mean in terms of NumPy arrays. **a) Number of array dimensions** Let's start by checking the array dimensions of `a`, which represents a 3-dimensional vector, using the attribute **`ndim`**: ###Code a = np.array([0, 1, 2]) # a 3-dimensional row vector a.ndim # number of array dimensions ###Output _____no_output_____ ###Markdown Wait, what?! Our array has 1 dimension?... But our vector is 3-dimensional!... What's happening? 
Similarly to when you nest a list inside another list, which is inside another list, and so on... you can also create as many *axes* (**array dimensions**) in arrays as you wish. You could access the first axis (*axis 0*) with the first pair of brackets `[]`, the second axis (*axis 1*) with the second pair of brackets `[]`, and so on. So we can store any number of elements in a 1-dimensional ndarray:- vector: `[1, 2, 3, 4]` has 4 dimensions, but we can store it in a 1D array, `np.array([1, 2, 3, 4])` - vector: `[1, 2, 3, 4, 0, 0, 1]` has 7 dimensions, but we can store it in a 1D array, `np.array([1, 2, 3, 4, 0, 0, 1])` ... A 2D array (2 dimensions) has 2 axes. You can think of it as a table of numbers (matrix): ###Code # you can think of b as a table (matrix) represented by a numpy array b = np.array( [[0, 1, 2], [3, 4, 5], [6, 7, 8]] ) b.ndim # b is a 2D array ###Output _____no_output_____ ###Markdown We can access the first row in the table like this: ###Code b[0] # access 1st row in b ###Output _____no_output_____ ###Markdown We can access the second element of the first row in the table as follows: ###Code b[0][1] # access 2nd element of the 1st row in b ###Output _____no_output_____ ###Markdown We can't access a third dimension because the array is 2D. Thus, the following command will throw an `IndexError` (which we'll catch, because we're awesome): ###Code # trying to access an element in the 3rd axis of a 2D array does not compute try: b[0][2][0] except IndexError as e: print("IndexError:", e) ###Output IndexError: invalid index to scalar variable. ###Markdown **Represent a row vector vs. its transpose in an ndarray** We can represent a column vector with a 2D array (2 axes): ###Code a = np.array([[0], [1], [2]]) # a 2D numpy array, a 3-dimensional COLUMN vector a.ndim # number of array dimensions ###Output _____no_output_____ ###Markdown Notice the difference between a 1D array: ###Code np.array([0, 1, 2]) ###Output _____no_output_____ ###Markdown and a 2D array with the same elements: ###Code np.array([[0, 1, 2]]) ###Output _____no_output_____ ###Markdown In the 2D array, we have one extra outside square bracket (just like in a nested list). **b) Shape of an array** The other attribute you should understand is the **shape** of the array. An array's shape is a *tuple of integers* which indicates the size of the array in each dimension (axis). Hence, for a table (matrix) with $m$ rows and $n$ columns, the shape will be $(m, n)$.The length of the shape tuple corresponds to the number of axes, given by `.ndim`, as we just saw. ###Code a = np.array([[0], [1], [2]]) # same vector as in the last code cell a.shape # shape of the array (number of elements in axis 0, number of elements in axis 1) ###Output _____no_output_____ ###Markdown Above, we see that axis 0 has size 3 (3 rows in the column vector) and axis 1 has size 1 (1 column).Luckily for us, we wont need more than 2 dimensions to represent vectors and matrices. --- Notice that the underlying class which creates the NumPy array is the class `numpy.ndarray`. However, it is advisable to construct arrays using its built-in functions, such as `array`, `zeros` or `ones`.For simplicity, we'll refer to NumPy arrays and arrays interchangeably throughout this notebook (note that these are **not** the Python [`array.array`](https://www.tutorialspoint.com/python/python_arrays.htm) objects).```Pythonif (student.question == "What is a class?") or (student.question == "What is an object?"): print("How dare you?! 
You go review SLU09!")```You can check a quick explanation of `ndarrays` [here](https://www.tutorialspoint.com/numpy/numpy_ndarray_object.htm). --- 2.3 Vectors and linear algebra using NumPy Time to have some fun! Let's put all the knowledge we gathered about vectors to use. 2.3.1 Representing vectors with ndarrays: transpose, `reshape()` Remember learning that the transpose of a row vector is a column vector and vice-versa? NumPy has the transpose implemented as an attribute of arrays.If we start with a 4-dimensional column vector, represented by an array of shape `(4, 1)`, we'll have `2` axes (`.ndim` = 2): ###Code a = np.array([[0], [1], [2], [3]]) # a 4-dimensional column vector print("a:\n", a, "\n") print("a.shape:", a.shape) print("a.ndim:", a.ndim) ###Output a: [[0] [1] [2] [3]] a.shape: (4, 1) a.ndim: 2 ###Markdown Getting the attribute `.T` (for transpose) of the array will return a row vector represented by a 2D array, just as we expected: ###Code a_T = a.T # the transpose of a print("a_T:", a_T, "\n") print("a_T.shape:", a_T.shape) print("a_T.ndim:", a.ndim) ###Output a_T: [[0 1 2 3]] a_T.shape: (1, 4) a_T.ndim: 2 ###Markdown **What would happen if you used a 1D array?** If we try to transpose an array with only 1 dimension (thus the tuple shape has only 1 element), we get exactly the same shape!! ###Code print("shape of 1D array: ", np.array([0,1,2,3]).shape) print("shape of the transpose of 1D array: ", np.array([0,1,2,3]).T.shape) ###Output shape of 1D array: (4,) shape of the transpose of 1D array: (4,) ###Markdown For a 1D array, we cannot get the transpose of our vector using `.T`! --- `reshape()` The `reshape` method allows you to reshape an array of data to any given shape. For example, for the 1-dimensional array below, `array_1d`, with 6 elements (shape = `(6,)`)... ###Code array_1d = np.array([0, 1, 2, 3, 4, 5]) # 1-dimensional array, with 6 elements, shape is (6, ) print(array_1d) print("shape: ", array_1d.shape) ###Output [0 1 2 3 4 5] shape: (6,) ###Markdown ...we can reshape it to a 2-dimensional array with the **same** 6 elements, displayed in a 2-dimensional array of shape `(3, 2)`: ###Code array_reshaped = array_1d.reshape((3, 2)) # reshape to a table! print(array_reshaped) print("shape: ", array_reshaped.shape) ###Output [[0 1] [2 3] [4 5]] shape: (3, 2) ###Markdown We can't however reshape to a shape which is not compatible with the number of elements we have (say, for example, `(4, 2)`): ###Code # we can't reshape to a table with 8 entries 'cause we only have 6 elements in the array! # we'll just catch that ValueError, that NumPy will throw at us, flawlessly try: array_1d.reshape((4, 2)) except ValueError as e: print("ValueError:", e) ###Output ValueError: cannot reshape array of size 6 into shape (4,2) ###Markdown --- **Creating row and column vectors with `reshape()`** To **get the transpose of a vector represented by a 1D array in NumPy**, we would need to **first reshape** the array to 2D, using `.reshape()`, with argument `(1, -1)`, to get a **row vector**, or `(-1, 1)` to get a **column vector**. 
Consider a 1-dimensional array representing a 4-dimensional vector: ###Code a = np.array([0,1,2,3]) # vector represented by a 1D array a.shape # shape of array a ###Output _____no_output_____ ###Markdown We can convert this 1D array to a 2D array row vector using `reshape()` with argument `(1, -1)`: ###Code a_row = a.reshape((1, -1)) # use reshape to get a 2D array representation of a row vector print("a_row:\n", a_row, "\n") print("a_row.shape:", a_row.shape) ###Output a_row: [[0 1 2 3]] a_row.shape: (1, 4) ###Markdown We could also convert it directly to a column vector using `reshape()` with argument `(-1, 1)`: ###Code a_column = a.reshape((-1, 1)) # use reshape to get a 2D array representation of a column vector print("a_column:\n", a_column, "\n") print("a_column.shape:", a_column.shape) ###Output a_column: [[0] [1] [2] [3]] a_column.shape: (4, 1) ###Markdown Because we now have our vector in a 2D array, we could use the transpose attribute safely: ###Code # same result print(a_column.T) ###Output [[0 1 2 3]] ###Markdown > 📌 **Tip**: Errors due to incompatible NumPy shapes and dimensions are a very common issue when using libraries which build on the `ndarray` object, such as some data science libraries you'll learn about. But since you're now aware of the difference between dimension and shape in ndarrays, you'll be much quicker to stop and debug such issues later on! --- NumPy also has a module called [`numpy.linalg`](https://numpy.org/doc/1.20/reference/routines.linalg.html), which is entirely dedicated to linear algebra operations. How amazing is that? 2.3.2 Vector norm using [`numpy.linalg.norm()`](https://numpy.org/doc/1.20/reference/generated/numpy.linalg.norm.html) [Just like most humans](https://vimeo.com/379750591), vectors always conform to the norm (recall section [1.2 Vector norm](1.2-Vector-norm)):$$\|\mathbf{u}\| = \sqrt{u_1^2 + u_2^2 + ... + u_m^2} = \sqrt{\sum_{i}^{m} u_i^2}$$ ###Code np.linalg.norm? ###Output _____no_output_____ ###Markdown Ok, from the docstring we see that `np.linalg.norm` expects an array. Let's determine the norm of the vector $\begin{bmatrix}-1\\ 2\end{bmatrix}$, as we did in section [1.2 Vector norm](1.2-Vector-norm), using the Pythagorean theorem: ###Code a = np.array([[-1], [2]]) np.linalg.norm(a) ###Output _____no_output_____ ###Markdown Which is actually the square root of $5$: ###Code # np.sqrt() computes the square root value of its input np.sqrt(5) ###Output _____no_output_____ ###Markdown 2.3.3 Vector operations (multiplication by scalars and addition) Remember linear combinations being about multiplying vectors by scalars and adding them together?$$c\; \mathbf{u} + d\; \mathbf{v}$$ ###Code # let's create two ndarrays representing 4-dimensional vectors u = np.array([1, 0, 1, 1]) v = np.array([1, -2, 0, 1]) # print vectors to check your lucky numbers print(f"u = {u}") print(f"v = {v}\n") ###Output u = [1 0 1 1] v = [ 1 -2 0 1] ###Markdown We can use the [numeric operators](https://docs.python.org/3/library/stdtypes.htmlnumeric-types-int-float-complex) we already know with NumPy. ###Code # multiplication by a scalar -(1/2) * u # addition u + v # linear combination (-(1/2) * u) + (1 * v) ###Output _____no_output_____ ###Markdown 2.3.4 Dot product Finally, the dot product:$$\mathbf{u} \cdot \mathbf{v} = u_1 v_1 + ... + u_m v_m$$ Let's use [`numpy.dot`](https://numpy.org/doc/1.20/reference/generated/numpy.dot.html?highlight=dot%20product) to determine the dot product of two vectors! 
###Code # create two vectors using numpy arrays u = np.array([-1, 2, 2]) # row vector, 1D array v = np.array([-2, 1, 1]) # row vector, 1D array # determine the dot product between vectors u and v np.dot(u, v) # create two vectors using numpy arrays u = np.array([-1, 2, 2]).reshape((1, 3)) # row vector, 2D array v = np.array([-2, 1, 1]) # row vector, 1D array # determine the dot product between vectors u and v np.dot(u, v) # let's check those results "by hand", because we love linear algebra! ((-1) * (-2)) + (2 * 1) + (2 * 1) ###Output _____no_output_____ ###Markdown SLU12 - Linear Algebra & NumPy, Part 1 Learning Notebook 1/2 - vectors *Here we start with the most important concept in Linear Algebra, the vector, and learn about the NumPy library and its main object, the numpy array. In Learning Notebook 2/2 we will then extend this concept and all its magical properties to matrices.* --- **What's in this notebook**1. [Vectors](1.-Vectors) 1.1 [Vector definition](1.1-Vector-definition) - vector definition, representations and __the transpose__ of a vector 1.2 [Vector norm](1.2-Vector-norm) 1.3 [Vector operations: multiplication by scalar and addition](1.3-Vector-operations:-multiplication-by-scalar-and-addition) 1.4 [Linear combinations and linear independence](1.4-Linear-combinations-and-linear-independence) 1.5 [Representing all vectors in space](1.5-Representing-all-vectors-in-space) 1.6 [Dot product](1.6-Dot-product) 1.7 [Orthogonal vectors](1.7-Orthogonal-vectors) 1.8 [Vectors recap](1.8-Vectors-recap) 2. [Introduction to NumPy arrays](2.-Introduction-to-NumPy-arrays) 2.1 [The NumPy package](2.1-The-NumPy-package) 2.2 [The `ndarray`](2.2-The-ndarray) - the numpy array, dimensions and shape of an array 2.3 [Vectors and linear algebra using NumPy](2.3-Vectors-and-linear-algebra-using-NumPy) - basic linear algebra operations on vectors --- Imports ###Code # numpy is the package we're going to learn about # it is a widespread convention to import numpy using the alias np # this convention makes your code more readable, so do use it import numpy as np # auxiliary stuff import utils ###Output _____no_output_____ ###Markdown --- You probably know a lot about programming in Python at this point, so take a moment to feel proud of yourself.Now, to become a good data professional, you also need Mathematics. Why? Because just about any machine learning algorithm you will use is built on linear algebra, calculus and statistics concepts.Blindly feeding data into an ML algorithm just because you know it predicts stuff, will leave you in the same situation of a cat owner who puts his cat into the washing machine just because its purpose is to clean... *Do not be a negligent cat owner...* ---By the end of SLUs 12 and 13, you'll be familiar with all the linear algebra you need to read the matrix form solution to the *multiple linear regression algorithm*, the most popular starting point for machine learning students:$$ \mathbf{\beta} = (X^TX)^{-1}(X^T\mathbf{y})$$That's a lot of Maths! 
🙀--- Recommendations for units (SLU12 + SLU13)**1 - Don't rush it**: linear algebra concepts take time to internalize;**2 - Follow the sequence**: each topic builds upon the one before;**3 - Solve the (optional) *pen and paper exercises***: they serve to help you check your learning;**4 - Reach out on Slack if you get stuck**: [*There is no such thing as a dumb question*](https://en.wikipedia.org/wiki/Carl_Sagan)._The most important thing in SLUs 12+13 is to get some intuition on the basic concepts in Linear Algebra, not to master it in 2 weeks!! If Mathematics is not your cup of tea, but you still want to become a good (and responsible) data scientist, you can and definitely should continue studying Linear Algebra **and** Statistics after the course._ --- 0. Intro What is Linear Algebra?The formal definition of Linear algebra as per [Wikipedia](https://en.wikipedia.org/wiki/Linear_algebra) is that it is the branch of Mathematics concerning linear equations and functions, and how we represent them in vector spaces and matrices. If you're new to Linear Algebra however, this definition will probably not be very helpful. So to put it more simply, [linear algebra is about](https://machinelearningmastery.com/gentle-introduction-linear-algebra/) using "arithmetic on columns of numbers called __vectors__ and arrays of numbers called __matrices__, to create new columns and arrays of numbers."> Note that the definition above is quite simplistic, to the level of [Gauss](https://en.wikipedia.org/wiki/Carl_Friedrich_Gauss) probably not approving it if he were alive. But it serves our purposes. --- 1. Vectors Vectors in 1-Dimensional (1D) spaces Consider the kitten below, walking timidly at a speed of $0.1$ m/s in order to reach his food bowl:The __velocity__ of the kitten is a quantity **defined by both a magnitude and a direction**, which we call the velocity **vector** of the kitten, $\mathbf{v} = [0.1]$ m/s:- The value $0.1$ corresponds to a **magnitude** which tells us how fast the kitten is walking;- We know the kitten is walking in a __straight line__ to the food bowl, so we can define his **direction**.We could draw this 1D (1-dimensional) vector in a 1D [coordinate system](https://en.wikipedia.org/wiki/Coordinate_system), as follows:---Our kitten has now eaten all his food, and he's staring at you, asking for a refill:He won't move until you feed him, and now we could represent his zero velocity by the zero vector $\mathbf{v} = [0]$ m/s. --- Vectors in 2-Dimensional (2D) spaces A **2-dimensional vector** belongs to the 2-dimensional real coordinate space, $\mathbb{R}^2$, and we can plot it using a Cartesian coordinate system.Notice that the vector $[-1, 2]$ has its tail located at the origin of the x-y plane, $(0,0)$, and its tip (head) located at the point $(-1, 2)$. To go from tail to tip, we walk 1 step leftwards (x-coordinate = -1) and 2 steps upwards (y-coordinate = 2).📌 In linear algebra it is standard to root the vector at the origin of the coordinate system. We can draw the 2D-vector of a cyclist's velocity climbing a hill at 12 km/h, $\mathbf{v}$, on the xy-plane, where y is perpendicular to the centre of the Earth: > 📝 **Pen and paper exercise 1**: Grab a pen (actually pencil+rubber might be better) and a piece of paper and draw the vectors $[-1, 2]$, $[2, -1]$ and $[1, 2]$ on the xy-plane. Notice that they all have the same *magnitude* (length) but different *direction*s. 
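If you would like to double-check your drawing on the computer, here is a minimal sketch using `matplotlib` (matplotlib is not imported or used elsewhere in this notebook, so treat it as an optional extra rather than part of the exercise):

```python
import matplotlib.pyplot as plt

vectors = [(-1, 2), (2, -1), (1, 2)]   # the three vectors from the exercise

fig, ax = plt.subplots()
for x, y in vectors:
    # draw each vector as an arrow rooted at the origin, to scale
    ax.quiver(0, 0, x, y, angles='xy', scale_units='xy', scale=1)
ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3)
ax.set_aspect('equal')
ax.grid(True)
plt.show()
```

All three arrows have the same length but point in different directions, which is exactly what the exercise asks you to notice.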
Vectors in 3-Dimensional (3D) spaces A **3-dimensional vector** belongs to the 3-dimensional real coordinate space, $\mathbb{R}^3$. We can draw it on the xyz coordinate system, using the same logic as for the xy-plane.To get from the tail to the tip of the vector, for the 1st component (x coordinate) you would walk parallelly to the yz [plane](https://en.wikipedia.org/wiki/Plane_(geometry)), for the 2nd component (y coordinate) you would walk parallelly to the xz plane, and for the 3rd element (z coordinate), you would walk parallelly to the xy plane.You can play with your own 3-D vectors in this applet: https://www.intmath.com/vectors/3d-space-interactive-applet.php. Vectors in $n$-dimensional spaces Now what about a **4-dimensional vector**?The human brain is not able to *visualize* more than 3 dimensions, although it's possible to overcome this limitation with some brilliant [interactive visualizations](https://ciechanow.ski/tesseract/) or the help of [Carl Sagan](https://vimeo.com/199561184)....Though that's a good exercise, it doesn't really matter whether or not you can see in 4D. In linear algebra you can extend properties of vectors and matrices, and the operations between them, to **any number of dimensions**. 1.1 Vector definition An $m$-dimensional vector $\mathbf{x}$ is an ordered list of $m$ scalars represented as $\mathbf{x} = \left[x_1, x_2,..., x_m\right]$, $x_i \in \mathbb{R}$. It has a magnitude and a direction. **Some notes on mathematical notation:**- $x_i \in \mathbb{R}$ means that each scalar $x_i$ in the vector belongs ($\in$) to the set of all real numbers ($\mathbb{R}$)- $m$ belongs to the set of all positive integer numbers, $m \in \mathbb{Z}^+$- when describing vectors we usually use square brackets `[]` and **not** round brackets `()`, although these might be used somewhere else.- Vectors are usually represented by bold lowercase letters and scalars by a non-bold lowercase letter. However, you might find different notations (e.g. arrow over lowercase letter, $\overrightarrow{v}$, or arrow and uppercase such as the [force vector](https://en.wikipedia.org/wiki/Force) $\overrightarrow{F}$) **Real coordinate space**> An $m$-dimensional vector belongs to a [real coordinate space](https://en.wikipedia.org/wiki/Real_coordinate_space) of $m$ dimensions, denoted by $\mathbb{R}^m$, where we have the set of all different $m$-dimensional vectors. Vectors can be defined either by their magnitude and direction (geometrically) or as a list of numbers (numerically) (a) **Geometrically** speaking, a vector is an arrow pointing in space, with a given *magnitude* (length), and a *direction*, describing where the arrow points to. (b) **Numerically** speaking, you can think of the same vector as an ordered list of scalars (real numbers). **Equality of vectors*** $\mathbf{u}$ and $\mathbf{v}$ are equal if they have the same magnitude and direction, **which implies** that the ordered lists which represent them are equal, **element-wise**. 1.1.1 Vector representations and the transpose We can represent **the same** vector in several ways. 
For example, we can represent a given *4-dimensional vector* as:* an ordered list, $\left[0,\; -1,\; 2.6,\; \sqrt{3}\right]$, * a **row vector**,$ \begin{bmatrix} 0 & -1 & 2.6 & \sqrt{3}\\ \end{bmatrix}$, * or its **transpose**, a **column vector**,$ \begin{bmatrix} 0 & -1 & 2.6 & \sqrt{3}\\ \end{bmatrix}^T = \begin{bmatrix} 0 \\ -1 \\ 2.6 \\ \sqrt{3} \\ \end{bmatrix}$.The relevance of the type of representation we use will become evident when we introduce matrices. For now, just know that the row representation of a vector is called the **transpose** of its column representation, and vice versa. 1.2 Vector norm **Definition** The norm of an $m$-dimensional vector $\mathbf{x} = \left[x_1, x_2, ..., x_m\right]$, $x_i\in \mathbb{R}$, also known as the magnitude or length, is defined as $\|\mathbf{x}\| = \sqrt{x_1^2 + x_2^2 + ... + x_m^2}$. **Explanation** The **magnitude** of a 2-dimensional vector, also called the **norm** or the **length**, can be determined by the [Pythagorean theorem](https://en.wikipedia.org/wiki/Pythagorean_theorem), which says that "In a right angled triangle, the square of the hypotenuse is equal to the sum of the squares of the other two sides".On the xy-plane below, the dashed lines represent the two other sides of a right angled triangle, and the hypothenuse corresponds to the length of the vector:We can represent the vector on the image by an ordered list: $\mathbf{a} = [a_1, a_2]$, with components $a_1 = -1$ ($x$ coordinate) and $a_2 = 2$ ($y$ coordinate).Let's use the Pythagorean theorem to **find the norm of $\mathbf{a}$**, $\| a\|$:$$\| a\|^2 = a_1^2 + a_2^2$$$$\| a\| = \sqrt{a_1^2 + a_2^2} = \sqrt{(-1)^2 + (2)^2} = \sqrt{5} $$You can actually use this formula with any $m$-dimensional vector. (*It's a kind of magic...* 🎵) > 📝 **Pen and paper exercise 2**: Calculate the norm of the vectors $[-1, 2]$, $[2, -1]$ and $[1, 2]$, which you've drawn in the last exercise. You should find that they all have the same norm. Find one more vector with the same norm as them. >> **Notice that** there is an infinite number of 2-D vectors with the same norm (length). --- Ever asked yourself...[*bad pun source*](https://math.stackexchange.com/questions/62789/what-does-linear-mean-in-linear-algebracomment146861_62789)Now to the [serious answer](https://math.stackexchange.com/questions/62789/what-does-linear-mean-in-linear-algebra/6279162791): linear algebra is "linear" because it's about linear functions. Remember learning about $y = mx + b$, the equation for the line, in math class? ###Code # run this cell and check the plots below utils.plot_school_functions() ###Output _____no_output_____ ###Markdown In linear algebra we deal with linear functions (*deal with it!*). This means that all transformations we do on our data are based on linear relations, just like the line on the first plot. We don't need to worry about polynomial functions, exponentials, or other evil sorceries. Only simple, beautiful, linear magic. 😍😃Simple, yet powerful. --- 1.3 Vector operations: multiplication by scalar and addition 1.3.1 Multiplying a vector by a scalar What happens with a vector if we multiply it by a scalar?Consider the vector $\mathbf{u}=\begin{bmatrix}1\\2\\\end{bmatrix}$. On the image below you can see several vectors that result from multiplying the vector $\mathbf{u}$ by different scalars. 
**Multiplying $\mathbf{u}$ by -1:** $\hspace{5cm}\mathbf{v}=-1\cdot \mathbf{u}=-1\cdot\begin{bmatrix}1\\2\\\end{bmatrix}$ $=\begin{bmatrix}-1\times 1\\-1\times 2\\\end{bmatrix}=$ $\begin{bmatrix}-1\\-2\\\end{bmatrix}$Multiplying a 2D vector by $-1$ causes it to rotate $180^{\circ}$ ($\pi$ radians) around the origin. Its *magnitude* (norm) remains the same, but the *direction* changes. **Multiplying $\mathbf{u}$ by 0:**$\hspace{5cm} 0 \cdot \mathbf{u}=0\cdot\begin{bmatrix}1\\2\\\end{bmatrix} = $ $\begin{bmatrix}0\\0\\\end{bmatrix}$Multiplying any vector by $0$ results in a vector with the same dimension, where all components are zero (the zero vector). **Multiplying $\mathbf{u}$ by 2**$:\hspace{5cm} \mathbf{w}=2\cdot \mathbf{u}=2\cdot\begin{bmatrix}1\\2\\\end{bmatrix}$ $= \begin{bmatrix}2\\4\\\end{bmatrix}$Multiplying a vector by a positive scalar increases its *magnitude* but does not affect its *direction*. **Multiplying $\mathbf{u}$ by 1:**$\hspace{5cm} 1\cdot\mathbf{u}=1\cdot\begin{bmatrix}1\\2\\\end{bmatrix} = $ $\begin{bmatrix}1\times 1\\1\times 2\\\end{bmatrix} = \begin{bmatrix}1\\2\\\end{bmatrix} = \mathbf{u}$Multiplying any vector by the scalar 1 does not change the vector (**identity property**). > Note that the dot symbol $\cdot $ in the expressions above denotes multiplication, however as we'll see in a few sections, when it is written **between two vectors** it means **dot product** instead. > 📝 **Pen and paper exercise 3**: Multiply the vector $\mathbf{u}$ by the scalar -2. What happens to its *magnitude* and *direction*? --- 1.3.2 Addition and subtraction To add two $m$-dimensional vectors, we simply add the corresponding components from each vector.For example, we can add vectors $\mathbf{u} = \begin{bmatrix} 1\\ 2\\\end{bmatrix}$ and $\mathbf{v} = \begin{bmatrix} 3\\ 1\\\end{bmatrix}$ as follows: $\hspace{.2cm} \mathbf{w} = \mathbf{u} + \mathbf{v} = \begin{bmatrix} 1\\ 2\\\end{bmatrix} + $ $\begin{bmatrix} 3\\ 1\\\end{bmatrix} = $ $\begin{bmatrix} 1 + 3\\ 2 + 1\\\end{bmatrix} = $ $\begin{bmatrix} 4\\ 3\\\end{bmatrix}$**Geometrical visualization:**The vectors $\mathbf{u}$, $\mathbf{v}$ and $\mathbf{w}$ are plotted on the image above.Notice we place the tail (origin) of $\mathbf{b}$, which is equivalent to the vector $\mathbf{v}$ (equal in magnitude and direction), to the tip of vector $\mathbf{u}$.Hence we walk 3 steps rightwards (x-coordinate of $\mathbf{v}$ = 3) and 1 step upwards (y-coordinate of $\mathbf{v}$ = 1), getting to the tip of $\mathbf{w}$, the result of adding $\mathbf{u}$ and $\mathbf{v}$.**Vector addition is commutative:**Notice that we could also start at vector $\mathbf{v}$, add vector $\mathbf{a}$ (the equivalent of vector $\mathbf{u}$), and we would still get the vector $\mathbf{w}$ as a result. This means that the **addition between vectors is commutative**. The same applies to any two or more $m$-dimensional vectors added together. > ❗ You **cannot** add vectors with different dimensions!!> > For example, if you tried to add $[1, 2]$ with $[1, 2, 3]$, you would have no corresponding component on the 1st vector to add to the 3rd component of the second vector. > 📝 **Pen and paper exercise 4**: Draw the vector $\mathbf{x} = \mathbf{u} - \mathbf{v}$ on the xy-plane:> - Multiply the vector $\mathbf{v}$ by the scalar $-1$ and draw the resulting vector, $\mathbf{-v}$;> - Add $\mathbf{-v}$ to vector $\mathbf{u}$; **Properties of vector addition and scalar multiplication (for any $m$-dimensional vectors):**$\;\;\text{1. 
}\;\; \mathbf{u} + \mathbf{v} = \mathbf{v} + \mathbf{u}$$\;\;\text{2. }\;\; \mathbf{u} + \mathbf{0} = \mathbf{u}$$\;\;\text{3. }\;\; c\left(\mathbf{u} + \mathbf{v}\right) = c\mathbf{u} + c\mathbf{v},\hspace{.2cm} c\in \mathbb{R}$$\;\;\text{4. }\;\; \left(cd\right)\mathbf{u} = c\left(d\mathbf{u}\right),\hspace{.2cm} c,d \in \mathbb{R}$$\;\;\text{5. }\;\; \mathbf{u} + (\mathbf{v} + \mathbf{w}) = (\mathbf{u} + \mathbf{v}) + \mathbf{w}$$\;\;\text{6. }\;\; \mathbf{u} + (-\mathbf{u}) = \mathbf{0}$$\;\;\text{7. }\;\; (c + d) \mathbf{u} = c \mathbf{u} + d \mathbf{u}$$\;\;\text{8. }\;\; 1\mathbf{u} = \mathbf{u}$ > 📝 **Pen and paper exercise 5 (this one is for the skeptical minds)**: Choose 2 of the properties above and check their veracity.>> You can use, for example, the vectors $\mathbf{u} = \begin{bmatrix}1\\ 2\end{bmatrix},\;\;$> $\mathbf{v} = \begin{bmatrix}2\\ 4\end{bmatrix},\;\;$> $\mathbf{w} = \begin{bmatrix}0\\ -1\end{bmatrix}\;\;$> and the scalars (real numbers) $c=-0.2$ and $d=\frac{1}{4}$. If you "find out" any of the rules is wrong, I'm sorry but you probably made some error on the arithmetics along the way.***Trust me, linear algebra will never fail you. Never. Ever.*** --- --- 1.4 Linear combinations and linear independence 1.4.1 Linear combination Every time we scale vectors and add them together, we're performing a **linear combination**. This is what it looks like for 2 $m$-dimensional vectors:$$c_1 \cdot \begin{bmatrix} u_1\\ u_2\\ ... \\ u_m\\\end{bmatrix}+c_2\cdot\begin{bmatrix}v_1\\v_2\\\dots\\v_m\\\end{bmatrix}=\begin{bmatrix}c_1\cdot u_1 + c_2\cdot v_1\\ c_1\cdot u_2 + c_2\cdot v_2\\ ... \\ c_1\cdot u_m + c_2\cdot v_m\\\end{bmatrix},\hspace{.2cm} c_i\in \mathbb{R}$$In the *pen and paper* exercise **4**, you basically performed a linear combination between vectors using scalars $1$ and $-1$.---We can have a linear combination of $n$ vectors, as follows:$$c_1\cdot \mathbf{x_1} + c_2\cdot \mathbf{x_2} + ... + c_n\cdot \mathbf{x_n},\hspace{.2cm} c_i\in \mathbb{R},\hspace{.2cm} \mathbf{x_i}\in\mathbb{R}^m$$Note that $\mathbf{x_1}, \mathbf{x_2},..., \mathbf{x_n}$ are **not vector components but actual vectors** (bold lowercase letter). --- ExampleFor $\mathbf{v_1} = \begin{bmatrix}1\\-1\end{bmatrix}$, $\mathbf{v_2} = \begin{bmatrix}2\\2\end{bmatrix}$ and $\mathbf{w} = \begin{bmatrix}4\\0\end{bmatrix}$, we have $w = 2\cdot \mathbf{v_1} + 1\cdot\mathbf{v_2}$. Thus $\mathbf{w}$ can be written as a linear combination of $\mathbf{v_1}$ and $\mathbf{v_2}$. --- 1.4.2 Linear independence **Definition**If we have two vectors $\mathbf{a}$ and $\mathbf{b}$, and $\mathbf{b}$ can be expressed as $c\cdot \mathbf{a}, c\in \mathbb{R}$, we say that $\mathbf{a}$ and $\mathbf{b}$ are **linearly dependent**, or collinear. If either one cannot be expressed as a linear combination of the other, then we say they are **linearly independent**, or non-collinear. **Explanation**For example, $[2, 1]$ and $[4, 2]$ are linearly dependent. You can find a constant that multiplied by one will give you the other. For example, $[2, 1]\times 2 = [4, 2]$ or $[4, 2]\times \frac{1}{2} = [2, 1]$The vectors $[2, 1]$ and $[0, 1]$, on the other hand, are linearly independent. As much as you may try, you can never find a constant that multiplied by one of them will yield the other. 
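To see why no such constant can exist, write the claim out component by component and look for a contradiction:

$$c \cdot \begin{bmatrix}2\\1\end{bmatrix} = \begin{bmatrix}0\\1\end{bmatrix} \iff \begin{cases}2c = 0\\ c = 1\end{cases}$$

The first equation forces $c = 0$ while the second forces $c = 1$, so no single scalar $c$ works and the two vectors are indeed linearly independent.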
--- Generally, a set of vectors is said to be [linearly dependent](https://en.wikipedia.org/wiki/Linear_independence) **if at least one of the vectors in the set** can be defined as a **linear combination** of the others.> 📌 **Tip**: The concept of linear dependence is extremely important in data science! --- 1.5 Representing all vectors in space In the image above we can see the resulting vectors of 4 distinct linear combinations of vectors $\mathbf{u}=[1,2]$ and $\mathbf{v}=[3,1]$, namely:- $1\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $2\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $-1\cdot \mathbf{u} + 1\cdot \mathbf{v}$- $1\cdot \mathbf{u} + (-1)\cdot \mathbf{v}$❗ Actually, if you had the time (*infinite time*) to plot all possible linear combinations of vectors $\mathbf{u}$ and $\mathbf{v}$, you would fill the entire xy-plane, and get **all 2-dimensional vectors, that is, all vectors in $\mathbb{R}^2$**.**But only because** you used **2** vectors in **2-D** that are **linearly independent**. --- Skeptical minds corner So, we could **NOT** create the set of all the 2-dimensional vectors if our vectors were **linearly Dependent**.Feeling skeptical about this?If this doesn't yet make sense to you, write down some linear combinations, i.e. ($c\mathbf{u} + d\mathbf{v}$), for the collinear vectors $\mathbf{u}=[1,2]$ and $\mathbf{v} = [2,4]$, varying scalars $c$ and $d$ as much as you want.Now try not to get [stuck on the line](https://www.theguardian.com/politics/video/2012/aug/01/boris-johnson-stuck-zip-wire-video)... --- **We can define an $n$-dimensional space with linear combinations of $n$ linearly independent vectors**- in a **2D** space, we need **2 and only 2 linearly independent vectors** to define all other 2-dimensional vectors as linear combinations of these 2 vectors;- in a **3D** space, we need **3 and only 3 linearly independent vectors** to define all other 3-dimensional vectors as linear combinations of these 3 vectors;- and so on and so forth.This also means that, for example, if you define 3 vectors in 2-dimensional space, at least one of them can be written as a linear combination of the other two.You see, linear algebra is a minimalist: if you can use only 2 vectors to represent a 2D space, why waste a 3rd one? 1.6 Dot product Definition We already know how to multiply vectors by scalars and add vectors together. But can we multiply one vector by another? Yes we can! Actually, we can do it in [several ways](https://en.wikipedia.org/wiki/Multiplication_of_vectors). However, let's just focus on the simplest one: the **dot product**, a.k.a. the **scalar product** -- because the result is a scalar. The dot product of two $m$-dimensional vectors $\mathbf{u}=[u_1, u_2, ..., u_m]$ and $\mathbf{v}=[v_1, v_2, ..., v_m]$ is a scalar given by: $$\mathbf{u}\cdot \mathbf{v} = u_1 v_1 + u_2 v_2 + ... + u_m v_m$$ Consider the vectors $\mathbf{a} = [1, 2, 0]$ and $\mathbf{b} = [-1, 4, -2]$. 
The dot product between $\mathbf{a}$ and $\mathbf{b}$ is:$$\mathbf{a}\cdot \mathbf{b} = 1\times (-1) + 2\times 4 + 0\times (-2) = -1 + 8 + 0 = 7$$ --- **Another way to determine the dot product**You might also find the dot product of two vectors written as $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta} = \|v\|\|u\|\cos{\theta}$.This means that $\mathbf{u} \cdot \mathbf{v}$ is the magnitude of $\mathbf{v}$ times the magnitude of the component of $\mathbf{u}$ that points along $\mathbf{v}$, namely $\|u\|\cos{\theta}$ (projection of $\mathbf{u}$ onto $\mathbf{v}$):We can therefore determine the angle between any two *non zero* vectors by using the relation: $\;\;\;\;\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta} \iff \cos{\theta} = \frac{\mathbf{u} \cdot \mathbf{v}} {\|u\|\|v\|}$ --- > 📝 **Pen and paper exercise 6 (for the Math nerds)**: Find the angle $\theta$ (in degrees or radians) between the vectors $\mathbf{u} = \begin{bmatrix}1\\ 0\end{bmatrix}$ and $\mathbf{v} = \begin{bmatrix}2\\ 2\sqrt{3}\end{bmatrix}$ using the formula $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta}$.> - Calculate $\mathbf{u}\cdot\mathbf{v}$ using the [dot product formula](1.6-Dot-product);> - Find $\|u\|\|v\|$ using the formula for the [norm of a vector](1.2-Vector-norm).>> You get **3 ⭐️s** if you solve this using a calculator and **5 ⭐️s** if you can solve it without a calculator (you may use the [unit circle](https://en.wikipedia.org/wiki/Unit_circle) below). Once more, all of this applies to any two vectors in any $m$-dimensional real space. --- 1.6.2 Properties of the dot product You don't need to memorize them all, just know they exist.$\;\;\;\;\;\text{1. }\;\; \mathbf{u} \cdot \mathbf{u} = \|\mathbf{u}\|^2$$\;\;\;\;\;\text{2. }\;\; \mathbf{0} \cdot \mathbf{u} = \mathbf{0}$$\;\;\;\;\;\text{3. }\;\; \mathbf{u} \cdot \mathbf{v} = \mathbf{v} \cdot \mathbf{u}$$\;\;\;\;\;\text{4. }\;\; (c \mathbf{u}) \cdot \mathbf{v} = c (\mathbf{u} \cdot \mathbf{v})$$\;\;\;\;\;\text{5. }\;\; \mathbf{u} \cdot \mathbf{v} = \|\mathbf{u}\|\|\mathbf{v}\|\cos{\theta}$$\;\;\;\;\;\text{6. }\;\; \mathbf{u} \cdot (\mathbf{v} + \mathbf{w}) = \mathbf{u} \cdot \mathbf{v} + \mathbf{u} \cdot \mathbf{w}$Remember that $c$ is a scalar (non-bold lowercase letter) and $\mathbf{u}$, $\mathbf{v}$ and $\mathbf{w}$ represent vectors (bold lowercase letters). Also, we have here a special vector, the **zero vector**, where all elements are equal to zero, which we denote by $\mathbf{0}$. If you're still unsure about dot product properties, check this [video](https://www.youtube.com/watch?v=rVQ3G9epCjw). 1.7 Orthogonal vectors Two vectors $\mathbf{u}$ and $\mathbf{v}$ are said to be orthogonal if their dot product is equal to zero: $\;\;\mathbf{u}\cdot \mathbf{v} = \mathbf{0}$ If we think about the formula $\mathbf{u} \cdot \mathbf{v} = \|v\|\|u\|\cos{\theta}$, we see that $\|u\|\cos{\theta}$ (projection of vector $\mathbf{u}$ onto $\mathbf{v}$) fits in a point at the tail of $\mathbf{v}$, having magnitude zero: > 📝 **Pen and paper exercise 7**: Determine the dot product between vectors $[1,0]$ and $[0,-2]$. Are they orthogonal? --- 1.8 Vectors recap 1. Vectors can be represented as **ordered lists of scalars**. They have both **magnitude and direction**;2. The **transpose** of the row vector is a column vector, and vice-versa;3. The **norm**/magnitude/length of an $m$-dimensional vector $\mathbf{x}$ is given by $\| \mathbf{x}\| = \sqrt{x_1^2 + x_2^2 + ... + x_m^2}$;4. 
We can represent the set of all $m$-dimensional vectors using linear combinations of $m$ linearly independent vectors (also $m$-dimensional);5. Several properties of addition and multiplication by scalars are generalizable for vectors, such as commutativity, associativity and distributivity;6. The **dot product** between two vectors, $\mathbf{u} \cdot \mathbf{v}$, can be defined in 2 equivalent manners: (i) $\mathbf{u} \cdot \mathbf{v} = \|u\|\|v\|\cos{\theta}$, where $\theta$ refers to the angle between $\mathbf{u}$ and $\mathbf{v}$, or (ii) $\mathbf{u} \cdot \mathbf{v} = u_1 v_1 + u_2 v_2 + ... + u_m v_m.$ --- Break time!Look through your window and watch the world outside. You were blind and now you see: everywhere there are vectors, everywhere there is linear algebra...See you soon! --- 2. Introduction to NumPy arrays No old school pen and paper on this section!! Time to put Python and your machine to work. 💻🐍 2.1 The NumPy package You might have noticed we have imported a package at the beginning of the notebook, which goes by the name of ``numpy``:```python it is a widespread convention to import numpy using the alias np this convention makes your code more readable, so do use itimport numpy as np````numpy` is commonly imported with the alias `np`. This means that every time we instantiate a new object from NumPy, call a NumPy function or use a module from it, we'll use `np` instead of `numpy`. I know it's fun to be different, and make up your own aliases, but it's better to be readable than to be laughable.[NumPy](https://numpy.org/) is the fundamental package for scientific computing with Python. Among many other amazing possibilities, it allows us to work efficiently with vectors and matrices, performing lots of linear algebra operations.In the `requirements.txt` file, you can see that we are using version 1.20 of NumPy. A detailed reference documentation of the functions and classes contained in this package is available in the [NumPy reference](https://numpy.org/doc/1.20/reference/index.html) webpage. You can also download the [pdf version](https://numpy.org/doc/1.20/numpy-ref.pdf) here. 2.1.1 Why NumPy? You might remember lists from SLU04. You already know that you can represent a vector by an ordered list, as follows: ###Code u = [2, -1, 0, 2, 0.4, 3, 6, 0, 1] # 9-dimensional vector print("Length of the list representing vector u (not the same as the length of the vector!):", len(u)) ###Output Length of the list representing vector u (not the same as the length of the vector!): 9 ###Markdown Using Python lists and for loops, we could implement some basic operations. At first, you might think this is a reasonable approach for small tasks such as linear combinations of vectors (recall [section 1.4](1.4-Linear-combinations-and-linear-independence)). But is it? Let's run a simulation to compare using Python lists with using NumPy (do not worry about the code behind this). We'll see how long it would take to compute a simple linear combination of the form $2\mathbf{u} + 2\mathbf{v}$ between two vectors of length $10^6$, using NumPy *versus* Python lists. **Creating a linear combination using NumPy** ###Code # perform linear combination using NumPy magic numpy_duration = utils.lincomb_numpy() print("Using NumPy arrays we took {:.2} seconds to perform a linear combination.".format(numpy_duration)) ###Output Using NumPy arrays we took 0.043 seconds to perform a linear combination. 
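###Markdown
The timing helpers live in the accompanying `utils` module, whose code is not shown in this notebook. Purely as an illustration of what such a helper might look like (the real implementation may well differ), a NumPy version of the benchmark could be sketched as follows:

```python
import time
import numpy as np

def lincomb_numpy(n=10**6):
    # time the linear combination 2*u + 2*v for two random vectors of length n
    u = np.random.rand(n)
    v = np.random.rand(n)
    start = time.time()
    result = 2 * u + 2 * v  # the result itself is not needed, only the elapsed time
    return time.time() - start
```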
###Markdown **Creating a linear combination using Python lists and `for` loops** ###Code # perform linear combination using Python lists and for loops python_duration = utils.lincomb_lists() print("Using Python lists we took {:.2} seconds to perform a linear combination.".format(python_duration)) ###Output Using Python lists we took 0.24 seconds to perform a linear combination. ###Markdown How much faster was NumPy?... ###Code print("Python lists approach was {} times SLOWER than NumPy!!".format(int(python_duration/numpy_duration))) ###Output Python lists approach was 5 times SLOWER than NumPy!! ###Markdown [Ain't nobody got time for that!!](https://www.youtube.com/watch?v=bFEoMO0pc7k&feature=youtu.be&t=10)Other than being much faster, NumPy is also awesome because: - It uses less memory to store the same amount of data - It interfaces with libraries you'll often use, such as Pandas, Scikit-learn, Matplotlib and many others; - It supports a great variety of numerical types; - It has a comprehensive list of [functions, modules and objects](https://numpy.org/doc/1.20/reference/index.html) useful for linear algebra. By the way, now is the time to start getting comfortable reading documentation. You can either: - refer to documentation pages (for example, you could consult the [documentation webpage for version 1.20.0 of NumPy](https://numpy.org/doc/1.20/) to check its functionalities); - access the docstring using `?` inside a jupyter cell. ###Code # write the name of the function followed by a quotation mark # The docstring with basic information on the function # should appear at the lower part of your browser when you run this cell print? ###Output _____no_output_____ ###Markdown You can close the pager with the docstring when you're done. 2.2 The `ndarray` The main object in NumPy is the [NumPy array](https://numpy.org/doc/1.20/reference/arrays.ndarray.html), or simply `ndarray`. An ndarray is a collection of items, all of the same size and type. You can think of arrays as tables (2 dimensions), but you can actually have arrays of 3, 4 or 5 dimensions, and so on. Here we'll deal mainly with ndarrays of 1 or 2 dimensions, where the items stored are numbers. 2.2.1 Create an `ndarray` Let's create an array to represent a vector, using [np.array()](https://numpy.org/doc/1.20/reference/generated/numpy.array.html): ###Code u = [0, 1, 2] # this is a Python list representing a vector a = np.array(u) # create a NumPy array object using the Python list above a ###Output _____no_output_____ ###Markdown You can also create a table of numbers (a matrix) from a list of lists, using `np.array()`: ###Code # notice we input 1 list with 3 lists in it b = np.array([[0, 1, 2], # 1st row [3, 4, 5], # 2nd row [6, 7, 8]]) # 3rd row print(b) # you can use Python's built-in function print() to print an array ###Output [[0 1 2] [3 4 5] [6 7 8]] ###Markdown 2.2.2 Dimensions and shape of an array If you are to manipulate arrays correctly, you definitely need to know what **shape** and **dimension** mean in terms of NumPy arrays. **a) Number of array dimensions** Let's start by checking the array dimensions of `a`, which represents a 3-dimensional vector, using the attribute **`ndim`**: ###Code a = np.array([0, 1, 2]) # a 3-dimensional row vector a.ndim # number of array dimensions ###Output _____no_output_____ ###Markdown Wait, what?! Our array has 1 dimension?... But our vector is 3-dimensional!... What's happening? Similarly to when you nest a list inside another list, which is inside another list, and so on... 
you can also create as many *axes* (**array dimensions**) in arrays as you wish. You could access the first axis (*axis 0*) with the first pair of brackets `[]`, the second axis (*axis 1*) with the second pair of brackets `[]`, and so on. So we can store any number of elements in a 1-dimensional ndarray:- vector: `[1, 2, 3, 4]` has 4 dimensions, but we can store it in a 1D array, `np.array([1, 2, 3, 4])` - vector: `[1, 2, 3, 4, 0, 0, 1]` has 7 dimensions, but we can store it in a 1D array, `np.array([1, 2, 3, 4, 0, 0, 1])` ... A 2D array (2 dimensions) has 2 axes. You can think of it as a table of numbers (matrix): ###Code # you can think of b as a table (matrix) represented by a numpy array b = np.array( [[0, 1, 2], [3, 4, 5], [6, 7, 8]] ) b.ndim # b is a 2D array ###Output _____no_output_____ ###Markdown We can access the first row in the table like this: ###Code b[0] # access 1st row in b ###Output _____no_output_____ ###Markdown We can access the second element of the first row in the table as follows: ###Code b[0][1] # access 2nd element of the 1st row in b ###Output _____no_output_____ ###Markdown We can't access a third dimension because the array is 2D. Thus, the following command will throw an `IndexError` (which we'll catch, because we're awesome): ###Code # trying to access an element in the 3rd axis of a 2D array does not compute try: b[0][2][0] except IndexError as e: print("IndexError:", e) ###Output IndexError: invalid index to scalar variable. ###Markdown **Represent a row vector vs. its transpose in an ndarray** We can represent a column vector with a 2D array (2 axes): ###Code a = np.array([[0], [1], [2]]) # a 2D numpy array, a 3-dimensional COLUMN vector a.ndim # number of array dimensions ###Output _____no_output_____ ###Markdown Notice the difference between a 1D array: ###Code np.array([0, 1, 2]) ###Output _____no_output_____ ###Markdown and a 2D array with the same elements: ###Code np.array([[0, 1, 2]]) ###Output _____no_output_____ ###Markdown In the 2D array, we have one extra outside square bracket (just like in a nested list). **b) Shape of an array** The other attribute you should understand is the **shape** of the array. An array's shape is a *tuple of integers* which indicates the size of the array in each dimension (axis). Hence, for a table (matrix) with $m$ rows and $n$ columns, the shape will be $(m, n)$.The length of the shape tuple corresponds to the number of axes, given by `.ndim`, as we just saw. ###Code a = np.array([[0], [1], [2]]) # same vector as in the last code cell a.shape # shape of the array (number of elements in axis 0, number of elements in axis 1) ###Output _____no_output_____ ###Markdown Above, we see that axis 0 has size 3 (3 rows in the column vector) and axis 1 has size 1 (1 column).Luckily for us, we wont need more than 2 dimensions to represent vectors and matrices. --- Notice that the underlying class which creates the NumPy array is the class `numpy.ndarray`. However, it is advisable to construct arrays using its built-in functions, such as `array`, `zeros` or `ones`.For simplicity, we'll refer to NumPy arrays and arrays interchangeably throughout this notebook (note that these are **not** the Python [`array.array`](https://www.tutorialspoint.com/python/python_arrays.htm) objects).```Pythonif (student.question == "What is a class?") or (student.question == "What is an object?"): print("How dare you?! 
You go review SLU09!")```You can check a quick explanation of `ndarrays` [here](https://www.tutorialspoint.com/numpy/numpy_ndarray_object.htm). --- 2.3 Vectors and linear algebra using NumPy Time to have some fun! Let's put all the knowledge we gathered about vectors to use. 2.3.1 Representing vectors with ndarrays: transpose, `reshape()` Remember learning that the transpose of a row vector is a column vector and vice-versa? NumPy has the transpose implemented as an attribute of arrays.If we start with a 4-dimensional column vector, represented by an array of shape `(4, 1)`, we'll have `2` axes (`.ndim` = 2): ###Code a = np.array([[0], [1], [2], [3]]) # a 4-dimensional column vector print("a:\n", a, "\n") print("a.shape:", a.shape) print("a.ndim:", a.ndim) ###Output a: [[0] [1] [2] [3]] a.shape: (4, 1) a.ndim: 2 ###Markdown Getting the attribute `.T` (for transpose) of the array will return a row vector represented by a 2D array, just as we expected: ###Code a_T = a.T # the transpose of a print("a_T:", a_T, "\n") print("a_T.shape:", a_T.shape) print("a_T.ndim:", a.ndim) ###Output a_T: [[0 1 2 3]] a_T.shape: (1, 4) a_T.ndim: 2 ###Markdown **What would happen if you used a 1D array?** If we try to transpose an array with only 1 dimension (thus the tuple shape has only 1 element), we get exactly the same shape!! ###Code print("shape of 1D array: ", np.array([0,1,2,3]).shape) print("shape of the transpose of 1D array: ", np.array([0,1,2,3]).T.shape) ###Output shape of 1D array: (4,) shape of the transpose of 1D array: (4,) ###Markdown For a 1D array, we cannot get the transpose of our vector using `.T`! --- `reshape()` The `reshape` method allows you to reshape an array of data to any given shape. For example, for the 1-dimensional array below, `array_1d`, with 6 elements (shape = `(6,)`)... ###Code array_1d = np.array([0, 1, 2, 3, 4, 5]) # 1-dimensional array, with 6 elements, shape is (6, ) print(array_1d) print("shape: ", array_1d.shape) ###Output [0 1 2 3 4 5] shape: (6,) ###Markdown ...we can reshape it to a 2-dimensional array with the **same** 6 elements, displayed in a 2-dimensional array of shape `(3, 2)`: ###Code array_reshaped = array_1d.reshape((3, 2)) # reshape to a table! print(array_reshaped) print("shape: ", array_reshaped.shape) ###Output [[0 1] [2 3] [4 5]] shape: (3, 2) ###Markdown We can't however reshape to a shape which is not compatible with the number of elements we have (say, for example, `(4, 2)`): ###Code # we can't reshape to a table with 8 entries 'cause we only have 6 elements in the array! # we'll just catch that ValueError, that NumPy will throw at us, flawlessly try: array_1d.reshape((4, 2)) except ValueError as e: print("ValueError:", e) ###Output ValueError: cannot reshape array of size 6 into shape (4,2) ###Markdown --- **Creating row and column vectors with `reshape()`** To **get the transpose of a vector represented by a 1D array in NumPy**, we would need to **first reshape** the array to 2D, using `.reshape()`, with argument `(1, -1)`, to get a **row vector**, or `(-1, 1)` to get a **column vector**. 
Consider a 1-dimensional array representing a 4-dimensional vector: ###Code a = np.array([0,1,2,3]) # vector represented by a 1D array a.shape # shape of array a ###Output _____no_output_____ ###Markdown We can convert this 1D array to a 2D array row vector using `reshape()` with argument `(1, -1)`: ###Code a_row = a.reshape((1, -1)) # use reshape to get a 2D array representation of a row vector print("a_row:\n", a_row, "\n") print("a_row.shape:", a_row.shape) ###Output a_row: [[0 1 2 3]] a_row.shape: (1, 4) ###Markdown We could also convert it directly to a column vector using `reshape()` with argument `(-1, 1)`: ###Code a_column = a.reshape((-1, 1)) # use reshape to get a 2D array representation of a column vector print("a_column:\n", a_column, "\n") print("a_column.shape:", a_column.shape) ###Output a_column: [[0] [1] [2] [3]] a_column.shape: (4, 1) ###Markdown Because we now have our vector in a 2D array, we could use the transpose attribute safely: ###Code # same result print(a_column.T) ###Output [[0 1 2 3]] ###Markdown > 📌 **Tip**: Errors due to incompatible NumPy shapes and dimensions are a very common issue when using libraries which build on the `ndarray` object, such as some data science libraries you'll learn about. But since you're now aware of the difference between dimension and shape in ndarrays, you'll be much quicker to stop and debug such issues later on! --- NumPy also has a module called [`numpy.linalg`](https://numpy.org/doc/1.20/reference/routines.linalg.html), which is entirely dedicated to linear algebra operations. How amazing is that? 2.3.2 Vector norm using [`numpy.linalg.norm()`](https://numpy.org/doc/1.20/reference/generated/numpy.linalg.norm.html) [Just like most humans](https://vimeo.com/379750591), vectors always conform to the norm (recall section [1.2 Vector norm](1.2-Vector-norm)):$$\|\mathbf{u}\| = \sqrt{u_1^2 + u_2^2 + ... + u_m^2} = \sqrt{\sum_{i}^{m} u_i^2}$$ ###Code np.linalg.norm? ###Output _____no_output_____ ###Markdown Ok, from the docstring we see that `np.linalg.norm` expects an array. Let's determine the norm of the vector $\begin{bmatrix}-1\\ 2\end{bmatrix}$, as we did in section [1.2 Vector norm](1.2-Vector-norm), using the Pythagorean theorem: ###Code a = np.array([[-1], [2]]) np.linalg.norm(a) ###Output _____no_output_____ ###Markdown Which is actually the square root of $5$: ###Code # np.sqrt() computes the square root value of its input np.sqrt(5) ###Output _____no_output_____ ###Markdown 2.3.3 Vector operations (multiplication by scalars and addition) Remember linear combinations being about multiplying vectors by scalars and adding them together?$$c\; \mathbf{u} + d\; \mathbf{v}$$ ###Code # let's create two ndarrays representing 4-dimensional vectors u = np.array([1, 0, 1, 1]) v = np.array([1, -2, 0, 1]) # print vectors to check your lucky numbers print(f"u = {u}") print(f"v = {v}\n") ###Output u = [1 0 1 1] v = [ 1 -2 0 1] ###Markdown We can use the [numeric operators](https://docs.python.org/3/library/stdtypes.htmlnumeric-types-int-float-complex) we already know with NumPy. ###Code # multiplication by a scalar -(1/2) * u # addition u + v # linear combination (-(1/2) * u) + (1 * v) ###Output _____no_output_____ ###Markdown 2.3.4 Dot product Finally, the dot product:$$\mathbf{u} \cdot \mathbf{v} = u_1 v_1 + ... + u_m v_m$$ Let's use [`numpy.dot`](https://numpy.org/doc/1.20/reference/generated/numpy.dot.html?highlight=dot%20product) to determine the dot product of two vectors! 
###Code # create two vectors using numpy arrays u = np.array([-1, 2, 2]) # row vector, 1D array v = np.array([-2, 1, 1]) # row vector, 1D array # determine the dot product between vectors u and v np.dot(u, v) # create two vectors using numpy arrays u = np.array([-1, 2, 2]).reshape((1, 3)) # row vector, 2D array v = np.array([-2, 1, 1]) # row vector, 1D array # determine the dot product between vectors u and v np.dot(u, v) # let's check those results "by hand", because we love linear algebra! ((-1) * (-2)) + (2 * 1) + (2 * 1) ###Output _____no_output_____
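###Markdown
To close off the vectors part, here is one extra cell, not part of the original exercises, that combines `np.dot` and `np.linalg.norm` to recover the angle between two vectors. It uses the same pair as pen and paper exercise 6, so you can check your answer against it:

```python
import numpy as np

u = np.array([1, 0])
v = np.array([2, 2 * np.sqrt(3)])

# cos(theta) = (u . v) / (||u|| ||v||)
cos_theta = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
theta = np.arccos(cos_theta)

print(np.degrees(theta))  # ~60.0 degrees, i.e. pi/3 radians
```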
examples/ch15/snippets_ipynb/16_02-03.ipynb
###Markdown 16.2 Case Study: Classification with k-Nearest Neighbors and the Digits Dataset, Part 1**This file contains Sections 16.2 and 16.3 and all of their subsections and Self Check exercises** Classification Problems Our Approach ![Self Check Exercises check mark image](files/art/check.png) 16.2 Self Check**1. _(Fill-In)_** `________` classification divides samples into two distinct classes, and `________`-classification divides samples into many distinct classes.**Answer:** binary, multi. 16.2.1 k-Nearest Neighbors Algorithm Hyperparameters and Hyperparameter Tuning ![Self Check Exercises check mark image](files/art/check.png) 16.2.1 Self Check**1. _(True/False)_** In machine learning, a model implements a machine-learning algorithm. In scikit-learn, models are called estimators.**Answer:** True.**2. _(Fill-In)_** The process of choosing the best value of *k* for the k-nearest neighbors algorithm is called `________`**Answer:** hyperparameter tuning. 16.2.2 Loading the Dataset**We added `%matplotlib inline` to enable Matplotlib in this notebook.** ###Code %matplotlib inline from sklearn.datasets import load_digits digits = load_digits() ###Output _____no_output_____ ###Markdown Displaying the Description ###Code print(digits.DESCR) ###Output _____no_output_____ ###Markdown Checking the Sample and Target Sizes ###Code digits.target[::100] digits.data.shape digits.target.shape ###Output _____no_output_____ ###Markdown A Sample Digit Image ###Code digits.images[13] ###Output _____no_output_____ ###Markdown Preparing the Data for Use with Scikit-Learn ###Code digits.data[13] ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.2.2 Self Check**1. _(Fill-In)_** A `Bunch` object’s `________` and `________` attributes are NumPy arrays containing the dataset’s samples and labels, respectively.**Answer:** `data`, `target`.**2. _(True/False)_** A scikit-learn `Bunch` object contains only a dataset’s data.**Answer:** False. A scikit-learn `Bunch` object contains a dataset’s data and information about the dataset (called metadata), available through the `DESCR` attribute.**3. _(IPython Session)_** For sample number `22` in the Digits dataset, display the 8-by-8 image data and numeric value of the digit the image represents.**Answer:** ###Code digits.images[22] digits.target[22] ###Output _____no_output_____ ###Markdown 16.2.3 Visualizing the Data Creating the Diagram ###Code import matplotlib.pyplot as plt figure, axes = plt.subplots(nrows=4, ncols=6, figsize=(6, 4)) ### Displaying Each Image and Removing the Axes Labels for item in zip(axes.ravel(), digits.images, digits.target): axes, image, target = item axes.imshow(image, cmap=plt.cm.gray_r) axes.set_xticks([]) # remove x-axis tick marks axes.set_yticks([]) # remove y-axis tick marks axes.set_title(target) plt.tight_layout() # This placeholder cell was added because we had to combine # the sections snippets 12-13 for the visualization to work in Jupyter # and want the subsequent snippet numbers to match the book ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.2.3 Self Check**1. _(Fill-In)_** The process of familiarizing yourself with your data is called `________`.**Answer:** data exploration.**2. _(IPython Session)_** Display the image for sample number `22` of the Digits dataset. 
**Answer:** ###Code axes = plt.subplot() image = plt.imshow(digits.images[22], cmap=plt.cm.gray_r) xticks = axes.set_xticks([]) yticks = axes.set_yticks([]) # placeholder due to merge of prior cells # placeholder due to merge of prior cells # placeholder due to merge of prior cells ###Output _____no_output_____ ###Markdown 16.2.4 Splitting the Data for Training and Testing ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( digits.data, digits.target, random_state=11) ###Output _____no_output_____ ###Markdown Training and Testing Set Sizes ###Code X_train.shape X_test.shape ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.2.4 Self Check**1. _(True/False)_** You should typically use all of a dataset’s data to train a model.**Answer:** False. It’s important to set aside a portion of your data for testing, so you can evaluate a model’s performance using data that the model has not yet seen. **2. _(Discussion)_** For the Digits dataset, what numbers of samples would the following statement reserve for training and testing purposes? ```pythonX_train, X_test, y_train, y_test = train_test_split( digits.data, digits.target, test_size=0.40)```**Answer:** 1078 and 719. 16.2.5 Creating the Model ###Code from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() ###Output _____no_output_____ ###Markdown 16.2.6 Training the Model ###Code knn.fit(X=X_train, y=y_train) ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.2.6 Self Check**1. _(Fill-In)_** The `KNeighborsClassifier` is said to be `________` because its work is performed only when you use it to make predictions.**Answer:** lazy.**2. _(True/False)_** Each scikit-learn estimator’s `fit` method simply loads a dataset.**Answer:** False. For most, scikit-learn estimators, the `fit` method loads the data into the estimator then uses that data to perform complex calculations behind the scenes that learn from the data and train the model. 16.2.7 Predicting Digit Classes ###Code predicted = knn.predict(X=X_test) expected = y_test predicted[:20] expected[:20] wrong = [(p, e) for (p, e) in zip(predicted, expected) if p != e] wrong ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.2.7 Self Check**1. _(IPython Session)_** Using the `predicted` and `expected` arrays, calculate and display the prediction accuracy percentage.**Answer:** ###Code print(f'{(len(expected) - len(wrong)) / len(expected):.2%}') ###Output _____no_output_____ ###Markdown **2. _(IPython Session)_** Rewrite the list comprehension in snippet `[29]` using a for loop. 
Which coding style do you prefer?**Answer:** ###Code wrong = [] for p, e in zip(predicted, expected): if p != e: wrong.append((p, e)) wrong ###Output _____no_output_____ ###Markdown 16.3 Case Study: Classification with k-Nearest Neighbors and the Digits Dataset, Part 2 16.3.1 Metrics for Model Accuracy Estimator Method `score` ###Code print(f'{knn.score(X_test, y_test):.2%}') ###Output _____no_output_____ ###Markdown Confusion Matrix ###Code from sklearn.metrics import confusion_matrix confusion = confusion_matrix(y_true=expected, y_pred=predicted) confusion ###Output _____no_output_____ ###Markdown Classification Report ###Code from sklearn.metrics import classification_report names = [str(digit) for digit in digits.target_names] print(classification_report(expected, predicted, target_names=names)) ###Output _____no_output_____ ###Markdown Visualizing the Confusion Matrix ###Code import pandas as pd confusion_df = pd.DataFrame(confusion, index=range(10), columns=range(10)) import seaborn as sns axes = sns.heatmap(confusion_df, annot=True, cmap='nipy_spectral_r') ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.3.1 Self Check**1. _(Fill-In)_** A Seaborn `________` displays values as colors, often with values of higher magnitude displayed as more intense colors.**Answer:** heat map.**2. _(True/False)_** In a classification report, the precision specifies the total number of correct predictions for a class divided by the total number of samples for that class. **Answer:** True.**3. _(Discussion)_** Explain row 3 of the confusion matrix presented in this section:```[ 0, 0, 0, 42, 0, 1, 0, 1, 0, 0]```**Answer:** The number `42` in column index 3 indicates that 42 `3`s were correctly predicted as 3s. The number `1` at column indices 5 and 7 indicates that one `3` was incorrectly classified as a `5` and one was incorrectly classified as a `7`. 16.3.2 K-Fold Cross-Validation KFold Class ###Code from sklearn.model_selection import KFold kfold = KFold(n_splits=10, random_state=11, shuffle=True) ###Output _____no_output_____ ###Markdown Using the `KFold` Object with Function `cross_val_score` ###Code from sklearn.model_selection import cross_val_score scores = cross_val_score(estimator=knn, X=digits.data, y=digits.target, cv=kfold) scores print(f'Mean accuracy: {scores.mean():.2%}') print(f'Accuracy standard deviation: {scores.std():.2%}') ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.3.2 Self Check**1. _(True/False)_** Randomizing the data by shuffling it before splitting it into folds is particularly important if the samples might be ordered or grouped. **Answer:** True.**2. _(True/False)_** When you call `cross_val_score` to peform k-fold cross-validation, the function returns the best score produced while testing the model with each fold.**Answer:** False. The function returns an array containing the scores for each fold. The mean of those scores is the estimator’s overall score. 
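To make that last answer concrete, the sketch below (an addition, not one of the book's snippets) shows roughly what `cross_val_score` does with the `KFold` object: train on each training fold, score on the held-out fold and collect one accuracy value per fold. Note that `cross_val_score` actually clones the estimator for each fold, which this simplified loop does not do:

```python
import numpy as np

fold_scores = []
for train_idx, test_idx in kfold.split(digits.data, digits.target):
    knn.fit(X=digits.data[train_idx], y=digits.target[train_idx])
    fold_scores.append(knn.score(digits.data[test_idx], digits.target[test_idx]))

print(np.array(fold_scores))          # one score per fold, like the scores array
print(f'{np.mean(fold_scores):.2%}')  # overall estimate, like scores.mean()
```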
16.3.3 Running Multiple Models to Find the Best One ###Code from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB estimators = { 'KNeighborsClassifier': knn, 'SVC': SVC(gamma='scale'), 'GaussianNB': GaussianNB()} for estimator_name, estimator_object in estimators.items(): kfold = KFold(n_splits=10, random_state=11, shuffle=True) scores = cross_val_score(estimator=estimator_object, X=digits.data, y=digits.target, cv=kfold) print(f'{estimator_name:>20}: ' + f'mean accuracy={scores.mean():.2%}; ' + f'standard deviation={scores.std():.2%}') ###Output _____no_output_____ ###Markdown Scikit-Learn Estimator Diagram ![Self Check Exercises check mark image](files/art/check.png) 16.3.3 Self Check**1. _(True/False)_** You should choose the best estimator before performing your machine learning study.**Answer:** False. It’s difficult to know in advance which machine learning model(s) will perform best for a given dataset, especially when they hide the details of how they operate from their users. For this reason, you should run multiple models to determine which is the best for your study. **2. _(Discussion)_** How would you modify the code in this section to so that it would also test a `LinearSVC` estimator?**Answer:** You’d import the `LinearSVC` class, add a key–value pair to the `estimators` dictionary (`'LinearSVC': LinearSVC()`), then execute the `for` loop, which tests every estimator in the dictionary. 16.3.4 Hyperparameter Tuning ###Code for k in range(1, 20, 2): kfold = KFold(n_splits=10, random_state=11, shuffle=True) knn = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(estimator=knn, X=digits.data, y=digits.target, cv=kfold) print(f'k={k:<2}; mean accuracy={scores.mean():.2%}; ' + f'standard deviation={scores.std():.2%}') ###Output _____no_output_____ ###Markdown ![Self Check Exercises check mark image](files/art/check.png) 16.3.4 Self Check**1. _(True/False)_** When you create an estimator object, the default hyperparameter values that scikit-learn uses are generally the best ones for every machine learning study. **Answer:** False. The default hyperparameter values make it easy for you to test estimators quickly. In real-world machine learning studies, you’ll want to use hyperparameter tuning to choose hyperparameter values that produce the best possible predictions. ###Code ########################################################################## # (C) Copyright 2019 by Deitel & Associates, Inc. and # # Pearson Education, Inc. All Rights Reserved. # # # # DISCLAIMER: The authors and publisher of this book have used their # # best efforts in preparing the book. These efforts include the # # development, research, and testing of the theories and programs # # to determine their effectiveness. The authors and publisher make # # no warranty of any kind, expressed or implied, with regard to these # # programs or to the documentation contained in these books. The authors # # and publisher shall not be liable in any event for incidental or # # consequential damages in connection with, or arising out of, the # # furnishing, performance, or use of these programs. # ########################################################################## ###Output _____no_output_____
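###Markdown
Returning to the hyperparameter tuning in Section 16.3.4: the manual loop over k values works, but scikit-learn also provides `GridSearchCV`, which automates the same kind of search. The cell below is an added sketch, not one of the book's snippets; the parameter grid simply mirrors the k values tried above:

```python
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = {'n_neighbors': list(range(1, 20, 2))}
grid = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=param_grid, cv=10)
grid.fit(X=digits.data, y=digits.target)

print(grid.best_params_)          # the best k found
print(f'{grid.best_score_:.2%}')  # its mean cross-validated accuracy
```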
examples/Covariance driven stochastic subspace identification.ipynb
###Markdown Generating dataLet us start by generating some data. Consider a shear frame with 9 floors, the shear frame is damped with Rayleigh damping, defined by 5% in the first and last mode. The load (input) consists of zero mean independent white process noise applied to all floorsThe output is assumed to be accelerations at each floor together with white, zero mean, independent measurement noise. ###Code # Create a shear frame sf = strid.utils.ShearFrame(9, 1e3, 1e7) sf.set_rayleigh_damping_matrix([sf.get_natural_frequency(1), sf.get_natural_frequency(sf.n)], [.05]*2) # Determine the time discretization and period Tmax = 1. / strid.w2f(sf.get_natural_frequency(1)) fmax = strid.w2f(sf.get_natural_frequency(sf.n)) T = 1000*Tmax fs = 10 * fmax t = np.arange(0., T, 1/fs) # Define loads on system ## Unmeasureable: Stochastic loads on all floors (Process noise) w = np.random.normal(size=(sf.n, t.size)) * 1e-1 ## Load matrix, f F = w.copy() # Simulate response, accelerations at each floor measured y0, _, _ = sf.simulate(t, F) # Add measurement noise v = np.random.normal(size=y0.shape)*y0.std()*1e-0 y = y0 + v plt.figure("Accelerations measured at top floor") plt.plot(t, y[-1], label="w/noise") plt.plot(t, y0[-1], label="wo/noise") plt.legend() plt.xlabel("Time (s)") plt.ylabel("Amplitude") plt.figure("PSD of accelerations at top floor") for yi in [y[-1], y0[-1]]: freqs, Gyy = scipy.signal.welch(yi, fs, nperseg=2**10) plt.semilogy(freqs, Gyy) for n in range(1, 1+sf.n): plt.axvline(strid.w2f(sf.get_natural_frequency(n)), alpha=.3) plt.ylabel('PSD') plt.xlabel('Frequency (Hz)') ###Output _____no_output_____ ###Markdown System identificationUse the measured outputs $y$ to determine the system properties of the shear frame. First we create a subspace identification instance with the input, note that we choose to use the covariance driven method `CovarianceDrivenStochasticSID`, but note that `strid` also has an implementation of the data driven method `DataDrivenStochasticSID`. ###Code ssid = strid.CovarianceDrivenStochasticSID(y, fs) ###Output _____no_output_____ ###Markdown We will use a stabilization diagram to determine the physical modes (poles), first we perform system identification from the data with 25 block rows and model order between 5 and 150 in increments of 5. ###Code modes = {} for i, order in enumerate(range(5, 150, 5)): A, C, G, R0 = ssid.perform(order, 25) modes[order] = strid.Mode.find_modes_from_ss(A, C, ssid.fs) ###Output _____no_output_____ ###Markdown and then we plot the identified modes in the stabilization diagram and we pick the stable modes. ###Code stabdiag = strid.StabilizationDiagram() stabdiag.plot(modes) ###Output _____no_output_____ ###Markdown Finally, we can access the picked modes from the `picked_modes` property of the stabilization diagram object, see below. ###Code modes = stabdiag.picked_modes ###Output _____no_output_____ ###Markdown Comparison between estimated and true modesBelow, we compare the identified modes against the exact solution. 
###Code fig = plt.figure("Damping estimate") axd = fig.add_axes((0.1, 0.1, .8, .8)) axd.set(xlabel='Frequency', ylabel='Damping ratio', title='Estimated and true frequency and damping', ylim=(0, .10) ) figmodes, axes = plt.subplots(ncols=3, nrows=3, dpi=144) res = [] for n in range(1, 1+sf.n): ax = axes.flatten()[n-1] un = sf.get_mode_shape(n) fn = strid.w2f(sf.get_natural_frequency(n)) xin = sf.get_rayleigh_damping_ratio(n) nmax = np.argmax([strid.modal_assurance_criterion(mode.v, un) for mode in modes]) mode = modes[nmax] line, = axd.plot(mode.f, mode.xi, 'x') line, = axd.plot(fn, xin, 'o', mec=line.get_color(), mfc=(0, 0, 0, 0)) ferr = (mode.f-fn)/fn * 100 xierr = (mode.xi-xin)/xin*100 mac = strid.modal_assurance_criterion(un, mode.v) res.append([n, ferr, xierr, mac*100,]) v_true = np.r_[0., un] v = np.r_[0, mode.v] v = strid.modal_scale_factor(v, v_true)*v z = np.arange(v.size) ax.plot(v_true, z, label='True') ax.plot(v.real, z, label='Estimated') if n == 2: ax.legend(bbox_to_anchor=(.5, 1.20), loc='lower center', ncol=2) axd.legend(['Estimated', 'True'],) ax.axvline(0., color=(0, 0, 0, .3)) ax.set_title(f"Mode {n}") ax.axis('off') ax.set_xlim(-.5, .5) ###Output _____no_output_____
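###Markdown
The `res` list filled in the loop above (mode number, relative frequency error in percent, relative damping error in percent and MAC in percent) is collected but never displayed. A small summary cell along the following lines could be appended; the column layout here is just one possible choice:

```python
print(f"{'Mode':>4} {'df (%)':>9} {'dxi (%)':>9} {'MAC (%)':>9}")
for n, ferr, xierr, mac in res:
    print(f"{n:>4d} {ferr:>9.2f} {xierr:>9.2f} {mac:>9.2f}")
```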
Notes/Fundamentals_0_Subject_Introduction.ipynb
###Markdown 0 Subject Introduction

0.1 Problem Solving

With both typical and computational problem solving, the following steps are performed:

1. Identify the problem
2. Structure the problem - i.e., analyse and understand the problem
3. Look for possible solutions
4. Make a decision
5. Implement the solution
6. Monitor and/or seek feedback

Do note that each step should be revisited as greater insights into the problem are gleaned.

Computational thinking encompasses several primary concepts, which suggest how we should approach problem solving from a computational perspective.

0.2 Programs and Programming Languages

>A **program** is the actual expression of an algorithm in a specific programming language. It allows the computer to execute the problem solution through a sequence of instructions.

Essentially, an algorithm is generally produced in the design phase of a problem solution, while a program that expresses the algorithm is produced in the programming stage. A program is also made up of lines of code that can be categorised as either an **expression** or a **statement**.

>An **expression** is a syntactic entity in a programming language that may be evaluated to determine its value.

>A **statement**, on the other hand, is a syntactic entity that has no value (it is merely an instruction).

Example:

>```python
>answers = 1 + 1  # '1+1' is an expression
>if answers == 2:
>    print('Good Outlook')  # print('Good Outlook') is a statement
>```

There are typically 3 basic statements:

1. Input statements
2. Output statements
3. Assignment statements

In general, input and output (I/O) allow communication between an information processing system (such as a computer) and an external entity (such as a human). Input is information supplied to a computer or program. Output is information provided by a computer or program. Assignment statements will be elaborated on further in a later section.

>```python
>x = input("Enter a positive integer value for x: ")  # example of an input statement; the user input is assigned to the variable x
>print(x)  # print statement to display the output
>```

The most basic statement, and typically the first code you write when learning a programming language, is the output statement, which outputs certain data - e.g., printing `"hello world"`. There is typically also an input statement that will request input from the user.

0.2.1 `print()` function

The `print(YOUR_SPECIFIED_MESSAGE)` function prints `YOUR_SPECIFIED_MESSAGE` to the screen, or other standard output device.

> It's demo time.

0.2.2 `input()` function

The `input(YOUR_MESSAGE_HERE)` function allows user input, where `YOUR_MESSAGE_HERE` is a `str` object. The value returned (and therefore the variable assigned) is also of `str` type.

> It's demo time. Talk about type casting.

0.2.3 Formatting output with `.format()` method or `f-string`

> It's demo time.

###Code
a = int(input("Enter a value for a: "))
b = int(input("Enter a value for b: "))  # b must be defined before it is used in the f-string below
print(f"My marks are {a+b} and {b**2}")
###Output
_____no_output_____
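###Markdown
Section 0.2.3 mentions both the `.format()` method and f-strings, but the demo cell above only shows an f-string. For completeness, an equivalent `.format()` version (an illustrative addition to these notes) looks like this:

```python
a = int(input("Enter a value for a: "))
b = int(input("Enter a value for b: "))
print("My marks are {} and {}".format(a + b, b ** 2))
```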
Language Model/4) Wikitext-103 with Transformer.ipynb
###Markdown [Data Set link](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) ###Code !wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip !unzip ./wikitext-103-v1.zip # !wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip # !unzip ./wikitext-2-v1.zip import sys if not sys.warnoptions: import warnings warnings.simplefilter("ignore") argsdata = './wikitext-103' # or './wikitext-103' argsbatch_size = 30 argsemsize=200 argsnhead=2 argsnhid=200 argsnlayers=2 argsdropout=0.4 argslog_interval=200 argseval_interval=5000 argsclip=0.25 argsseed=42 argsbptt=35 argscuda=True argslr=5 argsepochs=2 argstemperature = 1.0 argssave='./model.pt' argscheckpoint = './model.pt' argsoutf='generated.txt' argswords=300 import os import math import time import pickle import torch import torch.nn as nn import torch.nn.functional as F from io import open class Dictionary(object): def __init__(self): self.word2idx = {} self.idx2word = [] def add_word(self, word): if word not in self.word2idx: self.idx2word.append(word) self.word2idx[word] = len(self.idx2word) - 1 return self.word2idx[word] def __len__(self): return len(self.idx2word) class Corpus(object): def __init__(self, path): self.dictionary = Dictionary() self.train = self.tokenize(os.path.join(path, 'wiki.train.tokens')) self.valid = self.tokenize(os.path.join(path, 'wiki.valid.tokens')) self.test = self.tokenize(os.path.join(path, 'wiki.test.tokens')) def tokenize(self, path): """Tokenizes a text file.""" assert os.path.exists(path) # Add words to the dictionary with open(path, 'r', encoding="utf8") as f: for line in f: words = line.split() + ['<eos>'] for word in words: self.dictionary.add_word(word) # Tokenize file content with open(path, 'r', encoding="utf8") as f: idss = [] for line in f: words = line.split() + ['<eos>'] ids = [] for word in words: ids.append(self.dictionary.word2idx[word]) idss.append(torch.tensor(ids).type(torch.int64)) ids = torch.cat(idss) return ids # Temporarily leave PositionalEncoding module here. Will be moved somewhere else. class PositionalEncoding(nn.Module): r"""Inject some information about the relative or absolute position of the tokens in the sequence. The positional encodings have the same dimension as the embeddings, so that the two can be summed. Here, we use sine and cosine functions of different frequencies. .. math:: \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model)) \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model)) \text{where pos is the word position and i is the embed idx) Args: d_model: the embed dim (required). dropout: the dropout value (default=0.1). max_len: the max. length of the incoming sequence (default=5000). Examples: >>> pos_encoder = PositionalEncoding(d_model) """ def __init__(self, d_model, dropout=0.1, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.register_buffer('pe', pe) def forward(self, x): r"""Inputs of forward function Args: x: the sequence fed to the positional encoder model (required). 
Shape: x: [sequence length, batch size, embed dim] output: [sequence length, batch size, embed dim] Examples: >>> output = pos_encoder(x) """ x = x + self.pe[:x.size(0), :] return self.dropout(x) class TransformerModel(nn.Module): """Container module with an encoder, a recurrent or transformer module, and a decoder.""" def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5): super(TransformerModel, self).__init__() try: from torch.nn import TransformerEncoder, TransformerEncoderLayer except: raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.') self.model_type = 'Transformer' self.src_mask = None self.pos_encoder = PositionalEncoding(ninp, dropout) encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout) self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) self.encoder = nn.Embedding(ntoken, ninp) self.ninp = ninp self.decoder = nn.Linear(ninp, ntoken) self.init_weights() def _generate_square_subsequent_mask(self, sz): mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask def init_weights(self): initrange = 0.1 self.encoder.weight.data.uniform_(-initrange, initrange) self.decoder.bias.data.zero_() self.decoder.weight.data.uniform_(-initrange, initrange) def forward(self, src, has_mask=True): if has_mask: device = src.device if self.src_mask is None or self.src_mask.size(0) != len(src): mask = self._generate_square_subsequent_mask(len(src)).to(device) self.src_mask = mask else: self.src_mask = None src = self.encoder(src) * math.sqrt(self.ninp) src = self.pos_encoder(src) output = self.transformer_encoder(src, self.src_mask) output = self.decoder(output) return F.log_softmax(output, dim=-1) %%time # Set the random seed manually for reproducibility. torch.manual_seed(argsseed) if torch.cuda.is_available(): if not argscuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") device = torch.device("cuda" if argscuda else "cpu") ############################################################################### # Load data ############################################################################### if(os.path.exists('./corpus')): with open('corpus', 'rb') as data_file: corpus = pickle.load(data_file) else: corpus = Corpus(argsdata) with open('corpus', 'wb') as data_file: pickle.dump(corpus, data_file) ntokens = len(corpus.dictionary) # Starting from sequential data, batchify arranges the dataset into columns. # For instance, with the alphabet as the sequence and batch size 4, we'd get # ┌ a g m s ┐ # │ b h n t │ # │ c i o u │ # │ d j p v │ # │ e k q w │ # └ f l r x ┘. # These columns are treated as independent by the model, which means that the # dependence of e. g. 'g' on 'f' can not be learned, but allows more efficient # batch processing. def batchify(data, bsz): # Work out how cleanly we can divide the dataset into bsz parts. nbatch = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, nbatch * bsz) # Evenly divide the data across the bsz batches. 
data = data.view(bsz, -1).t().contiguous() return data.to(device) eval_batch_size = 50 train_data = batchify(corpus.train, argsbatch_size) val_data = batchify(corpus.valid, eval_batch_size) test_data = batchify(corpus.test, eval_batch_size) def print_gentext(): input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device) with torch.no_grad(): # no tracking history for i in range(argswords): output = model(input, False) word_weights = output[-1].squeeze().div(argstemperature).exp().cpu() word_idx = torch.multinomial(word_weights, 1)[0] word_tensor = torch.Tensor([[word_idx]]).long().to(device) input = torch.cat([input, word_tensor], 0) word = corpus.dictionary.idx2word[word_idx] print(word + ('\n' if i % 20 == 19 else ' '),end='') def evaluate1(): global best_val_loss global lr val_loss = evaluate(val_data) print('-' * 89) print('| epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) print('-' * 89) print('Generated Text:') print_gentext() # Save the model if the validation loss is the best we've seen so far. if not best_val_loss or val_loss < best_val_loss: with open(argssave, 'wb') as f: torch.save(model, f) best_val_loss = val_loss else: # Anneal the learning rate if no improvement has been seen in the validation dataset. lr /= 4.0 ############################################################################### # Build the model ############################################################################### ntokens = len(corpus.dictionary) model = TransformerModel(ntokens, argsemsize, argsnhead, argsnhid, argsnlayers, argsdropout).to(device) criterion = nn.CrossEntropyLoss() ############################################################################### # Training code ############################################################################### def repackage_hidden(h): """Wraps hidden states in new Tensors, to detach them from their history.""" if isinstance(h, torch.Tensor): return h.detach() else: return tuple(repackage_hidden(v) for v in h) # get_batch subdivides the source data into chunks of length args.bptt. # If source is equal to the example output of the batchify function, with # a bptt-limit of 2, we'd get the following two Variables for i = 0: # ┌ a g m s ┐ ┌ b h n t ┐ # └ b h n t ┘ └ c i o u ┘ # Note that despite the name of the function, the subdivison of data is not # done along the batch dimension (i.e. dimension 1), since that was handled # by the batchify function. The chunks are along dimension 0, corresponding # to the seq_len dimension in the LSTM. def get_batch(source, i): seq_len = min(argsbptt, len(source) - 1 - i) data = source[i:i+seq_len] target = source[i+1:i+1+seq_len].view(-1) return data, target def evaluate(data_source): # Turn on evaluation mode which disables dropout. model.eval() total_loss = 0. ntokens = len(corpus.dictionary) with torch.no_grad(): for i in range(0, data_source.size(0) - 1, argsbptt): data, targets = get_batch(data_source, i) output = model(data) output_flat = output.view(-1, ntokens) total_loss += len(data) * criterion(output_flat, targets).item() return total_loss / (len(data_source) - 1) def train(): # Turn on training mode which enables dropout. model.train() total_loss = 0. 
start_time = time.time() ntokens = len(corpus.dictionary) for batch, i in enumerate(range(0, train_data.size(0) - 1, argsbptt)): data, targets = get_batch(train_data, i) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. model.zero_grad() output = model(data) loss = criterion(output.view(-1, ntokens), targets) loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. torch.nn.utils.clip_grad_norm_(model.parameters(), argsclip) for p in model.parameters(): p.data.add_(-lr, p.grad.data) total_loss += loss.item() if batch% argseval_interval == 0 and batch > 0: evaluate1() if batch % argslog_interval == 0 and batch > 0: cur_loss = total_loss / argslog_interval elapsed = time.time() - start_time print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // argsbptt, lr, elapsed * 1000 / argslog_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() # Loop over epochs. lr = argslr best_val_loss = None # At any point you can hit Ctrl + C to break out of training early. try: for epoch in range(1, argsepochs+1): epoch_start_time = time.time() train() val_loss = evaluate(val_data) print('-' * 89) print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) print('-' * 89) print('Generated Text:') print_gentext() # Save the model if the validation loss is the best we've seen so far. if not best_val_loss or val_loss < best_val_loss: with open(argssave, 'wb') as f: torch.save(model, f) best_val_loss = val_loss else: # Anneal the learning rate if no improvement has been seen in the validation dataset. lr /= 4.0 except KeyboardInterrupt: print('-' * 89) print('Exiting from training early') # Load the best saved model. with open(argssave, 'rb') as f: model = torch.load(f) # Run on test data. test_loss = evaluate(test_data) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format( test_loss, math.exp(test_loss))) print('=' * 89) !nvidia-smi with open(argssave, 'wb') as f: torch.save(model, f) ############################################################################### # Language Modeling on Wikitext-103 # # This generates new sentences sampled from the language model # ############################################################################### # Set the random seed manually for reproducibility. 
torch.manual_seed(argsseed) if torch.cuda.is_available(): if not argscuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") device = torch.device("cuda" if argscuda else "cpu") if argstemperature < 1e-3: parser.error("--temperature has to be greater or equal 1e-3") with open(argscheckpoint, 'rb') as f: model = torch.load(f).to(device) model.eval() if(os.path.exists('./corpus')): with open('corpus', 'rb') as data_file: corpus = pickle.load(data_file) else: corpus = Corpus(argsdata) with open('corpus', 'wb') as data_file: pickle.dump(corpus, data_file) ntokens = len(corpus.dictionary) input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device) with open(argsoutf, 'w') as outf: with torch.no_grad(): # no tracking history for i in range(argswords): output = model(input, False) word_weights = output[-1].squeeze().div(argstemperature).exp().cpu() word_idx = torch.multinomial(word_weights, 1)[0] word_tensor = torch.Tensor([[word_idx]]).long().to(device) input = torch.cat([input, word_tensor], 0) word = corpus.dictionary.idx2word[word_idx] outf.write(word + ('\n' if i % 20 == 19 else ' ')) print(word + ('\n' if i % 20 == 19 else ' '),end='') #if i % argslog_interval == 0: # print('| Generated {}/{} words'.format(i, argswords)) ###Output Leela Schwarzkopf noted " the charming girl " and the bad paternal love " , which she said is capable of keeping it involved " a little realization featuring a [ classic ' s ] or heartfelt miracle against those previous friends , conceptual opening decorate season though she is COFS , " praising her persona of storyteller " . " . In 1855 second " myth and the destroyed " , Dwight and subtle settings ( the Western likes — they were arranged poem beat Hindsight ' White American " that of a sophisticated Welsh album , deprecating theme and familiar image of much extraordinary scholar Britney bluesier " according to date in the crescendos " ( Mii girl , fancy and encounters with the director of animation drama wordplay , penetrating parclose distorted , Ragam , intensely , given . Ryan 's popularity and that Rodman ] exciting and not so large numbers around her story arc — and an attractive , the classic hip , Captive emotional and horror commentator , Nirguna the song @-@ organic vein would be enduring Broderbund . " . " . " . " because he 's taste fits essentially missing a pleasant and fun , " . " . " . " and mundane " , rather than giving the writing ballads , the reality pretty heavy tough throw [ she waited for backfired — but none of env " straightforward work on an amazing appetite for the " darker look of Stunning kiss heart @-@ high subtle heroism a dreamy voice " happy — so @-@ colored spaceship . " trained humour and always funny vivid veil and touching lawmaking Goes 's physical apad " , much joy . " . " , palpable Bijeljina ' There were already acting
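###Markdown
The generation script above always starts from a random token drawn with `torch.randint`. If you would rather seed the generated text with a specific word, replacing the initial `input` along these lines should work. This is a suggested tweak, not part of the original script, and it assumes the chosen word actually occurs in the corpus vocabulary:

```python
seed_word = 'the'  # assumed to be present in corpus.dictionary.word2idx
input = torch.tensor([[corpus.dictionary.word2idx[seed_word]]],
                     dtype=torch.long).to(device)  # shape (1, 1), like the random start
```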
Puzzles/nombre_de_chemins_sur_un_quadrillage.ipynb
###Markdown nombre_de_chemins_sur_un_quadrillage (number of paths on a grid)

On a grid such as the one below, a *correct path* is a walk:

- along the yellow edges,
- starting from the top-left corner,
- ending at the bottom-right corner,
- never traversing an edge from right to left or from bottom to top.

Build an algorithm that determines the number of *correct paths*.

###Code
%matplotlib notebook
from affichages.nombre_de_chemins_sur_un_quadrillage import *

# draw the grid for the puzzle (helper from the local affichages package)
d = Dessin()
d.nouveau()
###Output
_____no_output_____
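###Markdown
One possible solution sketch, my own and independent of the `affichages` drawing helper, so the grid size is passed in explicitly: every correct path on a grid with `m` columns and `n` rows of cells consists of exactly `m` moves to the right and `n` moves down in some order, so the count is the binomial coefficient C(m+n, m). A dynamic-programming version is included as a cross-check:

```python
from math import comb

def nb_chemins(m, n):
    # m moves to the right and n moves down, in any order
    return comb(m + n, m)

def nb_chemins_dp(m, n):
    # same count, node by node: paths to a node = paths from the left + paths from above
    row = [1] * (m + 1)
    for _ in range(n):
        for j in range(1, m + 1):
            row[j] += row[j - 1]
    return row[m]

print(nb_chemins(4, 3), nb_chemins_dp(4, 3))  # both give 35 for a 4 x 3 grid, for example
```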
usecase_2/Solanum-Lycopersicum_Brix-SolubleSolids-Sugars.ipynb
###Markdown Implementation algorithmAlex Warwick Vesztrocy, Christophe Dessimoz, Henning Redestig, Prioritising candidate genes causing QTL using hierarchical orthologous groups, *Bioinformatics*, Volume 34, Issue 17, 01 September 2018, Pages i612–i619, https://doi.org/10.1093/bioinformatics/bty615 ###Code import qtlsearch import pandas as pd from IPython.display import Image,SVG search = qtlsearch.SEARCH( "http://localhost:8890/sparql", "http://sparql.omabrowser.org/sparql", "https://sparql.uniprot.org/sparql") ###Output _____no_output_____ ###Markdown Brix, Soluble Solids, SugarsGO-terms: `GO:0006094` `GO:0046370` `GO:0046369` `GO:0005985` `GO:0015770`QTL from: Chromosome `9`, around `3474710`Candidate: `Lin5` (`Solyc09g010080`) Define the QTL and compute genes within this interval ###Code d=100000 intervalT = search.make_interval( "http://pbg-ld.candygene-nlesc.surf-hosted.nl/genome/Solanum_lycopersicum/chromosome/9", 3474710-d, 3474710+d) #genes for interval genesT = search.interval_genes(intervalT) ###Output _____no_output_____ ###Markdown Compute the list of GO annotations ###Code qtls = [genesT.index] go_annotations = pd.concat([search.get_child_annotations("GO:0006094"), search.get_child_annotations("GO:0046370"), search.get_child_annotations("GO:0046369"), search.get_child_annotations("GO:0005985"), search.get_child_annotations("GO:0015770")]) print(go_annotations) ###Output _____no_output_____ ###Markdown Get data and do computations ###Code result = qtlsearch.QTLSEARCH(search, qtls,go_annotations) #self.qtl_gene_roots, self.qtl_gene_protein, self.hog_group_trees, self.hog_group_genes intervalT ###Output _____no_output_____ ###Markdown Create report ###Code report_list = result.report() for report in report_list: display(report) ###Output _____no_output_____
tcr_classifier.ipynb
###Markdown Table of Contents1&nbsp;&nbsp;A computational approach to discover CD8+ T-cell receptor characteristics underlying peptide recognition2&nbsp;&nbsp;Functions3&nbsp;&nbsp;Input data4&nbsp;&nbsp;Feature generation5&nbsp;&nbsp;Classification5.1&nbsp;&nbsp;Feature importance5.2&nbsp;&nbsp;Learning curve A computational approach to discover CD8+ T-cell receptor characteristics underlying peptide recognition ###Code # imports import string import joblib import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings from boruta import BorutaPy from IPython import display from pyteomics import electrochem, mass, parser from sklearn import ensemble, feature_extraction, metrics, model_selection, pipeline, preprocessing %matplotlib inline pd.options.display.max_rows = 20 matplotlib.rcParams['font.family'] = 'sans-serif' matplotlib.rcParams['font.size'] = 6 ###Output reading in boruta ###Markdown Functions ###Code def split_rows(df, column, symbol): ''' Given a dataframe, the name of a column in the dataframe and a symbol to split on: Split rows in two rows if that row's column value contains the symbol to split on. Each new row is identical to the original except for the column value which is replaced by one of its split parts. ''' s = df[column].str.split(symbol, expand=True).stack() i = s.index.get_level_values(0) df2 = df.loc[i].copy() df2[column] = s.values return df2 def gene_to_family(gene): ''' Format V- and J-genes to be encoded uniformly and reduce to family level. ''' gene_string = str(gene) if gene_string == 'unresolved': return gene_string if '-' in gene_string: gene_string = gene_string.split('-')[0] if len(gene_string) == 1 and int(gene_string): gene_string = '0'+gene_string return gene_string def uniformise_gene(gene): ''' Uniformly encode genes as XX-XX (if family+ gene known) or XX (if only family known) where each X represents an integer. ''' gene = str(gene) if gene == 'unresolved': return gene if '-' in gene: for part in set(gene.split('-')): if len(part) == 1 and int(part): gene = gene.replace(part, '0'+part) elif len(gene) == 1 and int(gene): gene = '0'+gene return gene ###Output _____no_output_____ ###Markdown Input data ###Code data = pd.read_csv('data.csv') # don't split on J_gene because J_gene column doesn't contain splits data = split_rows(data, 'V_gene', '/').reset_index(drop=True) for gene in ['V_gene', 'J_gene']: data[gene] = data[gene].apply(lambda x:uniformise_gene(x)) data[gene.replace('gene','family')] = data[gene].apply(lambda x: gene_to_family(x)) display.display(data) ###Output _____no_output_____ ###Markdown Feature generationVarious features were constructed:* The prefix and postfix categorical attributes were one-hot encoded. 
This is useful because most classification algorithms are not able to deal with categorical features but require numerical features instead.* The sequence length (computed by [Pyteomics](https://pythonhosted.org/pyteomics/) [Goloborodko2013], modX notation allowed).* The number of times each amino acid occurs in the sequence.* The average of the chemical properties (basicity, hydrophobicity, helicity) and the substitution of the individual amino acids in the sequence, normalized by the sequence length.* The pI of the full sequence (computed by [Pyteomics](https://pythonhosted.org/pyteomics/)).* The peptide mass (computed by [Pyteomics](https://pythonhosted.org/pyteomics/)).* Individual features of each amino acid at specific positions in the sequences: * For sequences with an odd length, the center amino acid is assigned index `0`, with ascending indices to the right and descending indices to the left. For sequences with an even length there is no `0` index, but the center two amino acids have index `-1` and `1`. * For each position the following features are generated: which amino acid occurs at this position; the basicity, hydrophobicity, helicity, pI, and substitution value for the amino acid at this position. In total this results in 417 different features for each sequence.[Goloborodko2013] Goloborodko, A. A., Levitsky, L. I., Ivanov, M. V. & Gorshkov, M. V. Pyteomics-a Python framework for exploratory data analysis and rapid software prototyping in proteomics. *Journal of The American Society for Mass Spectrometry* **24**, 301–304 (2013). ###Code # physicochemical amino acid properties basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9} hydrophobicity = {'A': 0.16, 'B': -3.14, 'C': 2.50, 'D': -2.49, 'E': -1.50, 'F': 5.00, 'G': -3.31, 'H': -4.63, 'I': 4.41, 'K': -5.00, 'L': 4.76, 'M': 3.23, 'N': -3.79, 'P': -4.92, 'Q': -2.76, 'R': -2.77, 'S': -2.85, 'T': -1.08, 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.00, 'Z': -2.13} helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.00, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91} mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31} # feature conversion and generation features_list = [] # numeric encoding of the HLA_peptide class labels features_list.append(pd.DataFrame(preprocessing.LabelEncoder().fit_transform(data['HLA_peptide']), columns=['HLA_peptide'])) # one-hot encoding of pre and post onehot_encoder = feature_extraction.DictVectorizer(sparse=False) features_list.append(pd.DataFrame( onehot_encoder.fit_transform(data[['V_gene', 'J_gene', 'V_family', 'J_family']].to_dict(orient='records')), columns=onehot_encoder.feature_names_)) # sequence length features_list.append(data['CDR3_sequence'].apply( lambda sequence: parser.length(sequence)).to_frame() .rename(columns={'CDR3_sequence': 'length'})) # number of occurences of each amino acid aa_counts = pd.DataFrame.from_records( [parser.amino_acid_composition(sequence) for sequence in data['CDR3_sequence']]).fillna(0) aa_counts.columns = 
['count_{}'.format(column) for column in aa_counts.columns] features_list.append(aa_counts) # physicochemical properties: (average) basicity, (average) hydrophobicity, # (average) helicity, pI, (average) mutation stability features_list.append(data['CDR3_sequence'].apply( lambda seq: sum([basicity[aa] for aa in seq]) / parser.length(seq)) .to_frame().rename(columns={'CDR3_sequence': 'avg_basicity'})) features_list.append(data['CDR3_sequence'].apply( lambda seq: sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)) .to_frame().rename(columns={'CDR3_sequence': 'avg_hydrophobicity'})) features_list.append(data['CDR3_sequence'].apply( lambda seq: sum([helicity[aa] for aa in seq]) / parser.length(seq)) .to_frame().rename(columns={'CDR3_sequence': 'avg_helicity'})) features_list.append(data['CDR3_sequence'].apply( lambda seq: electrochem.pI(seq)).to_frame().rename(columns={'CDR3_sequence': 'pI'})) features_list.append(data['CDR3_sequence'].apply( lambda seq: sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)) .to_frame().rename(columns={'CDR3_sequence': 'avg_mutation_stability'})) # peptide mass features_list.append(data['CDR3_sequence'].apply( lambda seq: mass.fast_mass(seq)).to_frame().rename(columns={'CDR3_sequence': 'mass'})) # positional features # amino acid occurence and physicochemical properties at a given position from the center pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation = [[] for _ in range(6)] for sequence in data['CDR3_sequence']: length = parser.length(sequence) start_pos = -1 * (length // 2) pos_range = list(range(start_pos, start_pos + length)) if length % 2 == 1 else\ list(range(start_pos, 0)) + list(range(1, start_pos + length + 1)) pos_aa.append({'pos_{}_{}'.format(pos, aa): 1 for pos, aa in zip(pos_range, sequence)}) pos_basicity.append({'pos_{}_basicity'.format(pos): basicity[aa] for pos, aa in zip(pos_range, sequence)}) pos_hydro.append({'pos_{}_hydrophobicity'.format(pos): hydrophobicity[aa] for pos, aa in zip(pos_range, sequence)}) pos_helicity.append({'pos_{}_helicity'.format(pos): helicity[aa] for pos, aa in zip(pos_range, sequence)}) pos_pI.append({'pos_{}_pI'.format(pos): electrochem.pI(aa) for pos, aa in zip(pos_range, sequence)}) pos_mutation.append({'pos_{}_mutation_stability'.format(pos): mutation_stability[aa] for pos, aa in zip(pos_range, sequence)}) features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0)) features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0)) features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0)) features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0)) features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0)) features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0)) # combine all features features = pd.concat(features_list, axis=1) print('Samples: {} - features: {}'.format(features.shape[0], features.shape[1])) ###Output Samples: 237 - features: 443 ###Markdown ClassificationPipeline consists of feature selection and classification; repeated CV split is used to get a robust performance assessment. 
###Code # run a single iteration of the predictor (extracted to a method for multithreaded computation) def run_predictor(predictor, X, y, train_index, test_index): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] # train the predictor pipeline predictor.fit(X_train, y_train) # extract the relevant feature importances importances = np.zeros(X.shape[1], float) importances[predictor.named_steps['feature_selection'].support_] =\ predictor.named_steps['classification'].feature_importances_ # return the predictions to evaluate the performance predictions_proba = predictor.predict_proba(X_test)[:, 1] return (y_test, np.array(predictions_proba > 0.5, np.int)),\ (y_test, predictions_proba),\ importances # create a prediction pipeline consisting of feature selection and classification classifier = ensemble.RandomForestClassifier(200, n_jobs=-1, random_state=0) predictor = pipeline.Pipeline([('feature_selection', BorutaPy(ensemble.ExtraTreesClassifier(n_jobs=-1), n_estimators='auto', random_state=0)), ('classification', classifier)]) # turn off numpy RuntimeWarning with warnings.catch_warnings(): warnings.simplefilter("ignore") # apply the prediction pipeline features_noclass = features.drop('HLA_peptide', axis=1) X = features_noclass.values y = features['HLA_peptide'].values # do multiple splits to get a more accurate evaluation of the performance repeats = 100 sss = model_selection.StratifiedShuffleSplit(n_splits=repeats, test_size=0.2, random_state=0) result = np.asarray(joblib.Parallel(n_jobs=-1)(joblib.delayed(run_predictor) (predictor, X, y, train_index, test_index) for train_index, test_index in sss.split(X, y))) predictions = result[:, 0] predictions_proba = result[:, 1] feature_importances = result[:, 2] f, axarr = plt.subplots(2, 2, figsize=(12, 12)) # evaluate the performance of the prediction pipeline # accuracy accuracy_mean = np.mean([metrics.accuracy_score(y_test, y_pred) for y_test, y_pred in predictions]) accuracy_std = np.std([metrics.accuracy_score(y_test, y_pred) for y_test, y_pred in predictions]) # AUC and average precision auc_mean = np.mean([metrics.roc_auc_score(y_test, y_pred) for y_test, y_pred in predictions_proba]) auc_std = np.std([metrics.roc_auc_score(y_test, y_pred) for y_test, y_pred in predictions_proba]) avg_precision_mean = np.mean([metrics.average_precision_score(y_test, y_pred) for y_test, y_pred in predictions_proba]) avg_precision_std = np.std([metrics.average_precision_score(y_test, y_pred) for y_test, y_pred in predictions_proba]) avg_precision_inverted_mean = np.mean([metrics.average_precision_score(1 - y_test, 1 - y_pred) for y_test, y_pred in predictions_proba]) avg_precision_inverted_std = np.std([metrics.average_precision_score(1 - y_test, 1 - y_pred) for y_test, y_pred in predictions_proba]) # mean and standard deviation of ROC and precision-recall curves interval = np.linspace(0, 1, 100) tprs, precisions, precisions_inverted = [], [], [] for y_test, y_pred in predictions_proba: fpr, tpr, _ = metrics.roc_curve(y_test, y_pred) tprs.append(np.interp(interval, fpr, tpr)) precision, recall, _ = metrics.precision_recall_curve(y_test, y_pred) precisions.append(np.interp(interval, recall[::-1], precision)) # inverted precision precision_inverted, recall_inverted, _ = metrics.precision_recall_curve(1 - y_test, 1 - y_pred) precisions_inverted.append(np.interp(interval, recall_inverted[::-1], precision_inverted)) tpr_mean = np.mean(tprs, axis=0) tpr_mean[0], tpr_mean[-1] = 0.0, 1.0 tpr_std = np.std(tprs, axis=0) 
precision_mean = np.mean(precisions, axis=0) precision_std = np.std(precisions, axis=0) precision_inverted_mean = np.mean(precisions_inverted, axis=0) precision_inverted_std = np.std(precisions_inverted, axis=0) # feature importance feat_import_s = pd.Series(np.mean(feature_importances, axis=0), index=features_noclass.columns.values, name='Feature importances') significant_features = feat_import_s[feat_import_s > 0.01].sort_values(ascending=False) # print accuracy print('Classification accuracy = {:.2%} ± {:.2%}'.format(accuracy_mean, accuracy_std)) # plot ROC curve axarr[0,0].plot(interval, tpr_mean, label='AUROC = {:.2f} ± {:.2f}'.format(auc_mean, auc_std)) axarr[0,0].fill_between(interval, tpr_mean - tpr_std, tpr_mean + tpr_std, alpha=0.5) axarr[0,0].plot([0, 1], [0, 1], 'k--') axarr[0,0].set_xlim([-0.05, 1.05]) axarr[0,0].set_ylim([-0.05, 1.05]) axarr[0,0].set_xlabel('False Positive Rate') axarr[0,0].set_ylabel('True Positive Rate') axarr[0,0].legend(loc='lower right') axarr[0,0].annotate(string.ascii_uppercase[0], xy=(-0.1,1.1), xycoords='axes fraction', fontsize=16, xytext=(0, -15), textcoords='offset points', weight='bold', ha='right', va='top') # plot precision-recall curve axarr[0,1].set_title('FLKEKGGL') axarr[0,1].plot(interval[::-1], precision_mean, label='Mean PR = {:.2f} ± {:.2f}'.format(avg_precision_mean, avg_precision_std)) axarr[0,1].fill_between(interval[::-1], precision_mean - precision_std, precision_mean + precision_std, alpha=0.5) axarr[0,1].legend(loc='lower right') axarr[0,1].set_xlim([-0.05, 1.05]) axarr[0,1].set_ylim([-0.05, 1.05]) axarr[0,1].set_xlabel('Recall') axarr[0,1].set_ylabel('Precision') plt.legend(loc='lower right') axarr[0, 1].annotate(string.ascii_uppercase[1], xy=(-0.1,1.1), xycoords='axes fraction', fontsize=16, xytext=(0, -15), textcoords='offset points', weight='bold', ha='right', va='top') axarr[1,0].set_title('EIYKRWII') axarr[1,0].plot(interval[::-1], precision_inverted_mean, label='Mean PR = {:.2f} ± {:.2f}'.format(avg_precision_inverted_mean, avg_precision_inverted_std)) axarr[1,0].fill_between(interval[::-1], precision_inverted_mean - precision_inverted_std, precision_inverted_mean + precision_inverted_std, alpha=0.5) axarr[1,0].legend(loc='lower right') axarr[1,0].set_xlim([-0.05, 1.05]) axarr[1,0].set_ylim([-0.05, 1.05]) axarr[1,0].set_xlabel('Recall') axarr[1,0].set_ylabel('Precision') plt.legend(loc='lower right') axarr[1,0].annotate(string.ascii_uppercase[2], xy=(-0.1,1.1), xycoords='axes fraction', fontsize=16, xytext=(0, -15), textcoords='offset points', weight='bold', ha='right', va='top') # plot feature importance sns.barplot(x=significant_features.index.values, y=significant_features, palette='Blues_d', ax=axarr[1,1]) axarr[1,1].set_xticklabels(significant_features.index.values, rotation='vertical', fontsize=10) axarr[1,1].set_ylabel('Feature importance') axarr[1,1].annotate(string.ascii_uppercase[3], xy=(-0.1,1.1), xycoords='axes fraction', fontsize=16, xytext=(0, -15), textcoords='offset points', weight='bold', ha='right', va='top') print(significant_features) plt.savefig('validation_one.pdf', bbox_inches='tight', dpi=600) plt.show() plt.close() ###Output Classification accuracy = 75.90% ± 5.45% avg_basicity 0.137350 avg_helicity 0.105305 pos_1_helicity 0.102358 pos_1_hydrophobicity 0.082974 pos_2_basicity 0.080863 count_R 0.069032 pos_3_mutation_stability 0.063179 avg_hydrophobicity 0.040724 pos_2_helicity 0.040645 pos_0_hydrophobicity 0.038702 length 0.036358 pos_3_hydrophobicity 0.033291 avg_mutation_stability 0.032128 
pos_1_basicity 0.029568 pos_-4_S 0.019961 pos_4_S 0.013550 Name: Feature importances, dtype: float64 ###Markdown Feature importance ###Code sns.barplot(x=significant_features.index.values, y=significant_features, palette='Blues_d') plt.xticks(rotation='vertical', fontsize=10) plt.ylabel('Feature importance') plt.savefig('feature_importance.pdf', bbox_inches='tight', dpi=600) print(significant_features) ###Output avg_basicity 0.137350 avg_helicity 0.105305 pos_1_helicity 0.102358 pos_1_hydrophobicity 0.082974 pos_2_basicity 0.080863 count_R 0.069032 pos_3_mutation_stability 0.063179 avg_hydrophobicity 0.040724 pos_2_helicity 0.040645 pos_0_hydrophobicity 0.038702 length 0.036358 pos_3_hydrophobicity 0.033291 avg_mutation_stability 0.032128 pos_1_basicity 0.029568 pos_-4_S 0.019961 pos_4_S 0.013550 Name: Feature importances, dtype: float64 ###Markdown Learning curveMore data increases performance ###Code # compute the learning curve selected_features = np.mean(feature_importances, axis=0) > 0.01 X_filtered = X[:, selected_features] train_sizes, train_scores, test_scores = model_selection.learning_curve( classifier, X_filtered, y, train_sizes=np.linspace(.1, 1., 10), scoring='roc_auc', n_jobs=-1, cv=model_selection.StratifiedShuffleSplit(n_splits=100, test_size=0.2, random_state=0)) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) # plot the learning curve plt.figure(figsize=(6, 6)) with open('learning_curve_onevsone.txt', 'w') as f: out_data = [] for d in [train_sizes, train_scores_mean, train_scores_std, test_scores_mean, test_scores_std]: out_data.append('\t'.join(str(s) for s in d)) f.write('\n'.join(out_data)) plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color=sns.color_palette()[0]) plt.plot(train_sizes, train_scores_mean, 'o-', label='Training score') plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color=sns.color_palette()[1]) plt.plot(train_sizes, test_scores_mean, 'o-', label='Cross-validation score') plt.xlabel('Training set size') plt.ylabel('Score') plt.ylim(0.5, 1.05) plt.legend(loc='lower right') plt.savefig('learning_curve.pdf', bbox_inches='tight', dpi=600) ###Output _____no_output_____
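###Markdown
As a small aside to the positional features described in the feature generation section above, the sketch below isolates the center-based index assignment on two toy sequences (one odd-length, one even-length, both made up for illustration) so the indexing scheme is easier to see on its own.
###Code
def center_positions(sequence):
    # Mirrors the pos_range computation used when building the positional features
    length = len(sequence)
    start_pos = -1 * (length // 2)
    if length % 2 == 1:
        return list(range(start_pos, start_pos + length))
    return list(range(start_pos, 0)) + list(range(1, start_pos + length + 1))

print(center_positions('CASSF'))   # odd length  -> [-2, -1, 0, 1, 2]
print(center_positions('CASSLF'))  # even length -> [-3, -2, -1, 1, 2, 3]
###Output
_____no_output_____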
OpenCV/Untitled19 (1).ipynb
###Markdown Contour detection in only a certain area ###Code
valid_cntrs = []
for i, cntr in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cntr)
    # keep contours whose bounding box starts at x <= 300 and y >= 200
    # (the area check is >= 0, so it currently keeps contours of any size)
    if (x <= 300) & (y >= 200) & (cv2.contourArea(cntr) >= 0):
        valid_cntrs.append(cntr)

# count of discovered contours
len(valid_cntrs)

img = cv2.imread('Photograph.jpg')
dmy = img.copy()
cv2.drawContours(dmy, valid_cntrs, -1, (127, 200, 0), 2)
cv2.line(dmy, (0, 220), (256, 80), (100, 255, 255))
plt.imshow(dmy)
plt.show()
###Output
_____no_output_____
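###Markdown
The `contours` variable used above is not created anywhere in this notebook. A minimal sketch of how it is typically produced is shown below; the grayscale conversion and the threshold value of 127 are assumptions, and note that `cv2.findContours` returns two values in OpenCV 4.x (three in 3.x).
###Code
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('Photograph.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(len(contours))
###Output
_____no_output_____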
pandas_timezones.ipynb
###Markdown Pandas Timezones
See Chris Albon's [post](https://chrisalbon.com/machine_learning/preprocessing_dates_and_times/convert_pandas_column_timezone/) for reference.
###Code
from IPython.display import display  # neat trick to display dataframes
import pandas as pd
###Output
_____no_output_____
###Markdown Convert a naive DatetimeIndex to a timezone
Works for both `pd.date_range` and `pd.DatetimeIndex`.
###Code
date_range = pd.date_range(start='2015', end='2016', freq='h')[:-1]
display(date_range)
display(date_range.tz_localize('UTC'))
###Output
_____no_output_____
###Markdown Convert an index from UTC to a specific timezone
Here the index is already timezone-aware (UTC), so `tz_convert` maps it to another timezone.
* start and end can be pd.datetime objects as well
* a `DatetimeIndex` can already be generated for a specific timezone
###Code
date_range = pd.date_range(start='2015', end='2016', freq='h', tz='UTC')[:-1]
display(date_range)
display(date_range.tz_convert('Europe/London'))
###Output
_____no_output_____
###Markdown Convert a df column to its respective timezones
Here we have a column of datetimes (in UTC) and a column of timezones, and we want to convert each datetime to its corresponding timezone.
* using `np.random.choice` returns an array and loses the timezone awareness!
* `tz_localize` can be used on a df column by using the `.dt` accessor.
* `tz_convert` does not accept a list (or array) of timezones, so we need to use `apply`. It's not blazing fast.
###Code
import numpy as np  # only needed to choose random timezones
import pytz  # only needed to get a list of available timezones

date_range = pd.date_range(start='2015', end='2016', freq='h')[:-1]
size = len(date_range)
df = pd.DataFrame({
    'datetime': np.random.choice(date_range, size),  # random datetimes from the interval
    'timezone': np.random.choice(pytz.all_timezones, size)  # random timezones
})

# tz_localize works on columns if we use .dt
df['datetime'] = df['datetime'].dt.tz_localize('UTC')
display(df.head())

# convert each datetime to the corresponding timezone
df['local_datetime'] = df.apply(
    lambda row: row['datetime'].tz_convert(row['timezone']),
    axis=1
)
display(df.head())
###Output
_____no_output_____
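###Markdown
A possible way to speed up the row-wise `apply` above (not benchmarked here) is to convert group-wise, issuing one `tz_convert` call per unique timezone instead of one per row. The result is still an object-dtype column of timestamps, since the rows carry different timezones.
###Code
def convert_by_group(frame):
    # One tz_convert call per timezone group, then restore the original row order
    parts = [
        group['datetime'].dt.tz_convert(tz)
        for tz, group in frame.groupby('timezone')
    ]
    return pd.concat(parts).reindex(frame.index)

df['local_datetime_grouped'] = convert_by_group(df)
display(df.head())
###Output
_____no_output_____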
src/jupyter-notebooks/JL_Dataset.ipynb
###Markdown Aim is to obtain a dataset class ###Code import torch import torch.nn as nn import torch.nn.functional as F import pandas as pd import numpy as np root_dir = '/home/sri/lip_reading/data/' clean_files_path = 'clean_files.csv' df = pd.read_csv(root_dir + clean_files_path) def get_files(root_dir, file_path, is_train = True): ''' Get records for either train or test ''' df = pd.read_csv(root_dir + file_path) if is_train: return df[df['is_train'] == 1] else: return df[df['is_train'] == 0] t_df = get_files(root_dir, clean_files_path, is_train=True) a = t_df.iloc[9] a a['mp4'] a['mp3'] a['txt'] ###Output _____no_output_____ ###Markdown A record describes a single datapoint. Each record consists of an mp4 file, mp3 file and the full text representation. ###Code from torch.utils.data import Dataset import imageio from PIL import Image class LRWDataset(Dataset): COL_MP4 = 'mp4' COL_MP3 = 'mp3' COL_TXT = 'txt' def __init__(self, root_dir, clean_files_path, is_train=True): self.root_dir = root_dir self.df = self._get_files(root_dir, clean_files_path, is_train) def __len__(self): return len(self.df) def __getitem__(self, idx): mp4, mp3, txt = self._get_records(idx) reversed_mp3 = self._get_reversed_mp3_as_tensor(self.root_dir + mp3) reversed_txt = self._get_reversed_txt_as_tensor(self.root_dir + txt) reversed_mp4 = self._get_reversed_frames_as_tensors(self.root_dir + mp4) return reversed_mp4, reversed_mp3, reversed_txt def _get_files(self, root_dir, file_path, is_train = True): ''' Get records for either train or test ''' df = pd.read_csv(root_dir + file_path) if is_train: return df[df['is_train'] == 1] else: return df[df['is_train'] == 0] def _get_records(self, idx): record = df.iloc[idx] mp4 = record[LRWDataset.COL_MP4] mp3 = record[LRWDataset.COL_MP3] txt = record[LRWDataset.COL_TXT] return mp4, mp3, txt def _get_reversed_mp3_as_tensor(self, mp3_path): return mp3_path def _get_reversed_txt_as_tensor(self, txt_path): return txt_path def _get_reversed_frames_as_tensors(self, mp4_file): reader = imageio.get_reader(mp4_file) reader = imageio.get_reader(mp4_file) imgs = np.array(reader.get_data(0)) imgs = imgs.reshape(1, *imgs.shape) count = reader.count_frames() for i in range(1, count): frame = np.array(reader.get_data(i)) frame = frame.reshape(1, *frame.shape) imgs = np.vstack((imgs, frame)) frames = torch.from_numpy(imgs) rev_frames = torch.flip(frames, [0]) return rev_frames dataset = LRWDataset(root_dir, clean_files_path, is_train=True) t_mp4 = dataset[18] t_mp4[0].shape import _torch_sox ###Output _____no_output_____
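###Markdown
The `_get_reversed_mp3_as_tensor` and `_get_reversed_txt_as_tensor` methods above are still placeholders that return the file paths unchanged. Below is a hedged sketch of how they could be filled in, assuming `torchaudio` (with an mp3-capable backend such as sox) for the audio and a simple character-level encoding for the transcript; the real project may intend a different vocabulary or audio pipeline.
###Code
import torchaudio

def _get_reversed_mp3_as_tensor(self, mp3_path):
    # Load the waveform [channels, time] and flip it along the time axis
    waveform, sample_rate = torchaudio.load(mp3_path)
    return torch.flip(waveform, dims=[1])

def _get_reversed_txt_as_tensor(self, txt_path):
    # Read the transcript, reverse the character order and encode each
    # character as its Unicode code point (placeholder encoding)
    with open(txt_path) as f:
        text = f.read().strip()
    return torch.tensor([ord(c) for c in reversed(text)], dtype=torch.long)

# These could be patched onto the class for a quick test
LRWDataset._get_reversed_mp3_as_tensor = _get_reversed_mp3_as_tensor
LRWDataset._get_reversed_txt_as_tensor = _get_reversed_txt_as_tensor
###Output
_____no_output_____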
GAUSSIAN_SOBEL_Filter.ipynb
###Markdown ###Code
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy import ndimage
from sklearn.datasets import fetch_openml

mnist = fetch_openml('mnist_784')
x = mnist.data
y = mnist.target

# apply a Gaussian blur followed by a Sobel filter to each sample
# (each sample is the flattened 784-dimensional vector, so the filters act in 1D)
g_s = np.zeros_like(x)
for i in range(70000):
    g_s[i] = scipy.ndimage.gaussian_filter(x[i], 5)

x_s = np.zeros_like(x)
for i in range(70000):
    x_s[i] = scipy.ndimage.sobel(g_s[i])

from sklearn.model_selection import train_test_split
train_img, test_img, train_lbl, test_lbl = train_test_split(
    x_s, mnist.target, test_size=1/7.0, random_state=0)

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(train_img)
train_img = scaler.transform(train_img)
test_img = scaler.transform(test_img)

from sklearn.svm import SVC
svc_model = SVC()

import time
t0 = time.time()
svc_model.fit(train_img, train_lbl)
t1 = time.time()
print(t1 - t0)  # training time in seconds (t0/t1 avoid overwriting the label array y)

y_predict = svc_model.predict(test_img)

from sklearn import metrics
d = svc_model.score(test_img, test_lbl)
print(d*100)

t = svc_model.score(train_img, train_lbl)
print(t*100)
###Output
98.15666666666667
XLSR_Wav2Vec2_for_Indonesian_Evaluation-Sundanese.ipynb
###Markdown Load the dataset ###Code def load_dataset_sundanese(): urls = [ "https://www.openslr.org/resources/44/su_id_female.zip", "https://www.openslr.org/resources/44/su_id_male.zip" ] dm = DownloadManager() download_dirs = dm.download_and_extract(urls) data_dirs = [ Path(download_dirs[0])/"su_id_female/wavs", Path(download_dirs[1])/"su_id_male/wavs", ] filenames = [ Path(download_dirs[0])/"su_id_female/line_index.tsv", Path(download_dirs[1])/"su_id_male/line_index.tsv", ] dfs = [] dfs.append(pd.read_csv(filenames[0], sep='\t4?\t', names=["path", "sentence"])) dfs.append(pd.read_csv(filenames[1], sep='\t\t', names=["path", "sentence"])) for i, dir in enumerate(data_dirs): dfs[i]["path"] = dfs[i].apply(lambda row: str(data_dirs[i]) + "/" + row + ".wav", axis=1) df = pd.concat(dfs) # df = df.sample(frac=1, random_state=1).reset_index(drop=True) dataset = Dataset.from_pandas(df) dataset = dataset.remove_columns('__index_level_0__') return dataset.train_test_split(test_size=0.1, seed=1) dataset = load_dataset_sundanese() # We can also just load it from the disk created during training # dataset = datasets.load_from_disk("dataset_sundanese") dataset test_dataset = dataset['test'] wer = load_metric("wer") %%capture model_name = "cahya/wav2vec2-large-xlsr-sundanese" processor = Wav2Vec2Processor.from_pretrained(model_name) model = Wav2Vec2ForCTC.from_pretrained(model_name) model.to("cuda") #chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\'\”]' chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\'\”_\�]' resampler = torchaudio.transforms.Resample(48_000, 16_000) test_dataset = test_dataset.map(speech_file_to_array_fn) result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ###Output _____no_output_____
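###Markdown
The `speech_file_to_array_fn` and `evaluate` functions used in the last two cells are not defined anywhere in this notebook as shown. A sketch of the standard definitions from the XLSR-Wav2Vec2 evaluation recipe is given below; it assumes the `resampler`, `chars_to_ignore_regex`, `processor` and `model` objects created above, and that inference runs on the GPU the model was moved to.
###Code
import re
import torch
import torchaudio

def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"),
                       attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
###Output
_____no_output_____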
2D-Segmentation-Audi-A2D2.ipynb
###Markdown 2D Semantic Segmentation on the Audi A2D2 DatasetIn this notebook, we train a model on the 2D semantic segmentation annotations from the Audi A2D2 Dataset https://www.a2d2.audi/a2d2/en/dataset.html. The dataset can also be accessed from the the AWS Open Data Registry https://registry.opendata.aws/aev-a2d2/. We do the following: 1. We download the semantic segmentation dataset archive 1. We inspect and describe the data 1. We run local processing to produce a dataset manifest (list of all records), and split the data in training and validation sections. 1. We send the data to Amazon S3 1. We create a PyTorch script training a DeepLabV3 model, that we test locally for few iterations 1. We launch our training script on a remote, long-running machine with SageMaker Training API. 1. We show how to run bayesian parameter search to tune the metric of your choice (loss, accuracy, troughput...). To keep costs low, this is deactivated by default 1. We open a model checkpoint (collected in parallel to training by SageMaker Training) to check prediction qualityThe demo was created from a SageMaker ml.g4dn.16xlarge Notebook instance, with a Jupyter Kernel `conda_pytorch_latest_p37` (`torch 1.8.1+cu111`, `torchvision 0.9.1+cu111`). Feel free to use a different instance for the download and pre-processing step so that a GPU doesn't sit idle, and switch to a different instance type later. Note that the dataset download and extraction **do not run well on SageMaker Studio Notebooks**, whose storage is EFS based and struggles to handle the 80k+ files composing the dataset. Launching API calls (training job, tuning jobs) from Studio should run fine though.**IMPORTANT NOTES*** **This sample is written for single-GPU instances only. Using machines with more than 1 GPU or running the training code on more than 1 machines will not use all available hardware*** **Running this demo necessitates at least 400 Gb of local storage space*** **Running this demo on an ml.G4dn.16xlarge instance in region eu-west-1 takes approximately 50min of notebook uptime and approximately 12h of SageMaker Training job execution (excluding the bayesian parameter search, de-activated by default). This represents approximately 6 USD of notebook usage (if running on ml.g4dn.16xlarge) and 72 USD of training API*** **This demo uses non-AWS, open-source libraries including PyTorch, PIL, matplotlib, Torchvision. Use appropriate due diligence to verify if that use fits the software standards and compliance rules in place at your organization** * **This sample is provided for demonstration purposes. Make sure to conduct appropriate testing if derivating this code for your own use-cases. In general it is recommend to isolate development from production environments. Read more in the AWS Well Architected Framework https://aws.amazon.com/architecture/well-architected/** ###Code import json import multiprocessing as mp import os import time import uuid import boto3 from PIL import Image import sagemaker from sagemaker import Session from sagemaker.pytorch import PyTorch from sagemaker import get_execution_role sess = Session() bucket = '<enter an S3 bucket of your choice here>' # SageMaker will use this bucket to store data, script and model checkpoints s3 = boto3.client('s3') ###Output _____no_output_____ ###Markdown 1. Dataset preparation ###Code # Data will be downloaded there, and new folders created. 
Feel free to customize work_dir = '/home/ec2-user/SageMaker' dataset_prefix = 'a2d2_images' data_dir = work_dir + '/' + dataset_prefix # locations used for local testing local_dataset_cache = work_dir + '/a2d2-tmp' local_checkpoint_location = work_dir + '/a2d2-checkpoints' ###Output _____no_output_____ ###Markdown Download files ###Code %%time # Download images. This took 12min on a ml.g4dn.16xlarge instance in eu-west-1 region ! aws s3 cp s3://aev-autonomous-driving-dataset/camera_lidar_semantic.tar $work_dir %%time # Download labels ! aws s3 cp s3://aev-autonomous-driving-dataset/camera_lidar_semantic_instance.tar $work_dir %%time # Download the README ! aws s3 cp s3://aev-autonomous-driving-dataset/README-SemSeg.txt $work_dir ###Output _____no_output_____ ###Markdown UncompressThis takes about 20min ###Code # We create a new folder dedicated to the A2D2 dataset print('Creating folder {}'.format(data_dir)) try: os.mkdir(data_dir) except(FileExistsError): print('Directory already exists') %%time ! tar -xf {work_dir}/camera_lidar_semantic.tar -C $data_dir ###Output _____no_output_____ ###Markdown Analyse dataset structureWe check how labels and images are organized. This was necessary to build an appropriate Dataset class ###Code # Frames are grouped in 23 sequences data_folder = 'camera_lidar_semantic' os.listdir(os.path.join(data_dir, data_folder)) # each sequence contain folders for labels, lidar and camera capture os.listdir(os.path.join(data_dir, data_folder, '20180925_112730')) # each of those folders contain one of multiple folders based on camera that captured the data os.listdir(os.path.join(data_dir, data_folder, '20180925_112730/camera')) # 10 first records of the front center camera capture of the 2018-09-25 11:27:30 sequence os.listdir(os.path.join(data_dir, data_folder, '20180925_112730/camera/cam_front_center'))[:10] # view one image image_id = '000074771' with Image.open(os.path.join(data_dir, data_folder, '20180925_112730/camera/cam_front_center/' + '20180925112730_camera_frontcenter_{}.png'.format(image_id))) as im: im.show() # view associated label with Image.open(os.path.join(data_dir, data_folder, '20180925_112730/label/cam_front_center/' + '20180925112730_label_frontcenter_{}.png'.format(image_id))) as im: im.show() ###Output _____no_output_____ ###Markdown Anomalies to watch out of* On October 2021 record `a2d2_images/camera_lidar_semantic/20180925_135056/label/cam_side_left/20180925135056_label_sideleft_000026512.png` returns a 4-channel image Pre-processTo simplify the ML process, we build a flat JSON manifest mapping, for a given record ID, the path to image and to label ###Code root = os.path.join(data_dir, data_folder) # where we'll read images from relative = os.path.join(dataset_prefix, data_folder) # the image key prefix we'll use to write images in S3 # we sort sequences so that train-test split by sequence index is deterministic sequences = [s for s in os.listdir(root) if s.startswith('2018')] sequences.sort() print(sequences) manifest = {} for s in sequences: cameras = os.listdir(root + '/{}/camera'.format(s)) for c in cameras: images = [f for f in os.listdir(root + '/{}/camera/{}'.format(s, c)) if f.endswith('.png')] for i in images: label_name = i.replace('camera', 'label') im_id = i[:i.find('_')] + '_' + i[i.rfind('.')-9:i.rfind('.')] image_path_local = root + '/{}/camera/{}/{}'.format(s, c, i) label_path_local = root + '/{}/label/{}/{}'.format(s, c, label_name) image_path_manifest = relative + '/{}/camera/{}/{}'.format(s, c, i) label_path_manifest = 
relative + '/{}/label/{}/{}'.format(s, c, label_name) # create record only if both image file and label file exist: if os.path.isfile(image_path_local) and os.path.isfile(label_path_local): manifest[im_id] = {} manifest[im_id]['sequence_id'] = s manifest[im_id]['image_name'] = i manifest[im_id]['label_name'] = label_name # remove the work-dir from the path so that the manifest stays small and generic manifest[im_id]['image_path'] = image_path_manifest manifest[im_id]['label_path'] = label_path_manifest else: print('issue with image {} : -------'.format(image_path_local)) # check if both image and label exist print('image file {} exists: {}'.format(image_path_local, os.path.isfile(image_path_local))) print('label file {} exists: {}'.format(image_path_local, os.path.isfile(image_path_local))) print("Created a dataset manifest with {} records".format(len(manifest))) ###Output _____no_output_____ ###Markdown We then send images to S3 with a multi-processing call. This should take 10-15min on a large G4 instance and results in 139 Gb on S3. You can try to go faster using more workers in the `multiprocessing.Pool(workers)`, but be aware that too much concurrency may cause instability and crashes in your kernel and instance ###Code def send_images_to_s3(image_id): s3.upload_file(Filename=work_dir + '/' + manifest[image_id]['image_path'], Bucket=bucket, Key=manifest[image_id]['image_path']) s3.upload_file(Filename=work_dir + '/' + manifest[image_id]['label_path'], Bucket=bucket, Key=manifest[image_id]['label_path']) %%time with mp.Pool(mp.cpu_count()) as pool: pool.map(send_images_to_s3, manifest.keys()) # we also need to send class_list to S3 s3.upload_file( Filename=root + '/' + 'class_list.json', Bucket=bucket, Key=dataset_prefix + '/metadata/class_list.json') ###Output _____no_output_____ ###Markdown We split the dataset in a training and validation manifest ###Code split = 0.9 train_sequences = sequences[:int(split*len(sequences))] val_sequences = sequences[int(split*len(sequences)):] train_manifest = {k:manifest[k] for k in manifest.keys() if manifest[k]['sequence_id'] in train_sequences} val_manifest = {k:manifest[k] for k in manifest.keys() if manifest[k]['sequence_id'] in val_sequences} print("training set contains {} records".format(len(train_manifest))) print("validation set contains {} records".format(len(val_manifest))) with open(work_dir + "/train_manifest.json", "w") as file: json.dump(train_manifest, file) with open(work_dir + "/val_manifest.json", "w") as file: json.dump(val_manifest, file) for file in ['train_manifest.json', 'val_manifest.json']: s3.upload_file( Filename=work_dir + '/' + file, Bucket=bucket, Key=dataset_prefix + '/metadata/{}'.format(file)) train_path = 's3://{}/'.format(bucket) + dataset_prefix + '/metadata/' print('Training manifests sent to {}'.format(train_path)) ###Output _____no_output_____ ###Markdown 2. Single-GPU trainingWe create the training script as single Python file. To make training code scalable and portable, we create a custom PyTorch Dataset that reads images and segmentation masks directly from S3, and save in local cache in case of later re-use (eg if training with multiple epochs). That was we have a data pipeline that does not need to wait for all dataset to be downloaded locally, but that will read at low-latency after the first epoch.**Note** this DL training code is **far from state-of-the-art**. The goal of this sample is not to reach a good accuracy, but rather to show how to scale custom training jobs in Amazon SageMaker. 
* **Better accuracy** can likely be reached using data augmentation, learning rate scheduling, a better backbone, and adding the auxiliary DeepLabV3 loss. And why not a totally different segmentation model instead of DeepLabV3 * **Better throughput** can likely be reached using a sequential-access dataset, that reads group of records, or the SageMaker Fast File Mode, that streams files upon read request. Also, although I'm configuring it below, I am not sure if float16 precision compute occur and if NVIDIA TensorCores are actually used. This would be an important step to make full use of the computational power of modern NVIDIA cards. Converting labels to grayscale should also help making the dataloading lighter. Offloading data loading to the GPU, for example using NVIDIA DALI, is another axis to explore to boost throughput. Run locally ###Code # create a folder to cache dataset as it is downloaded by the dataset class print('Local dataset cache created at {}'.format(local_dataset_cache)) print('Local checkpoints will be stored at {}'.format(local_checkpoint_location)) try: os.mkdir(local_dataset_cache) except(FileExistsError): print('{} already exists'.format(local_dataset_cache)) try: os.mkdir(local_checkpoint_location) except(FileExistsError): print('{} already exists'.format(local_checkpoint_location)) ###Output _____no_output_____ ###Markdown Single-device codecan be run in a Python process ###Code %%time # test on 20 iterations. # This takes a few minutes. You can see instance activity live using htop or nividia-smi in instance terminal ! python a2d2_code/train.py --dataset $work_dir \ --cache $local_dataset_cache \ --height 604 \ --width 960 \ --checkpoint-dir $local_checkpoint_location \ --batch 12 \ --network deeplabv3_mobilenet_v3_large \ --workers 24 \ --log-freq 20 \ --prefetch 2 \ --bucket $bucket \ --eval-size 10 \ --iterations 20 \ --class-list a2d2_images/camera_lidar_semantic/class_list.json ###Output _____no_output_____ ###Markdown Launch in SageMaker TrainingWe use the SageMaker Python SDK to orchestrate SageMaker Training clusters. 
Note that if you don't want to learn yet another SDK, you can also do exactly the same thing with existing AWS SDKs, for example the AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/sagemaker/create-training-job.html) and boto3 (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.htmlSageMaker.Client.create_training_job) ###Code config = { 'bucket': bucket, 'cache': '/opt/ml/input/data/dataset', 'height': 604, 'width': 960, 'epochs': 10, 'batch': 12, 'prefetch': 1, 'workers': 40, 'eval-size': 36, 'lr': 0.183, 'momentum': 0.928, 'lr_warmup_ratio':0.1, 'lr_decay_per_epoch': 0.3, 'log-freq': 500} # Training time of this job will be approximately 12h token = str(uuid.uuid4())[:10] # we create a unique token to avoid checkpoint collisions in S3 job = PyTorch( entry_point='train.py', source_dir='a2d2_code', role=get_execution_role(), framework_version='1.8.1', instance_count=1, instance_type='ml.g4dn.16xlarge', base_job_name='A2D2-single-GPU-seg-training', py_version='py36', hyperparameters=config, checkpoint_s3_uri='s3://{}/{}/checkpoints'.format(bucket, token), # S3 destination of /opt/ml/checkpoints files output_path='s3://{}/{}'.format(bucket, token), code_location='s3://{}/{}/code'.format(bucket, token), # source_dir code will be staged in S3 there environment={"SMDEBUG_LOG_LEVEL":"off"}, # reduce verbosity of Debugger debugger_hook_config=False, # deactivate debugger to avoid warnings in model artifact disable_profiler=True, # keep running resources to a minimum to avoid permission errors metric_definitions=[ {"Name": "Train_loss", "Regex": "Training_loss: ([0-9.]+).*$"}, {"Name": "Learning_rate", "Regex": "learning rate: ([0-9.]+).*$"}, {"Name": "Val_loss", "Regex": "Val_loss: ([0-9.]+).*$"}, {"Name": "Throughput", "Regex": "Throughput: ([0-9.]+).*$"} ], tags=[{'Key': 'Project', 'Value': 'A2D2_segmentation'}]) # tag the job for experiment tracking ###Output _____no_output_____ ###Markdown SageMaker-managed I/O uploads only dataset metadata (class_list and manifest). The actual records and labels are fetched upon request directly from S3 or local cache via our custom `Dataset` ###Code # we do an asynchronous fit, so the job doesn't keep the client waiting. # closing and shutting down your notebook will not stop this job. # if you want to stop this SageMaker Training job, use an AWS SDK or the console job.fit({'dataset': train_path}, wait=False) ###Output _____no_output_____ ###Markdown Custom Metric Tuning SageMaker Automated Model Tuning is a serverless managed, use-case agnostic parameter search service. With SageMaker AMT (sometimes named HPO - Hyperparameter Optimization) you can tune any parameter declared in the container hyperparameter dictionary (continuous, integer or categorical) and you can tune for any metric (minimize or maximize) that you can regexp from your container or script logs. SageMaker AMT is not limited to model tuning. You can be creative with it, and for example tune jobs to minimize the training time or training cost. See https://aws.amazon.com/blogs/machine-learning/aerobotics-improves-training-speed-by-24-times-per-sample-with-amazon-sagemaker-and-tensorflow/ for a nice example. More info: * https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html * *Amazon SageMaker Automatic Model Tuning: Scalable Gradient-Free Optimization*, Perone et al. 
(https://arxiv.org/abs/2012.08489) ###Code Tuning = False # set to true if you want to test the tuning below print("tuning cell set to {}".format(Tuning)) if Tuning: # we use the SageMaker Tuner from sagemaker.tuner import IntegerParameter, ContinuousParameter tuning_config = { 'bucket': bucket, 'cache': '/opt/ml/input/data/dataset', 'height': 604, 'width': 960, 'epochs': 5, 'prefetch': 1, 'workers': 40, 'eval-size': 36, 'log-freq': 500} tuning_config = PyTorch( entry_point='train.py', source_dir='a2d2_code', role=get_execution_role(), framework_version='1.8.1', instance_count=1, instance_type='ml.g4dn.16xlarge', py_version='py36', max_run=28800, # cap the max runtime at 8h per job hyperparameters=tuning_config, checkpoint_s3_uri='s3://{}/checkpoints'.format(bucket), # S3 destination of /opt/ml/checkpoints files output_path='s3://{}'.format(bucket), code_location='s3://{}/code'.format(bucket), # source_dir code will be staged in S3 there environment={"SMDEBUG_LOG_LEVEL":"off"}, # reduce verbosity of Debugger debugger_hook_config=False, # deactivate debugger to avoid warnings in model artifact disable_profiler=True, # keep running resources to a minimum to avoid permission errors metric_definitions=[ {"Name": "Val_loss", "Regex": "Val_loss: ([0-9.]+).*$"}, ], tags=[{'Key': 'Project', 'Value': 'A2D2_segmentation'}]) # Define exploration boundaries hyperparameter_ranges = { 'lr': ContinuousParameter(0.001, 0.01), 'momentum': ContinuousParameter(0.8, 0.99), 'lr_warmup_ratio': ContinuousParameter(1, 10), 'lr_decay_per_epoch': ContinuousParameter(0.1, 0.8), 'batch': IntegerParameter(6, 12) } # create Optimizer # you can tune for anything you can regexp from your logs # in this sample we minimize the validation loss Optimizer = sagemaker.tuner.HyperparameterTuner( estimator=tuning_config, hyperparameter_ranges=hyperparameter_ranges, base_tuning_job_name='Loss-tuner', objective_type='Minimize', objective_metric_name='Val_loss', strategy='Bayesian', early_stopping_type='Auto', metric_definitions=[ {"Name": "Val_loss", "Regex": "Val_loss: ([0-9.]+).*$"} ], max_jobs=40, max_parallel_jobs=2) Optimizer.fit({'dataset': train_path}, wait=False) print("Tuning job launched") ###Output _____no_output_____ ###Markdown 3. Predict with trained modelto test the trained model, we run inference on couple samples from the validation set. You can run this section on its own once you have a trained model ###Code from matplotlib import pyplot as plt import torch from torchvision import models from torchvision.io import read_image from torchvision.datasets.vision import VisionDataset from torchvision.models.segmentation.deeplabv3 import DeepLabHead from torchvision.transforms import Resize from torchvision.transforms.functional import InterpolationMode ###Output _____no_output_____ ###Markdown Bring a checkpoint from S3In the cell below we download a checkpoint produced by a training job, which could come either from the above-launched training job or from the training job launched from the optional tuning step.To check available checkpoints for a given training job, you can inspect the S3 ARN returned at `CheckpointConfig` by `boto3` `describe_training_job`, or you can also check in the training job detail page the S3 Output Path URL in the "Checkpoint configuration" section ###Code # you need to wait around 15min until you have the first checkpoint showing up in Amazon S3 ! 
aws s3 cp <s3 URI of a checkpoint> $work_dir model = torch.load(os.path.join(work_dir, 'final_model.pth')) # replace with your model name if different model.eval() ###Output _____no_output_____ ###Markdown instantiate the datasetThis is necessary for inference pre-processing (applying same transforms to input and label as at training) ###Code height = 604 width = 960 from a2d2_code.a2d2_utils import A2D2_S3_dataset image_transform = Resize( (height, width), interpolation=InterpolationMode.BILINEAR) target_transform = Resize( (height, width), interpolation=InterpolationMode.NEAREST) train_data = A2D2_S3_dataset( manifest_file=work_dir + '/train_manifest.json', class_list=data_dir + '/camera_lidar_semantic/class_list.json', transform=image_transform, target_transform=target_transform, cache='/home/ec2-user/', height=height, width=width, s3_bucket=bucket) ###Output _____no_output_____ ###Markdown We measure pixel accuracy on couple pictures. IoU is also relevant for segmentation, we leave that for a later iteration :) ###Code plt.rcParams["figure.figsize"] = [15, 7] def pixel_acc(T1, T2): return (T1 == T2).sum()/(T1.size()[0]*T1.size()[1]) # take first 10 pictures from the val_manifest with open(work_dir + '/val_manifest.json') as file: val_manifest = json.load(file) pic_ids = list(val_manifest.keys())[:10] for pic_id in pic_ids: image_path = val_manifest[pic_id]['image_path'] label_path = val_manifest[pic_id]['label_path'] pic = image_transform(read_image(os.path.join(work_dir, image_path))) label = target_transform(read_image(os.path.join(work_dir, label_path))) mask = torch.zeros(height, width) for rgb, cid in train_data.rgb2ids.items(): color_mask = label == torch.Tensor(rgb).reshape([3,1,1]) seg_mask = color_mask.sum(dim=0) == 3 mask[seg_mask] = cid mask = mask.type(torch.int64) pred = model(torch.div(pic, 255).unsqueeze(0).to("cuda:0"))["out"] flat_pred = torch.argmax(pred, dim=1)[0] mask_np = mask.cpu().numpy() flat_pred_np = flat_pred.cpu().numpy() fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle(pic_id) ax1.imshow(mask_np) ax2.imshow(flat_pred_np) print("Image {}: PIXEL ACCURACY: {}".format(pic_id, pixel_acc(flat_pred.cuda(), mask.cuda()))) print("*"*20) ###Output _____no_output_____
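###Markdown
The evaluation above reports only pixel accuracy and notes that IoU is left for a later iteration. A minimal sketch of a mean-IoU helper in the same style as `pixel_acc` is shown below; `num_classes` is taken from the dataset's RGB-to-id mapping, and classes absent from both the prediction and the mask are skipped.
###Code
def mean_iou(pred, target, num_classes):
    # pred and target are HxW tensors of class ids on the same device
    ious = []
    for cls in range(num_classes):
        pred_c = pred == cls
        target_c = target == cls
        union = (pred_c | target_c).sum().item()
        if union == 0:
            continue  # class not present in either tensor
        inter = (pred_c & target_c).sum().item()
        ious.append(inter / union)
    return sum(ious) / max(len(ious), 1)

# example call on the last image of the loop above
print("Mean IoU: {}".format(mean_iou(flat_pred.cuda(), mask.cuda(), len(train_data.rgb2ids))))
###Output
_____no_output_____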
Assignments/DSCI633_Asssignment6.ipynb
###Markdown **Importing the Usual Libraries** ###Code
# importing libraries
import sys
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from pandas import DataFrame
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_samples, silhouette_score
###Output
_____no_output_____
###Markdown Loading the Iris dataset ###Code
# loading the iris dataset from sklearn
from sklearn.datasets import load_iris
iris = load_iris()
df = pd.DataFrame(data=np.c_[iris['data'], iris['target']],
                  columns=iris['feature_names'] + ['target'])
df.head()
df.info()
df.isnull().sum()
df.columns
df.groupby('target').size()
df.describe()
###Output
_____no_output_____
###Markdown GMM CLUSTERING ###Code
# GMM CLUSTERING
from sklearn.mixture import GaussianMixture
from sklearn import mixture
gmm = GaussianMixture(n_components=3)  # Gaussian mixture model
gmm.fit(df)
y_gmm_cluster = gmm.predict(df)
y_gmm_cluster

blob_centers = np.array(
    [[ 0.2, 2.3],
     [-1.5, 2.3],
     [-2.8, 1.8],
     [-2.8, 2.8],
     [-2.8, 1.3]])
blob_std = np.array([0.4, 0.3, 0.1, 0.1, 0.1])

from sklearn.datasets import make_blobs
X, y = sklearn.datasets.make_blobs(
    n_samples=2000, centers=blob_centers, cluster_std=blob_std,
    shuffle=True, random_state=None
)

# mean of the predicted cluster labels (note: this is not an accuracy measure)
print(np.sum(y_gmm_cluster)/len(y_gmm_cluster))

plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
###Output
_____no_output_____
###Markdown **KMEANS** ###Code
kms = KMeans(n_clusters = 4, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_kms = kms.fit_predict(X)
print('Centroids :')
print(kms.cluster_centers_)
plt.scatter(kms.cluster_centers_[:, 0], kms.cluster_centers_[:, 1], s = 100, c = 'red', label = 'Centroids')
###Output
_____no_output_____
###Markdown New Instances ###Code
X_new = np.array([[0, 2], [3, 2], [-3, 3], [-3, 2.5]])
km = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_new = km.fit_predict(X_new)
print('Y_new:', y_new)
# print('Centroids :')
# print(km.cluster_centers_)
# plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:,1], s = 100, c = 'red', label = 'Centroids')

import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
voro = Voronoi(X_new)
fig = voronoi_plot_2d(voro)
fig = voronoi_plot_2d(voro, show_vertices=False, line_colors='orange', line_width=2, line_alpha=0.6, point_size=2)
plt.show()
###Output
_____no_output_____
###Markdown **K=3** ###Code
k = 3
kmodel1 = KMeans(n_clusters=k, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
kmodel1.fit(X)
kmodel1.cluster_centers_
plt.scatter(kmodel1.cluster_centers_[:, 0], kmodel1.cluster_centers_[:, 1], s = 100, c = 'red', label = 'Centroids')
y_kmodel1 = kmodel1.fit_predict(X)
print(y_kmodel1)
###Output
[0 1 0 ...
2 0 0] ###Markdown **K=8** ###Code k=8 kmodel8 = KMeans(n_clusters = k, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_kmodel8 = kmodel8.fit_predict(X) print('Centroids :') print(kmodel8.cluster_centers_) plt.scatter(kmodel8.cluster_centers_[:, 0], kmodel8.cluster_centers_[:,1], s = 100, c = 'red', label = 'Centroids') kmodel8.inertia_ ###Output _____no_output_____ ###Markdown **Plotting inertia as a function of k** ###Code # Plotting the k vs inertias inertia = [] for i in range(1,10): kmeanss = KMeans(i) kmeanss.fit(X) inertia_iteration = kmeanss.inertia_ inertia.append(inertia_iteration) print("Inertias:",inertia) plt.plot(inertia) plt.title("Elbow method : various n_cluster values: ") plt.xlabel("Number of clusters - k") plt.ylabel("inertias") plt.show() ###Output _____no_output_____
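###Markdown **Silhouette scores for various k**
The imports at the top of this notebook bring in `silhouette_score`, but it is never used. As a complement to the elbow plot above, the sketch below (an addition, not part of the original assignment code) computes the average silhouette score over the same range of cluster counts on the blob data `X`.
###Code
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

sil_scores = []
for k in range(2, 10):  # silhouette needs at least 2 clusters
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(X)
    sil_scores.append(silhouette_score(X, labels))
    print(k, sil_scores[-1])

plt.plot(range(2, 10), sil_scores)
plt.title("Silhouette score for various n_cluster values")
plt.xlabel("Number of clusters - k")
plt.ylabel("silhouette score")
plt.show()
###Output
_____no_output_____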
Chapter4_TheGreatestTheoremNeverTold/Ch4 Exercises.ipynb
###Markdown Question 1 1\. How would you estimate the quantity $E\left[ \cos{X} \right]$, where $X \sim \text{Exp}(4)$? What about $E\left[ \cos{X} | X \lt 1\right]$, i.e. the expected value *given* we know $X$ is less than 1? Would you need more samples than the original samples size to be equally accurate? Uncomment images in this cell to see graphs. ###Code import scipy.stats as stats from numpy import cos exp = stats.expon(scale=4) X = exp.rvs(10) Y = cos(X) X, cos(X), cos(X[X<1]) # E(cos(X)) <= mean(cos(X_i)) for i in a sample cos(X).mean() # E(cos(X) | X < 1) <= mean(cos(X_j)) for j in a sample where all X_j < 1 # Yes, this requires more samples overall since we're filtering many out cos(X[X<1]).mean() ###Output _____no_output_____
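###Markdown
To see the Law of Large Numbers at work here, the sketch below repeats the same Monte Carlo estimates with a much larger sample. It keeps the notebook's `stats.expon(scale=4)` parameterization (note that if $\text{Exp}(4)$ is meant as rate $\lambda = 4$, the scipy call would be `stats.expon(scale=1./4)` instead, since scipy's `scale` is $1/\lambda$). The conditional estimate only uses the draws with $X < 1$, which is why matching its precision needs more raw samples overall.
###Code
import numpy as np
import scipy.stats as stats

N = 100000
X_big = stats.expon(scale=4).rvs(N)

print("estimate of E[cos X]      :", np.cos(X_big).mean())
kept = X_big < 1
print("estimate of E[cos X | X<1]:", np.cos(X_big[kept]).mean(),
      "using only", kept.sum(), "of", N, "draws")
###Output
_____no_output_____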
Tutorial/ch4.ipynb
###Markdown Chapter 4: Bayesian Statistics (Working in progress proof of concept) ###Code # These two lines are necessary only if gempy is not installed import sys, os sys.path.append("../") # Importing gempy import gempy as gp # Embedding matplotlib figures into the notebooks %matplotlib inline # Aux imports import numpy as np geo_data = gp.read_pickle('../input_data/sandstone.pickle') # Assigning series to formations as well as their order (timewise) gp.set_data_series(geo_data, {"EarlyGranite_Series": 'EarlyGranite', "BIF_Series":('SimpleMafic2', 'SimpleBIF'), "SimpleMafic_Series":'SimpleMafic1'}, order_series = ["EarlyGranite_Series", "BIF_Series", "SimpleMafic_Series"], verbose=1) ###Output _____no_output_____ ###Markdown Setting uncertainties adding the values to the Dataframe. ###Code geo_data.interfaces['X_std'] = None geo_data.interfaces['Y_std'] = 0 geo_data.interfaces['Z_std'] = 100 geo_data.foliations['X_std'] = None geo_data.foliations['Y_std'] = 0 geo_data.foliations['Z_std'] = 0 geo_data.foliations['dip_std'] = 10 geo_data.foliations['azimuth_std'] = 10 geo_data.foliations.head() # input_data_T = interp_data.interpolator.tg.input_parameters_list() # input_data_P = interp_data.get_input_data(u_grade=[3, 3]) # select = interp_data.interpolator.pandas_rest_layer_points['formation'] == 'Reservoir' interp_data = gp.InterpolatorInput(geo_data, compile_theano=False, u_grade=[9,9,9]) ###Output I am in the setting float32 I am here [2, 2] ###Markdown Now the generation of the geomodel will be an operation embedded in a larger tree. ###Code import theano import theano.tensor as T geomodel = theano.OpFromGraph(interp_data.interpolator.tg.input_parameters_list(), [interp_data.interpolator.tg.whole_block_model(0)], on_unused_input='ignore', ) ###Output _____no_output_____ ###Markdown Because now the GeMpy model is a theano operation and not a theano function, to call it we need to use theano variables (with theano functions we call them with python variables). This is very easy to modify, we just need to use theano shared to convert our python input data into theano variables.The pymc3 objects are already theano variables (pm.Normal and so on). Now the trick is that using the theano function T.set_subtensor, we can change one deterministic value of the input arrays(the ones printed in the cell above) by a stochastic pymc3 object. Then with the new arrays we just have to call the theano operation and pymc will do the rest ###Code # This is the creation of the model import pymc3 as pm theano.config.compute_test_value = 'off' #theano.config.warn_float64 = 'warn' model = pm.Model() with model: # We create the Stochastic parameters. In this case only the Z position # of the interfaces Z_rest = pm.Normal('Z_unc_rest', interp_data.interpolator.pandas_rest_layer_points['Z'].as_matrix().astype('float32'), interp_data.interpolator.pandas_rest_layer_points['Z_std'].as_matrix().astype('float32'), dtype='float32', shape = (66)) Z_ref = pm.Normal('Z_unc_ref', interp_data.interpolator.ref_layer_points[:, 2].astype('float32'), interp_data.interpolator.ref_layer_points[:, 2].astype('float32'), dtype='float32', shape = (66)) # We convert a python variable to theano.shared input_sh = [] for i in interp_data.get_input_data(): input_sh.append(theano.shared(i)) # We add the stochastic value to the correspondant array. rest array is # a n_points*3 (XYZ) array. We only want to change Z in this case. 
    input_sh[4] = T.set_subtensor(
        input_sh[4][:, 2], Z_ref)

    input_sh[5] = T.set_subtensor(
        input_sh[5][:, 2], Z_rest)

    # With the stochastic parameters we create the geomodel result:
    geo_model = pm.Deterministic('GemPy', geomodel(input_sh[0], input_sh[1], input_sh[2],
                                                   input_sh[3], input_sh[4], input_sh[5]))

theano.config.compute_test_value = 'ignore'
# This is the sampling.
# Before running this for long, check in the theanograf module that the
# theano optimizer flag is set to 'fast_run'!
with model:
    # backend = pm.backends.ndarray.NDArray('geomodels')
    step = pm.NUTS()
    trace = pm.sample(30, tune=10, init=None, step=step)
import matplotlib.pyplot as plt
# The trace only holds as many geomodels as were drawn above, so iterate over
# its actual length instead of a hard-coded 100 (which would raise an IndexError).
gempy_traces = trace.get_values('GemPy')
for i in range(len(gempy_traces)):
    gp.plot_section(geo_data, gempy_traces[i][-1, 0, :], 13, direction='y', plot_data=False)
    plt.show()
###Output
_____no_output_____
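###Markdown
To make the `T.set_subtensor` trick easier to follow in isolation, here is a minimal, self-contained sketch (the toy array, its shape and the variable names are illustrative, not GemPy data): a shared array gets its Z column replaced by a symbolic vector, producing a new symbolic array that can be fed to any theano Op. This is the same pattern applied to `input_sh` above.
###Code
import numpy as np
import theano
import theano.tensor as T

xyz = theano.shared(np.arange(12., dtype='float32').reshape(4, 3))  # toy n_points*3 array
z_new = T.vector('z_new', dtype='float32')                          # stands in for a pymc3 random variable
xyz_stochastic = T.set_subtensor(xyz[:, 2], z_new)                  # swap only the Z column

f_demo = theano.function([z_new], xyz_stochastic)
print(f_demo(np.zeros(4, dtype='float32')))
###Output
_____no_output_____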
Examples/ExtraSensory-ComparingNormalization.ipynb
###Markdown
Comparing Normalization on the ExtraSensory dataset

Set up the Notebook
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline

import importlib, sys, os
sys.path.insert(0, os.path.abspath('..'))

if(importlib.util.find_spec("mFlow") is None):
    !git clone https://github.com/mlds-lab/mFlow.git
    !pip install ./mFlow
else:
    print("mFlow module found")
###Output
mFlow module found
###Markdown
Import modules
###Code
from mFlow.Blocks.data_loader_extrasensory import extrasensory_data_loader
from mFlow.Blocks.filter import MisingLabelFilter, MisingDataColumnFilter, Take
from mFlow.Blocks.imputer import Imputer
from mFlow.Blocks.normalizer import Normalizer
from mFlow.Blocks.experimental_protocol import ExpTrainTest, ExpCV, ExpWithin
from mFlow.Blocks.results_analysis import ResultsConcat, ResultsCVSummarize, DataYieldReport
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from mFlow.Workflow.workflow import workflow
import mFlow.Workflow.compute_graph
###Output
_____no_output_____
###Markdown
Define the workflow

This workflow uses a basic train/test experiment to compare the use of feature normalization to no feature normalization. The model is logistic regression with a fixed regularization hyper-parameter. Linear models are not sensitive to feature scaling; however, when regularization is added, the regularizer is sensitive to feature scaling, so different results are obtained depending on whether and what type of normalization is applied.

The workflow includes a column filter that screens out feature dimensions that are less than 20% observed, a missing label filter that removes instances without labels, and mean imputation. Next, the workflow includes two branches, one that performs normalization and one that does not. Each branch then executes a train/test experiment. Data loading and filtering are shared across the two branches of the workflow. The workflow computation graph is then created and displayed along with the results. In this experiment, the use of normalization results in almost a 15% performance improvement relative to the branch without normalization.
###Code
metrics = [accuracy_score, f1_score, precision_score, recall_score]

df_raw  = extrasensory_data_loader(label="SLEEPING");
df_cf   = MisingDataColumnFilter(df_raw);
df_lf   = MisingLabelFilter(df_cf);
df_imp  = Imputer(df_lf, method="mean")
df_norm = Normalizer(df_imp)

models = {"No Norm - LR(C=1e-4)": LogisticRegression(solver="lbfgs",max_iter=100)}
res_no = ExpTrainTest(df_imp, models, metrics=metrics);

models = {"Norm - LR(C=1e-4)": LogisticRegression(solver="lbfgs",max_iter=100)}
res_yes = ExpTrainTest(df_norm, models, metrics=metrics);

res_cat = ResultsConcat(res_no, res_yes)
flow = workflow({"results":res_cat})
flow.draw(); plt.show();

output=flow.run();
output['results']['report']
###Output
_____no_output_____
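###Markdown
As a self-contained illustration of why the regularizer cares about feature scale (independent of mFlow and the ExtraSensory data; the synthetic data and penalty strength below are arbitrary choices), the same L2-penalized logistic regression is fit on raw and on standardized copies of one dataset whose features live on very different scales.
###Code
import numpy as np
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X = X * np.logspace(0, 4, X.shape[1])   # give the columns wildly different scales

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

raw = LogisticRegression(C=0.01, max_iter=1000).fit(X_tr, y_tr)
scaler = StandardScaler().fit(X_tr)
std = LogisticRegression(C=0.01, max_iter=1000).fit(scaler.transform(X_tr), y_tr)

print("raw features:         ", accuracy_score(y_te, raw.predict(X_te)))
print("standardized features:", accuracy_score(y_te, std.predict(scaler.transform(X_te))))
###Output
_____no_output_____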
ML Projects/LSTM_Stock_Prediction_Model.ipynb
###Markdown IMPORTING LIBRARIES AND READING THE DATASET ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import math from sklearn.preprocessing import MinMaxScaler import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM df_nasdaq = pd.read_csv("NDAQ.csv") ###Output _____no_output_____ ###Markdown PRE-PROCESSING DATASET ###Code # Extracting the closing prices from DataFrame df = df_nasdaq.reset_index()['Close'] # Plotting the values plt.plot(df) # Scaling the data between 0-1 # Because LSTM model works better with scaled data scaler = MinMaxScaler(feature_range = (0, 1)) df = scaler.fit_transform(np.array(df).reshape(-1, 1)) # SPLITTING THR DATA INTO TRAINING AND TESTING SET split_size = 0.8 training_size = int(len(df)*split_size) train_data = df[0:training_size, :] test_data = df[training_size:, :1] # PRE-PROCESSING TRAINING AND TESTING DATA INTO X (FEATURES) AND Y (VALUES) timestep = 25 def preprocess_data(dataset, timestep): X_train, Y_train = [], [] for i in range(len(dataset) - timestep-1): X_temp = dataset[i:(i+timestep), 0] Y_temp = dataset[i+timestep, 0] X_train.append(X_temp) Y_train.append(Y_temp) return np.array(X_train), np.array(Y_train) x_train, y_train = preprocess_data(train_data, timestep) x_test, y_test = preprocess_data(test_data, timestep) # Converting 2D Array into 3D Array (FOR fitting in LSTM Model) x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1 ) x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1 ) ###Output _____no_output_____ ###Markdown CREATING A STACKED LSTM MODEL ###Code lstm_model = tf.keras.models.Sequential([ tf.keras.layers.Input(shape=x_train.shape[-2:]), tf.keras.layers.LSTM(128, activation='relu', return_sequences=True), tf.keras.layers.Dropout(0.1), tf.keras.layers.LSTM(64, activation='relu', return_sequences=True), tf.keras.layers.Dropout(0.1), tf.keras.layers.LSTM(32, activation='relu', return_sequences=False), tf.keras.layers.Dense(1) ]) lstm_model.compile(optimizer='adam', loss='mse') lstm_model.summary() # CREATING A LEARNING RATE SCHEDULER : def scheduler(epoch, lr): rate = 1 - 0.02 if epoch%2 == 0: return lr*rate return lr LRscheduler = LearningRateScheduler(scheduler) callbacks = [LRscheduler] # Fitting the model on Training Dataset history = lstm_model.fit(x_train, y_train, epochs = 50, validation_data=(x_test, y_test), verbose = 2, callbacks = callbacks) ###Output Epoch 1/50 31/31 - 6s - loss: 0.0141 - val_loss: 0.0505 Epoch 2/50 31/31 - 2s - loss: 0.0013 - val_loss: 0.0060 Epoch 3/50 31/31 - 2s - loss: 0.0010 - val_loss: 0.0055 Epoch 4/50 31/31 - 2s - loss: 9.6615e-04 - val_loss: 0.0043 Epoch 5/50 31/31 - 2s - loss: 9.7562e-04 - val_loss: 0.0056 Epoch 6/50 31/31 - 2s - loss: 8.7398e-04 - val_loss: 0.0017 Epoch 7/50 31/31 - 2s - loss: 8.3948e-04 - val_loss: 0.0020 Epoch 8/50 31/31 - 2s - loss: 7.9806e-04 - val_loss: 0.0014 Epoch 9/50 31/31 - 2s - loss: 7.6924e-04 - val_loss: 0.0012 Epoch 10/50 31/31 - 2s - loss: 7.9487e-04 - val_loss: 0.0023 Epoch 11/50 31/31 - 2s - loss: 7.8287e-04 - val_loss: 9.8069e-04 Epoch 12/50 31/31 - 2s - loss: 7.1353e-04 - val_loss: 9.3197e-04 Epoch 13/50 31/31 - 2s - loss: 7.0356e-04 - val_loss: 7.8205e-04 Epoch 14/50 31/31 - 2s - loss: 6.7412e-04 - val_loss: 0.0015 Epoch 15/50 31/31 - 2s - loss: 7.0916e-04 - val_loss: 7.0059e-04 Epoch 16/50 31/31 - 2s - loss: 5.8889e-04 - val_loss: 
8.5027e-04 Epoch 17/50 31/31 - 2s - loss: 5.5553e-04 - val_loss: 0.0022 Epoch 18/50 31/31 - 2s - loss: 5.9148e-04 - val_loss: 5.9416e-04 Epoch 19/50 31/31 - 2s - loss: 5.5036e-04 - val_loss: 0.0011 Epoch 20/50 31/31 - 2s - loss: 5.2173e-04 - val_loss: 7.0358e-04 Epoch 21/50 31/31 - 2s - loss: 5.0134e-04 - val_loss: 8.0050e-04 Epoch 22/50 31/31 - 2s - loss: 5.8387e-04 - val_loss: 0.0023 Epoch 23/50 31/31 - 2s - loss: 4.5085e-04 - val_loss: 0.0014 Epoch 24/50 31/31 - 2s - loss: 4.4203e-04 - val_loss: 0.0012 Epoch 25/50 31/31 - 2s - loss: 4.2661e-04 - val_loss: 7.0786e-04 Epoch 26/50 31/31 - 2s - loss: 4.2786e-04 - val_loss: 0.0014 Epoch 27/50 31/31 - 2s - loss: 4.7925e-04 - val_loss: 0.0010 Epoch 28/50 31/31 - 2s - loss: 4.3548e-04 - val_loss: 0.0037 Epoch 29/50 31/31 - 2s - loss: 4.5917e-04 - val_loss: 0.0091 Epoch 30/50 31/31 - 2s - loss: 4.6815e-04 - val_loss: 0.0011 Epoch 31/50 31/31 - 2s - loss: 4.3071e-04 - val_loss: 0.0018 Epoch 32/50 31/31 - 2s - loss: 3.7778e-04 - val_loss: 0.0022 Epoch 33/50 31/31 - 2s - loss: 4.0354e-04 - val_loss: 0.0013 Epoch 34/50 31/31 - 2s - loss: 3.7075e-04 - val_loss: 8.1459e-04 Epoch 35/50 31/31 - 2s - loss: 3.5730e-04 - val_loss: 0.0017 Epoch 36/50 31/31 - 2s - loss: 3.4503e-04 - val_loss: 0.0021 Epoch 37/50 31/31 - 2s - loss: 3.7148e-04 - val_loss: 0.0034 Epoch 38/50 31/31 - 2s - loss: 3.8819e-04 - val_loss: 0.0019 Epoch 39/50 31/31 - 2s - loss: 4.3031e-04 - val_loss: 0.0019 Epoch 40/50 31/31 - 2s - loss: 3.7253e-04 - val_loss: 0.0028 Epoch 41/50 31/31 - 2s - loss: 3.8415e-04 - val_loss: 0.0063 Epoch 42/50 31/31 - 2s - loss: 3.9213e-04 - val_loss: 0.0035 Epoch 43/50 31/31 - 2s - loss: 3.6305e-04 - val_loss: 0.0029 Epoch 44/50 31/31 - 2s - loss: 3.5576e-04 - val_loss: 0.0049 Epoch 45/50 31/31 - 2s - loss: 3.4676e-04 - val_loss: 0.0032 Epoch 46/50 31/31 - 2s - loss: 3.3803e-04 - val_loss: 0.0034 Epoch 47/50 31/31 - 2s - loss: 3.4917e-04 - val_loss: 0.0037 Epoch 48/50 31/31 - 2s - loss: 3.3968e-04 - val_loss: 0.0022 Epoch 49/50 31/31 - 2s - loss: 3.2893e-04 - val_loss: 0.0073 Epoch 50/50 31/31 - 2s - loss: 3.5916e-04 - val_loss: 0.0044 ###Markdown EVALUATING THE MODEL ###Code test_predict = lstm_model.predict(x_test) train_predict = lstm_model.predict(x_train) import seaborn as sns sns.set_style("darkgrid") plt.plot(test_predict, label = "Predicted") plt.plot(test_data[0:len(test_predict)], label = "Actual") plt.legend() plt.plot(train_predict, label = "Predicted") plt.plot(train_data[0:len(train_predict)], label = "Actual") plt.legend() ###Output _____no_output_____ ###Markdown PREDICTING FUTURE PRICES ###Code x_input = test_data[len(test_data)-timestep: ].reshape(1,-1) temp_input = list(x_input) temp_input = temp_input[0].tolist() lst_output = [] for i in range(30): if(len(temp_input)>timestep): print(type(temp_input[0])) x_input = np.array(temp_input[1:]) x_input = x_input.reshape(-1, 1) x_input = x_input.reshape((1, timestep, 1)) # y_pred = lstm_model.predict(x_input, verbose = 0) temp_input.append(y_pred[0][0].tolist()) temp_input = temp_input[1:] lst_output.append(y_pred[0].tolist()) else: x_input = x_input.reshape((1, timestep, 1)) y_pred = lstm_model.predict(x_input, verbose = 0) temp_input.append(y_pred[0][0].tolist()) lst_output.extend(y_pred[0].tolist()) new_lst = [] for i in range(len(lst_output)): if i == 0: new_lst.append(lst_output[i]) else: new_lst.append(lst_output[i][0]) future = np.arange(100, 131) prev = np.arange(0, 100) new_lst = np.array(new_lst).reshape(-1, 1) prev = np.array(prev).reshape(-1, 1) future = 
np.array(future[1:]).reshape(-1, 1)

plt.plot(future, scaler.inverse_transform(new_lst))
plt.plot(prev, scaler.inverse_transform(df[1159: ]))

df_nasdaq = df_nasdaq.reset_index()['Close']
df_predict = df_nasdaq.tolist()
df_predict.extend(scaler.inverse_transform(new_lst))

import seaborn as sns
sns.set_style("darkgrid")
plt.plot(df_predict[1159:])
plt.axvspan(100, 130, color='lime', alpha=0.2)   # shaded region marks the forecasted days
plt.xlabel("Days")                               # x-axis is time, y-axis is price
plt.ylabel("Stock Price ($)")
###Output
_____no_output_____
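###Markdown
The recursive forecast loop above can be written more compactly; this is a sketch of the same idea (roll a fixed-length window forward, feeding each prediction back in as the newest input), reusing `lstm_model`, `test_data`, `scaler` and `timestep` from the cells above. The 30-day horizon is the same arbitrary choice as before, and the variable names here are new.
###Code
# Hypothetical compact rewrite of the recursive forecast (same technique, new names).
window = test_data[-timestep:].reshape(-1).tolist()   # last `timestep` scaled prices
forecast = []
for _ in range(30):
    x = np.array(window[-timestep:]).reshape(1, timestep, 1)
    next_scaled = float(lstm_model.predict(x, verbose=0)[0, 0])
    forecast.append(next_scaled)
    window.append(next_scaled)                        # feed the prediction back in

forecast_prices = scaler.inverse_transform(np.array(forecast).reshape(-1, 1))
print(forecast_prices[:5])
###Output
_____no_output_____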
a-simple-xgboost-model-v4-2.ipynb
###Markdown OverviewThis notebook works on the IEEE-CIS Fraud Detection competition. Here I build a simple XGBoost model based on a balanced dataset. Lessons:. keep the categorical variables as single items. Use a high max_depth for xgboost (maybe 40) Ideas to try:. train divergence of expected value (eg. for TransactionAmt and distance based on the non-fraud subset (not all subset as in the case now). try using a temporal approach to CV ###Code # all imports necessary for this notebook %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import random import gc import copy import missingno as msno import xgboost from xgboost import XGBClassifier, XGBRegressor from sklearn.model_selection import StratifiedKFold, cross_validate, train_test_split from sklearn.metrics import roc_auc_score, r2_score import warnings warnings.filterwarnings('ignore') import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # Helpers def seed_everything(seed=0): '''Seed to make all processes deterministic ''' random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) def drop_correlated_cols(df, threshold, sample_frac = 1): '''Drops one of two dataframe's columns whose pairwise pearson's correlation is above the provided threshold''' if sample_frac != 1: dataset = df.sample(frac = sample_frac).copy() else: dataset = df col_corr = set() # Set of all the names of deleted columns corr_matrix = dataset.corr() for i in range(len(corr_matrix.columns)): if corr_matrix.columns[i] in col_corr: continue for j in range(i): if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr): colname = corr_matrix.columns[i] # getting the name of column col_corr.add(colname) del dataset gc.collect() df.drop(columns = col_corr, inplace = True) def calc_feature_difference(df, feature_name, indep_features, min_r2 = 0.1, min_r2_improv = 0, frac1 = 0.1, max_depth_start = 2, max_depth_step = 4): from copy import deepcopy print("Feature name %s" %feature_name) #print("Indep_features %s" %indep_features) is_imrpoving = True curr_max_depth = max_depth_start best_r2 = float("-inf") clf_best = np.nan while is_imrpoving: clf = XGBRegressor(max_depth = curr_max_depth) rand_sample_indeces = df[df[feature_name].notnull()].sample(frac = frac1).index clf.fit(df.loc[rand_sample_indeces, indep_features], df.loc[rand_sample_indeces, feature_name]) rand_sample_indeces = df[df[feature_name].notnull()].sample(frac = frac1).index pred_y = clf.predict(df.loc[rand_sample_indeces, indep_features]) r2Score = r2_score(df.loc[rand_sample_indeces, feature_name], pred_y) print("%d, R2 score %.4f" % (curr_max_depth, r2Score)) curr_max_depth = curr_max_depth + max_depth_step if r2Score > best_r2: best_r2 = r2Score clf_best = deepcopy(clf) if r2Score < best_r2 + (best_r2 * min_r2_improv) or (curr_max_depth > max_depth_start * max_depth_step and best_r2 < min_r2 / 2): is_imrpoving = False print("The best R2 score of %.4f" % ( best_r2)) if best_r2 > min_r2: pred_feature = clf_best.predict(df.loc[:, indep_features]) return (df[feature_name] - pred_feature) else: return df[feature_name] seed_everything() pd.set_option('display.max_columns', 500) master_df = pd.read_csv('/kaggle/input/ieee-preprocessed/master_df_top_300.csv') master_df.head() ###Output _____no_output_____ ###Markdown master_df.shape ###Code for col in master_df.select_dtypes(exclude='number').columns: master_df[col] = 
master_df[col].astype('category').cat.codes train_balanced = master_df[master_df['isFraud'].notnull()] temp_list_to_drop = [] temp_list_to_drop.extend(['isFraud', 'TransactionID', 'TransactionDT', 'is_train_df']) print(train_balanced.shape) clf = XGBClassifier(max_depth=50) clf.fit(train_balanced.drop(columns=temp_list_to_drop), train_balanced['isFraud']) gc.collect() # prepare submission temp_list_to_drop = [] #temp_list_to_drop = list(cols_cat) temp_list_to_drop.extend(['isFraud', 'TransactionID', 'TransactionDT']) temp_list_to_include = list(set(master_df.columns).difference(set(temp_list_to_drop))) temp_list_to_drop = [] #temp_list_to_drop = list(cols_cat) temp_list_to_drop.extend(['isFraud', 'TransactionID', 'TransactionDT']) temp_list_to_include = list(train_balanced.drop(columns=temp_list_to_drop).columns) temp_list_to_drop = [] #temp_list_to_drop = list(cols_cat) temp_list_to_drop.extend(['isFraud', 'TransactionID', 'TransactionDT', 'is_train_df']) counter_from = master_df.loc[master_df['is_train_df']==0, 'isFraud'].index[0] len_master_df = len(master_df) print(counter_from) print(len_master_df) print('start!!') while counter_from < len_master_df: print(counter_from) counter_to = counter_from + 10000 pred = pd.DataFrame() #print(len(master_df['isFraud'].loc[counter_from:counter_to])) #print(len(master_df.loc[counter_from:counter_to, [col for col in master_df.columns if col not in temp_list_to_drop]])) master_df['isFraud'].loc[counter_from:counter_to] = clf.predict_proba(master_df.loc[counter_from:counter_to, [col for col in master_df.columns if col not in temp_list_to_drop]])[:, 1] counter_from += 10000 gc.collect() #print(temp_list_to_include) #sample_submission.head() counter_from = master_df.loc[master_df['is_train_df']==0, 'isFraud'].index[0] submission = pd.DataFrame(master_df[['TransactionID', 'isFraud']].loc[counter_from:]).reset_index(drop = True) submission.head() submission.describe() submission.to_csv('submission.csv', index=False) ###Output _____no_output_____
Python-Week 3/18 august 2021 Day 8.ipynb
###Markdown
OOP day 2
###Code
class student():
    def __init__(self,name,regd):
        self.name=name
        self.regd=regd

y=student('kalinga',500)
y.name

class student():
    def __init__(self,name,regd):
        self.name=name
        self.regd=regd
    def xyz(self):
        print(f"hello! I am {self.name} and regd no is {self.regd}")

z=student('kalinga',500)
z.xyz()
###Output
hello! I am kalinga and regd no is 500
###Markdown
class object attribute
###Code
class student():
    course = "intro to ML"  # course is an attribute defined on the class (shared by all instances)
    def __init__(self,name,regd):
        self.name=name
        self.regd=regd
    def xyz(self):
        print(f"hello! I am {self.name} and regd no is {self.regd}")

z=student('krishna',420)
z.name

class student():
    def __init__(self,name,regd):
        self.name=name
        self.regd=regd
    def xyz(self,score):
        print(f"hello! I am {self.name} and regd no is {self.regd} and score is {score}")

z=student('Raj',122)
z.xyz(10)
###Output
hello! I am Raj and regd no is 122 and score is 10
###Markdown
WAP (write a program) to calculate the area and circumference of a circle.

Attribute: radius. Method circumference -> evaluate the circumference of the circle.
###Code
class circle():
    def __init__(self,r):
        self.r=r
        self.area=3.14*(self.r)*(self.r)
    def circumference(self):
        print(f"circumference is {2*3.14*(self.r)}")

z=circle(2)
z.circumference()
z.area   # area is an instance attribute, not a method, so it is accessed without parentheses
###Output
_____no_output_____
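###Markdown
A possible follow-up sketch (not part of the original exercise): the same circle class written so that the methods return values instead of printing them, and use `math.pi` instead of 3.14. Returning values makes the results reusable in later calculations.
###Code
import math

class Circle():
    def __init__(self, r):
        self.r = r

    def area(self):
        return math.pi * self.r ** 2

    def circumference(self):
        return 2 * math.pi * self.r

c = Circle(2)
c.area(), c.circumference()
###Output
_____no_output_____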
01_Getting_&_Knowing_Your_Data/Occupation/My_Solution.ipynb
###Markdown Ex3 - Getting and Knowing your Data This time we are going to pull data directly from the internet.Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). Step 3. Assign it to a variable called users and use the 'user_id' as index ###Code users = pd.read_csv("https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user", sep="|", index_col="user_id") ###Output _____no_output_____ ###Markdown Step 4. See the first 25 entries ###Code users.head(25) ###Output _____no_output_____ ###Markdown Step 5. See the last 10 entries ###Code users.tail(10) ###Output _____no_output_____ ###Markdown Step 6. What is the number of observations in the dataset? ###Code len(users) ###Output _____no_output_____ ###Markdown Step 7. What is the number of columns in the dataset? ###Code users.shape[1] ###Output _____no_output_____ ###Markdown Step 8. Print the name of all the columns. ###Code users.columns ###Output _____no_output_____ ###Markdown Step 9. How is the dataset indexed? ###Code users.index ###Output _____no_output_____ ###Markdown Step 10. What is the data type of each column? ###Code users.dtypes ###Output _____no_output_____ ###Markdown Step 11. Print only the occupation column ###Code users.occupation ###Output _____no_output_____ ###Markdown Step 12. How many different occupations are in this dataset? ###Code users.occupation.nunique() ###Output _____no_output_____ ###Markdown Step 13. What is the most frequent occupation? ###Code users.occupation.mode() ###Output _____no_output_____ ###Markdown Step 14. Summarize the DataFrame. ###Code users.describe() ###Output _____no_output_____ ###Markdown Step 15. Summarize all the columns ###Code users.describe(include="all") ###Output _____no_output_____ ###Markdown Step 16. Summarize only the occupation column ###Code users.occupation.describe() ###Output _____no_output_____ ###Markdown Step 17. What is the mean age of users? ###Code users.age.mean() ###Output _____no_output_____ ###Markdown Step 18. What is the age with least occurrence? ###Code age_values = users.age.value_counts() age_values[age_values == age_values.min()] ###Output _____no_output_____
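###Markdown
A few alternative idioms for the steps above (optional sketch, same `users` DataFrame): `value_counts()` answers both the most frequent occupation and the least-occurring age directly.
###Code
# Step 13 alternative: most frequent occupation
print(users.occupation.value_counts().idxmax())

# Step 18 alternative: age(s) with the fewest occurrences
age_counts = users.age.value_counts()
print(age_counts[age_counts == age_counts.min()])
###Output
_____no_output_____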
Oracle_Jupyter/Oracle_IPython_SQL_magic.ipynb
###Markdown SQL Magic extension for Oracle and IPython- An example of how to query Oracle from IPython using SQL magic extensions- This is a straighforward interface for running SQL- The results integrate easily into the python environment and with [pandas](http://pandas.pydata.org/)- Dependencies: needs an [Oracle client installation](http://www.oracle.com/technetwork/topics/linuxx86-64soft-092277.html), [cx_Oracle](https://pypi.python.org/pypi/cx_Oracle) and [ipython-sql](https://github.com/LucaCanali/ipython-sql) Connect to Oracle and run a query using %SQL magic extensions- SQL magic extensions introduce the %%sql cell magic and %sql line magic for running SQL in IPython- The connect string to the database uses [sql alchemy syntax](http://docs.sqlalchemy.org/en/latest/core/engines.html) ###Code # loads the SQL magic extensions %load_ext sql # Connect to Oracle %sql oracle+cx_oracle://scott:tiger@dbserver:1521/?service_name=orcl.mydomain.com %%sql select * from emp ###Output 0 rows affected. ###Markdown Bind variables and DML ###Code Employee_name="SCOTT" %sql select * from emp where ename=:Employee_name %sql update emp set sal=3500 where ename=:Employee_name %sql commit %sql select * from emp where ename=:Employee_name ###Output 1 rows affected. Done. 0 rows affected. ###Markdown Additional example of the integration with the IPython environment ###Code myResultSet=%sql select ename "Employee Name", sal "Salary" from emp %matplotlib inline import matplotlib matplotlib.style.use('ggplot') myResultSet.bar() ###Output 0 rows affected. ###Markdown Integration with Pandas This opens many additional possibilities for data analysis ###Code %%sql select e1.ename "Employee Name", e1.job "Job", e2.ename "Manager Name" from emp e1, emp e2 where e1.mgr = e2.empno(+) # save result set into my_ResultSet and copy it to pandas in my_DataFrame my_ResultSet = _ my_DataFrame=my_ResultSet.DataFrame() my_DataFrame.head() ###Output _____no_output_____
Python/0_how_to_setup.ipynb
###Markdown
Installing Python

If you need to install `Python`, please follow the instructions in this [link](https://swcarpentry.github.io/python-novice-inflammation/setup.html). This link is from The Carpentries website. I am a certified instructor for The Carpentries, and they have great documentation on how to set up your computer.

**TL;DR:** The easiest way to install Python is through [Anaconda](https://docs.anaconda.com/anaconda/install/). You can follow the instructions for your operating system.

Installing Python libraries

The Python library I use the most is `pandas`, so you'll need to install it as well. There are many ways you can install a library. You can use `conda install` or `pip install`.

**Which one do I use?** First, check the official online documentation for the library or package you want to install. The proper instructions will be listed there. For instance, the [`pandas`](https://pandas.pydata.org/docs/getting_started/install.html) documentation has in-depth instructions on how to install it properly. Well-maintained libraries most likely have great documentation online.

I normally use `pip`, which is convenient because we can also install libraries from a Jupyter notebook like so:
###Code
!pip install pandas
###Output
Requirement already satisfied: pandas in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (1.2.0)
Requirement already satisfied: pytz>=2017.3 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from pandas) (2020.4)
Requirement already satisfied: python-dateutil>=2.7.3 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from pandas) (2.8.1)
Requirement already satisfied: numpy>=1.16.5 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from pandas) (1.19.4)
Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)
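###Markdown
A quick way to confirm the installation worked (optional sketch): import the library and print its version. If you prefer `conda`, the equivalent install command is shown as a comment; the exact channel may vary, so check the library's documentation.
###Code
import pandas as pd
print(pd.__version__)

# conda alternative (run in a terminal, or prefix with ! in a notebook):
# conda install pandas
###Output
_____no_output_____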
BookExercises/DeepLearningwithPython/Keras_CNNModel_4.ipynb
###Markdown
Predict with Saved Model
###Code
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# NOTE: `conv_base` (the pretrained convolutional base) and `model` (the saved
# classifier) must already be defined/loaded; see the sketch below.
test_dir = ''   # path to the directory with the test images
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

def extract_features(directory, sample_count):
    features = np.zeros(shape=(sample_count, 18, 13, 2048))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(directory,
                                            target_size=(640,480),
                                            batch_size=batch_size,
                                            class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)  # extract features from this batch of images
        features[i * batch_size : (i+1) * batch_size] = features_batch
        labels[i * batch_size : (i+1) * batch_size] = labels_batch
        i += 1
        print(i, end=' ')  # show feature-extraction progress
        if i * batch_size >= sample_count:
            # stop once the number of extracted samples reaches the requested sample count
            break
    return features, labels

test_features, test_labels = extract_features(test_dir, 45)  # number of test samples

test_features = np.reshape(test_features, (45, 18*13*2048))
predict_result = model.predict_classes(test_features)
print()
correct_num = 0
for i in range(len(test_features)):
    print(str(predict_result[i]), ', Correct Answer is ', str(test_labels[i]))
    if predict_result[i] == test_labels[i]:
        correct_num = correct_num + 1
print('Accuracy: ', correct_num/len(test_features))
###Output
_____no_output_____
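###Markdown
If this notebook is run on its own, `conv_base` and `model` need to be created first; the sketch below shows one way to do that. The base network, the file name and the input shape here are assumptions for illustration only. They must match whatever was actually used when the classifier was trained and saved (in particular, the `(18, 13, 2048)` feature shape above has to agree with the base's output for 640x480 inputs).
###Code
# Hypothetical setup, assuming the classifier was saved earlier with model.save():
from keras.applications import InceptionV3   # assumed base; use the same one as in training
from keras.models import load_model

conv_base = InceptionV3(weights='imagenet', include_top=False,
                        input_shape=(640, 480, 3))
model = load_model('cnn_classifier.h5')      # hypothetical file name
###Output
_____no_output_____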
kaggle-lung-cancer-approach2/04-nodule-segmentation-3d-test.ipynb
###Markdown Train 3d nodule detector with LUNA16 dataset ###Code INPUT_DIR='../../input/nodules/' OUTPUT_DIR='../../output/lung-cancer/04/' MODEL_FILE='../../output/lung-cancer/03-ok/tf-checkpoint-best9615' IMAGE_DIMS = (50,50,50,1) %matplotlib inline import numpy as np import pandas as pd import h5py import matplotlib.pyplot as plt import sklearn import os import glob from modules.logging import logger import modules.utils as utils from modules.utils import Timer import modules.logging import modules.cnn as cnn import modules.ctscan as ctscan ###Output _____no_output_____ ###Markdown Test Prepare CNN model ###Code logger.info('Prepare CNN for training') network = cnn.net_nodule3d_swethasubramanian(IMAGE_DIMS) model = cnn.prepare_cnn_model(network, OUTPUT_DIR, model_file=MODEL_FILE) ###Output 2017-03-28 00:36:58,355 INFO Prepare CNN for training 2017-03-28 00:36:58,575 INFO Prepare CNN 2017-03-28 00:36:58,588 INFO Preparing output dir 2017-03-28 00:36:58,589 INFO Initializing network... ###Markdown Evaluate results ###Code logger.info('Evaluate dataset') cnn.evaluate_dataset(INPUT_DIR + 'nodules-test.h5', model, batch_size=12, confusion_matrix=True) ###Output _____no_output_____
Machine Learning Files/Training.ipynb
###Markdown ###Code import tensorflow as tf import tensorflow_hub as hub import os import shutil from zipfile import ZipFile from tqdm.notebook import tqdm from tensorflow.keras.models import load_model, save_model, Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers.experimental.preprocessing import Rescaling, Resizing from tensorflow.keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D from tensorflow.keras.optimizers import Adam, Adamax from tensorflow.keras.callbacks import EarlyStopping import matplotlib.pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown Prerequisites ###Code path = '/content/drive/My Drive/External Datasets/COVID19-Project/Datasets/main_dataset.zip' file = ZipFile(path, 'r') file.extractall() test_dir = '/content/main/test' train_dir = '/content/main/train' val_dir = '/content/main/validation' # EfficientNet-Lite4 img_height = 380 img_width = 380 batch_size = 64 # Xception img_height = 512 img_width = 512 batch_size = 64 datagen = ImageDataGenerator(horizontal_flip = True, rescale = (1/255)) train_gen = datagen.flow_from_directory( train_dir, target_size = (img_height, img_width), color_mode = 'rgb', batch_size = batch_size, shuffle = True ) test_gen = datagen.flow_from_directory( test_dir, target_size = (img_height, img_width), color_mode = 'rgb', shuffle = False, batch_size = batch_size, ) val_gen = datagen.flow_from_directory( val_dir, target_size = (img_height, img_width), color_mode = 'rgb', batch_size = batch_size, shuffle = True ) class_names = ['covid', 'non-covid', 'non-informative'] ###Output _____no_output_____ ###Markdown Training the model ###Code URL = 'https://tfhub.dev/tensorflow/efficientnet/lite4/feature-vector/2' pretrained_model = hub.KerasLayer(URL, input_shape = (img_height, img_width,3)) model = Sequential() model.add(pretrained_model) model.add(Flatten()) model.add(Dense(512, activation = 'relu')) model.add(Dropout(.30)) model.add(Dense(3, activation = 'softmax')) model.compile(optimizer = Adamax(), loss= 'categorical_crossentropy', metrics = ['accuracy']) es = EarlyStopping(monitor = 'val_loss', patience =3) history = model.fit(train_gen, validation_data= val_gen, epochs = 5, callbacks = es) history1 = model.fit(train_gen, validation_data= val_gen, epochs = 5, callbacks = es) metrics = model.evaluate(test_gen) model.trainable = True os.mkdir('/content/drive/My Drive/External Datasets/COVID19-Project/successful_models/model_checkpoints') model.save('EfficientNet-L4-v1.h5') history2 = model.fit(train_gen, validation_data= val_gen, epochs = 5, callbacks = es) model.evaluate(test_gen) plt.plot(history1.history['accuracy']) plt.plot(history1.history['val_accuracy']) plt.title('Model Accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train','Test'], loc = 'upper left') plt.show() plt.plot(history1.history['loss']) plt.plot(history1.history['val_loss']) plt.title('Model Loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['Train', 'Test'], loc = 'upper left') plt.show() os.chdir('/content/drive/My Drive/External Datasets/COVID19-Project/successful_models') model.save('EfficientNet-Lite4-v3.h5') ###Output _____no_output_____ ###Markdown Plotting the confusion matrix ###Code model = load_model('/content/drive/My Drive/External Datasets/COVID19-Project/successful_models/Xception-v2.h5') model.summary() predictions = model.predict(test_gen) print(predictions) rounded_predictions = np.argmax(predictions, axis =1) labels = test_gen.classes 
from mlxtend.plotting import plot_confusion_matrix from sklearn.metrics import confusion_matrix cs = confusion_matrix(labels, rounded_predictions) cm = plot_confusion_matrix(conf_mat = cs, figsize =(8,8)) ###Output _____no_output_____
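###Markdown
A per-class summary to complement the confusion matrix (optional sketch, reusing `labels`, `rounded_predictions` and `class_names` from the cells above): scikit-learn's `classification_report` gives precision, recall and F1 for each of the three classes.
###Code
from sklearn.metrics import classification_report

print(classification_report(labels, rounded_predictions, target_names=class_names))
###Output
_____no_output_____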
Geron/Geron_ch9_Unsupervised/Unsupervised_01_Kmeans.ipynb
###Markdown
Clustering

Applications:
* preprocessor for data analysis,
* segmentation of images or customers,
* anomaly and novelty detection,
* semi-supervised (starting with few labels),
* search-for-similar image,
* nonlinear dimensionality reduction.

K-means Clustering

Algorithm invented separately by Lloyd 1957 and Forgy 1965.
* centroid selection (random or inspired initially),
* label instances by closest centroid,
* select a new centroid using mean per label,
* repeat.

Limitations:
* num clusters (K) is a required input parameter,
* assumes clusters are equal size,
* converges on local optima.

Expected linear time but worst case O(n^m) (actually the book only said "exponential in instances").

Cluster the Iris data
###Code
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target

from sklearn.cluster import KMeans
K = 3
km = KMeans(n_clusters=K)
y_pred = km.fit_predict(X)
y_pred
km.cluster_centers_
# Mean squared distance to centroid.
km.inertia_

# Predict the cluster for a point near a centroid.
import numpy as np
hypothetical = np.array([[5, 3, 1, 0]])
km.predict(hypothetical)

# Compute one point's distance to centroid.
# This is a form of dimensionality reduction.
km.transform(hypothetical)

# One point's score.
# Score = negative inertia.
# Useful for situations where better score means tighter fit.
km.score(hypothetical)

# Redo with more parameter settings but get same results.
init_val = 'random'  # default = K-means++
n_init_val = 10      # number of times to restart with a new random init (best run is kept)
km = KMeans(n_clusters=K, init=init_val, n_init=n_init_val, random_state=42)
y_rand = km.fit_predict(X)
y_rand == y_pred

# Redo with more clusters.
km = KMeans(n_clusters=10)
y_lots = km.fit_predict(X)
y_lots

# Silhouette is a metric for estimating optimal K.
from sklearn.metrics import silhouette_score
# Crashes if K less than 2
for K in range(2,10):
    km = KMeans(n_clusters=K)
    km.fit_predict(X)
    s = silhouette_score(X, km.labels_)
    print("Clusters=%d Score=%f" % (K, s))
# We know there are 3 clusters but 2 of them are similar.
# This analysis says 2 clusters is optimal.
# Score ranges -1 (edge of cluster) to +1 (near its centroid).
# Can take score per point or score overall.
# With "knife diagram" plot of score per point for each cluster,
# try to choose a K with the most uniform knife shapes.
###Output
_____no_output_____
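###Markdown
A sketch of the "knife diagram" mentioned above (optional; K=3 here is an arbitrary choice for illustration): `silhouette_samples` gives the per-point scores, which are then sorted within each cluster and drawn as horizontal bars.
###Code
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_samples

K = 3
km = KMeans(n_clusters=K, random_state=42)
labels = km.fit_predict(X)
sample_scores = silhouette_samples(X, labels)

y_lower = 0
for k in range(K):
    scores_k = np.sort(sample_scores[labels == k])
    plt.barh(range(y_lower, y_lower + len(scores_k)), scores_k, height=1.0)
    y_lower += len(scores_k) + 10   # gap between clusters
plt.xlabel("Silhouette score")
plt.ylabel("Instances (grouped by cluster)")
plt.show()
###Output
_____no_output_____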
ArbresBinaires.ipynb
###Markdown
Binary trees

What we want to do

We want to create a data structure for binary trees of integers.

A binary tree of integers is here:
- either the empty tree;
- or a node containing an integer and its two children, left and right, which are themselves binary trees of integers.

The definition is therefore recursive.

The idea is to create a class (which could be placed in a module) providing the following operations:
- creation of an empty tree;
- creation of a tree from an integer and two trees;
- test whether a tree is empty or not;
- root of a tree;
- left child and right child of a tree
###Code
class Noeud:
    def __init__(self,valeur,gauche,droit):
        self.n = valeur
        self.g = gauche
        self.d = droit

class ArbreBinaire:
    def __init__(self,c):
        self.r = c

    def creeVide():
        return ArbreBinaire(None)

    def creeNGD(valeur,gauche=None,droit=None):
        return ArbreBinaire(Noeud(valeur,gauche,droit))

    def estVide(self):
        return self.r is None

    def racine(self):
        assert not(self.r is None),'Empty tree'
        return self.r.n

    def filsGauche(self):
        assert not(self.r is None),'Empty tree'
        return self.r.g

    def filsDroit(self):
        assert not(self.r is None),'Empty tree'
        return self.r.d

a = ArbreBinaire.creeNGD(12)  # tree with no children, often called a leaf
b = ArbreBinaire.creeNGD(14)
c = ArbreBinaire.creeNGD(7)
a

d = ArbreBinaire.creeNGD(2,b,c)
e = ArbreBinaire.creeNGD(3,None,a)  # be careful to pass the left child explicitly when it is empty
f = ArbreBinaire.creeNGD(1,e,d)
f.racine(),f.filsDroit().filsGauche().racine(),f.filsGauche().racine()
###Output
_____no_output_____
###Markdown
Another implementation, purely functional
###Code
def arbreVide():
    return None

def arbreNGD(valeur,gauche=None,droit=None):
    return (valeur,gauche,droit)

a = arbreNGD(12)
b = arbreNGD(14)
c = arbreNGD(7)
d = arbreNGD(2,b,c)
e = arbreNGD(3,None,a)
f = arbreNGD(1,e,d)
f
# obviously this is simpler to decipher, it is also more bare-bones...
# but it does the job!
###Output
_____no_output_____
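###Markdown
A possible extension sketch (not in the original notebook): a recursive function that mirrors the recursive definition, here computing the size (number of nodes) of a tree in the functional, tuple-based representation defined just above.
###Code
def taille(arbre):
    # size of a binary tree: 0 for the empty tree, otherwise 1 + size of both children
    if arbre is None:
        return 0
    valeur, gauche, droit = arbre
    return 1 + taille(gauche) + taille(droit)

taille(f)  # the tree f built above contains 6 nodes
###Output
_____no_output_____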
mnist-models/mnist-models.ipynb
###Markdown MNIST ModelsTrying out some models on MNIST. There is a basic MLP, CNN, and variations of VGG16. ###Code # get mnist data import tensorflow as tf import numpy as np from keras import models, layers, optimizers, utils, datasets mnist = datasets.mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train = x_train.astype('float32') x_train = x_train.reshape(60000, 784)/255 x_test = x_test.astype('float32') x_test = x_test.reshape(10000, 784)/255 y_train = utils.to_categorical(y_train, 10) y_test = utils.to_categorical(y_test, 10) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) # set up experiment for MLP inputs = layers.Input(shape=(784,)) x = layers.Dense(500, activation='relu')(inputs) x = layers.Dropout(0.1)(x) outputs = layers.Dense(10, activation='softmax')(x) model = models.Model(inputs=inputs, outputs=outputs) model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy']) # run experiment for MLP history = model.fit(x_train[:1024], y_train[:1024], batch_size=128, epochs=10, validation_data=(x_train[1024:2048],y_train[1024:2048])) score = model.evaluate(x_test, y_test) print('Test Loss:', score[0], 'Test_accuracy:', score[1]) # plot result for MLP import matplotlib.pyplot as plt plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.legend(['Training Accuracy', 'Validation Accuracy']) plt.title('Performance of our MLP model on MNIST') plt.xlabel('Epoch') plt.xlabel('Accuracy') plt.ylim((0,1)) plt.show() # reload data for CNN (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train = x_train.astype('float32') x_train = x_train.reshape(60000, 28, 28, 1)/255 x_test = x_test.astype('float32') x_test = x_test.reshape(10000, 28, 28, 1)/255 y_train = utils.to_categorical(y_train, 10) y_test = utils.to_categorical(y_test, 10) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) # set up experiment for CNN inputs = layers.Input(shape=(28, 28, 1)) x = layers.Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs) x = layers.Conv2D(64, (3, 3), activation='relu')(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.25)(x) x = layers.Flatten()(x) x = layers.Dense(128, activation='relu')(x) x = layers.Dropout(0.5)(x) outputs = layers.Dense(10, activation='softmax')(x) model = models.Model(inputs=inputs, outputs=outputs) model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adadelta(), metrics=['accuracy']) # run experiment for CNN history = model.fit(x_train[:1024], y_train[:1024], batch_size=128, epochs=20, validation_data=(x_train[1024:2048],y_train[1024:2048])) score = model.evaluate(x_test, y_test) print('Test Loss:', score[0], 'Test_accuracy:', score[1]) # plot result for CNN import matplotlib.pyplot as plt plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.legend(['Training Accuracy', 'Validation Accuracy']) plt.title('Performance of our CNN model on MNIST') plt.xlabel('Epoch') plt.xlabel('Accuracy') plt.ylim((0,1)) plt.show() # prep data for vgg16 from scipy import ndimage (x_train, y_train),(x_test, y_test) = mnist.load_data() x_val = x_train[1024:2048] y_val = y_train[1024:2048] x_train = x_train[:1024] y_train = y_train[:1024] x_train = ndimage.zoom(x_train, (1,8,8)) x_train = x_train.astype('float32') x_train = x_train.reshape(1024, 224, 224, 1)/255 x_train = np.concatenate((x_train, x_train, x_train), 3) x_val = ndimage.zoom(x_val, (1,8,8)) x_val = x_val.astype('float32') x_val = x_val.reshape(1024, 
224, 224, 1)/255 x_val = np.concatenate((x_val, x_val, x_val), 3) x_test = ndimage.zoom(x_test, (1,8,8)) x_test = x_test.astype('float32') x_test = x_test.reshape(10000, 224, 224, 1)/255 x_test = np.concatenate((x_test, x_test, x_test), 3) y_train = utils.to_categorical(y_train, 10) y_val = utils.to_categorical(y_val, 10) y_test = utils.to_categorical(y_test, 10) print(x_train.shape) print(y_train.shape) print(x_val.shape) print(y_val.shape) print(x_test.shape) print(y_test.shape) # run experiment with vgg16 with only training new layers from keras.applications.vgg16 import VGG16 base_model = VGG16(weights='imagenet', include_top=False) # make 3 final layers on the model x = base_model.output x = layers.GlobalAveragePooling2D()(x) x = layers.Dense(1024, activation='relu')(x) outputs = layers.Dense(10, activation='softmax')(x) # 10 classes model = models.Model(inputs=base_model.input, outputs=outputs) # only train new layers for layer in base_model.layers: layer.trainable = False model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) hist = model.fit(x_train, y_train, batch_size=16, epochs=30, validation_data=(x_val,y_val)) score = model.evaluate(x_test, y_test) print('Test Loss:', score[0], 'Test_accuracy:', score[1]) #score = model.evaluate(x_test, y_test) #print('Test Loss:', score[0], 'Test_accuracy:', score[1]) plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.legend(['Training Accuracy', 'Validation Accuracy']) plt.title('Performance of VGG16 model on MNIST') plt.xlabel('Epoch') plt.xlabel('Accuracy') plt.ylim((0,1)) plt.show() # run experiment with vgg16 with training new layers, and # last 2 of original base_model = VGG16(weights='imagenet', include_top=False) # make 3 final layers on the model x = base_model.output x = layers.GlobalAveragePooling2D()(x) x = layers.Dense(1024, activation='relu')(x) outputs = layers.Dense(10, activation='softmax')(x) # 10 classes model = models.Model(inputs=base_model.input, outputs=outputs) # train new layers and last 2 of original for layer in base_model.layers[:-2]: layer.trainable = False model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) hist = model.fit(x_train, y_train, batch_size=16, epochs=30, validation_data=(x_val,y_val)) score = model.evaluate(x_test, y_test) print('Test Loss:', score[0], 'Test_accuracy:', score[1]) # apparently this is the limit to the test size for this # model on my GPU score = model.evaluate(x_test[:30], y_test[:30]) print('Test Loss:', score[0], 'Test_accuracy:', score[1]) plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.legend(['Training Accuracy', 'Validation Accuracy']) plt.title('Performance of VGG16 model on MNIST') plt.xlabel('Epoch') plt.xlabel('Accuracy') plt.ylim((0,1)) plt.show() ###Output _____no_output_____
cheme-sci-computing/programming/review-linear-system-control-flow.ipynb
###Markdown Review of Solving Linear Systems and Control FlowTeng-Jui LinContent adapted from UW CHEME 375, Chemical Engineering Computer Skills, in Spring 2021.- Python skills and numerical methods - Solving linear systems by [`scipy.linalg.solve()`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve.html)- ChemE applications - Solving recycle streams - Linear system method - Tear stream method Solving recycle streams**Problem Statement.** A system that consists a reactor and a separator with a recycle is shown below. The fresh feed is 1000 mol/h of pure A. The reaction is isomerization of A to B with a single pass conversion rate of 75%. The split fraction from steam 3 to 4 is 30%. Generate a stream table.![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png) Linear system method**Solution.***Given values.* We are given the values- fresh feed of A $F_1^A = 1000 \ \text{mol A/h}$- fresh feed of B $F_1^B = 0 \ \text{mol B/h}$- single pass conversion rate $X = 0.75$- split fraction of A, B $f_{3-4} = 0.3$*Degree of freedom analysis.* Adding all the degree of freedom analysis, the degree of freedom is$$\small\begin{aligned}& \text{8 total unknown variables} (F_2^A, F_2^B, F_3^A, F_3^B, F_4^A, F_4^B, F_5^A, F_5^B) \\- & \text{2 mixing point material balance (A, B)} \\- & \text{2 separator material balance (A, B)} \\+ & \text{1 reactor reaction } (\mathrm{A \to B}) \\- & \text{2 reactor reactive species (A, B)} \\- & \text{0 reactor nonreactive species} \\- & \text{3 other relation (conversion, split fraction of A, B)} \\\hline & \text{0 degrees of freedom}\end{aligned}$$So the process variables can be uniquely determined.*Mixing point balance*$$\begin{aligned}&\text{A balance:} &F_1^A + F_4^A &= F_2^A \\&\text{B balance:} &F_1^B + F_4^B &= F_2^B \\\end{aligned}$$*Separator balance*$$\begin{aligned}&\text{A balance:} &F_3^A = F_4^A + F_5^A \\&\text{B balance:} &F_3^B = F_4^B + F_5^B \\\end{aligned}$$*Reactor balance with single pass conversion rate*$$\begin{aligned}&\text{A balance:} &F_3^A &= (1-X) F_2^A \\&\text{B balance:} &F_3^B &= F_2^B + X F_2^A \\\end{aligned}$$*Split fraction*$$\begin{aligned}&\text{A balance:} &F_4^A &= f_{3-4}F_3^A \\&\text{B balance:} &F_4^B &= f_{3-4}F_3^B \\\end{aligned}$$*Establish linear system.* Write the variable terms on the left and known constant terms on the right:$$\begin{aligned}\text{mixing point A balance:}& &F_2^A - F_4^A &= F_1^A \\\text{mixing point B balance:}& &F_2^B - F_4^B &= F_1^B \\\text{separator A balance:}& &F_4^A + F_5^A - F_3^A &= 0 \\\text{separator B balance:}& &F_4^B + F_5^B - F_3^B &= 0 \\\text{reactor A balance:}& &F_3^A - (1-X) F_2^A &= 0 \\\text{reactor B balance:}& &F_3^B - F_2^B - X F_2^A &= 0 \\\text{split fraction:}& &f_{3-4}F_3^A - F_4^A &= 0 \\\text{split fraction:}& &f_{3-4}F_3^B - F_4^B &= 0 \\\end{aligned}$$The the system of linear equations can be written in the form of$$\mathbf{Ax = b}$$where$$\mathbf{A} =\begin{bmatrix}1 & 0 & 0 & 0 & -1 & 0 & 0 & 0 \\0 & 1 & 0 & 0 & 0 & -1 & 0 & 0 \\0 & 0 & -1 & 0 & 1 & 0 & 1 & 0 \\0 & 0 & 0 & -1 & 0 & 1 & 0 & 1 \\X-1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\X & -1 & 0 & 1 & 0 & 0 & 0 & 0 \\0 & 0 & f_{3-4} & 0 & -1 & 0 & 0 & 0 \\0 & 0 & 0 & f_{3-4} & 0 & -1 & 0 & 0\end{bmatrix},\mathbf{x} =\begin{bmatrix}F_2^A \\ F_2^B \\ F_3^A \\ F_3^B \\ F_4^A \\ F_4^B \\ F_5^A \\ F_5^B \\\end{bmatrix},\mathbf{b} =\begin{bmatrix}F_1^A \\ F_1^B \\ 0 \\ 0 \\ 0 \\ 0 \\ 0 \\ 0\end{bmatrix}$$We can then solve the system using `scipy.linalg.solve()`. 
###Code import numpy as np from scipy.linalg import solve # define process specifications conv_a = 0.75 split_3_4_a = 0.2 F_1_a = 1000 F_1_b = 0 A = np.array([[1, 0, 0, 0, -1, 0, 0, 0], [0, 1, 0, 0, 0, -1, 0, 0], [0, 0, -1, 0, 1, 0, 1, 0], [0, 0, 0, -1, 0, 1, 0, 1], [conv_a-1, 0, 1, 0, 0, 0, 0, 0], [-conv_a, -1, 0, 1, 0, 0, 0, 0], [0, 0, split_3_4_a, 0, -1, 0, 0, 0], [0, 0, 0, split_3_4_a, 0, -1, 0, 0]]) b = np.array([F_1_a, F_1_b, 0, 0, 0, 0, 0, 0]) solve(A, b).reshape(-1, 2) ###Output _____no_output_____ ###Markdown The solution is therefore$$\mathbf{x} =\begin{bmatrix}F_2^A \\ F_2^B \\ F_3^A \\ F_3^B \\ F_4^A \\ F_4^B \\ F_5^A \\ F_5^B \\\end{bmatrix} =\begin{bmatrix}1053 \\ 197 \\ 263 \\ 987 \\ 53 \\ 197 \\ 211 \\ 789 \\\end{bmatrix}$$We can now construct the stream table:|Component|Steam 1|Steam 2|Steam 3|Steam 4|Steam 5||-:|-:|-:|-:|-:|-:||A [mol/h]|1000|1053|263|53|211||B [mol/h]|0|197|987|197|789||Total [mol/h]|1000|1250|1250|250|1000| Tear stream method**Solution.** Tear stream method focuses on solving for the recycle stream. In this method, we view the recycle stream as two separate streams, steams 4a and 4b, with equal mass (and molar) flow rate:![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)We first give an (arbitrary) guess to steam 4a, for example,$$\begin{aligned}F_{4a}^A & = 0 \\F_{4a}^B & = 0\end{aligned}$$We then obtain the expression of the recycle stream 4b for A and B in terms of the guesses of steam 4a:$$\begin{aligned}F_{4b}^A & = f_{3-4}(1-X)(F_1^A + F_{4a}^A) \\F_{4b}^B & = f_{3-4}X(F_1^A + F_{4a}^A) + f_{3-4}(F_1^B + F_{4a}^B)\end{aligned}$$We then use the result of stream 4b to update the guesses of steam 4a because they should be the same by our definition:$$\begin{aligned}F_{4a}^A & = F_{4b}^A \\F_{4a}^B & = F_{4b}^B\end{aligned}$$After iterations, the result should converge so that there are not difference between stream 4a and 4b. ###Code import numpy as np # define process specifications conv_a = 0.75 split_3_4_a = 0.2 F_1_a = 1000 F_1_b = 0 # define iteration parameters tolerance = 1e-8 max_iter = 1000 i = 0 # define dummy values to enter loop abs_error_4a_b = tolerance * 2 rel_error_4a_b = tolerance * 2 # define initial guesses of tear stream F_4a_a_guess = 0 F_4a_b_guess = 0 F_4a_a = F_4a_a_guess F_4a_b = F_4a_b_guess # tear stream method logic while (abs_error_4a_b > tolerance or rel_error_4a_b > tolerance) and i < max_iter: # calculate via material balance F_4b_a = split_3_4_a * (1 - conv_a) * (F_1_a + F_4a_a) F_4b_b = split_3_4_a * conv_a * (F_1_a + F_4a_a) + split_3_4_a * (F_1_b + F_4a_b) # calculate errors abs_error_4a_b = abs(F_4b_b - F_4a_b) rel_error_4a_b = abs_error_4a_b / F_4b_b # set next guess of 4a to current 4b F_4a_a = F_4b_a F_4a_b = F_4b_b i += 1 print(f'Iterations: {i}') print(f'Absolute error: {abs_error_4a_b:.2e}') print(f'Relative error: {rel_error_4a_b:.2e}') print(f'A in recycle stream: {F_4a_a:.1f}') print(f'B in recycle stream: {F_4a_b:.1f}') ###Output Iterations: 16 Absolute error: 6.55e-09 Relative error: 3.32e-11 A in recycle stream: 52.6 B in recycle stream: 197.4
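###Markdown
As a quick consistency check (optional sketch, reusing `A`, `b`, `F_4a_a` and `F_4a_b` from the cells above), the recycle-stream flows found by the tear stream iteration should agree with the corresponding entries of the linear system solution.
###Code
x = solve(A, b)   # ordering: [F2A, F2B, F3A, F3B, F4A, F4B, F5A, F5B]
print(f'Linear system recycle stream: A = {x[4]:.1f}, B = {x[5]:.1f}')
print(f'Tear stream iteration:        A = {F_4a_a:.1f}, B = {F_4a_b:.1f}')
###Output
_____no_output_____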
notebooks/2020-07-19 Initial ventilator plots.ipynb
###Markdown Cohen waveforms ###Code import os os.getcwd() data_files = glob('../skgaip/lung/cyril/data/no*waveforms.csv') data = [] metadata = [] def parse_metadata(filename): tokens = list(map(int, filter(lambda x: len(x)>0, re.split('[^0-9]', filename)))) return dict(zip(['C', 'R', 'f', 'PIP', 'PEEP'], tokens[1:])) for f in data_files: df = pd.read_csv(f) metadata.append(parse_metadata(f)) data.append(df[["'control_in'"," 'control_out'"," 'pressure'"," 'timestamp'"]].to_numpy()) ###Output _____no_output_____ ###Markdown Breath Waveform explainer ###Code # TBD ###Output _____no_output_____ ###Markdown Proxy control signal explainer ###Code valve = ValveCurve('../skgaip/lung/cyril/data/valve_response_bidir.csv') ###Output _____no_output_____ ###Markdown Current controller ###Code def plot(i, T=2000, raw_control_in=True, control_in_delay=None, figsize=(16,5.5), ylim=(0, 70), title=True, pressure_only=False): plt.rc('figure', figsize=figsize) control_in, control_out, pressure, tt = data[i][:T].T tt = tt - min(tt) control_in_label = "Control signal (% max flow)" # control_in_label = "Control signal" if not raw_control_in: control_in = valve.at(np.clip(control_in, 0, 100)) fig, ax1 = plt.subplots() delay = 1 ax1.set_xlabel("Time (seconds)") ax1.set_ylabel("Pressure (cm H2O)") ax1.set_ylim(*ylim) tube_pressure = ax1.plot(tt[:-delay], pressure[delay:], c='rygbm'[i], label="Tube pressure (cm H2O)") pip = ax1.axhline(metadata[i]['PIP'], ls='--', c='k', label="PIP (cm H2O)") peep = ax1.axhline(metadata[i]['PEEP'], ls=':', c='k', label="PEEP (cm H2O)") ax1.fill_between(tt, plt.ylim()[0], plt.ylim()[1], where=control_out.astype(bool), color='lightgray', alpha=0.3) if not pressure_only: ax2 = ax1.twinx() ax2.set_ylabel("% max flow") if control_in_delay is not None: control_delay = ax2.plot(tt + control_in_delay, control_in, c='rygbm'[i], ls="--", label="Delayed control signal") control = ax2.plot(tt, control_in, c='gray', label=control_in_label) if title: plt.title("Breath cycle with compliance {}, resistance {}, flow {}".format(metadata[i]["C"], metadata[i]["R"], metadata[i]["f"])) lines = [tube_pressure[0], pip, peep] if not pressure_only: lines.append(control[0]) if control_in_delay is not None: lines.append(control_delay[0]) labels = [line.get_label() for line in lines] ax1.legend(lines, labels, loc="upper right") plot(3) metadata[0] from datetime import datetime datetime.now().timestamp() ###Output _____no_output_____ ###Markdown Existing simulator ###Code # track waveform from demo plt.rc('figure', figsize=(12, 2)) T = 10 # simulation length in real time dt = 0.05 # time discretization tt = np.arange(T // dt) * dt # environment: lung physics + target waveform lung = DemoLung(leak=False, peep_valve=5, PC=40, RP=1, dt=dt) target = BreathWaveform([3, 40], [0.3, 1.2, 1.7, 3]) # control policy pid = PID([3, 4, 0], dt=dt) # example code uses hand-tuned time-varying PID for t in tt: err = target.at(t) - lung.current_pressure pip = pid.feed(err) peep = int(t % 3.0 > 1.2) # like example code, use breath phase to control peep lung.step(pip, peep) plt.xlabel("Time (s)") plt.ylabel("Pressure (mm Hg)") plt.plot(tt, target.at(tt), c='gray', ls='--', label="Target pressure") plt.plot(tt, lung.pressures, c='b', label="Lung pressure") plt.legend() ###Output _____no_output_____ ###Markdown Delay ###Code plot(4, T=500, raw_control_in=True, control_in_delay=0.045, figsize=(14, 4.5)) ###Output _____no_output_____ ###Markdown Updated simulator ###Code plt.rc('figure', figsize=(14,5)) for i,K_P in enumerate([0.3]): 
ax1 = plt.subplot(111+i) # track waveform T = 3 # simulation length in real time dt = 0.003 # time discretization tt = np.arange(T // dt) * dt # environment: lung physics + target waveform lung = DelayLung() target = BreathWaveform([3, 40], [0.1, 1.2, 1.7, 3]) # control policy pid = PID([K_P, 0, 0], dt=dt) prev_u = 0 for t in tt: err = target.at(t) - lung.pressure u = pid.feed(err) prev_u = u peep = int(t % 3.0 > 1.2) lung.step(u, peep) target_pressure = ax1.plot(tt, target.at(tt), c='k', ls='--', label="Target pressure (cm H2O)") lung_pressure = ax1.plot(tt, lung.pressures, c='b', label="Lung pressure (cm H2O)") ax1.set_ylabel("Pressure (cm H2O)") ax2 = ax1.twinx() control = ax2.plot(tt, lung.controls_in, c='gray', label="Control signal (% max flow)") ax2.set_ylabel("% max flow") # ax2.set_ylabel(control_in_label) # if control_in_delay is not None: # control_delay = ax2.plot(tt + control_in_delay, control_in, c='rygbm'[i], ls="--", label="Delayed control signal") # control = ax2.plot(tt, control_in, c='gray', label=control_in_label) # plt.title("Breath cycle with compliance {}, resistance {}, flow {}".format(metadata[i]["C"], metadata[i]["R"], metadata[i]["f"])) lines = [target_pressure[0], lung_pressure[0], control[0]] labels = [line.get_label() for line in lines] ax1.legend(lines, labels, loc=0) # plt.twinx().plot(tt, lung.controls_in, c='gray') ###Output _____no_output_____ ###Markdown Before ###Code plot(4, T=8000, raw_control_in=True, figsize=(10,5), ylim=(0, 60), title=False, pressure_only=True) ###Output _____no_output_____
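###Markdown
For readers without access to the custom modules, this is a rough standalone sketch of what a discrete PID controller of the form used above (`PID([K_P, K_I, K_D], dt=dt)` with a `feed(error)` method) typically computes. It is a generic textbook PID, not the project's actual implementation, so details such as clamping or filtering may differ.
###Code
class SimplePID:
    """Generic discrete PID: u = Kp*e + Ki*sum(e)*dt + Kd*de/dt (illustrative only)."""
    def __init__(self, coeffs, dt):
        self.kp, self.ki, self.kd = coeffs
        self.dt = dt
        self.integral = 0.0
        self.prev_err = 0.0

    def feed(self, err):
        self.integral += err * self.dt
        derivative = (err - self.prev_err) / self.dt
        self.prev_err = err
        return self.kp * err + self.ki * self.integral + self.kd * derivative
###Output
_____no_output_____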
nbs/rl/policies/rl.policies.bandit_policies.ipynb
###Markdown Bandit Policies> Implementation of bandit policies including UCB and TS. ###Code #hide from nbdev.showdoc import * #export import numpy as np import pandas as pd from scipy.stats import norm, beta from abc import ABC, abstractmethod #export class ABPolicy(ABC): def __init__(self, bandit, slate_size, scores_logging): self.name = None self.slate_size = slate_size self.history = pd.DataFrame(data=None, columns=['item_id', 'reward']) if scores_logging is True: self.scores_log = pd.DataFrame(data=None, columns=bandit.actions) else: self.scores_log = None @abstractmethod def get_recommendations(self): ... def update(self, rewards): # append new events to history self.history = self.history.append(rewards, ignore_index=True) def _sort_actions(self, scores): """ Sort actions by score and shuffle actions with same score Inputs: scores: pandas.Series with actions as index """ sorted_actions = sorted( scores.sample(frac=1).index, key=lambda idx: scores.loc[idx], reverse=True) return sorted_actions def _update_scores_history(self, scores): if self.scores_log is not None: self.scores_log = self.scores_log.append( pd.DataFrame( data=scores.to_numpy().reshape((1,-1)), columns=self.scores_log.columns), ignore_index=True) self.scores_log = self.scores_log.astype('float') #export class EpsilonGreedy(ABPolicy): def __init__(self, bandit, epsilon, slate_size=1, scores_logging=False): super(EpsilonGreedy, self).__init__(bandit, slate_size, scores_logging) self.name = '{}-Greedy'.format(epsilon) self.epsilon = epsilon self.action_values = pd.DataFrame(data=0, columns=['value', 'count'], index=bandit.actions) def get_recommendations(self): # sort actions by value and shuffle actions with same value sorted_actions = self._sort_actions(self.action_values['value']) # choose recommendations if np.random.random() < self.epsilon: recs = np.random.choice(sorted_actions[self.slate_size:], size=self.slate_size, replace=False) else: recs = sorted_actions[:self.slate_size] # update history of action scores self._update_scores_history(self.action_values['value']) return recs def update(self, rewards): super(EpsilonGreedy, self).update(rewards) # update action values for _, (item_id, reward) in rewards.iterrows(): value = self.action_values.loc[item_id, 'value'] N = self.action_values.loc[item_id, 'count'] self.action_values.loc[item_id, 'value'] = (value * N + reward) / (N + 1) self.action_values.loc[item_id, 'count'] += 1 #export class UCB(ABPolicy): def __init__(self, bandit, slate_size=1, scores_logging=False): super(UCB, self).__init__(bandit, slate_size, scores_logging) self.name = 'UCB' self.action_values = pd.DataFrame(data=0, columns=['value', 'count'], index=bandit.actions) def get_recommendations(self): # compute UCB for each action current_step = len(self.history) if current_step > 0: scores = self.action_values['count'].apply( lambda N: np.sqrt(2*np.log(current_step) / N) if N > 0 else np.Inf) scores = scores + self.action_values['value'] else: scores = pd.Series(data=np.Inf, index=self.action_values.index) # sort actions by score and shuffle actions with same score sorted_actions = self._sort_actions(scores) # choose recommendations recs = sorted_actions[:self.slate_size] # update history of action scores self._update_scores_history(scores) return recs def update(self, rewards): super(UCB, self).update(rewards) # update action values for _, (item_id, reward) in rewards.iterrows(): value = self.action_values.loc[item_id, 'value'] N = self.action_values.loc[item_id, 'count'] 
self.action_values.loc[item_id, 'value'] = (value * N + reward) / (N + 1) self.action_values.loc[item_id, 'count'] += 1 #export class TS(ABPolicy): def __init__(self, bandit, slate_size=1, scores_logging=False): super(TS, self).__init__(bandit, slate_size, scores_logging) self.name = 'Thompson Sampling' self.beta_params = pd.DataFrame(data=1, columns=['alpha', 'beta'], index=bandit.actions) def get_recommendations(self): # sample expected value for each action expected_values = pd.Series( data=4.5 * beta.rvs(self.beta_params['alpha'], self.beta_params['beta']) + 0.5, index=self.beta_params.index) # sort actions by value and shuffle actions with same value sorted_actions = self._sort_actions(expected_values) # choose recommendations recs = sorted_actions[:self.slate_size] # update history of action scores self._update_scores_history(expected_values) return recs def update(self, rewards): super(TS, self).update(rewards) # update action value distribution prior for _, (item_id, reward) in rewards.iterrows(): self.beta_params.loc[item_id, 'alpha'] += (reward - 0.5) / 4.5 self.beta_params.loc[item_id, 'beta'] += (5.0 - reward) / 4.5 ###Output _____no_output_____ ###Markdown Evaluating bandits offline with replay method on movielens dataset ###Code !wget -q --show-progress http://files.grouplens.org/datasets/movielens/ml-100k.zip !unzip -q ml-100k.zip rating_df = pd.read_csv('ml-100k/u.data', sep='\t', header=None, names=['user_id','movie_id','rating','timestamp'], usecols=['movie_id', 'rating']) rating_df.columns = ['movieId', 'rating'] def get_data(data, num_ratings, num_movies): """ Make each movieId/action uniformly distributed """ # filters out movies with less than `num_ratings` ratings movies = data.groupby('movieId').agg({'rating': 'count'}) if num_movies is not None: movies_to_keep = movies[(movies['rating'] >= num_ratings)].sample( n=num_movies, random_state=12).index else: movies_to_keep = movies[(movies['rating'] >= num_ratings)].index data = data[data['movieId'].isin(movies_to_keep)] # take a random sample of size `num_ratings` for each movie data = data.groupby('movieId').sample(n=num_ratings, random_state=42) # shuffle rows to randomize data stream data = data.sample(frac=1, random_state=42) # reset index to create pseudo-timestamp index data = data.reset_index(drop=True) return data NUM_RATINGS = 30 # with full dataset -> 10000 # with small dataset -> 30 NUM_MOVIES = None SLATE_SIZE = 5 BATCH_SIZE = 100 # with replay eval -> 100 # with simulated env -> 1 STREAM_LENGTH = 150 # with full dataset -> 50000 # with small dataset -> 150 MODE = 'replay' # 'replay' or 'sim' SCORES_LOG = False # logging movie scores or not # get data logged_events = get_data(rating_df, NUM_RATINGS, NUM_MOVIES) class ReplayBandit(): """ Implementation of a bandit problem with replay evaluation """ def __init__(self, logged_events, batch_size=1): self.events = logged_events.rename(columns={'rating': 'reward'}) self.actions = np.sort(logged_events['movieId'].unique()) self.batch_size = batch_size self.stream_length = len(self.events) // batch_size def get_rewards(self, recommendations, n_event): # generate events idx = n_event * self.batch_size events = self.events.iloc[idx:idx+self.batch_size] # keep only events that match with the recommendation slate rewards = events[events['movieId'].isin(recommendations)] return rewards bandit = ReplayBandit(logged_events, BATCH_SIZE) STREAM_LENGTH = bandit.stream_length print("NUMBER OF MOVIES/ACTIONS: {}".format(len(bandit.actions))) # instantiate policies policies = [ 
EpsilonGreedy(bandit, epsilon=0.1, slate_size=SLATE_SIZE, scores_logging=SCORES_LOG), UCB(bandit, slate_size=SLATE_SIZE, scores_logging=SCORES_LOG), TS(bandit, slate_size=SLATE_SIZE, scores_logging=SCORES_LOG), ] # evaluate policies for policy in policies: print("POLICY: {}".format(policy.name)) from tqdm.notebook import tqdm for i in tqdm(range(STREAM_LENGTH), ascii=True): recs = policy.get_recommendations() rewards = bandit.get_rewards(recs, i) policy.update(rewards) print("HISTORY LENGTH: {}".format(len(policy.history))) print() import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter import seaborn as sns def plot_rewards(*policies, title=None): fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(11,5)) fig.suptitle(title) for policy in policies: # get cumulative rewards cumsum_rewards = policy.history.reward.cumsum() # get average rewards timesteps = np.arange(len(cumsum_rewards)) + 1 avg_rewards = cumsum_rewards / timesteps # plots ax1.plot(timesteps, avg_rewards, label=policy.name) ax2.plot(timesteps, cumsum_rewards, label=policy.name) # ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f')) ax1.set_xlabel('time step') ax1.set_ylabel('average reward') ax1.legend(loc='lower right') # ax2.yaxis.set_major_formatter(FormatStrFormatter('%d')) ax2.set_xlabel('time step') ax2.set_ylabel('cumulative reward') ax2.legend(loc='lower right') # plt.tight_layout() plt.show() def plot_action_values(*policies): fig, axs = plt.subplots(nrows=1, ncols=len(policies), figsize=(15,5), squeeze=False) fig.suptitle("Action scores") axs = axs.ravel() for i, policy in enumerate(policies): cbar = True if i == len(axs)-1 else False sns.heatmap(policy.scores_log.T, ax=axs[i], vmin=2.5, vmax=5, cmap='hot', cbar=cbar, xticklabels=1000, yticklabels=False) axs[i].set_xlabel('time step') axs[i].title.set_text(policy.name) axs[0].set_ylabel('movieId') plt.tight_layout() plt.show() # plot results plot_rewards(*policies) if SCORES_LOG is True: plot_action_values(*policies) #hide %reload_ext watermark %watermark -a "Sparsh A." -m -iv -u -t -d -p recohut ###Output Author: Sparsh A. Last updated: 2021-12-26 05:47:55 recohut: 0.0.7 Compiler : GCC 7.5.0 OS : Linux Release : 5.4.144+ Machine : x86_64 Processor : x86_64 CPU cores : 2 Architecture: 64bit pandas : 1.1.5 seaborn : 0.11.2 torch : 1.10.0+cu111 IPython : 5.5.0 numpy : 1.19.5 matplotlib: 3.2.2
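###Markdown As a quick numeric complement to the reward curves above, the cell below (an addition, not part of the original notebook) tabulates mean and cumulative reward per policy straight from the `history` DataFrame each policy already keeps; it assumes the `policies` list from the replay run above is still in memory. ###Code
summary = pd.DataFrame(
    [
        {
            'policy': policy.name,                          # e.g. '0.1-Greedy', 'UCB', 'Thompson Sampling'
            'matched_events': len(policy.history),          # replayed events that matched a recommendation slate
            'mean_reward': policy.history['reward'].mean(),
            'cumulative_reward': policy.history['reward'].sum(),
        }
        for policy in policies
    ]
).set_index('policy')
summary
###Output _____no_output_____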
6.3.1-time_series.ipynb
###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Time series forecasting View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook This tutorial is an introduction to time series forecasting using Recurrent Neural Networks (RNNs). This is covered in two parts: first, you will forecast a univariate time series, then you will forecast a multivariate time series. ###Code import tensorflow as tf import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import os import pandas as pd mpl.rcParams['figure.figsize'] = (8, 6) mpl.rcParams['axes.grid'] = False ###Output _____no_output_____ ###Markdown The weather datasetThis tutorial uses a [weather time series dataset recorded by the Max Planck Institute for Biogeochemistry.This dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. This section of the dataset was prepared by François Chollet for his book [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). ###Code zip_path = tf.keras.utils.get_file( origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip', fname='jena_climate_2009_2016.csv.zip', extract=True) csv_path, _ = os.path.splitext(zip_path) df = pd.read_csv(csv_path) ###Output _____no_output_____ ###Markdown Let's take a glance at the data. ###Code df.head() ###Output _____no_output_____ ###Markdown As you can see above, an observation is recorded every 10 mintues. This means that, for a single hour, you will have 6 observations. Similarly, a single day will contain 144 (6x24) observations. Given a specific time, let's say you want to predict the temperature 6 hours in the future. In order to make this prediction, you choose to use 5 days of observations. Thus, you would create a window containing the last 720(5x144) observations to train the model. Many such configurations are possible, making this dataset a good one to experiment with.The function below returns the above described windows of time for the model to train on. The parameter `history_size` is the size of the past window of information. The `target_size` is how far in the future does the model need to learn to predict. The `target_size` is the label that needs to be predicted. 
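To make the windowing concrete before the helper is defined, here is a tiny standalone illustration (added for clarity, not from the original tutorial) of what `history_size = 3` and `target_size = 0` produce on a toy sequence: each sample is the previous three values and its label is the very next value. ###Code
toy = np.array([10., 11., 12., 13., 14., 15.])
history_size, target_size = 3, 0
for i in range(history_size, len(toy) - target_size):
    window = toy[i - history_size:i]   # past observations handed to the model
    label = toy[i + target_size]       # value the model should learn to predict
    print(window, '->', label)
###Output _____no_output_____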
###Code def univariate_data(dataset, start_index, end_index, history_size, target_size): data = [] labels = [] start_index = start_index + history_size if end_index is None: end_index = len(dataset) - target_size for i in range(start_index, end_index): indices = range(i-history_size, i) # Reshape data from (history_size,) to (history_size, 1) data.append(np.reshape(dataset[indices], (history_size, 1))) labels.append(dataset[i+target_size]) return np.array(data), np.array(labels) ###Output _____no_output_____ ###Markdown 바로 다음것을 예측할 때 target_size = 0을 넣는다. In both the following tutorials, the first 300,000 rows of the data will be the training dataset, and there remaining will be the validation dataset. This amounts to ~2100 days worth of training data. ###Code TRAIN_SPLIT = 300000 ###Output _____no_output_____ ###Markdown Setting seed to ensure reproducibility. ###Code tf.random.set_seed(13) ###Output _____no_output_____ ###Markdown Part 1: Forecast a univariate time seriesFirst, you will train a model using only a single feature (temperature), and use it to make predictions for that value in the future.Let's first extract only the temperature from the dataset. ###Code uni_data = df['T (degC)'] uni_data.index = df['Date Time'] uni_data.head() ###Output _____no_output_____ ###Markdown Let's observe how this data looks across time. ###Code uni_data.plot(subplots=True) uni_data = uni_data.values ###Output _____no_output_____ ###Markdown It is important to scale features before training a neural network. Standardization is a common way of doing this scaling by subtracting the mean and dividing by the standard deviation of each feature.You could also use a `tf.keras.utils.normalize` method that rescales the values into a range of [0,1]. Note: The mean and standard deviation should only be computed using the training data. 표준화를 시킨다음에 학습을 시키려고 한다. ###Code uni_train_mean = uni_data[:TRAIN_SPLIT].mean() uni_train_std = uni_data[:TRAIN_SPLIT].std() ###Output _____no_output_____ ###Markdown Let's standardize the data. ###Code uni_data = (uni_data-uni_train_mean)/uni_train_std ###Output _____no_output_____ ###Markdown Let's now create the data for the univariate model. For part 1, the model will be given the last 20 recorded temperature observations, and needs to learn to predict the temperature at the next time step. ###Code univariate_past_history = 20 univariate_future_target = 0 x_train_uni, y_train_uni = univariate_data(uni_data, 0, TRAIN_SPLIT, univariate_past_history, univariate_future_target) x_val_uni, y_val_uni = univariate_data(uni_data, TRAIN_SPLIT, None, univariate_past_history, univariate_future_target) x_train_uni[3] y_train_uni[3] ###Output _____no_output_____ ###Markdown This is what the `univariate_data` function returns. ###Code print ('Single window of past history') print (x_train_uni[0]) print ('\n Target temperature to predict') print (y_train_uni[0]) ###Output Single window of past history [[-1.99766294] [-2.04281897] [-2.05439744] [-2.0312405 ] [-2.02660912] [-2.00113649] [-1.95134907] [-1.95134907] [-1.98492663] [-2.04513467] [-2.08334362] [-2.09723778] [-2.09376424] [-2.09144854] [-2.07176515] [-2.07176515] [-2.07639653] [-2.08913285] [-2.09260639] [-2.10418486]] Target temperature to predict -2.1041848598100876 ###Markdown Now that the data has been created, let's take a look at a single example. The information given to the network is given in blue, and it must predict the value at the red cross. 
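A quick sanity check on the array shapes produced above (added here): each training sample should be a (20, 1) window of past temperatures paired with a single scalar label. ###Code
print(x_train_uni.shape)  # (num_samples, 20, 1): 20 past observations per window
print(y_train_uni.shape)  # (num_samples,): one future temperature per window
###Output _____no_output_____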
###Code def create_time_steps(length): return list(range(-length, 0)) def show_plot(plot_data, delta, title): labels = ['History', 'True Future', 'Model Prediction'] marker = ['.-', 'rx', 'go'] time_steps = create_time_steps(plot_data[0].shape[0]) if delta: future = delta else: future = 0 plt.title(title) for i, x in enumerate(plot_data): if i: plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i]) else: plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i]) plt.legend() plt.xlim([time_steps[0], (future+5)*2]) plt.xlabel('Time-Step') return plt show_plot([x_train_uni[0], y_train_uni[0]], 0, 'Sample Example') ###Output _____no_output_____ ###Markdown BaselineBefore proceeding to train a model, let's first set a simple baseline. Given an input point, the baseline method looks at all the history and predicts the next point to be the average of the last 20 observations. ###Code def baseline(history): return np.mean(history) show_plot([x_train_uni[0], y_train_uni[0], baseline(x_train_uni[0])], 0, 'Baseline Prediction Example') ###Output _____no_output_____ ###Markdown Let's see if you can beat this baseline using a recurrent neural network. Recurrent neural networkA Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state summarizing the information they've seen so far. For more details, read the [RNN tutorial](https://www.tensorflow.org/tutorials/sequences/recurrent). In this tutorial, you will use a specialized RNN layer called Long Short Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM))Let's now use `tf.data` to shuffle, batch, and cache the dataset. ###Code BATCH_SIZE = 256 BUFFER_SIZE = 10000 # train할 때 몇개를 갖고와서 학습을 할 것인지. train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni)) train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni)) val_univariate = val_univariate.batch(BATCH_SIZE).repeat() ###Output _____no_output_____ ###Markdown The following visualisation should help you understand how the data is represented after batching.![Time Series](images/time_series.png) You will see the LSTM requires the input shape of the data it is being given. ###Code simple_lstm_model = tf.keras.models.Sequential([ tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]), tf.keras.layers.Dense(1) ]) simple_lstm_model.compile(optimizer='adam', loss='mae') simple_lstm_model.summary() ###Output Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= lstm (LSTM) (None, 8) 320 _________________________________________________________________ dense (Dense) (None, 1) 9 ================================================================= Total params: 329 Trainable params: 329 Non-trainable params: 0 _________________________________________________________________ ###Markdown Let's make a sample prediction, to check the output of the model. ###Code for x, y in val_univariate.take(1): print(simple_lstm_model.predict(x).shape) ###Output (256, 1) ###Markdown Let's train the model now. Due to the large size of the dataset, in the interest of saving time, each epoch will only run for 200 steps, instead of the complete training data as normally done. 
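Before kicking off training, the short cell below (an addition to the original tutorial) puts a number on the naive baseline by computing its mean absolute error over the whole validation set, so the LSTM's `val_loss` has a concrete reference point; it only uses the `x_val_uni`/`y_val_uni` arrays created earlier. ###Code
baseline_preds = x_val_uni.mean(axis=1).squeeze()           # mean of each 20-step history window
baseline_mae = np.mean(np.abs(baseline_preds - y_val_uni))  # same metric as the model's 'mae' loss
print('Baseline validation MAE (standardised units):', baseline_mae)
###Output _____no_output_____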
###Code EVALUATION_INTERVAL = 200 EPOCHS = 10 simple_lstm_model.fit(train_univariate, epochs=EPOCHS, steps_per_epoch=EVALUATION_INTERVAL, validation_data=val_univariate, validation_steps=50) ###Output Train for 200 steps, validate for 50 steps Epoch 1/10 200/200 [==============================] - 1s 6ms/step - loss: 0.4075 - val_loss: 0.1351 Epoch 2/10 200/200 [==============================] - 0s 2ms/step - loss: 0.1118 - val_loss: 0.0360 Epoch 3/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0490 - val_loss: 0.0289 Epoch 4/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0444 - val_loss: 0.0257 Epoch 5/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0299 - val_loss: 0.0235 Epoch 6/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0317 - val_loss: 0.0225 Epoch 7/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0287 - val_loss: 0.0207 Epoch 8/10 200/200 [==============================] - 1s 3ms/step - loss: 0.0263 - val_loss: 0.0200 Epoch 9/10 200/200 [==============================] - 0s 2ms/step - loss: 0.0254 - val_loss: 0.0182 Epoch 10/10 200/200 [==============================] - 1s 6ms/step - loss: 0.0228 - val_loss: 0.0174 ###Markdown Predict using the simple LSTM modelNow that you have trained your simple LSTM, let's try and make a few predictions. ###Code for x, y in val_univariate.take(3): plot = show_plot([x[0].numpy(), y[0].numpy(), simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model') plot.show() ###Output _____no_output_____ ###Markdown This looks better than the baseline. Now that you have seen the basics, let's move on to part two, where you will work with a multivariate time series. Part 2: Forecast a multivariate time series The original dataset contains fourteen features. For simplicity, this section considers only three of the original fourteen. The features used are air temperature, atmospheric pressure, and air density. To use more features, add their names to this list. ###Code features_considered = ['p (mbar)', 'T (degC)', 'rho (g/m**3)'] features = df[features_considered] features.index = df['Date Time'] features.head() ###Output _____no_output_____ ###Markdown Let's have a look at how each of these features vary across time. ###Code features.plot(subplots=True) ###Output _____no_output_____ ###Markdown As mentioned, the first step will be to standardize the dataset using the mean and standard deviation of the training data. ###Code dataset = features.values data_mean = dataset[:TRAIN_SPLIT].mean(axis=0) data_std = dataset[:TRAIN_SPLIT].std(axis=0) dataset = (dataset-data_mean)/data_std ###Output _____no_output_____ ###Markdown Single step modelIn a single step setup, the model learns to predict a single point in the future based on some history provided.The below function performs the same windowing task as below, however, here it samples the past observation based on the step size given. 
###Code def multivariate_data(dataset, target, start_index, end_index, history_size, target_size, step, single_step=False): data = [] labels = [] start_index = start_index + history_size if end_index is None: end_index = len(dataset) - target_size for i in range(start_index, end_index): indices = range(i-history_size, i, step) data.append(dataset[indices]) if single_step: labels.append(target[i+target_size]) else: labels.append(target[i:i+target_size]) return np.array(data), np.array(labels) ###Output _____no_output_____ ###Markdown In this tutorial, the network is shown data from the last five (5) days, i.e. 720 observations that are sampled every hour. The sampling is done every one hour since a drastic change is not expected within 60 minutes. Thus, 120 observation represent history of the last five days. For the single step prediction model, the label for a datapoint is the temperature 12 hours into the future. In order to create a label for this, the temperature after 72(12*6) observations is used. ###Code past_history = 720 future_target = 72 # 72개 후의 기후를 예측 STEP = 6 x_train_single, y_train_single = multivariate_data(dataset, dataset[:, 1], 0, TRAIN_SPLIT, past_history, future_target, STEP, single_step=True) x_val_single, y_val_single = multivariate_data(dataset, dataset[:, 1], TRAIN_SPLIT, None, past_history, future_target, STEP, single_step=True) ###Output _____no_output_____ ###Markdown Let's look at a single data-point. ###Code print ('Single window of past history : {}'.format(x_train_single[0].shape)) train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single)) train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data_single = tf.data.Dataset.from_tensor_slices((x_val_single, y_val_single)) val_data_single = val_data_single.batch(BATCH_SIZE).repeat() single_step_model = tf.keras.models.Sequential() single_step_model.add(tf.keras.layers.LSTM(32, input_shape=x_train_single.shape[-2:])) single_step_model.add(tf.keras.layers.Dense(1)) single_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae') ###Output _____no_output_____ ###Markdown Let's check out a sample prediction. ###Code for x, y in val_data_single.take(1): print(single_step_model.predict(x).shape) single_step_history = single_step_model.fit(train_data_single, epochs=EPOCHS, steps_per_epoch=EVALUATION_INTERVAL, validation_data=val_data_single, validation_steps=50) def plot_train_history(history, title): loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(loss)) plt.figure() plt.plot(epochs, loss, 'b', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title(title) plt.legend() plt.show() plot_train_history(single_step_history, 'Single Step Training and validation loss') ###Output _____no_output_____ ###Markdown Predict a single step futureNow that the model is trained, let's make a few sample predictions. The model is given the history of three features over the past five days sampled every hour (120 data-points), since the goal is to predict the temperature, the plot only displays the past temperature. The prediction is made one day into the future (hence the gap between the history and prediction). 
###Code for x, y in val_data_single.take(3): plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(), single_step_model.predict(x)[0]], 12, 'Single Step Prediction') plot.show() ###Output _____no_output_____ ###Markdown Multi-Step modelIn a multi-step prediction model, given a past history, the model needs to learn to predict a range of future values. Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predict a sequence of the future.For the multi-step model, the training data again consists of recordings over the past five days sampled every hour. However, here, the model needs to learn to predict the temperature for the next 12 hours. Since an obversation is taken every 10 minutes, the output is 72 predictions. For this task, the dataset needs to be prepared accordingly, thus the first step is just to create it again, but with a different target window. ###Code future_target = 72 x_train_multi, y_train_multi = multivariate_data(dataset, dataset[:, 1], 0, TRAIN_SPLIT, past_history, future_target, STEP) x_val_multi, y_val_multi = multivariate_data(dataset, dataset[:, 1], TRAIN_SPLIT, None, past_history, future_target, STEP) ###Output _____no_output_____ ###Markdown Let's check out a sample data-point. ###Code print ('Single window of past history : {}'.format(x_train_multi[0].shape)) print ('\n Target temperature to predict : {}'.format(y_train_multi[0].shape)) train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi)) train_data_multi = train_data_multi.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi)) val_data_multi = val_data_multi.batch(BATCH_SIZE).repeat() ###Output _____no_output_____ ###Markdown Plotting a sample data-point. ###Code def multi_step_plot(history, true_future, prediction): plt.figure(figsize=(12, 6)) num_in = create_time_steps(len(history)) num_out = len(true_future) plt.plot(num_in, np.array(history[:, 1]), label='History') plt.plot(np.arange(num_out)/STEP, np.array(true_future), 'bo', label='True Future') if prediction.any(): plt.plot(np.arange(num_out)/STEP, np.array(prediction), 'ro', label='Predicted Future') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown In this plot and subsequent similar plots, the history and the future data are sampled every hour. ###Code for x, y in train_data_multi.take(1): multi_step_plot(x[0], y[0], np.array([0])) ###Output _____no_output_____ ###Markdown Since the task here is a bit more complicated than the previous task, the model now consists of two LSTM layers. Finally, since 72 predictions are made, the dense layer outputs 72 predictions. ###Code multi_step_model = tf.keras.models.Sequential() multi_step_model.add(tf.keras.layers.LSTM(32, return_sequences=True, input_shape=x_train_multi.shape[-2:])) multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu')) multi_step_model.add(tf.keras.layers.Dense(72)) multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0), loss='mae') ###Output _____no_output_____ ###Markdown Let's see how the model predicts before it trains. 
###Code for x, y in val_data_multi.take(1): print (multi_step_model.predict(x).shape) multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS, steps_per_epoch=EVALUATION_INTERVAL, validation_data=val_data_multi, validation_steps=50) plot_train_history(multi_step_history, 'Multi-Step Training and validation loss') ###Output _____no_output_____ ###Markdown Predict a multi-step futureLet's now have a look at how well your network has learnt to predict the future. ###Code for x, y in val_data_multi.take(3): multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0]) ###Output _____no_output_____
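###Markdown As a closing check (added here, not part of the original tutorial), the cell below estimates the multi-step model's mean absolute error on a slice of the validation arrays and converts it back from standardised units to degrees Celsius using the temperature standard deviation stored in `data_std`. ###Code
preds = multi_step_model.predict(x_val_multi[:512])
mae_std = np.mean(np.abs(preds - y_val_multi[:512]))   # error in standardised units
print('Multi-step MAE: {:.3f} (standardised) ~ {:.2f} degC'.format(mae_std, mae_std * data_std[1]))
###Output _____no_output_____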
Exploring eBay Cars Sales Data_Project/Exploring eBay Cars Sales Data.ipynb
###Markdown We will be working on a dataset of used cars from eBay Kleinanzeigen, a classifieds section of the German eBay website.The data dictionary provided with data is as follows: - `dateCrawled` - When this ad was first crawled. All field-values are taken from this date. - `name` - Name of the car. - `seller` - Whether the seller is private or a dealer. - `offerTyp`e - The type of listing - `price` - The price on the ad to sell the car. - `abtest` - Whether the listing is included in an A/B test. - `vehicleType` - The vehicle Type. - `yearOfRegistration` - The year in which which year the car was first registered. - `gearbox` - The transmission type. - `powerPS` - The power of the car in PS. - `model` - The car model name. - `kilometer` - How many kilometers the car has driven. - `monthOfRegistration` - The month in which which year the car was first registered. - `fuelType` - What type of fuel the car uses. - `brand` - The brand of the car. - `notRepairedDamage` - If the car has a damage which is not yet repaired. - `dateCreated` - The date on which the eBay listing was created. - `nrOfPictures` - The number of pictures in the ad. - `postalCode`- The postal code for the location of the vehicle. - `lastSeenOnline` - When the crawler saw this ad last online.The aim of this project is to clean the data and analyze the included used car listings. ###Code import numpy as np import pandas as pd import warnings warnings.filterwarnings('ignore') autos = pd.read_csv("D:/Developer/Data Science/Projects/CSV files/autos.csv", encoding = 'Latin-1') autos.head(3) ###Output _____no_output_____ ###Markdown Cleaning Column Names ###Code autos.columns autos.columns = ['date_crawled', 'name', 'seller', 'offer_type', 'price', 'ab_test', 'vehicle_type', 'registration_year', 'gearbox', 'power', 'model', 'odometer', 'registration_month', 'fuel_type', 'brand', 'unrepaired_damage', 'date_created', 'number_pictures', 'postal_code','last_seen'] len(autos) #371528 rows imported len(autos.columns) #20 columns imported ###Output _____no_output_____ ###Markdown Renaming column ###Code autos.rename({'odometer':'odometer_km'}, axis = 1, inplace = True) ###Output _____no_output_____ ###Markdown Changing Type ###Code for col in ['date_crawled', 'date_created', 'last_seen']: autos[col] = pd.to_datetime(autos[col]) for col in ['name', 'seller', 'offer_type', 'ab_test', 'vehicle_type', 'gearbox', 'model', 'fuel_type', 'power']: autos[col] = autos[col].astype('category') ###Output _____no_output_____ ###Markdown zfill consider whole postal code equal to zero rather than considering it as a some number ###Code autos['postal_code'] = autos['postal_code'].astype('str').str.zfill(5) autos['odometer_km'] = autos['odometer_km'].astype('float') autos['price'] = autos['price'].astype('float') ###Output _____no_output_____ ###Markdown Initial Exploration and Cleaning ###Code autos['unrepaired_damage'].unique() autos['number_pictures'].unique() autos['number_pictures'].nunique() autos.drop('number_pictures', axis = 1, inplace = True) autos.drop('unrepaired_damage', axis = 1, inplace = True) autos.drop('offer_type', axis = 1, inplace = True) autos.drop('seller', axis = 1, inplace = True) autos.head(3) autos.info() autos.describe(include='all') ###Output _____no_output_____ ###Markdown Finding Outliers ***Find outliers with the Interquartile Range where q1 is 25 percentile and q3 75 percentile value of .describe()*** ###Code autos['price'].describe().apply(lambda x: format(x, 'f')) #scientific notation suppressed. 
autos['odometer_km'].describe().apply(lambda x: format(x, 'f')) cols = ['price', 'odometer_km'] q1 = autos[cols].quantile(0.25) q3 = autos[cols].quantile(0.75) iqr = q3 - q1 # Return a boolean array of the rows with (any) non-outlier column values condition = ~((autos[cols] < (q1 - 1.5 * iqr)) | (autos[cols] > (q3 +1.5 * iqr))).any(axis = 1) # Filter our dataframe based on condition filtered_autos = autos[condition] len(filtered_autos) ###Output _____no_output_____ ###Markdown **Original Rows: 371528**\**After outliers**\**Adjusted Rows: 295620** Exploring the Odometer and Price Columns ###Code filtered_autos['odometer_km'].value_counts() ###Output _____no_output_____ ###Markdown It can be seen that the values in this field are rounded, which might indicate that sellers had to choose from pre-set options for this field. Additionally, there are more high-mileage than low-mileage vehicles. ###Code filtered_autos['price'].describe() print('Total Number of Unique Values:',filtered_autos['price'].nunique()) print('Lowest Price:',filtered_autos['price'].min()) print('Highest Price:',filtered_autos['price'].max()) ###Output Total Number of Unique Values: 2990 Lowest Price: 0.0 Highest Price: 16270.0 ###Markdown Removing rows that list no price (a price of 0). ###Code filtered_price = filtered_autos[filtered_autos['price'].between(1, 16270)] ###Output _____no_output_____ ###Markdown Exploring the Date Columns ###Code #showing distribution of values for dates #to include missing values in the distribution and to use percentages instead of counts (dropna). filtered_autos[['date_crawled','date_created','last_seen',]].value_counts(normalize = True).sort_index().head(10) autos['registration_year'].describe() filtered_autos['registration_year'].describe() ###Output _____no_output_____ ###Markdown Dealing with Incorrect Registration Year Data A car can't be first registered after the listing was seen, so any vehicle with a registration year above 2016 is definitely inaccurate. Determining the earliest valid year is more difficult; realistically, it could be somewhere in the first few decades of the 1900s. The above-mentioned problem can be solved by removing the listings with these values. Let's determine what percentage of our data has invalid values in this column: ###Code filtered_autos_1900_2016 = filtered_autos[filtered_autos['registration_year'].between(1900, 2016)] filtered_autos_1900_2016['registration_year'].value_counts(normalize = True) ###Output _____no_output_____ ###Markdown It appears that most of the vehicles were first registered in the past 20 years. Exploring Price & Mileage by Brand ###Code filtered_autos['brand'].value_counts() ###Output _____no_output_____ ###Markdown Selecting the top 20 brands. ###Code first20_brands = filtered_autos['brand'].value_counts().index[0:20] first20_brands ###Output _____no_output_____ ###Markdown Below we will construct dictionaries with each unique brand name as the key and the mean price and mean mileage as the values. ###Code mean_price = {} mean_mileage = {} for key in first20_brands: selected_top20_brands = filtered_price[filtered_price['brand'] == key] mean_price[key] = round(selected_top20_brands['price'].agg(np.mean), 4) mean_mileage[key] = round(selected_top20_brands['odometer_km'].mean(), 4) mean_price mean_mileage ###Output _____no_output_____ ###Markdown Converting from dictionary to dataframe.
###Code def top20_brands(x, y): mean_price_df = pd.DataFrame.from_dict(x, orient = 'index', columns = ['mean_price']) mean_mileage_df = pd.DataFrame.from_dict(y, orient = 'index', columns = ['mean_mileage']) mean_price_mileage = pd.concat([mean_price_df, mean_mileage_df], axis = 1) return mean_price_mileage top20_brands(mean_price, mean_mileage) ###Output _____no_output_____
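###Markdown A small follow-up (added here): sorting the combined table by mean price makes the brand comparison easier to read; the column names are the ones created in `top20_brands` above. ###Code
brand_summary = top20_brands(mean_price, mean_mileage)
brand_summary.sort_values('mean_price', ascending=False)
###Output _____no_output_____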
fastai_TSC_rMASTIF.ipynb
###Markdown This Notebook is an adaptation of the [Fastai Lesson 1 notebook](https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb) to the [rMASTIF (Croatian) Traffic Sign Classification Dataset](http://www.zemris.fer.hr/~kalfa/Datasets/rMASTIF/) (4044 traffic sign examples in train and 1784 in test set, from 31 classes): ###Code # Put these at the top of every notebook, to get automatic reloading and inline plotting %reload_ext autoreload %autoreload 2 %matplotlib inline ###Output _____no_output_____ ###Markdown Here we import the libraries we need. ###Code # This file contains all the main external libs we'll use from fastai.imports import * from fastai.transforms import * from fastai.conv_learner import * from fastai.model import * from fastai.dataset import * from fastai.sgdr import * from fastai.plots import * ###Output _____no_output_____ ###Markdown Classes visualization `PATH` is the path to your data - if you use the recommended setup approaches from the lesson, you won't need to change this. `sz` is the size that the images will be resized to in order to ensure that the training runs quickly. ###Code PATH = "D:/Datasets/TrafficSigns/rmastif_fastai/" sz=299 ###Output _____no_output_____ ###Markdown The loading and visualization code below are adapted from Waleed Abdulla's blog post on [Traffic Sign Recognition with Tensorflow](https://medium.com/@waleedka/traffic-sign-recognition-with-tensorflow-629dffc391a6) ###Code import skimage.transform import skimage.data def load_data(data_dir): # Get all subdirectories of data_dir. Each represents a label. directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))] # Loop through the label directories and collect the data in # two lists, labels and images. labels = [] images = [] for d in directories: label_dir = os.path.join(data_dir, d) file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")] for f in file_names: current_img = skimage.data.imread(f) resized_img = skimage.transform.resize(current_img, (32, 32)) images.append(resized_img) labels.append(int(d)) return images, labels train_data_dir = PATH + 'train/' test_data_dir = PATH + 'valid/' images_test, labels_test = load_data(test_data_dir) def display_images_and_labels(images, labels): """Display the first image of each label.""" unique_labels = set(labels) plt.figure(figsize=(15, 15)) i = 1 for label in unique_labels: # Pick the first image for each label. image = images[labels.index(label)] plt.subplot(8, 8, i) # A grid of 8 rows x 8 columns plt.axis('off') plt.title("Label {0} ({1})".format(label, labels.count(label))) i += 1 _ = plt.imshow(image) plt.show() display_images_and_labels(images_test, labels_test) ###Output _____no_output_____ ###Markdown It's important that you have a working NVidia GPU set up. The programming framework used to behind the scenes to work with NVidia GPUs is called CUDA. Therefore, you need to ensure the following line returns `True` before you proceed. If you have problems with this, please check the FAQ and ask for help on [the forums](http://forums.fast.ai). ###Code torch.cuda.is_available() ###Output _____no_output_____ ###Markdown In addition, NVidia provides special accelerated functions for deep learning in a package called CuDNN. Although not strictly necessary, it will improve training performance significantly, and is included by default in all supported fastai configurations. 
Therefore, if the following does not return `True`, you may want to look into why. ###Code torch.backends.cudnn.enabled ###Output _____no_output_____ ###Markdown Define the model We're going to use a pre-trained model, that is, a model created by some one else to solve a different problem. Instead of building a model from scratch to solve a similar problem, we'll use a model trained on ImageNet (1.2 million images and 1000 classes) as a starting point. The model is a Convolutional Neural Network (CNN), a type of Neural Network that builds state-of-the-art models for computer vision. We'll be learning all about CNNs during this course.We will be using the resnet50 model. resnet50 is a version of the model that won the 2015 ImageNet competition. Here is more info on [resnet models](https://github.com/KaimingHe/deep-residual-networks). ###Code # Uncomment the below if you need to reset your precomputed activations # shutil.rmtree(f'{PATH}tmp', ignore_errors=True) arch=resnet50 bs=32 ###Output _____no_output_____ ###Markdown Data augmentation If you try training for more epochs, you'll notice that we start to *overfit*, which means that our model is learning to recognize the specific images in the training set, rather than generalizing such that we also get good results on the validation set. One way to fix this is to effectively create more data, through *data augmentation*. This refers to randomly changing the images in ways that shouldn't impact their interpretation, such as horizontal flipping, zooming, and rotating.We can do this by passing `aug_tfms` (*augmentation transforms*) to `tfms_from_model`, with a list of functions to apply that randomly change the image however we wish. For photos that are largely taken from the side (e.g. most photos of dogs and cats, as opposed to photos taken from the top down, such as satellite imagery) we can use the pre-defined list of functions `transforms_side_on`. We can also specify random zooming of images up to specified scale by adding the `max_zoom` parameter. ###Code tfms = tfms_from_model(arch, sz, aug_tfms=transforms_basic, max_zoom=1.3) def get_augs(): data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms, num_workers=4) x,_ = next(iter(data.aug_dl)) return data.trn_ds.denorm(x)[1] ims = np.stack([get_augs() for i in range(6)]) print(ims.shape) plots(ims, rows=2) ###Output _____no_output_____ ###Markdown Let's create a new `data` object that includes this augmentation in the transforms, as well as a ConvLearner object.Please note that we use a high dropout value, because we're fitting a large model into a small datasetIMPORTANT NOTE: In this work, the test set is used directly as validation set. In general, one should rather use a validation set different than the test set, so that hyper parameter tuning is done on the validation set, and only the final model is tested on the test set. However, in this case, since the code is mostly taken "out-of-the-box" from fast.ai lesson, and no hyper-parameter tuning has been performed, the results are still legitimate. 
###Code data = ImageClassifierData.from_paths(PATH, tfms=tfms,bs=bs) ###Output _____no_output_____ ###Markdown For now, we set precompute = True, which prevents the data augmentation to have effect, but allows for faster training of the last layers ###Code learn = ConvLearner.pretrained(arch, data, precompute=True, ps=0.8) ###Output _____no_output_____ ###Markdown Choosing a learning rate The *learning rate* determines how quickly or how slowly you want to update the *weights* (or *parameters*). Learning rate is one of the most difficult parameters to set, because it significantly affect model performance.The method `learn.lr_find()` helps you find an optimal learning rate. It uses the technique developed in the 2015 paper [Cyclical Learning Rates for Training Neural Networks](http://arxiv.org/abs/1506.01186), where we simply keep increasing the learning rate from a very small value, until the loss stops decreasing. We can plot the learning rate across batches to see what this looks like.We first create a new learner, since we want to know how to set the learning rate for a new (untrained) model. ###Code lrf=learn.lr_find(start_lr=1e-5) ###Output _____no_output_____ ###Markdown Our `learn` object contains an attribute `sched` that contains our learning rate scheduler, and has some convenient plotting functionality including this one: ###Code learn.sched.plot_lr() ###Output _____no_output_____ ###Markdown Note that in the previous plot *iteration* is one iteration (or *minibatch*) of SGD. In one epoch there are (num_train_samples/num_iterations) of SGD.We can see the plot of loss versus learning rate to see where our loss stops decreasing: ###Code learn.sched.plot() lr = 1e-1 ###Output _____no_output_____ ###Markdown The loss is still clearly improving at lr=1e-1 (0.1), so that's what we use. Note that the optimal learning rate can change as we training the model, so you may want to re-run this function from time to time. Learning Without Data Augmentation (precompute=false) The [1cycle training policy](https://arxiv.org/abs/1803.09820) is used here, with parameters set according to the experiment 3 provided in Sylvain Gugger's notebook: https://github.com/sgugger/Deep-Learning/blob/master/Cyclical%20LR%20and%20momentums.ipynb ###Code learn.fit(lr, 1, cycle_len=3, use_clr_beta=(20,10,0.95,0.85),wds=1e-4) learn.sched.plot_lr() learn.sched.plot_loss() ###Output _____no_output_____ ###Markdown Unfreeze all layers Now that we have a good final layer trained, we can try fine-tuning the other layers. To tell the learner that we want to unfreeze the remaining layers, just call (surprise surprise!) `unfreeze()`. ###Code learn.precompute=False learn.unfreeze() ###Output _____no_output_____ ###Markdown Note that the other layers have *already* been trained to recognize imagenet photos (whereas our final layers where randomly initialized), so we want to be careful of not destroying the carefully tuned weights that are already there.Generally speaking, the earlier layers (as we've seen) have more general-purpose features. Therefore we would expect them to need less fine-tuning for new datasets. For this reason we will use different learning rates for different layers: the first few layers will be at 1e-4, the middle layers at 1e-3, and our FC layers we'll leave at 1e-2 as before. We refer to this as *differential learning rates*. 
###Code lrs=np.array([lr/9,lr/3,lr]) ###Output _____no_output_____ ###Markdown Use Learning Rate Finder again ###Code learn.lr_find(lrs/1000) learn.sched.plot() lr = 5e-2 lrs=np.array([lr/9,lr/3,lr]) ###Output _____no_output_____ ###Markdown Train again The [1cycle training policy](https://arxiv.org/abs/1803.09820) is used here again, with the same parameters ###Code learn.fit(lrs, 1, cycle_len=20, use_clr_beta=(20,10,0.95,0.85), wds=1e-4) learn.sched.plot_lr() ###Output _____no_output_____ ###Markdown Note that's what being plotted above is the learning rate of the *final layers*. The learning rates of the earlier layers are fixed at the same multiples of the final layer rates as we initially requested (i.e. the first layers have 100x smaller, and middle layers 10x smaller learning rates, since we set `lr=np.array([1e-4,1e-3,1e-2])`. ###Code learn.sched.plot_loss() learn.save('299_all_MASTIF_TSC_3') learn.load('299_all_MASTIF_TSC_3') ###Output _____no_output_____ ###Markdown There is something else we can do with data augmentation: use it at *inference* time (also known as *test* time). Not surprisingly, this is known as *test time augmentation*, or just *TTA*.TTA simply makes predictions not just on the images in your validation set, but also makes predictions on a number of randomly augmented versions of them too (by default, it uses the original image along with 4 randomly augmented versions). It then takes the average prediction from these images, and uses that. To use TTA on the validation set, we can use the learner's `TTA()` method. ###Code log_preds,y = learn.TTA() probs = np.mean(np.exp(log_preds),0) accuracy_np(probs, y) ###Output _____no_output_____
Notebooks/Exploration/hansen_et_al_experiment.ipynb
###Markdown Code adapted from: https://menvuthy.github.io/Vuthy/forest%20cover/forest-cover-cambo-copy/ + https://blog.gishub.org/earth-engine-tutorial-30-how-to-get-image-properties-and-descriptive-statistics + https://developers.google.com/earth-engine/tutorials/tutorial_forest_03a Further Resources- https://github.com/RSPB/GFCalculator/blob/master/GFCalculator.py ###Code import ee import geemap from geemap import * import json from geemap import geojson_to_ee, ee_to_geojson from ipyleaflet import GeoJSON import os # !pip install geemap Map = geemap.Map() ###Output _____no_output_____ ###Markdown Set Region Of Interest ###Code #clip image to only include aoi region file_path = os.path.abspath('/Users/joycelynlongdon/Desktop/Cambridge/CambridgeCoding/MRES/Data/GeoJSONS/PIREDD_mai_ndombe.geojson') with open(file_path) as f: studyRegion = json.load(f) studyRegion = ee.FeatureCollection(studyRegion).first().geometry() #print(studyRegion) Map.setCenter(18.4276, -2.6357,7) Map.addLayer(studyRegion,{},"studyRegion") Map ###Output ee.Geometry({ "functionInvocationValue": { "functionName": "Element.geometry", "arguments": { "feature": { "functionInvocationValue": { "functionName": "Collection.first", "arguments": { "collection": { "functionInvocationValue": { "functionName": "Collection", "arguments": { "features": { "arrayValue": { "values": [ { "functionInvocationValue": { "functionName": "Feature", "arguments": { "geometry": { "functionInvocationValue": { "functionName": "GeometryConstructors.Polygon", "arguments": { "coordinates": { "constantValue": [ [ [ 16.6425704956055, -1.98846942098849 ], [ 16.6315841674805, -2.0927733356438925 ], [ 16.6205978393555, -2.1421780526291276 ], [ 16.5986251831055, -2.1751136476393973 ], [ 16.549186706543, -2.230004702705891 ], [ 16.6315841674805, -2.230004702705891 ], [ 16.724967956543, -2.1751136476393973 ], [ 16.768913269043, -2.153156663524256 ], [ 16.7963790893555, -2.1147311820101176 ], [ 16.878776550293, -2.120220595176218 ], [ 16.911735534668, -2.153156663524256 ], [ 16.966667175293, -2.153156663524256 ], [ 17.010612487793, -2.136688717648249 ], [ 17.065544128418, -2.0927733356438925 ], [ 17.1259689331055, -2.0927733356438925 ], [ 17.1918869018555, -2.076304748891308 ], [ 17.2468185424805, -2.0488567236098203 ], [ 17.285270690917997, -1.966509850442529 ], [ 17.3347091674805, -1.9170997650224688 ], [ 17.345695495605497, -1.8786687106335822 ], [ 17.466545104980497, -1.8676882533208028 ], [ 17.483024597168, -1.7963136261565165 ], [ 17.5434494018555, -1.7963136261565165 ], [ 17.638206481933604, -1.7976862417926116 ], [ 17.651939392089854, -1.7908231533069343 ], [ 17.665672302246104, -1.7839600391179224 ], [ 17.68901824951173, -1.7839600391179224 ], [ 17.691764831542983, -1.7825874132043735 ], [ 17.701377868652358, -1.7551346810636501 ], [ 17.706871032714854, -1.7372901886328997 ], [ 17.72197723388673, -1.709836794806311 ], [ 17.728843688964854, -1.6974826390964608 ], [ 17.74120330810548, -1.683755706855783 ], [ 17.75768280029298, -1.6755195010575505 ], [ 17.775535583496108, -1.6741467967195256 ], [ 17.798881530761733, -1.671401385160608 ], [ 17.816734313964854, -1.6700286779412803 ], [ 17.83733367919923, -1.6700286779412803 ], [ 17.86205291748048, -1.6686559697630663 ], [ 17.877159118652354, -1.6590469857335546 ], [ 17.893638610839854, -1.661792414516626 ], [ 17.934837341308604, -1.6659105505332232 ], [ 17.956809997558608, -1.671401385160608 ], [ 17.965049743652354, -1.672774091420286 ], [ 17.98839569091798, -1.661792414516626 ], [ 18.008995056152354, 
-1.6686559697630663 ], [ 18.02135467529298, -1.6782649068467383 ], [ 18.039207458496104, -1.6919918778514316 ], [ 18.02959442138673, -1.733172204839698 ], [ 18.03234100341798, -1.7372901886328997 ], [ 18.03508758544923, -1.751016736324034 ], [ 18.04332733154298, -1.7496440860658198 ], [ 18.05706024169923, -1.7523893855769257 ], [ 18.06804656982423, -1.7496440860658198 ], [ 18.05980682373048, -1.7716063691192543 ], [ 18.07079315185548, -1.7784695293263888 ], [ 18.080406188964854, -1.7716063691192543 ], [ 18.091392517089854, -1.7702337340233167 ], [ 18.10375213623048, -1.7839600391179224 ], [ 18.124351501464858, -1.7949410094890434 ], [ 18.12847137451173, -1.7853326640072564 ], [ 18.14495086669923, -1.7770968993239187 ], [ 18.15593719482423, -1.7867052878716376 ], [ 18.173789978027354, -1.8072945223036307 ], [ 18.171043395996108, -1.8169027519733305 ], [ 18.168296813964854, -1.8278835232806203 ], [ 18.179283142089854, -1.8429819740854612 ], [ 18.19164276123048, -1.8567077273870347 ], [ 18.19026947021487, -1.8704333740979158 ], [ 18.19164276123048, -1.8841589134312098 ], [ 18.179283142089854, -1.8965118063730428 ], [ 18.161430358886733, -1.9006294177849432 ], [ 18.16280364990237, -1.9116096668179035 ], [ 18.168296813964872, -1.926707394479835 ], [ 18.17104339599612, -1.9390599812528866 ], [ 18.18477630615237, -1.9610199125262915 ], [ 18.191642761230497, -1.977489672039944 ], [ 18.17653656005862, -1.9994490968853693 ], [ 18.17653656005862, -2.0063113570454383 ], [ 18.17928314208987, -2.0131735884140958 ], [ 18.17653656005862, -2.015918472877917 ], [ 18.17104339599612, -2.015918472877917 ], [ 18.18752288818362, -2.03238768223908 ], [ 18.197135925292994, -2.043367061902404 ], [ 18.20400238037112, -2.043367061902404 ], [ 18.21224212646487, -2.0461118951050343 ], [ 18.22048187255862, -2.0461118951050343 ], [ 18.2246017456055, -2.0529739575447254 ], [ 18.25069427490237, -2.092773335643867 ], [ 18.279533386230494, -2.101007564240218 ], [ 18.30287933349612, -2.117475891023843 ], [ 18.298759460449247, -2.161390569814226 ], [ 18.30562591552737, -2.203931703788394 ], [ 18.301506042480494, -2.2533327839586588 ], [ 18.30837249755862, -2.287638105783935 ], [ 18.31111907958987, -2.323314769908254 ], [ 18.30699920654297, -2.346298298202687 ], [ 18.302536010742188, -2.3507577451238473 ], [ 18.299102783203125, -2.355903243095241 ], [ 18.290176391601573, -2.367223271654424 ], [ 18.282966613769542, -2.3733977937460478 ], [ 18.27850341796875, -2.3747699060236154 ], [ 18.274383544921875, -2.3778571536645723 ], [ 18.26854705810547, -2.380944394397848 ], [ 18.262367248535156, -2.385403729910527 ], [ 18.255844116210938, -2.381630446955156 ], [ 18.248291015624996, -2.3843746537667907 ], [ 18.249664306640625, -2.3891770025143497 ], [ 18.251724243164062, -2.3919211942605494 ], [ 18.25103759765625, -2.4004967580531793 ], [ 18.23421478271487, -2.393293288075277 ], [ 18.144950866699247, -2.376828071884703 ], [ 18.1037521362305, -2.3795722882586876 ], [ 18.08864593505862, -2.3864328052843393 ], [ 18.070793151855497, -2.393293288075277 ], [ 18.048820495605494, -2.393293288075277 ], [ 18.029594421386744, -2.3823164991708334 ], [ 17.987022399902372, -2.4056420705140775 ], [ 17.96779632568362, -2.4125024569254423 ], [ 17.955436706542994, -2.4193628087290135 ], [ 17.927970886230504, -2.4715003411858247 ], [ 17.8730392456055, -2.493452376306776 ], [ 17.807121276855508, -2.5373553445957278 ], [ 17.713737487793004, -2.647106187195243 ], [ 17.6752853393555, -2.7239260182365483 ], [ 17.5764083862305, -2.773307616493278 ], [ 
17.5104904174805, -2.828173639486196 ], [ 17.461051940918, -2.828173639486196 ], [ 17.345695495605497, -2.8830370671040906 ], [ 17.2687911987305, -2.9653272324398485 ], [ 17.241325378418, -3.014698404011999 ], [ 17.1918869018555, -3.014698404011999 ], [ 17.109489440918004, -2.9872702496140904 ], [ 17.043571472167997, -3.014698404011999 ], [ 16.977653503418, -3.014698404011999 ], [ 16.9392013549805, -3.0421258669263413 ], [ 16.999626159668, -3.080523141380916 ], [ 17.0600509643555, -3.124404044097061 ], [ 17.164421081542997, -3.1408589110717373 ], [ 17.224845886230497, -3.157313518735739 ], [ 17.3017501831055, -3.2121603207785396 ], [ 17.3786544799805, -3.2121603207785396 ], [ 17.4885177612305, -3.2724883980455695 ], [ 17.570915222168, -3.316361086025376 ], [ 17.6423263549805, -3.316361086025376 ], [ 17.7192306518555, -3.3273289548694125 ], [ 17.7412033081055, -3.3711992056252034 ], [ 17.7961349487305, -3.3986171092087667 ], [ 17.8510665893555, -3.4260342336390766 ], [ 17.922477722168, -3.409584052829599 ], [ 17.9829025268555, -3.3986171092087667 ], [ 18.021354675293, -3.3821664601566166 ], [ 18.065299987793, -3.420550871385538 ], [ 18.153190612793, -3.447967367989137 ], [ 18.208122253418, -3.4150674777657253 ], [ 18.252067565917997, -3.4370008638469947 ], [ 18.3454513549805, -3.5082808694583134 ], [ 18.438835144043004, -3.5521427932870453 ], [ 18.570671081543, -3.596002632832406 ], [ 18.691520690918, -3.6234139627474016 ], [ 18.768424987793, -3.6453424290749363 ], [ 18.856315612792997, -3.678234125061053 ], [ 19.010124206543, -3.722087829707387 ], [ 19.076042175293004, -3.765939350900909 ], [ 19.1804122924805, -3.79882654351889 ], [ 19.257316589355504, -3.8317124827698223 ], [ 19.361686706543, -3.908441406234201 ], [ 19.471549987793008, -3.974203473095141 ], [ 19.537467956543, -4.0235215869311345 ], [ 19.614372253417997, -4.050919251223997 ], [ 19.674797058105497, -4.0728367149556215 ], [ 19.768180847168, -4.0783159877365405 ], [ 19.8176193237305, -4.105711790236391 ], [ 19.911003112793, -4.165979237304949 ], [ 19.9714279174805, -4.204328849310254 ], [ 20.042839050293004, -4.286500213799611 ], [ 20.141716003418, -4.2919779925947115 ], [ 20.1691818237305, -4.341276227117982 ], [ 20.2131271362305, -4.3467536106376645 ], [ 20.2460861206055, -4.385094178052537 ], [ 20.295524597168, -4.368662746205936 ], [ 20.344963073730504, -4.374139930221497 ], [ 20.394401550293, -4.297455732086248 ], [ 20.438346862793008, -4.3029334322245 ], [ 20.449333190918, -4.248154665917803 ], [ 20.5097579956055, -4.242676574487001 ], [ 20.5427169799805, -4.231720275109175 ], [ 20.603141784668, -4.209807211682725 ], [ 20.658073425293, -4.160500568273739 ], [ 20.658073425293, -4.0728367149556215 ], [ 20.647087097168004, -4.034480763583009 ], [ 20.647087097168004, -3.9413230897097318 ], [ 20.647087097168004, -3.8920000803309205 ], [ 20.647087097168004, -3.7769018870373547 ], [ 20.647087097168004, -3.66727036073282 ], [ 20.647087097168004, -3.5795554388800865 ], [ 20.619621276855504, -3.5356948147279663 ], [ 20.636100769043, -3.305393095520415 ], [ 20.658073425293, -3.2670041764564086 ], [ 20.6855392456055, -3.146343809172181 ], [ 20.745964050293, -3.003727224879855 ], [ 20.7844161987305, -2.894009436766731 ], [ 20.822868347167997, -2.7842810263128426 ], [ 20.866813659668, -2.7458736474153898 ], [ 20.8723068237305, -2.691003825471513 ], [ 20.910758972168, -2.630644170559691 ], [ 20.954704284668, -2.5702815939410275 ], [ 20.9711837768555, -2.4989403279867672 ], [ 21.0151290893555, -2.487964401736435 ], [ 21.0151290893555, 
-2.4660122755566225 ], [ 21.0151290893555, -2.433083408120002 ], [ 21.0151290893555, -2.3727117370965116 ], [ 20.943717956542997, -2.306848728392543 ], [ 20.8942794799805, -2.2025594279915515 ], [ 20.8393478393555, -2.1641351953285315 ], [ 20.800895690918004, -2.147667367937919 ], [ 20.756950378418, -2.0927733356438925 ], [ 20.7294845581055, -2.0488567236098203 ], [ 20.658073425293, -1.9719997702972107 ], [ 20.619621276855504, -2.0214082279133923 ], [ 20.5537033081055, -2.032387682239105 ], [ 20.4877853393555, -2.0872838258784814 ], [ 20.4328536987305, -2.0927733356438925 ], [ 20.399894714355497, -2.147667367937919 ], [ 20.361442565918, -2.1970703121709025 ], [ 20.3339767456055, -2.1751136476393973 ], [ 20.295524597168, -2.1311993630456003 ], [ 20.2460861206055, -2.070815181770178 ], [ 20.2131271362305, -1.9994490968853948 ], [ 20.174674987793, -1.9335699537841755 ], [ 20.1691818237305, -1.911609666817916 ], [ 20.1142501831055, -1.9061195510553148 ], [ 20.042839050293004, -1.8841589134312353 ], [ 19.976921081542997, -1.8786687106335822 ], [ 19.943962097168, -1.8127849455005813 ], [ 19.9055099487305, -1.746898782536681 ], [ 19.861564636230504, -1.6700286779413185 ], [ 19.8505783081055, -1.6370834188282568 ], [ 19.795646667480497, -1.5931555663451487 ], [ 19.757194519043, -1.5657001812225722 ], [ 19.724235534668, -1.6096286221851004 ], [ 19.691276550293, -1.6480652325615546 ], [ 19.6638107299805, -1.686501101063057 ], [ 19.647331237793, -1.7688610979111157 ], [ 19.614372253417997, -1.8018040825055333 ], [ 19.5429611206055, -1.8018040825055333 ], [ 19.537467956543, -1.7523893855769639 ], [ 19.548454284668, -1.7084641147548614 ], [ 19.548454284668, -1.6535561167429353 ], [ 19.5100021362305, -1.615119611290474 ], [ 19.493522644043, -1.5382444364896137 ], [ 19.482536315918, -1.4888232098617369 ], [ 19.4550704956055, -1.422926523849842 ], [ 19.416618347168, -1.3789943485562064 ], [ 19.3891525268555, -1.3185862887267856 ], [ 19.361686706543, -1.2307174210082525 ], [ 19.3342208862305, -1.1318614945598777 ], [ 19.279289245605497, -1.0494789690935697 ], [ 19.2133712768555, -1.0330021981523347 ], [ 19.2133712768555, -1.0110330375631282 ], [ 19.2133712768555, -0.9835713781514517 ], [ 19.1804122924805, -0.8682300144406042 ], [ 19.158439636230497, -0.7913337881775484 ], [ 19.0925216674805, -0.7418997384208258 ], [ 19.054069519043, -0.7858411444008407 ], [ 18.977165222167997, -0.8242894963418169 ], [ 18.916740417480497, -0.8242894963418169 ], [ 18.894767761230504, -0.8242894963418169 ], [ 18.8398361206055, -0.8242894963418169 ], [ 18.801383972168, -0.8242894963418169 ], [ 18.795890808105497, -0.879215065180961 ], [ 18.757438659668, -0.9286473877100603 ], [ 18.757438659668, -0.9670942736327892 ], [ 18.773918151855504, -1.0220176366420748 ], [ 18.856315612792997, -1.0659556532348722 ], [ 18.8727951049805, -1.1263693971200497 ], [ 18.8508224487305, -1.1703058825825772 ], [ 18.801383972168, -1.1428456581857496 ], [ 18.7629318237305, -1.1318614945598777 ], [ 18.740959167480504, -1.1867818885736947 ], [ 18.724479675293, -1.2197336047450065 ], [ 18.675041198730497, -1.23620931220159 ], [ 18.636589050293, -1.23620931220159 ], [ 18.570671081543, -1.23620931220159 ], [ 18.5212326049805, -1.2087497436491321 ], [ 18.460807800293, -1.1757978954010635 ], [ 18.361930847168, -1.1208772893295265 ], [ 18.3015060424805, -1.1044009043583471 ], [ 18.197135925293004, -1.1044009043583471 ], [ 18.153190612793, -1.1263693971200497 ], [ 18.142204284668, -1.181289897414951 ], [ 18.1147384643555, -1.2142416797759086 ], [ 
17.9389572143555, -1.3515363255670576 ], [ 17.537956237793, -1.477840563295234 ], [ 17.472038269043004, -1.477840563295234 ], [ 17.153434753418, -1.7249362104075299 ], [ 17.076530456543, -1.7633705433145612 ], [ 17.049064636230504, -1.7743516362590053 ], [ 16.999626159668, -1.746898782536681 ], [ 16.9721603393555, -1.7414081634492025 ], [ 16.922721862792997, -1.7414081634492025 ], [ 16.867790222168004, -1.7414081634492025 ], [ 16.834831237793, -1.7523893855769639 ], [ 16.790885925293, -1.7578799725197214 ], [ 16.790885925293, -1.8072945223036816 ], [ 16.8293380737305, -1.7908231533069725 ], [ 16.8622970581055, -1.8237657418891526 ], [ 16.911735534668, -1.8237657418891526 ], [ 16.944694519043, -1.8567077273870602 ], [ 16.933708190918, -1.9829795556205012 ], [ 16.6425704956055, -1.98846942098849 ] ] ] } } } }, "metadata": { "constantValue": {} } } } } ] } } } } } } } } } } }) ###Markdown Process Satellite Data ###Code point = ee.Geometry.Point(18.4276, -2.6357) #define cloud free composite #define cloud mask def cloudMask(image): #Bits 3 and 5 are cloud shadow and cloud, respectively. cloudShadowBitMask = (1 << 3) cloudsBitMask = (1 << 5) #Get the pixel QA band. qa = image.select('pixel_qa') #Both flags should be set to zero, indicating clear conditions. mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And(qa.bitwiseAnd(cloudsBitMask).eq(0)) return image.updateMask(mask) #grab image collection for 2013 l8_2013 = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').filterDate('2013-01-01', '2013-12-31') cloud_free_image_2013 = l8_2013.map(cloudMask)#apply cloud mask median_image_2013 = cloud_free_image_2013.median().select(['B1','B2','B3','B4','B5','B6','B7','B10','B11','pixel_qa'])#take median pixel values for all bands l8_2013 = median_image_2013.clip(studyRegion)#clip to studyRegion #grab image collection for 2019 l8_2019 = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').filterDate('2013-01-01', '2013-12-31') cloud_free_image_2019 = l8_2019.map(cloudMask)#apply cloud mask median_image_2019 = cloud_free_image_2019.median().select(['B1','B2','B3','B4','B5','B6','B7','B10','B11','pixel_qa'])#take median pixel values for all bands l8_2019 = median_image_2019.clip(studyRegion)#clip to studyRegion vis_params_1 = { 'min': 0, 'max': 3000, 'bands': ['B4', 'B3', 'B2'] #RGB Composite } vis_params_2 = { 'min': 0, 'max': 3000, 'bands': ['B5', 'B6', 'B7'] #RGB Composite } Map.addLayer(l8_2013, vis_params_1, "Landsat-8_2013") Map.addLayer(l8_2019, vis_params_2, "Landsat-8_2019") Map #print area - doesn't work, need to know how to translate this into python properly #print('The total area is:', l8_2013.multiply(ee.Image.pixelArea())) ###Output _____no_output_____ ###Markdown Explore Hansen Data for Region ###Code hansen_2020 = ee.Image('UMD/hansen/global_forest_change_2020_v1_8').clip(studyRegion) #Accessing the forest loss and gain layers #update yearly so get the most recent version lossImage = hansen_2020.select(['loss']) gainImage = hansen_2013.select(['gain']) #merging the loss and gain layers gainAndLoss = gainImage.And(lossImage) #Accessing forest cover layer treeCover = hansen_2013.select(['treecover2000']) #Add the tree cover layer in green. Map.addLayer(treeCover.updateMask(treeCover), {"palette": ['000000', '00FF00'], max: 100}, 'Forest Cover'); #Add the loss layer in red. Map.addLayer(lossImage.updateMask(lossImage), {"palette": ['FF0000']}, 'Loss'); #Add the gain layer in blue. 
Map.addLayer(gainImage.updateMask(gainImage), {"palette": ['0000FF']}, 'Gain');
#Add the merged gain and loss in purple
Map.addLayer(gainAndLoss.updateMask(gainAndLoss), {"palette": 'FF00FF'}, 'Gain and Loss')
Map
###Output _____no_output_____ ###Markdown Chart Yearly Forest Loss ###Code
#calculate loss statistics
hansen_2020 = ee.Image('UMD/hansen/global_forest_change_2020_v1_8').clip(studyRegion)
#Accessing the forest loss layer
lossImage = hansen_2020.select(['loss'])
lossAreaImage = lossImage.multiply(ee.Image.pixelArea())
lossYear = hansen_2020.select(['lossyear'])
lossByYear = lossAreaImage.addBands(lossYear).reduceRegion(**{"reducer": ee.Reducer.sum().group(1), "geometry": studyRegion, "scale": 30, "maxPixels":1e10})
lossByYear.getInfo()
#convert stats into years (formatting)
def format(stats):
    d = ee.Dictionary(stats)
    return [ee.Number(d.get('group')).format("20%02d"), d.get('sum')]
stats = ee.List(lossByYear.get('groups'))
statsFormatted = stats.map(format)
statsDictionary = ee.Dictionary(statsFormatted.flatten())
statsFormatted.getInfo()
#get stats for the region of interest
yearly_hansen_array = []
lossImage = hansen_2020.select(['loss'])
stats = lossImage.reduceRegion(**{
  'reducer': ee.Reducer.sum(),
  'geometry': studyRegion,
  'scale': 30,
  'maxPixels':1e10
})
stats.getInfo()
#computation of yearly forest loss - not yet sure what the output statistics are; it also has
#various classes whose meaning is not obvious either
input_zone = ee.FeatureCollection(studyRegion)
out_path = ('/Users/joycelynlongdon/Desktop/Cambridge/CambridgeCoding/MRES/GEE_examples/Output Data/Hansen_Results')
forest_cover = os.path.join(out_path, 'forest-cover-2013.csv')
geemap.zonal_statistics_by_group(forestAt2015.updateMask(forestAt2015), input_zone, forest_cover, statistics_type='SUM', denominator=10000, decimal_places=2)
#create yearly forest cover map - will work on making it more efficient with a loop later on
loss = hansen_2020.select(['loss'])
lossYear = hansen_2020.select(['lossyear'])
forest = hansen_2020.select(['treecover2000'])
# visualization setting
vis = {
  'min': 0,
  'max': 100,
  'palette': ['#000000', '#005500', '#00AB00', '#00FF00']
}
#2013
lossInFirst13 = lossYear.gte(1).And(lossYear.lte(13))
forestAt2013 = forest.where(lossInFirst13.eq(1), 0)
#2014
lossInFirst14 = lossYear.gte(1).And(lossYear.lte(14))
forestAt2014 = forest.where(lossInFirst14.eq(1), 0)
#2015
lossInFirst15 = lossYear.gte(1).And(lossYear.lte(15))
forestAt2015 = forest.where(lossInFirst15.eq(1), 0)
#2016
lossInFirst16 = lossYear.gte(1).And(lossYear.lte(16))
forestAt2016 = forest.where(lossInFirst16.eq(1), 0)
#2017
lossInFirst17 = lossYear.gte(1).And(lossYear.lte(17))
forestAt2017 = forest.where(lossInFirst17.eq(1), 0)
#2018
lossInFirst18 = lossYear.gte(1).And(lossYear.lte(18))
forestAt2018 = forest.where(lossInFirst18.eq(1), 0)
#2019
lossInFirst19 = lossYear.gte(1).And(lossYear.lte(19))
forestAt2019 = forest.where(lossInFirst19.eq(1), 0)
Map.addLayer(forestAt2013.updateMask(forestAt2013), vis, 'Forest in 2013')
Map.addLayer(forestAt2019.updateMask(forestAt2019), vis, 'Forest in 2019')
Map
###Output _____no_output_____
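###Markdown The cloud mask above relies on bitwise tests of the Landsat `pixel_qa` band (bit 3 = cloud shadow, bit 5 = cloud). As a quick sanity check of that bit logic outside Earth Engine, here is a minimal pure-Python sketch; the sample QA values are made up for illustration only. ###Code
# Reproduce the pixel_qa bit test from cloudMask with plain integers.
cloudShadowBitMask = 1 << 3   # bit 3: cloud shadow
cloudsBitMask = 1 << 5        # bit 5: cloud

def is_clear(qa_value):
    # A pixel is kept only when both flags are zero, matching
    # qa.bitwiseAnd(cloudShadowBitMask).eq(0).And(qa.bitwiseAnd(cloudsBitMask).eq(0))
    return (qa_value & cloudShadowBitMask) == 0 and (qa_value & cloudsBitMask) == 0

# Hypothetical QA values: 322 has neither bit set, 352 has bit 5 (cloud) set.
for qa in (322, 352):
    print(qa, "clear" if is_clear(qa) else "masked")
###Output _____no_output_____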
knowledge_DS_Vr_0_1.ipynb
###Markdown
-------------------------------------------------->
**1- Definitions**
**DataFrame** has an N-dimensional tabular structure, where each column is a field of the table and each row is a record.
**Series** is a one-dimensional array that holds a sequence of values together with their respective indexes.
**size()** used across several columns
**value_counts()** used on one column at a time
**variable types**: float or int are numeric and can be continuous; nominal, e.g. eye color; ordinal, e.g. P, M and G (category)
--------------------------------------------------
**!pip install biblioteca**.....installs an external library
--------------------------------------------------
------------------------------------------------------->
**2- Data cleaning** "Do not rush this phase"
***-Duplicates***
***-Missing***[fill in {for categoricals label as "missing"}{for numerics label as "0"} or drop]
***-Blank***
***-Whitespace***
***-Conversion***
***-Unwanted outliers***[rows with discrepant values],
***-Standardize***[e.g. DOG, CAT, Cachorro, Gato, cachorro, gato] [typing mistakes]
***-Irrelevant***[not used]
**Goals:** data integrity, data accuracy, data precision and data relevance.
**2.1- Read**
- **pd.read_csv ... , sep=';', encoding = 'latin1', skiprows=1, decimal=','**.... reads the dataframe with ';' as separator and a given encoding, skips one row, and converts to numbers what was read as object
- **df = pd.read_csv("link do csv", usecols=['coluna1','coluna2','coluna3','coluna4'])**....chooses which columns we want to work with: coluna1, 2, 3 or 4 (or usecols=[0,1,3,4])
- **df.to_csv('nome do arquivo.csv', index=False, sep=';', decimal=',')**...saves the df as .csv, without the index, separated by semicolons and with comma as the decimal mark
- **df.to_csv("aulas.csv")**....saves the DataFrame externally in a specific format
- **link = 'https://raw.github........csv'** **df = pd.read_csv(link)**....
**2.2- Inspecting**
- **df.info()**...shows the column types
- **df.head(x)**
- **df.sample(x)**.....lists x random rows
- **df[['coluna1',...,'colunaX']].sample(x)**..lists x random rows for several columns
- **df.sample(x).T** ....transposes rows and columns and lists x random rows
- **df[0:5].T**....transposes rows and columns and shows the first 5 records
- **df[-5:].T**....transposes rows and columns and shows the last five records
- **df[20:30].T**....transposes rows and columns and selects rows 20 to 29
- **df.tail()**
- **df.shape**....checks the size of the dataframe (rows, columns)
- **df.describe()**...(looks only at numeric columns)
- **df.dtypes**.... type of each column
- **df.select_dtypes(include='object').head()**...shows the first 5 records ONLY for columns of type object
- **df.select_dtypes(include=['int', 'datetime', 'object'])**...shows the first 5 records of the columns of type 'int', 'datetime', 'object'
- **df.select_dtypes(exclude='int')**....excludes int types
- **df.columns**.... returns a list with the names of all DataFrame columns
- **df[['coluna1', 'coluna2']]**...selects multiple columns
- **df.columns**....accesses the list of columns
- **df['coluna1'].sum()** ....sums the fields of coluna1
- **print('palavraoufrase:', df['coluna'].sum())**....prints 'palavraoufrase' next to the sum of all values of 'coluna'
- **df["coluna1"].value_counts(normalize=True)**....normalizes the values as percentages
- **df['coluna'].unique()**....shows the unique values in the order they appear, without sorting, and includes NA values of the given column
- **df['coluna1'].value_counts()**....returns a series with the counts of unique values in descending order, excluding NA values.
- **df['coluna'].nunique()**.....counts the number of distinct elements along the given axis and CAN ignore NaN values.
**2.3- Nulls and missing values**
- **df.count()**...checks the valid (non-null) values in each column
- **df.size**...counts the values (valid and null) in each column
- **df[df['coluna'].isnull()]**...shows the rows with missing values (NaN) for a column of the df
- **df.isnull().sum()**... returns the number of missing values per column.
- **df.isnull().sum().sum()**... returns the total number of missing values.
- **df.isna().mean()**.... finds the percentage of values that are missing
- **df.dropna()**....returns the rows that do NOT contain a NaN
- **df.isna()**....shows which values in the df are NaN (True) and which are not (False)
------------------------------------------------------->
**3- Manipulating the data** (Exploratory Analysis)
**3.1- Drop and fill**
- **df.dropna(axis = 0)**...drops rows if they contain any NaN value
- **df.dropna(axis = 1)**...drops columns if they contain any NaN value
- **df.dropna(thresh=len(df)*0.9, axis=1)**...drops columns with more than 10% missing values
- **df.fillna(value=X)**....fills every NaN value with the value X
- **df.fillna(axis=0, method='ffill')**...replaces NaN values with the values from the previous row
- **df.fillna(axis=1, method='ffill')**...replaces NaN values with the values from the previous column
- **df.fillna(axis=0, method='bfill')**...replaces NaN values with the values from the next row
- **df.fillna(axis=1, method='bfill')**...replaces NaN values with the values from the next column
- **df['coluna'].fillna(value=df['coluna'].mean(), inplace=True)**...replaces the NaN values in the column with the mean
**3.2- Words**
- **df.Coluna.str.rstrip().head()**....rstrip() removes trailing spaces, while lstrip() removes leading spaces from the string
- **df.coluna.str.upper().head()**....lowercase text with the lower() method, or everything uppercase with the upper() method
**3.3- Index**
- **df.index**....automatically generates a sequential index for each row of the file
- **df.set_index('coluna escolhida', inplace=True)**....sets 'coluna escolhida' as the index, with inplace=True so the change is applied.
**3.4- Conversion**
- **df['colunapint'] = df['colunapint'].astype(int)**....changes the column type to int
- **df = df.astype({'colunapint': 'int'})**....changes the column type to int
- **df['coluna'] = pd.to_numeric(df['coluna'], errors='coerce')**....changes the column type to float; invalid values are converted to NaN
**3.5- Sort, replace, transform and rename columns**
- **df['coluna'] = df['coluna'].str.replace(',','').astype(float).sample(x)**...in the column, replace any comma in a number with nothing, convert to float and show x random records
- **pd.rename(columns = {'original_col1_name': 'new_col1_name', 'original_col2_name'})**...renames several columns
- **df.sort_values(by='column_name', ascending=False)**.....sorts the column 'column_name' in descending order
- **df.sort_values(by = ['col_1', 'col_2'], ascending = [True, False])**....sorts the DataFrame columns differently, one in ascending and the other in descending order.
- **df['coluna1'].sort_values(by='coluna 2', ascending=False)[0:5].T**...sorts coluna1 by coluna2, descending, transposed, and shows the first 5 records
**3.6- Column**
- **df['novo nome coluna'] = df['coluna'].str.len()**.....creates a column with the total number of characters in each row of the column
- **df['coluna_antiga'] = df['coluna_atualizada']**... updates the values in the column
- **df.drop(columns='coluna_a_ser_deletada', inplace=True)**...deletes a specific column
- **df.drop(['col_1', 'col_2', 'col_3'], axis=1)**...deletes several columns; axis=1 drops all rows of those columns
- **df.rename(columns={'nome antigo': 'nome novo'}, inplace = True)**....changes the column name
- **df[['Coluna1', 'Coluna2', 'Coluna3']]**....shows the columns
- **df.sort_values(by='coluna1', ascending=False)**...sorts the dataframe by the values of coluna1 from largest to smallest.
- **df.drop_duplicates(subset='coluna1', keep='first', inplace=True)**....drops the duplicated rows of coluna1, keeping the first one and saving the result
- **df['coluna_nova'] = df['coluna_antiga'].apply(lambda x: x == 17)**...creates coluna_nova with True or False, checking each row and marking True when the row value (x) equals 17
**3.6- Query**
- **df[(df['coluna1'] > 500) & (df['coluna1'] < …)]**.........creates a new df only with coluna1 > 500 AND < …
- **df[df['coluna1'] > df['coluna2']].count()**.........creates a new df only where coluna1 > coluna2 and reports the count
- **df[(df['coluna'] < …)]**....
- **df[df['coluna1'] == 'valor_coluna1']['coluna2']**... gets the value of coluna2 from the value of coluna1
- ¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨
- **df.query('coluna > 20').head()**....first 5 records with coluna > 20
- **df.query('coluna1 > 20 & coluna2=="registro"').head()**....with the logical operator & both conditions must be satisfied.
- **df.query('coluna1 > 20 | coluna2=="registro"').head()**....with the | operator only one of the conditions needs to be satisfied.
- **df.query('nomecoluna in ["C","Q"]', inplace=True)**....returns the rows where nomecoluna equals C or Q.
- **df.query('coluna1 =="valor in coluna1" & coluna2 =="valor in coluna2"')**.... query (==, !=, >, >=, …)
- **df.query('coluna1 < …')**....
**3.7- DataFrame**
- **df_copia = df_original.copy()**....creates an identical copy of the original dataframe
- **variavel = df[df["coluna1"] == 'um valor da coluna1'].copy()**....creates a new dataframe that has 'um valor da coluna1' in coluna1 and is independent of the original df
- **df = df[['coluna1', 'coluna2', 'coluna3', 'coluna4']]**....updates/recreates the dataframe df with the listed columns.
- **df_novo = df[df['coluna'] != 'conteúdo da coluna']**....creates (copies) a new dataframe without the rows containing 'conteúdo da coluna'
- **df[df['coluna1'].str.contains('parte da palavra')].count()**...creates a new df only with the rows of coluna1 that contain the given word fragments.
- **df.set_index('Coluna_única_escolhida', drop=False, inplace=True)**...creates a new index for the dataframe (drop=False ==> does not discard (delete) the original column, which also becomes the index of the df).
- **df1e2 = df1.join(df2)**...joins (MERGE) dataframes 1 and 2 horizontally by their matching, previously defined indexes
**3.8- Largest**
- **df.nlargest(x, 'coluna', keep='last')** ... returns the rows with the LARGEST values of the column, x being how many rows to show, in descending order ((last, first and all))
- **df[df['coluna1'] == df['coluna1'].max()]**....creates a new df only with the largest value of coluna1 in the DataFrame
**3.9- Smallest**
- **df.nsmallest(x, 'coluna')** ... returns the x SMALLEST values of the column
- **df[df['coluna1'] == df['coluna1'].min()]**....creates a new df only with the smallest value of coluna1 in the DataFrame
- **df[df['coluna1'] == df['coluna1'].min()] ['coluna2']**....creates a new df only with the smallest value of coluna1, showing coluna2
- **df[df['coluna1'] == df['coluna1'].min()] [['coluna2', 'coluna3', 'coluna4' ]]**....creates a new df only with the smallest value of coluna1, showing SEVERAL columns
**3.10- Groupby**
- **df.groupby(by='Sex').size()**...the data is split into male and female and the size() function computes the size of each group
- **df.groupby(by='Sex')['Age'].mean()**....mean of the ages per sex
- **df.groupby(['Sex','Survived']).agg({'Age': np.mean, 'PassengerId': np.size})**....mean age and number of passengers who survived or not, per sex
- **df.groupby('coluna1')['coluna2'].sum().sort_values(ascending=False)** ...groups each row of coluna1 with coluna2, sums and sorts from largest to smallest
- **df.groupby('coluna1')['coluna2'].max()** ...groups each row of coluna1 with the maximum value of coluna2
- **df.groupby('coluna1')['coluna2'].min()**...groups each row of coluna1 with the minimum value of coluna2
- **df.groupby("coluna1").mean()**...groups each unique occurrence of coluna1 with its mean
- **df.groupby("coluna1").mean()["coluna2"].sort_values()**....groups each unique occurrence of coluna1 with its mean, TAKES coluna2 with each mean and sorts in ascending order
- **df['coluna1'].apply(função específica)** applies a function (e.g. square root, sum, algebraic, etc.) to every row of coluna1
**3.11- Rows and columns by name**
- **df.loc[, ]**....selects data from rows and columns by their LABELS (label, column title or integers)
- **df.loc[[1,2,3]]**....returns rows 1, 2 and 3
- **df.loc[10:20:2]**......returns rows from position 10 to 20, stepping by 2
- **df.loc[10:]**......returns rows from position 10 to the last row
- **df.loc[[1,2], ['coluna 1','coluna 2','coluna 3']]**....returns rows 1 and 2 with columns 1, 2 and 3
- **df.loc[3]**....returns ONLY the WHOLE row 3
- **df.loc[df.groupby('coluna1')['coluna2'].idxmax()]**...locates the dataframe rows by grouping coluna1 and taking, for coluna2, the index of the row with the maximum value
**idxmax** - takes the INDEX OF THE ROW that has the maximum value (it grabs the whole row)
**loc** - used to locate a row by its index and change data
**3.12- Rows and columns by index**
- **df.iloc[, ]**....selects data from rows and columns by their numeric INDEX position.
- **df.iloc[0]**....selects the first row of the dataset
- **df.iloc[-1]**....selects the last row
- **df.iloc[:,0]**....all the data of the first column of the dataset
- **df.iloc[0:5,-1]**....from the first to the fifth value of the last column
- **df.iloc[0:3]**....retrieves the first three rows of the dataset
- **df.iloc[:, 1:3]**....all the data of the second and third columns
- **df.iloc[[0,2,4], 5:8]**....1st, 3rd and 5th elements and 6th to 8th columns
**3.13- plot**
**3.13.1- Matplotlib**
- **%matplotlib inline**.....makes plot outputs appear in the notebook
- ¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨
- **df['coluna1'].plot.hist()**....plots a simple chart (line, bar, barh, pie, box, hist...)
- **df['coluna1'].sort_values(ascending=False).plot.hist(title='Nome_do_titulo', bins=30, edgecolor='black')**....shows the TOTAL OCCURRENCES (repeated = hist) of coluna1, sorted from largest to smallest
- **df.plot.scatter(x='coluna1', y='coluna2')**....scatter plot from a DataFrame, specifying the columns
**3.13.2- Seaborn**
- **import seaborn as sns**
- **import matplotlib.pyplot as plt**
- ¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨
**distplot**
- **sns.distplot(df['coluna_escolhida'], bins=30, color='blue', kde=False)**....30 bars, blue color and without the density line (around the chart)
- **plt.title('nome_do_titulo', fontsize=x)**....chart title with size x
- **plt.xlabel('nome _do_eixoX', fontsize=x)**....label on x with size x
- **plt.ylabel('nome _do_eixoY', fontsize=x)**....label on y with size x
- **plt.axvline(x, color='cor', linestyle='--')**....x sets the value where the vertical line is drawn, with a color and a dashed style
- ¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨
**swarmplot**
- **sns.swarmplot(data=nome_dataframe, x='coluna_p_X', y='coluna _p_Y')**
- **plt.axhline(1.0, color='black', linestyle='--')**....sets the value where the horizontal line is drawn, with a color and a dashed style
**3.14- PLOTTING SELECTED COLUMNS**
- **lista = ['coluna1', .... 'coluna x' ]**....creates a list with x columns
- **total = df[lista].sum()**.....total sum of each column
- **total.plot.barh()**....plots a horizontal bar chart
**3.15- CALCULATIONS**
**3.15.1- Percentage**
- **total_geral = df['coluna_total_geral'].sum()**......sums the given column
- **total_coluna = df['coluna'].sum()**...sums the given column
- **print('palavraoufrase:', (total_coluna/total_geral * 100).round(2))**...computes the percentage
###Code ###Output _____no_output_____
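###Markdown To make the reference above concrete, here is a small, self-contained sketch that exercises a few of the listed operations (fillna, query, groupby, sort_values) on a tiny invented DataFrame; the data and column names are made up purely for illustration. ###Code
import pandas as pd
import numpy as np

# Invented example data
df_ex = pd.DataFrame({
    'cidade': ['A', 'A', 'B', 'B', 'C'],
    'vendas': [100, np.nan, 250, 300, 80],
})

# Fill the missing value with the column mean
df_ex['vendas'] = df_ex['vendas'].fillna(df_ex['vendas'].mean())

# Keep only rows with vendas > 90
df_filtrado = df_ex.query('vendas > 90')

# Total per cidade, sorted from largest to smallest
print(df_filtrado.groupby('cidade')['vendas'].sum().sort_values(ascending=False))
###Output _____no_output_____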
rover_ml/colab/RC_Car_End_to_End_Image_Regression_with_CNNs_(RGB_camera).ipynb
###Markdown Development of an End-to-End ML Model for Navigating an RC car with a Camera Run in Google Colab View source on GitHub Environment Setup Import Dependencies ###Code import os import csv import cv2 import matplotlib.pyplot as plt import random import pprint import numpy as np from numpy import expand_dims %tensorflow_version 1.x import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) from keras import backend as K from keras.models import Model, Sequential from keras.models import load_model from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D from keras.layers.convolutional import Convolution2D from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, TensorBoard from keras.callbacks import EarlyStopping, ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from google.colab.patches import cv2_imshow import sklearn from sklearn.model_selection import train_test_split import pandas as pd print("Tensorflow Version:",tf.__version__) print("Tensorflow Keras Version:",tf.keras.__version__) print("Eager mode: ", tf.executing_eagerly()) ###Output _____no_output_____ ###Markdown Confirm TensorFlow can see the GPU Simply select "GPU" in the Accelerator drop-down in Notebook Settings (either through the Edit menu or the command palette at cmd/ctrl-shift-P). ###Code device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': #raise SystemError('GPU device not found') print('GPU device not found') else: print('Found GPU at: {}'.format(device_name)) #GPU count and name !nvidia-smi -L ###Output _____no_output_____ ###Markdown Load the Dataset Download and Extract the Dataset ###Code # Download the dataset !curl -O https://selbystorage.s3-us-west-2.amazonaws.com/research/office_2/office_2.tar.gz data_set = 'office_2' tar_file = data_set + '.tar.gz' # Unzip the .tgz file # -x for extract # -v for verbose # -z for gnuzip # -f for file (should come at last just before file name) # -C to extract the zipped contents to a different directory !tar -xvzf $tar_file ###Output _____no_output_____ ###Markdown Parse the CSV File ###Code # Define path to csv file csv_path = data_set + '/interpolated.csv' # Load the CSV file into a pandas dataframe df = pd.read_csv(csv_path, sep=",") # Print the dimensions print("Dataset Dimensions:") print(df.shape) # Print the first 5 lines of the dataframe for review print("\nDataset Summary:") df.head(5) ###Output _____no_output_____ ###Markdown Clean and Pre-process the Dataset Remove Unneccessary Columns ###Code # Remove 'index' and 'frame_id' columns df.drop(['index','frame_id'],axis=1,inplace=True) # Verify new dataframe dimensions print("Dataset Dimensions:") print(df.shape) # Print the first 5 lines of the new dataframe for review print("\nDataset Summary:") df.head(5) ###Output _____no_output_____ ###Markdown Detect Missing Data ###Code # Detect Missing Values print("Any Missing Values?: {}".format(df.isnull().values.any())) # Total Sum print("\nTotal Number of Missing Values: {}".format(df.isnull().sum().sum())) # Sum Per Column print("\nTotal Number of Missing Values per Column:") print(df.isnull().sum()) ###Output _____no_output_____ ###Markdown Remove Zero Throttle Values ###Code # Determine if any throttle values are zeroes print("Any 0 throttle values?: 
{}".format(df['speed'].eq(0).any())) # Determine number of 0 throttle values: print("\nNumber of 0 throttle values: {}".format(df['speed'].eq(0).sum())) # Remove rows with 0 throttle values if df['speed'].eq(0).any(): df = df.query('speed != 0') # Reset the index df.reset_index(inplace=True,drop=True) # Verify new dataframe dimensions print("\nNew Dataset Dimensions:") print(df.shape) df.head(5) ###Output _____no_output_____ ###Markdown View Label Statistics ###Code # Steering Command Statistics print("\nSteering Command Statistics:") print(df['angle'].describe()) print("\nThrottle Command Statistics:") # Throttle Command Statistics print(df['speed'].describe()) ###Output _____no_output_____ ###Markdown View Histogram of Steering Commands ###Code #@title Select the number of histogram bins num_bins = 25 #@param {type:"slider", min:5, max:50, step:1} hist, bins = np.histogram(df['angle'], num_bins) center = (bins[:-1]+ bins[1:]) * 0.5 plt.bar(center, hist, width=0.05) #plt.plot((np.min(df['angle']), np.max(df['angle'])), (samples_per_bin, samples_per_bin)) # Normalize the histogram (150-300 for RBG) #@title Normalize the Histogram { run: "auto" } hist = True #@param {type:"boolean"} remove_list = [] samples_per_bin = 200 if hist: for j in range(num_bins): list_ = [] for i in range(len(df['angle'])): if df.loc[i,'angle'] >= bins[j] and df.loc[i,'angle'] <= bins[j+1]: list_.append(i) random.shuffle(list_) list_ = list_[samples_per_bin:] remove_list.extend(list_) print('removed:', len(remove_list)) df.drop(df.index[remove_list], inplace=True) df.reset_index(inplace=True) df.drop(['index'],axis=1,inplace=True) print('remaining:', len(df)) hist, _ = np.histogram(df['angle'], (num_bins)) plt.bar(center, hist, width=0.05) plt.plot((np.min(df['angle']), np.max(df['angle'])), (samples_per_bin, samples_per_bin)) ###Output _____no_output_____ ###Markdown View a Sample Image ###Code # View a Single Image index = random.randint(0,df.shape[0]-1) img_name = data_set + '/' + df.loc[index,'filename'] angle = df.loc[index,'angle'] center_image = cv2.imread(img_name) center_image_mod = cv2.resize(center_image, (320,180)) center_image_mod = cv2.cvtColor(center_image_mod,cv2.COLOR_RGB2BGR) # Crop the image height_min = 75 height_max = center_image_mod.shape[0] width_min = 0 width_max = center_image_mod.shape[1] crop_img = center_image_mod[height_min:height_max, width_min:width_max] plt.subplot(2,1,1) plt.imshow(center_image_mod) plt.grid(False) plt.xlabel('angle: {:.2}'.format(angle)) plt.show() plt.subplot(2,1,2) plt.imshow(crop_img) plt.grid(False) plt.xlabel('angle: {:.2}'.format(angle)) plt.show() ###Output _____no_output_____ ###Markdown View Multiple Images ###Code # Number of Images to Display num_images = 4 # Display the images i = 0 for i in range (i,num_images): index = random.randint(0,df.shape[0]-1) image_path = df.loc[index,'filename'] angle = df.loc[index,'angle'] img_name = data_set + '/' + image_path image = cv2.imread(img_name) image = cv2.resize(image, (320,180)) image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR) plt.subplot(num_images/2,num_images/2,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(image, cmap=plt.cm.binary) plt.xlabel('angle: {:.3}'.format(angle)) i += 1 ###Output _____no_output_____ ###Markdown Split the Dataset Define an ImageDataGenerator to Augment Images ###Code # Create image data augmentation generator and choose augmentation types datagen = ImageDataGenerator( #rotation_range=20, zoom_range=0.15, #width_shift_range=0.1, #height_shift_range=0.2, 
#shear_range=10, brightness_range=[0.5,1.0], #horizontal_flip=True, #vertical_flip=True, #channel_shift_range=100.0, fill_mode="reflect") ###Output _____no_output_____ ###Markdown View Image Augmentation Examples ###Code # load the image index = random.randint(0,df.shape[0]-1) img_name = data_set + '/' + df.loc[index,'filename'] original_image = cv2.imread(img_name) original_image = cv2.cvtColor(original_image,cv2.COLOR_RGB2BGR) original_image = cv2.resize(original_image, (320,180)) label = df.loc[index,'angle'] # convert to numpy array data = img_to_array(original_image) # expand dimension to one sample test = expand_dims(data, 0) # prepare iterator it = datagen.flow(test, batch_size=1) # generate batch of images batch = it.next() # convert to unsigned integers for viewing image_aug = batch[0].astype('uint8') print("Augmenting a Single Image: \n") plt.subplot(2,1,1) plt.imshow(original_image) plt.grid(False) plt.xlabel('angle: {:.2}'.format(label)) plt.show() plt.subplot(2,1,2) plt.imshow(image_aug) plt.grid(False) plt.xlabel('angle: {:.2}'.format(label)) plt.show() print("Multiple Augmentations: \n") # generate samples and plot for i in range(0,num_images): # define subplot plt.subplot(num_images/2,num_images/2,i+1) # generate batch of images batch = it.next() # convert to unsigned integers for viewing image = batch[0].astype('uint8') # plot raw pixel data plt.imshow(image) # show the figure plt.show() ###Output _____no_output_____ ###Markdown Define a Data Generator ###Code def generator(samples, batch_size=32, aug=0): num_samples = len(samples) while 1: # Loop forever so the generator never terminates for offset in range(0, num_samples, batch_size): batch_samples = samples[offset:offset + batch_size] #print(batch_samples) images = [] angles = [] for batch_sample in batch_samples: if batch_sample[5] != "filename": name = data_set + '/' + batch_sample[3] center_image = cv2.imread(name) center_image = cv2.cvtColor(center_image,cv2.COLOR_RGB2BGR) center_image = cv2.resize( center_image, (320, 180)) #resize from 720x1280 to 180x320 angle = float(batch_sample[4]) if not aug: images.append(center_image) angles.append(angle) else: data = img_to_array(center_image) sample = expand_dims(data, 0) it = datagen.flow(sample, batch_size=1) batch = it.next() image_aug = batch[0].astype('uint8') if random.random() < .5: image_aug = np.fliplr(image_aug) angle = -1 * angle images.append(image_aug) angles.append(angle) X_train = np.array(images) y_train = np.array(angles) yield sklearn.utils.shuffle(X_train, y_train) ###Output _____no_output_____ ###Markdown Split the Dataset ###Code samples = [] samples = df.values.tolist() sklearn.utils.shuffle(samples) train_samples, validation_samples = train_test_split(samples, test_size=0.2) print("Number of traing samples: ", len(train_samples)) print("Number of validation samples: ", len(validation_samples)) ###Output _____no_output_____ ###Markdown Define Training and Validation Data Generators ###Code batch_size_value = 32 img_aug = 0 train_generator = generator(train_samples, batch_size=batch_size_value, aug=img_aug) validation_generator = generator( validation_samples, batch_size=batch_size_value, aug=0) ###Output _____no_output_____ ###Markdown Compile and Train the Model Build the Model ###Code # Initialize the model model = Sequential() # trim image to only see section with road # (top_crop, bottom_crop), (left_crop, right_crop) model.add(Cropping2D(cropping=((height_min,0), (width_min,0)), input_shape=(180,320,3))) # Preprocess incoming data, centered 
around zero with small standard deviation model.add(Lambda(lambda x: (x / 255.0) - 0.5)) # Nvidia model model.add(Convolution2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2))) model.add(Convolution2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2))) model.add(Convolution2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2))) model.add(SpatialDropout2D(.5, dim_ordering='default')) model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1))) model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1))) model.add(Flatten()) model.add(Dense(1164)) model.add(Dropout(.5)) model.add(Dense(100, activation='relu')) model.add(Dropout(.5)) model.add(Dense(50, activation='relu')) model.add(Dropout(.5)) model.add(Dense(10, activation='relu')) model.add(Dropout(.5)) model.add(Dense(1)) model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mse','mae','mape','cosine']) # Print model sumamry model.summary() ###Output _____no_output_____ ###Markdown Setup Checkpoints ###Code # checkpoint model_path = './model' !if [ -d $model_path ]; then echo 'Directory Exists'; else mkdir $model_path; fi filepath = model_path + "/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1) ###Output _____no_output_____ ###Markdown Setup Early Stopping to Prevent Overfitting ###Code # The patience parameter is the amount of epochs to check for improvement early_stop = EarlyStopping(monitor='val_loss', patience=10) ###Output _____no_output_____ ###Markdown Reduce Learning Rate When a Metric has Stopped Improving ###Code reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001) ###Output _____no_output_____ ###Markdown Setup Tensorboard ###Code # Clear any logs from previous runs !rm -rf ./Graph/ # Launch Tensorboard !pip install -U tensorboardcolab from tensorboardcolab import * tbc = TensorBoardColab() # Configure the Tensorboard Callback tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=1, write_graph=True, write_grads=True, write_images=True, batch_size=batch_size_value, update_freq='epoch') ###Output _____no_output_____ ###Markdown Load Existing Model ###Code load = True #@param {type:"boolean"} if load: # Returns a compiled model identical to the previous one !curl -O https://selbystorage.s3-us-west-2.amazonaws.com/research/office_2/model.h5 !mv model.h5 model/ model_path_full = model_path + '/' + 'model.h5' model = load_model(model_path_full) print("Loaded previous model: {} \n".format(model_path_full)) else: print("No previous model loaded \n") ###Output _____no_output_____ ###Markdown Train the Model ###Code # Define step sizes STEP_SIZE_TRAIN = len(train_samples) / batch_size_value STEP_SIZE_VALID = len(validation_samples) / batch_size_value # Define number of epochs n_epoch = 5 # Define callbacks # callbacks_list = [TensorBoardColabCallback(tbc)] # callbacks_list = [TensorBoardColabCallback(tbc), early_stop] # callbacks_list = [TensorBoardColabCallback(tbc), early_stop, checkpoint] callbacks_list = [TensorBoardColabCallback(tbc), early_stop, checkpoint, reduce_lr] # Fit the model history_object = model.fit_generator( generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=validation_generator, validation_steps=STEP_SIZE_VALID, callbacks=callbacks_list, use_multiprocessing=True, epochs=n_epoch) ###Output _____no_output_____ ###Markdown Save the Model ###Code 
# Save model model_path_full = model_path + '/' model.save(model_path_full + 'model.h5') with open(model_path_full + 'model.json', 'w') as output_json: output_json.write(model.to_json()) ###Output _____no_output_____ ###Markdown Evaluate the Model Plot the Training Results ###Code # Plot the training and validation loss for each epoch print('Generating loss chart...') plt.plot(history_object.history['loss']) plt.plot(history_object.history['val_loss']) plt.title('model mean squared error loss') plt.ylabel('mean squared error loss') plt.xlabel('epoch') plt.legend(['training set', 'validation set'], loc='upper right') plt.savefig(model_path + '/model.png') # Done print('Done.') ###Output _____no_output_____ ###Markdown Print Performance Metrics ###Code scores = model.evaluate_generator(validation_generator, STEP_SIZE_VALID, use_multiprocessing=True) metrics_names = model.metrics_names for i in range(len(model.metrics_names)): print("Metric: {} - {}".format(metrics_names[i],scores[i])) ###Output _____no_output_____ ###Markdown Compute Prediction Statistics ###Code # Define image loading function def load_images(dataframe): # initialize images array images = [] for i in dataframe.index.values: name = data_set + '/' + dataframe.loc[i,'filename'] center_image = cv2.imread(name) center_image = cv2.resize(center_image, (320,180)) images.append(center_image) return np.array(images) # Load images test_size = 200 df_test = df.sample(frac=1).reset_index(drop=True) df_test = df_test.head(test_size) test_images = load_images(df_test) batch_size = 32 preds = model.predict(test_images, batch_size=batch_size, verbose=1) #print("Preds: {} \n".format(preds)) testY = df_test.iloc[:,4].values #print("Labels: {} \n".format(testY)) df_testY = pd.Series(testY) df_preds = pd.Series(preds.flatten) # Replace 0 angle values if df_testY.eq(0).any(): df_testY.replace(0, 0.0001,inplace=True) # Calculate the difference diff = preds.flatten() - df_testY percentDiff = (diff / testY) * 100 absPercentDiff = np.abs(percentDiff) # compute the mean and standard deviation of the absolute percentage # difference mean = np.mean(absPercentDiff) std = np.std(absPercentDiff) print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std)) # Compute the mean and standard deviation of the difference print(diff.describe()) # Plot a histogram of the prediction errors num_bins = 25 hist, bins = np.histogram(diff, num_bins) center = (bins[:-1]+ bins[1:]) * 0.5 plt.bar(center, hist, width=0.05) plt.title('Historgram of Predicted Error') plt.xlabel('Steering Angle') plt.ylabel('Number of predictions') plt.xlim(-2.0, 2.0) plt.plot(np.min(diff), np.max(diff)) # Plot a Scatter Plot of the Error plt.scatter(testY, preds) plt.xlabel('True Values ') plt.ylabel('Predictions ') plt.axis('equal') plt.axis('square') plt.xlim([-1.75,1.75]) plt.ylim([-1.75,1.75]) plt.plot([-1.75, 1.75], [-1.75, 1.75], color='k', linestyle='-', linewidth=.1) ###Output _____no_output_____ ###Markdown Plot a Prediction ###Code # Plot the image with the actual and predicted steering angle index = random.randint(0,df_test.shape[0]-1) img_name = data_set + '/' + df_test.loc[index,'filename'] center_image = cv2.imread(img_name) center_image = cv2.cvtColor(center_image,cv2.COLOR_RGB2BGR) center_image_mod = cv2.resize(center_image, (320,180)) #resize from 720x1280 to 180x320 plt.imshow(center_image_mod) plt.grid(False) plt.xlabel('Actual: {:.2f} Predicted: {:.2f}'.format(df_test.loc[index,'angle'],float(preds[index]))) plt.show() ###Output _____no_output_____ ###Markdown Visualize 
the Network Show the Model Summary ###Code model.summary() ###Output _____no_output_____ ###Markdown Access Individual Layers ###Code # Creating a mapping of layer name ot layer details # We will create a dictionary layers_info which maps a layer name to its charcteristics layers_info = {} for i in model.layers: layers_info[i.name] = i.get_config() # Here the layer_weights dictionary will map every layer_name to its corresponding weights layer_weights = {} for i in model.layers: layer_weights[i.name] = i.get_weights() pprint.pprint(layers_info['conv_5']) ###Output _____no_output_____ ###Markdown Visualize the filters ###Code # Visualize the first filter of each convolution layer layers = model.layers layer_ids = [2,3,4,6,7] #plot the filters fig,ax = plt.subplots(nrows=1,ncols=5) for i in range(5): ax[i].imshow(layers[layer_ids[i]].get_weights()[0][:,:,:,0][:,:,0],cmap='gray') ax[i].set_title('Conv'+str(i+1)) ax[i].set_xticks([]) ax[i].set_yticks([]) ###Output _____no_output_____ ###Markdown Visualize the Saliency Map ###Code !pip install -I scipy==1.2.* !pip install git+https://github.com/raghakot/keras-vis.git -U # import specific functions from keras-vis package from vis.utils import utils from vis.visualization import visualize_saliency, visualize_cam, overlay # View a Single Image index = random.randint(0,df.shape[0]-1) img_name = data_set + '/' + df.loc[index,'filename'] sample_image = cv2.imread(img_name) sample_image = cv2.cvtColor(sample_image,cv2.COLOR_RGB2BGR) sample_image_mod = cv2.resize(sample_image, (320,180)) plt.imshow(sample_image_mod) layer_idx = utils.find_layer_idx(model, 'conv_5') grads = visualize_saliency(model, layer_idx, filter_indices=None, seed_input=sample_image_mod, grad_modifier='absolute', backprop_modifier='guided') plt.imshow(grads, alpha = 0.6) ###Output _____no_output_____
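###Markdown As a closing example, here is a minimal inference sketch that loads the saved model and predicts a steering angle for a single frame. It assumes `model/model.h5` exists from the save step above and that `sample.jpg` is a hypothetical test image; the preprocessing mirrors the training generator (channel-order swap and resize to 320x180), while cropping and normalization happen inside the model itself. ###Code
import cv2
import numpy as np
from keras.models import load_model

# Assumed paths, for illustration only
steering_model = load_model('model/model.h5')
frame = cv2.imread('sample.jpg')  # hypothetical test frame

# Same preprocessing as the data generator
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (320, 180))

# The model expects a batch dimension: (1, 180, 320, 3)
pred = steering_model.predict(np.expand_dims(frame, axis=0))
print('Predicted steering angle: {:.3f}'.format(float(pred[0][0])))
###Output _____no_output_____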
lectures-labs/labs/03_neural_recsys/Short_Intro_to_Embeddings_with_Keras.ipynb
###Markdown Categorical EmbeddingsWe will use the embeddings through the whole lab. They are simply represented by a matrix of tunable parameters (weights).Let us assume that we are given a pre-trained embedding matrix for an vocabulary of size 10. Each embedding vector in that matrix has dimension 4. Those dimensions are too small to be realistic and are only used for demonstration purposes: ###Code import numpy as np embedding_size = 4 vocab_size = 10 embedding_matrix = np.arange(embedding_size * vocab_size, dtype='float32') embedding_matrix = embedding_matrix.reshape(vocab_size, embedding_size) print(embedding_matrix) ###Output _____no_output_____ ###Markdown To access the embedding for a given integer (ordinal) symbol $i$, you may either: - simply index (slice) the embedding matrix by $i$, using numpy integer indexing: ###Code i = 3 print(embedding_matrix[i]) ###Output _____no_output_____ ###Markdown - compute a one-hot encoding vector $\mathbf{v}$ of $i$, then compute a dot product with the embedding matrix: ###Code def onehot_encode(dim, label): return np.eye(dim)[label] onehot_i = onehot_encode(vocab_size, i) print(onehot_i) embedding_vector = np.dot(onehot_i, embedding_matrix) print(embedding_vector) ###Output _____no_output_____ ###Markdown The Embedding layer in KerasIn Keras, embeddings have an extra parameter, `input_length` which is typically used when having a sequence of symbols as input (think sequence of words). In our case, the length will always be 1.```pyEmbedding(output_dim=embedding_size, input_dim=vocab_size, input_length=sequence_length, name='my_embedding')```furthermore, we load the fixed weights from the previous matrix instead of using a random initialization:```pyEmbedding(output_dim=embedding_size, input_dim=vocab_size, weights=[embedding_matrix], input_length=sequence_length, name='my_embedding')``` ###Code from keras.layers import Embedding embedding_layer = Embedding( output_dim=embedding_size, input_dim=vocab_size, weights=[embedding_matrix], input_length=1, name='my_embedding') ###Output _____no_output_____ ###Markdown Let's use it as part of a Keras model: ###Code from keras.layers import Input from keras.models import Model x = Input(shape=[1], name='input') embedding = embedding_layer(x) model = Model(inputs=x, outputs=embedding) ###Output _____no_output_____ ###Markdown The output of an embedding layer is then a 3-d tensor of shape `(batch_size, sequence_length, embedding_size)`. 
###Code model.output_shape ###Output _____no_output_____ ###Markdown `None` is a marker for dynamic dimensions.The embedding weights can be retrieved as model parameters: ###Code model.get_weights() ###Output _____no_output_____ ###Markdown The `model.summary()` method gives the list of trainable parameters per layer in the model: ###Code model.summary() ###Output _____no_output_____ ###Markdown We can use the `predict` method of the Keras embedding model to project a single integer label into the matching embedding vector: ###Code labels_to_encode = np.array([[3]]) model.predict(labels_to_encode) ###Output _____no_output_____ ###Markdown Let's do the same for a batch of integers: ###Code labels_to_encode = np.array([[3], [3], [0], [9]]) model.predict(labels_to_encode) ###Output _____no_output_____ ###Markdown The output of an embedding layer is then a 3-d tensor of shape `(batch_size, sequence_length, embedding_size)`.To remove the sequence dimension, useless in our case, we use the `Flatten()` layer ###Code from keras.layers import Flatten x = Input(shape=[1], name='input') y = Flatten()(embedding_layer(x)) model2 = Model(inputs=x, outputs=y) model2.output_shape model2.predict(np.array([3])) ###Output _____no_output_____ ###Markdown **Question** how many trainable parameters does `model2` have? Check your answer with `model2.summary()`. Note that we re-used the same `embedding_layer` instance in both `model` and `model2`: therefore **the two models share exactly the same weights in memory**: ###Code model2.set_weights([np.ones(shape=(vocab_size, embedding_size))]) labels_to_encode = np.array([[3]]) model2.predict(labels_to_encode) model.predict(labels_to_encode) ###Output _____no_output_____ ###Markdown **Home assignment**:The previous model definitions used the [function API of Keras](https://keras.io/getting-started/functional-api-guide/). Because the embedding and flatten layers are just stacked one after the other it is possible to instead use the [Sequential model API](https://keras.io/getting-started/sequential-model-guide/).Defined a third model named `model3` using the sequential API and that also reuses the same embedding layer to share parameters with `model` and `model2`. ###Code from keras.models import Sequential # TODO model3 = None # print(model3.predict(labels_to_encode)) # %load solutions/embeddings_sequential_model.py ###Output _____no_output_____
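###Markdown For reference, here is one possible sketch of the home assignment using the Sequential API; it reuses the same `embedding_layer` instance so that all three models keep sharing the same weights. The lab's own reference solution lives in `solutions/embeddings_sequential_model.py` and may differ in details. ###Code
from keras.models import Sequential
from keras.layers import Flatten

# A sketch, not the official solution: stack the shared embedding layer
# and a Flatten layer, exactly as in model2 but with the Sequential API.
model3 = Sequential()
model3.add(embedding_layer)
model3.add(Flatten())

print(model3.predict(np.array([[3]])))
###Output _____no_output_____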
Chapter02_Feature_Understanding/Whitehouse Visitor Log Dataset.ipynb
###Markdown Whitehouse Visitor Log Dataset ###Code # import packages we need for exploratory data analysis (EDA) import pandas as pd # to store tabular data import numpy as np # to do some math import matplotlib.pyplot as plt # a popular data visualization tool import seaborn as sns # another popular data visualization tool %matplotlib inline plt.style.use('fivethirtyeight') # a popular data visualization theme # load in the data set whitehouse = pd.read_csv('/Users/divyasusarla/Downloads/whitehouse_waves-2016_12.csv') whitehouse.info() whitehouse.isnull().sum() whitehouse.dropna(axis=1, inplace=True) whitehouse.head() whitehouse['APPT_START_DATE'] = pd.to_datetime(whitehouse['APPT_START_DATE']) whitehouse['APPT_END_DATE'] = pd.to_datetime(whitehouse['APPT_END_DATE']) whitehouse['APPT_DURATION'] = whitehouse['APPT_END_DATE'] - whitehouse['APPT_START_DATE'] np.mean(whitehouse['APPT_DURATION']) ###Output _____no_output_____
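###Markdown A natural next step, sketched below, is to turn the APPT_DURATION timedeltas into minutes and look at their distribution; this assumes the `whitehouse` DataFrame from the cells above is still in memory. ###Code
# Convert the appointment duration to minutes and inspect it
whitehouse['APPT_DURATION_MIN'] = whitehouse['APPT_DURATION'].dt.total_seconds() / 60
print(whitehouse['APPT_DURATION_MIN'].describe())

# Histogram of appointment lengths
whitehouse['APPT_DURATION_MIN'].plot.hist(bins=50, title='Appointment duration (minutes)')
###Output _____no_output_____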
Emotion_Tracker.ipynb
###Markdown __Disclaimer__: Welcome to the Emotion Tracker developed in Collaboration with Prof. Peter Gloor and the support from Josephine Van Delden. This is an alpha version of the upcoming web service. ###Code #@title # Step 0: Upload Video # Imports from google.colab import files import ipywidgets as widgets from IPython.display import HTML from IPython.display import display from IPython.display import clear_output from datetime import datetime import cv2 import numpy as np import pandas as pd import subprocess import io import os import base64 import librosa import librosa.display import altair as alt from scipy.signal import chirp, find_peaks, peak_widths import matplotlib.pyplot as plt from google.colab.patches import cv2_imshow !curl -o happy.png https://i.imgur.com/PXpWO5C.png !curl -o sad.png https://i.imgur.com/mAYh4Qt.png uploaded = files.upload() for fn in uploaded.keys(): print('Successfully uploaded file "{NAME_OF_UPLOADED_VIDEO}".'.format( NAME_OF_UPLOADED_VIDEO=fn,)) NAME_OF_UPLOADED_VIDEO = 'paprika.mov' # NAME_OF_UPLOADED_VIDEO = '10s_silence.mov' # NAME_OF_UPLOADED_VIDEO = 'hyper_tests.avi' NAME_OF_UPLOADED_VIDEO = 'paprika_lq.mov' #@title # Step 1: Choose Hyperparameters # Clean up old files save new ones. try: os.remove("pls_delete.mp4") os.remove("pls_delete.mpeg") os.remove("pls_delete.wav") os.remove("pls_delete_play.mp4") except FileNotFoundError: print("Files already deleted.") else: print("Files removed.") INPUT_VIDEO = NAME_OF_UPLOADED_VIDEO # Constants INPUT_VIDEO_NAME = 'pls_delete' OUTPUT_VIDEO_NAME_MPEG = 'pls_delete.mpeg' OUTPUT_VIDEO_NAME_MP4 = '{}.mp4'.format(INPUT_VIDEO_NAME) OUTPUT_VIDEO_NAME_WAV = '{}.wav'.format(INPUT_VIDEO_NAME) FRAMES_PER_SECOND = 30 # Params Interesting_Points = 2#@param {type:"integer"} NO_OF_ROIS = (Interesting_Points-1) Sound_Features = 4#@param {type:"integer"} NO_OF_MFCC = Sound_Features Shrink_Regions_by_px = 10#@param {type:"integer"} ERODE_1 = Shrink_Regions_by_px ERODE_2 = ERODE_1 # Dilate: Makes intersting regions grow. Grow_Regions_by_px = 20#@param {type:"integer"} DILATE_1 = Grow_Regions_by_px DILATE_2 = DILATE_1 Lumen_Threshold = 127#@param {type:"integer"} MASK_THRESH = Lumen_Threshold Countour_Approximation = 60#@param {type:"integer"} CONTOUR_AREA = Countour_Approximation OPENCV_MAJOR_VERSION = int(cv2.__version__.split('.')[0]) class Leaf(): def __init__(self, id, hsv_frame, track_window): self.id = id self.track_window = track_window self.term_crit = \ (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10, 1) # Initialize the histogram. self.x, self.y, self.w, self.h = track_window roi = hsv_frame[y:y+h, x:x+w] roi_hist = cv2.calcHist([roi], [0], None, [16], [0, 180]) self.roi_hist = cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX) # Initialize the Kalman filter. 
self.kalman = cv2.KalmanFilter(4, 2) self.kalman.measurementMatrix = np.array( [[1, 0, 0, 0], [0, 1, 0, 0]], np.float32) self.kalman.transitionMatrix = np.array( [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) self.kalman.processNoiseCov = np.array( [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03 cx = x+w/2 cy = y+h/2 self.kalman.statePre = np.array( [[cx], [cy], [0], [0]], np.float32) self.kalman.statePost = np.array( [[cx], [cy], [0], [0]], np.float32) def get_plant_values(self): return np.array([ self.id, self.x, self.y, self.w, self.h ]) def update(self, frame, hsv_frame): back_proj = cv2.calcBackProject( [hsv_frame], [0], self.roi_hist, [0, 180], 1) ret, self.track_window = cv2.meanShift( back_proj, self.track_window, self.term_crit) x, y, w, h = self.track_window center = np.array([x+w/2, y+h/2], np.float32) prediction = self.kalman.predict() estimate = self.kalman.correct(center) center_offset = estimate[:,0][:2] - center self.track_window = (x + int(center_offset[0]), y + int(center_offset[1]), w, h) x, y, w, h = self.track_window # Draw the predicted center position as a blue circle. cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4, (255, 0, 0), -1) # Draw the corrected tracking window as a cyan rectangle. cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2) # Draw the ID above the rectangle in blue text. cv2.putText(frame, 'ID: %d' % self.id, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1, cv2.LINE_AA) #@title # Step 2: Feature Engineering print("Starting time: {}".format(datetime.now())) # Create a VideoCapture object import time start_time = time.time() cap = cv2.VideoCapture(INPUT_VIDEO) arr = [] no_of_frame = 0 # Default resolutions of the frame are obtained and casted to int. frame_width = int(cap.get(3)) frame_height = int(cap.get(4)) # Create the KNN background subtractor. bg_subtractor = cv2.createBackgroundSubtractorKNN() history_length = 20 bg_subtractor.setHistory(history_length) erode_kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, (ERODE_1, ERODE_2)) dilate_kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, (DILATE_1, DILATE_2)) leafs = [] num_history_frames_populated = 0 fourcc = cv2.VideoWriter_fourcc('M','P','E','G') out = cv2.VideoWriter(OUTPUT_VIDEO_NAME_MPEG, fourcc, FRAMES_PER_SECOND, (frame_width,frame_height)) grabbed_frames = 0 while True: grabbed, frame = cap.read() grabbed_frames += 1 # print(grabbed_frames) if (grabbed is False): break no_of_frame = no_of_frame + 1 # Apply the KNN background subtractor. fg_mask = bg_subtractor.apply(frame) # Let the background subtractor build up a history. if num_history_frames_populated < history_length: num_history_frames_populated += 1 continue # resources: https://docs.opencv.org/3.4/d7/d4d/tutorial_py_thresholding.html # Try this out: # th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\ # cv.THRESH_BINARY,11,2) # Create the thresholded image. # MASK-Threshold _, thresh = cv2.threshold(fg_mask, MASK_THRESH, 255, cv2.THRESH_BINARY) cv2.erode(thresh, erode_kernel, thresh, iterations=2) cv2.dilate(thresh, dilate_kernel, thresh, iterations=2) # Detect contours in the thresholded image. if OPENCV_MAJOR_VERSION >= 4: # OpenCV 4 or a later version is being used. 
contours, hier = cv2.findContours( thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, hier = cv2.findContours( thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Draw green rectangles around large contours. # Also, if no leafs are being tracked yet, create some. should_initialize_leafs = len(leafs) == 0 id = 0 for c in contours: if cv2.contourArea(c) > CONTOUR_AREA: (x, y, w, h) = cv2.boundingRect(c) cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1) if should_initialize_leafs: leafs.append( Leaf(id, hsv_frame, (x, y, w, h))) frame_no_and_values = np.array([ id, x, y, w, h ]) a = np.append(frame_no_and_values, [no_of_frame]) arr.append(a) id += 1 # Update the tracking of each leaf. for leaf in leafs: leaf.update(frame, hsv_frame) frame_no_and_values = np.array([ id, x, y, w, h ]) a = np.append(frame_no_and_values, [no_of_frame]) arr.append(a) out.write(frame) cap.release() out.release() cv2.destroyAllWindows() motion_array = np.array(arr) print("Processing took", round(((time.time() - start_time)/60), 2), "minutes.") start_time2 = time.time() # # To Do: shorten videos to maximal 20s, so that you can watch all of them. # subprocess.run('ffmpeg -i {} {}'.format(OUTPUT_VIDEO_NAME_MPEG, OUTPUT_VIDEO_NAME_MP4), shell=True) # from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip # ffmpeg_extract_subclip('pls_delete.mp4', 0, 10, targetname="pls_delete_play.mp4") # print("Conversion to mp4 and creating preview took", round(((time.time() - start_time2)/60), 2), "minutes.") # @title #Step 3: Preview the Video def playvideo(filename): video = io.open(filename, 'r+b').read() encoded = base64.b64encode(video) return HTML(data='''<video width="80%" style="display:block; margin:0 auto;" alt="test" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"/> </video>'''.format(encoded.decode('ascii'))) playvideo('pls_delete_play.mp4') # @title # Step 4: Data Visualizations # ____ VIDEO PROCESSING ____ # This is the output NumPy Array from the video processing data = motion_array # Create a Pandas DataFrame from NumPy arrays with UniqueID, x- & y-Value of ROI (i.e., Region of Interest), as well as width and height of the accompanying moving windows. # Frame refers to video frame (30 fps) on which point was tracked. 
# RESET DF df_video = pd.DataFrame(data=data[0:,0:], index=[i for i in range(data.shape[0])], columns=['ID','X','Y','W','H','Frame']) # Calculate Elapsed Time for Frames of Video df_video["Elapsed"] = round(df_video["Frame"]/30, 1) # Remove Duplicates df_video_wo_dups = df_video.drop_duplicates(subset=None, keep='first', inplace=False) df_video_queried = df_video_wo_dups.query('0 <= ID <= {}'.format(NO_OF_ROIS)) # df_video_4_cols = df_video_queried.filter(items=['ID', 'X', 'Y', 'Elapsed']) df_video_3_cols = df_video_queried.filter(items=['ID', 'Y', 'Elapsed']) # Minding that Matplotlib inverts x/y Values # df_inverted_x_y_minus_1 = df_video_4_cols.sub([0, frame_width, frame_height, 0], axis='columns') df_inverted_x_y_minus_1 = df_video_3_cols.sub([0, frame_height, 0], axis='columns') # df_inverted_x_y = df_inverted_x_y_minus_1.mul([1, -1, -1, 1], axis='columns') df_inverted_x_y = df_inverted_x_y_minus_1.mul([1, -1, 1], axis='columns') df_video = df_inverted_x_y # Use Pivot Feature of Pandas DataFrame # df_pivoted_video = pd.pivot_table(df_video, values=["X", "Y"], index=pd.Grouper(key='Elapsed'), columns=["ID"],) df_pivoted_video = pd.pivot_table(df_video, values=["Y"], index=pd.Grouper(key='Elapsed'), columns=["ID"],) # Create flexible number of indices based on number of Regions of Interest int(df_video_queried.max(axis = 0)[0]); counter = 0 # x_indices = [] y_indices = [] while counter <= int(df_video_queried.max(axis = 0)[0]): # x_indices.append(('X' + str(counter))); y_indices.append(('Y' + str(counter))) counter = counter + 1 df_pivoted_video.columns = y_indices # + x_indices # ____AUDIO PROCESSING____ # Extracts Audio from Input Video command = "ffmpeg -i {} -ab 160k -ac 2 -ar 44100 -vn {}".format(INPUT_VIDEO, OUTPUT_VIDEO_NAME_WAV) subprocess.call(command, shell=True) # Unpack Audio audio_path = INPUT_VIDEO x , sr = librosa.load(audio_path) # Get the MFCCs (and choose the number of features to extract); manipulate Pandas DataFrame. 
mfccs = librosa.feature.mfcc(x, sr=sr, n_mfcc=NO_OF_MFCC) audio_data = np.transpose(mfccs) # Calculate the step length between a mfcc and the audio in seconds window_length = 512 # default value by mfcc algorithm length = (round((window_length/sr)*mfccs.shape[1]), 2) step_size = length[0]/mfccs.shape[1] # Getting Elapsed Time of Audio from Audiofile audio_time_data = np.arange(0, (length[0]), step_size) audio_time_data = np.round(audio_time_data, 1) df_audio_time_steps = pd.DataFrame(data=audio_time_data, index=[i for i in range(audio_time_data.shape[0])], columns=['Elapsed_t']) # Create the Audio Pandas DataFrame df_audio = pd.DataFrame(data=audio_data, index=[i for i in range(audio_data.shape[0])], columns=['MFCC'+str(i) for i in range(audio_data.shape[1])]) df_audio_w_time = pd.concat([df_audio_time_steps, df_audio], axis=1, join='outer') df_audio_w_time_w_o_dups = df_audio_w_time.drop_duplicates(subset='Elapsed_t', keep='first') df_audio_w_time_w_o_dups ## Merge AUDIO and VIDEO # df_merged = pd.concat([df_audio_w_time_w_o_dups, df_pivoted_video], axis = 1, join= 'outer') df_merged = pd.merge(df_audio_w_time_w_o_dups, df_pivoted_video, how='left', left_on=['Elapsed_t'], right_on=['Elapsed']) # add 'indicator=True' to see functionality df_bf_filled = df_merged.fillna(method='bfill') # backward filling (NANs filling up) df_filled = df_bf_filled.fillna(method='ffill') # forward filling (NANs filling down) # transform from wide to long-form data # https://altair-viz.github.io/user_guide/data.html df_filled_every_two_seconds = df_filled.iloc[::20] df_filled_melted = df_filled_every_two_seconds.melt('Elapsed_t', var_name='Variables', value_name='Values') alt.Chart(df_filled_melted).mark_line().encode( x='Elapsed_t', y='Values', color='Variables' ).properties( width=900, height=400, ).interactive(bind_y=False) # @title # Step 5: Happiness Indicator total_happiness_counter = 0 per_curve_happiness_counter = 0 img_happy = cv2.imread('happy.png', cv2.IMREAD_UNCHANGED) img_sad = cv2.imread('sad.png', cv2.IMREAD_UNCHANGED) # obviously one cannot assume that the height is fixed HEIGHT = 100 # HEIGHT = (max(x[peaks])/2 ) # To Do: has to be a regex and start with y(i) values for tracked_move in df_filled.columns[5:]: x = df_filled[tracked_move] # try without height bc its an absolute value peaks, _ = find_peaks(x, HEIGHT, distance=50) per_curve_happiness_counter = 0 per_curve_happiness_indicators = len(x[peaks]) happiness_threshold = (max(x[peaks]) + min(x[peaks]))/2 for value in x[peaks]: if value > happiness_threshold: per_curve_happiness_counter = per_curve_happiness_counter + 1 total_happiness_counter = total_happiness_counter + 1 if per_curve_happiness_counter > (per_curve_happiness_indicators/2): print("This plant observed that the speaker is >>> happy <<<.") cv2_imshow(img_happy) else: print("This plant observed that the speaker is >>> sad <<<.") cv2_imshow(img_sad) plt.plot(x) plt.plot(peaks, x[peaks], "x") plt.show() ###Output This plant observed that the speaker is >>> happy <<<. 
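###Markdown A minimal, self-contained sketch of the peak-threshold idea used in Step 5, rewritten as a reusable function and run on a synthetic trace so it can be tested without a video. The height and distance values below are assumptions for the synthetic data, not tuned values from real footage. ###Code
import numpy as np
from scipy.signal import find_peaks

def happiness_from_trace(trace, min_height=100, min_distance=50):
    """Return True when more than half of the detected peaks sit above the
    midpoint between the smallest and largest peak (the Step 5 heuristic)."""
    peaks, _ = find_peaks(trace, height=min_height, distance=min_distance)
    if len(peaks) == 0:
        return False
    peak_values = trace[peaks]
    threshold = (peak_values.max() + peak_values.min()) / 2
    return (peak_values > threshold).sum() > len(peak_values) / 2

# Synthetic leaf-height trace: a slow oscillation whose amplitude grows over time.
t = np.linspace(0, 10, 2000)
synthetic = 150 + 40 * np.sin(2 * np.pi * t) * (1 + 0.1 * t)
print(happiness_from_trace(synthetic)) ###Output _____no_output_____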
###Markdown Additional Features ###Code # @title ## Dummy Sad Plant img_happy = cv2.imread('happy.png', cv2.IMREAD_UNCHANGED) img_sad = cv2.imread('sad.png', cv2.IMREAD_UNCHANGED) print("This plant observed that the speaker is >>> sad <<<.") cv2_imshow(img_sad) # @title ## Camera Capturing [in Process] from IPython.display import display, Javascript from google.colab.output import eval_js from base64 import b64decode def take_photo(filename='photo.jpg', quality=0.8): js = Javascript(''' async function takePhoto(quality) { const div = document.createElement('div'); const capture = document.createElement('button'); capture.textContent = 'Capture'; div.appendChild(capture); const video = document.createElement('video'); video.style.display = 'block'; const stream = await navigator.mediaDevices.getUserMedia({video: true}); document.body.appendChild(div); div.appendChild(video); video.srcObject = stream; await video.play(); // Resize the output to fit the video element. google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true); // Wait for Capture to be clicked. await new Promise((resolve) => capture.onclick = resolve); const canvas = document.createElement('canvas'); canvas.width = video.videoWidth; canvas.height = video.videoHeight; canvas.getContext('2d').drawImage(video, 0, 0); stream.getVideoTracks()[0].stop(); div.remove(); return canvas.toDataURL('image/jpeg', quality); } ''') display(js) data = eval_js('takePhoto({})'.format(quality)) binary = b64decode(data.split(',')[1]) with open(filename, 'wb') as f: f.write(binary) return filename from IPython.display import Image try: filename = take_photo() print('Saved to {}'.format(filename)) # Show the image which was just taken. display(Image(filename)) except Exception as err: # Errors will be thrown if the user does not have a webcam or if they do not # grant the page permission to access it. 
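  # Typical failures surface here as JavaScript exceptions, e.g. NotAllowedError when
  # camera permission is denied or NotFoundError when no webcam is attached.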
print(str(err)) # @title ## Export to Excel df_filled.to_excel('11_min_video.xlsx') df_filled_melted.to_excel('11_min_video_melted.xlsx') # @title ## Interactive Scatter Plot # df_filled # alt.Chart(df_filled_melted).mark_circle().encode( # alt.X(alt.repeat("column"), type='quantitative'), # alt.Y(alt.repeat("row"), type='quantitative'), # ).properties( # width=150, # height=150 # ).repeat( # row=['Elapsed_t', 'Variables', 'Values'], # column=['Variables', 'Elapsed_t', 'Values'] # ).interactive() # @title ## Check for Movie Duration import moviepy.editor # Converts into more readable format def convert(seconds): hours = seconds // 3600 seconds %= 3600 mins = seconds // 60 seconds %= 60 return hours, mins, seconds # Duration of Input Video video = moviepy.editor.VideoFileClip(INPUT_VIDEO) video_duration = float(video.duration) hours, mins, secs = convert(video_duration) print("The Video named {} Seconds:".format(INPUT_VIDEO), secs) # Duration of Resulting Video video2 = moviepy.editor.VideoFileClip(OUTPUT_VIDEO_NAME_MP4) video_duration2 = float(video2.duration) hours, mins, secs = convert(video_duration2) print("The Video named {} Seconds:".format(OUTPUT_VIDEO_NAME_MP4), secs) # Cross-check Length of Video and Audio print("Length of Audio in Seconds: ", (512/sr)*mfccs.shape[1]) # @title ## Getting FPS of Video #!/usr/bin/env python import cv2 import time if __name__ == '__main__' : # Start default camera video = cv2.VideoCapture(INPUT_VIDEO) fps = video.get(cv2.CAP_PROP_FPS) print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)) # Release video video.release() #@title ## Download Video via GitHub # Imports from google.colab import files import ipywidgets as widgets from IPython.display import HTML from IPython.display import display from IPython.display import clear_output from datetime import datetime import cv2 import numpy as np import pandas as pd import subprocess import io import os import base64 import librosa import librosa.display import altair as alt from scipy.signal import chirp, find_peaks, peak_widths import matplotlib.pyplot as plt from google.colab.patches import cv2_imshow !curl -o happy.png https://i.imgur.com/PXpWO5Cs.png !curl -o sad.png https://i.imgur.com/mAYh4Qts.png !curl -o paprika_lq.mov https://github.com/plantions/published/blob/master/paprika_lq.mov !curl -o paprika.mov https://github.com/plantions/published/blob/master/paprika.mov # uploaded = files.upload() # for fn in uploaded.keys(): # print('Successfully uploaded file "{NAME_OF_UPLOADED_VIDEO}".'.format( # NAME_OF_UPLOADED_VIDEO=fn,)) # NAME_OF_UPLOADED_VIDEO = fn # # NAME_OF_UPLOADED_VIDEO = '10s_silence.mov' # # NAME_OF_UPLOADED_VIDEO = 'hyper_tests.avi' ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 2850 100 2850 0 0 31666 0 --:--:-- --:--:-- --:--:-- 32022 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 2326 100 2326 0 0 26431 0 --:--:-- --:--:-- --:--:-- 26134 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 70704 0 70704 0 0 206k 0 --:--:-- --:--:-- --:--:-- 206k % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 70648 0 70648 0 0 227k 0 --:--:-- --:--:-- --:--:-- 226k
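###Markdown The two `.mov` downloads above report roughly 70 KB each and nearly identical sizes, which is far too small for video files; that is what curl returns when it fetches the GitHub HTML page behind a `/blob/` URL rather than the file itself. Below is a sketch of the same downloads against the raw file endpoints; it assumes the files are still published at the same paths in that repository. ###Code
!curl -L -o paprika_lq.mov https://raw.githubusercontent.com/plantions/published/master/paprika_lq.mov
!curl -L -o paprika.mov https://raw.githubusercontent.com/plantions/published/master/paprika.mov ###Output _____no_output_____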
Project_1_Facial_Keypoints/4. Fun with Keypoints.ipynb
###Markdown Facial Filters
Using your trained facial keypoint detector, you can now do things like add filters to a person's face, automatically. In this optional notebook, you can play around with adding sunglasses to detected faces in an image by using the keypoints detected around a person's eyes. Check out the `images/` directory to see what other .png's have been provided for you to try, too!
Let's start this process by looking at a sunglasses .png that we'll be working with!
###Code
# import necessary resources
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import cv2

# load in sunglasses image with cv2 and IMREAD_UNCHANGED
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)

# plot our image
plt.imshow(sunglasses)

# print out its dimensions
print('Image shape: ', sunglasses.shape)
###Output Image shape: (1123, 3064, 4)
###Markdown The 4th dimension
You'll note that this image actually has *4 color channels*, not just 3 as your avg RGB image does. This is due to the flag we set, `cv2.IMREAD_UNCHANGED`, which tells cv2 to read in the extra color channel.
Alpha channel
It has the usual red, blue, and green channels any color image has, and the 4th channel represents the **transparency level of each pixel** in the image; this is often called the **alpha** channel. Here's how the transparency channel works: the lower the value, the more transparent, or see-through, the pixel will become. The lower bound (completely transparent) is zero here, so any pixels set to 0 will not be seen; these look like white background pixels in the image above, but they are actually totally transparent. This transparent channel allows us to place this rectangular image of sunglasses on an image of a face and still see the face area that is technically covered by the transparent background of the sunglasses image!
Let's check out the alpha channel of our sunglasses image in the next Python cell. Because many of the pixels in the background of the image have an alpha value of 0, we'll need to explicitly print out non-zero values if we want to see them.
###Code
# print out the sunglasses transparency (alpha) channel
alpha_channel = sunglasses[:,:,3]
print ('The alpha channel looks like this (black pixels = transparent): ')
plt.imshow(alpha_channel, cmap='gray')

# just to double check that there are indeed non-zero values
# let's find and print out every value greater than zero
values = np.where(alpha_channel != 0)
print ('The non-zero values of the alpha channel are: ')
print (values)
###Output The non-zero values of the alpha channel are: (array([ 17, 17, 17, ..., 1109, 1109, 1109]), array([ 687, 688, 689, ..., 2376, 2377, 2378]))
###Markdown Overlaying images
This means that when we place this sunglasses image on top of another image, we can use the transparency channel as a filter:
* If the pixels are non-transparent (alpha_channel > 0), overlay them on the new image
Keypoint locations
In doing this, it's helpful to understand which keypoint belongs to the eyes, mouth, etc., so in the image below we also print the index of each facial keypoint directly on the image so you can tell which keypoints are for the eyes, eyebrows, etc. It may be useful to use keypoints that correspond to the edges of the face to define the width of the sunglasses, and the locations of the eyes to define the placement.
Next, we'll load in an example image.
Below, you've been given an image and set of keypoints from the provided training set of data, but you can use your own CNN model to generate keypoints for *any* image of a face (as in Notebook 3) and go through the same overlay process! ###Code # load in the data if you have not already! # otherwise, you may comment out this cell # -- DO NOT CHANGE THIS CELL -- # !mkdir /data !wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip !unzip -n /data/train-test-data.zip -d /data # load in training data key_pts_frame = pd.read_csv('/data/training_frames_keypoints.csv') # print out some stats about the data print('Number of images: ', key_pts_frame.shape[0]) # helper function to display keypoints def show_keypoints(image, key_pts): """Show image with keypoints""" plt.imshow(image) plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m') # a selected image n = 120 image_name = key_pts_frame.iloc[n, 0] image = mpimg.imread(os.path.join('/data/training/', image_name)) key_pts = key_pts_frame.iloc[n, 1:].as_matrix() key_pts = key_pts.astype('float').reshape(-1, 2) print('Image name: ', image_name) plt.figure(figsize=(5, 5)) show_keypoints(image, key_pts) plt.show() ###Output /opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:5: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead. """ ###Markdown Next, you'll see an example of placing sunglasses on the person in the loaded image.Note that the keypoints are numbered off-by-one in the numbered image above, and so `key_pts[0,:]` corresponds to the first point (1) in the labelled image. ###Code # Display sunglasses on top of the image in the appropriate place # copy of the face image for overlay image_copy = np.copy(image) # top-left location for sunglasses to go # 17 = edge of left eyebrow x = int(key_pts[17, 0]) y = int(key_pts[17, 1]) # height and width of sunglasses # h = length of nose h = int(abs(key_pts[27,1] - key_pts[34,1])) # w = left to right eyebrow edges w = int(abs(key_pts[17,0] - key_pts[26,0])) # read in sunglasses sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED) # resize sunglasses new_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC) # get region of interest on the face to change roi_color = image_copy[y:y+h,x:x+w] # find all non-transparent pts ind = np.argwhere(new_sunglasses[:,:,3] > 0) # for each non-transparent point, replace the original image pixel with that of the new_sunglasses for i in range(3): roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i] # set the area of the image to the changed region with sunglasses image_copy[y:y+h,x:x+w] = roi_color # display the result! plt.imshow(image_copy) ###Output _____no_output_____
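###Markdown The cell below is an optional refactor of the overlay logic above into two small helpers, so the same sunglasses placement can be reused for other images and keypoint sets. It only restates what the previous cell does; the keypoint indices (17, 26, 27, 34) follow the numbering convention described above. ###Code
import numpy as np
import cv2

def overlay_rgba(background, rgba, x, y, w, h):
    """Resize an RGBA image to (w, h) and paste its non-transparent pixels
    onto `background`, top-left corner at (x, y). Modifies `background` in place."""
    resized = cv2.resize(rgba, (w, h), interpolation=cv2.INTER_CUBIC)
    roi = background[y:y+h, x:x+w]
    mask = resized[:, :, 3] > 0            # alpha > 0 means the pixel is visible
    roi[mask] = resized[mask][:, :3]       # copy only the color channels

def add_sunglasses(image, key_pts, sunglasses_rgba):
    """Place sunglasses using the detected keypoints: x, y from point 17
    (left eyebrow edge), width from points 17-26, height from points 27-34."""
    out = np.copy(image)
    x, y = int(key_pts[17, 0]), int(key_pts[17, 1])
    h = int(abs(key_pts[27, 1] - key_pts[34, 1]))
    w = int(abs(key_pts[17, 0] - key_pts[26, 0]))
    overlay_rgba(out, sunglasses_rgba, x, y, w, h)
    return out

plt.imshow(add_sunglasses(image, key_pts, sunglasses)) ###Output _____no_output_____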
gee_change_detectoin.ipynb
###Markdown Rapid-DSM - Google Earth Engine Change Detection Import Python Libraries ###Code %load_ext autoreload %autoreload 2 import os import folium import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import json from IPython.display import Image import gee_helpers as geeh import geopandas as gpd from shapely.geometry import shape ###Output The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload ###Markdown Authenticate Goolge Features Google Earth Engine Cli ###Code !earthengine authenticate ###Output _____no_output_____ ###Markdown Google Earth Engine for Python ###Code # Import the Earth Engine library. import ee # Trigger the authentication flow. ee.Authenticate() ee.Initialize() ###Output _____no_output_____ ###Markdown Default Configuration Goolge Cloud Storage ###Code ## Google Drive Save Location figures_save_location = "/output" from google.cloud import storage storage_client = storage.Client(project=project_id) ###Output _____no_output_____ ###Markdown Get AOI for Wake CountyUsing open data portal api AOI: Walnut Creek Subwatershed ###Code # Data from Google Earth Engine # HUC12: USGS Watershed Boundary Dataset of Subwatersheds subwatershed = ee.FeatureCollection("USGS/WBD/2017/HUC12") aoi = (subwatershed.filter( ee.Filter.And( ee.Filter.eq("huc12", "030202011101"), ee.Filter.eq("states", "NC"), ee.Filter.eq("name", "Walnut Creek") ) )) url = aoi.getDownloadUrl(filetype="GEOJSON") df_aoi = gpd.read_file(url) # df_aoi.plot() df_envelope = gpd.GeoDataFrame(geometry=gpd.GeoSeries(df_aoi['geometry'].envelope)) df_envelope.plot() print(df_envelope.area) aoi = df_envelope.to_file("aoi.json", driver='GeoJSON') #make shapefile for GEE later df_envelope.to_file("aoi") with open("aoi.json", "r") as f: aoi_json = json.loads(f.read()) print(aoi_json['features'][0]['geometry']) os.environ['AOI_STRING'] = AOI_STRING = str(aoi_json['features'][0]['geometry']) ###Output _____no_output_____ ###Markdown Change Detection Land Cover Land Change Value Mappings ###Code # Maps OSM key values to localized schema land_covers = { 'highway_residential': "road", 'highway_motorway': "road", 'highway_trunk': "road", 'highway_primary': "road", 'highway_secondary': "road", 'highway_tertiary': "road", 'highway_unclassified': "road", 'highway_motorway_link': "road", 'highway_trunk_link': "road", 'highway_primary_link': "road", 'highway_secondary_link': "road", 'highway_tertiary_link': "road", 'parking_surface': "developed", 'surface_grass': "grass", 'landuse_grass': "grass", 'landuse_meadow': "grass", "natural_grassland": "grass", 'natural_water': "water", 'natural_wood': "forest", 'building_house': "building", 'building_residential': "building", 'building_retail': "building", 'building_public': "building" } query_keys = tuple(set([k.split('_')[0] for k in land_covers.keys()])) query_values = tuple(set([k.split('_')[1] for k in land_covers.keys()])) print(query_keys) # Map of human readable landclasses to coded values land_classes = { "road": 0, # Developed "building": 1, #Developed "barren": 2, "forest": 3, "grass": 4, #Herbaceous "water": 5, "developed": 6 } #colors from https://www.mrlc.gov/data/legends/national-land-cover-database-2011-nlcd2011-legend landcover_color_palette = [ 'E29E8C', # Class 0 - road/highways NLCD class color 22 'B50000', # Class 1 - Building dark red NLCD class color 24 'D2CDC0', # Class 2 - barren NLCD class color 31 '38814E', # Class 3 - Forest NLCD class color 42 '85C77E', # Class 4 - Grass NLCD class color 41 
'5475A8', # Class 5 - water NLCD class color 11 'E8D1D1' # Class 5 - developed NLCD class color 21 ] ###Output _____no_output_____ ###Markdown Priority Change Values ###Code change_priority = { "No Change": 0, "road to building": 7, "road to barren": 4, "road to water": 0, "road to grass": 1, "road to forest": 1, "road to developed": 4, "building to road": 1, "building to barren": 7, "building to water": 0, "building to grass": 3, "building to forest": 3, "building to developed": 5, "barren to road": 3, "barren to building": 7, "barren to water": 0, "barren to grass": 2, "barren to forest": 2, "barren to developed": 5, "water to road": 0, "water to building": 0, "water to barren": 0, "water to grass": 0, "water to forest": 0, "water to developed": 0, "grass to road": 3, "grass to building": 7, "grass to barren": 3, "grass to water":0, "grass to forest": 3, "grass to developed": 5, "forest to road": 3, "forest to building": 7, "forest to barren": 7, "forest to water": 0, "forest to grass": 3, "forest to developed": 7, "developed to road": 3, "developed to building": 7, "developed to barren": 7, "developed to water": 0, "developed to grass": 3, "developed to forest": 3 } ###Output _____no_output_____ ###Markdown Functions ###Code # Define a method for displaying Earth Engine image tiles to folium map. # Add EE drawing method to folium. folium.Map.add_ee_layer = geeh.add_ee_layer result = geeh.generateFromToExpression(land_classes) thematic_change_expression = result['expression'] tmp_change_class_data = [{"FromTo":v, "ClassID":result["labels"][v]} for v in result["labels"]] df_change_classes = pd.DataFrame(tmp_change_class_data) df_change_classes["priority"] = df_change_classes["FromTo"].map(change_priority) aoi_bbox = ee.FeatureCollection(gee_imagecollection_aoi) subwatershed = ee.FeatureCollection("USGS/WBD/2017/HUC12") aoi = (subwatershed.filter( ee.Filter.And( ee.Filter.eq("huc12", "030202011101"), ee.Filter.eq("states", "NC"), ee.Filter.eq("name", "Walnut Creek") ) )) coords = aoi.geometry().centroid().getInfo()['coordinates'] center_map = [coords[1], coords[0]] ###Output _____no_output_____ ###Markdown Download OpenStreetMap Data ###Code # Query Google Big Query to generate training data for image classification. 
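# Each returned row is one (OSM feature, tag) match: feature_type, osm_timestamp, the tag key and
# value, and the feature geometry serialized as a GeoJSON string in the `geom` column.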
# https://cloud.google.com/resource-manager/docs/creating-managing-projects #old tag values removing roads to see if I can fix the max #AND tags.value in ("water", "grass","meadow","grassland", "forest", "residential","secondary","tertiary","unclassified", "primary", "motorway", "trunk","motorway_link","trunk_link","primary_link","secondary_link","tertiary_link", "quarry", "farmland", "sand", "ground", "dirt", "wood", "retail","public", "construction", "surface", "house") df_landcover_samples = pd.io.gbq.read_gbq(''' SELECT feature_type,osm_timestamp, tags.key, tags.value, ST_ASGEOJSON(geometry) as geom FROM `bigquery-public-data.geo_openstreetmap.planet_features` AS feats, UNNEST(feats.all_tags) AS tags WHERE tags.key in ("building", "landuse", "highway", "water", "natural", "surface", "parking") AND tags.value in ("water", "grass","meadow","grassland", "forest", "residential","secondary", "primary", "motorway", "trunk","trunk_link","primary_link","secondary_link","tertiary_link", "quarry", "farmland", "sand", "ground", "dirt", "wood", "retail","public", "construction", "surface", "house") AND ST_INTERSECTS(feats.geometry, ST_GEOGFROMGEOJSON('{"type": "Polygon","coordinates": [[[ -78.782195642777026, 35.692691564447983 ],[ -78.531885144268472, 35.692691564447983 ],[ -78.531885144268472, 35.800084957942794 ],[ -78.782195642777026, 35.800084957942794 ],[ -78.782195642777026, 35.692691564447983 ]] ]}')) ''', project_id=project_id) #project_id is defined in config # Create dataframe column that represents the OSM key value pair df_landcover_samples["keyvalue"] = df_landcover_samples["key"] + "_" + df_landcover_samples["value"] # Map land cover and land class values to matching OSM key value combinations and drop and rows that don't match. df_landcover_samples["landcover"] = df_landcover_samples["keyvalue"].map(land_covers) df_landcover_samples["landclass"] = df_landcover_samples["landcover"].map(land_classes) df_landcover_samples = df_landcover_samples.dropna() df_landcover_samples.isnull().sum() df_null = df_landcover_samples[df_landcover_samples.isna().any(axis=1)] df_landcover_samples.to_csv('gbq_landcover.csv') assert df_null.size == 0, "DataFrame df_null should have a size of 0" df_landcover_samples.head() df_landcover_samples.describe(include='all') sns.factorplot("landcover", data=df_landcover_samples, aspect=2, kind="count", color='steelblue') plt.title('OpenStreetMap Feature Counts', fontsize=14) plt.tight_layout() plt.savefig(os.path.join(figures_save_location,"osm_count" + "_by_type"),dpi=300) ###Output _____no_output_____ ###Markdown Export Data to CSV for Backup ###Code df_landcover_samples.to_csv('gbq_landcover.csv') import geopandas as gpd from shapely.geometry import shape df_landcover_samples = gpd.read_file("data/gbq_landcover.csv") df_landcover_samples['features'] = df_landcover_samples['geom'].apply(lambda x: {"geometry": json.loads(x) , "properties": {}, "type": "Feature"}) df_samples = df_landcover_samples[(df_landcover_samples['key'] != 'highway') & (df_landcover_samples['feature_type']!= 'lines')] # base_geo = { # "type": "FeatureCollection", # "features": [] # } # base_geo['features'] = df_samples['features'].tolist() # for f in base_geo['features']: # try: # newShape = shape(f) # except: # print(f) # gdf = gpd.GeoDataFrame.from_features(base_geo['features']) # with open("data/osm_samples.geojson", "w") as f: # dump = json.dumps(base_geo) # f.write(dump) # close file # f.close() # gdf['features'] = gdf['geom'].apply(lambda x: {'geometry': shape(x) if x , 'properties': 
{}, 'type': 'Feature'}) df_samples['geometry'] = df_samples['geom'].apply(lambda x: shape(json.loads(x)) if x else None) # df_samples['geom'] = df_samples['geom'].apply(shape) # print(df_landcover_samples['geomerty']) # gdf2 = gpd.read_file("data/osm_samples.geojson") # gdf2.head() # df_samples = gdf.set_geometry(df_samples.geometry) # gdf2 = gpd.GeoDataFrame.from_features(gdf['features']) # gdf = gdf.set_geometry(gdf2.geometry) # gdf = gdf.set_geometry(gdf.geometry) df_samples.head() df_samples.plot("landcover", legend=True) df_samples['landcover'].unique() df_samples['landclass'] = pd.to_numeric(df_samples['landclass'], downcast="integer") df_samples['landclass'].unique() # df_samples['landclass'] = df_samples['landclass'].astype(int) df_samples[['landcover','landclass','geometry']].set_crs('epsg:4326').to_file('data/osm_samples.gpkg', driver='GPKG', layer='osm_samples') df_landcover_samples['date'] = pd.to_datetime(df_landcover_samples.osm_timestamp,format='%Y%m', errors='coerce') df_landcover_samples['month_year'] = pd.to_datetime(df_landcover_samples['date']).dt.to_period('M') df_landcover_samples['count'] = 1 df_landcover_samples.head() # df_osm_temporal = df_landcover_samples.groupby(by=["month_year",'landcover'], as_index=False).count() # df_osm_temporal = df_osm_temporal.pivot( "month_year","landcover", "count") # df_osm_temporal = df_osm_temporal.resample('1M').count().fillna(0) # ax = df_osm_temporal.plot(kind='area') # ax.set_xlabel("Year") # ax.set_ylabel("Count") # plt.savefig(os.path.join(figures_save_location,"osm_year_count" + "_by_type"),dpi=300) for index, row in df_landcover_samples.iterrows(): geojson = json.loads(row["geom"]) feature = shape(geojson) row["geometry"] = feature df_landcover_samples.head() ###Output _____no_output_____ ###Markdown Convert raw OSM data to geojson so that it can be transformed into a GEE FeatureCollection.Buffer road data to match road size ###Code """ Convert raw OSM data to geojson so that it can be transformed into a GEE FeatureCollection. 
""" sample_features = [] road_features = [] water_features = [] for index, row in df_landcover_samples.iterrows(): feature = {'geometry': row['geom'], 'properties': {}, 'type': 'Feature'} # geojson = json.loads(feature) # if row["key"] != "highway" and geojson["type"] != "Point" and len(geojson["coordinates"]) > 2: # geojson["type"] = "Polygon" # first_node = geojson["coordinates"][0] # node_len = len(geojson["coordinates"]) - 1 # coordinates =[geojson["coordinates"]] # geojson["coordinates"] = coordinates # feature = ee.Feature(geojson,{"landcover": row["landcover"], "landclass": row["landclass"]}) #Values deriverd from https://safety.fhwa.dot.gov/geometric/pubs/mitigationstrategies/chapter3/3_lanewidth.cfm and # https://wiki.openstreetmap.org/wiki/Highway_classes if row["key"] == "highway" and geojson["type"] == "LineString": # if row['value'] == 'residential': # feature = feature.buffer(2.7 * 1) # elif row['value'] == 'unclassifed': # feature = feature.buffer(2.7 * 1) # elif row['value'] == 'tertiary': #new # feature = feature.buffer(2.7 * 2) # elif row['value'] == 'secondary': #new # feature = feature.buffer(3.6 * 2) # elif row['value'] == 'primary': # feature = feature.buffer(3.6 * 2) # elif row['value'] == 'trunk': # feature = feature.buffer(3.6 * 4) # elif row['value'] == 'motorway': # feature = feature.buffer(3.6 * 4) # elif row['value'] == 'motorway_link': # feature = feature.buffer(3.6 * 1) # elif row['value'] == 'trunk_link': # feature = feature.buffer(3.6 * 1) # elif row['value'] == 'primary_link': # feature = feature.buffer(3.6 * 1) # elif row['value'] == 'secondary_link': # feature = feature.buffer(3.6 * 1) # elif row['value'] == 'tertiary_link': # feature = feature.buffer(3.6 * 1) road_features.append(feature) elif row["value"] == "water": water_features.append(feature) else: sample_features.append(feature) print(f"Sample Features {len(sample_features)}") print(f"Road Features {len(road_features)}") print(f"Water Features {len(water_features)}") # sample_feature_collection = ee.FeatureCollection(sample_features[0:(int(len(sample_features)/4))]) # water_feature_collection = ee.FeatureCollection(water_features) # road_feature_collection = ee.FeatureCollection(road_features) # road_class = ee.Image().byte().paint(road_feature_collection, "landclass").rename("landclass") # classes = ee.Image().byte().paint(sample_feature_collection, "landclass").rename("landclass") gdf2 = gpd.GeoDataFrame.from_features(json.loads(str(sample_features)) gdf = gdf.set_geometry(gdf2.geometry) gdf.head() ###Output _____no_output_____ ###Markdown Load Planet ImageColelction ###Code # Fetch Planet Data. 
start_image = '2018-01-30T15:21:54' end_image = '2019-12-31T15:38:18' start_image = "2018-06-01" end_image = "2020-08-26" # planet_scope = ee.ImageCollection('users/ctwhite/planetdata/rapid_dsm_aoi').filterDate(start_image, end_image).filterBounds(aoi) #Auto generated print("ImageCollection: ", imageCollection) planet_scope_ic = ee.ImageCollection(imageCollection) print("ImageCollection Size: ", planet_scope_ic.size().getInfo()) planet_scope = (planet_scope_ic .filterBounds(aoi) .filterDate(start_image, end_image) .filter(ee.Filter.gt('heavy_haze_percent', 0).Not()) .filter(ee.Filter.gt('light_haze_percent', 0).Not()) .filter(ee.Filter.eq('cloud_percent', 0))) print("ImageCollection Filtered Size: ", planet_scope.size().getInfo()) ###Output _____no_output_____ ###Markdown Export OSM Data ###Code myMap = folium.Map(location=center_map, zoom_start=12, height=500) planet_scope_mosaic_log = planet_scope.median().log() planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log, planet_scope_mosaic_vis_params, 'PlanetScope Log Median') classes = sample_feature_collection.filterBounds(aoi).filter(ee.Filter.notNull(['landclass'])).reduceToImage(properties=["landclass"], reducer=ee.Reducer.first()).clipToCollection(aoi) road_rast = road_feature_collection.filterBounds(aoi).filter(ee.Filter.notNull(['landclass'])).reduceToImage(properties=["landclass"], reducer=ee.Reducer.first()).clipToCollection(aoi) water_class = water_feature_collection.filterBounds(aoi).filter(ee.Filter.notNull(['landclass'])).reduceToImage(properties=["landclass"], reducer=ee.Reducer.first()).clipToCollection(aoi) aoi_rast = ee.Image().byte().paint(aoi) aoi_mask_tmp = aoi_rast.eq(1) aoi_mask = aoi_mask_tmp.Not() aoi_mask = aoi_mask.mask(aoi_mask) myMap.add_ee_layer(classes, {"min": 0, "max": 6, "palette": landcover_color_palette}, 'Trainging Classes ') # myMap.add_ee_layer(road_feature_collection.draw(color= 'blue', strokeWidth= 2), {}, 'Roads') myMap.add_ee_layer(water_class.clipToCollection(aoi), {"min": 0, "max": 6, "palette": landcover_color_palette}, 'Water Classes ') myMap.add_ee_layer(aoi_mask, {"palette": ["blue"], "opacity": 0.25}, 'AOI Mask') myMap.add_child(folium.LayerControl()) display(myMap) sns.set_palette(sns.color_palette([ "#%s" % c if c is not "purple" else c for c in landcover_color_palette])) myMap = folium.Map(location=center_map, zoom_start=12, height=500) planet_scope_mosaic_log = planet_scope.median().log() planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log, planet_scope_mosaic_vis_params, 'PlanetScope Log Median') classes_rast = sample_feature_collection.filter(ee.Filter.notNull(['landclass'])).reduceToImage(properties=["landclass"], reducer=ee.Reducer.first()) aoi_rast = ee.Image().byte().paint(aoi) # Removed water for sample data raster aoi_mask_tmp = aoi_rast.eq(1) aoi_mask = aoi_mask_tmp.Not() aoi_mask = aoi_mask.mask(aoi_mask) myMap.add_ee_layer(classes_rast.updateMask(aoi_mask), {"min": 0, "max": 6, "palette": landcover_color_palette}, 'Sample Features') myMap.add_ee_layer(aoi_mask, {"palette": ["blue"], "opacity": 0.25}, 'AOI Mask') myMap.add_child(folium.LayerControl()) display(myMap) #https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend ###Output _____no_output_____ ###Markdown Add Bare Earth Samples Derived from NDVI and BSI ###Code ps_median = planet_scope.median().clipToCollection(aoi_bbox) bsiVisParam = 
{"bands":["bsi"],"min":99.543,"max":103.578,"palette":["ff3e04","ffb308","ffec08","ffffff"],'dimensions': 1000}; # Image(url=bsi.clip(aoi).getThumbUrl(bsiVisParam)) # bare_earth_bsi = bsi.gte(103).And(bsi.lte(105.5)).And(ndvi.gt(0.16)).And(ndvi.lt(0.22)).selfMask() classes_mask = ps_median.where(classes.gt(0), -9999).neq(-9999).selfMask() ndvi = createNDVI(ps_median) #.updateMask(classes_mask) bsi = createBSI(ps_median) #.updateMask(classes_mask) ndwi = createNDWI(ps_median) #.updateMask(classes_mask) dsbi = createDSBI(ps_median) #experimental index to seperate grass, forest, and impervious # ndbsvi = createBSI_NDVI_index(bsi, ndvi) bare_earth_bsi = ( bsi.gt(103.5) .And(bsi.lte(107)) .And(ndvi.gt(0.15)) .And(ndvi.lt(0.2)) # .And(ndwi.lt(0)) #Filter out water features .selfMask()) objectId = bare_earth_bsi.connectedComponents(ee.Kernel.plus(1), 128) # Compute the number of pixels in each object defined by the "labels" band. objectSize = objectId.select('labels').connectedPixelCount(128,False) # Get a pixel area image. pixelArea = ee.Image.pixelArea() # Multiply pixel area by the number of pixels in an object to calculate # the object area. The result is an image where each pixel # of an object relates the area of the object in m^2. objectArea = objectSize.multiply(pixelArea) # Map.addLayer(objectArea, null, 'objectArea'); ## Threshold the `objectArea` image to define a mask that will mask out areaMask = objectArea #.gte(5); # Update the mask of the `objectId` layer defined previously using the # minimum area mask just defined. bare_earth_samples = objectId.updateMask(classes_mask.select('b1')) #.updateMask(areaMask) bare_earth_vector = bare_earth_samples.select('labels').reduceToVectors(geometry=aoi,labelProperty='labels',scale=30 ) # center_map = [35.752265080410844,-78.63944155399622] # Create a folium map object. 
print(center_map) myMap = folium.Map(location=center_map, zoom_start=14, height=500) planet_scope_mosaic_log = planet_scope.median().log().clipToCollection(aoi_bbox) planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log, planet_scope_mosaic_vis_params, 'PlanetScope Log Median') myMap.add_ee_layer(bsi.gt(104).And(bsi.lt(105)).selfMask(), {"palette": ["yellow"],"bands":["bsi"],"min":102.7,"max":105,"opacity": 0.9}, 'BSI') myMap.add_ee_layer(ndvi.gt(0.25).And(ndvi.lt(0.27)).selfMask(), {"palette": ["red"], "opacity": 0.5}, 'NDVI') # print(bare_earth_samples.bandNames().getInfo()) myMap.add_ee_layer(bare_earth_samples.select(['labels']), {"palette": ['FF0000']}, 'Large hotspots') myMap.add_child(folium.LayerControl()) display(myMap) ###Output _____no_output_____ ###Markdown Export Before after Images to Google Cloud Storage ###Code # before_image = planet_scope.filterDate("2020-04-01", "2020-04-30").median() # after_image = planet_scope.filterDate("2020-05-01", "2020-05-31").median() # before_image = planet_scope.filterDate('2018-06-01', '2018-12-31').median().clip(aoi) # after_image = planet_scope.filterDate('2019-01-01', '2019-03-01').median().clip(aoi) # 2018-12-18T15:05:08 to 2019-02-26T14:54:21 # Hardcoded for testing start_image = "2018-06-01" end_image = "2020-08-26" from datetime import datetime date_format = "%Y-%m-%d" # date_format = "%Y-%m-%dT%H:%M:%S" d1 = datetime.strptime(start_image,date_format) d2 = datetime.strptime(end_image,date_format) # date1 + (date2 - date1) / 2 date_midpoint = d1 + (d2 - d1) / 2 # first dat print(date_midpoint.isoformat()) print(date_midpoint) before_img_col = planet_scope.filterBounds(aoi).filterDate(start_image, date_midpoint) after_img_col = planet_scope.filterBounds(aoi).filterDate(date_midpoint, end_image) print("ImageCollection Before Size: ", before_img_col.size().getInfo()) print("ImageCollection After Size: ", after_img_col.size().getInfo()) before_image = before_img_col.median().toFloat().clipToCollection(aoi_bbox) after_image = after_img_col.median().toFloat().clipToCollection(aoi_bbox) # Try mosaic over median # before_image = before_img_col.mosaic().toFloat().clipToCollection(aoi_bbox) # after_image = after_img_col.mosaic().toFloat().clipToCollection(aoi_bbox) print(after_image.getInfo()) # before_start_date = before_image.getInfo() # print(before_start_date) ps_collection_obcd = { "before": before_image, "after": after_image } exportToDrive(before_image,"ps_before_3m", resolution=3) exportToDrive(after_image,"ps_after_3m", resolution=3) myMap = folium.Map(location=center_map, zoom_start=12, height=500) planet_scope_mosaic_log = planet_scope.median().log() planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(before_image.log(), planet_scope_mosaic_vis_params, 'PlanetScope Log Median') buffered_building_permits_feature_collection = building_permits_feature_collection.map(lambda f: f.buffer(f.get('buf_dist'))) building_permits_rast = ee.Image().byte().paint(buffered_building_permits_feature_collection) myMap.add_ee_layer(building_permits_rast, {"palette": ["yellow"], "opacity": 0.60}, 'Building Permits') myMap.add_ee_layer(aoi_mask, {"palette": ["blue"], "opacity": 0.25}, 'AOI Mask') myMap.add_child(folium.LayerControl()) display(myMap) ###Output _____no_output_____ ###Markdown Testing Statistical Threashold Classification ###Code ndvi = createNDVI(ps_median) bsi = createBSI(ps_median) dsbi = createDSBI(ps_median) 
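# For reference: NDVI is the standard normalized difference (NIR - Red) / (NIR + Red), which
# createNDVI is assumed to compute from PlanetScope bands b4 and b3; BSI and DSBI follow
# whatever band math the createBSI / createDSBI helpers define.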
#experimental index to seperate grass, forest, and impervious ndbsvi = createBSI_NDVI_index(bsi, ndvi) count = 0 def quantileIndexes(image, band, palette=["ff3e04","ffb308","ffec08","ffffff"]): percentiles = image.reduceRegion( reducer=ee.Reducer.percentile([10,25,50,75,90]), geometry=aoi, scale=30, # tileScale=4, maxPixels= 1e12) percentile10th = ee.Number(percentiles.get(band+'_p10')) lowerQuartile = ee.Number(percentiles.get(band+'_p25')) median = ee.Number(percentiles.get(band+'_p50')) upperQuartile = ee.Number(percentiles.get(band+'_p75')) percentile90th = ee.Number(percentiles.get(band+'_p90')) print("10th: {}".format(percentile10th.getInfo())) print("lower: {}".format(lowerQuartile.getInfo())) print("median: {}".format(median.getInfo())) print("upper: {}".format(upperQuartile.getInfo())) print("90th: {}".format(percentile90th.getInfo())) # Get AOI centroid # center_map = aoi.geometry().centroid().getInfo()['coordinates'].reverse() myMap = folium.Map(location=center_map, zoom_start=16, height=500) planet_scope_mosaic_log = planet_scope.median().log().clip(aoi) planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log, planet_scope_mosaic_vis_params, 'PlanetScope Log Median') #Low to High myMap.add_ee_layer(image, {"bands":[band],"min":lowerQuartile.getInfo(),"max":upperQuartile,"palette":palette}, '%s low to High' %band) #Low to Med myMap.add_ee_layer(image, {"bands":[band],"min":lowerQuartile.getInfo(),"max":median.getInfo(),"palette":palette}, '%s low to Med' % band) #Med to High myMap.add_ee_layer(image, {"bands":[band],"min":median.getInfo(),"max":upperQuartile.getInfo(),"palette":palette}, '%s Med to High' % band) #low myMap.add_ee_layer(image, {"bands":[band],"max":lowerQuartile.getInfo(),"palette":palette}, '%s low' % band) #high myMap.add_ee_layer(image, {"bands":[band],"min":upperQuartile.getInfo(),"palette":palette}, '%s high' % band) #masks # myMap.add_ee_layer(water,{},"Water Mask") landcover_color_palette = [ 'E29E8C', # Class 0 - road/highways NLCD class color 22 'B50000', # Class 1 - Building dark red NLCD class color 24 'D2CDC0', # Class 2 - barren NLCD class color 31 '38814E', # Class 3 - Forest NLCD class color 42 '85C77E', # Class 4 - Grass NLCD class color 41 '5475A8', # Class 5 - water NLCD class color 11 'E8D1D1' # Class 5 - developed NLCD class color 21 ] dem = ee.Image("USGS/NED").select('elevation') dsm = ee.Image("JAXA/ALOS/AW3D30/V2_2").select('AVE_DSM') buildings_canopy = dsm.subtract(dem) #.focal_min(radius=1) # Less Than or equal to Lower myMap.add_ee_layer(image.updateMask(image.lte(lowerQuartile)), {"bands":[band],"palette":[landcover_color_palette[0]],"max": lowerQuartile.getInfo()}, '%s Less Than or equal to Lower (road/highways)' % band) # Less Than or equal to 10th myMap.add_ee_layer(image.updateMask(image.lte(percentile10th)), {"bands":[band],"palette":["grey"],"max": percentile10th.getInfo()}, '%s Less Than or equal to 10th (GREY) - Developed Imperious Road/Building/Asphalt' % band) # Less Than median greater than lower myMap.add_ee_layer(image.updateMask(image.gt(lowerQuartile).And(image.lt(median))), {"bands":[band],"min":lowerQuartile.getInfo(),"max":median.getInfo(),"palette":[landcover_color_palette[4]]}, '%s Less Than median greater than lower mask (GRASS)' % band) # Greater Than Lower And Less than Upper myMap.add_ee_layer(image.updateMask(image.gt(lowerQuartile).And(image.lt(upperQuartile))), 
{"bands":[band],"min":lowerQuartile.getInfo(),"max":upperQuartile.getInfo(),"palette":[landcover_color_palette[4]]}, '%s Greater Than Lower And Less than Upper (Grass/Forest)' % band) # Greater Than median less than upper myMap.add_ee_layer(image.updateMask(image.gt(median).And(image.lt(upperQuartile))), {"bands":[band],"min":median.getInfo(),"max":upperQuartile.getInfo(),"palette":[landcover_color_palette[3]]}, '%s Greater Than median less than upper mask (Forest Boundary)' % band) # Greater Than or equal to Upper myMap.add_ee_layer(image.updateMask(image.gte(upperQuartile)), {"bands":[band],"palette":[landcover_color_palette[3]],"min":upperQuartile.getInfo()}, '%s Greater Than or equal to Upper (Forest)' % band) # Greater Than or equal to percentile90th myMap.add_ee_layer(image.updateMask(image.gte(percentile90th)), {"bands":[band],"palette":[landcover_color_palette[3]],"min":percentile90th.getInfo()}, '%s Greater Than or equal to 90th (Forest)' % band) # Grass DSM myMap.add_ee_layer(image.updateMask(image.gt(lowerQuartile).And(buildings_canopy.lte(2))), {"bands":[band],"min":lowerQuartile.getInfo(),"max":upperQuartile.getInfo(),"palette":[landcover_color_palette[4]]}, '%s Grass with DSM)' % band) # Forest DSM myMap.add_ee_layer(image.updateMask(image.gt(lowerQuartile).And(buildings_canopy.gt(2))), {"bands":[band],"min":lowerQuartile.getInfo(),"max":upperQuartile.getInfo(),"palette":[landcover_color_palette[3]]}, '%s Forest with DSM)' % band) # Developed DSM myMap.add_ee_layer(image.updateMask(image.lte(lowerQuartile).And(buildings_canopy.lte(3))), {"bands":[band],"palette":[landcover_color_palette[0]],"max": percentile10th.getInfo()}, '%s Developed with DSM' % band) # Building DSM myMap.add_ee_layer(image.updateMask(image.lte(lowerQuartile).And(buildings_canopy.gt(3))), {"bands":[band],"palette":[landcover_color_palette[1]],"max": percentile10th.getInfo()}, '%s Buildings with DSM' % band) myMap.add_child(folium.LayerControl()) display(myMap) # quantileIndexes(ndbsvi, 'ndbsiv') quantileIndexes(ndvi.clipToCollection(aoi), 'ndvi', ['green','white', 'purple']) ###Output _____no_output_____ ###Markdown Bare Earth Merge ###Code bare_earth_feature_collection = bare_earth_vector.map(lambda f: f.set({"landclass": 2, "landcover": "barren"})) combined_feature_collection = sample_feature_collection.merge(bare_earth_feature_collection).map(lambda f: f.set("area", f.area(0.001))); fc_list = combined_feature_collection.toList(count=1500).getInfo() list_of_properties = [f['properties'] for f in fc_list] df_combined_samples = pd.DataFrame(list_of_properties) df_sample_stats = df_combined_samples.groupby(['landcover','landclass'])['area'].describe().reset_index() df_sample_stats['area']= df_combined_samples.groupby(['landcover','landclass'])['area'].sum().values df_sample_stats.sort_values('landclass', inplace=True) print(df_sample_stats) print(df_sample_stats[df_sample_stats['landclass'] == 2]['50%'].head(20)) #Get min area min_area = df_sample_stats['area'].min() print(min_area) # calcluate the sample size by dividing the minmum total class area and dividing it by the median object area feature class. 
df_sample_stats['sample_size'] = df_sample_stats['50%'].apply(lambda x: int(min_area / x) if int(min_area / x) else 2, 1) df_sample_stats['median'] = df_sample_stats['50%'] # Reformating for paper df_sample_stats[['landcover','count','min', 'median', 'sample_size']].set_index('landcover') # Get a list of sample sizes stratifed_samples_sizes = df_sample_stats['sample_size'].astype(int).values.tolist() # Set water class to 0 because we will copy this data in after classification #stratifed_samples_sizes[5] = 0 # Remove Water # stratifed_samples_sizes[2] = 19 # stratifed_samples_sizes[0] = 0 #roads added post classification print(stratifed_samples_sizes) # Get a list of sample sizes stratifed_samples_sizes = df_sample_stats['sample_size'].astype(int).values.tolist() # Set water class to 0 because we will copy this data in after classification #stratifed_samples_sizes[5] = 0 # Remove Water # stratifed_samples_sizes[2] = 19 # stratifed_samples_sizes[0] = 0 #roads added post classification print(stratifed_samples_sizes) f, ax = plt.subplots(figsize=(12, 8)) ax.set_xscale("log") df_combined_samples['area_km'] = df_combined_samples['area'].apply(lambda x: x / 1e6) # df_combined_samples = df_combined_samples[df_combined_samples['landcover'] != 'road'] # Plot the orbital period with horizontal boxes sns.boxplot(x="area", y="landcover", data=df_combined_samples.sort_values('landclass'), whis=[0, 100],palette=sns.color_palette(['#B50000','#D2CDC0', '#38814E','#85C77E', '#E8D1D1'])) # Add in points to show each observation sns.stripplot(x="area", y="landcover", data=df_combined_samples.sort_values('landclass'), size=2, color=".3", linewidth=0, dodge=True) # Tweak the visual presentation ax.xaxis.grid(True) ax.set(ylabel="") ax.set_xlabel("Area m2",fontsize=16 ) ax.tick_params(labelsize=16) sns.despine(trim=True, left=True) # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'road']['50%'].values, linewidth=1, color="#"+landcover_color_palette[0],linestyle="--", label="Road") # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'building']['50%'].values, linewidth=1, color="#"+landcover_color_palette[1],linestyle="--", label="Building") # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'barren']['50%'].values, linewidth=1, color="#"+landcover_color_palette[2],linestyle="--", label="Barren") # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'forest']['50%'].values, linewidth=1, color="#"+landcover_color_palette[3],linestyle="--", label="Forest") # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'grass']['50%'].values, linewidth=1, color="#"+landcover_color_palette[4],linestyle="--", label="Grass") # ax.axvline(x=df_sample_stats[df_sample_stats['landcover'] == 'water']['50%'].values, linewidth=1, color="#"+landcover_color_palette[5],linestyle="--", label="Water") plt.title('Land Cover Samples', fontsize=24) # landcover_color_palette = [ # 'E29E8C', # Class 0 - road/highways NLCD class color 22 # 'B50000', # Class 1 - Building dark red NLCD class color 24 # 'D2CDC0', # Class 2 - barren NLCD class color 31 # '38814E', # Class 3 - Forest NLCD class color 42 # '85C77E', # Class 4 - Grass NLCD class color 41 # '5475A8', # Class 5 - water NLCD class color 11 # 'E8D1D1' # Class 5 - developed NLCD class color 21 # ] plt.savefig(os.path.join(figures_save_location,"Samples Box Area Plot"),dpi=300) classes_w_barren = ee.Image().byte().paint(combined_feature_collection, "landclass").rename("landclass").where( classes.eq(5).selfMask(), 5).where( 
classes.eq(0).selfMask(), 0).where( classes.eq(1).selfMask(), 1 ) ###Output _____no_output_____ ###Markdown Outlire Detection ###Code geeh.detectOutlires(before_image.select('b1'),'b1',scale=30) geeh.detectOutlires(before_image.select('b4'),'b4') # Set visualization parameters. visParams = {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2} # Get AOI centroid center_map = aoi.geometry().centroid().getInfo()['coordinates'].reverse() #[aoi.centroid().getInfo()['coordinates'][1],aoi.centroid().getInfo()['coordinates'][0]] # Create a folium map object. myMap = folium.Map(location=center_map, zoom_start=13, height=500) # Add the elevation model to the map object. planet_scope_mosaic = planet_scope.median() mapID = ee.Image(planet_scope_mosaic).getMapId(visParams) myMap.add_ee_layer(planet_scope_mosaic, visParams, 'PlanetScope Mosaic') myMap.add_ee_layer(before_image, visParams, 'April') myMap.add_ee_layer(after_image, visParams, 'May') analog_nir = before_image.select('b4').addBands(after_image.select('b4')) analog_red = before_image.select('b3').addBands(after_image.select('b3')) analog_blue = before_image.select('b2').addBands(after_image.select('b2')) analog_green = before_image.select('b1').addBands(after_image.select('b1')) analog_ndvi = createNDVI(before_image).select('ndvi').addBands(createNDVI(after_image).select('ndvi')) analog_bsi = createBSI(before_image).select('bsi').addBands(createBSI(after_image).select('bsi')) nir_min_max = analog_nir.reduceRegion(reducer=ee.Reducer.minMax(),geometry=aoi,scale=3,maxPixels= 1e12) print(nir_min_max.getInfo()) nir_min = ee.Number(nir_min_max.get("b4_1_min")) nir_max = ee.Number(nir_min_max.get("b4_1_max")) myMap.add_ee_layer(analog_nir, {"bands": ["b4","b4_1","b4_1"], "min":nir_min, "max":nir_max}, 'Analog NIR') analog_red_min_max = analog_red.reduceRegion(reducer=ee.Reducer.minMax(),geometry=aoi,scale=3,maxPixels= 1e12) print(analog_red_min_max.getInfo()) analog_red_min = ee.Number(analog_red_min_max.get("b3_1_min")) analog_red_max = ee.Number(analog_red_min_max.get("b3_1_max")) myMap.add_ee_layer(analog_red, {"bands": ["b3", "b3_1", "b3_1"], "min":analog_red_min, "max":analog_red_max}, 'Analog B3 Red') analog_blue_min_max = analog_blue.reduceRegion(reducer=ee.Reducer.minMax(),geometry=aoi,scale=3,maxPixels= 1e12) print(analog_blue_min_max.getInfo()) analog_blue_min = ee.Number(analog_blue_min_max.get("b2_1_min")) analog_blue_max = ee.Number(analog_blue_min_max.get("b2_1_max")) myMap.add_ee_layer(analog_blue, {"bands": ["b2", "b2_1", "b2_1"], "min":analog_blue_min, "max":analog_blue_max}, 'Analog B2 Blue') myMap.add_ee_layer(analog_green, {"bands": ["b1", "b1_1", "b1_1"], "min":326, "max":4233}, 'Analog B1 Green') analog_ndvi_min_max = analog_ndvi.reduceRegion(reducer=ee.Reducer.minMax(),geometry=aoi,scale=3,maxPixels= 1e12) print(analog_ndvi_min_max.getInfo()) analog_ndvi_min = ee.Number(analog_ndvi_min_max.get("ndvi_1_min")) analog_ndvi_max = ee.Number(analog_ndvi_min_max.get("ndvi_1_max")) myMap.add_ee_layer(analog_ndvi, {"bands": ["ndvi", "ndvi_1", "ndvi_1"], "min":analog_ndvi_min, "max":analog_ndvi_max}, 'Analog NDVI') analog_bsi_min_max = analog_bsi.reduceRegion(reducer=ee.Reducer.minMax(),geometry=aoi,scale=3,maxPixels= 1e12) print(analog_bsi_min_max.getInfo()) analog_bsi_min = ee.Number(analog_bsi_min_max.get("bsi_1_min")) analog_bsi_max = ee.Number(analog_bsi_min_max.get("bsi_1_max")) myMap.add_ee_layer(analog_bsi, {"bands": ["bsi", "bsi_1", "bsi_1"], "min":analog_bsi_min, "max":analog_bsi_max}, 'Analog BSI') # Add study area 
markers # folium.GeoJson(cary_park.toGeoJSON()).add_to(myMap) # folium.GeoJson(centenial.toGeoJSON()).add_to(myMap) # Add a layer control panel to the map. myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) ###Output _____no_output_____ ###Markdown Generate Change Mask ###Code water_mask = ps_median.where(classes.eq(5), -9999).neq(-9999).select('b3').selfMask() binary_red_change = before_image.select('b3').subtract(after_image.select('b3'))#.updateMask(water_mask) binary_reducer = ee.Reducer.mean().combine(reducer2 = ee.Reducer.stdDev(), sharedInputs=True) binary_red_std = binary_red_change.reduceRegion(reducer=binary_reducer, geometry=aoi.geometry(),scale=30,maxPixels= 1e12) neg_3std = binary_red_std.get('b3_mean').getInfo() + (binary_red_std.get('b3_stdDev').getInfo() * -2.5) red_change_mask = binary_red_change.focal_min(1).lte(neg_3std).selfMask() sample_mask = binary_red_change.focal_min(1).gt(neg_3std).selfMask() myMap = folium.Map(location=center_map, zoom_start=15, height=900) for layer in ps_collection_obcd: myMap.add_ee_layer(ps_collection_obcd[layer], {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) myMap.add_ee_layer(red_change_mask, {"palette": ['red'], 'opacity': 0.8}, 'Red Change Mask') myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) geeh.quantileIndexes(binary_red_change, 'b3') geeh.detectOutlires(binary_red_change, 'b3', scale=30) geeh.detectOutlires(binary_nir_change, 'b4') objectId = red_change_mask.connectedComponents(ee.Kernel.plus(1), 128) # objectId128 = red_change_mask.connectedComponents(ee.Kernel.plus(1), 128) # Compute the number of pixels in each object defined by the "labels" band. objectSize = objectId.select('labels').connectedPixelCount(128,False) # Get a pixel area image. pixelArea = ee.Image.pixelArea() # Multiply pixel area by the number of pixels in an object to calculate # the object area. The result is an image where each pixel # of an object relates the area of the object in m^2. objectArea = objectSize.multiply(pixelArea) # Map.addLayer(objectArea, null, 'objectArea'); ## Threshold the `objectArea` image to define a mask that will mask out areaMask = objectArea.gte(1000); # Update the mask of the `objectId` layer defined previously using the # minimum area mask just defined. change_locations = objectId.updateMask(areaMask) myMap = folium.Map(location=center_map, zoom_start=15, height=900) for layer in ps_collection_obcd: myMap.add_ee_layer(ps_collection_obcd[layer], {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) # myMap.add_ee_layer(objectId128.randomVisualizer(), {'opacity': 0.75}, 'objectId128') myMap.add_ee_layer(change_locations.randomVisualizer(), {'opacity': 0.75}, 'Change Locations') myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) ###Output _____no_output_____ ###Markdown Remove Samples from change pixels ###Code #Switch Back to OSM classes because the bearren class was acting up classes_w_barren_change_mask = classes_w_barren.updateMask(sample_mask) # Get AOI centroid # Create a folium map object. myMap = folium.Map(location=center_map, zoom_start=14, height=500) # Add the elevation model to the map object. 
planet_scope_mosaic = planet_scope.median() planet_scope_mosaic_log = planet_scope.median().log() planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log, planet_scope_mosaic_vis_params, 'PlanetScope Log Median') myMap.add_ee_layer(change_locations.randomVisualizer().updateMask(red_change_mask), {'opacity': 0.75}, 'Change Locations') # Add a layer control panel to the map. myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) ###Output _____no_output_____ ###Markdown Explore Segmentation Cluster Params ###Code myMap = folium.Map(location=center_map, zoom_start=16, height=900) myMap.add_ee_layer(ps_median, {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) cluster_sizes = [10, 15, 20, 25, 30,35,40,50,100] for size in cluster_sizes: print("Size: %s, Neighborhood: %s" % (size, size * 2)) seeds = ee.Algorithms.Image.Segmentation.seedGrid(size); #8 was good # 5 compactness = [0.8] for c in compactness: # Run SNIC on the regular square grid. snic = ee.Algorithms.Image.Segmentation.SNIC( size=size, image= ps_median, compactness= c, # was 5 connectivity= 8, # was 8 neighborhoodSize=size*4, #16was 3, seeds= seeds ) snic = snic.select(['b1_mean', 'b2_mean', 'b3_mean', 'b4_mean', 'clusters'], ['b1', 'b2', 'b3', 'b4', 'clusters']) clusters = snic.select('clusters') myMap.add_ee_layer(clusters.randomVisualizer(), None, 'Cluster %s, Compactness %s' % (size,c)) # train = ps_median.sample(aoi) # test_seg_classifier = ee.Clusterer.wekaCascadeKMeans().train(train,inputProperties=ps_median.bandNames().getInfo()) # unsup_seg = ps_median.cluster(test_seg_classifier) # myMap.add_ee_layer(unsup_seg.randomVisualizer(), None, 'Unsupervised Image Seg') # buildings = classes.select('landclass').eq(1).selfMask() # myMap.add_ee_layer(buildings, {"palette":['grey']}, 'Buildings') myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) ###Output _____no_output_____ ###Markdown Building and Canopy Extraction ###Code dem = ee.Image("USGS/NED").select('elevation') dsm = ee.Image("JAXA/ALOS/AW3D30/V2_2").select('AVE_DSM') buildings_canopy = dsm.subtract(dem)#.addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256).rename('object_height') # Create a folium map object. myMap = folium.Map(location=center_map, zoom_start=12, height=500) # Add the elevation model to the map object. planet_scope_mosaic = planet_scope.median() planet_scope_mosaic_log = planet_scope.median().log() planet_scope_mosaic_vis_params = {"bands": ['b3','b2','b1'], 'min':5.67, 'max': 8.39} myMap.add_ee_layer(planet_scope_mosaic_log.updateMask(aoi_mask), planet_scope_mosaic_vis_params, 'PlanetScope Log Median') myMap.add_ee_layer(buildings_canopy.updateMask(aoi_mask), {"min": 1, "palette": ['red','orange','yellow', 'purple','lime', 'green'], "opacity": 0.7}, "Building and Canopy") myMap.add_ee_layer( ps_collection_obcd["before"], planet_scope_mosaic_vis_params, 'PlanetScope Before') # Add a layer control panel to the map. myMap.add_child(folium.LayerControl()) # Display the map. 
display(myMap) ###Output _____no_output_____ ###Markdown Classify Data ###Code # https://gis.stackexchange.com/questions/273658/performing-object-based-image-classification-in-google-earth-engine ps_collection_obcd_classified = {} ps_collection_obcd_metrics = {} def classifyImage(image, aoi, scale=3): # for image in ps_collection_obcd: print("Lable: " + image) image_label = image geometry = aoi scale = scale bands = ['b1', 'b2', 'b3', 'b4'] # Covert 16-bit unsigned integer to 8-bit unsigned img = ps_collection_obcd[image].select(bands) size = 30 #seeds = ee.Algorithms.Image.Segmentation.seedGrid(size); #8 was good # 5 # Run SNIC on the regular square grid. #snic = ee.Algorithms.Image.Segmentation.SNIC( # image= img, # compactness= 0.8, # was 5 # connectivity= 8, # was 8 # neighborhoodSize=int(size*5), #16was 3, # seeds= seeds #) #snic = snic.select(['b1_mean', 'b2_mean', 'b3_mean', 'b4_mean', 'clusters'], ['b1', 'b2', 'b3', 'b4', 'clusters']) #clusters = snic.select('clusters') # Compute per-cluster stdDev. #stdDev = img.addBands(clusters).reduceConnectedComponents(ee.Reducer.stdDev(), 'clusters', 256) ndviMedian = createNDVI(img) #.addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) ndwiMedian = createNDWI(img)#.addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) bsiMedian = createBSI(img)#.addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) # dsbiMedian = createDSBI(img).addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) # Low pass filter medianPixel = img.reduceNeighborhood(reducer=ee.Reducer.median(), kernel=ee.Kernel.square(5)) stdPixel = img.reduceNeighborhood(reducer=ee.Reducer.stdDev(), kernel=ee.Kernel.square(5)) minMax = img.reduceNeighborhood(reducer=ee.Reducer.minMax(), kernel=ee.Kernel.square(5)) # canny = ee.Algorithms.CannyEdgeDetector(image=img, threshold= 10, sigma= 1) # hough = ee.Algorithms.HoughTransform(canny, 256, 600, 100) # ndbsivMedian = createBSI_NDVI_index(bsiMedian.select('bsi'),ndviMedian.select('ndvi')).addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) # laplacian8 = ee.Kernel.laplacian8(magnitude, normalize) glcm_nir = img.select('b3') # GLCM only works on Images 32bit and lower glcm = glcm_nir.toInt32().glcmTexture(size=3)#.addBands(clusters).reduceConnectedComponents(ee.Reducer.median(), 'clusters',256) #Area, Perimeter, Width and Height #area = ee.Image.pixelArea().addBands(clusters).reduceConnectedComponents(ee.Reducer.sum(), 'clusters', 256) #minMax = clusters.reduceNeighborhood(ee.Reducer.minMax(), ee.Kernel.square(1)); #perimeterPixels = minMax.select(0).neq(minMax.select(1)).rename('perimeter'); #perimeter = perimeterPixels.addBands(clusters).reduceConnectedComponents(ee.Reducer.sum(), 'clusters', 256); #sizes = ee.Image.pixelLonLat().addBands(clusters).reduceConnectedComponents(ee.Reducer.minMax(), 'clusters', 256) #width = sizes.select('longitude_max').subtract(sizes.select('longitude_min')).rename('width') #height = sizes.select('latitude_max').subtract(sizes.select('latitude_min')).rename('height') #gives metrics of shape smoothness (low) or roughtness (high) #shape_index = perimeter.divide(area.sqrt().multiply(4)) #Chapter 9 p.419 IDIP objectPropertiesImage = ee.Image.cat([ img.select(['b1', 'b4']), medianPixel.select(['b1_median', 'b4_median']), stdPixel.select(['b1_stdDev','b4_stdDev']), # hough, minMax.select(['b1_min','b1_max', 'b4_min', 'b4_max']), # stdDev.select(['b3', 'b4']), 
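        # (Descriptive note, added) The stacked inputs above contribute raw
        # reflectance (b1, b4) plus neighborhood median, standard deviation and
        # min/max texture computed with an ee.Kernel.square(5) window; the
        # entries that follow add spectral indices (NDVI, NDWI, BSI) and GLCM
        # texture bands so the classifier can separate spectrally similar
        # classes (e.g. buildings vs. bare soil) by texture as well as colour.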
ndviMedian.select('ndvi'), ndwiMedian.select('ndwi'), # dsbiMedian.select('dsbi'), bsiMedian.select('bsi'), # ndbsivMedian.select('ndbsiv'), glcm.select('b3_contrast', 'b3_asm', 'b3_corr'), # area, # perimeter, # width, # height, # shape_index # buildings_canopy.clipToCollection(aoi) ]).float(); print("objects properties image created") #Mean center the data to enable a faster covariance reducer #and an SD stretch of the principal components. bandNames = objectPropertiesImage.bandNames().getInfo() meanDict = objectPropertiesImage.reduceRegion(reducer= ee.Reducer.mean(), geometry = geometry, scale=scale, maxPixels=1e12, bestEffort=True, tileScale=16) means = ee.Image.constant(meanDict.values(bandNames)) centered = objectPropertiesImage.subtract(means); print("PCA:") # pcImage = getPrincipalComponents(objectPropertiesImage, scale, geometry).float() pcImage = objectPropertiesImage trainingPartition = pcImage.addBands(classes_w_barren_change_mask.select('landclass')).stratifiedSample(numPoints=20000, classBand='landclass', classValues=[0,1,2,3,4,5,6], # classPoints=[1000,1000,1000,1000,1000,1000,1000], #[0,2000,2500,5000,5000,0,2000], classPoints=[0,7540, 3530, 1400, 3750, 0, 2580], scale=scale, # tileScale=4, seed=10, region=aoi) # trainingPartition = pcImage.sampleRegions( # collection= combined_feature_collection, # properties=['landclass'], # scale=scale) print("training samples genereated") withRandom = trainingPartition.randomColumn(); split = 0.7 # Roughly 70% training, 30% testing. trainingPartition = withRandom.filter(ee.Filter.lt('random', split)) testingPartition = withRandom.filter(ee.Filter.gte('random',split)) #Train from all sample data and test from clusters # if image == "after": # pcImage = pcImage.updateMask(change_locations.select('labels')) # testingPartition = pcImage.addBands(classes_w_barren_change_mask.select('landclass')).stratifiedSample(numPoints=10000, # classBand='landclass', # classValues=[0,1,2,3,4,5,6], # classPoints=stratifed_samples_sizes, # scale=scale, # tileScale=4, # seed=40, # region=aoi) white_list_features = pcImage.bandNames().getInfo() print(white_list_features) def autoMLRF(image, training, testing): white_list_features = image.bandNames().getInfo() num_trees = [10,50,100,250,500] #,1000] winner_score = 0 winner = None winner_trees = 0 for t in num_trees: classifier = ee.Classifier.smileRandomForest(numberOfTrees=t).train(features=training, classProperty='landclass',inputProperties= white_list_features) validated = testing.classify(classifier) testAccuracy = validated.errorMatrix('landclass', 'classification') overallAccuracy = testAccuracy.accuracy().getInfo() print("Trees: %s, OA: %s" %(t, overallAccuracy)) if winner_score < overallAccuracy: winner_score = overallAccuracy winner = classifier winner_trees = t print("Winner had Trees: %s, OA: %s" %(winner_trees, winner_score)) # Get the most important features white_list_feature_importance = createFeatureImportanceBarChart(winner, image_label).tolist() print(white_list_feature_importance) # Rerun winning classifier with important features # winner = ee.Classifier.smileRandomForest(winner_trees).train(features=training, classProperty='landclass',inputProperties= white_list_features) return winner classifier = autoMLRF(pcImage, trainingPartition,testingPartition) #Classify the validation data. 
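    # (Descriptive note, added) autoMLRF() above is a small model-selection
    # loop: it trains smileRandomForest classifiers with 10-500 trees on the
    # ~70% training split, scores each on the ~30% testing split by overall
    # accuracy, plots feature importances for the winner, and returns the best
    # classifier. Only overall accuracy drives the tree-count choice; the
    # per-class accuracies are reported further below via the error matrix but
    # do not influence the selection.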
validated = testingPartition.classify(classifier) #post-processing # water = classes_w_barren.select('landclass').eq(5).selfMask() # roads = road_class.select('landclass').eq(0).selfMask().clipToCollection(aoi) # notWater = classes_w_barren.select('landclass').neq(5).selfMask() # validated = validated.where(water, water).where(roads, roads) #Get a confusion matrix representing expected accuracy. testAccuracy = validated.errorMatrix('landclass', 'classification') print(' Resubstitution error matrix: ', testAccuracy.getInfo()) print(' Training overall accuracy: ', testAccuracy.accuracy().getInfo()) print(' Consumers accuracy: ', testAccuracy.consumersAccuracy().getInfo()) print(' Producers accuracy: ', testAccuracy.producersAccuracy().getInfo()) print(' Kappa: ', testAccuracy.kappa().getInfo()) # createConfusionMatixFigure(testAccuracy, label=image) print("Classifier Created") #Classify the image with the same bands used for training. #Mask out water since we are adding that in post processing classified = pcImage.select(white_list_features).classify(classifier) #Apply median low-pass filter to smooth results # post_classification = post_classification.focal_mode(1,'square', 'meters') #Add water and roads from OSM to classification output post_classification = ( classified # .where(classes_w_barren.select('landclass').eq(5), water.multiply(5)) .where(water_class, 5) .where(road_class.select('landclass').eq(0),0) ) print("Image Classified") ps_collection_obcd_metrics[image] = {} ps_collection_obcd_metrics[image]["training"] = trainingPartition ps_collection_obcd_metrics[image]["testing"] = testingPartition ps_collection_obcd_metrics[image]["rawClassification"] = classified ps_collection_obcd_metrics[image]["classifier"] = classifier ps_collection_obcd_metrics[image]["accuracy"] = testAccuracy ps_collection_obcd_classified[image] = post_classification return classified # for image in ps_collection_obcd: before_classified = classifyImage('before', aoi_bbox,scale=3) # exportEarthEngineImage( # image=classifyImage('before', aoi_bbox), # desc="before_image_classified", # imageName="before_image_classified", # region=aoi_bbox, # saveLocation="CloudStorage") createConfusionMatixFigure(ps_collection_obcd_metrics["before"]["accuracy"], label="before") after_classified = classifyImage('after', aoi_bbox, scale=3) print(ps_collection_obcd_metrics) createConfusionMatixFigure(ps_collection_obcd_metrics["after"]["accuracy"], label="after") test = ps_collection_obcd_metrics["after"]["training"].toList(10000).getInfo() type(test) list_of_properties = [f['properties'] for f in test] df = pd.DataFrame(list_of_properties) df["landcover"] = df['landclass'].apply(str).map({ "0": 'road', "1": 'building', "2": 'barren', "3": 'forest', "4": 'grass', "5": 'water', "6": 'developed' }) df.head(5) df.groupby("landcover").describe() pair_plot_palette = landcover_color_palette[1:5] + landcover_color_palette[6:7] sns.set_palette(sns.color_palette(["#" + c for c in pair_plot_palette])) sns.pairplot(df[['ndvi', 'ndwi', 'b1', 'b4', 'bsi','b4_median','b4_min', 'landcover']], hue='landcover') # ['pc3', 'pc7', 'pc10', 'pc13', 'pc11'] # sns.pairplot(df[['pc12', 'pc4', 'pc3', 'pc11', 'pc6', 'landcover']], hue="landcover") # water = classes.select('landclass').eq(5).selfMask() # barren = classes.select('landclass').eq(2).selfMask() # classified = classified.cat(water) center_map = [cary_park.centroid().getInfo()['coordinates'][1],cary_park.centroid().getInfo()['coordinates'][0]] myMap = folium.Map(location=center_map, zoom_start=15, 
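                       # (Descriptive note, added) The constructor continues on the
                       # next line; the layers added after it overlay the classified
                       # before/after maps (landcover palette, 0.6 opacity) on the
                       # PlanetScope mosaics.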
height=900) # myMap.add_ee_layer(water, {"palette": ['blue']}, 'water') for layer in ps_collection_obcd_classified: myMap.add_ee_layer(ps_collection_obcd[layer].updateMask(aoi_mask), {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) # for from_image_key in ps_collection_obcd_classified: myMap.add_ee_layer(ps_collection_obcd_classified["before"].updateMask(aoi_mask), {"min": 0, "max": 6, "palette": landcover_color_palette, "opacity": 0.6}, "%s" % "before") myMap.add_ee_layer(ps_collection_obcd_classified["after"].updateMask(aoi_mask), {"min": 0, "max": 6, "palette": landcover_color_palette, "opacity": 0.6}, "%s" % "after") # nlcd = ee.Image('USGS/NLCD/NLCD2016').clip(aoi) # myMap.add_ee_layer(nlcd.select('landcover'), {}, 'NLCD') # myMap.add_ee_layer(change_locations.randomVisualizer(), {}, "Binary Change Mask") # myMap.add_ee_layer(barren, {"palette": ['olive']}, 'barren') myMap.add_child(folium.LayerControl()) # Display the map. display(myMap) ###Output _____no_output_____ ###Markdown Export Classified Maps ###Code # exportToDrive(ps_collection_obcd['before'],"classified_before_30m", resolution=30) # exportToDrive(ps_collection_obcd['after'],"classified_after_30m", resolution=30) print(f"Before Classified: {ps_collection_obcd_classified['before'].bandNames().getInfo()}") print(f"After Classified: {ps_collection_obcd_classified['after'].bandNames().getInfo()}") ee.batch.Export.image.toDrive( image=ps_collection_obcd_classified['before'], folder=figures_save_location, description='classified_before_3m', fileNamePrefix='classified_before_3m', scale=3, fileFormat= 'GeoTIFF', region=aoi_bbox.geometry(), formatOptions= {"cloudOptimized": True}).start() ee.batch.Export.image.toDrive( image=ps_collection_obcd_classified['after'], folder=figures_save_location, description='classified_after_3m', fileNamePrefix='classified_after_3m', scale=3, fileFormat= 'GeoTIFF', region=aoi_bbox.geometry(), formatOptions= {"cloudOptimized": True}).start() ###Output _____no_output_____ ###Markdown Generate Thematic Change Maps ###Code ps_thematic_change_collection_obcd = {} for from_image_key in ps_collection_obcd_classified: from_image = ps_collection_obcd_classified[from_image_key] for to_image_key in ps_collection_obcd_classified: if from_image_key != to_image_key: to_image = ps_collection_obcd_classified[to_image_key] thematic_change_key = "from_%s_to_%s" % (from_image_key, to_image_key) thematic_change_image = generateThematicChangeImage(from_image, to_image, thematic_change_expression) ps_thematic_change_collection_obcd[thematic_change_key] = thematic_change_image expression = "" for index, row in df_change_classes.iterrows(): expression = expression + "(b('constant') == {}) ? 
{} :".format(row['ClassID'],row["priority"]) expression = expression + " 0" print(expression) print(change_locations.bandNames().getInfo()) def getImageTotalArea(image): print("AreaImage Start") areaImage = image.multiply(0).rename('area') print("AreaImage End") totalArea = ee.Number(areaImage.add(1).reduceRegion( reducer= ee.Reducer.sum(), geometry= aoi, scale= 30, maxPixels= 1e12).get('area')) return totalArea.divide(1e6).getInfo() print("totalArea: {} km2".format(getImageTotalArea(ps_median.select('b3')))) print("Total Change Area: {} km2".format(getImageTotalArea(change_locations.select('b3')))) priority_change = (ps_thematic_change_collection_obcd['from_before_to_after'] .expression(expression) .select(['constant'],['priority'])) #.clip(aoi) #.updateMask(change_locations.select('labels')) print("priority_change") print(priority_change.bandNames().getInfo()) exportToDrive(priority_change,"priority_change_30m", resolution=30) def printMinMax(image, imageName): print("{0}:{1}".format(imageName, image.reduceRegion(reducer= ee.Reducer.minMax(), geometry=aoi, scale=30,maxPixels= 1e16).getInfo())) priority_change_objectId = priority_change.addBands(change_locations.select("labels")) objectSize = priority_change_objectId.select('labels').connectedPixelCount(128,False) print("objectSize: {}".format(objectSize.bandNames().getInfo())) # printMinMax(objectSize,"objectSize") # Get a pixel area image. # Make sure pixels are set to correct scale 3m = 9m2 pixelArea = priority_change_objectId.select('labels').multiply(0).add(9).rename('area') # pixelArea = ee.Image.pixelArea() printMinMax(pixelArea,"pixelArea") # Multiply pixel area by the number of pixels in an object to calculate # the object area. The result is an image where each pixel # of an object relates the area of the object in m^2. 
objectArea = objectSize.multiply(pixelArea).select(['labels'], ['area']) print("objectArea: {}".format(objectArea.bandNames().getInfo())) printMinMax(objectArea,"objectArea") print("priority_change_objectId: {}".format(priority_change_objectId.bandNames().getInfo())) #Get the mean of the change areas priority and multiple it by the objects area objectPriority = priority_change_objectId.reduceConnectedComponents(reducer=ee.Reducer.mean(),labelBand= 'labels') print("objectPriority: {}".format(objectPriority.bandNames().getInfo())) # printMinMax(objectPriority,"objectPriority") reducer = ee.Reducer.mean().splitWeights() objectPriorityWeighted = priority_change_objectId.addBands(objectArea.select(['area'])).reduceConnectedComponents(reducer=reducer,labelBand= 'labels') print("objectPriorityWeighted: {}".format(objectPriorityWeighted.bandNames().getInfo())) # printMinMax(objectPriority,"objectPriority") # objectPriorityWeighted = objectPriority.divide(objectSize) # print("objectPriorityWeighted: {}".format(objectPriorityWeighted.bandNames().getInfo())) # printMinMax(objectPriorityWeighted,"objectPriorityWeighted") priorityQueue = objectPriority.addBands([priority_change_objectId.select('labels'),objectArea.select('area')]).reduceConnectedComponents(reducer=ee.Reducer.product(),labelBand= 'labels') print("priorityQueue: {}".format(priorityQueue.bandNames().getInfo())) # printMinMax(priorityQueue,"priorityQueue") priorityQueueMutli = objectPriority.multiply(objectArea) print("priorityQueueMutli: {}".format(priorityQueueMutli.bandNames().getInfo())) # printMinMax(priorityQueueMutli,"priorityQueueMutli") myMap = folium.Map(location=center_map, zoom_start=16, height=900) for layer in ps_collection_obcd: myMap.add_ee_layer(ps_collection_obcd[layer], {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) # Priority Map priority_change_viz = {"min": 0, "max": 7, 'palette': ['feebe2','fcc5c0','fa9fb5','f768a1','dd3497','ae017e','7a0177']} myMap.add_ee_layer(priority_change, priority_change_viz, "Priority Change") # Mean Object Priority priority_queue_viz = {'opacity': 1, 'palette': ["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"]} myMap.add_ee_layer(objectPriority.select('priority'), priority_queue_viz, 'Object Priority') myMap.add_ee_layer(objectPriorityWeighted.select('mean'), priority_queue_viz, 'Object Priority Weighted') # Obejct Size eg number of pixels object_size_viz = {'min':52, 'max':128,'opacity': 1, 'palette': ["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"]} myMap.add_ee_layer(objectSize, object_size_viz, 'Object Size') # Object Area object_area_viz = {'min':468, 'max':1152,'opacity': 1, 'palette': ["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"]} myMap.add_ee_layer(objectArea, object_area_viz, 'Object Area') priority_queue_viz = {'opacity': 1, 'palette': ["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"]} myMap.add_ee_layer(priorityQueue.select('priority'), priority_queue_viz, 'Priority Queue') myMap.add_ee_layer(priorityQueueMutli.select('priority'), priority_queue_viz, 'Priority Queue Multi') myMap.add_child(folium.LayerControl()) # Display the map. 
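# --- Hedged note (added; not part of the original map) -----------------------
# Layer guide for the priority layers added above:
#   'Object Priority'             - per-object mean of the pixel priority codes
#   'Object Priority Weighted'    - area-weighted per-object mean (splitWeights)
#   'Object Size' / 'Object Area' - connected pixel count and its area in m^2
#   'Priority Queue' / 'Priority Queue Multi' - two attempts at combining the
#       per-object priority with object area (product reducer vs. image multiply)
# A [0, 1] stretch of the multiplied score, analogous to the unitScale call
# applied later to patchTempWeighted, could look like this (the 0-1000 range is
# an illustrative placeholder, not measured from the data):
priority_stretch_sketch = priorityQueueMutli.select('priority').unitScale(0, 1000)
# ------------------------------------------------------------------------------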
display(myMap) print(priority_change.bandNames().getInfo()) print(priorityQueueMutli.bandNames().getInfo()) printMinMax(priorityQueueMutli,"priorityQueueMutli") print(priorityQueueMutli.getInfo()) ee.batch.Export.image.toDrive( image=priorityQueue, folder=figures_save_location, description='priority_queue_30m', fileNamePrefix='priority_queue_30m', # fileNamePrefix='gee_priority_queue_multi_30m', scale=30, fileFormat= 'GeoTIFF', region=aoi.geometry(), # region=cary_park, formatOptions= {"cloudOptimized": True}).start() ee.batch.Export.image.toDrive( image=priority_change, folder=figures_save_location, description='priority_change_30m', fileNamePrefix='priority_change_30m', # fileNamePrefix='gee_priority_queue_multi_30m', scale=30, fileFormat= 'GeoTIFF', region=aoi.geometry(), # region=cary_park, formatOptions= {"cloudOptimized": True}).start() priority_queue_viz = { 'palette': ["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"] } ee.batch.Export.image.toDrive( # image=priority_change.select('priority'), # image=priorityQueue, # image=priorityQueue.addBands(change_locations.select("labels")).toDouble(), image=priorityQueueMutli, #.addBands(change_locations.select("labels")).toDouble(), folder=figures_save_location, description='priorit30myQueueMutli30m', fileNamePrefix='priorityQueueMutli30m', # fileNamePrefix='gee_priority_queue_multi_30m', scale=30, fileFormat= 'GeoTIFF', region=aoi.geometry(), # region=cary_park, formatOptions= {"cloudOptimized": True}).start() priorityQueueMutli.getDownloadURL({ "palette":["ffffb2","fecc5c","fd8d3c","f03b20","FF0000"], "bands":["priority"], "scale": 30, "region":aoi.geometry()}) # priority_change_connected_components = priority_change.connectedComponents(ee.Kernal.square(10, 'pixels', True)) #Uniquely label the hotspot image objects. hotspots = priority_change.updateMask(priority_change.gt(5)) objectId = hotspots.connectedComponents(connectedness = ee.Kernel.square(1),maxSize = 128) #Compute the number of pixels in each object defined by the "labels" band. objectSize = objectId.select('labels').connectedPixelCount(maxSize= 128, eightConnected= True) #Get a pixel area image. pixelArea = ee.Image.pixelArea() #Multiply pixel area by the number of pixels in an object to calculate #the object area. The result is an image where each pixel #of an object relates the area of the object in m^2. objectArea = objectSize.multiply(pixelArea) areaMask = objectArea.gte(400) # Make a suitable image for `reduceConnectedComponents()` by adding a label # band to the `kelvin` temperature image. hotspots = hotspots.addBands(objectId.select(['labels'])) #Calculate the mean temperature per object defined by the previously added #"labels" band. 
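# (Descriptive note, added) The "temperature"/"kelvin" wording in the comments
# above appears to be carried over from the Earth Engine hot-spot example this
# cell was adapted from; here the per-object mean computed below is taken over
# the priority codes of pixels with priority > 5, and `areaMask` (objects of at
# least 400 m^2) is applied a step later when the area-weighted version is built.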
patchTemp = hotspots.reduceConnectedComponents(reducer = ee.Reducer.mean(),labelBand = 'labels',maxSize = 128) # print(patchTemp.bandNames().getInfo()) #weight x area patchTempWeighted = patchTemp.multiply(objectArea).updateMask(areaMask) filter_geom = centenial # Normalize values between [0, 1] reducedDict = patchTempWeighted.reduceRegion(ee.Reducer.minMax(), filter_geom, scale=3, bestEffort= True, tileScale=16, crs='EPSG:3857',maxPixels= 1e12) # stdDev = ee.Number(image.reduceRegion( # reducer= ee.Reducer.stdDev(), # geometry= region, # scale= scale, # maxPixels= 1e12).get(band)); # print(reducedDict.getInfo()) minValue = reducedDict.get("constant_min").getInfo() maxValue = reducedDict.get("constant_max").getInfo() print(minValue) print(maxValue) normalizedPriorityChange = patchTempWeighted.unitScale(minValue, maxValue) # Get AOI centroid center_map = [filter_geom.centroid().getInfo()['coordinates'][1],filter_geom.centroid().getInfo()['coordinates'][0]] # Create a folium map object. myMap = folium.Map(location=center_map, zoom_start=16, height=900) for layer in ps_collection_obcd_classified: myMap.add_ee_layer(ps_collection_obcd[layer], {"bands":["b3", "b2", "b1"], "min": 366, "max": 2617,"gamma":2}, "PS %s" % layer) # for from_image_key in ps_collection_obcd_classified: # myMap.add_ee_layer(ps_collection_obcd_classified[from_image_key], {"min": 0, "max": 5, "palette": landcover_color_palette, "opacity": 0.4}, "%s" % from_image_key) # thematic_change = ps_thematic_change_collection_obcd['from_before_to_after'] mask = priority_change.updateMask(priority_change.neq(0)) mask = mask.focal_median(3, 'square') myMap.add_ee_layer(mask.updateMask(mask.gt(3)), {"palette": ['green','yellow','orange','red', 'purple'], "opacity":0.5}, "Priority Change") high_priority_mask = priority_change.updateMask(priority_change.eq(7)) myMap.add_ee_layer(high_priority_mask, {"palette": ['purple'], "opacity":0.7}, "High Priority Change") myMap.add_ee_layer(patchTempWeighted, {"palette": ['ffffd9','edf8b1','c7e9b4','7fcdbb','41b6c4','1d91c0','225ea8','253494','081d58'], "opacity":0.7}, 'patchTempWeighted') myMap.add_ee_layer(normalized_priority_change, {"min": 0, "max": 1, "palette": ['ffffd9','edf8b1','c7e9b4','7fcdbb','41b6c4','1d91c0','225ea8','253494','081d58'], "opacity":0.7}, 'normalizedpatchTempWeighted') # myMap.add_ee_layer(normalized_priority_change, {"min": 0, "max": 1, "palette": ['orange', 'purple'], "opacity":0.7}, 'patchTempWeighted Orange') myMap.add_ee_layer(binary_change_mask_red.updateMask(red_change_mask), {"max": 1, 'min': 0, "palette": ['orange', 'purple'], 'opacity': 0.75}, 'Red Binary') # normalized = normalizedPriorityChange.focal_median(500,'square','meters') # myMap.add_ee_layer(normalized, {"min": 0, "max": 1, "palette": ['ffffd9','edf8b1','c7e9b4','7fcdbb','41b6c4','1d91c0','225ea8','253494','081d58'], "opacity":0.7}, 'patchTempWeighted normalized image') # folium.GeoJson(aoi.toGeoJSON(), name="Study Area").add_to(myMap) # Add a layer control panel to the map. myMap.add_child(folium.LayerControl()) # Display the map. 
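# (Descriptive note, added) `normalizedPriorityChange` above rescales the
# area-weighted patch priorities to [0, 1] using the min/max observed inside
# `filter_geom` (set here to the `centenial` geometry), so values near 1 mark
# the largest / highest-priority change patches within that sub-area rather
# than across the whole AOI.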
display(myMap)
 class_vis_Params = {'min':0, 'max':1, 'dimensions': 1000, "palette": ['ffffd9','edf8b1','c7e9b4','7fcdbb','41b6c4','1d91c0','225ea8','253494','081d58']}
# Thumbnail of the normalized priority surface, clipped to the AOI
Image(url=normalizedPriorityChange.clip(aoi)
  .getThumbUrl(class_vis_Params))
###Output
 _____no_output_____
###Markdown
 Export Data to Google Cloud Storage
###Code
#Export High Priority Change
exportEarthEngineImage(
    image=normalizedPriorityChange.visualize(bands="constant", min=0, max=1, palette=['yellow','orange', 'red', 'purple']),
    desc="HighPriorityChangeMapCentenial",
    imageName="HighPriorityChangeCentenial",
    region=centenial,
    saveLocation="CloudStorage")

#Export AOI
for layer in ps_collection_obcd:
    exportEarthEngineImage(
        image=ps_collection_obcd[layer],
        desc="AOI %s" % layer,
        imageName="AOI_%s" % layer,
        region=aoi,
        saveLocation="CloudStorage")

#Export LULC Maps
for from_image_key in ps_collection_obcd_classified:
    exportEarthEngineImage(
        image=ps_collection_obcd_classified[from_image_key],
        desc="LULC %s" % from_image_key,
        imageName="LULC_%s" % from_image_key,
        region=aoi,
        saveLocation="CloudStorage")

#Export raw before-to-after thematic change map
exportEarthEngineImage(
    image=ps_thematic_change_collection_obcd['from_before_to_after'],
    desc="Thematic Change Map Raw",
    imageName="Raw_Thematic_Change",
    region=aoi,
    saveLocation="CloudStorage")
###Output
 _____no_output_____
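###Markdown
 Monitor Export Tasks (added sketch) The cells above start several export tasks. The helper below is a hedged sketch for polling their status from the notebook; `ee.batch.Task.list()` and `task.status()` are part of the public Earth Engine Python API, but the function name, polling interval, and print-based reporting are illustrative choices, not taken from the original notebook.
###Code
import time

def wait_for_exports(poll_seconds=30):
    """Print the state of all active Earth Engine export tasks until none remain."""
    while True:
        statuses = [t.status() for t in ee.batch.Task.list()]
        active = [s for s in statuses if s.get('state') in ('READY', 'RUNNING')]
        if not active:
            print('All export tasks finished.')
            break
        for s in active:
            print(s.get('description'), s.get('state'))
        time.sleep(poll_seconds)

# wait_for_exports()  # uncomment to block the notebook until the exports complete
###Output
 _____no_output_____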